From 2197533b9b04a76f275e23a24449687089a1c528 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 4 Jun 2001 17:39:47 -0500 Subject: [PATCH 01/33] manual.texi @xref{safe_mysqld} manual.texi -> @xref{safe_mysqld, @code{safe_mysqld}} manual.texi Unixes -> versions of Unix (or similar edit) manual.texi @xref{mysqlxxx} -> @xref{mysqlxxx, @code{mysqlxxx}} Docs/manual.texi: @xref{safe_mysqld} -> @xref{safe_mysqld, @code{safe_mysqld}} BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + Docs/manual.texi | 63 ++++++++++++++++++++++------------------ 2 files changed, 35 insertions(+), 29 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index e694ec83285..0ef0a6a42c7 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -5,3 +5,4 @@ mwagner@evoq.mwagner.org paul@central.snake.net sasha@mysql.sashanet.com serg@serg.mysql.com +paul@teton.kitebird.com diff --git a/Docs/manual.texi b/Docs/manual.texi index 8f45392d3d4..abfd1806028 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -2135,7 +2135,7 @@ The server can provide error messages to clients in many languages. @item Clients may connect to the @strong{MySQL} server using TCP/IP Sockets, -Unix Sockets (Unixes), or Named Pipes (NT). +Unix Sockets (Unix), or Named Pipes (NT). @item The @strong{MySQL}-specific @code{SHOW} command can be used to retrieve @@ -5247,7 +5247,7 @@ clients can connect to both @strong{MySQL} versions. The extended @strong{MySQL} binary distribution is marked with the @code{-max} suffix and is configured with the same options as -@code{mysqld-max}. @xref{mysqld-max}. +@code{mysqld-max}. @xref{mysqld-max, @code{mysqld-max}}. If you want to use the @code{MySQL-Max} RPM, you must first install the standard @code{MySQL} RPM. @@ -5588,8 +5588,8 @@ indicates the type of operating system for which the distribution is intended @item If you see a binary distribution marked with the @code{-max} prefix, this means that the binary has support for transaction-safe tables and other -features. @xref{mysqld-max}. Note that all binaries are built from -the same @strong{MySQL} source distribution. +features. @xref{mysqld-max, @code{mysqld-max}}. Note that all binaries +are built from the same @strong{MySQL} source distribution. @item Add a user and group for @code{mysqld} to run as: @@ -5601,8 +5601,8 @@ shell> useradd -g mysql mysql These commands add the @code{mysql} group and the @code{mysql} user. The syntax for @code{useradd} and @code{groupadd} may differ slightly on different -Unixes. They may also be called @code{adduser} and @code{addgroup}. You may -wish to call the user and group something else instead of @code{mysql}. +versions of Unix. They may also be called @code{adduser} and @code{addgroup}. +You may wish to call the user and group something else instead of @code{mysql}. @item Change into the intended installation directory: @@ -5645,7 +5645,8 @@ programs properly. @xref{Environment variables}. @item scripts This directory contains the @code{mysql_install_db} script used to initialize -the server access permissions. +the @code{mysql} database containing the grant tables that store the server +access permissions. @end table @item @@ -5711,7 +5712,7 @@ You can start the @strong{MySQL} server with the following command: shell> bin/safe_mysqld --user=mysql & @end example -@xref{safe_mysqld}. +@xref{safe_mysqld, @code{safe_mysqld}}. @xref{Post-installation}. 
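Taken together, the binary-installation walkthrough that the hunks above touch amounts to roughly the following command sequence (a sketch only: the unpacking and ownership steps are assumed from the surrounding section, and @file{mysql-VERSION-OS.tar.gz} is a placeholder for the actual distribution file):

@example
shell> groupadd mysql
shell> useradd -g mysql mysql
shell> cd /usr/local
shell> gunzip < /path/to/mysql-VERSION-OS.tar.gz | tar xvf -
shell> ln -s mysql-VERSION-OS mysql
shell> cd mysql
shell> scripts/mysql_install_db
shell> chown -R root .
shell> chown -R mysql data
shell> chgrp -R mysql .
shell> bin/safe_mysqld --user=mysql &
@end example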
@@ -6115,8 +6116,8 @@ shell> useradd -g mysql mysql These commands add the @code{mysql} group, and the @code{mysql} user. The syntax for @code{useradd} and @code{groupadd} may differ slightly on different -Unixes. They may also be called @code{adduser} and @code{addgroup}. You may -wish to call the user and group something else instead of @code{mysql}. +versions of Unix. They may also be called @code{adduser} and @code{addgroup}. +You may wish to call the user and group something else instead of @code{mysql}. @item Unpack the distribution into the current directory: @@ -7670,13 +7671,13 @@ To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV signal, you can start @code{mysqld} with the @code{--core-file} option. Note that you also probably need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld} -with @code{--core-file-sizes=1000000}. @xref{safe_mysqld}. +with @code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}. To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV signal, you can start @code{mysqld} with the @code{--core-file} option. Note that you also probably need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld} with -@code{--core-file-sizes=1000000}. @xref{safe_mysqld}. +@code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}. If you are linking your own @strong{MySQL} client and get the error: @@ -8004,7 +8005,7 @@ shell> nohup mysqld [options] & @code{nohup} causes the command following it to ignore any @code{SIGHUP} signal sent from the terminal. Alternatively, start the server by running @code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you. -@xref{safe_mysqld}. +@xref{safe_mysqld, @code{safe_mysqld}}. If you get a problem when compiling mysys/get_opt.c, just remove the line #define _NO_PROTO from the start of that file! @@ -8261,7 +8262,8 @@ FreeBSD is also known to have a very low default file handle limit. safe_mysqld or raise the limits for the @code{mysqld} user in /etc/login.conf (and rebuild it with cap_mkdb /etc/login.conf). Also be sure you set the appropriate class for this user in the password file if you are not -using the default (use: chpass mysqld-user-name). @xref{safe_mysqld}. +using the default (use: chpass mysqld-user-name). @xref{safe_mysqld, +@code{safe_mysqld}}. If you get problems with the current date in @strong{MySQL}, setting the @code{TZ} variable will probably help. @xref{Environment variables}. @@ -9677,7 +9679,7 @@ mysqld: Can't find file: 'host.frm' The above may also happen with a binary @strong{MySQL} distribution if you don't start @strong{MySQL} by executing exactly @code{./bin/safe_mysqld}! -@xref{safe_mysqld}. +@xref{safe_mysqld, @code{safe_mysqld}}. You might need to run @code{mysql_install_db} as @code{root}. However, if you prefer, you can run the @strong{MySQL} server as an unprivileged @@ -9978,7 +9980,8 @@ system startup and shutdown, and is described more fully in @item By invoking @code{safe_mysqld}, which tries to determine the proper options -for @code{mysqld} and then runs it with those options. @xref{safe_mysqld}. +for @code{mysqld} and then runs it with those options. @xref{safe_mysqld, +@code{safe_mysqld}}. @item On NT you should install @code{mysqld} as a service as follows: @@ -10227,7 +10230,8 @@ though. @item --core-file Write a core file if @code{mysqld} dies. 
For some systems you must also -specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld}. +specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld, +@code{safe_mysqld}}. @item -h, --datadir=path Path to the database root. @@ -24347,7 +24351,7 @@ this. @xref{Table handler support}. If you have downloaded a binary version of @strong{MySQL} that includes support for BerkeleyDB, simply follow the instructions for installing a binary version of @strong{MySQL}. -@xref{Installing binary}. @xref{mysqld-max}. +@xref{Installing binary}. @xref{mysqld-max, @code{mysqld-max}}. To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL} Version 3.23.34 or newer and configure @code{MySQL} with the @@ -25459,7 +25463,7 @@ binary. If you have downloaded a binary version of @strong{MySQL} that includes support for InnoDB (mysqld-max), simply follow the instructions for installing a binary version of @strong{MySQL}. @xref{Installing binary}. -@xref{mysqld-max}. +@xref{mysqld-max, @code{mysqld-max}}. To compile @strong{MySQL} with InnoDB support, download MySQL-3.23.37 or newer and configure @code{MySQL} with the @code{--with-innodb} option. @@ -26237,7 +26241,7 @@ time will be longer. Also the log buffer should be quite big, say 8 MB. @strong{6.} (Relevant from 3.23.39 up.) -In some versions of Linux and other Unixes flushing files to disk with the Unix +In some versions of Linux and Unix, flushing files to disk with the Unix @code{fdatasync} and other similar methods is surprisingly slow. The default method InnoDB uses is the @code{fdatasync} function. If you are not satisfied with the database write performance, you may @@ -26518,11 +26522,11 @@ integer that can be stored in the specified integer type. In disk i/o InnoDB uses asynchronous i/o. On Windows NT it uses the native asynchronous i/o provided by the operating system. -On Unixes InnoDB uses simulated asynchronous i/o built +On Unix, InnoDB uses simulated asynchronous i/o built into InnoDB: InnoDB creates a number of i/o threads to take care of i/o operations, such as read-ahead. In a future version we will add support for simulated aio on Windows NT and native aio on those -Unixes which have one. +versions of Unix which have one. On Windows NT InnoDB uses non-buffered i/o. That means that the disk pages InnoDB reads or writes are not buffered in the operating system @@ -26533,7 +26537,7 @@ just define the raw disk in place of a data file in @file{my.cnf}. You must give the exact size in bytes of the raw disk in @file{my.cnf}, because at startup InnoDB checks that the size of the file is the same as specified in the configuration file. Using a raw disk -you can on some Unixes perform non-buffered i/o. +you can on some versions of Unix perform non-buffered i/o. There are two read-ahead heuristics in InnoDB: sequential read-ahead and random read-ahead. In sequential read-ahead InnoDB notices that @@ -33106,7 +33110,7 @@ with the @code{-max} prefix. This makes it very easy to test out a another @code{mysqld} binary in an existing installation. Just run @code{configure} with the options you want and then install the new @code{mysqld} binary as @code{mysqld-max} in the same directory -where your old @code{mysqld} binary is. @xref{safe_mysqld}. +where your old @code{mysqld} binary is. @xref{safe_mysqld, @code{safe_mysqld}}. The @code{mysqld-max} RPM uses the above mentioned @code{safe_mysqld} feature. 
It just installs the @code{mysqld-max} executable and @@ -33354,7 +33358,7 @@ MY_PWD=`pwd` Check if we are starting this relative (for the binary release) if test -d /data/mysql -a -f ./share/mysql/english/errmsg.sys -a -x ./bin/mysqld -------------------------------------------------------------------------- -@xref{safe_mysqld}. +@xref{safe_mysqld, @code{safe_mysqld}}. @end example The above test should be successful, or you may encounter problems. @item @@ -33882,7 +33886,7 @@ server). The dump will contain SQL statements to create the table and/or populate the table. If you are doing a backup on the server, you should consider using -the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy}. +the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy, @code{mysqlhotcopy}}. @example shell> mysqldump [OPTIONS] database [tables] @@ -39087,7 +39091,8 @@ If you want to make a SQL level backup of a table, you can use TABLE}. @xref{SELECT}. @xref{BACKUP TABLE}. Another way to back up a database is to use the @code{mysqldump} program or -the @code{mysqlhotcopy script}. @xref{mysqldump}. @xref{mysqlhotcopy}. +the @code{mysqlhotcopy script}. @xref{mysqldump, @code{mysqldump}}. +@xref{mysqlhotcopy, @code{mysqlhotcopy}}. @enumerate @item @@ -46390,8 +46395,8 @@ read by @code{mysql_options()}. Added new options @code{--pager[=...]}, @code{--no-pager}, @code{--tee=...} and @code{--no-tee} to the @code{mysql} client. The new corresponding interactive commands are @code{pager}, @code{nopager}, -@code{tee} and @code{notee}. @xref{mysql}, @code{mysql --help} and the -interactive help for more information. +@code{tee} and @code{notee}. @xref{mysql, @code{mysql}}, @code{mysql --help} +and the interactive help for more information. @item Fixed crash when automatic repair of @code{MyISAM} table failed. @item From 0b4000fe07b15d82f8542e8ab25d27f62b7471df Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 4 Jun 2001 18:53:20 -0500 Subject: [PATCH 02/33] manual.texi mysqlhotcopy reads [mysqlhotcopy] option group, manual.texi not [mysql-hot-copy]. Docs/manual.texi: mysqlhotcopy reads [mysqlhotcopy] option group, not [mysql-hot-copy]. --- Docs/manual.texi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index abfd1806028..fb7fa66479a 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -10570,7 +10570,7 @@ password=my_password no-auto-rehash set-variable = connect_timeout=2 -[mysql-hot-copy] +[mysqlhotcopy] interactive-timeout @end example From 5332aba47173a2d1c42822ba802f588feb9c4238 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 4 Jun 2001 20:05:21 -0500 Subject: [PATCH 03/33] manual.texi 1) earlier change to @xref{} used 2-arg form; change manual.texi to 3-arg form so last arg shows up in printed output. manual.texi 2) mysql.server no longer needs to use su or store the manual.texi root password. manual.texi 3) other misc small changes. Docs/manual.texi: earlier change to @xref{} used 2-arg form; change to 3-arg form so last arg shows up in printed output. mysql.server no longer needs to use su or store the root password. other misc small changes --- Docs/manual.texi | 143 ++++++++++++++++++++++++++--------------------- 1 file changed, 78 insertions(+), 65 deletions(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index fb7fa66479a..a85a24848bb 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -5247,7 +5247,7 @@ clients can connect to both @strong{MySQL} versions. 
The extended @strong{MySQL} binary distribution is marked with the @code{-max} suffix and is configured with the same options as -@code{mysqld-max}. @xref{mysqld-max, @code{mysqld-max}}. +@code{mysqld-max}. @xref{mysqld-max, , @code{mysqld-max}}. If you want to use the @code{MySQL-Max} RPM, you must first install the standard @code{MySQL} RPM. @@ -5588,7 +5588,7 @@ indicates the type of operating system for which the distribution is intended @item If you see a binary distribution marked with the @code{-max} prefix, this means that the binary has support for transaction-safe tables and other -features. @xref{mysqld-max, @code{mysqld-max}}. Note that all binaries +features. @xref{mysqld-max, , @code{mysqld-max}}. Note that all binaries are built from the same @strong{MySQL} source distribution. @item @@ -5712,7 +5712,7 @@ You can start the @strong{MySQL} server with the following command: shell> bin/safe_mysqld --user=mysql & @end example -@xref{safe_mysqld, @code{safe_mysqld}}. +@xref{safe_mysqld, , @code{safe_mysqld}}. @xref{Post-installation}. @@ -5784,7 +5784,7 @@ installation, you may want to make a copy of your previously installed @strong{MySQL} startup file if you made any changes to it, so you don't lose your changes.) -After installing the RPM file(s), the @file{mysqld} daemon should be running +After installing the RPM file(s), the @code{mysqld} daemon should be running and you should now be able to start using @strong{MySQL}. @xref{Post-installation}. @@ -5820,7 +5820,7 @@ files. The following sections indicate some of the issues that have been observed on particular systems when installing @strong{MySQL} from a binary -distribution. +distribution or from RPM files. @cindex binary distributions, on Linux @cindex Linux, binary distribution @@ -7671,13 +7671,13 @@ To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV signal, you can start @code{mysqld} with the @code{--core-file} option. Note that you also probably need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld} -with @code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}. +with @code{--core-file-sizes=1000000}. @xref{safe_mysqld, , @code{safe_mysqld}}. To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV signal, you can start @code{mysqld} with the @code{--core-file} option. Note that you also probably need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld} with -@code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}. +@code{--core-file-sizes=1000000}. @xref{safe_mysqld, , @code{safe_mysqld}}. If you are linking your own @strong{MySQL} client and get the error: @@ -8005,7 +8005,7 @@ shell> nohup mysqld [options] & @code{nohup} causes the command following it to ignore any @code{SIGHUP} signal sent from the terminal. Alternatively, start the server by running @code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you. -@xref{safe_mysqld, @code{safe_mysqld}}. +@xref{safe_mysqld, , @code{safe_mysqld}}. If you get a problem when compiling mysys/get_opt.c, just remove the line #define _NO_PROTO from the start of that file! @@ -8262,7 +8262,7 @@ FreeBSD is also known to have a very low default file handle limit. safe_mysqld or raise the limits for the @code{mysqld} user in /etc/login.conf (and rebuild it with cap_mkdb /etc/login.conf). 
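For illustration, a login class along the following lines raises the open-file limits for the server process (the capability names come from FreeBSD's @file{login.conf}; the class name and the limit values here are only examples):

@example
mysqld:\
        :openfiles-cur=4096:\
        :openfiles-max=4096:\
        :tc=default:
@end example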
Also be sure you set the appropriate class for this user in the password file if you are not -using the default (use: chpass mysqld-user-name). @xref{safe_mysqld, +using the default (use: chpass mysqld-user-name). @xref{safe_mysqld, , @code{safe_mysqld}}. If you get problems with the current date in @strong{MySQL}, setting the @@ -9679,7 +9679,7 @@ mysqld: Can't find file: 'host.frm' The above may also happen with a binary @strong{MySQL} distribution if you don't start @strong{MySQL} by executing exactly @code{./bin/safe_mysqld}! -@xref{safe_mysqld, @code{safe_mysqld}}. +@xref{safe_mysqld, , @code{safe_mysqld}}. You might need to run @code{mysql_install_db} as @code{root}. However, if you prefer, you can run the @strong{MySQL} server as an unprivileged @@ -9980,7 +9980,7 @@ system startup and shutdown, and is described more fully in @item By invoking @code{safe_mysqld}, which tries to determine the proper options -for @code{mysqld} and then runs it with those options. @xref{safe_mysqld, +for @code{mysqld} and then runs it with those options. @xref{safe_mysqld, , @code{safe_mysqld}}. @item @@ -10230,7 +10230,7 @@ though. @item --core-file Write a core file if @code{mysqld} dies. For some systems you must also -specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld, +specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld, , @code{safe_mysqld}}. @item -h, --datadir=path @@ -11953,9 +11953,10 @@ When running @strong{MySQL}, follow these guidelines whenever possible: @itemize @bullet @item DON'T EVER GIVE ANYONE (EXCEPT THE @strong{MySQL} ROOT USER) ACCESS TO THE -mysql.user TABLE! The encrypted password is the real password in -@strong{MySQL}. If you know this for one user, you can easily log in as -him if you have access to his 'host'. +@code{user} TABLE IN THE @code{mysql} DATABASE! The encrypted password +is the real password in @strong{MySQL}. If you know the password listed in +the @code{user} table for a given user, you can easily log in as that +user if you have access to the host listed for that account. @item Learn the @strong{MySQL} access privilege system. The @code{GRANT} and @@ -11984,15 +11985,15 @@ computer becomes compromised, the intruder can take the full list of passwords and use them. Instead use @code{MD5()} or another one-way hashing function. @item -Do not use passwords from dictionaries. There are special programs to +Do not choose passwords from dictionaries. There are special programs to break them. Even passwords like ``xfish98'' are very bad. Much better is ``duag98'' which contains the same word ``fish'' but typed one key to the left on a standard QWERTY keyboard. Another method is to use ``Mhall'' which is taken from the first characters of each word in the sentence ``Mary had -a little lamb.'' This is easy to remember and type, but hard to guess for -someone who does not know it. +a little lamb.'' This is easy to remember and type, but difficult to guess +for someone who does not know it. @item -Invest in a firewall. This protects from at least 50% of all types of +Invest in a firewall. This protects you from at least 50% of all types of exploits in any software. Put @strong{MySQL} behind the firewall or in a demilitarized zone (DMZ). @@ -12001,11 +12002,16 @@ Checklist: @item Try to scan your ports from the Internet using a tool such as @code{nmap}. @strong{MySQL} uses port 3306 by default. This port should -be inaccessible from untrusted hosts. 
Another simple way to check whether or -not your @strong{MySQL} port is open is to type @code{telnet -server_host 3306} from some remote machine, where -@code{server_host} is the hostname of your @strong{MySQL} -server. If you get a connection and some garbage characters, the port is +be inaccessible from untrusted hosts. Another simple way to check whether +or not your @strong{MySQL} port is open is to try the following command +from some remote machine, where @code{server_host} is the hostname of +your @strong{MySQL} server: + +@example +shell> telnet server_host 3306 +@end example + +If you get a connection and some garbage characters, the port is open, and should be closed on your firewall or router, unless you really have a good reason to keep it open. If @code{telnet} just hangs or the connection is refused, everything is OK; the port is blocked. @@ -12112,15 +12118,15 @@ connection, however the encryption algorithm is not very strong, and with some effort a clever attacker can crack the password if he is able to sniff the traffic between the client and the server. If the connection between the client and the server goes through an untrusted -network, you should use an @strong{SSH} tunnel to encrypt the +network, you should use an SSH tunnel to encrypt the communication. All other information is transferred as text that can be read by anyone who is able to watch the connection. If you are concerned about this, you can use the compressed protocol (in @strong{MySQL} Version 3.22 and above) to make things much harder. To make things even more secure you should use -@code{ssh}. You can find an open source ssh client at -@uref{http://www.openssh.org}, and a commercial ssh client at +@code{ssh}. You can find an open source @code{ssh} client at +@uref{http://www.openssh.org}, and a commercial @code{ssh} client at @uref{http://www.ssh.com}. With this, you can get an encrypted TCP/IP connection between a @strong{MySQL} server and a @strong{MySQL} client. @@ -12145,21 +12151,31 @@ mysql> FLUSH PRIVILEGES; @end example @item -Don't run the @strong{MySQL} daemon as the Unix @code{root} user. -It is very dangerous as any user with @code{FILE} privileges will be able to -create files -as @code{root} (for example, @code{~root/.bashrc}). To prevent this -@code{mysqld} will refuse to run as @code{root} unless it is specified -directly via @code{--user=root} option. +Don't run the @strong{MySQL} daemon as the Unix @code{root} user. This is +very dangerous, because any user with @code{FILE} privileges will be able +to create files as @code{root} (for example, @code{~root/.bashrc}). To +prevent this, @code{mysqld} will refuse to run as @code{root} unless it +is specified directly using a @code{--user=root} option. -@code{mysqld} can be run as any user instead. You can also create a new -Unix user @code{mysql} to make everything even more secure. If you run -@code{mysqld} as another Unix user, you don't need to change the -@code{root} user name in the @code{user} table, because @strong{MySQL} -user names have nothing to do with Unix user names. You can edit the -@code{mysql.server} script to start @code{mysqld} as another Unix user. -Normally this is done with the @code{su} command. For more details, see -@ref{Changing MySQL user, , Changing @strong{MySQL} user}. +@code{mysqld} can be run as an ordinary unprivileged user instead. +You can also create a new Unix user @code{mysql} to make everything +even more secure. 
If you run @code{mysqld} as another Unix user, +you don't need to change the @code{root} user name in the @code{user} +table, because @strong{MySQL} user names have nothing to do with Unix +user names. To start @code{mysqld} as another Unix user, add a @code{user} +line that specifies the user name to the @code{[mysqld]} group of the +@file{/etc/my.cnf} option file or the @file{my.cnf} option file in the +server's data directory. For example: + +@example +[mysqld] +user=mysql +@end example + +This will cause the server to start as the designated user whether you +start it manually or by using @code{safe_mysqld} or @code{mysql.server}. +For more details, see @ref{Changing MySQL user, , Changing @strong{MySQL} +user}. @item Don't support symlinks to tables (This can be disabled with the @@ -12168,18 +12184,10 @@ Don't support symlinks to tables (This can be disabled with the directories could then delete any file in the system! @xref{Symbolic links to tables}. -@item -If you put a password for the Unix @code{root} user in the @code{mysql.server} -script, make sure this script is readable only by @code{root}. - @item Check that the Unix user that @code{mysqld} runs as is the only user with read/write privileges in the database directories. -@item -On Unix platforms, do not run @code{mysqld} as root unless you really -need to. Consider creating a user named @code{mysql} for that purpose. - @item Don't give the @strong{process} privilege to all users. The output of @code{mysqladmin processlist} shows the text of the currently executing @@ -24351,7 +24359,7 @@ this. @xref{Table handler support}. If you have downloaded a binary version of @strong{MySQL} that includes support for BerkeleyDB, simply follow the instructions for installing a binary version of @strong{MySQL}. -@xref{Installing binary}. @xref{mysqld-max, @code{mysqld-max}}. +@xref{Installing binary}. @xref{mysqld-max, , @code{mysqld-max}}. To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL} Version 3.23.34 or newer and configure @code{MySQL} with the @@ -25463,7 +25471,7 @@ binary. If you have downloaded a binary version of @strong{MySQL} that includes support for InnoDB (mysqld-max), simply follow the instructions for installing a binary version of @strong{MySQL}. @xref{Installing binary}. -@xref{mysqld-max, @code{mysqld-max}}. +@xref{mysqld-max, , @code{mysqld-max}}. To compile @strong{MySQL} with InnoDB support, download MySQL-3.23.37 or newer and configure @code{MySQL} with the @code{--with-innodb} option. @@ -33110,7 +33118,7 @@ with the @code{-max} prefix. This makes it very easy to test out a another @code{mysqld} binary in an existing installation. Just run @code{configure} with the options you want and then install the new @code{mysqld} binary as @code{mysqld-max} in the same directory -where your old @code{mysqld} binary is. @xref{safe_mysqld, @code{safe_mysqld}}. +where your old @code{mysqld} binary is. @xref{safe_mysqld, , @code{safe_mysqld}}. The @code{mysqld-max} RPM uses the above mentioned @code{safe_mysqld} feature. It just installs the @code{mysqld-max} executable and @@ -33358,7 +33366,7 @@ MY_PWD=`pwd` Check if we are starting this relative (for the binary release) if test -d /data/mysql -a -f ./share/mysql/english/errmsg.sys -a -x ./bin/mysqld -------------------------------------------------------------------------- -@xref{safe_mysqld, @code{safe_mysqld}}. +@xref{safe_mysqld, , @code{safe_mysqld}}. @end example The above test should be successful, or you may encounter problems. 
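In practice this means changing into the installation directory before starting the server, so that the relative paths in the test above resolve; for example (the path is illustrative):

@example
shell> cd /usr/local/mysql
shell> ./bin/safe_mysqld --user=mysql &
@end example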
@item @@ -33886,7 +33894,7 @@ server). The dump will contain SQL statements to create the table and/or populate the table. If you are doing a backup on the server, you should consider using -the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy, @code{mysqlhotcopy}}. +the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy, , @code{mysqlhotcopy}}. @example shell> mysqldump [OPTIONS] database [tables] @@ -38439,11 +38447,15 @@ user and use the @code{--user=user_name} option. @code{mysqld} will switch to run as the Unix user @code{user_name} before accepting any connections. @item -If you are using the @code{mysql.server} script to start @code{mysqld} when -the system is rebooted, you should edit @code{mysql.server} to use @code{su} -to run @code{mysqld} as user @code{user_name}, or to invoke @code{mysqld} -with the @code{--user} option. (No changes to @code{safe_mysqld} are -necessary.) +To start the server as the given user name automatically at system +startup time, add a @code{user} line that specifies the user name to +the @code{[mysqld]} group of the @file{/etc/my.cnf} option file or the +@file{my.cnf} option file in the server's data directory. For example: + +@example +[mysqld] +user=user_name +@end example @end enumerate At this point, your @code{mysqld} process should be running fine and dandy as @@ -39091,8 +39103,8 @@ If you want to make a SQL level backup of a table, you can use TABLE}. @xref{SELECT}. @xref{BACKUP TABLE}. Another way to back up a database is to use the @code{mysqldump} program or -the @code{mysqlhotcopy script}. @xref{mysqldump, @code{mysqldump}}. -@xref{mysqlhotcopy, @code{mysqlhotcopy}}. +the @code{mysqlhotcopy script}. @xref{mysqldump, , @code{mysqldump}}. +@xref{mysqlhotcopy, , @code{mysqlhotcopy}}. @enumerate @item @@ -39184,7 +39196,8 @@ be an Internet service provider that wants to provide independent If you want to run multiple servers, the easiest way is to compile the servers with different TCP/IP ports and socket files so they are not -both listening to the same TCP/IP port or socket file. @xref{mysqld_multi}. +both listening to the same TCP/IP port or socket file. @xref{mysqld_multi, , +@code{mysqld_multi}}. Assume an existing server is configured for the default port number and socket file. Then configure the new server with a @code{configure} command @@ -41554,7 +41567,7 @@ query string.) If you want to know if the query should return a result set or not, you can use @code{mysql_field_count()} to check for this. -@xref{mysql_field_count, @code{mysql_field_count}}. +@xref{mysql_field_count, , @code{mysql_field_count}}. @subsubheading Return Values @@ -46342,7 +46355,7 @@ slave server restart. @item @code{SHOW KEYS} now shows whether or not key is @code{FULLTEXT}. @item -New script @file{mysqld_multi}. @xref{mysqld_multi}. +New script @file{mysqld_multi}. @xref{mysqld_multi, , @code{mysqld_multi}}. @item Added new script, @file{mysql-multi.server.sh}. Thanks to Tim Bunce @email{Tim.Bunce@@ig.co.uk} for modifying @file{mysql.server} to @@ -46395,7 +46408,7 @@ read by @code{mysql_options()}. Added new options @code{--pager[=...]}, @code{--no-pager}, @code{--tee=...} and @code{--no-tee} to the @code{mysql} client. The new corresponding interactive commands are @code{pager}, @code{nopager}, -@code{tee} and @code{notee}. @xref{mysql, @code{mysql}}, @code{mysql --help} +@code{tee} and @code{notee}. @xref{mysql, , @code{mysql}}, @code{mysql --help} and the interactive help for more information. 
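For example, within an interactive @code{mysql} session the new commands are used like this (the pager program and the file name are only examples):

@example
mysql> pager less
mysql> tee /tmp/mysql-session.log
mysql> nopager
mysql> notee
@end example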
@item Fixed crash when automatic repair of @code{MyISAM} table failed. From b1e00c96809479e5de09aa97509883ea96274b57 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 5 Jun 2001 04:05:33 +0300 Subject: [PATCH 04/33] Updated benchmarks and crash-me for postgreSQL 7.1.1 Fixed option for symlinks Docs/manual.texi: Updated symlink handling. sql-bench/Comments/postgres.benchmark: Updated benchmark text sql-bench/Makefile.am: Added graph-compare-results sql-bench/compare-results.sh: Don't reset the cmp option. sql-bench/crash-me.sh: Updated transaction testing. sql-bench/limits/mysql-3.23.cfg: Updated benchmark run sql-bench/limits/mysql.cfg: Updated benchmark run sql-bench/limits/pg.cfg: Updated benchmark run sql-bench/server-cfg.sh: Don't do vacuum too often. sql-bench/test-insert.sh: Don't do vacuum too often. sql/mysqld.cc: Changed skip-symlinks to skip-symlink --- Docs/manual.texi | 16 +- sql-bench/Comments/postgres.benchmark | 19 +- sql-bench/Makefile.am | 7 +- sql-bench/compare-results.sh | 6 +- sql-bench/crash-me.sh | 39 +- sql-bench/graph-compare-results.sh | 660 ++++++++++++++++++++++++++ sql-bench/limits/mysql-3.23.cfg | 8 +- sql-bench/limits/mysql.cfg | 8 +- sql-bench/limits/pg.cfg | 4 +- sql-bench/server-cfg.sh | 21 +- sql-bench/test-insert.sh | 8 - sql/mysqld.cc | 2 +- 12 files changed, 741 insertions(+), 57 deletions(-) create mode 100644 sql-bench/graph-compare-results.sh diff --git a/Docs/manual.texi b/Docs/manual.texi index 8f45392d3d4..e79c53e7c6e 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -10407,8 +10407,9 @@ recommended for systems where only local requests are allowed. @xref{DNS}. Don't use new, possible wrong routines. Implies @code{--skip-delay-key-write}. This will also set default table type to @code{ISAM}. @xref{ISAM}. -@item --skip-symlinks -Don't delete or rename files that symlinks in the data directory points to. +@item --skip-symlink +Don't delete or rename files that a symlinked file in the data directory +points to. @item --skip-safemalloc If @strong{MySQL} is configured with @code{--with-debug=full}, all programs @@ -12159,7 +12160,7 @@ Normally this is done with the @code{su} command. For more details, see @item Don't support symlinks to tables (This can be disabled with the -@code{--skip-symlinks} option. This is especially important if you run +@code{--skip-symlink} option. This is especially important if you run @code{mysqld} as root as anyone that has write access to the mysqld data directories could then delete any file in the system! @xref{Symbolic links to tables}. @@ -19157,7 +19158,7 @@ detect duplicated @code{UNIQUE} keys. By using @code{DATA DIRECTORY="directory"} or @code{INDEX DIRECTORY="directory"} you can specify where the table handler should put it's table and index files. This only works for @code{MyISAM} tables -in @code{MySQL} 4.0, when you are not using the @code{--skip-symlinks} +in @code{MySQL} 4.0, when you are not using the @code{--skip-symlink} option. @xref{Symbolic links to tables}. @end itemize @@ -31112,12 +31113,12 @@ If you use @code{ALTER TABLE RENAME} to move a table to another database, then the table will be moved to the other database directory and the old symlinks and the files they pointed to will be deleted. @item -If you are not using symlinks you should use the @code{--skip-symlinks} +If you are not using symlinks you should use the @code{--skip-symlink} option to @code{mysqld} to ensure that no one can drop or rename a file outside of the @code{mysqld} data directory. 
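A minimal sketch of how this might look in a server option file (assuming the @code{[mysqld]} group of @file{/etc/my.cnf} is read, as described in the option-file section; option names are written without the leading @code{--}):

@example
[mysqld]
user=mysql
skip-symlink
@end example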
@end itemize -Things that are not yet fully supported: +Things that are not yet supported: @cindex TODO, symlinks @itemize @bullet @@ -35001,7 +35002,8 @@ This can be used to get faster inserts! Deactivated indexes can be reactivated by using @code{myisamchk -r}. keys. @item -l or --no-symlinks Do not follow symbolic links. Normally @code{myisamchk} repairs the -table a symlink points at. +table a symlink points at. This option doesn't exist in MySQL 4.0, +as MySQL 4.0 will not remove symlinks during repair. @item -r or --recover Can fix almost anything except unique keys that aren't unique (which is an extremely unlikely error with ISAM/MyISAM tables). diff --git a/sql-bench/Comments/postgres.benchmark b/sql-bench/Comments/postgres.benchmark index cce9a8f05fe..4b417b8f97e 100644 --- a/sql-bench/Comments/postgres.benchmark +++ b/sql-bench/Comments/postgres.benchmark @@ -5,9 +5,16 @@ # Don't run the --fast test on a PostgreSQL 7.1.1 database on # which you have any critical data; During one of our test runs # PostgreSQL got a corrupted database and all data was destroyed! -# (When we tried to restart postmaster, It died with a +# When we tried to restart postmaster, It died with a # 'no such file or directory' error and never recovered from that! # +# Another time vacuum() filled our system disk with had 6G free +# while vaccuming a table of 60 M. +# +# We have sent a mail about this to the PostgreSQL mailing list, so +# the PostgreSQL developers should be aware of these problems and should +# hopefully fix this soon. +# # WARNING # The test was run on a Intel Xeon 2x 550 Mzh machine with 1G memory, @@ -73,8 +80,14 @@ make install run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql -# and a test where we do a vacuum() after each update. -# (The time for vacuum() is counted in the book-keeping() column) +# When running with --fast we run the following vacuum commands on +# the database between each major update of the tables: +# vacuum table +# or +# vacuum + +# The time for vacuum() is accounted for in the book-keeping() column, not +# in the test that updates the database. 
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast diff --git a/sql-bench/Makefile.am b/sql-bench/Makefile.am index 85880dd1989..9de8da5c189 100644 --- a/sql-bench/Makefile.am +++ b/sql-bench/Makefile.am @@ -21,15 +21,16 @@ benchdir_root= $(prefix) benchdir = $(benchdir_root)/sql-bench bench_SCRIPTS = test-ATIS test-connect test-create test-insert \ test-big-tables test-select test-wisconsin \ - test-alter-table \ + test-alter-table graph-compare-results \ bench-init.pl compare-results run-all-tests \ - server-cfg crash-me copy-db + server-cfg crash-me copy-db \ CLEANFILES = $(bench_SCRIPTS) EXTRA_SCRIPTS = test-ATIS.sh test-connect.sh test-create.sh \ test-insert.sh test-big-tables.sh test-select.sh \ test-alter-table.sh test-wisconsin.sh \ bench-init.pl.sh compare-results.sh server-cfg.sh \ - run-all-tests.sh crash-me.sh copy-db.sh + run-all-tests.sh crash-me.sh copy-db.sh \ + graph-compare-results.sh EXTRA_DIST = $(EXTRA_SCRIPTS) dist-hook: diff --git a/sql-bench/compare-results.sh b/sql-bench/compare-results.sh index d8a358ed171..9e3a8f2add8 100644 --- a/sql-bench/compare-results.sh +++ b/sql-bench/compare-results.sh @@ -25,7 +25,7 @@ use Getopt::Long; $opt_server="mysql"; $opt_dir="output"; -$opt_machine=""; +$opt_machine=$opt_cmp=""; $opt_relative=$opt_same_server=$opt_help=$opt_Information=$opt_skip_count=$opt_no_bars=$opt_verbose=0; GetOptions("Information","help","server=s","cmp=s","machine=s","relative","same-server","dir=s","skip-count","no-bars","html","verbose") || usage(); @@ -53,10 +53,6 @@ if ($#ARGV == -1) @ARGV=glob($files); $automatic_files=1; } -else -{ - $opt_cmp=""; -} foreach (@ARGV) { diff --git a/sql-bench/crash-me.sh b/sql-bench/crash-me.sh index badbcc85288..f6985adc5c0 100644 --- a/sql-bench/crash-me.sh +++ b/sql-bench/crash-me.sh @@ -38,7 +38,7 @@ # as such, and clarify ones such as "mediumint" with comments such as # "3-byte int" or "same as xxx". -$version="1.56"; +$version="1.57"; use DBI; use Getopt::Long; @@ -1539,12 +1539,24 @@ report("insert INTO ... 
SELECT ...","insert_select", "insert into crash_q (a) SELECT crash_me.a from crash_me", "drop table crash_q $drop_attr"); -report_trans("transactions","transactions", - [create_table("crash_q",["a integer not null"],[]), - "insert into crash_q values (1)"], - "select * from crash_q", - "drop table crash_q $drop_attr" - ); +if (!defined($limits{"transactions"})) +{ + my ($limit,$type); + $limit="transactions"; + print "$limit: "; + foreach $type (('', 'type=bdb', 'type=innodb', 'type=gemini')) + { + undef($limits{$limit}); + last if (!report_trans($limit, + [create_table("crash_q",["a integer not null"],[], + $type), + "insert into crash_q values (1)"], + "select * from crash_q", + "drop table crash_q $drop_attr" + )); + } + print "$limits{$limit}\n"; +} report("atomic updates","atomic_updates", create_table("crash_q",["a integer not null"],["primary key (a)"]), @@ -2500,8 +2512,7 @@ sub report_result sub report_trans { - my ($prompt,$limit,$queries,$check,$clear)=@_; - print "$prompt: "; + my ($limit,$queries,$check,$clear)=@_; if (!defined($limits{$limit})) { eval {undef($dbh->{AutoCommit})}; @@ -2518,7 +2529,6 @@ sub report_trans safe_query($clear); } else { $dbh->{AutoCommit} = 1; - safe_query($clear); save_config_data($limit,"error",$prompt); } } else { @@ -2532,8 +2542,7 @@ sub report_trans } safe_query($clear); } - print "$limits{$limit}\n"; - return $limits{$limit} ne "no"; + return $limits{$limit} ne "yes"; } @@ -2961,9 +2970,11 @@ sub sql_concat sub create_table { - my($table_name,$fields,$index) = @_; + my($table_name,$fields,$index,$extra) = @_; my($query,$nr,$parts,@queries,@index); + $extra="" if (!defined($extra)); + $query="create table $table_name ("; $nr=0; foreach $field (@$fields) @@ -3015,7 +3026,7 @@ sub create_table } } chop($query); - $query.= ')'; + $query.= ") $extra"; unshift(@queries,$query); return @queries; } diff --git a/sql-bench/graph-compare-results.sh b/sql-bench/graph-compare-results.sh new file mode 100644 index 00000000000..395ad272262 --- /dev/null +++ b/sql-bench/graph-compare-results.sh @@ -0,0 +1,660 @@ +#### +#### Hello ... this is a heavily hacked script by Luuk +#### instead of printing the result it makes a nice gif +#### when you want to look at the code ... beware of the +#### ugliest code ever seen .... but it works ... +#### and that's sometimes the only thing you want ... isn't it ... +#### as the original script ... Hope you like it +#### +#### Greetz..... Luuk de Boer 1997. +#### + +## if you want the seconds behind the bar printed or not ... +## or only the one where the bar is too big for the graph ... +## look at line 535 of this program and below ... 
+## look in sub calculate for allmost all hard/soft settings :-) + +# a little program to generate a table of results +# just read all the RUN-*.log files and format them nicely +# Made by Luuk de Boer +# Patched by Monty + +use Getopt::Long; +use GD; + +$opt_server="mysql"; +$opt_cmp="mysql,pg,solid"; +$opt_cmp="msql,mysql,pg,solid"; +$opt_cmp="empress,mysql,pg,solid"; +$opt_dir="output"; +$opt_machine=""; +$opt_relative=$opt_same_server=$opt_help=$opt_Information=$opt_skip_count=0; + +GetOptions("Information","help","server=s","cmp=s","machine=s","relative","same-server","dir=s","skip-count") || usage(); + +usage() if ($opt_help || $opt_Information); + +if ($opt_same_server) +{ + $files="$opt_dir/RUN-$opt_server-*$opt_machine"; +} +else +{ + $files="$opt_dir/RUN-*$opt_machine"; +} +$files.= "-cmp-$opt_cmp" if (length($opt_cmp)); + +$automatic_files=0; +if ($#ARGV == -1) +{ + @ARGV=glob($files); + $automatic_files=1; +} + + +# +# Go trough all RUN files and gather statistics. +# + +foreach (@ARGV) +{ + $filename = $_; + next if (defined($found{$_})); # remove duplicates + $found{$_}=1; + /RUN-(.*)$/; + $prog = $1; + push(@key_order,$prog); + $next = 0; + open(TMP, "<$filename") || die "Can't open $filename: $!\n"; + while () + { + chomp; + if ($next == 0) { + if (/Server version:\s+(\S+.*)/i) + { + $tot{$prog}{'server'} = $1; + } + elsif (/Arguments:\s+(.+)/i) + { + $tot{$prog}{'arguments'} = $1; + # Remove some standard, not informative arguments + $tot{$prog}{'arguments'} =~ s/--log|--use-old-results|--server=\S+|--cmp=\S+|--user=\S+|--pass=\S+|--machine=\S+//g; + $tot{$prog}{'arguments'} =~ s/\s+/ /g; + } + elsif (/Comments:\s+(.+)/i) { + $tot{$prog}{'comments'} = $1; + } elsif (/^(\S+):\s*(estimated\s|)total\stime:\s+(\d+)\s+secs/i) + { + $tmp = $1; $tmp =~ s/://; + $tot{$prog}{$tmp} = [ $3, (length($2) ? "+" : "")]; + $op1{$tmp} = $tmp; + } elsif (/Totals per operation:/i) { + $next = 1; + next; + } + } + elsif ($next == 1) + { + if (/^(\S+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s*([+|?])*/) + { + $tot1{$prog}{$1} = [$2,$6,$7]; + $op{$1} = $1; +#print "TEST - $_ \n * $prog - $1 - $2 - $6 - $7 ****\n"; +# $prog - filename +# $1 - operation +# $2 - time in secs +# $6 - number of loops +# $7 - nothing / + / ? / * => estimated time ... + # get the highest value .... + $highest = ($2/$6) if (($highest < ($2/$6)) && ($1 !~/TOTALS/i)); + $gifcount++; + $giftotal += ($2/$6); + } + } + } +} + +if (!%op) +{ + print "Didn't find any files matching: '$files'\n"; + print "Use the --cmp=server,server option to compare benchmarks\n"; + exit 1; +} + + +# everything is loaded ... +# now we have to create a fancy output :-) + +# I prefer to redirect scripts instead to force it to file ; Monty +# +# open(RES, ">$resultfile") || die "Can't write to $resultfile: $!\n"; +# select(RES) +# + +#print < +# +#EOF + +if ($opt_relative) +{ +# print "Column 1 is in seconds. All other columns are presented relative\n"; +# print "to this. 
1.00 is the same, bigger numbers indicates slower\n\n"; +} + +#print "The result logs which where found and the options:\n"; + +if ($automatic_files) +{ + if ($key_order[$i] =~ /^$opt_server/) + { + if ($key_order[$i] =~ /^$opt_server/) + { + unshift(@key_order,$key_order[$i]); + splice(@key_order,$i+1,1); + } + } +} +# extra for mysql and mysql_pgcc +#$number1 = shift(@key_order); +#$number2 = shift(@key_order); +#unshift(@key_order,$number1); +#unshift(@key_order,$number2); + +# Print header + +$column_count=0; +foreach $key (@key_order) +{ + $column_count++; +# printf "%2d %-40.40s: %s %s\n", $column_count, $key, +# $tot{$key}{'server'}, $tot{$key}{'arguments'}; +# print "Comments: $tot{$key}{'comments'}\n" +# if ($tot{$key}{'comments'} =~ /\w+/); +} + +#print "\n"; + +$namewidth=$opt_skip_count ? 20 :25; +$colwidth= $opt_relative ? 9 : 6; + +print_sep("="); +#printf "%-$namewidth.${namewidth}s|", "Operation"; +$count = 1; +foreach $key (@key_order) +{ +# printf "%${colwidth}d|", $count; + $count++; +} +#print "\n"; +#print_sep("-"); +#print_string("Results per test:"); +#print_sep("-"); + +foreach $key (sort {$a cmp $b} keys %op1) +{ +# printf "%-$namewidth.${namewidth}s|", $key; + $first=undef(); + foreach $server (@key_order) + { + print_value($first,$tot{$server}{$key}->[0],$tot{$server}{$key}->[1]); + $first=$tot{$server}{$key}->[0] if (!defined($first)); + } +# print "\n"; +} + +print_sep("-"); +print_string("The results per operation:"); +print_sep("-"); +$luukcounter = 1; +foreach $key (sort {$a cmp $b} keys %op) +{ + next if ($key =~ /TOTALS/i); + $tmp=$key; + $tmp.= " (" . $tot1{$key_order[0]}{$key}->[1] . ")" if (!$skip_count); +# printf "%-$namewidth.${namewidth}s|", $tmp; + $first=undef(); + foreach $server (@key_order) + { + print_value($first,$tot1{$server}{$key}->[0],$tot1{$server}{$key}->[2]); + $first=$tot1{$server}{$key}->[0] if (!defined($first)); + } +# print "\n"; + $luukcounter++; +} + +#print_sep("-"); +$key="TOTALS"; +#printf "%-$namewidth.${namewidth}s|", $key; +$first=undef(); +foreach $server (@key_order) +{ +# print_value($first,$tot1{$server}{$key}->[0],$tot1{$server}{$key}->[2]); + $first=$tot1{$server}{$key}->[0] if (!defined($first)); +} +#print "\n"; +#print_sep("="); +&make_gif; + +exit 0; + +# +# some format functions; +# + +sub print_sep +{ + my ($sep)=@_; +# print $sep x ($namewidth + (($colwidth+1) * $column_count)+1),"\n"; +} + + +sub print_value +{ + my ($first,$value,$flags)=@_; + my ($tmp); + + if (defined($value)) + { + if (!defined($first) || !$opt_relative) + { + $tmp=sprintf("%d",$value); + } + else + { + $first=1 if (!$first); # Assume that it took one second instead of 0 + $tmp= sprintf("%.2f",$value/$first); + } + if (defined($flags)) + { + $tmp="+".$tmp if ($flags =~ /\+/); + $tmp="?".$tmp if ($flags =~ /\?/); + } + } + else + { + $tmp=""; + } + $tmp= " " x ($colwidth-length($tmp)) . $tmp if (length($tmp) < $colwidth); +# print $tmp . "|"; +} + + +sub print_string +{ + my ($str)=@_; + my ($width); + $width=$namewidth + ($colwidth+1)*$column_count; + + $str=substr($str,1,$width) if (length($str) > $width); +# print($str," " x ($width - length($str)),"|\n"); +} + +sub usage +{ + exit(0); +} + + + +########################################### +########################################### +########################################### +# making here a gif of the results ... (lets try it :-)) +# luuk .... 
1997 +########################################### +## take care that $highest / $giftotal / $gifcount / $luukcounter +## are getting there value above ... so don't forget them while +## copying the code to some other program .... + +sub make_gif { + &gd; # some base things .... + &legend; # make the nice legend + &lines; # yep sometimes you have to print some lines + &gif("gif/benchmark2-".$opt_cmp); # and finally we can print all to a gif file ... +} +##### mmm we are finished now ... + + +# first we have to calculate some limits and some other stuff +sub calculate { +# here is the list which I have to know to make everything ..... +# the small border width ... $sm_border = +# the border default $border = +# the step default ... if it must be calculated then no value $step = +# the highest number $highest = +# the max length of the text of the x borders $max_len_lb= +# the max length of a legend entry $max_len_le= +# number of entries in the legend $num_legen = +# the length of the color blocks for the legend $legend_block= +# the width of the gif ...if it must be calculated - no value $width = +# the height of the gif .. if it must be calculated - no value $height = +# the width of the grey field ' ' ' ' $width_grey= +# the height of the grey field ' ' ' ' $height_grey= +# number of dashed lines $lines= +# if bars must overlap how much they must be overlapped $overlap= +# titlebar title of graph in two colors big $titlebar= +# titlebar1 sub title of graph in small font in black $titlebar1= +# xlabel $xlabel= +# ylabel $ylabel= +# the name of the gif ... $name= +# then the following things must be knows ..... +# xlabel below or on the left side ? +# legend yes/no? +# where must the legend be placed? +# must the xlabel be printed horizontal or vertical? +# must the ylabel be printed horizontal or vertical? +# must the graph be a line or a bar graph? +# is a xlabel several different entries or some sub entries of one? +# so xlabel 1 => test1=10, test2=15, test3=7 etc +# or xlabel 1 => test1a=12, test1b=10, test1c=7 etc +# must the bars overlap (only with the second example I think) +# must the number be printed above or next to the bar? +# when must the number be printed .... only when it extends the graph ...??? +# the space between the bars .... are that the same width of the bars ... +# or is it a separate space ... defined ??? +# must the date printed below or some where else .... + +#calculate all space for text and other things .... + $sm_border = 8; # the grey border around ... + $border = 40; #default ... + $left_border = 2.75 * $border; #default ... + $right_border = $border; #default ... + $up_border = $border; #default ... + $down_border = $border; # default ... + $step = ($height - $up_border - $down_border)/ ($luukcounter + (($#key_order + 1) * $luukcounter)); + # can set $step to get nice graphs ... and change the format ... + $step = 8; # set hard the step value + + $gifavg = ($giftotal/$gifcount); + $highest = 2 * $gifavg; + $highest = 1; # set hard the highest value ... + $xhigh = int($highest + .5 * $highest); + + # here to get the max lenght of the test entries ... 
+ # so we can calculate the with of the left border + foreach $oper (sort keys (%op)) { + $max_len_lb = length($oper) if (length($oper) > $max_len_lb); +# print "oper = $oper - $max_len_lb\n"; + } + $max_len_lb = $max_len_lb * gdSmallFont->width; + $left_border = (3*$sm_border) + $max_len_lb; + $down_border = (4*$sm_border) + (gdSmallFont->width*(length($xhigh)+3)) + (gdSmallFont->height *2); + $right_border = (3*$sm_border) + 3 + (gdSmallFont->width*(length($highest)+5)); + + # calculate the space for the legend ..... + foreach $key (@key_order) { + $tmp = $key; + $tmp =~ s/-cmp-$opt_cmp//i; + $giflegend = sprintf "%-24.24s: %-40.40s",$tmp,$tot{$key}{'server'}; + $max_len_le = length($giflegend) if (length($giflegend) > $max_len_le); + } + $max_len_le = $max_len_le * gdSmallFont->width; + $legend_block = 10; # the length of the block in the legend + $max_high_le = (($#key_order + 1)*(gdSmallFont->height+2)) + (2*$legend_block); + $down_border += $max_high_le; + $up_border = (5 * $sm_border) + gdSmallFont->height + gdLargeFont->height; + + print "Here some things we already know ....\n"; +# print "luukcounter = $luukcounter (number of tests)\n"; +# print "gifcount = $gifcount (number of total entries)\n"; +# print "giftotal = $giftotal (total secs)\n"; +# print "gifavg = $gifavg\n"; +# print "highest = $highest\n"; +# print "xhigh = $xhigh\n"; +# print "step = $step -- $#key_order\n"; +# print "max_len_lb = $max_len_lb\n"; +# printf "Small- width %d - height %s\n",gdSmallFont->width,gdSmallFont->height; +# printf "Tiny- width %d - height %s\n",gdTinyFont->width,gdTinyFont->height; +} + +sub gd { + &calculate; + $width = 600; # the width .... + $height = 500; # the height ... + $width_greyfield = 430; + # when $step is set ... count the height ....???? + $width = $width_greyfield + $left_border + $right_border; + $height = ($step * ($luukcounter + ($luukcounter * ($#key_order + 1)))) + $down_border + $up_border; + $b_width = $width - ($left_border + $right_border); # width within the grey field + $overlap = 0; # how far each colum can fall over each other ...nice :-) + + # make the gif image .... + $im = new GD::Image($width,$height); + + # allocate the colors to use ... 
+ $white = $im->colorAllocate(255,255,255); + $black = $im->colorAllocate(0,0,0); + $paper_white = $im->colorAllocate(220, 220, 220); + $grey1 = $im->colorAllocate(240, 240, 240); + $grey4 = $im->colorAllocate(229, 229, 229); + $grey2 = $im->colorAllocate(102, 102, 102); + $grey3 = $im->colorAllocate(153, 153, 153); + + $red = $im->colorAllocate(205,0,0); # msql + $lred = $im->colorAllocate(255,0,0); + $blue = $im->colorAllocate(0,0,205); # mysql + $lblue = $im->colorAllocate(0,0,255); # mysql_pgcc + $green = $im->colorAllocate(0, 205, 0); # postgres + $lgreen = $im->colorAllocate(0, 255, 0); # pg_fast + $orange = $im->colorAllocate(205,133, 0); # solid + $lorange = $im->colorAllocate(255, 165, 0); # Adabas + $yellow = $im->colorAllocate(205,205,0); # empress + $lyellow = $im->colorAllocate(255,255,0); + $magenta = $im->colorAllocate(255,0,255); # oracle + $lmagenta = $im->colorAllocate(255,200,255); + $cyan = $im->colorAllocate(0,205,205); # sybase + $lcyan = $im->colorAllocate(0,255,255); + $sienna = $im->colorAllocate(139,71,38); # db2 + $lsienna = $im->colorAllocate(160,82,45); + $coral = $im->colorAllocate(205,91,69); # Informix + $lcoral = $im->colorAllocate(255,114,86); + $peach = $im->colorAllocate(205,175,149); + $lpeach = $im->colorAllocate(255,218,185); + + @colors = ($red, $blue, $green, $orange, $yellow, $magenta, $cyan, $sienna, $coral, $peach); + @lcolors = ($lred, $lblue, $lgreen, $lorange, $lyellow, $lmagenta, $lcyan, $lsienna, $lcoral, $lpeach); + + # set a color per server so in every result it has the same color .... + foreach $key (@key_order) { + if ($tot{$key}{'server'} =~ /mysql/i) { + if ($key =~ /mysql_pgcc/i || $key =~ /mysql_odbc/i) { + $tot{$key}{'color'} = $lblue; + } else { + $tot{$key}{'color'} = $blue; + } + } elsif ($tot{$key}{'server'} =~ /msql/i) { + $tot{$key}{'color'} = $lred; + } elsif ($tot{$key}{'server'} =~ /postgres/i) { + if ($key =~ /pg_fast/i) { + $tot{$key}{'color'} = $lgreen; + } else { + $tot{$key}{'color'} = $green; + } + } elsif ($tot{$key}{'server'} =~ /solid/i) { + $tot{$key}{'color'} = $lorange; + } elsif ($tot{$key}{'server'} =~ /empress/i) { + $tot{$key}{'color'} = $lyellow; + } elsif ($tot{$key}{'server'} =~ /oracle/i) { + $tot{$key}{'color'} = $magenta; + } elsif ($tot{$key}{'server'} =~ /sybase/i) { + $tot{$key}{'color'} = $cyan; + } elsif ($tot{$key}{'server'} =~ /db2/i) { + $tot{$key}{'color'} = $sienna; + } elsif ($tot{$key}{'server'} =~ /informix/i) { + $tot{$key}{'color'} = $coral; + } elsif ($tot{$key}{'server'} =~ /microsoft/i) { + $tot{$key}{'color'} = $peach; + } elsif ($tot{$key}{'server'} =~ /access/i) { + $tot{$key}{'color'} = $lpeach; + } elsif ($tot{$key}{'server'} =~ /adabas/i) { + $tot{$key}{'color'} = $lorange; + } + } + + # make the nice little borders + # left bar + $poly0 = new GD::Polygon; + $poly0->addPt(0,0); + $poly0->addPt($sm_border,$sm_border); + $poly0->addPt($sm_border,($height - $sm_border)); + $poly0->addPt(0,$height); + $im->filledPolygon($poly0,$grey1); + $im->polygon($poly0, $grey4); + # upper bar + $poly3 = new GD::Polygon; + $poly3->addPt(0,0); + $poly3->addPt($sm_border,$sm_border); + $poly3->addPt(($width - $sm_border),$sm_border); + $poly3->addPt($width,0); + $im->polygon($poly3, $grey4); + $tmptime = localtime(time); + $im->string(gdSmallFont,($width - $sm_border - (gdSmallFont->width * length($tmptime))),($height - ($sm_border) - gdSmallFont->height), $tmptime, $grey3); + + # right bar + $poly1 = new GD::Polygon; + $poly1->addPt($width,0); + $poly1->addPt(($width - $sm_border),$sm_border); + 
$poly1->addPt(($width - $sm_border),($height - $sm_border)); + $poly1->addPt($width,$height); + $im->filledPolygon($poly1, $grey3); + $im->stringUp(gdSmallFont,($width - 10),($height - (2 * $sm_border)), "Made by Luuk de Boer - 1997 (c)", $blue); + #below bar + $poly2 = new GD::Polygon; + $poly2->addPt(0,$height); + $poly2->addPt($sm_border,($height - $sm_border)); + $poly2->addPt(($width - $sm_border),($height - $sm_border)); + $poly2->addPt($width,$height); + $im->filledPolygon($poly2, $grey2); + + # do the black line around where in you will print ... (must be done at last + # but is hard to develop with ... but the filled grey must be done first :-) + $im->filledRectangle($left_border,$up_border,($width - ($right_border)),($height-$down_border),$grey4); + + + # print the nice title ... + $titlebar = "MySQL Benchmark results"; # head title ... + $titlebar1 = "Compare $opt_cmp "; # sub title + $header2 = "seconds/test"; # header value + $center = ($width / 2) - ((gdLargeFont->width * length($titlebar)) / 2); + $center1 = ($width / 2) - ((gdSmallFont->width * length($titlebar1)) / 2); + $center2 = ($width_greyfield/2) - ((gdSmallFont->width*length($header2))/2); + $bovenkant = $sm_border * 3; + $bovenkant1 = $bovenkant + gdLargeFont->height + (.5*$sm_border); + $bovenkant2 = $height - $down_border + (1*$sm_border) + (gdSmallFont->width*(length($xhigh)+3)); + $im->string(gdLargeFont,($center),($bovenkant + 1), $titlebar, $grey3); + $im->string(gdLargeFont,($center),($bovenkant), $titlebar, $red); + $im->string(gdSmallFont,($center1),($bovenkant1), $titlebar1, $black); + $im->string(gdSmallFont,($left_border + $center2),($bovenkant2), $header2, $black); + + $xlength = $width - $left_border - $right_border; + $lines = 10; # hard coded number of dashed lines + $xverh = $xlength / $xhigh; +# print " de verhouding ===> $xverh --- $xlength -- $xhigh \n"; + + $xstep = ($xhigh / $lines) * $xverh; + $teller = 0; + # make the nice dashed lines and print the values ... + for ($i = 0; $i <= $lines; $i++) { + $st2 = ($left_border) + ($i * $xstep); + $im->dashedLine($st2,($height-$down_border),$st2,($up_border), $grey3); + if (($i != 0) && ($teller == 2)) { + $st3 = sprintf("%.2f", $i*($xhigh/$lines)); + $im->stringUp(gdTinyFont,($st2 - (gdSmallFont->height/2)),($height - $down_border +(.5*$sm_border) + (gdSmallFont->width*(length($xhigh)+3))), $st3, $black); + $teller = 0; + } + $teller++; + } + $im->rectangle($left_border,$up_border,($width - ($right_border)),($height-$down_border),$black); +} + +sub legend { + # make the legend ... + $legxbegin = $left_border; + + $legybegin = $height - $down_border + (2*$sm_border) + (gdSmallFont->width * (length($xhigh) + 3)) + gdSmallFont->height; + $legxend = $legxbegin + $max_len_le + (4*$legend_block); + $legxend = $legxbegin + $width_greyfield; + $legyend = $legybegin + $max_high_le; + $im->filledRectangle($legxbegin,$legybegin,$legxend,$legyend,$grey4); + $im->rectangle($legxbegin,$legybegin,$legxend,$legyend,$black); + # calculate the space for the legend ..... 
+ $c = 0; $i = 1; + $legybegin += $legend_block; + foreach $key (@key_order) { + $xtmp = $legxbegin + $legend_block; + $ytmp = $legybegin + ($c * (gdSmallFont->height +2)); + $xtmp1 = $xtmp + $legend_block; + $ytmp1 = $ytmp + gdSmallFont->height; + $im->filledRectangle($xtmp,$ytmp,$xtmp1,$ytmp1,$tot{$key}{'color'}); + $im->rectangle($xtmp,$ytmp,$xtmp1,$ytmp1,$black); + $tmp = $key; + $tmp =~ s/-cmp-$opt_cmp//i; + $giflegend = sprintf "%-24.24s: %-40.40s",$tmp,$tot{$key}{'server'}; + $xtmp2 = $xtmp1 + $legend_block; + $im->string(gdSmallFont,$xtmp2,$ytmp,"$giflegend",$black); + $c++; + $i++; +# print "$c $i -> $giflegend\n"; + } + +} + +sub lines { + + $g = 0; + $i = 0; + $ybegin = $up_border + ((($#key_order + 2)/2)*$step); + $xbegin = $left_border; + foreach $key (sort {$a cmp $b} keys %op) { + next if ($key =~ /TOTALS/i); + $c = 0; +# print "key - $key\n"; + foreach $server (@key_order) { + $tot1{$server}{$key}->[1] = 1 if ($tot1{$server}{$key}->[1] == 0); + $entry = $tot1{$server}{$key}->[0]/$tot1{$server}{$key}->[1]; + $ytmp = $ybegin + ($i * $step) ; + $xtmp = $xbegin + ($entry * $xverh) ; + $ytmp1 = $ytmp + $step; +# print "$server -- $entry --x $xtmp -- y $ytmp - $c\n"; + $entry1 = sprintf("%.2f", $entry); + if ($entry < $xhigh) { + $im->filledRectangle($xbegin, $ytmp, $xtmp, $ytmp1, $tot{$server}{'color'}); + $im->rectangle($xbegin, $ytmp, $xtmp, $ytmp1, $black); +# print the seconds behind the bar (look below for another entry) +# this entry is for the bars that are not greater then the max width +# of the grey field ... +# $im->string(gdTinyFont,(($xtmp+3),($ytmp),"$entry1",$black)); +# if you want the seconds in the color of the bar just uncomment it (below) +# $im->string(gdTinyFont,(($xtmp+3),($ytmp),"$entry1",$tot{$server}{'color'})); + } else { + $im->filledRectangle($xbegin, $ytmp, ($xbegin + ($xhigh*$xverh)), $ytmp1, $tot{$server}{'color'}); + $im->rectangle($xbegin, $ytmp, ($xbegin + ($xhigh*$xverh)), $ytmp1, $black); + +# print the seconds behind the bar (look below for another entry) +# here is the seconds printed behind the bar is the bar is too big for +# the graph ... (seconds is greater then xhigh ...) + $im->string(gdTinyFont, ($xbegin + ($xhigh*$xverh)+3),($ytmp),"$entry1",$black); +# if you want the seconds in the color of the bar just uncomment it (below) +# $im->string(gdTinyFont, ($xbegin + ($xhigh*$xverh)+3),($ytmp),"$entry1",$colors[$c]); + } + $c++; + $i++; + } + # see if we can center the text between the bars ... + $ytmp2 = $ytmp1 - (((($c)*$step) + gdSmallFont->height)/2); + $im->string(gdSmallFont,($sm_border*2),$ytmp2,$key, $black); + $i++; + } +} + + +sub gif { + my ($name) = @_; + $name_gif = $name . 
".gif"; + print "name --> $name_gif\n"; + open (GIF, "> $name_gif") || die "Can't open $name_gif: $!\n"; + print GIF $im->gif; + close (GIF); +} + diff --git a/sql-bench/limits/mysql-3.23.cfg b/sql-bench/limits/mysql-3.23.cfg index 19bb3c67cc1..a496bd7bf4c 100644 --- a/sql-bench/limits/mysql-3.23.cfg +++ b/sql-bench/limits/mysql-3.23.cfg @@ -1,4 +1,4 @@ -#This file is automaticly generated by crash-me 1.54 +#This file is automaticly generated by crash-me 1.57 NEG=yes # update of column= -column Need_cast_for_null=no # Need to cast NULL for arithmetic @@ -36,7 +36,7 @@ constraint_check=no # Column constraints constraint_check_table=no # Table constraints constraint_null=yes # NULL constraint (SyBase style) crash_me_safe=yes # crash me safe -crash_me_version=1.54 # crash me version +crash_me_version=1.57 # crash me version create_default=yes # default value for column create_default_func=no # default value function for column create_if_not_exists=yes # create table if not exists @@ -394,7 +394,7 @@ select_limit2=yes # SELECT with LIMIT #,# select_string_size=1048565 # constant string size in SELECT select_table_update=no # Update with sub select select_without_from=yes # SELECT without FROM -server_version=MySQL 3.23.29 gamma # server version +server_version=MySQL 3.23.39 debug # server version simple_joins=yes # ANSI SQL simple joins storage_of_float=round # Storage of float values subqueries=no # subqueries @@ -402,7 +402,7 @@ table_alias=yes # Table alias table_name_case=no # case independent table names table_wildcard=yes # Select table_name.* temporary_table=yes # temporary tables -transactions=no # transactions +transactions=yes # constant string size in where truncate_table=yes # truncate type_extra_abstime=no # Type abstime type_extra_bfile=no # Type bfile diff --git a/sql-bench/limits/mysql.cfg b/sql-bench/limits/mysql.cfg index 19bb3c67cc1..a496bd7bf4c 100644 --- a/sql-bench/limits/mysql.cfg +++ b/sql-bench/limits/mysql.cfg @@ -1,4 +1,4 @@ -#This file is automaticly generated by crash-me 1.54 +#This file is automaticly generated by crash-me 1.57 NEG=yes # update of column= -column Need_cast_for_null=no # Need to cast NULL for arithmetic @@ -36,7 +36,7 @@ constraint_check=no # Column constraints constraint_check_table=no # Table constraints constraint_null=yes # NULL constraint (SyBase style) crash_me_safe=yes # crash me safe -crash_me_version=1.54 # crash me version +crash_me_version=1.57 # crash me version create_default=yes # default value for column create_default_func=no # default value function for column create_if_not_exists=yes # create table if not exists @@ -394,7 +394,7 @@ select_limit2=yes # SELECT with LIMIT #,# select_string_size=1048565 # constant string size in SELECT select_table_update=no # Update with sub select select_without_from=yes # SELECT without FROM -server_version=MySQL 3.23.29 gamma # server version +server_version=MySQL 3.23.39 debug # server version simple_joins=yes # ANSI SQL simple joins storage_of_float=round # Storage of float values subqueries=no # subqueries @@ -402,7 +402,7 @@ table_alias=yes # Table alias table_name_case=no # case independent table names table_wildcard=yes # Select table_name.* temporary_table=yes # temporary tables -transactions=no # transactions +transactions=yes # constant string size in where truncate_table=yes # truncate type_extra_abstime=no # Type abstime type_extra_bfile=no # Type bfile diff --git a/sql-bench/limits/pg.cfg b/sql-bench/limits/pg.cfg index ed1c2eaa63f..9cb42f86b8a 100644 --- a/sql-bench/limits/pg.cfg +++ 
b/sql-bench/limits/pg.cfg @@ -1,4 +1,4 @@ -#This file is automaticly generated by crash-me 1.56 +#This file is automaticly generated by crash-me 1.57 NEG=yes # update of column= -column Need_cast_for_null=no # Need to cast NULL for arithmetic @@ -36,7 +36,7 @@ constraint_check=yes # Column constraints constraint_check_table=yes # Table constraints constraint_null=yes # NULL constraint (SyBase style) crash_me_safe=yes # crash me safe -crash_me_version=1.56 # crash me version +crash_me_version=1.57 # crash me version create_default=yes # default value for column create_default_func=yes # default value function for column create_if_not_exists=no # create table if not exists diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh index 1983b2ce01b..86b891d8856 100644 --- a/sql-bench/server-cfg.sh +++ b/sql-bench/server-cfg.sh @@ -799,18 +799,27 @@ sub reconnect_on_errors sub vacuum { - my ($self,$full_vacuum,$dbh_ref)=@_; - my ($loop_time,$end_time,$dbh); + my ($self,$full_vacuum,$dbh_ref,@tables)=@_; + my ($loop_time,$end_time,$dbh,$table); if (defined($full_vacuum)) { $$dbh_ref->disconnect; $$dbh_ref= $self->connect(); } $dbh=$$dbh_ref; $loop_time=new Benchmark; - $dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; - $dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; - $dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; - $dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; + if ($#tables >= 0) + { + foreach $table (@tables) + { + $dbh->do("vacuum $table") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; + } + } + else + { +# $dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; +# $dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; + $dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; + } $end_time=new Benchmark; print "Time for book-keeping (1): " . Benchmark::timestr(Benchmark::timediff($end_time, $loop_time),"all") . "\n\n"; diff --git a/sql-bench/test-insert.sh b/sql-bench/test-insert.sh index 82ffcd83487..b8f20b123a5 100644 --- a/sql-bench/test-insert.sh +++ b/sql-bench/test-insert.sh @@ -250,10 +250,6 @@ if ($limits->{'unique_index'}) timestr(timediff($end_time, $loop_time),"all") . "\n\n"; } -#if ($opt_fast && defined($server->{vacuum})) -#{ -# $server->vacuum(1,\$dbh); -#} #### #### Do some selects on the table @@ -1410,10 +1406,6 @@ if ($limits->{'insert_multi_value'}) print "Time for multiple_value_insert (" . ($opt_loop_count) . "): " . timestr(timediff($end_time, $loop_time),"all") . 
"\n\n"; - if ($opt_fast && defined($server->{vacuum})) - { - $server->vacuum(1,\$dbh); - } if ($opt_lock_tables) { $sth = $dbh->do("UNLOCK TABLES ") || die $DBI::errstr; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 8de9da444bd..e72440ba4b1 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2587,7 +2587,7 @@ static struct option long_options[] = { {"skip-show-database", no_argument, 0, (int) OPT_SKIP_SHOW_DB}, {"skip-slave-start", no_argument, 0, (int) OPT_SKIP_SLAVE_START}, {"skip-stack-trace", no_argument, 0, (int) OPT_SKIP_STACK_TRACE}, - {"skip-symlinks", no_argument, 0, (int) OPT_SKIP_SYMLINKS}, + {"skip-symlink", no_argument, 0, (int) OPT_SKIP_SYMLINKS}, {"skip-thread-priority", no_argument, 0, (int) OPT_SKIP_PRIOR}, {"sql-bin-update-same", no_argument, 0, (int) OPT_SQL_BIN_UPDATE_SAME}, #include "sslopt-longopts.h" From ccf31559fc6c62900cb1ee55061aa261c0702197 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 4 Jun 2001 20:20:34 -0500 Subject: [PATCH 05/33] manual.texi fix more out-of-date mysql.server commentary, plus manual.texi small misc changes Docs/manual.texi: fix more out-of-date mysql.server commentary, plus small misc changes --- Docs/manual.texi | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index a85a24848bb..39793597956 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -10128,6 +10128,10 @@ options. @xref{InnoDB start}. @cindex stopping, the server @cindex server, starting and stopping +The @code{mysql.server} and @code{safe_mysqld} scripts can be used to start +the server automatically at system startup time. @code{mysql.server} can also +be used to stop the server. + The @code{mysql.server} script can be used to start or stop the server by invoking it with @code{start} or @code{stop} arguments: @@ -10145,9 +10149,8 @@ the @strong{MySQL} installation directory, then invokes @code{safe_mysqld}. You might need to edit @code{mysql.server} if you have a binary distribution that you've installed in a non-standard location. Modify it to @code{cd} into the proper directory before it runs @code{safe_mysqld}. If you want the -server to run as some specific user, you can change the -@code{mysql_daemon_user=root} line to use another user. You can also modify -@code{mysql.server} to pass other options to @code{safe_mysqld}. +server to run as some specific user, add an appropriate @code{user} line +to the @file{/etc/my.cnf} file, as shown later in this section. @code{mysql.server stop} brings down the server by sending a signal to it. You can take down the server manually by executing @code{mysqladmin shutdown}. @@ -10655,7 +10658,7 @@ The above is the quick and dirty way that one commonly uses for testing. The nice thing with this is that all connections you do in the above shell will automatically be directed to the new running server! -If you need to do this more permanently, you should create an own option +If you need to do this more permanently, you should create an option file for each server. @xref{Option files}. In your startup script that is executed at boot time (mysql.server?) you should specify for both servers: @@ -45753,19 +45756,19 @@ not yet 100% confident in this code. @appendixsubsec Changes in release 3.23.39 @itemize @bullet @item -Fixed problem that client 'hang' when @code{LOAD TABLE FROM MASTER} failed. +Fixed problem that client 'hung' when @code{LOAD TABLE FROM MASTER} failed. 
@item -Running @code{myisamchk --fast --force} will not anymore repair tables +Running @code{myisamchk --fast --force} will no longer repair tables that only had the open count wrong. @item Added functions to handle symbolic links to make life easier in 4.0. @item We are now using the @code{-lcma} thread library on HPUX 10.20 to -get @strong{MySQL} more stabile on HPUX. +get @strong{MySQL} more stable on HPUX. @item Fixed problem with @code{IF()} and number of decimals in the result. @item -Fixed that date-part extract functions works with dates where day +Fixed date-part extraction functions to work with dates where day and/or month is 0. @item Extended argument length in option files from 256 to 512 chars. @@ -45773,7 +45776,7 @@ Extended argument length in option files from 256 to 512 chars. Fixed problem with shutdown when @code{INSERT DELAYED} was waiting for a @code{LOCK TABLE}. @item -Fixed coredump bug buged in InnoDB when tablespace was full. +Fixed coredump bug in InnoDB when tablespace was full. @item Fixed problem with @code{MERGE} tables and big tables (> 4G) when using @code{ORDER BY}. From 85929a164d5ecef0587f2299855da275e82f9f36 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 4 Jun 2001 20:33:10 -0500 Subject: [PATCH 06/33] manual.texi 1) manual says mysql.server uses "user"; it doesn't. manual.texi 2) manual says mysql.server uses "bindir"; it doesn't. Docs/manual.texi: 1) manual says mysql.server uses "user"; it doesn't. 2) manual says mysql.server uses "bindir"; it doesn't. --- Docs/manual.texi | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index 7ebbcc6275b..c07d8cf2b40 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -10176,23 +10176,23 @@ this: datadir=/usr/local/mysql/var socket=/tmp/mysqld.sock port=3306 +user=mysql [mysql.server] -user=mysql basedir=/usr/local/mysql @end example -The @code{mysql.server} script uses the following variables: -@code{user}, @code{datadir}, @code{basedir}, @code{bindir}, and -@code{pid-file}. +The @code{mysql.server} script understands the following options: +@code{datadir}, @code{basedir}, and @code{pid-file}. -The following table shows which option sections each of the startup script -uses: +The following table shows which option groups each of the startup scripts +read from option files: @multitable @columnfractions .20 .80 +@item @strong{Script} @tab @strong{Option groups} @item @code{mysqld} @tab @code{mysqld} and @code{server} -@item @code{mysql.server} @tab @code{mysql.server}, @code{mysqld} and @code{server} -@item @code{safe_mysqld} @tab @code{mysql.server}, @code{mysqld} and @code{server} +@item @code{mysql.server} @tab @code{mysql.server}, @code{mysqld}, and @code{server} +@item @code{safe_mysqld} @tab @code{mysql.server}, @code{mysqld}, and @code{server} @end multitable @xref{Option files}. From 19e54153855391f3843ca0201687b3ff81630ab1 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 5 Jun 2001 16:27:59 +0300 Subject: [PATCH 07/33] Updated benchmark results. 
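For context, the new Results files below come from re-running the benchmark suite against MySQL 3.23.39 on the upgraded test host and graphing the output with the GD-based graphing code shown above. As a rough sketch only (the option names are assumed from the sql-bench scripts themselves, e.g. $opt_cmp and $opt_fast, and should be checked against the run-all-tests driver before use), a comparison run of this kind looks like:

shell> cd sql-bench
shell> perl run-all-tests --server=mysql --cmp=mysql,pg --log
shell> perl run-all-tests --server=pg --cmp=mysql,pg --fast --log

The --cmp value is what produces the "-cmp-mysql,pg" suffix seen in the Results file names, and it is the same value the graphing code strips back off with s/-cmp-$opt_cmp//i when it builds the legend.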
BitKeeper/deleted/.del-ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~f761da5546f0d362: Delete: sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~79ac0482599eace1: Delete: sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~dfc480becae45236: Delete: sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~a2a77f37b689cd63: Delete: sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~7dd5ac726f86cf0b: Delete: sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~441a6aefd381e319: Delete: sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~d12a7edef05d7185: Delete: sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~ddcf36cdf3f72e8c: Delete: sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~2db07249379f36: Delete: sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg sql-bench/Comments/postgres.benchmark: b --- sql-bench/Comments/postgres.benchmark | 4 +- ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 19 ---- ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 20 ++++ ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 75 ------------- ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 101 ++++++++++++++++++ ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 14 --- ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 14 +++ ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 19 ---- ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 19 ++++ ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 30 ------ ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 30 ++++++ ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 18 ---- ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 18 ++++ ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 58 ---------- ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 85 +++++++++++++++ ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 23 ---- ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 30 ++++++ ...ysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 14 --- ...sql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 14 +++ 19 files changed, 333 insertions(+), 272 deletions(-) delete mode 100644 sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/ATIS-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/RUN-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/alter-table-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/big-tables-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 
sql-bench/Results/connect-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/create-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/insert-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/select-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/wisconsin-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg diff --git a/sql-bench/Comments/postgres.benchmark b/sql-bench/Comments/postgres.benchmark index 4b417b8f97e..6fadf20755e 100644 --- a/sql-bench/Comments/postgres.benchmark +++ b/sql-bench/Comments/postgres.benchmark @@ -1,4 +1,4 @@ -# This file describes how to run MySQL benchmarks with PostgreSQL +# This file describes how to run MySQL benchmark suite with PostgreSQL # # WARNING: # @@ -18,7 +18,7 @@ # WARNING # The test was run on a Intel Xeon 2x 550 Mzh machine with 1G memory, -# 9G hard disk. The OS is Suse 6.4, with Linux 2.2.14 compiled with SMP +# 9G hard disk. The OS is Suse 7.1, with Linux 2.4.0 compiled with SMP # support # Both the perl client and the database server is run # on the same machine. No other cpu intensive process was used during diff --git a/sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 74ba392c93b..00000000000 --- a/sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,19 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:46:54 - -ATIS table test - -Creating tables -Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (9768): 2 wallclock secs ( 0.49 usr 0.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Retrieving data -Time for select_simple_join (500): 2 wallclock secs ( 0.63 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_join (200): 15 wallclock secs ( 4.21 usr 2.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_distinct (800): 12 wallclock secs ( 1.70 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (2600): 12 wallclock secs ( 1.43 usr 0.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Removing tables -Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 43 wallclock secs ( 8.46 usr 3.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/ATIS-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..84ff70751d3 --- /dev/null +++ b/sql-bench/Results/ATIS-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,20 @@ +Testing server 'MySQL 3.23.39' at 2001-06-05 19:26:17 + +ATIS table test + +Creating tables +Time for create_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Inserting data +Time to insert (9768): 3 wallclock secs ( 0.45 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.89 CPU) + +Retrieving data +Time for select_simple_join (500): 3 wallclock secs ( 0.68 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.87 CPU) +Time for 
select_join (100): 3 wallclock secs ( 0.51 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.71 CPU) +Time for select_key_prefix_join (100): 13 wallclock secs ( 4.08 usr 2.01 sys + 0.00 cusr 0.00 csys = 6.09 CPU) +Time for select_distinct (800): 15 wallclock secs ( 1.75 usr 0.69 sys + 0.00 cusr 0.00 csys = 2.44 CPU) +Time for select_group (2600): 20 wallclock secs ( 1.57 usr 0.41 sys + 0.00 cusr 0.00 csys = 1.98 CPU) + +Removing tables +Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU) diff --git a/sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index b3f8ad6f63f..00000000000 --- a/sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,75 +0,0 @@ -Benchmark DBD suite: 2.9 -Date of test: 2000-08-17 19:09:48 -Running tests on: Linux 2.2.14-my-SMP i686 -Arguments: -Comments: Intel Xeon, 2x550 Mhz, 1G ram, key_buffer=16M -Limits from: mysql,pg -Server version: MySQL 3.23.22 beta - -ATIS: Total time: 43 wallclock secs ( 8.46 usr 3.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -alter-table: Total time: 260 wallclock secs ( 0.27 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -big-tables: Total time: 30 wallclock secs ( 8.19 usr 6.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -connect: Total time: 53 wallclock secs (26.25 usr 9.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -create: Total time: 121 wallclock secs ( 8.83 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -insert: Total time: 1592 wallclock secs (254.20 usr 98.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -select: Total time: 1692 wallclock secs (111.29 usr 65.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -wisconsin: Total time: 16 wallclock secs ( 2.87 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -All 8 test executed successfully - -Totals per operation: -Operation seconds usr sys cpu tests -alter_table_add 252.00 0.20 0.02 0.00 992 -connect 10.00 6.60 1.51 0.00 10000 -connect+select_1_row 13.00 7.08 2.47 0.00 10000 -connect+select_simple 13.00 7.36 2.24 0.00 10000 -count 46.00 0.07 0.00 0.00 100 -count_distinct 124.00 0.65 0.16 0.00 1000 -count_distinct_big 623.00 69.07 56.00 0.00 1020 -count_distinct_group 77.00 0.94 0.33 0.00 1000 -count_distinct_group_on_key 64.00 0.37 0.07 0.00 1000 -count_distinct_group_on_key_parts 77.00 0.93 0.45 0.00 1000 -count_group_on_key_parts 61.00 1.09 0.27 0.00 1000 -count_on_key 574.00 16.11 3.17 0.00 50100 -create+drop 26.00 2.10 0.81 0.00 10000 -create_MANY_tables 32.00 1.97 0.49 0.00 10000 -create_index 4.00 0.00 0.00 0.00 8 -create_key+drop 40.00 3.64 0.72 0.00 10000 -create_table 0.00 0.00 0.00 0.00 31 -delete_big 21.00 0.00 0.00 0.00 13 -delete_big_many_keys 120.00 0.00 0.00 0.00 2 -delete_key 4.00 0.50 0.47 0.00 10000 -drop_index 4.00 0.00 0.00 0.00 8 -drop_table 0.00 0.00 0.00 0.00 28 -drop_table_when_MANY_tables 9.00 0.44 0.49 0.00 10000 -insert 130.00 20.73 12.97 0.00 350768 -insert_duplicates 113.00 18.31 11.27 0.00 300000 -insert_key 159.00 8.91 4.08 0.00 100000 -insert_many_fields 8.00 0.29 0.08 0.00 2000 -min_max 31.00 0.03 0.00 0.00 60 -min_max_on_key 213.00 25.00 4.86 0.00 85000 -order_by 47.00 19.72 16.45 0.00 10 -order_by_key 31.00 19.75 10.54 0.00 10 -select_1_row 3.00 0.74 0.62 0.00 10000 -select_2_rows 3.00 0.45 0.58 0.00 10000 -select_big 37.00 23.09 11.64 0.00 10080 -select_column+column 3.00 0.52 0.59 0.00 10000 -select_diff_key 210.00 0.28 
0.07 0.00 500 -select_distinct 12.00 1.70 0.68 0.00 800 -select_group 70.00 1.49 0.40 0.00 2711 -select_group_when_MANY_tables 14.00 0.68 0.63 0.00 10000 -select_join 15.00 4.21 2.20 0.00 200 -select_key 129.00 66.05 14.03 0.00 200000 -select_key_prefix 130.00 67.36 13.74 0.00 200000 -select_many_fields 22.00 7.89 6.66 0.00 2000 -select_range 21.00 7.00 1.72 0.00 25420 -select_range_prefix 18.00 6.07 1.50 0.00 25010 -select_simple 2.00 0.52 0.49 0.00 10000 -select_simple_join 2.00 0.63 0.32 0.00 500 -update_big 65.00 0.01 0.00 0.00 500 -update_of_key 25.00 2.51 2.23 0.00 500 -update_of_key_big 33.00 0.06 0.00 0.00 501 -update_of_primary_key_many_keys 67.00 0.00 0.01 0.00 256 -update_with_key 109.00 13.71 11.48 0.00 100000 -wisc_benchmark 4.00 1.75 0.68 0.00 114 -TOTALS 3920.00 438.58 200.19 0.00 1594242 diff --git a/sql-bench/Results/RUN-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..b7112793495 --- /dev/null +++ b/sql-bench/Results/RUN-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,101 @@ +Benchmark DBD suite: 2.12 +Date of test: 2001-06-05 19:27:31 +Running tests on: Linux 2.4.0-64GB-SMP i686 +Arguments: +Comments: Intel Xeon, 2x550 Mhz, 512M, key_buffer=16M +Limits from: mysql,pg +Server version: MySQL 3.23.39 + +ATIS: Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU) +alter-table: Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU) +big-tables: Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU) +connect: Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU) +create: Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU) +insert: Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU) +select: Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU) +wisconsin: Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU) + +All 8 test executed successfully + +Totals per operation: +Operation seconds usr sys cpu tests +alter_table_add 261.00 0.13 0.02 0.15 992 +connect 16.00 6.84 2.50 9.34 10000 +connect+select_1_row 15.00 7.11 3.70 10.81 10000 +connect+select_simple 13.00 6.70 3.21 9.91 10000 +count 45.00 0.01 0.00 0.01 100 +count_distinct 60.00 0.42 0.08 0.50 1000 +count_distinct_2 63.00 0.18 0.03 0.21 1000 +count_distinct_big 165.00 7.78 3.16 10.94 120 +count_distinct_group 194.00 1.21 0.37 1.58 1000 +count_distinct_group_on_key 59.00 0.51 0.07 0.58 1000 +count_distinct_group_on_key_parts 194.00 1.12 0.46 1.58 1000 +count_distinct_key_prefix 51.00 0.45 0.08 0.53 1000 +count_group_on_key_parts 58.00 1.16 0.35 1.51 1000 +count_on_key 586.00 16.61 2.71 19.32 50100 +create+drop 33.00 2.94 0.82 3.76 10000 +create_MANY_tables 18.00 1.02 0.62 1.64 5000 +create_index 5.00 0.00 0.00 0.00 8 +create_key+drop 41.00 3.05 0.66 3.71 10000 +create_table 0.00 0.01 0.00 0.01 31 +delete_all 17.00 0.00 0.00 0.00 12 +delete_all_many_keys 75.00 0.03 0.00 0.03 1 +delete_big 1.00 0.00 0.00 0.00 1 +delete_big_many_keys 75.00 0.03 0.00 0.03 128 +delete_key 4.00 0.76 0.29 1.05 10000 +drop_index 5.00 0.00 0.00 0.00 8 +drop_table 0.00 0.00 0.00 0.00 28 +drop_table_when_MANY_tables 6.00 0.37 0.63 1.00 5000 +insert 144.00 24.06 14.28 38.34 350768 +insert_duplicates 31.00 5.06 3.72 8.78 100000 +insert_key 137.00 9.91 6.26 16.17 100000 
+insert_many_fields 10.00 0.54 0.08 0.62 2000 +insert_select_1_key 7.00 0.00 0.00 0.00 1 +insert_select_2_keys 9.00 0.00 0.00 0.00 1 +min_max 30.00 0.04 0.01 0.05 60 +min_max_on_key 230.00 28.28 4.43 32.71 85000 +order_by_big 78.00 22.39 9.83 32.22 10 +order_by_big_key 33.00 23.35 10.15 33.50 10 +order_by_big_key2 32.00 22.53 9.81 32.34 10 +order_by_big_key_desc 36.00 23.47 10.27 33.74 10 +order_by_big_key_diff 74.00 22.66 9.76 32.42 10 +order_by_big_key_prefix 33.00 22.18 9.81 31.99 10 +order_by_key2_diff 9.00 1.30 0.85 2.15 500 +order_by_key_prefix 4.00 0.97 0.57 1.54 500 +order_by_range 8.00 1.26 0.49 1.75 500 +outer_join 110.00 0.00 0.00 0.00 10 +outer_join_found 107.00 0.00 0.00 0.00 10 +outer_join_not_found 59.00 0.00 0.00 0.00 500 +outer_join_on_key 60.00 0.00 0.00 0.00 10 +select_1_row 3.00 0.81 0.69 1.50 10000 +select_2_rows 3.00 0.67 0.63 1.30 10000 +select_big 63.00 32.72 16.55 49.27 10080 +select_column+column 4.00 0.52 0.46 0.98 10000 +select_diff_key 193.00 0.32 0.04 0.36 500 +select_distinct 15.00 1.75 0.69 2.44 800 +select_group 75.00 1.59 0.45 2.04 2711 +select_group_when_MANY_tables 5.00 0.43 0.87 1.30 5000 +select_join 3.00 0.51 0.20 0.71 100 +select_key 132.00 53.98 10.53 64.51 200000 +select_key2 139.00 78.61 11.08 89.69 200000 +select_key2_return_key 131.00 64.58 9.61 74.19 200000 +select_key2_return_prim 134.00 72.33 11.34 83.67 200000 +select_key_prefix 141.00 86.32 12.05 98.37 200000 +select_key_prefix_join 13.00 4.08 2.01 6.09 100 +select_key_return_key 125.00 59.92 12.00 71.92 200000 +select_many_fields 23.00 8.85 7.55 16.40 2000 +select_query_cache 120.00 3.67 0.53 4.20 10000 +select_query_cache2 120.00 3.80 0.57 4.37 10000 +select_range 201.00 9.05 3.95 13.00 410 +select_range_key2 21.00 7.15 1.40 8.55 25010 +select_range_prefix 22.00 6.55 1.40 7.95 25010 +select_simple 2.00 0.54 0.49 1.03 10000 +select_simple_join 3.00 0.68 0.19 0.87 500 +update_big 64.00 0.00 0.00 0.00 10 +update_of_key 25.00 2.62 1.44 4.06 50000 +update_of_key_big 35.00 0.05 0.04 0.09 501 +update_of_primary_key_many_keys 47.00 0.01 0.02 0.03 256 +update_with_key 119.00 18.44 12.64 31.08 300000 +update_with_key_prefix 36.00 6.23 3.85 10.08 100000 +wisc_benchmark 5.00 2.33 0.52 2.85 114 +TOTALS 5323.00 795.55 233.87 1029.42 2551551 diff --git a/sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 67da8f8a33a..00000000000 --- a/sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,14 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:47:38 - -Testing of ALTER TABLE -Testing with 1000 columns and 1000 rows in 20 steps -Insert data into the table -Time for insert (1000) 0 wallclock secs ( 0.06 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for alter_table_add (992): 252 wallclock secs ( 0.20 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for create_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for drop_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 260 wallclock secs ( 0.27 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/alter-table-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..bd9506b44c2 --- /dev/null +++ 
b/sql-bench/Results/alter-table-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,14 @@ +Testing server 'MySQL 3.23.39' at 2001-06-05 13:47:22 + +Testing of ALTER TABLE +Testing with 1000 columns and 1000 rows in 20 steps +Insert data into the table +Time for insert (1000) 0 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU) + +Time for alter_table_add (992): 261 wallclock secs ( 0.13 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.15 CPU) + +Time for create_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Time for drop_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU) diff --git a/sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index a4ff1e655ac..00000000000 --- a/sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,19 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:51:59 - -Testing of some unusual tables -All tests are done 1000 times with 1000 fields - -Testing table with 1000 fields -Testing select * from table with 1 record -Time to select_many_fields(1000): 9 wallclock secs ( 4.07 usr 3.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select all_fields from table with 1 record -Time to select_many_fields(1000): 13 wallclock secs ( 3.82 usr 3.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert VALUES() -Time to insert_many_fields(1000): 3 wallclock secs ( 0.23 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert (all_fields) VALUES() -Time to insert_many_fields(1000): 5 wallclock secs ( 0.06 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 30 wallclock secs ( 8.19 usr 6.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/big-tables-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..ff6f41e1bad --- /dev/null +++ b/sql-bench/Results/big-tables-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,19 @@ +Testing server 'MySQL 3.23.39' at 2001-06-05 13:51:53 + +Testing of some unusual tables +All tests are done 1000 times with 1000 fields + +Testing table with 1000 fields +Testing select * from table with 1 record +Time to select_many_fields(1000): 10 wallclock secs ( 4.43 usr 4.17 sys + 0.00 cusr 0.00 csys = 8.60 CPU) + +Testing select all_fields from table with 1 record +Time to select_many_fields(1000): 13 wallclock secs ( 4.42 usr 3.38 sys + 0.00 cusr 0.00 csys = 7.80 CPU) + +Testing insert VALUES() +Time to insert_many_fields(1000): 3 wallclock secs ( 0.46 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.49 CPU) + +Testing insert (all_fields) VALUES() +Time to insert_many_fields(1000): 7 wallclock secs ( 0.08 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.13 CPU) + +Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU) diff --git a/sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index b8317ca9ddb..00000000000 --- a/sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,30 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:52:30 - -Testing the speed of connecting to the server and sending of data -All 
tests are done 10000 times - -Testing connection/disconnect -Time to connect (10000): 10 wallclock secs ( 6.60 usr 1.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test connect/simple select/disconnect -Time for connect+select_simple (10000): 13 wallclock secs ( 7.36 usr 2.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test simple select -Time for select_simple (10000): 2 wallclock secs ( 0.52 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing connect/select 1 row from table/disconnect -Time to connect+select_1_row (10000): 13 wallclock secs ( 7.08 usr 2.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select 1 row from table -Time to select_1_row (10000): 3 wallclock secs ( 0.74 usr 0.62 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select 2 rows from table -Time to select_2_rows (10000): 3 wallclock secs ( 0.45 usr 0.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test select with aritmetic (+) -Time for select_column+column (10000): 3 wallclock secs ( 0.52 usr 0.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing retrieval of big records (7000 bytes) -Time to select_big (10000): 6 wallclock secs ( 2.98 usr 1.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 53 wallclock secs (26.25 usr 9.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/connect-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..29c5ea5e2de --- /dev/null +++ b/sql-bench/Results/connect-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,30 @@ +Testing server 'MySQL 3.23.39' at 2001-06-05 13:52:26 + +Testing the speed of connecting to the server and sending of data +All tests are done 10000 times + +Testing connection/disconnect +Time to connect (10000): 16 wallclock secs ( 6.84 usr 2.50 sys + 0.00 cusr 0.00 csys = 9.34 CPU) + +Test connect/simple select/disconnect +Time for connect+select_simple (10000): 13 wallclock secs ( 6.70 usr 3.21 sys + 0.00 cusr 0.00 csys = 9.91 CPU) + +Test simple select +Time for select_simple (10000): 2 wallclock secs ( 0.54 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.03 CPU) + +Testing connect/select 1 row from table/disconnect +Time to connect+select_1_row (10000): 15 wallclock secs ( 7.11 usr 3.70 sys + 0.00 cusr 0.00 csys = 10.81 CPU) + +Testing select 1 row from table +Time to select_1_row (10000): 3 wallclock secs ( 0.81 usr 0.69 sys + 0.00 cusr 0.00 csys = 1.50 CPU) + +Testing select 2 rows from table +Time to select_2_rows (10000): 3 wallclock secs ( 0.67 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.30 CPU) + +Test select with aritmetic (+) +Time for select_column+column (10000): 4 wallclock secs ( 0.52 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.98 CPU) + +Testing retrieval of big records (65000 bytes) +Time to select_big (10000): 30 wallclock secs (10.79 usr 6.41 sys + 0.00 cusr 0.00 csys = 17.20 CPU) + +Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU) diff --git a/sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 731c1569794..00000000000 --- a/sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,18 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:53:24 - -Testing the speed of creating and droping tables -Testing with 10000 tables and 10000 loop count - -Testing create of tables -Time for create_MANY_tables (10000): 32 wallclock secs ( 1.97 usr 0.49 sys + 
0.00 cusr 0.00 csys = 0.00 CPU) - -Accessing tables -Time to select_group_when_MANY_tables (10000): 14 wallclock secs ( 0.68 usr 0.63 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing drop -Time for drop_table_when_MANY_tables (10000): 9 wallclock secs ( 0.44 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing create+drop -Time for create+drop (10000): 26 wallclock secs ( 2.10 usr 0.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for create_key+drop (10000): 40 wallclock secs ( 3.64 usr 0.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 121 wallclock secs ( 8.83 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/create-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..15ffc740a83 --- /dev/null +++ b/sql-bench/Results/create-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,18 @@ +Testing server 'MySQL 3.23.39' at 2001-06-05 13:53:52 + +Testing the speed of creating and droping tables +Testing with 5000 tables and 10000 loop count + +Testing create of tables +Time for create_MANY_tables (5000): 18 wallclock secs ( 1.02 usr 0.62 sys + 0.00 cusr 0.00 csys = 1.64 CPU) + +Accessing tables +Time to select_group_when_MANY_tables (5000): 5 wallclock secs ( 0.43 usr 0.87 sys + 0.00 cusr 0.00 csys = 1.30 CPU) + +Testing drop +Time for drop_table_when_MANY_tables (5000): 6 wallclock secs ( 0.37 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.00 CPU) + +Testing create+drop +Time for create+drop (10000): 33 wallclock secs ( 2.94 usr 0.82 sys + 0.00 cusr 0.00 csys = 3.76 CPU) +Time for create_key+drop (10000): 41 wallclock secs ( 3.05 usr 0.66 sys + 0.00 cusr 0.00 csys = 3.71 CPU) +Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU) diff --git a/sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index e1dfc3171b9..00000000000 --- a/sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,58 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:55:26 - -Testing the speed of inserting data into 1 table and do some selects on it. -The tests are done with a table that has 100000 rows. 
- -Generating random keys -Creating tables -Inserting 100000 rows in order -Inserting 100000 rows in reverse order -Inserting 100000 rows in random order -Time for insert (300000): 113 wallclock secs (18.31 usr 11.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for insert_duplicates (300000): 113 wallclock secs (18.31 usr 11.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Retrieving data from the table -Time for select_big (10:3000000): 30 wallclock secs (19.98 usr 10.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_key (10:3000000): 31 wallclock secs (19.75 usr 10.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by (10:3000000): 47 wallclock secs (19.72 usr 16.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_diff_key (500:1000): 210 wallclock secs ( 0.28 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range_prefix (5010:42084): 10 wallclock secs ( 2.48 usr 0.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (5010:42084): 11 wallclock secs ( 2.61 usr 0.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key_prefix (200000): 130 wallclock secs (67.36 usr 13.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key (200000): 129 wallclock secs (66.05 usr 14.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test of compares with simple ranges -Time for select_range_prefix (20000:43500): 8 wallclock secs ( 3.59 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (20000:43500): 8 wallclock secs ( 3.74 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (111): 58 wallclock secs ( 0.06 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max_on_key (15000): 8 wallclock secs ( 4.40 usr 0.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max (60): 31 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_on_key (100): 56 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count (100): 46 wallclock secs ( 0.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (20): 64 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of keys with functions -Time for update_of_key (500): 25 wallclock secs ( 2.51 usr 2.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for update_of_key_big (501): 33 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update with key -Time for update_with_key (100000): 109 wallclock secs (13.71 usr 11.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of all rows -Time for update_big (500): 65 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing delete -Time for delete_key (10000): 4 wallclock secs ( 0.50 usr 0.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for delete_big (12): 20 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Insert into table with 16 keys and with a primary key with 16 parts -Time for insert_key (100000): 159 wallclock secs ( 8.91 usr 4.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of keys -Time for update_of_primary_key_many_keys (256): 67 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Deleting everything from table -Time for delete_big_many_keys (2): 120 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 1592 wallclock secs (254.20 usr 98.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/insert-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg 
b/sql-bench/Results/insert-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..9aae3dc3270 --- /dev/null +++ b/sql-bench/Results/insert-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,85 @@ +Testing server 'MySQL 3.23.39' at 2001-06-05 13:55:36 + +Testing the speed of inserting data into 1 table and do some selects on it. +The tests are done with a table that has 100000 rows. + +Generating random keys +Creating tables +Inserting 100000 rows in order +Inserting 100000 rows in reverse order +Inserting 100000 rows in random order +Time for insert (300000): 123 wallclock secs (21.22 usr 12.32 sys + 0.00 cusr 0.00 csys = 33.54 CPU) + +Testing insert of duplicates +Time for insert_duplicates (100000): 31 wallclock secs ( 5.06 usr 3.72 sys + 0.00 cusr 0.00 csys = 8.78 CPU) + +Retrieving data from the table +Time for select_big (10:3000000): 32 wallclock secs (21.78 usr 10.07 sys + 0.00 cusr 0.00 csys = 31.85 CPU) +Time for order_by_big_key (10:3000000): 33 wallclock secs (23.35 usr 10.15 sys + 0.00 cusr 0.00 csys = 33.50 CPU) +Time for order_by_big_key_desc (10:3000000): 36 wallclock secs (23.47 usr 10.27 sys + 0.00 cusr 0.00 csys = 33.74 CPU) +Time for order_by_big_key_prefix (10:3000000): 33 wallclock secs (22.18 usr 9.81 sys + 0.00 cusr 0.00 csys = 31.99 CPU) +Time for order_by_big_key2 (10:3000000): 32 wallclock secs (22.53 usr 9.81 sys + 0.00 cusr 0.00 csys = 32.34 CPU) +Time for order_by_big_key_diff (10:3000000): 74 wallclock secs (22.66 usr 9.76 sys + 0.00 cusr 0.00 csys = 32.42 CPU) +Time for order_by_big (10:3000000): 78 wallclock secs (22.39 usr 9.83 sys + 0.00 cusr 0.00 csys = 32.22 CPU) +Time for order_by_range (500:125750): 8 wallclock secs ( 1.26 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.75 CPU) +Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 0.97 usr 0.57 sys + 0.00 cusr 0.00 csys = 1.54 CPU) +Time for order_by_key2_diff (500:250500): 9 wallclock secs ( 1.30 usr 0.85 sys + 0.00 cusr 0.00 csys = 2.15 CPU) +Time for select_diff_key (500:1000): 193 wallclock secs ( 0.32 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.36 CPU) +Time for select_range_prefix (5010:42084): 13 wallclock secs ( 2.55 usr 0.51 sys + 0.00 cusr 0.00 csys = 3.06 CPU) +Time for select_range_key2 (5010:42084): 12 wallclock secs ( 2.81 usr 0.68 sys + 0.00 cusr 0.00 csys = 3.49 CPU) +Time for select_key_prefix (200000): 141 wallclock secs (86.32 usr 12.05 sys + 0.00 cusr 0.00 csys = 98.37 CPU) +Time for select_key (200000): 132 wallclock secs (53.98 usr 10.53 sys + 0.00 cusr 0.00 csys = 64.51 CPU) +Time for select_key_return_key (200000): 125 wallclock secs (59.92 usr 12.00 sys + 0.00 cusr 0.00 csys = 71.92 CPU) +Time for select_key2 (200000): 139 wallclock secs (78.61 usr 11.08 sys + 0.00 cusr 0.00 csys = 89.69 CPU) +Time for select_key2_return_key (200000): 131 wallclock secs (64.58 usr 9.61 sys + 0.00 cusr 0.00 csys = 74.19 CPU) +Time for select_key2_return_prim (200000): 134 wallclock secs (72.33 usr 11.34 sys + 0.00 cusr 0.00 csys = 83.67 CPU) + +Test of compares with simple ranges +Time for select_range_prefix (20000:43500): 9 wallclock secs ( 4.00 usr 0.89 sys + 0.00 cusr 0.00 csys = 4.89 CPU) +Time for select_range_key2 (20000:43500): 9 wallclock secs ( 4.34 usr 0.72 sys + 0.00 cusr 0.00 csys = 5.06 CPU) +Time for select_group (111): 55 wallclock secs ( 0.02 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.06 CPU) +Time for min_max_on_key (15000): 8 wallclock secs ( 5.12 usr 0.76 sys + 0.00 cusr 0.00 csys = 5.88 CPU) +Time for min_max (60): 30 wallclock secs ( 0.04 usr 0.01 
sys + 0.00 cusr 0.00 csys = 0.05 CPU) +Time for count_on_key (100): 52 wallclock secs ( 0.03 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.05 CPU) +Time for count (100): 45 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) +Time for count_distinct_big (20): 98 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Testing update of keys with functions +Time for update_of_key (50000): 25 wallclock secs ( 2.62 usr 1.44 sys + 0.00 cusr 0.00 csys = 4.06 CPU) +Time for update_of_key_big (501): 35 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU) + +Testing update with key +Time for update_with_key (300000): 119 wallclock secs (18.44 usr 12.64 sys + 0.00 cusr 0.00 csys = 31.08 CPU) +Time for update_with_key_prefix (100000): 36 wallclock secs ( 6.23 usr 3.85 sys + 0.00 cusr 0.00 csys = 10.08 CPU) + +Testing update of all rows +Time for update_big (10): 64 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing left outer join +Time for outer_join_on_key (10:10): 60 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join (10:10): 110 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join_found (10:10): 107 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join_not_found (500:10): 59 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing INSERT INTO ... SELECT +Time for insert_select_1_key (1): 7 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for insert_select_2_keys (1): 9 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing delete +Time for delete_key (10000): 4 wallclock secs ( 0.76 usr 0.29 sys + 0.00 cusr 0.00 csys = 1.05 CPU) +Time for delete_all (12): 17 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Insert into table with 16 keys and with a primary key with 16 parts +Time for insert_key (100000): 137 wallclock secs ( 9.91 usr 6.26 sys + 0.00 cusr 0.00 csys = 16.17 CPU) + +Testing update of keys +Time for update_of_primary_key_many_keys (256): 47 wallclock secs ( 0.01 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.03 CPU) + +Deleting rows from the table +Time for delete_big_many_keys (128): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU) + +Deleting everything from table +Time for delete_all_many_keys (1): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU) + +Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU) diff --git a/sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 45bb324ec79..00000000000 --- a/sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,23 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 18:22:00 - -Testing the speed of selecting on keys that consist of many parts -The test-table has 10000 rows and the test is done with 12 ranges. 
- -Creating table -Inserting 10000 rows -Time to insert (10000): 4 wallclock secs ( 0.81 usr 0.43 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing big selects on the table -Time for select_big (70:17207): 1 wallclock secs ( 0.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (410:75949): 2 wallclock secs ( 0.65 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max_on_key (70000): 205 wallclock secs (20.60 usr 3.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_on_key (50000): 518 wallclock secs (16.08 usr 3.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for count_group_on_key_parts (1000:0): 61 wallclock secs ( 1.09 usr 0.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Testing count(distinct) on the table -Time for count_distinct (1000:2000): 124 wallclock secs ( 0.65 usr 0.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key (1000:6000): 64 wallclock secs ( 0.37 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key_parts (1000:100000): 77 wallclock secs ( 0.93 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group (1000:100000): 77 wallclock secs ( 0.94 usr 0.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (1000:10000000): 559 wallclock secs (69.04 usr 55.99 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 1692 wallclock secs (111.29 usr 65.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/select-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..c224f1e223f --- /dev/null +++ b/sql-bench/Results/select-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,30 @@ +Testing server 'MySQL 3.23.39' at 2001-06-05 14:41:13 + +Testing the speed of selecting on keys that consist of many parts +The test-table has 10000 rows and the test is done with 500 ranges. 
+ +Creating table +Inserting 10000 rows +Time to insert (10000): 5 wallclock secs ( 0.80 usr 0.34 sys + 0.00 cusr 0.00 csys = 1.14 CPU) + +Test if the database has a query cache +Time for select_query_cache (10000): 120 wallclock secs ( 3.67 usr 0.53 sys + 0.00 cusr 0.00 csys = 4.20 CPU) + +Time for select_query_cache2 (10000): 120 wallclock secs ( 3.80 usr 0.57 sys + 0.00 cusr 0.00 csys = 4.37 CPU) + +Testing big selects on the table +Time for select_big (70:17207): 1 wallclock secs ( 0.15 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.22 CPU) +Time for select_range (410:1057904): 201 wallclock secs ( 9.05 usr 3.95 sys + 0.00 cusr 0.00 csys = 13.00 CPU) +Time for min_max_on_key (70000): 222 wallclock secs (23.16 usr 3.67 sys + 0.00 cusr 0.00 csys = 26.83 CPU) +Time for count_on_key (50000): 534 wallclock secs (16.58 usr 2.69 sys + 0.00 cusr 0.00 csys = 19.27 CPU) + +Time for count_group_on_key_parts (1000:100000): 58 wallclock secs ( 1.16 usr 0.35 sys + 0.00 cusr 0.00 csys = 1.51 CPU) +Testing count(distinct) on the table +Time for count_distinct_key_prefix (1000:1000): 51 wallclock secs ( 0.45 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.53 CPU) +Time for count_distinct (1000:1000): 60 wallclock secs ( 0.42 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.50 CPU) +Time for count_distinct_2 (1000:1000): 63 wallclock secs ( 0.18 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.21 CPU) +Time for count_distinct_group_on_key (1000:6000): 59 wallclock secs ( 0.51 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.58 CPU) +Time for count_distinct_group_on_key_parts (1000:100000): 194 wallclock secs ( 1.12 usr 0.46 sys + 0.00 cusr 0.00 csys = 1.58 CPU) +Time for count_distinct_group (1000:100000): 194 wallclock secs ( 1.21 usr 0.37 sys + 0.00 cusr 0.00 csys = 1.58 CPU) +Time for count_distinct_big (100:1000000): 67 wallclock secs ( 7.77 usr 3.16 sys + 0.00 cusr 0.00 csys = 10.93 CPU) +Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU) diff --git a/sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index c2eb314c1f4..00000000000 --- a/sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,14 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 18:50:12 - -Wisconsin benchmark test - -Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (31000): 11 wallclock secs ( 1.12 usr 0.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Running actual benchmark -Time for wisc_benchmark (114): 4 wallclock secs ( 1.75 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 16 wallclock secs ( 2.87 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/wisconsin-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..54a474a53d2 --- /dev/null +++ b/sql-bench/Results/wisconsin-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,14 @@ +Testing server 'MySQL 3.23.39' at 2001-06-05 15:13:43 + +Wisconsin benchmark test + +Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Inserting data +Time to insert (31000): 13 wallclock secs ( 1.59 usr 1.18 sys + 0.00 cusr 0.00 csys = 2.77 CPU) +Time to delete_big (1): 1 wallclock 
secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Running actual benchmark +Time for wisc_benchmark (114): 5 wallclock secs ( 2.33 usr 0.52 sys + 0.00 cusr 0.00 csys = 2.85 CPU) + +Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU) From 1efd1b1507b7b3041ae4977e63131b824c16194b Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 5 Jun 2001 21:49:03 +0300 Subject: [PATCH 08/33] mem0pool.h Fix a bus error in aligment of longlong on Solaris innobase/include/mem0pool.h: Fix a bus error in aligment of longlong on Solaris --- innobase/include/mem0pool.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/innobase/include/mem0pool.h b/innobase/include/mem0pool.h index 14198920bca..eb675b4a7f9 100644 --- a/innobase/include/mem0pool.h +++ b/innobase/include/mem0pool.h @@ -31,7 +31,8 @@ struct mem_area_struct{ }; /* Each memory area takes this many extra bytes for control information */ -#define MEM_AREA_EXTRA_SIZE (sizeof(struct mem_area_struct)) +#define MEM_AREA_EXTRA_SIZE (ut_calc_align(sizeof(struct mem_area_struct),\ + UNIV_MEM_ALIGNMENT)) /************************************************************************ Creates a memory pool. */ From 703b2384df3ba5a8b8421f94724a90bc716e54a3 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 5 Jun 2001 16:29:52 -0500 Subject: [PATCH 09/33] manual.texi correct instance of mysql_escape() to mysql_escape_string(); manual.texi add mention of mysql_escape_string() for PHP API; manual.texi add mention of PreparedStatement, placeholders for JDBC API. Docs/manual.texi: correct instance of mysql_escape() to mysql_escape_string(); add mention of mysql_escape_string() for PHP API; add mention of PreparedStatement, placeholders for JDBC API. --- Docs/manual.texi | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index c07d8cf2b40..8406e641275 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -12071,11 +12071,13 @@ not give your applications any more access privileges than they need. Users of PHP: @itemize @bullet @item Check out the @code{addslashes()} function. +As of PHP 4.0.3, a @code{mysql_escape_string()} function is available +that is based on the function of the same name in the @strong{MySQL} C API. @end itemize @item Users of @strong{MySQL} C API: @itemize @bullet -@item Check out the @code{mysql_escape()} API call. +@item Check out the @code{mysql_escape_string()} API call. @end itemize @item Users of @strong{MySQL}++: @@ -12087,6 +12089,11 @@ Users of Perl DBI: @itemize @bullet @item Check out the @code{quote()} method or use placeholders. @end itemize +@item +Users of Java JDBC: +@itemize @bullet +@item Use a @code{PreparedStatement} object and placeholders. +@end itemize @end itemize @item From 3e50dc195800d9b5a7d8e1f4acf2acc93bd06bb2 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 5 Jun 2001 16:46:51 -0500 Subject: [PATCH 10/33] manual.texi minor clarifications to Windows section Docs/manual.texi: minor clarifications to Windows section --- Docs/manual.texi | 51 +++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index 8406e641275..0d765502454 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -8878,8 +8878,8 @@ in a while. @section Windows Notes This section describes installation and use of @strong{MySQL} on Windows. -This is also described in the @file{README} file that comes with the -@strong{MySQL} Windows distribution. 
+This information is also provided in the @file{README} file that comes +with the @strong{MySQL} Windows distribution. @menu * Windows installation:: Installing @strong{MySQL} on Windows @@ -8895,6 +8895,10 @@ This is also described in the @file{README} file that comes with the @node Windows installation, Win95 start, Windows, Windows @subsection Installing MySQL on Windows +The following instructions apply to precompiled binary distributions. +If you download a source distribution, you will have to compile and install +it yourself. + If you don't have a copy of the @strong{MySQL} distribution, you should first download one from @uref{http://www.mysql.com/downloads/mysql-3.23.html}. @@ -8907,23 +8911,30 @@ To install either distribution, unzip it in some empty directory and run the @code{Setup.exe} program. By default, @strong{MySQL}-Windows is configured to be installed in -@file{C:\mysql}. If you want to install @strong{MySQL} elsewhere, install it -in @file{C:\mysql} first, then move the installation to where you want it. If -you do move @strong{MySQL}, you must tell @code{mysqld} where everything is by -supplying options to @code{mysqld}. Use @code{C:\mysql\bin\mysqld --help} to -display all options! For example, if you have moved the @strong{MySQL} -distribution to @file{D:\programs\mysql}, you must start @code{mysqld} with: -@code{D:\programs\mysql\bin\mysqld --basedir D:\programs\mysql} +@file{C:\mysql}. If you want to install @strong{MySQL} elsewhere, +install it in @file{C:\mysql} first, then move the installation to +where you want it. If you do move @strong{MySQL}, you must indicate +where everything is located by supplying a @code{--basedir} option when +you start the server. For example, if you have moved the @strong{MySQL} +distribution to @file{D:\programs\mysql}, you must start @code{mysqld} +like this: + +@example +C:\> D:\programs\mysql\bin\mysqld --basedir D:\programs\mysql +@end example + +Use @code{mysqld --help} to display all the options that @code{mysqld} +understands! With all newer @strong{MySQL} versions, you can also create a @file{C:\my.cnf} file that holds any default options for the @strong{MySQL} server. Copy the file @file{\mysql\my-xxxxx.cnf} to @file{C:\my.cnf} and edit it to suit your setup. Note that you should specify all paths with @samp{/} instead of @samp{\}. If you use -@samp{\}, you need to specify it twice, as @samp{\} is the escape +@samp{\}, you need to specify it twice, because @samp{\} is the escape character in @strong{MySQL}. @xref{Option files}. -Starting from @strong{MySQL} 3.23.38, the Windows distribution includes +Starting with @strong{MySQL} 3.23.38, the Windows distribution includes both the normal and the @strong{MySQL-Max} binaries. The main benefit of using the normal @code{mysqld.exe} binary is that it's a little faster and uses less resources. @@ -8980,9 +8991,13 @@ You can kill the @strong{MySQL} server by executing: C:\> C:\mysql\bin\mysqladmin -u root shutdown @end example -Note that Win95 and Win98 don't support creation of named pipes. On -Win95 and Win98, you can only use named pipes to connect to a remote -@strong{MySQL} running on an NT server. +Note that Win95 and Win98 don't support creation of named pipes. +On Win95 and Win98, you can only use named pipes to connect to a +remote @strong{MySQL} server running on a Windows NT server host. +(The @strong{MySQL} server must also support named pipes, of +course. For example, using @code{mysqld-opt} under NT will not allow +named pipe connections. 
You should use either @code{mysqld-nt} or +@code{mysqld-max-nt}.) If @code{mysqld} doesn't start, please check whether or not the @file{\mysql\mysql.err} file contains any reason for this. You can also @@ -9039,9 +9054,9 @@ with the default service options. If you have stopped @code{mysqld-nt}, you have to start it with @code{NET START mysql}. The service is installed with the name @code{MySQL}. Once installed, it must -be started using the Services Control Manager (SCM) Utility (found in Control -Panel) or by using the @code{NET START MySQL} command. If any options are -desired, they must be specified as ``Startup parameters'' in the SCM utility +be started using the Services Control Manager (SCM) Utility found in the +Control Panel, or by using the @code{NET START MySQL} command. If any options +are desired, they must be specified as ``Startup parameters'' in the SCM utility before you start the @strong{MySQL} service. Once running, @code{mysqld-nt} can be stopped using @code{mysqladmin}, or from the SCM utility or by using the command @code{NET STOP MySQL}. If you use SCM to stop @code{mysqld-nt}, @@ -9181,7 +9196,7 @@ Here is a note about how to connect to get a secure connection to remote @itemize @bullet @item -Install an SSH client on your Windows machine --- As a user, the best non-free +Install an SSH client on your Windows machine. As a user, the best non-free one I've found is from @code{SecureCRT} from @uref{http://www.vandyke.com/}. Another option is @code{f-secure} from @uref{http://www.f-secure.com/}. You can also find some free ones on @strong{Google} at From 30bd6b3bbc21ea8b8c933d0497274f2c8a7476b7 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 6 Jun 2001 03:10:00 +0300 Subject: [PATCH 11/33] Updated manual about symlinks on window and adding users BitKeeper/etc/ignore: added sql-bench/gif/* --- .bzrignore | 1 + Docs/manual.texi | 73 +++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 67 insertions(+), 7 deletions(-) diff --git a/.bzrignore b/.bzrignore index c58c5a69f04..dbe41ffe92a 100644 --- a/.bzrignore +++ b/.bzrignore @@ -283,3 +283,4 @@ support-files/mysql.server support-files/mysql.spec tags tmp/* +sql-bench/gif/* diff --git a/Docs/manual.texi b/Docs/manual.texi index c07d8cf2b40..b23dd090a74 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -369,7 +369,7 @@ The MySQL Access Privilege System * Request access:: Access control, stage 2: Request verification * Privilege changes:: When privilege changes take effect * Default privileges:: Setting up the initial @strong{MySQL} privileges -* Adding users:: Adding new user privileges to @strong{MySQL} +* Adding users:: Adding new users to @strong{MySQL} * Passwords:: How to set up passwords * Access denied:: Causes of @code{Access denied} errors @@ -9238,6 +9238,20 @@ should create the file @file{C:\mysql\data\foo.sym} that contains the text @code{D:\data\foo}. After that, all tables created in the database @code{foo} will be created in @file{D:\data\foo}. +Note that because of the speed penalty you get when opening every table, +we have not enabled this by default even if you have compiled +@strong{MySQL} with support for this. To enable symlinks you should put +in your @code{my.cnf} or @code{my.ini} file the following entry: + +@example +[mysqld] +use-symbolic-links +@end example + +In @strong{MySQL} 4.0 we will enable symlinks by default. Then you +should instead use the @code{skip-symlink} option if you want to +disable this. 
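As a concrete sketch of the above (the database name @code{foo} and the
@file{D:\data\foo} location are only examples), you can check the symlink
file from the command prompt; with the @code{use-symbolic-links} entry from
the preceding example in place and the server restarted, tables created in
@code{foo} should then appear under @file{D:\data\foo}:

@example
C:\> type C:\mysql\data\foo.sym
D:\data\foo\
@end example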
+ @cindex compiling, on Windows @cindex Windows, compiling on @node Windows compiling, Windows vs Unix, Windows symbolic links, Windows @@ -11928,7 +11942,7 @@ system. This section describes how it works. * Request access:: Access control, stage 2: Request verification * Privilege changes:: When privilege changes take effect * Default privileges:: Setting up the initial @strong{MySQL} privileges -* Adding users:: Adding new user privileges to @strong{MySQL} +* Adding users:: Adding new users to @strong{MySQL} * Passwords:: How to set up passwords * Access denied:: Causes of @code{Access denied} errors @end menu @@ -12292,7 +12306,6 @@ DATA INFILE} and administrative operations. @cindex user names, and passwords @cindex passwords, for users - There are several distinctions between the way user names and passwords are used by @strong{MySQL} and the way they are used by Unix or Windows: @@ -12326,6 +12339,42 @@ knowing your 'scrambled' password is enough to be able to connect to the @strong{MySQL} server! @end itemize +@strong{MySQL} users and they privileges are normally created with the +@code{GRANT} command. @xref{GRANT}. + +When you login to a @strong{MySQL} server with a command line client you +should specify the password with @code{--password=your-password}. +@xref{Connecting}. + +@example +mysql --user=monty --password=guess database_name +@end example + +If you want the client to prompt for a password, you should use +@code{--password} without any argument + +@example +mysql --user=monty --password database_name +@end example + +or the short form: + +@example +mysql -u monty -p database_name +@end example + +Note that in the last example the password is @strong{NOT} 'database_name'. + +If you want to use the -p option to supply a password you should do like this: + +@example +mysql -u monty -pguess database_name +@end example + +On some system the library call that @strong{MySQL} uses to prompt for a +password will automaticly cut the password to 8 characters. Internally +@strong{MySQL} doesn't have any limit for the length of the password. + @node Connecting, Password security, User names, Privilege system @section Connecting to the MySQL Server @cindex connecting, to the server @@ -13385,12 +13434,15 @@ running @code{mysql_install_db}. @findex GRANT statement @findex statements, GRANT @node Adding users, Passwords, Default privileges, Privilege system -@section Adding New User Privileges to MySQL +@section Adding New Users to MySQL You can add users two different ways: by using @code{GRANT} statements or by manipulating the @strong{MySQL} grant tables directly. The preferred method is to use @code{GRANT} statements, because they are -more concise and less error-prone. +more concise and less error-prone. @xref{GRANT}. + +There is also a lot of contributed programs like @code{phpmyadmin} that +can be used to create and administrate users. @xref{Contrib}. The examples below show how to use the @code{mysql} client to set up new users. These examples assume that privileges are set up according to the @@ -13501,6 +13553,11 @@ mysql> GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,DROP IDENTIFIED BY 'stupid'; @end example +The reason that we do to grant statements for the user 'custom' is that +we want the give the user access to @strong{MySQL} both from the local +machine with Unix sockets and from the remote machine 'whitehouse.gov' +over TCP/IP. 
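As a quick sanity check of the two @code{GRANT} statements (a sketch only;
@code{database_name} and @code{your.server.host} are placeholders), try both
access paths with the @code{mysql} client. Run the first command on the
server machine itself so that it uses the Unix socket, and the second one
from the remote machine so that it connects over TCP/IP:

@example
shell> mysql -u custom -p database_name
shell> mysql -h your.server.host -u custom -p database_name
@end example

If one of the connections fails with an @code{Access denied} error, the
corresponding @code{GRANT} statement is the one to re-check.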
+ To set up the user's privileges by modifying the grant tables directly, run these commands (note the @code{FLUSH PRIVILEGES} at the end): @@ -23000,8 +23057,9 @@ REVOKE priv_type [(column_list)] [, priv_type [(column_list)] ...] @code{GRANT} is implemented in @strong{MySQL} Version 3.22.11 or later. For earlier @strong{MySQL} versions, the @code{GRANT} statement does nothing. -The @code{GRANT} and @code{REVOKE} commands allow system administrators to -grant and revoke rights to @strong{MySQL} users at four privilege levels: +The @code{GRANT} and @code{REVOKE} commands allow system administrators +to create users and grant and revoke rights to @strong{MySQL} users at +four privilege levels: @table @strong @item Global level @@ -23021,6 +23079,7 @@ Column privileges apply to single columns in a given table. These privileges are stored in the @code{mysql.columns_priv} table. @end table +If you give a grant for a users that doesn't exists, that user is created. For examples of how @code{GRANT} works, see @ref{Adding users}. For the @code{GRANT} and @code{REVOKE} statements, @code{priv_type} may be From 5388d91592db38f4cee3e5c27b108e1f8da8716d Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 6 Jun 2001 16:12:30 +0300 Subject: [PATCH 12/33] ha_innobase.cc Improved error message in the case the .frm file exists but InnoDB data dictionary does not conatin the table sql/ha_innobase.cc: Improved error message in the case the .frm file exists but InnoDB data dictionary does not conatin the table --- sql/ha_innobase.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/sql/ha_innobase.cc b/sql/ha_innobase.cc index 4a69056a9e2..51ea5f40018 100644 --- a/sql/ha_innobase.cc +++ b/sql/ha_innobase.cc @@ -679,7 +679,7 @@ ha_innobase::bas_ext() const /* out: file extension strings, currently not used */ { - static const char* ext[] = {".not_used", NullS}; + static const char* ext[] = {".InnoDB_table_inside_tablespace", NullS}; return(ext); } @@ -779,6 +779,14 @@ ha_innobase::open( if (NULL == (ib_table = dict_table_get(norm_name, NULL))) { + fprintf(stderr, "InnoDB: Cannot find table %s from the internal\n" + "InnoDB: data dictionary of InnoDB though the .frm file\n" + "InnoDB: for the table exists. 
Maybe you have deleted\n" + "InnoDB: and created again an InnoDB database but\n" + "InnoDB: forgotten to delete the corresponding\n" + "InnoDB: .frm files of old InnoDB tables?\n", + norm_name); + free_share(share); my_free((char*) upd_buff, MYF(0)); my_errno = ENOENT; From bb72939a5bf0d950aec3224292948f01caa986bd Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 6 Jun 2001 15:45:07 -0400 Subject: [PATCH 13/33] sql_table.cc re-apply a lost change where we check to see if the sql_table.cc table handler supports temporary tables sql/sql_table.cc: re-apply a lost change where we check to see if the table handler supports temporary tables BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 9 +-------- sql/sql_table.cc | 7 +++++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 0ef0a6a42c7..20c4a1faba5 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1,8 +1 @@ -heikki@donna.mysql.fi -jani@janikt.pp.saunalahti.fi -monty@hundin.mysql.fi -mwagner@evoq.mwagner.org -paul@central.snake.net -sasha@mysql.sashanet.com -serg@serg.mysql.com -paul@teton.kitebird.com +mikef@nslinuxw4.bedford.progress.com diff --git a/sql/sql_table.cc b/sql/sql_table.cc index f287481921d..3fa2bc5d9d3 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -221,6 +221,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, db_options|=HA_OPTION_PACK_RECORD; file=get_new_handler((TABLE*) 0, create_info->db_type); + if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) && + (file->option_flag() & HA_NO_TEMP_TABLES)) + { + my_error(ER_ILLEGAL_HA,MYF(0),table_name); + DBUG_RETURN(-1); + } + /* Don't pack keys in old tables if the user has requested this */ while ((sql_field=it++)) From 14289d42edec950a6cc9f3db1e242cf8595e207d Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 7 Jun 2001 00:10:59 +0300 Subject: [PATCH 14/33] Changed innodb_unix_file_flush_method -> innodb_flush_method Cleaned up error messages. mysql-test/mysql-test-run.sh: Added option --mysqld to send arguments to mysqld. sql/ha_innobase.cc: Shortended messages a bit to correspond to the rest of MySQL. 
sql/mysqld.cc: Changed innodb_unix_file_flush_method -> innodb_flush_method BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 2 +- mysql-test/mysql-test-run.sh | 6 ++++++ sql/ha_innobase.cc | 13 ++++++------- sql/mysqld.cc | 3 ++- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 20c4a1faba5..e8deba03c8a 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1 +1 @@ -mikef@nslinuxw4.bedford.progress.com +monty@hundin.mysql.fi diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 0dfdbda701e..6634d8696e6 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -143,6 +143,10 @@ while test $# -gt 0; do EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1" SLEEP_TIME=`$ECHO "$1" | $SED -e "s;--sleep=;;"` ;; + --mysqld=*) + TMP=`$ECHO "$1" | $SED -e "s;--mysqld-=;"` + EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $TMP" + ;; --gcov ) if [ x$BINARY_DIST = x1 ] ; then $ECHO "Cannot do coverage test without the source - please use source dist" @@ -158,12 +162,14 @@ while test $# -gt 0; do $ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with -gdb option" fi DO_GDB=1 + USE_RUNNING_SERVER="" ;; --ddd ) if [ x$BINARY_DIST = x1 ] ; then $ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with -gdb option" fi DO_DDD=1 + USE_RUNNING_SERVER="" ;; --skip-*) EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT $1" diff --git a/sql/ha_innobase.cc b/sql/ha_innobase.cc index 51ea5f40018..6bae31902c3 100644 --- a/sql/ha_innobase.cc +++ b/sql/ha_innobase.cc @@ -679,7 +679,7 @@ ha_innobase::bas_ext() const /* out: file extension strings, currently not used */ { - static const char* ext[] = {".InnoDB_table_inside_tablespace", NullS}; + static const char* ext[] = {".InnoDB", NullS}; return(ext); } @@ -779,12 +779,11 @@ ha_innobase::open( if (NULL == (ib_table = dict_table_get(norm_name, NULL))) { - fprintf(stderr, "InnoDB: Cannot find table %s from the internal\n" - "InnoDB: data dictionary of InnoDB though the .frm file\n" - "InnoDB: for the table exists. Maybe you have deleted\n" - "InnoDB: and created again an InnoDB database but\n" - "InnoDB: forgotten to delete the corresponding\n" - "InnoDB: .frm files of old InnoDB tables?\n", + fprintf(stderr, "\ +Cannot find table %s from the internal data dictionary\n\ +of InnoDB though the .frm file for the table exists. 
Maybe you have deleted\n\ +and created again an InnoDB database but forgotten to delete the\n\ +corresponding .frm files of old InnoDB tables?\n", norm_name); free_share(share); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index e72440ba4b1..ffef39964da 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2796,7 +2796,7 @@ struct show_var_st init_vars[]= { {"innodb_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR}, {"innodb_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL}, {"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR}, - {"innodb_unix_file_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR}, + {"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR}, #endif {"interactive_timeout", (char*) &net_interactive_timeout, SHOW_LONG}, {"join_buffer_size", (char*) &join_buff_size, SHOW_LONG}, @@ -3073,6 +3073,7 @@ static void usage(void) puts("\ --innodb_data_home_dir=dir The common part for Innodb table spaces\n\ --innodb_data_file_path=dir Path to individual files and their sizes\n\ + --innodb_flush_method=# Which method to flush data\n\ --innodb_flush_log_at_trx_commit[=#]\n\ Set to 0 if you don't want to flush logs\n\ --innodb_log_arch_dir=dir Where full logs should be archived\n\ From 9c20e1e704cb3d288d3de0cd79d36896455cac68 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 7 Jun 2001 17:32:02 +0300 Subject: [PATCH 15/33] page0cur.ic Fix a bug in insert buffer and multiversioning manual.texi Changed innodb_unix_file_flush_method to innodb_flush_method in the manual Docs/manual.texi: Changed innodb_unix_file_flush_method to innodb_flush_method in the manual innobase/include/page0cur.ic: Fix a bug in insert buffer and multiversioning BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + Docs/manual.texi | 4 ++-- innobase/include/page0cur.ic | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index e8deba03c8a..2ca78e17fab 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1 +1,2 @@ monty@hundin.mysql.fi +heikki@donna.mysql.fi diff --git a/Docs/manual.texi b/Docs/manual.texi index 76e84b4cdad..49e13c7d3b4 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -25727,7 +25727,7 @@ in its own lock table and rolls back the transaction. If you use than InnoDB in the same transaction, then a deadlock may arise which InnoDB cannot notice. In cases like this the timeout is useful to resolve the situation. -@item @code{innodb_unix_file_flush_method} @tab +@item @code{innodb_flush_method} @tab (Available from 3.23.39 up.) The default value for this is @code{fdatasync}. Another option is @code{O_DSYNC}. @@ -26338,7 +26338,7 @@ In some versions of Linux and Unix, flushing files to disk with the Unix @code{fdatasync} and other similar methods is surprisingly slow. The default method InnoDB uses is the @code{fdatasync} function. If you are not satisfied with the database write performance, you may -try setting @code{innodb_unix_file_flush_method} in @file{my.cnf} +try setting @code{innodb_flush_method} in @file{my.cnf} to @code{O_DSYNC}, though O_DSYNC seems to be slower on most systems. 
You can also try setting it to @code{littlesync}, which means that InnoDB does not call the file flush for every write it does to a diff --git a/innobase/include/page0cur.ic b/innobase/include/page0cur.ic index 4313036adaf..39f8ab11513 100644 --- a/innobase/include/page0cur.ic +++ b/innobase/include/page0cur.ic @@ -171,10 +171,10 @@ page_cur_search( ut_ad(dtuple_check_typed(tuple)); page_cur_search_with_match(page, tuple, mode, - &low_matched_fields, - &low_matched_bytes, &up_matched_fields, &up_matched_bytes, + &low_matched_fields, + &low_matched_bytes, cursor); return(low_matched_fields); } From 50861b3142ae712a3ce728364c4ac4fe6e06a6da Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 7 Jun 2001 19:26:00 +0300 Subject: [PATCH 16/33] manual.texi Change my email address (of course I read also from the old address) Docs/manual.texi: Change my email address (of course I read also from the old address) --- Docs/manual.texi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index 49e13c7d3b4..db3b7ebce19 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -26810,7 +26810,7 @@ Contact information of Innobase Oy, producer of the InnoDB engine: @example Website: www.innodb.com -Heikki.Tuuri@@innobase.inet.fi +Heikki.Tuuri@@innodb.com phone: 358-9-6969 3250 (office) 358-40-5617367 (mobile) InnoDB Oy Inc. World Trade Center Helsinki From 9de7a216545fce43bb8e71698582138fc46db087 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 7 Jun 2001 20:24:53 +0300 Subject: [PATCH 17/33] trx0roll.c Changed Innobase to Innodb in some fprintfs trx0sys.c Changed Innobase to Innodb in some fprintfs innobase/trx/trx0roll.c: Changed Innobase to Innodb in some fprintfs innobase/trx/trx0sys.c: Changed Innobase to Innodb in some fprintfs --- innobase/trx/trx0roll.c | 10 +++++----- innobase/trx/trx0sys.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/innobase/trx/trx0roll.c b/innobase/trx/trx0roll.c index 13e2d1869ab..6b74c0d0d51 100644 --- a/innobase/trx/trx0roll.c +++ b/innobase/trx/trx0roll.c @@ -176,7 +176,7 @@ trx_rollback_all_without_sess(void) if (UT_LIST_GET_FIRST(trx_sys->trx_list)) { fprintf(stderr, - "Innobase: Starting rollback of uncommitted transactions\n"); + "InnoDB: Starting rollback of uncommitted transactions\n"); } else { return; } @@ -196,7 +196,7 @@ loop: if (trx == NULL) { fprintf(stderr, - "Innobase: Rollback of uncommitted transactions completed\n"); + "InnoDB: Rollback of uncommitted transactions completed\n"); mem_heap_free(heap); @@ -221,7 +221,7 @@ loop: ut_a(thr == que_fork_start_command(fork, SESS_COMM_EXECUTE, 0)); - fprintf(stderr, "Innobase: Rolling back trx no %lu\n", + fprintf(stderr, "InnoDB: Rolling back trx no %lu\n", ut_dulint_get_low(trx->id)); mutex_exit(&kernel_mutex); @@ -238,7 +238,7 @@ loop: mutex_exit(&kernel_mutex); fprintf(stderr, - "Innobase: Waiting rollback of trx no %lu to end\n", + "InnoDB: Waiting rollback of trx no %lu to end\n", ut_dulint_get_low(trx->id)); os_thread_sleep(100000); @@ -264,7 +264,7 @@ loop: mutex_exit(&(dict_sys->mutex)); } - fprintf(stderr, "Innobase: Rolling back of trx no %lu completed\n", + fprintf(stderr, "InnoDB: Rolling back of trx no %lu completed\n", ut_dulint_get_low(trx->id)); mem_heap_free(heap); diff --git a/innobase/trx/trx0sys.c b/innobase/trx/trx0sys.c index ef5eb5d9443..99ec5b50237 100644 --- a/innobase/trx/trx0sys.c +++ b/innobase/trx/trx0sys.c @@ -198,7 +198,7 @@ trx_sys_init_at_db_start(void) if (UT_LIST_GET_LEN(trx_sys->trx_list) > 0) { fprintf(stderr, 
- "Innobase: %lu uncommitted transaction(s) which must be rolled back\n", + "InnoDB: %lu uncommitted transaction(s) which must be rolled back\n", UT_LIST_GET_LEN(trx_sys->trx_list)); } From 95015b0844abd39e6ce635eeda7f8fbb56b5da56 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 8 Jun 2001 17:29:03 +0300 Subject: [PATCH 18/33] Added a link to manual. Docs/manual.texi: Added a link. BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 3 +-- Docs/manual.texi | 5 +++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 2ca78e17fab..fb534622f9b 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1,2 +1 @@ -monty@hundin.mysql.fi -heikki@donna.mysql.fi +jani@janikt.pp.saunalahti.fi diff --git a/Docs/manual.texi b/Docs/manual.texi index db3b7ebce19..e409aa4e85e 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -44731,6 +44731,11 @@ these tables directly without ODBC-driver. Windows GUI (binary only) to administrate a database, by David B. Mansel, @email{david@@zhadum.org}. +@item @uref{http://home.online.no/~runeberg/myqa, MyQA} +is an Linux based query client for the MySQL database server. MyQA lets +you enter SQL queries, execute them, and view the results, all in a +graphical user interface. The GUI is roughly similar to that of the +'Query Analyzer' client that comes with MS SQL Server. @item @uref{http://members.xoom.com/_opex_/mysqlmanager/index.html, MySQL Manager} a graphical MySQL server manager for MySQL server written in Java, for Windows From 0f9b1363b3a0f8368b03d4320351924d9ef4ced2 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 8 Jun 2001 18:03:24 +0300 Subject: [PATCH 19/33] row0sel.c Fix a bug in consistent read through a secondary index innobase/row/row0sel.c: Fix a bug in consistent read through a secondary index BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + innobase/row/row0sel.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index fb534622f9b..139241f6b88 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1 +1,2 @@ jani@janikt.pp.saunalahti.fi +heikki@donna.mysql.fi diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c index e3bab021669..5599bb42a25 100644 --- a/innobase/row/row0sel.c +++ b/innobase/row/row0sel.c @@ -2207,11 +2207,11 @@ row_sel_get_clust_rec_for_mysql( visit through secondary index records that would not really exist in our snapshot. 
*/ - if ((old_vers || rec_get_deleted_flag(rec)) + if (clust_rec && (old_vers || rec_get_deleted_flag(rec)) && !row_sel_sec_rec_is_for_clust_rec(rec, sec_index, clust_rec, clust_index)) { clust_rec = NULL; - } + } } *out_rec = clust_rec; From f00c02293db7b344bef6eb8f3a9fab976707bc10 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 8 Jun 2001 10:59:05 -0500 Subject: [PATCH 20/33] manual.texi minor cleanup Docs/manual.texi: minor cleanup BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + Docs/manual.texi | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index fb534622f9b..de94fcd107b 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1 +1,2 @@ jani@janikt.pp.saunalahti.fi +paul@teton.kitebird.com diff --git a/Docs/manual.texi b/Docs/manual.texi index e409aa4e85e..ca17d1fdd5d 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -44721,7 +44721,7 @@ You can always find the latest version @uref{http://www.trash.net/~ffischer/admin/index.html, here}. @item @uref{http://www.mysql.com/Downloads/Win32/MySQL-Maker-1.0.zip,MySQL-Maker 1.0}. -Shareware @strong{MySQL} client for Windows. It's WYSIWYG tool which allows +Shareware @strong{MySQL} client for Windows. It's a WYSIWYG tool which allows you to create, change and delete databases and tables. You can change field - structure and add, change and delete data in these tables directly without ODBC-driver. @@ -44732,13 +44732,13 @@ Windows GUI (binary only) to administrate a database, by David B. Mansel, @email{david@@zhadum.org}. @item @uref{http://home.online.no/~runeberg/myqa, MyQA} -is an Linux based query client for the MySQL database server. MyQA lets -you enter SQL queries, execute them, and view the results, all in a +is a Linux-based query client for the @strong{MySQL} database server. MyQA +lets you enter SQL queries, execute them, and view the results, all in a graphical user interface. The GUI is roughly similar to that of the 'Query Analyzer' client that comes with MS SQL Server. @item @uref{http://members.xoom.com/_opex_/mysqlmanager/index.html, MySQL Manager} -a graphical MySQL server manager for MySQL server written in Java, for Windows +a graphical @strong{MySQL} server manager for @strong{MySQL} server written in Java, for Windows @item @uref{http://www.mysql.com/Downloads/Win32/netadmin.zip, netadmin.zip} From f4d5d2d9799c5716a4f06b4dbc74698d13acf304 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 8 Jun 2001 15:54:20 -0500 Subject: [PATCH 21/33] manual.texi Contrib software additions. Docs/manual.texi: Contrib software additions. BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + Docs/manual.texi | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 1d132989aef..8f4e935d486 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1,3 +1,4 @@ heikki@donna.mysql.fi jani@janikt.pp.saunalahti.fi paul@teton.kitebird.com +mwagner@evoq.mwagner.org diff --git a/Docs/manual.texi b/Docs/manual.texi index ca17d1fdd5d..94486282592 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -44819,6 +44819,17 @@ data either by clicking on the table folder or by composing their own SQL statements with our built-in SQL editor. The tool has been tested with Oracle 8 and @strong{MySQL} as the back-end databases. It requires JDK 1.3 from JavaSoft. 
+@item @uref{http://www.jetools.com/products/databrowser/, DataBrowser} +The DataBrowser is a cross-database, cross-platform data access tool. It is more +user friendly than tools like SQL Plus, psql (command line based tools). It is more +flexible than TOAD, ISQL, PGAccess which are GUI's that are limitied to a single +platform or database. +@item @uref{http://www.intrex.net/amit/software/, SQLC} +The SQL Console is a standalone java application that allows you to connect to a + SQL database system and issue SQL queries and updates. It has an easy-to use +graphical user interface. The SQL Console uses JDBC to connect to the database +systems and, therefore, with proper JDBC drivers, you can use this utility to +connect to some of the most popular database systems. @end itemize @cindex Web clients From 0d38f5719999dc677b6975d89dd8bff8bafd03db Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 8 Jun 2001 16:07:28 -0500 Subject: [PATCH 22/33] manual.texi Added another Contrib software entry. Docs/manual.texi: Added another Contrib software entry. --- Docs/manual.texi | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Docs/manual.texi b/Docs/manual.texi index 94486282592..83d489de88b 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -44830,6 +44830,13 @@ The SQL Console is a standalone java application that allows you to connect to a graphical user interface. The SQL Console uses JDBC to connect to the database systems and, therefore, with proper JDBC drivers, you can use this utility to connect to some of the most popular database systems. +@item @uref{http://www.mysql.com/Downloads/Contrib/mysql_mmc.zip, MySQL MMC} +MySQL MMC is a GUI Management Tool developed using kdevelop +with a very good interface completely like Microsoft +Enterprise Tool (for Sql Server) or Sybase Central. We +can use it to manage server, database, table, index, +users and to edit table data in grid or execute Sql +by Query Analysis. @end itemize @cindex Web clients From b44a7c93e90f3315eedd4b3bfe9651f45c155acd Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 8 Jun 2001 18:14:30 -0500 Subject: [PATCH 23/33] manual.texi cleanup Docs/manual.texi: cleanup --- Docs/manual.texi | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index 83d489de88b..867d2d5d152 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -44697,8 +44697,8 @@ of several databases simultaneously. By Innovative-IT Development AB. The @strong{MySQL} GUI client homepage. By Sinisa at @strong{MySQL AB}. @item @uref{http://www.mysql.com/Downloads/Contrib/mysql_navigator_0.9.0.tar.gz, MySQL navigator 0.9} -MySQL Navigator is MySQL database server GUI client program. The purpose -of MySQL Navigator is to provide a useful client interface to MySQL +MySQL Navigator is a @strong{MySQL} database server GUI client program. The purpose +of MySQL Navigator is to provide a useful client interface to @strong{MySQL} database servers, whilst supporting multiple operating systems and languages. You can currently import/export database, enter queries, get result sets, edit scripts, run scripts, add, alter, and delete users, @@ -44833,7 +44833,7 @@ connect to some of the most popular database systems. @item @uref{http://www.mysql.com/Downloads/Contrib/mysql_mmc.zip, MySQL MMC} MySQL MMC is a GUI Management Tool developed using kdevelop with a very good interface completely like Microsoft -Enterprise Tool (for Sql Server) or Sybase Central. We +Enterprise Tool (for SQL Server) or Sybase Central. 
We can use it to manage server, database, table, index, users and to edit table data in grid or execute Sql by Query Analysis. @@ -44909,7 +44909,7 @@ and run update queries. Originally written to implement a simple fast low-overhead banner-rotation system. By Sasha Pachev. @item @uref{http://htcheck.sourceforge.net, htCheck} - URL checker with -MySQL backend. Spidered URLs can later be queried using SQL to retrieve +@strong{MySQL} backend. Spidered URLs can later be queried using SQL to retrieve various kinds of information, eg. broken links. Written by Gabriele Bartolini. @item @uref{http://www.odbsoft.com/cook/sources.htm} @@ -46295,7 +46295,7 @@ Fixed problem when using @code{DECIMAL()} keys on negative numbers. always returned @code{NULL}. @item Fixed security bug in something (please upgrade if you are using a earlier -MySQL 3.23 version). +@strong{MySQL} 3.23 version). @item Fixed buffer overflow bug when writing a certain error message. @item From 458d62a0f38a8e75042693836690533cbb3e667c Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 8 Jun 2001 18:16:20 -0500 Subject: [PATCH 24/33] README msyql -> mysql sql-bench/README: msyql -> mysql --- sql-bench/README | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql-bench/README b/sql-bench/README index 6096c5cc1e8..6b6a5fc95c0 100755 --- a/sql-bench/README +++ b/sql-bench/README @@ -11,7 +11,7 @@ In this directory are the queries and raw data files used to populate the MySQL benchmarks. In order to run the benchmarks you should normally execute a command like the following: -run-all-tests --server=msyql --cmp=mysql,pg,solid --user=test --password=test --log +run-all-tests --server=mysql --cmp=mysql,pg,solid --user=test --password=test --log The above means that one wants to run the benchmark with MySQL. The limits should be taken from all of mysql,PostgreSQL and Solid. Login name and From e47948041ac3dc470e0f01792d33cd3b1572fce6 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 10 Jun 2001 13:31:52 -0500 Subject: [PATCH 25/33] manual.texi HPUX -> HP-UX where necessary Docs/manual.texi: HPUX -> HP-UX where necessary --- Docs/manual.texi | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index 867d2d5d152..86335434eb7 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -8715,10 +8715,10 @@ the DCE libraries while you compile @code{gcc} 2.95! @node HP-UX 11.x, Mac OS X, HP-UX 10.20, Source install system issues @subsection HP-UX Version 11.x Notes -For HPUX Version 11.x we recommend @strong{MySQL} Version 3.23.15 or later. +For HP-UX Version 11.x we recommend @strong{MySQL} Version 3.23.15 or later. -Because of some critical bugs in the standard HPUX libraries, one should -install the following patches before trying to run @strong{MySQL} on HPUX 11.0: +Because of some critical bugs in the standard HP-UX libraries, you should +install the following patches before trying to run @strong{MySQL} on HP-UX 11.0: @example PHKL_22840 Streams cumulative @@ -8728,7 +8728,7 @@ PHNE_22397 ARPA cumulative This will solve a problem that one gets @code{EWOULDBLOCK} from @code{recv()} and @code{EBADF} from @code{accept()} in threaded applications. 
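One way to check whether these patches are already installed (a sketch; the
exact @code{swlist} invocation may differ between SD-UX versions) is:

@example
shell> swlist -l product | egrep 'PHKL_22840|PHNE_22397'
@end example

If neither patch is listed, install them before running @strong{MySQL} on
HP-UX 11.0.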
-If you are using @code{gcc} 2.95.1 on an unpatched HPUX 11.x system, +If you are using @code{gcc} 2.95.1 on an unpatched HP-UX 11.x system, you will get the error: @example @@ -8767,8 +8767,8 @@ After this, the following configure line should work: CFLAGS="-fomit-frame-pointer -O3 -fpic" CXX=gcc CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti -O3" ./configure --prefix=/usr/local/mysql --disable-shared @end example -Here is some information that a HPUX Version 11.x user sent us about compiling -@strong{MySQL} with HPUX:x compiler: +Here is some information that a HP-UX Version 11.x user sent us about compiling +@strong{MySQL} with HP-UX:x compiler: @example Environment: @@ -33216,7 +33216,7 @@ binaries includes: @multitable @columnfractions .4 .3 .3 @item @strong{System} @tab @strong{BDB} @tab @strong{InnoDB} @item AIX 4.3 @tab N @tab Y -@item HPUX 11.0 @tab N @tab Y +@item HP-UX 11.0 @tab N @tab Y @item Linux-Alpha @tab N @tab Y @item Linux-Intel @tab Y @tab Y @item Linux-Ia64 @tab N @tab Y @@ -45869,8 +45869,8 @@ that only had the open count wrong. @item Added functions to handle symbolic links to make life easier in 4.0. @item -We are now using the @code{-lcma} thread library on HPUX 10.20 to -get @strong{MySQL} more stable on HPUX. +We are now using the @code{-lcma} thread library on HP-UX 10.20 to +get @strong{MySQL} more stable on HP-UX. @item Fixed problem with @code{IF()} and number of decimals in the result. @item From 44b670f650dbcac6426f635caefeb15851614842 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 11 Jun 2001 12:23:30 +0300 Subject: [PATCH 26/33] Small fixes in the manual. BitKeeper/etc/ignore: Added sql-bench/compare-results-all sql-bench/template.html to the ignore list Docs/manual.texi: Changed innodb_unix_file_flush_method -> innodb_flush_method. --- .bzrignore | 2 ++ Docs/manual.texi | 10 +++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/.bzrignore b/.bzrignore index dbe41ffe92a..a5a65443413 100644 --- a/.bzrignore +++ b/.bzrignore @@ -284,3 +284,5 @@ support-files/mysql.spec tags tmp/* sql-bench/gif/* +sql-bench/compare-results-all +sql-bench/template.html diff --git a/Docs/manual.texi b/Docs/manual.texi index 76e84b4cdad..0a39e508579 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -25727,7 +25727,7 @@ in its own lock table and rolls back the transaction. If you use than InnoDB in the same transaction, then a deadlock may arise which InnoDB cannot notice. In cases like this the timeout is useful to resolve the situation. -@item @code{innodb_unix_file_flush_method} @tab +@item @code{innodb_flush_method} @tab (Available from 3.23.39 up.) The default value for this is @code{fdatasync}. Another option is @code{O_DSYNC}. @@ -26338,7 +26338,7 @@ In some versions of Linux and Unix, flushing files to disk with the Unix @code{fdatasync} and other similar methods is surprisingly slow. The default method InnoDB uses is the @code{fdatasync} function. If you are not satisfied with the database write performance, you may -try setting @code{innodb_unix_file_flush_method} in @file{my.cnf} +try setting @code{innodb_flush_method} in @file{my.cnf} to @code{O_DSYNC}, though O_DSYNC seems to be slower on most systems. 
You can also try setting it to @code{littlesync}, which means that InnoDB does not call the file flush for every write it does to a @@ -42520,6 +42520,9 @@ For more information on Object Oriented Programming @uref{http://language.perl.com/info/documentation.html} @end example +Note that if you want to use transactions with Perl, you need to have +@code{Msql-Mysql-modules} version 1.2216 or newer. + Installation instructions for @strong{MySQL} Perl support are given in @ref{Perl support}. @@ -45730,7 +45733,8 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}. @itemize @bullet @item -Added support for symbolic links to @code{MyISAM} tables. +Added support for symbolic links to @code{MyISAM} tables. Symlink handling is +now enabled by default for Windows. @item Added @code{SQL_CALC_FOUND_ROWS} and @code{FOUND_ROWS()}. This makes it possible to know how many rows a query would have returned From 765940cb3abccdbfe9e43f2c1116d1e96eb27ae9 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 11 Jun 2001 15:01:28 +0300 Subject: [PATCH 27/33] Changed compare in MyISAM to use my_pread() Fixed that @VAR shows all decimals Fixed problem with FLUSH TABLES and LOCK TABLE CREATE ... SELECT now creates keys later Reset AUTO_INCREMENT order if droping AUTO_INCREMENT key Docs/manual.texi: Added MySQL/PostgreSQL comparison isam/_dynrec.c: Fixed wrong compare (not used in MySQL) myisam/mi_dynrec.c: Fixed wrong compare (not used in MySQL). Changed compare to use my_pread() myisam/mi_packrec.c: cleanup mysql-test/r/alter_table.result: Added test for ALTER TABLE mysql-test/r/variables.result: Added test for variables with REAL values. mysql-test/t/alter_table.test: Added test for ALTER TABLE mysql-test/t/variables.test: Added test for variables with REAL values. mysys/my_pread.c: Cleanup sql-bench/Makefile.am: removed extra \ sql-bench/README: Fixed typo. sql/item_func.cc: Fixed that @VAR shows all decimals sql/share/swedish/errmsg.OLD: update sql/sql_base.cc: Fixed problem with FLUSH TABLES and LOCK TABLE sql/sql_insert.cc: CREATE ... SELECT now creates keys later sql/sql_table.cc: Reset AUTO_INCREMENT order if droping AUTO_INCREMENT key. 
BitKeeper/etc/ignore: Added sql-bench/graph-compare-results to the ignore list BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- .bzrignore | 1 + BitKeeper/etc/logging_ok | 2 +- Docs/manual.texi | 594 +++++++++++++++++++++++++++++--- isam/_dynrec.c | 2 +- myisam/mi_dynrec.c | 9 +- myisam/mi_packrec.c | 2 +- mysql-test/r/alter_table.result | 5 + mysql-test/r/variables.result | 4 +- mysql-test/t/alter_table.test | 11 + mysql-test/t/variables.test | 4 +- mysys/my_pread.c | 6 +- sql-bench/Makefile.am | 2 +- sql-bench/README | 2 +- sql/item_func.cc | 2 +- sql/share/swedish/errmsg.OLD | 3 + sql/sql_base.cc | 3 + sql/sql_insert.cc | 1 + sql/sql_table.cc | 9 + 18 files changed, 596 insertions(+), 66 deletions(-) diff --git a/.bzrignore b/.bzrignore index dbe41ffe92a..b160b186fba 100644 --- a/.bzrignore +++ b/.bzrignore @@ -284,3 +284,4 @@ support-files/mysql.spec tags tmp/* sql-bench/gif/* +sql-bench/graph-compare-results diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index e8deba03c8a..01a1261eae5 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1 +1 @@ -monty@hundin.mysql.fi +monty@tik.mysql.fi diff --git a/Docs/manual.texi b/Docs/manual.texi index 76e84b4cdad..22275637890 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -686,7 +686,7 @@ System/Compile Time and Startup Parameter Tuning * Compile and link options:: How compiling and linking affects the speed of MySQL * Disk issues:: Disk issues -* Symbolic links:: +* Symbolic links:: Using Symbolic Links * Server parameters:: Tuning server parameters * Table cache:: How MySQL opens and closes tables * Creating many tables:: Drawbacks of creating large numbers of tables in the same database @@ -952,6 +952,17 @@ How MySQL Compares to @code{mSQL} * Protocol differences:: How @code{mSQL} and @strong{MySQL} client/server communications protocols differ * Syntax differences:: How @code{mSQL} 2.0 SQL syntax differs from @strong{MySQL} +How MySQL Compares to PostgreSQL + +* MySQL-PostgreSQL goals:: +* MySQL-PostgreSQL features:: +* MySQL-PostgreSQL benchmarks:: + +MySQL and PostgreSQL development goals + +* MySQL-PostgreSQL features:: +* MySQL-PostgreSQL benchmarks:: + MySQL Internals * MySQL threads:: MySQL threads @@ -9000,7 +9011,7 @@ named pipe connections. You should use either @code{mysqld-nt} or @code{mysqld-max-nt}.) If @code{mysqld} doesn't start, please check whether or not the -@file{\mysql\mysql.err} file contains any reason for this. You can also +@file{\mysql\data\mysql.err} file contains any reason for this. You can also try to start the server with @code{mysqld --standalone}; In this case, you may get some useful information on the screen that may help solve the problem. @@ -9174,14 +9185,12 @@ server, you can do so using this command: C:\> mysqladmin --user=root --password=your_password shutdown @end example -If you are using the old shareware version of @strong{MySQL} Version 3.21 -under Windows, the above command will fail with an error: @code{parse error -near 'SET OPTION password'}. This is because the old shareware version, -which is based on @strong{MySQL} Version 3.21, doesn't have the -@code{SET PASSWORD} command. The fix is in this case to upgrade to -the Version 3.22 shareware. +If you are using the old shareware version of @strong{MySQL} Version +3.21 under Windows, the above command will fail with an error: +@code{parse error near 'SET OPTION password'}. The fix is in to upgrade +to the current @strong{MySQL} version, which is freely available. 
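If you are not sure whether you are running the old shareware server, you
can check the version before deciding whether to upgrade, for example:

@example
C:\> C:\mysql\bin\mysqladmin version
@end example

The @code{Server version} line in the output shows which release you are
running; anything in the 3.21.x series is the old shareware version
described above.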
-With the newer @strong{MySQL} versions you can easily add new users +With the current @strong{MySQL} versions you can easily add new users and change privileges with @code{GRANT} and @code{REVOKE} commands. @xref{GRANT}. @@ -9250,7 +9259,7 @@ Note that the symbolic link will be used only if the directory For example, if the @strong{MySQL} data directory is @file{C:\mysql\data} and you want to have database @code{foo} located at @file{D:\data\foo}, you should create the file @file{C:\mysql\data\foo.sym} that contains the -text @code{D:\data\foo}. After that, all tables created in the database +text @code{D:\data\foo\}. After that, all tables created in the database @code{foo} will be created in @file{D:\data\foo}. Note that because of the speed penalty you get when opening every table, @@ -10066,7 +10075,7 @@ correctly, check the log file to see if you can find out why. Log files are located in the data directory (typically @file{/usr/local/mysql/data} for a binary distribution, @file{/usr/local/var} for a source distribution, -@file{\mysql\mysql.err} on Windows.) Look in the data directory for +@file{\mysql\data\mysql.err} on Windows.) Look in the data directory for files with names of the form @file{host_name.err} and @file{host_name.log} where @code{host_name} is the name of your server host. Then check the last few lines of these files: @@ -10611,7 +10620,7 @@ interactive-timeout @tindex .my.cnf file If you have a source distribution, you will find sample configuration files named @file{my-xxxx.cnf} in the @file{support-files} directory. -If you have a binary distribution, look in the @file{DIR/share/mysql} +If you have a binary distribution, look in the @file{DIR/support-files} directory, where @code{DIR} is the pathname to the @strong{MySQL} installation directory (typically @file{/usr/local/mysql}). Currently there are sample configuration files for small, medium, large, and very @@ -19563,6 +19572,11 @@ sequence number by executing @code{SET INSERT_ID=#} before @code{ALTER TABLE} or using the @code{AUTO_INCREMENT = #} table option. @xref{SET OPTION}. +With MyISAM tables, if you don't change the @code{AUTO_INCREMENT} +column, the sequence number will not be affected. If you drop an +@code{AUTO_INCREMENT} column and then add another @code{AUTO_INCREMENT} +column, the numbers will start from 1 again. + @xref{ALTER TABLE problems}. @findex RENAME TABLE @@ -39430,7 +39444,7 @@ switch to a new log) by executing @code{FLUSH LOGS}. @xref{FLUSH}. @code{mysqld} writes all errors to the stderr, which the @code{safe_mysqld} script redirects to a file called @code{'hostname'.err}. (On Windows, @code{mysqld} writes this directly -to @file{mysql.err}). +to @file{\mysql\data\mysql.err}). This contains information indicating when @code{mysqld} was started and stopped and also any critical errors found when running. If @code{mysqld} @@ -43585,53 +43599,516 @@ users. @item @end table -@cindex PostgreSQL, comparison +@cindex PostgreSQL/MySQL, overview @node Compare PostgreSQL, , Compare mSQL, Comparisons @section How MySQL Compares to PostgreSQL +When reading the following, please note that both products are +continually evolving. We at @strong{MySQL AB} and the PostgreSQL +developers are both working on making our respective database as good as +possible, so we are both a serious choice to any commercial database. + +The following comparison is made by us at MySQL AB. 
We have tried to be +as accurate and fair as possible, but because we don't have a full +knowledge of all PostgreSQL features while we know MySQL througly, we +may have got some things wrong. We will however correct these when they +come to our attention. + We would first like to note that @code{PostgreSQL} and @strong{MySQL} -are both widely used products, but their design goals are completely -different. This means that for some applications @strong{MySQL} is more -suitable and for others @code{PostgreSQL} is more suitable. When -choosing which database to use, you should first check if the database's -feature set is good enough to satisfy your application. If you need -speed, @strong{MySQL} is probably your best choice. If you need some -of the extra features that @code{PostgreSQL} can offer, you should use +are both widely used products, but with different design goals, even if +we are both striving to be ANSI SQL compatible. This means that for +some applications @strong{MySQL} is more suitable and for others +@code{PostgreSQL} is more suitable. When choosing which database to +use, you should first check if the database's feature set satisfies your +application. If you need speed, @strong{MySQL} is probably your best +choice. If you need some of the extra features that only @code{PostgreSQL} +can offer, you should use @code{PostgreSQL}. + +@cindex PostgreSQL/MySQL, goals +@menu +* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development goals +* MySQL-PostgreSQL features:: Featurevise Comparison of MySQL and PostgreSQL +* MySQL-PostgreSQL benchmarks:: Benchmarking MySQL and PostgreSQL +@end menu + +@node MySQL-PostgreSQL goals, MySQL-PostgreSQL features, Compare PostgreSQL, Compare PostgreSQL +@subsection MySQL and PostgreSQL development goals + +When adding things to MySQL we take pride to do an optimal, definite +solution. The code should be so good that we shouldn't have any need to +change it in the foreseeable future. We also do not like to sacrifice +speed for features but instead will do our utmost to find a solution +that will give maximal throughput. This means that development will take +a little longer, but the end result will be well worth this. This kind +of development is only possible because all server code are checked by +one of a few (currently two) persons before it's included in the +@strong{MySQL} server. + +We at MySQL AB believe in frequent releases to be able to push out new +features quickly to our users. Because of this we do a new small release +about every 3 weeks, which a major branch every year. All releases are +throughly tested with our testing tools on a lot of different platforms. + +PostgreSQL is based on a kernel with lots of contributors. In this setup +it makes sense to prioritize adding a lot of new features, instead of +implementing them optimally, because one can always optimize things +later if there arises a need for this. + +Another big difference between @strong{MySQL} and PostgreSQL is that +nearly all of the code in the MySQL server are coded by developers that +are employed by MySQL AB and are still working on the server code. The +exceptions are the transaction engines and the regexp library. + +This is in sharp contrast to the PostgreSQL code where the majority of +the code is coded by a big group of people with different backgrounds. +It was only recently that the PostgreSQL developers announced that they +current developer group had finally had time to take a look at all +the code in the current PostgreSQL release. 
+ +Both of the above development methods have their own merits and drawbacks. +We here at @strong{MySQL AB} think, of course, that our model is better +because our model gives better code consistency, more optimized and +reusable code and, in our opinion, fewer bugs. Because we are the +authors of the @strong{MySQL} server code we are better able to +coordinate new features and releases. + +@cindex PostgreSQL/MySQL, features +@node MySQL-PostgreSQL features, MySQL-PostgreSQL benchmarks, MySQL-PostgreSQL goals, Compare PostgreSQL +@subsection Featurevise Comparison of MySQL and PostgreSQL + +On the @uref{http://www.mysql.com/information/crash-me.php, crash-me} +page you can find a list of those database constructs and limits that +one can detect automatically with a program. Note however that a lot of +the numerical limits may be changed with startup options for the respective +database. The above web page is however extremely useful when you want to +ensure that your applications work with many different databases or +when you want to convert your application from one database to another. + +@strong{MySQL} offers the following advantages over PostgreSQL: + +@itemize @bullet +@item +@code{MySQL} is generally much faster than PostgreSQL. +@xref{MySQL-PostgreSQL benchmarks}. +@item +Because @strong{MySQL} has a much larger user base than PostgreSQL the +code is more tested and has historically been more stable than +PostgreSQL. @strong{MySQL} is much more widely used in production +environments than PostgreSQL, mostly because @strong{MySQL AB}, +formerly TCX DataKonsult AB, has provided top-quality commercial support +for @strong{MySQL} from the day it was released, whereas until recently +PostgreSQL was unsupported. +@item +@strong{MySQL} works on more platforms than PostgreSQL. @xref{Which OS}. +@item +@strong{MySQL} works better on Windows; @strong{MySQL} runs as a +native Windows application (a service on NT/Win2000/WinXP), while +PostgreSQL runs under the cygwin emulation. We have heard that +PostgreSQL is not yet that stable on Windows but we haven't been able to +verify this ourselves. +@item +@strong{MySQL} has more APIs for other languages and is supported by more +programs than PostgreSQL. @xref{Contrib}. +@item +@strong{MySQL} works on 24/7 heavy duty systems. In most circumstances +you never have to run any cleanups on @code{MySQL}. PostgreSQL doesn't +yet support 24/7 systems because you have have to run @code{vacuum()} +once in a while to reclaim space from @code{UPDATE} and @code{DELETE} +commands and to perform statistics analyzes that are critical to get +good performance with PostgreSQL. On a busy system with lots of changes +vacuum must be run very frequently, in the worst cases even many times a +day. During the @code{vacuum()} run, which may take hours if the +database is big, the database is from a production standpoint +practically dead. The PostgreSQL team has put fixing this on their TODO list, +but we assume that this is not an easy thing to fix permanently. +@item +A working, tested replication feature used by sites like +@uref{http://finance.yahoo.com, Yahoo finance}, +@uref{http://www.mobile.de/,mobile.de} and +@uref{http://www.slashdot.org,Slashdot}. +@item +Included in the @strong{MySQL} distribution are two different +test suites (@file{mysql-test-run} and +@uref{http://www.mysql.com/information/crash-me.php,crash-me}) and a +benchmark suite. 
The test system is actively updated with code to test +each new feature and almost all repeatable bugs that come to our +attention. We test @strong{MySQL} with these on a lot of platforms +before every release. These tests are more sophisticated than anything we +have seen from PostgreSQL and ensure that the @strong{MySQL} code stays +at a high standard. +@item +There are far more books in print on @strong{MySQL} than on PostgreSQL. +O'Reilly, Sams, Que, and New Riders are all major publishers with books +about MySQL. All @strong{MySQL} features are also documented in the +@strong{MySQL} on-line manual because when a feature is implemented, the +@strong{MySQL} developers are required to document it before it's +included in the source. +@item +@strong{MySQL} supports more of the standard ODBC functions than @code{PostgreSQL}. +@item +@strong{MySQL} has a much more sophisticated @code{ALTER TABLE}. +@item +@strong{MySQL} has support for tables without transactions for +applications that need all the speed they can get. The tables may be +memory-based @code{HEAP} tables or disk-based @code{MyISAM} tables. @xref{Table types}. +@item +@strong{MySQL} has support for 3 different table handlers that support +transactions (@code{BDB}, @code{InnoDB} and @code{Gemini}). Because +every transaction engine performs differently under different +conditions, this gives the application writer more options to find an +optimal solution for his/her setup. @xref{Table types}. +@item +@code{MERGE} tables give you a unique way to instantly make a view over +a set of identical tables and use these as one. This is perfect for +systems where you have log files that you order, for example, by month. +@xref{MERGE}. +@item +The option to compress read-only tables, but still have direct access to +the rows in the table, gives you better performance by minimizing disk +reads. This is very useful when you are archiving +things. @xref{myisampack}. +@item +@strong{MySQL} has internal support for full-text search. @xref{Fulltext Search}. +@item +You can access many databases from the same connection (depending of course +on your privileges). +@item +@strong{MySQL} is coded from the start with multi-threading while +PostgreSQL uses processes. Because context switching and access to +common storage areas is much faster between threads than between separate +processes, this gives @strong{MySQL} a big speed advantage in multi-user +applications and also makes it easier for @strong{MySQL} to take full +advantage of symmetric multiprocessor systems (SMP). +@item +@strong{MySQL} has a much more sophisticated privilege system than +PostgreSQL. While PostgreSQL only supports @code{INSERT}, +@code{SELECT}, and @code{UPDATE}/@code{DELETE} grants per user on a database or a +table, @strong{MySQL} allows you to define a full set of different +privileges at the database, table and column level. @strong{MySQL} also allows +you to specify privileges on host+user combinations. @xref{GRANT}. +@item +@strong{MySQL} supports a compressed server/client protocol which +improves performance over slow links. +@item +@strong{MySQL} employs the table handler concept and is the only +relational database we know of built around this concept. This allows +different low level table types to be swapped into the SQL engine, each +table type optimized for different performance characteristics. 
+ +@item +All @code{MySQL} table types (except @strong{InnoDB}) are implemented as +files (i.e., one table per file), which makes it really easy to back up, +move, delete and even symlink databases and tables when the server is +down. +@item +Tools to repair and optimize @strong{MyISAM} tables (the most common +@strong{MySQL} table type). A repair tool is only needed when a +physical corruption of a data file happens, usually from a hardware +failure. It allows a majority of the data to be recovered. +@item +Upgrading @strong{MySQL} is painless. When you upgrading @strong{MySQL}, +you don't need to dump/restore your data, as you have to do with most +PostgreSQL upgrades. +@end itemize  -@code{PostgreSQL} has some more advanced features like user-defined -types, triggers, rules, and some transaction support (currently it -has about the same semantics as @strong{MySQL}'s transactions in that the -transaction is not 100% atomic). However, PostgreSQL lacks many of the -standard types and functions from ANSI SQL and ODBC. See the @code{crash-me} -Web page (@uref{http://www.mysql.com/information/crash-me.php}) for a complete -list of limits and which types and functions are supported or unsupported. +Drawbacks with @strong{MySQL} compared to PostgreSQL:  -Normally, @code{PostgreSQL} is a magnitude slower than @strong{MySQL}. -@xref{Benchmarks}. This is due largely to the fact that they have only -transaction-safe tables and that their transactions system is not as -sophisticated as Berkeley DB's. In @strong{MySQL} you can decide per -table if you want the table to be fast or take the speed penalty of -making it transaction-safe. +@itemize @bullet +@item +The transaction support in @strong{MySQL} is not yet as well tested as +PostgreSQL's system. +@item +Because @strong{MySQL} uses threads, which are still a moving target on +many operating systems, one must either use binaries from +@uref{http://www.mysql.com/downloads} or carefully follow our +instructions on +@uref{http://www.mysql.com/doc/I/n/Installing_source.html} to get an +optimal binary that works in all cases. +@item +Table locking, as used by the non-transactional @code{MyISAM} tables, is +in many cases faster than page locks, row locks or versioning. The +drawback however is that if one doesn't take into account how table +locks work, a single long-running query can block a table for updates +for a long time. This can usually be avoided when designing the +application. If not, one can always switch the troublesome table to use one +of the transactional table types. @xref{Table locking}. +@item +With UDFs (user-defined functions) one can extend @strong{MySQL} with +both normal SQL functions and aggregates, but this is not as easy or as +flexible as in PostgreSQL. @xref{Adding functions}. +@item +Updates and deletes that go over multiple tables are harder to do in +@strong{MySQL}. (This will be fixed in @strong{MySQL} 4.0 with multi-table +@code{DELETE} and multi-table @code{UPDATE} and in @strong{MySQL} 4.1 +with @code{SUB-SELECT}.) +@end itemize  -The most important things that @code{PostgreSQL} supports that @strong{MySQL} -doesn't yet support: +PostgreSQL currently offers the following advantages over @strong{MySQL}:  -@table @code -@item Sub select -@item Foreign keys -@item Stored procedures -@item An extendable type system. -@item A way to extend the SQL to handle new key types (like R-trees) -@end table +Note that because we know the @strong{MySQL} road map, we have included +in the following table the version when @strong{MySQL} should support +this feature. 
Unfortunately we couldn't do this for the previous comparison, +because we don't know the PostgreSQL roadmap. + +@multitable @columnfractions .70 .30 +@item @strong{Feature} @tab @strong{MySQL version} +@item Subselects @tab 4.1 +@item Foreign keys @tab 4.0 and 4.1 +@item Views @tab 4.2 +@item Stored procedures in multiple languages @tab 4.1 +@item Extensible type system @tab Not planned +@item Unions @tab 4.0 +@item Full join @tab 4.0 or 4.1 +@item Triggers @tab 4.1 +@item Constraints @tab 4.1 +@item Cursors @tab 4.1 or 4.2 +@item Extensible index types like R-trees @tab R-trees are planned for 4.2 +@item Inherited tables @tab Not planned +@end multitable  +Other reasons to use PostgreSQL: + +@itemize @bullet +@item +Standard usage in PostgreSQL is closer to ANSI SQL in some cases. +@item +One can speed up PostgreSQL by coding things as stored procedures. +@item +A bigger team of developers contributes to the server. +@end itemize + +Drawbacks with PostgreSQL compared to @strong{MySQL}: + +@itemize @bullet +@item +@code{Vacuum()} makes PostgreSQL hard to use in a 24/7 environment. +@item +Only transactional tables. +@item +Much slower insert/delete/update. +@end itemize + +For a complete list of drawbacks, you should also examine the first table +in this section. + +@cindex PostgreSQL/MySQL, benchmarks +@node MySQL-PostgreSQL benchmarks, , MySQL-PostgreSQL features, Compare PostgreSQL +@subsection Benchmarking MySQL and PostgreSQL + +The only open source benchmark that we know of that can be used to +benchmark @strong{MySQL} and PostgreSQL (and other databases) is our +own. It can be found at: +@uref{http://www.mysql.com/information/benchmarks.html}. + +We have many times asked the PostgreSQL developers and some PostgreSQL +users to help us extend this benchmark to make the definitive benchmark +for databases, but unfortunately we haven't got any feedback for this. + +We, the @strong{MySQL} developers, has because of this spent a lot of +hours to get maximum performance from PostgreSQL for the benchmarks, but +because we don't know PostgreSQL intimately we are sure that there are +things that we have missed. We have on the benchmark page documented +exactly how we ran the benchmark so that it should be easy for +anyone to repeat and verify our results. + +The benchmarks are usually run with and without the @code{--fast} +option. When run with @code{--fast} we are trying to use every trick +the server offers to get the code to execute as fast as possible. +The idea is that the normal run should show how the server would work in +a default setup and the @code{--fast} run shows how the server would do +if the application developer used extensions in the server to make +his application run faster. + +When running with PostgreSQL and @code{--fast} we do a @code{vacuum()} +after every major table update and drop table to make the database +in perfect shape for the following selects. The time for vacuum() is +measured separately. 
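+
+For example, a PostgreSQL run of the benchmark suite with the
+@code{--fast} option would normally be started with something like the
+following command (the user, password and @code{--cmp} values here are
+only placeholders; see the @file{sql-bench} README for the exact options
+your setup needs):
+
+@example
+shell> run-all-tests --server=pg --cmp=mysql,pg --fast --user=test --password=test --log
+@end example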
+ +When running with PostgreSQL 7.1.1 we could however not run with +@code{--fast} because during the insert test, the postmaster (the +PostgreSQL daemon) died and the database was so corrupted that it was +impossible to restart postmaster. (The details about the machine we ran +the benchmark on can be found on the benchmark page.) After this happened +twice, we decided to postpone the @code{--fast} test until the next +PostgreSQL release. + +Before going on to the other benchmarks we know of, we would like to give +some background on benchmarks: + +It's very easy to write a test that shows ANY database to be the best +database in the world, by just restricting the test to something the +database is very good at and not testing anything that the database is not +good at; if one then publishes the result as a single figure, +things are even easier. + +This would be like measuring the speed of @strong{MySQL} compared +to PostgreSQL by looking at the summary time of the MySQL benchmarks on +our web page. Based on this @strong{MySQL} would be more than 40 times +faster than PostgreSQL, something that is of course not true. We could +make things even worse by just taking the test where PostgreSQL performs +worst and claiming that @strong{MySQL} is more than 2000 times faster than +PostgreSQL. + +The fact is that @strong{MySQL} does a lot of optimizations that +PostgreSQL doesn't do and the other way around. An SQL optimizer is a +very complex thing and a company could spend years on just making the +optimizer faster and faster. + +When looking at the benchmark results you should look for things that +you do in your application and just use these results to decide which +database would be best suited for your application. The benchmark +results also show things a particular database is not good at and should +give you a notion about things to avoid and what you may have to do in +other ways. + +We know of two benchmark tests that claim that PostgreSQL performs +better than @strong{MySQL}. These were both multi-user tests, a kind of test +that we here at @strong{MySQL AB} haven't had time to write and include in +the benchmark suite, mainly because it's a big task to do this in a +manner that is fair to all databases. + +One is the benchmark paid for by +@uref{http://www.greatbridge.com/about/press.php?content_id=4,Great +Bridge}. + +This is the worst benchmark we have ever seen anyone conduct. This +was not only tuned to test only what PostgreSQL is absolutely best at, +it was also totally unfair to every other database involved in the +test. + +@strong{NOTE}: We know that even some of the main PostgreSQL +developers did not like the way Great Bridge conducted the benchmark, so we +don't blame them for the way the benchmark was made. + +This benchmark has been condemned in a lot of postings and newsgroups so +here we will just briefly repeat some of the things that were wrong with it. + +@itemize @bullet +@item +The tests were run with an expensive commercial tool, which makes it +impossible for an open source company like us to verify the benchmarks, +or even check how the benchmark was really done. The tool is not even +a true benchmark tool, but an application/setup testing tool. To refer to +this as a STANDARD benchmark tool is to stretch the truth a long way. +@item +Great Bridge admitted that they had optimized the PostgreSQL database +(with vacuum() before the test) and tuned the startup for the tests, +something they hadn't done for any of the other databases involved. 
To +say "This process optimizes indexes and frees up disk space a bit. The +optimized indexes boost performance by some margin". Our benchmarks +clearly indicates that the difference in running a lot of selects on a +database with and without vacuum() can easily differ by a factor of 10. +@item +The test results where also strange; The ASPAP3 test benchmark +documentation mentions that the test does: + +"selections, simple joins, projections, aggregates, one-tuple updates, +and bulk updates" + +PostgreSQL is good at doing selects and joins (especially after a +vacuum()), but doesn't perform as well on inserts/updates; The +benchmarks seem to indicate that only SELECTs where done (or very few +updates) . This could easily explain they good results for PostgreSQL in +this test. The bad results for MySQL will be obvious a bit down in this +document. +@item +They did run the so called benchmark from a Windows machine against a +Linux machine over ODBC, a setup that no normal database user would ever +do when running a heavy multi-user application. This tested more the +ODBC driver and the Windows protocol used between the clients than the +database itself. +@item +When running the database against Oracle and MS-SQL (Great Bridge has +indirectly indicated that the databases they used in the test), +they didn't use the native protocol but instead ODBC. Anyone that has +ever used Oracle, knows that all real application uses the native +interface instead of ODBC. Doing a test through ODBC and claiming that +the results had anything to do with using the database for real can't +be regarded as fair play. They should have done two tests with and +without ODBC to provide the right facts (after having got experts to tune +all involved databases of course). +@item +They refer to the TCP-C tests, but doesn't anywhere mention that the +tests they did where not a true TCP-C test and they where not even +allowed to call it a TCP-C test. A TCP-C test can only be conducted by +the rules approved by the @uref{http://www.tpc.org,TCP-council}. Great +Bridge didn't do that. By doing this they have both violated the TCP +trademark and miscredited their own benchmarks. The rules set by the +TCP-council are very strict to ensure that no one can produce false +results or make unprovable statements. Apparently Great Bridge wasn't +interested in doing this. +@item +After the first test, we contacted Great Bridge and mentioned to them +some of the obvious mistakes they had done with @strong{MySQL}; Running +with a debug version of our ODBC driver, running on a Linux system that +wasn't optimized for threads, using an old MySQL version when there was +a recommended newer one available, not starting @strong{MySQL} with the +right options for heavy multi-user use (the default installation of +MySQL is tuned for minimal resource use). Great Bridge did run a new +test, with our optimized ODBC driver and with better startup options for +MySQL, but refused to either use our updated glibc library or our +standard binary (used by 80% of our users), which was statically linked +with a fixed glibc library. + +According to what we know, Great Bridge did nothing to ensure that the +other databases was setup correctly to run good in their test +environment. We are sure however that they didn't contact Oracle or +Microsoft to ask for their advice in this matter ;) +@item +The benchmark was paid for by Great Bridge, and they decided to publish +only partial chosen results (instead of publishing it all). 
+@end itemize + +Tim Perdue, a long time PostgreSQL fan and a reluctant MySQL user +published a comparison on +@uref{http://www.phpbuilder.com/columns/tim20001112.php3,phpbuider}. + +When we got aware of the comparison, we phoned Tim Perdue about this +because there was a lot of strange things in his results. For example, +he claimed that MySQL had a problem with five users in his tests, when we +know that there are users with similar machines as his that are using +MySQL with 2000 simultaneous connections doing 400 queries per second (In +this case the limit was the web bandwidth, not the database). + +It sounded like he was using a Linux kernel that either had some +problems with many threads (Linux kernels before 2.4 had a problem with +this but we have documented how to fix this and Tim should be aware of +this problem). The other possible problem could have been an old glibc +library and that Tim didn't use a MySQL binary from our site, which is +linked with a corrected glibc library, but had compiled a version of his +own with. In any of the above cases, the symptom would have been exactly +what Tim had measured. + +We asked Tim if we could get access to his data so that we could repeat +the benchmark and if he could check the MySQL version on the machine to +find out what was wrong and he promised to come back to us about this. +He has not done that yet. + +Because of this we can't put any trust in this benchmark either :( + +Conclusion: + +The only benchmarks that exist today that anyone can download and run +against @strong{MySQL}and PostgreSQL is the MySQL benchmarks. We here +at @strong{MySQL} believe that open source databases should be tested +with open source tools! This is the only way to ensure that no one +does tests that none can reproduce and use this to claim that a +database is better than another. Without knowing all the facts it's +impossible to answer the claims of the tester. + +The thing we find strange is that every test we have seen about +PostgreSQL, that is impossible to reproduce, claims that PostgreSQL is +better in most cases while our tests, which anyone can reproduce, +clearly shows otherwise. With this we don't want to say that PostgreSQL +isn't good at many things (It is!) We would just like to see a fair test +where they are very good so that we could get some friendly competition +going! + +For more information about our benchmarks suite see @xref{MySQL +Benchmarks}. + +We are working on even better benchmarks including much better +documentation (the current is lacking). @cindex internals @cindex threads @@ -44863,7 +45340,7 @@ html templates. By Alex Krohn. This cgi scripts in Perl enables you to edit content of Mysql database. By Tomas Zeman. @item -@uref{http://futurerealm.com/opensource/futuresql.htm, FutureSQL Web Database Administration Tool}. +@uref{http://worldcommunity.com/opensource/futuresql, FutureSQL Web Database Administration Tool}. FutureSQL by Peter F. Brown, is a free, open source rapid application development Web database administration tool, written in Perl, using @strong{MySQL}. It uses @code{DBI:DBD} and @code{CGI.pm}. @@ -45122,6 +45599,10 @@ Patches for @code{radiusd} to make it support @strong{MySQL}. By Wim Bonis, @appendixsec Useful Tools @itemize @bullet +@item @uref{http://worldcommunity.com/opensource/utilities/mysql_backup.html, MySQL Backup}. + +A backup script for MySQL. By Peter F. Brown. 
+ @item @uref{http://www.mysql.com/Downloads/Contrib/mytop, mytop} @item @uref{http://public.yahoo.com/~jzawodn/mytop/, mytop home page} mytop is a Perl program that allows you to monitor @strong{MySQL} servers by @@ -45839,6 +46320,15 @@ not yet 100% confident in this code. @appendixsubsec Changes in release 3.23.39 @itemize @bullet @item +If one dropped and added an @code{auto_increment} column, the +@code{auto_increment} value wasn't reset. +@item +Fixed problem where @code{LOCK TABLES table_name READ} followed by +@code{FLUSH TABLES} put a exclusive lock on the table. +@item +@code{REAL} @@variables with was represented with 2 digits when +converted to strings. +@item Fixed problem that client 'hung' when @code{LOAD TABLE FROM MASTER} failed. @item Running @code{myisamchk --fast --force} will no longer repair tables @@ -51095,6 +51585,10 @@ Everything in this list is approximately in the order it will be done. If you want to affect the priority order, please register a license or support us and tell us what you want to have done more quickly. @xref{Licensing and Support}. +The plan is that we in the future will support the full ANSI SQL99 +standard, but with a lot of useful extensions. The challenge is to do +this without sacrifying the speed or compromise the code. + @node TODO MySQL 4.0, TODO future, TODO, TODO @appendixsec Things that should be in 4.0 diff --git a/isam/_dynrec.c b/isam/_dynrec.c index 42a596fa623..2a908f5b42c 100644 --- a/isam/_dynrec.c +++ b/isam/_dynrec.c @@ -954,7 +954,7 @@ static int _nisam_cmp_buffer(File file, const byte *buff, ulong filepos, uint le { if (my_read(file,temp_buff,next_length,MYF(MY_NABP))) goto err; - if (memcmp((byte*) buff,temp_buff,IO_SIZE)) + if (memcmp((byte*) buff,temp_buff,next_length)) DBUG_RETURN(1); buff+=next_length; length-= next_length; diff --git a/myisam/mi_dynrec.c b/myisam/mi_dynrec.c index 4c05f6c737d..e090498f3fe 100644 --- a/myisam/mi_dynrec.c +++ b/myisam/mi_dynrec.c @@ -1221,20 +1221,19 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos, char temp_buff[IO_SIZE*2]; DBUG_ENTER("_mi_cmp_buffer"); - VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0))); next_length= IO_SIZE*2 - (uint) (filepos & (IO_SIZE-1)); while (length > IO_SIZE*2) { - if (my_read(file,temp_buff,next_length,MYF(MY_NABP))) + if (my_pread(file,temp_buff,next_length,filepos, MYF(MY_NABP)) || + memcmp((byte*) buff,temp_buff,next_length)) goto err; - if (memcmp((byte*) buff,temp_buff,IO_SIZE)) - DBUG_RETURN(1); + filepos+=next_length; buff+=next_length; length-= next_length; next_length=IO_SIZE*2; } - if (my_read(file,temp_buff,length,MYF(MY_NABP))) + if (my_pread(file,temp_buff,length,filepos,MYF(MY_NABP))) goto err; DBUG_RETURN(memcmp((byte*) buff,temp_buff,length)); err: diff --git a/myisam/mi_packrec.c b/myisam/mi_packrec.c index b6a9435ee3d..be7f9dcae0a 100644 --- a/myisam/mi_packrec.c +++ b/myisam/mi_packrec.c @@ -1010,7 +1010,7 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BLOCK_INFO *info, File file, { ref_length=myisam->s->pack.ref_length; /* - We can't use my_pread() here because mi_rad_pack_record assumes + We can't use my_pread() here because mi_read_rnd_pack_record assumes position is ok */ VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0))); diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result index 4f12f71c7ce..dbdbb7f57a9 100644 --- a/mysql-test/r/alter_table.result +++ b/mysql-test/r/alter_table.result @@ -27,3 +27,8 @@ n 12 Table Op Msg_type Msg_text test.t1 optimize status OK +i +1 +2 +3 +4 diff 
--git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index ee04e437bb7..f852378e6a1 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -1,7 +1,7 @@ @test @`select` @TEST @not_used 1 2 3 NULL @test_int @test_double @test_string @test_string2 @select -10 0.00 abcdeghi abcdefghij NULL +10 1e-10 abcdeghi abcdefghij NULL @test_int @test_double @test_string @test_string2 hello hello hello hello @test_int @test_double @test_string @test_string2 @@ -10,3 +10,5 @@ hellohello hellohello hellohello hellohello NULL NULL NULL NULL @t1:=(@t2:=1)+@t3:=4 @t1 @t2 @t3 5 5 1 4 +@t5 +1.23456 diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test index da98240c2cf..dbfbd4267d8 100644 --- a/mysql-test/t/alter_table.test +++ b/mysql-test/t/alter_table.test @@ -71,3 +71,14 @@ ALTER TABLE t1 ADD Column new_col int not null; UNLOCK TABLES; OPTIMIZE TABLE t1; DROP TABLE t1; + +# +# Drop and add an auto_increment column +# + +create table t1 (i int unsigned not null auto_increment primary key); +insert into t1 values (null),(null),(null),(null); +alter table t1 drop i,add i int unsigned not null auto_increment, drop primary key, add primary key (i); +select * from t1; +drop table t1; + diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index 1067559b759..d5ff64d199b 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -1,7 +1,7 @@ # # test variables # -set @`test`=1,@TEST=3,@select=2; +set @`test`=1,@TEST=3,@select=2,@t5=1.23456; select @test,@`select`,@TEST,@not_used; set @test_int=10,@test_double=1e-10,@test_string="abcdeghi",@test_string2="abcdefghij",@select=NULL; select @test_int,@test_double,@test_string,@test_string2,@select; @@ -12,3 +12,5 @@ select @test_int,@test_double,@test_string,@test_string2; set @test_int=null,@test_double=null,@test_string=null,@test_string2=null; select @test_int,@test_double,@test_string,@test_string2; select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3; +select @t5; + diff --git a/mysys/my_pread.c b/mysys/my_pread.c index 4e0de71bcf5..5c7d0be5854 100644 --- a/mysys/my_pread.c +++ b/mysys/my_pread.c @@ -66,11 +66,11 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset, my_filename(Filedes),my_errno); } if ((int) readbytes == -1 || (MyFlags & (MY_FNABP | MY_NABP))) - DBUG_RETURN(MY_FILE_ERROR); /* Return with error */ + DBUG_RETURN(MY_FILE_ERROR); /* Return with error */ } if (MyFlags & (MY_NABP | MY_FNABP)) - DBUG_RETURN(0); /* Ok vid l{sning */ - DBUG_RETURN(readbytes); /* purecov: inspected */ + DBUG_RETURN(0); /* Read went ok; Return 0 */ + DBUG_RETURN(readbytes); /* purecov: inspected */ } } /* my_pread */ diff --git a/sql-bench/Makefile.am b/sql-bench/Makefile.am index 9de8da5c189..673a36852e9 100644 --- a/sql-bench/Makefile.am +++ b/sql-bench/Makefile.am @@ -23,7 +23,7 @@ bench_SCRIPTS = test-ATIS test-connect test-create test-insert \ test-big-tables test-select test-wisconsin \ test-alter-table graph-compare-results \ bench-init.pl compare-results run-all-tests \ - server-cfg crash-me copy-db \ + server-cfg crash-me copy-db CLEANFILES = $(bench_SCRIPTS) EXTRA_SCRIPTS = test-ATIS.sh test-connect.sh test-create.sh \ test-insert.sh test-big-tables.sh test-select.sh \ diff --git a/sql-bench/README b/sql-bench/README index 6096c5cc1e8..6b6a5fc95c0 100755 --- a/sql-bench/README +++ b/sql-bench/README @@ -11,7 +11,7 @@ In this directory are the queries and raw data files used to populate the MySQL benchmarks. 
In order to run the benchmarks you should normally execute a command like the following: -run-all-tests --server=msyql --cmp=mysql,pg,solid --user=test --password=test --log +run-all-tests --server=mysql --cmp=mysql,pg,solid --user=test --password=test --log The above means that one wants to run the benchmark with MySQL. The limits should be taken from all of mysql,PostgreSQL and Solid. Login name and diff --git a/sql/item_func.cc b/sql/item_func.cc index 84bc972608e..8a2bd15ae6d 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -1758,7 +1758,7 @@ Item_func_get_user_var::val_str(String *str) return NULL; switch (entry->type) { case REAL_RESULT: - str->set(*(double*) entry->value); + str->set(*(double*) entry->value,decimals); break; case INT_RESULT: str->set(*(longlong*) entry->value); diff --git a/sql/share/swedish/errmsg.OLD b/sql/share/swedish/errmsg.OLD index 672ce97c575..fc26a08e9ee 100644 --- a/sql/share/swedish/errmsg.OLD +++ b/sql/share/swedish/errmsg.OLD @@ -206,3 +206,6 @@ "Kunde inte starta en tråd för replikering", "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar", "Du kan endast använda konstant-uttryck med SET", +"Lock wait timeout exceeded", +"The total number of locks exceeds the lock table size", +"Update locks cannot be acquired during a READ UNCOMMITTED transaction", diff --git a/sql/sql_base.cc b/sql/sql_base.cc index d9470ee0b59..d068dd500bc 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -389,6 +389,9 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, thd->in_lock_tables=1; result=reopen_tables(thd,1,1); thd->in_lock_tables=0; + /* Set version for table */ + for (TABLE *table=thd->open_tables; table ; table=table->next) + table->version=refresh_version; } VOID(pthread_mutex_unlock(&LOCK_open)); if (if_wait_for_refresh) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index cfd16df5d17..14f4a732eac 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1362,6 +1362,7 @@ select_create::prepare(List &values) if (info.handle_duplicates == DUP_IGNORE || info.handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + table->file->deactivate_non_unique_index((ha_rows) 0); DBUG_RETURN(0); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 3fa2bc5d9d3..207f9dd324d 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1233,7 +1233,16 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { if (drop->type == Alter_drop::COLUMN && !my_strcasecmp(field->field_name, drop->name)) + { + /* Reset auto_increment value if it was dropped */ + if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER && + !(create_info->used_fields & HA_CREATE_USED_AUTO)) + { + create_info->auto_increment_value=0; + create_info->used_fields|=HA_CREATE_USED_AUTO; + } break; + } } if (drop) { From 54fe14e2f6d4249b4317dd8e9d4473fb665a468c Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 11 Jun 2001 20:26:24 -0600 Subject: [PATCH 28/33] send a separate commit message to internals added a test for check table BitKeeper/triggers/post-commit: send a separate commit message to internals BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + BitKeeper/triggers/post-commit | 19 ++++++++++++++++++- mysql-test/r/check.result | 2 ++ mysql-test/t/check.test | 18 ++++++++++++++++++ 4 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 mysql-test/r/check.result create mode 100644 mysql-test/t/check.test diff --git a/BitKeeper/etc/logging_ok 
b/BitKeeper/etc/logging_ok index d5d02afc3d1..2f120a649dc 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -4,3 +4,4 @@ monty@tik.mysql.fi mwagner@evoq.mwagner.org paul@teton.kitebird.com monty@hundin.mysql.fi +sasha@mysql.sashanet.com diff --git a/BitKeeper/triggers/post-commit b/BitKeeper/triggers/post-commit index b84dc543e0a..619b6f88cea 100755 --- a/BitKeeper/triggers/post-commit +++ b/BitKeeper/triggers/post-commit @@ -3,6 +3,7 @@ #shift TO=dev-public@mysql.com FROM=$USER@mysql.com +INTERNALS=internals@lists.mysql.com LIMIT=10000 if [ "$REAL_EMAIL" = "" ] @@ -22,7 +23,23 @@ then List-ID: From: $FROM To: $TO -Subject: bk commit +Subject: bk commit into 3.23 tree + +EOF + bk changes -v -r+ + bk cset -r+ -d + ) | head -n $LIMIT | /usr/sbin/sendmail -t + echo "Notifying internals list at $INTERNALS" + ( + cat < +From: $FROM +To: $INTERNALS +Subject: bk commit into 3.23 tree + +Below is the list of changes that have just been pushed into main +3.23. repository. For information on how to access the repository +see http://www.mysql.com/doc/I/n/Installing_source_tree.html EOF bk changes -v -r+ diff --git a/mysql-test/r/check.result b/mysql-test/r/check.result new file mode 100644 index 00000000000..694d7429a14 --- /dev/null +++ b/mysql-test/r/check.result @@ -0,0 +1,2 @@ +Table Op Msg_type Msg_text +test.t1 check status OK diff --git a/mysql-test/t/check.test b/mysql-test/t/check.test new file mode 100644 index 00000000000..e65a61d86da --- /dev/null +++ b/mysql-test/t/check.test @@ -0,0 +1,18 @@ +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); +connection con1; +drop table if exists t1; +#add a lot of keys to slow down check +create table t1(n int not null, key(n), key(n), key(n), key(n)); +let $1=10000; +while ($1) +{ + eval insert into t1 values ($1); + dec $1; +} +send check table t1 type=extended; +connection con2; +insert into t1 values (200000); +connection con1; +reap; + From 01c886ee22bd87b27348092d60a256b6735c3729 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 12 Jun 2001 11:59:14 +0300 Subject: [PATCH 29/33] Added --verbose option to mysqlshow Fixes for making distribution Build-tools/Do-compile: Added --no-strip option Docs/manual.texi: Changelog and some fixes to comparison section client/mysqlshow.c: Added --verbose option mysql-test/r/create.result: Test of CREATE ... SELECT with keys mysql-test/t/create.test: Test of CREATE ... 
SELECT with keys scripts/make_binary_distribution.sh: Changed to use long options sql-bench/Comments/postgres.benchmark: update sql-bench/server-cfg.sh: update to use 'vacuum analyze' --- Build-tools/Do-compile | 14 ++- Docs/manual.texi | 52 +++++--- client/mysqlshow.c | 171 ++++++++++++++++++++++++-- mysql-test/r/create.result | 4 + mysql-test/t/create.test | 12 ++ scripts/make_binary_distribution.sh | 55 +++++---- sql-bench/Comments/postgres.benchmark | 1 + sql-bench/server-cfg.sh | 2 + 8 files changed, 251 insertions(+), 60 deletions(-) diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile index 3ef9ba614f9..a82d045f880 100755 --- a/Build-tools/Do-compile +++ b/Build-tools/Do-compile @@ -4,10 +4,10 @@ use Getopt::Long; $opt_distribution=$opt_user=$opt_result=$opt_config_options=$opt_config_env=""; $opt_dbd_options=$opt_perl_options=$opt_suffix=""; $opt_tmp=$version_suffix=""; -$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=0; +$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=$opt_no_strip=0; $opt_innodb=$opt_bdb=0; -GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution") || usage(); +GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution","no-strip") || usage(); usage() if ($opt_help || $opt_Information); usage() if (!$opt_distribution); @@ -19,7 +19,7 @@ if ($opt_innodb || $opt_bdb) chomp($host=`hostname`); $full_host_name=$host; -print "$host: Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n" if ($opt_debug); +info("Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n"); $connect_option= ($opt_tcpip ? "--host=$host" : ""); $host =~ /^([^.-]*)/; $host=$1 . 
$opt_suffix; @@ -146,10 +146,13 @@ if ($opt_stage <= 2) # if ($opt_stage <= 3) { + my ($flags); log_system("rm -fr mysql-3* mysql-4* $pwd/$host/*.tar.gz"); log_system("nm -n sql/mysqld | gzip -9 -v 2>&1 > sql/mysqld.sym.gz | cat"); - log_system("strip sql/mysqld extra/comp_err client/mysql sql/mysqld client/mysqlshow extra/replace isam/isamchk client/mysqladmin client/mysqldump extra/perror"); - check_system("scripts/make_binary_distribution $opt_tmp $opt_suffix",".tar.gz created"); + + $flags=""; + $flags.="--no-strip" if ($opt_no_strip); + check_system("scripts/make_binary_distribution --tmp=$opt_tmp --suffix=$opt_suffix $flags",".tar.gz created"); safe_system("mv mysql*.tar.gz $pwd/$host"); safe_system("cp client/mysqladmin $pwd/$host/bin"); safe_system("$make clean") if ($opt_with_small_disk); @@ -174,6 +177,7 @@ if ($opt_stage <= 4 && !$opt_no_test) $tar_file =~ /(mysql-[^\/]*)\.tar/; $ver=$1; $test_dir="$pwd/$host/test/$ver"; +$ENV{"LD_LIBRARY_PATH"}= "$testdir/lib:" . $ENV{"LD_LIBRARY_PATH"}; if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest) { diff --git a/Docs/manual.texi b/Docs/manual.texi index 078c09aed12..eba352092a1 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -7472,6 +7472,9 @@ Configure @strong{MySQL} with the @code{--with-named-z-libs=no} option. @node Solaris x86, SunOS, Solaris 2.7, Source install system issues @subsection Solaris x86 Notes +On Solaris 2.8 on x86, @strong{mysqld} will core dump if you run +'strip' in. + If you are using @code{gcc} or @code{egcs} on Solaris x86 and you experience problems with core dumps under load, you should use the following @code{configure} command: @@ -7530,6 +7533,11 @@ Linux version that doesn't have @code{glibc2}, you must install LinuxThreads before trying to compile @strong{MySQL}. You can get LinuxThreads at @uref{http://www.mysql.com/Downloads/Linux}. +@strong{NOTE:} We have seen some strange problems with Linux 2.2.14 and +@strong{MySQL} on SMP systems; If you have a SMP system, we recommend +you to upgrade to Linux 2.4 ASAP! Your system will be faster and more +stable by doing this! + Note that @code{glibc} versions before and including Version 2.1.1 have a fatal bug in @code{pthread_mutex_timedwait} handling, which is used when you do @code{INSERT DELAYED}. We recommend you to not use @@ -43627,15 +43635,15 @@ application. If you need speed, @strong{MySQL} is probably your best choice. If you need some of the extra features that only @code{PostgreSQL} can offer, you should use @code{PostgreSQL}. -@cindex PostgreSQL/MySQL, goals +@cindex PostgreSQL/MySQL, strategies @menu -* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development goals +* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development strategies * MySQL-PostgreSQL features:: Featurevise Comparison of MySQL and PostgreSQL * MySQL-PostgreSQL benchmarks:: Benchmarking MySQL and PostgreSQL @end menu @node MySQL-PostgreSQL goals, MySQL-PostgreSQL features, Compare PostgreSQL, Compare PostgreSQL -@subsection MySQL and PostgreSQL development goals +@subsection MySQL and PostgreSQL development strategies When adding things to MySQL we take pride to do an optimal, definite solution. The code should be so good that we shouldn't have any need to @@ -43718,7 +43726,8 @@ you never have to run any cleanups on @code{MySQL}. 
PostgreSQL doesn't yet support 24/7 systems because you have have to run @code{vacuum()} once in a while to reclaim space from @code{UPDATE} and @code{DELETE} commands and to perform statistics analyzes that are critical to get -good performance with PostgreSQL. On a busy system with lots of changes +good performance with PostgreSQL. Vacuum is also needed after adding +a lot of new rows to a table. On a busy system with lots of changes vacuum must be run very frequently, in the worst cases even many times a day. During the @code{vacuum()} run, which may take hours if the database is big, the database is from a production standpoint @@ -43809,7 +43818,7 @@ Tools to repair and optimize @strong{MyISAM} tables (the most common physical corruption of a data file happens, usually from a hardware failure. It allows a majority of the data to be recovered. @item -Upgrading @strong{MySQL} is painless. When you upgrading @strong{MySQL}, +Upgrading @strong{MySQL} is painless. When you are upgrading @strong{MySQL}, you don't need to dump/restore your data, as you have to do with most PostgreSQL upgrades. @end itemize @@ -43907,7 +43916,7 @@ We have many times asked the PostgreSQL developers and some PostgreSQL users to help us extend this benchmark to make the definitive benchmark for databases, but unfortunately we haven't got any feedback for this. -We, the @strong{MySQL} developers, has because of this spent a lot of +We, the @strong{MySQL} developers, have because of this spent a lot of hours to get maximum performance from PostgreSQL for the benchmarks, but because we don't know PostgreSQL intimately we are sure that there are things that we have missed. We have on the benchmark page documented @@ -44002,8 +44011,8 @@ optimized indexes boost performance by some margin". Our benchmarks clearly indicates that the difference in running a lot of selects on a database with and without vacuum() can easily differ by a factor of 10. @item -The test results where also strange; The ASPAP3 test benchmark -documentation mentions that the test does: +The test results where also strange; The AS3AP test documentation +mentions that the test does: "selections, simple joins, projections, aggregates, one-tuple updates, and bulk updates" @@ -44031,13 +44040,13 @@ be regarded as fair play. They should have done two tests with and without ODBC to provide the right facts (after having got experts to tune all involved databases of course). @item -They refer to the TCP-C tests, but doesn't anywhere mention that the -tests they did where not a true TCP-C test and they where not even -allowed to call it a TCP-C test. A TCP-C test can only be conducted by -the rules approved by the @uref{http://www.tpc.org,TCP-council}. Great -Bridge didn't do that. By doing this they have both violated the TCP +They refer to the TPC-C tests, but doesn't anywhere mention that the +tests they did where not a true TPC-C test and they where not even +allowed to call it a TPC-C test. A TPC-C test can only be conducted by +the rules approved by the @uref{http://www.tpc.org,TPC-council}. Great +Bridge didn't do that. By doing this they have both violated the TPC trademark and miscredited their own benchmarks. The rules set by the -TCP-council are very strict to ensure that no one can produce false +TPC-council are very strict to ensure that no one can produce false results or make unprovable statements. Apparently Great Bridge wasn't interested in doing this. 
@item @@ -44054,7 +44063,7 @@ standard binary (used by 80% of our users), which was statically linked with a fixed glibc library. According to what we know, Great Bridge did nothing to ensure that the -other databases was setup correctly to run good in their test +other databases where setup correctly to run good in their test environment. We are sure however that they didn't contact Oracle or Microsoft to ask for their advice in this matter ;) @item @@ -44095,7 +44104,7 @@ The only benchmarks that exist today that anyone can download and run against @strong{MySQL}and PostgreSQL is the MySQL benchmarks. We here at @strong{MySQL} believe that open source databases should be tested with open source tools! This is the only way to ensure that no one -does tests that none can reproduce and use this to claim that a +does tests that nobody can reproduce and use this to claim that a database is better than another. Without knowing all the facts it's impossible to answer the claims of the tester. @@ -44110,8 +44119,9 @@ going! For more information about our benchmarks suite see @xref{MySQL Benchmarks}. -We are working on even better benchmarks including much better -documentation (the current is lacking). +We are working on an even better benchmark suite, including much better +documentation of what the individual tests really do and how to add more +tests to the suite. @cindex internals @cindex threads @@ -46347,8 +46357,10 @@ not yet 100% confident in this code. @appendixsubsec Changes in release 3.23.39 @itemize @bullet @item -If one dropped and added an @code{auto_increment} column, the -@code{auto_increment} value wasn't reset. +If one dropped and added an @code{AUTO_INCREMENT} column, the +@code{AUTO_INCREMENT} sequence wasn't reset. +@item +@code{CREATE .. SELECT} now creates not unique indexes delayed. @item Fixed problem where @code{LOCK TABLES table_name READ} followed by @code{FLUSH TABLES} put a exclusive lock on the table. 
diff --git a/client/mysqlshow.c b/client/mysqlshow.c index 8fffe02a52f..199318abc2f 100644 --- a/client/mysqlshow.c +++ b/client/mysqlshow.c @@ -16,7 +16,7 @@ /* Show databases, tables or columns */ -#define SHOW_VERSION "8.2" +#define SHOW_VERSION "8.3" #include #include @@ -30,6 +30,7 @@ static my_string host=0,opt_password=0,user=0; static my_bool opt_show_keys=0,opt_compress=0,opt_status=0; +static uint opt_verbose=0; static void get_options(int *argc,char ***argv); static uint opt_mysql_port=0; @@ -140,6 +141,7 @@ static struct option long_options[] = #ifndef DONT_ALLOW_USER_CHANGE {"user", required_argument, 0, 'u'}, #endif + {"verbose", no_argument, 0, 'v'}, {"version", no_argument, 0, 'V'}, {0, 0, 0, 0} }; @@ -181,6 +183,8 @@ static void usage(void) -u, --user=# user for login if not current user\n"); #endif printf("\ + -v, --verbose more verbose output; You can use this multiple times\n\ + to get even more verbose output.\n\ -V, --version output version information and exit\n"); puts("\n\ @@ -200,7 +204,7 @@ get_options(int *argc,char ***argv) int c,option_index; my_bool tty_password=0; - while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?VWi",long_options, + while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?vVWi",long_options, &option_index)) != EOF) { switch(c) { @@ -210,6 +214,9 @@ get_options(int *argc,char ***argv) case 'c': charsets_dir= optarg; break; + case 'v': + opt_verbose++; + break; case 'h': host = optarg; break; @@ -277,10 +284,13 @@ static int list_dbs(MYSQL *mysql,const char *wild) { const char *header; - uint length; + uint length, counter = 0; + ulong rowcount = 0L; + char tables[NAME_LEN+1], rows[NAME_LEN+1]; + char query[255]; MYSQL_FIELD *field; MYSQL_RES *result; - MYSQL_ROW row; + MYSQL_ROW row, trow, rrow; if (!(result=mysql_list_dbs(mysql,wild))) { @@ -297,10 +307,79 @@ list_dbs(MYSQL *mysql,const char *wild) if (length < field->max_length) length=field->max_length; - print_header(header,length,NullS); + if (!opt_verbose) + print_header(header,length,NullS); + else if (opt_verbose == 1) + print_header(header,length,"Tables",6,NullS); + else + print_header(header,length,"Tables",6,"Total Rows",12,NullS); + while ((row = mysql_fetch_row(result))) - print_row(row[0],length,0); - print_trailer(length,0); + { + counter++; + + if (opt_verbose) + { + /* + * Original code by MG16373; Slightly modified by Monty. + * Print now the count of tables and rows for each database. + */ + + if (!(mysql_select_db(mysql,row[0]))) + { + MYSQL_RES *tresult = mysql_list_tables(mysql,(char*)NULL); + if (mysql_affected_rows(mysql) > 0) + { + sprintf(tables,"%6lu",(ulong) mysql_affected_rows(mysql)); + rowcount = 0; + if (opt_verbose > 1) + { + while ((trow = mysql_fetch_row(tresult))) + { + sprintf(query,"SELECT COUNT(*) FROM `%s`",trow[0]); + if (!(mysql_query(mysql,query))) + { + MYSQL_RES *rresult; + if ((rresult = mysql_store_result(mysql))) + { + rrow = mysql_fetch_row(rresult); + rowcount += (ulong) strtoull(rrow[0], (char**) 0, 10); + mysql_free_result(rresult); + } + } + } + sprintf(rows,"%12lu",rowcount); + } + } + else + { + sprintf(tables,"%6d",0); + sprintf(rows,"%12d",0); + } + mysql_free_result(tresult); + } + else + { + strmov(tables,"N/A"); + strmov(rows,"N/A"); + } + } + + if (!opt_verbose) + print_row(row[0],length,0); + else if (opt_verbose == 1) + print_row(row[0],length,tables,6,NullS); + else + print_row(row[0],length,tables,6,rows,12,NullS); + } + + print_trailer(length, + (opt_verbose > 0 ? 6 : 0), + (opt_verbose > 1 ? 
12 :0), + 0); + + if (counter && opt_verbose) + printf("%u row%s in set.\n",counter,(counter > 1) ? "s" : ""); mysql_free_result(result); return 0; } @@ -310,10 +389,11 @@ static int list_tables(MYSQL *mysql,const char *db,const char *table) { const char *header; - uint head_length; + uint head_length, counter = 0; + char query[255], rows[64], fields[16]; MYSQL_FIELD *field; MYSQL_RES *result; - MYSQL_ROW row; + MYSQL_ROW row, rrow; if (mysql_select_db(mysql,db)) { @@ -338,14 +418,81 @@ list_tables(MYSQL *mysql,const char *db,const char *table) if (head_length < field->max_length) head_length=field->max_length; - print_header(header,head_length,NullS); + if (!opt_verbose) + print_header(header,head_length,NullS); + else if (opt_verbose == 1) + print_header(header,head_length,"Columns",8,NullS); + else + print_header(header,head_length,"Columns",8, "Total Rows",10,NullS); + while ((row = mysql_fetch_row(result))) - print_row(row[0],head_length,0); - print_trailer(head_length,0); + { + /* + * Modified by MG16373 + * Print now the count of rows for each table. + */ + counter++; + if (opt_verbose > 0) + { + if (!(mysql_select_db(mysql,db))) + { + MYSQL_RES *rresult = mysql_list_fields(mysql,row[0],NULL); + ulong rowcount=0L; + if (!rresult) + { + strmov(fields,"N/A"); + strmov(rows,"N/A"); + } + else + { + sprintf(fields,"%8u",(uint) mysql_num_fields(rresult)); + mysql_free_result(rresult); + + if (opt_verbose > 1) + { + sprintf(query,"SELECT COUNT(*) FROM `%s`",row[0]); + if (!(mysql_query(mysql,query))) + { + if ((rresult = mysql_store_result(mysql))) + { + rrow = mysql_fetch_row(rresult); + rowcount += (unsigned long) strtoull(rrow[0], (char**) 0, 10); + mysql_free_result(rresult); + } + sprintf(rows,"%10lu",rowcount); + } + else + sprintf(rows,"%10d",0); + } + } + } + else + { + strmov(fields,"N/A"); + strmov(rows,"N/A"); + } + } + if (!opt_verbose) + print_row(row[0],head_length,NullS); + else if (opt_verbose == 1) + print_row(row[0],head_length, fields,8, NullS); + else + print_row(row[0],head_length, fields,8, rows,10, NullS); + } + + print_trailer(head_length, + (opt_verbose > 0 ? 8 : 0), + (opt_verbose > 1 ? 10 :0), + 0); + + if (counter && opt_verbose) + printf("%u row%s in set.\n\n",counter,(counter > 1) ? "s" : ""); + mysql_free_result(result); return 0; } + static int list_table_status(MYSQL *mysql,const char *db,const char *wild) { diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index f32c9b0bc80..7940d51868a 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -8,3 +8,7 @@ b 1 10000000001 a$1 $b c$ 1 2 3 +table type possible_keys key key_len ref rows Extra +t2 ref B B 21 const 1 where used +a B +3 world diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index a5224cd0318..d45d013c9fb 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -2,6 +2,7 @@ # Check some special create statements. # +drop table if exists t1,t2; create table t1 (b char(0)); insert into t1 values (""),(null); select * from t1; @@ -57,3 +58,14 @@ select a$1, $b, c$ from test_$1.$test1; create table test_$1.test2$ (a int); drop table test_$1.test2$; drop database test_$1; + +# +# Test of CREATE ... 
SELECT with indexes +# + +create table t1 (a int auto_increment not null primary key, B CHAR(20)); +insert into t1 (b) values ("hello"),("my"),("world"); +create table t2 (key (b)) select * from t1; +explain select * from t2 where b="world"; +select * from t2 where b="world"; +drop table t1,t2; diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 84dac59018b..40c48188346 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -11,30 +11,29 @@ export machine system version SOURCE=`pwd` CP="cp -p" -# Debug option must come first +STRIP=1 DEBUG=0 -if test x$1 = x"--debug" -then - DEBUG=1 - shift 1 -fi - -# Save temporary distribution here (must be full path) +SILENT=0 TMP=/tmp -if test $# -gt 0 -then - TMP=$1 - shift 1 -fi - -# Get optional suffix for distribution SUFFIX="" -if test $# -gt 0 -then - SUFFIX=$1 - shift 1 -fi +parse_arguments() { + for arg do + case "$arg" in + --debug) DEBUG=1;; + --tmp=*) TMP=`echo "$arg" | sed -e "s;--tmp=;;"` ;; + --suffix=*) SUFFIX=`echo "$arg" | sed -e "s;--suffix=;;"` ;; + --no-strip) STRIP=0 ;; + --silent) SILENT=1 ;; + *) + echo "Unknown argument '$arg'" + exit 1 + ;; + esac + done +} + +parse_arguments "$@" #make @@ -68,14 +67,18 @@ for i in extra/comp_err extra/replace extra/perror extra/resolveip \ client/mysql sql/mysqld client/mysqlshow client/mysqlcheck \ client/mysqladmin client/mysqldump client/mysqlimport client/mysqltest \ client/.libs/mysql client/.libs/mysqlshow client/.libs/mysqladmin \ - client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest + client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest \ + client/.libs/mysqlcheck do if [ -f $i ] then $CP $i $BASE/bin fi done -strip $BASE/bin/* + +if [ x$STRIP = x1 ] ; then + strip $BASE/bin/* +fi for i in sql/mysqld.sym.gz do @@ -190,7 +193,13 @@ fi echo "Using $tar to create archive" cd $TMP -$tar cvf $SOURCE/$NEW_NAME.tar $NEW_NAME + +OPT=cvf +if [ x$SILENT = x1 ] ; then + OPT=cf +fi + +$tar $OPT $SOURCE/$NEW_NAME.tar $NEW_NAME cd $SOURCE echo "Compressing archive" gzip -9 $NEW_NAME.tar diff --git a/sql-bench/Comments/postgres.benchmark b/sql-bench/Comments/postgres.benchmark index 6fadf20755e..b25a9931f9e 100644 --- a/sql-bench/Comments/postgres.benchmark +++ b/sql-bench/Comments/postgres.benchmark @@ -84,6 +84,7 @@ run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --u # the database between each major update of the tables: # vacuum table # or +# vacuum analyze # vacuum # The time for vacuum() is accounted for in the book-keeping() column, not diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh index 86b891d8856..a8d992bfdce 100644 --- a/sql-bench/server-cfg.sh +++ b/sql-bench/server-cfg.sh @@ -811,6 +811,7 @@ sub vacuum { foreach $table (@tables) { + $dbh->do("vacuum analyze $table") || die "Got error: $DBI::errstr when executing 'vacuum analyze $table'\n"; $dbh->do("vacuum $table") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; } } @@ -818,6 +819,7 @@ sub vacuum { # $dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; # $dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; + $dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum analyze'\n"; $dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n"; } $end_time=new Benchmark; From be2b125bf8d54c3d3d1051b7dbb4ed62de351788 Mon Sep 17 00:00:00 2001 From: unknown 
Date: Tue, 12 Jun 2001 11:04:31 +0200 Subject: [PATCH 30/33] Added --enable-shared to Do-compile Build-tools/Do-compile: Added --enable-shared BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + Build-tools/Do-compile | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 2f120a649dc..83d7b6d611c 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -5,3 +5,4 @@ mwagner@evoq.mwagner.org paul@teton.kitebird.com monty@hundin.mysql.fi sasha@mysql.sashanet.com +monty@work.mysql.com diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile index 3ef9ba614f9..958cb1c8edb 100755 --- a/Build-tools/Do-compile +++ b/Build-tools/Do-compile @@ -4,10 +4,10 @@ use Getopt::Long; $opt_distribution=$opt_user=$opt_result=$opt_config_options=$opt_config_env=""; $opt_dbd_options=$opt_perl_options=$opt_suffix=""; $opt_tmp=$version_suffix=""; -$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=0; +$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=0; $opt_innodb=$opt_bdb=0; -GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution") || usage(); +GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution","enable-shared","no-crash-me") || usage(); usage() if ($opt_help || $opt_Information); usage() if (!$opt_distribution); @@ -119,7 +119,10 @@ if ($opt_stage <= 1) { $opt_config_options.=" --with-client-ldflags=-all-static"; } - $opt_config_options.= " --disable-shared"; # Default for binary versions + if (!$opt_enable_shared) + { + $opt_config_options.= " --disable-shared"; # Default for binary versions + } if ($opt_bdb) { $opt_config_options.= " --with-berkeley-db" @@ -237,7 +240,7 @@ if ($opt_stage <= 7 && $opt_perl_files && !$opt_no_perl && !$opt_no_test) } -if ($opt_stage <= 8 && !$opt_no_test) +if ($opt_stage <= 8 && !$opt_no_test && !$opt_no_crash_me) { safe_cd("$test_dir/sql-bench"); log_system("rm -f limits/mysql.cfg"); From cdcc56705c89909a4077008640aebc894504e99d Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 12 Jun 2001 13:10:23 +0300 Subject: [PATCH 31/33] Added mysqlcheck to RPM support-files/mysql.spec.sh: Added mysqlcheck --- support-files/mysql.spec.sh | 1 + 
1 file changed, 1 insertion(+) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index a10b09c12be..6912953495e 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -360,6 +360,7 @@ fi %attr(755, root, root) /usr/bin/mysql %attr(755, root, root) /usr/bin/mysqlaccess %attr(755, root, root) /usr/bin/mysqladmin +%attr(755, root, root) /usr/bin/mysqlcheck %attr(755, root, root) /usr/bin/mysql_find_rows %attr(755, root, root) /usr/bin/mysqldump %attr(755, root, root) /usr/bin/mysqlimport From b80c1ce3ae10c81121650ee203f729411148ecf6 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 12 Jun 2001 17:13:52 +0300 Subject: [PATCH 32/33] Fixed wrong mysql-test New german error messages BitKeeper/deleted/.del-ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~d002b0bc548ff8b3: Delete: sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~e938a858bd12aa8d: Delete: sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~14360865bbba479f: Delete: sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~b6be70bb51013cad: Delete: sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~1b715c6fd72e913e: Delete: sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~f3b1d326092bf44: Delete: sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~a0143553cccb54e2: Delete: sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~4ffc9cf4be665ea2: Delete: sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~1ed1dc6abd24e7e3: Delete: sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~2ac8fe298953d43: Delete: sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~cf0d806760eefef2: Delete: sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~e625af7f600bf930: Delete: sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~a88e954bc8de5460: Delete: sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~d922a0fcc1009130: Delete: sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~840503a555e420ec: Delete: sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~ee94f987797ca948: Delete: sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~2f516d2c108a9e05: Delete: 
sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~6e532c1936df1737: Delete: sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~28b688e2cd4b6bb3: Delete: sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~6d8209bf72b663ed: Delete: sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~c87333d6fe04433e: Delete: sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~1cf5d5f0d70a3fa0: Delete: sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~7ed15d6fd1a5944c: Delete: sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~ab58fffa30dce97e: Delete: sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~fc207468e871ff69: Delete: sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~241c337935ae1524: Delete: sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~4e5a2ab4907748d4: Delete: sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~975e26cac59161fa: Delete: sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~27b7a557c3cb07a: Delete: sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~a85a6f0477c13f83: Delete: sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~34a39fbcb58d8945: Delete: sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~8ef771713f89e1: Delete: sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~4f7795c27eaab86b: Delete: sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~8101a5823c17e58a: Delete: sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~6a468dcd3e6f5405: Delete: sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg BitKeeper/deleted/.del-wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg~24a02e007a58bf73: Delete: sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg Docs/internals.texi: Added MySQL protocol Docs/manual.texi: Added links configure.in: Update to 39a mysql-test/t/check.test: Added missing drop table 
mysql-test/t/compare.test: Added missing drop table sql/share/german/errmsg.txt: Update --- Docs/internals.texi | 209 +++++++++++++++++- Docs/manual.texi | 4 + configure.in | 2 +- mysql-test/t/check.test | 2 +- mysql-test/t/compare.test | 1 + ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 19 -- ...S-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 19 -- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 20 -- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 20 ++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 28 --- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 26 +++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 74 ------- ...N-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 88 -------- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 102 --------- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 103 +++++++++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 77 ------- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 104 +++++++++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 14 -- ...e-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 14 -- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 14 -- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 14 ++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 14 -- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 14 ++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 19 -- ...s-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 19 -- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 19 -- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 19 ++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 28 --- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 25 +++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 30 --- ...t-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 30 --- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 30 --- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 30 +++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 42 ---- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 38 ++++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 18 -- ...e-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 18 -- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 18 -- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 18 ++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 18 -- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 18 ++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 58 ----- ...t-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 98 -------- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 103 --------- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 103 +++++++++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 85 ------- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 104 +++++++++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 23 -- ...t-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 29 --- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 36 --- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 36 +++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 38 ---- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 42 ++++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 14 -- ...n-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 14 -- ...-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 14 -- ...-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 14 ++ ...fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg | 26 --- ...ast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg | 22 ++ sql/share/german/errmsg.txt | 28 +-- 60 files changed, 975 insertions(+), 1331 deletions(-) delete mode 100644 sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 
sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/ATIS-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/ATIS-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/RUN-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/RUN-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/alter-table-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/alter-table-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/big-tables-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/big-tables-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/connect-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/connect-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/create-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/create-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/insert-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/insert-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg 
delete mode 100644 sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/select-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/select-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/wisconsin-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg delete mode 100644 sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg create mode 100644 sql-bench/Results/wisconsin-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg diff --git a/Docs/internals.texi b/Docs/internals.texi index 9bbd90a7a3a..e0574df550b 100644 --- a/Docs/internals.texi +++ b/Docs/internals.texi @@ -47,7 +47,7 @@ This is a manual about @strong{MySQL} internals. @menu @end menu -@node caching +@node caching,,, @chapter How MySQL handles caching @strong{MySQL} has the following caches: @@ -89,7 +89,7 @@ found rows are cached in a join cache. One SELECT query can use many join caches in the worst case. @end itemize -@node flush tables +@node flush tables,,, @chapter How MySQL handles flush tables @itemize @bullet @@ -134,7 +134,7 @@ After this it will give other threads a chance to open the same tables. @end itemize -@node Filesort +@node Filesort,,, @chapter How MySQL does sorting (filesort) @itemize @bullet @@ -174,7 +174,7 @@ and then we read the rows in the sorted order into a row buffer @end itemize -@node Coding guidelines +@node Coding guidelines,,, @chapter Coding guidelines @itemize @bullet @@ -289,7 +289,7 @@ Use pointers rather than array indexing when operating on strings. @end itemize -@node mysys functions +@node mysys functions,,, @chapter mysys functions Functions i mysys: (For flags se my_sys.h) @@ -433,6 +433,205 @@ Functions i mysys: (For flags se my_sys.h) void end_key_cache _A((void)); - End key-cacheing. +@node protocol,,, +@chapter MySQL client/server protocol + +Raw packet without compression +============================== +------------------------------------------------- +| Packet Length | Packet no | Data | +| 3 Bytes | 1 Byte | n Bytes | +------------------------------------------------- + +3 Byte packet length + The length is calculated with int3store + See include/global.h for details. + The max packetsize can be 16 MB. +1 Byte packet no + +If no compression is used the first 4 bytes of each paket +is the header of the paket. +The packet number is incremented for each sent packet. The first +packet starts with 0 + +n Byte data + +The packet length can be recalculated with: +length = byte1 + (256 * byte2) + (256 * 256 * byte3) + +Raw packet with compression +=========================== +----------------------------------------------------- +| Packet Length | Packet no | Uncomp. Packet Length | +| 3 Bytes | 1 Byte | 3 Bytes | +----------------------------------------------------- + +3 Byte packet length + The length is calculated with int3store + See include/global.h for details. + The max packetsize can be 16 MB. 
+1 Byte packet no +3 Byte uncompressed packet length + +If compression is used the first 7 bytes of each paket +is the header of the paket. + +Basic packets +============== +OK-packet + For details see sql/net_pkg.cc + function send_ok + ------------------------------------------------- + | Header | No of Rows | Affected Rows | + | | 1 Byte | 1-8 Byte | + ------------------------------------------------- + | ID (last_insert_id) | Status | Length | + | 1-8 Byte | 2 Byte | 1-8 Byte | + ------------------------------------------------- + | Messagetext | + | n Byte | + ------------------------------------------------- + + Header + 1 byte number of rows ? (always 0 ?) + 1-8 bytes affected rows + 1-8 byte id (last_insert_id) + 2 byte Status (usually 0) + If the OK-packege includes a message: + 1-8 bytes length of message + n bytes messagetext + +Error-packet + ------------------------------------------------- + | Header | Statuscode | Error no | + | | 1 Byte | 2 Byte | + ------------------------------------------------- + | Messagetext | 0x00 | + | n Byte | 1 Byte | + ------------------------------------------------- + + Header + 1 byte status code (0xFF = ERROR) + 2 byte error number (is only sent to new 3.23 clients. + n byte errortext + 1 byte 0x00 + + + +The communication +================= + +> Packet from server to client +< Paket from client tor server + + Login + ------ + > 1. packet + Header + 1 byte protocolversion + n byte serverversion + 1 byte 0x00 + 4 byte threadnumber + 8 byte crypt seed + 1 byte 0x00 + 2 byte CLIENT_xxx options (see include/mysql_com.h + that is supported by the server + 1 byte number of current server charset + 2 byte server status variables (SERVER_STATUS_xxx flags) + 13 byte 0x00 (not used yet). + + < 2. packet + Header + 2 byte CLIENT_xxx options + 3 byte max_allowed_packet for the client + n byte username + 1 byte 0x00 + 8 byte crypted password + 1 byte 0x00 + n byte databasename + 1 byte 0x00 + + > 3. packet + OK-packet + + + Command + -------- + < 1. packet + Header + 1 byte command type (e.g.0x03 = query) + n byte query + + Result set (after command) + -------------------------- + > 2. packet + Header + 1-8 byte field_count (packed with net_store_length()) + + If field_count == 0 (command): + 1-8 byte affected rows + 1-8 byte insert id + 2 bytes server_status (SERVER_STATUS_xx) + + If field_count == NULL_LENGTH (251) + LOAD DATA LOCAL INFILE + + If field_count > 0 Result Set: + + > n packets + Header Info + Column description: 5 data object /column + (See code in unpack_fields()) + + Columninfo for each column: + 1 data block table_name + 1 byte length of block + n byte data + 1 data block field_name + 1 byte length of block... + n byte data + 1 data block display length of field + 1 byte length of block + 3 bytes display length of filed + 1 data block type field of type (enum_field_types) + 1 byte length of block + 1 bytexs field of type + 1 data block flags + 1 byte length of block + 2 byte flags for the columns (NOT_NULL_FLAG, ZEROFILL_FLAG....) 
+ 1 byte decimals + + if table definition: + 1 data block default value + + Actual result (one packet per row): + 4 byte header + 1-8 byte length of data + n data + + +Fieldtype Codes: +================ + + display_length |enum_field_type |flags + ---------------------------------------------------- +Blob 03 FF FF 00 |01 FC |03 90 00 00 +Mediumblob 03 FF FF FF |01 FC |03 90 00 00 +Tinyblob 03 FF 00 00 |01 FC |03 90 00 00 +Text 03 FF FF 00 |01 FC |03 10 00 00 +Mediumtext 03 FF FF FF |01 FC |03 10 00 00 +Tinytext 03 FF 00 00 |01 FC |03 10 00 00 +Integer 03 0B 00 00 |01 03 |03 03 42 00 +Mediumint 03 09 00 00 |01 09 |03 00 00 00 +Smallint 03 06 00 00 |01 02 |03 00 00 00 +Tinyint 03 04 00 00 |01 01 |03 00 00 00 +Varchar 03 XX 00 00 |01 FD |03 00 00 00 +Enum 03 05 00 00 |01 FE |03 00 01 00 +Datetime 03 13 00 00 |01 0C |03 00 00 00 +Timestamp 03 0E 00 00 |01 07 |03 61 04 00 +Time 03 08 00 00 |01 0B |03 00 00 00 +Date 03 0A 00 00 |01 0A |03 00 00 00 + @c The Index was empty, and ugly, so I removed it. (jcole, Sep 7, 2000) diff --git a/Docs/manual.texi b/Docs/manual.texi index eba352092a1..6c096b9831c 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -2612,6 +2612,10 @@ M2D, a @strong{MySQL} Administration client for Windows. M2D supports administration of @strong{MySQL} databases, creation of new databases and tables, editing, and more. +@item @uref{http://dlabs.4t2.com} +Dexter, a small server written in Perl which can be used as a proxy server for +@strong{MySQL} or as a database extender. + @item @uref{http://www.scibit.com/Products/Software/Utils/Mascon.asp} Mascon is a powerful Win32 GUI for administering MySQL databases. diff --git a/configure.in b/configure.in index 37bc22ff63e..1b95a5d5800 100644 --- a/configure.in +++ b/configure.in @@ -4,7 +4,7 @@ dnl Process this file with autoconf to produce a configure script. AC_INIT(sql/mysqld.cc) AC_CANONICAL_SYSTEM # The Docs Makefile.am parses this line! -AM_INIT_AUTOMAKE(mysql, 3.23.39) +AM_INIT_AUTOMAKE(mysql, 3.23.39a) AM_CONFIG_HEADER(config.h) PROTOCOL_VERSION=10 diff --git a/mysql-test/t/check.test b/mysql-test/t/check.test index e65a61d86da..62af9f92e65 100644 --- a/mysql-test/t/check.test +++ b/mysql-test/t/check.test @@ -15,4 +15,4 @@ connection con2; insert into t1 values (200000); connection con1; reap; - +drop table t1; diff --git a/mysql-test/t/compare.test b/mysql-test/t/compare.test index b5596784f35..450d9c0961c 100644 --- a/mysql-test/t/compare.test +++ b/mysql-test/t/compare.test @@ -2,6 +2,7 @@ # Bug when using comparions of strings and integers. 
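A brief editorial aside on the client/server protocol notes added to Docs/internals.texi above: every uncompressed packet carries a 4-byte header, a 3-byte length stored least-significant byte first (written with int3store) followed by a 1-byte packet number, and according to those notes the first payload byte is 0x00 for an OK packet and 0xFF for an error packet. The sketch below decodes such a header using the length formula from the notes; it is illustrative only and is not code from the patch or from the client library.

/* Illustrative only -- not part of this patch and not the client
   library's own code.  Decodes the 4-byte packet header described in
   the protocol notes: 3 bytes length (least significant byte first)
   plus 1 byte packet number. */
#include <stdio.h>

int main(void)
{
  /* Example header: 5 bytes of payload, packet number 2 */
  unsigned char header[4] = { 0x05, 0x00, 0x00, 0x02 };
  unsigned char payload_first_byte = 0x00;   /* 0x00 = OK, 0xFF = error */

  /* length = byte1 + (256 * byte2) + (256 * 256 * byte3) */
  unsigned long length = (unsigned long) header[0]
                       + ((unsigned long) header[1] << 8)
                       + ((unsigned long) header[2] << 16);
  unsigned int packet_no = header[3];

  printf("payload length: %lu bytes (max 16 MB)\n", length);
  printf("packet number : %u\n", packet_no);
  printf("packet type   : %s\n",
         payload_first_byte == 0xFF ? "error packet" : "OK/result packet");
  return 0;
}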
# +drop table if exists t1; CREATE TABLE t1 (id CHAR(12) not null, PRIMARY KEY (id)); insert into t1 values ('000000000001'),('000000000002'); explain select * from t1 where id=000000000001; diff --git a/sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index e932e9ca0ce..00000000000 --- a/sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,19 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:10:55 - -ATIS table test - -Creating tables -Time for create_table (28): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (9768): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Retrieving data -Time for select_simple_join (500): 1 wallclock secs ( 0.64 usr 0.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_join (200): 16 wallclock secs ( 4.21 usr 2.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_distinct (800): 11 wallclock secs ( 1.74 usr 0.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (2600): 13 wallclock secs ( 1.34 usr 0.63 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Removing tables -Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 42 wallclock secs ( 7.93 usr 3.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index a94e920b55d..00000000000 --- a/sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,19 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2000-12-05 5:18:45 - -ATIS table test - -Creating tables -Time for create_table (28): 0 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (9768): 9 wallclock secs ( 2.88 usr 0.35 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Retrieving data -Time for select_simple_join (500): 3 wallclock secs ( 0.69 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_join (200): 14 wallclock secs ( 5.18 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_distinct (800): 17 wallclock secs ( 2.21 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (2600): 45 wallclock secs ( 1.73 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Removing tables -Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 89 wallclock secs (12.72 usr 0.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 9db5568dd99..00000000000 --- a/sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,20 +0,0 @@ -Testing server 'PostgreSQL version ???' 
at 2001-06-03 4:40:22 - -ATIS table test - -Creating tables -Time for create_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) - -Inserting data -Time to insert (9768): 8 wallclock secs ( 2.97 usr 0.28 sys + 0.00 cusr 0.00 csys = 3.25 CPU) - -Retrieving data -Time for select_simple_join (500): 3 wallclock secs ( 0.74 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.77 CPU) -Time for select_join (100): 4 wallclock secs ( 0.52 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.65 CPU) -Time for select_key_prefix_join (100): 11 wallclock secs ( 4.30 usr 0.16 sys + 0.00 cusr 0.00 csys = 4.46 CPU) -Time for select_distinct (800): 22 wallclock secs ( 1.95 usr 0.18 sys + 0.00 cusr 0.00 csys = 2.13 CPU) -Time for select_group (2600): 52 wallclock secs ( 1.43 usr 0.19 sys + 0.00 cusr 0.00 csys = 1.62 CPU) - -Removing tables -Time to drop_table (28): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) -Total time: 101 wallclock secs (11.93 usr 0.98 sys + 0.00 cusr 0.00 csys = 12.91 CPU) diff --git a/sql-bench/Results/ATIS-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..748dd16a42e --- /dev/null +++ b/sql-bench/Results/ATIS-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,20 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:14:34 + +ATIS table test + +Creating tables +Time for create_table (28): 0 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU) + +Inserting data +Time to insert (9768): 8 wallclock secs ( 2.78 usr 0.51 sys + 0.00 cusr 0.00 csys = 3.29 CPU) + +Retrieving data +Time for select_simple_join (500): 3 wallclock secs ( 0.73 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.83 CPU) +Time for select_join (100): 4 wallclock secs ( 0.67 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.76 CPU) +Time for select_key_prefix_join (100): 12 wallclock secs ( 4.43 usr 0.15 sys + 0.00 cusr 0.00 csys = 4.58 CPU) +Time for select_distinct (800): 22 wallclock secs ( 2.10 usr 0.26 sys + 0.00 cusr 0.00 csys = 2.36 CPU) +Time for select_group (2600): 55 wallclock secs ( 1.75 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.81 CPU) + +Removing tables +Time to drop_table (28): 0 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.02 CPU) +Total time: 104 wallclock secs (12.54 usr 1.17 sys + 0.00 cusr 0.00 csys = 13.71 CPU) diff --git a/sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index befe150bdc4..00000000000 --- a/sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,28 +0,0 @@ -Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:08:29 - -ATIS table test - -Creating tables -Time for create_table (28): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (9768): 9 wallclock secs ( 2.85 usr 0.37 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Retrieving data -Time for select_simple_join (500): 3 wallclock secs ( 0.79 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_join (200): 13 wallclock secs ( 4.77 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_distinct (800): 17 
wallclock secs ( 2.06 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (2600): 41 wallclock secs ( 1.51 usr 0.15 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Removing tables -Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 87 wallclock secs (12.00 usr 0.99 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/ATIS-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..628ddd42784 --- /dev/null +++ b/sql-bench/Results/ATIS-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,26 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 17:53:03 + +ATIS table test + +Creating tables +Time for create_table (28): 1 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU) + +Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Inserting data +Time to insert (9768): 8 wallclock secs ( 2.90 usr 0.28 sys + 0.00 cusr 0.00 csys = 3.18 CPU) + +Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Retrieving data +Time for select_simple_join (500): 4 wallclock secs ( 0.71 usr 0.18 sys + 0.00 cusr 0.00 csys = 0.89 CPU) +Time for select_join (100): 4 wallclock secs ( 0.59 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.69 CPU) +Time for select_key_prefix_join (100): 12 wallclock secs ( 4.47 usr 0.12 sys + 0.00 cusr 0.00 csys = 4.59 CPU) +Time for select_distinct (800): 23 wallclock secs ( 1.91 usr 0.34 sys + 0.00 cusr 0.00 csys = 2.25 CPU) +Time for select_group (2600): 51 wallclock secs ( 1.48 usr 0.12 sys + 0.00 cusr 0.00 csys = 1.60 CPU) + +Removing tables +Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Total time: 105 wallclock secs (12.13 usr 1.14 sys + 0.00 cusr 0.00 csys = 13.27 CPU) diff --git a/sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 88b36fc1e52..00000000000 --- a/sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,74 +0,0 @@ -Benchmark DBD suite: 2.9 -Date of test: 2000-08-17 20:19:45 -Running tests on: Linux 2.2.14-my-SMP i686 -Arguments: --fast -Comments: Intel Xeon, 2x550 Mhz, 1G ram, key_buffer=16M -Limits from: mysql,pg -Server version: MySQL 3.23.22 beta - -ATIS: Total time: 42 wallclock secs ( 7.93 usr 3.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -alter-table: Total time: 260 wallclock secs ( 0.26 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -big-tables: Total time: 31 wallclock secs ( 8.32 usr 6.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -connect: Total time: 54 wallclock secs (26.60 usr 10.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -create: Total time: 122 wallclock secs ( 8.51 usr 3.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -insert: Total time: 1332 wallclock secs (254.96 usr 103.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -select: Total time: 1696 wallclock secs (113.17 usr 64.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -wisconsin: Total time: 6 wallclock secs ( 1.67 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -All 8 test executed successfully 
- -Totals per operation: -Operation seconds usr sys cpu tests -alter_table_add 252.00 0.20 0.10 0.00 992 -connect 11.00 6.50 1.98 0.00 10000 -connect+select_1_row 14.00 7.49 2.28 0.00 10000 -connect+select_simple 12.00 7.41 2.24 0.00 10000 -count 46.00 0.02 0.00 0.00 100 -count_distinct 124.00 0.57 0.12 0.00 1000 -count_distinct_big 629.00 70.62 55.60 0.00 1020 -count_distinct_group 77.00 1.14 0.31 0.00 1000 -count_distinct_group_on_key 65.00 0.35 0.14 0.00 1000 -count_distinct_group_on_key_parts 77.00 1.07 0.35 0.00 1000 -count_group_on_key_parts 61.00 1.03 0.31 0.00 1000 -count_on_key 573.00 16.47 3.19 0.00 50100 -create+drop 26.00 2.17 1.03 0.00 10000 -create_MANY_tables 35.00 1.84 0.58 0.00 10000 -create_index 4.00 0.00 0.00 0.00 8 -create_key+drop 40.00 3.68 0.86 0.00 10000 -create_table 1.00 0.01 0.00 0.00 31 -delete_big 18.00 0.00 0.00 0.00 13 -delete_big_many_keys 1.00 0.00 0.00 0.00 2 -delete_key 3.00 0.44 0.38 0.00 10000 -drop_index 4.00 0.00 0.00 0.00 8 -drop_table 0.00 0.00 0.00 0.00 28 -drop_table_when_MANY_tables 15.00 0.10 0.01 0.00 10000 -insert 87.00 18.67 12.96 0.00 350768 -insert_duplicates 82.00 17.82 12.50 0.00 300000 -insert_key 91.00 8.12 4.12 0.00 100000 -insert_many_fields 9.00 0.46 0.10 0.00 2000 -min_max 32.00 0.05 0.00 0.00 60 -min_max_on_key 210.00 24.91 5.21 0.00 85000 -order_by 48.00 20.14 16.88 0.00 10 -order_by_key 31.00 20.12 10.64 0.00 10 -select_1_row 2.00 0.54 0.83 0.00 10000 -select_2_rows 4.00 0.55 0.65 0.00 10000 -select_big 37.00 23.14 12.09 0.00 10080 -select_column+column 3.00 0.51 0.73 0.00 10000 -select_diff_key 205.00 0.19 0.03 0.00 500 -select_distinct 11.00 1.74 0.60 0.00 800 -select_group 71.00 1.38 0.66 0.00 2711 -select_group_when_MANY_tables 6.00 0.71 0.53 0.00 10000 -select_join 16.00 4.21 2.17 0.00 200 -select_key 125.00 67.84 14.15 0.00 200000 -select_key_prefix 127.00 65.92 14.67 0.00 200000 -select_many_fields 22.00 7.85 6.78 0.00 2000 -select_range 20.00 7.27 1.62 0.00 25420 -select_range_prefix 19.00 6.09 1.82 0.00 25010 -select_simple 2.00 0.47 0.54 0.00 10000 -select_simple_join 1.00 0.64 0.24 0.00 500 -update_big 65.00 0.00 0.00 0.00 500 -update_of_key 77.00 2.84 2.32 0.00 756 -update_of_key_big 33.00 0.02 0.01 0.00 501 -update_with_key 97.00 14.16 13.03 0.00 100000 -wisc_benchmark 4.00 1.66 0.72 0.00 114 -TOTALS 3625.00 439.13 206.08 0.00 1594242 diff --git a/sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 0cb843d77ba..00000000000 --- a/sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,88 +0,0 @@ -Benchmark DBD suite: 2.10 -Date of test: 2000-12-05 5:18:45 -Running tests on: Linux 2.2.14-my-SMP i686 -Arguments: -Comments: Intel Xeon, 2x550 Mhz 500 Mb, pg started with -o -F -Limits from: mysql,pg -Server version: PostgreSQL version ??? 
- -ATIS: Total time: 89 wallclock secs (12.72 usr 0.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -alter-table: Total time: 29 wallclock secs ( 0.71 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -big-tables: Total time: 1248 wallclock secs ( 9.27 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -connect: Total time: 472 wallclock secs (48.80 usr 17.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -create: Total time: 8968 wallclock secs (35.76 usr 5.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -insert: Estimated total time: 110214 wallclock secs (659.27 usr 91.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -select: Estimated total time: 8255 wallclock secs (54.76 usr 6.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -wisconsin: Total time: 813 wallclock secs (12.05 usr 2.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -All 8 test executed successfully -Tests with estimated time have a + at end of line - -Totals per operation: -Operation seconds usr sys cpu tests -alter_table_add 28.00 0.41 0.03 0.00 992 -connect 125.00 9.11 3.79 0.00 10000 -connect+select_1_row 173.00 12.56 5.56 0.00 10000 -connect+select_simple 140.00 12.15 5.74 0.00 10000 -count 130.00 0.01 0.03 0.00 100 -count_distinct 235.00 0.76 0.12 0.00 2000 -count_distinct_big 200.00 8.26 0.30 0.00 120 -count_distinct_group 271.00 1.27 0.10 0.00 1000 -count_distinct_group_on_key 174.00 0.44 0.11 0.00 1000 -count_distinct_group_on_key_parts 270.00 1.43 0.07 0.00 1000 -count_group_on_key_parts 242.00 1.19 0.05 0.00 1000 -count_on_key 2544.00 16.73 2.42 0.00 50100 + -create+drop 2954.00 11.24 1.81 0.00 10000 -create_MANY_tables 448.00 7.42 0.95 0.00 10000 -create_index 1.00 0.00 0.00 0.00 8 -create_key+drop 4055.00 10.98 1.30 0.00 10000 -create_table 1.00 0.03 0.01 0.00 31 -delete_all 341.00 0.00 0.00 0.00 12 -delete_all_many_keys 31.00 0.07 0.00 0.00 1 -delete_big 0.00 0.00 0.00 0.00 1 -delete_big_many_keys 30.00 0.07 0.00 0.00 128 -delete_key 283.00 2.91 0.52 0.00 10000 -drop_index 0.00 0.00 0.00 0.00 8 -drop_table 0.00 0.00 0.00 0.00 28 -drop_table_when_MANY_tables 1324.00 3.41 0.51 0.00 10000 -insert 8542.00 109.96 19.42 0.00 350768 -insert_duplicates 3055.00 60.75 8.53 0.00 100000 -insert_key 3693.00 33.29 5.64 0.00 100000 -insert_many_fields 357.00 1.18 0.13 0.00 2000 -insert_select_1_key 49.00 0.00 0.00 0.00 1 -insert_select_2_keys 43.00 0.00 0.00 0.00 1 -min_max 58.00 0.02 0.01 0.00 60 -min_max_on_key 11172.00 24.56 3.60 0.00 85000 ++ -order_by_big 121.00 21.92 0.67 0.00 10 -order_by_big_key 115.00 22.06 0.67 0.00 10 -order_by_big_key2 118.00 22.07 0.53 0.00 10 -order_by_big_key_desc 116.00 22.15 0.66 0.00 10 -order_by_big_key_diff 126.00 22.20 0.79 0.00 10 -order_by_key 15.00 1.09 0.06 0.00 500 -order_by_key2_diff 19.00 2.00 0.06 0.00 500 -order_by_range 16.00 1.21 0.02 0.00 500 -select_1_row 7.00 3.10 0.50 0.00 10000 -select_2_rows 6.00 2.75 0.54 0.00 10000 -select_big 64.00 25.86 1.65 0.00 10080 -select_column+column 9.00 2.41 0.31 0.00 10000 -select_diff_key 13.00 0.24 0.01 0.00 500 -select_distinct 17.00 2.21 0.07 0.00 800 -select_group 285.00 1.76 0.11 0.00 2711 -select_group_when_MANY_tables 187.00 2.71 0.68 0.00 10000 -select_join 14.00 5.18 0.20 0.00 200 -select_key 4967.00 68.44 12.65 0.00 200000 + -select_key2 4933.00 67.48 11.08 0.00 200000 + -select_key_prefix 4938.00 67.63 10.85 0.00 200000 + -select_many_fields 891.00 8.07 0.66 0.00 2000 -select_range 35.00 0.87 0.02 0.00 410 -select_range_key2 26862.00 7.62 1.08 0.00 25000 ++ -select_range_prefix 24419.00 9.69 0.80 0.00 25000 ++ -select_simple 4.00 2.96 0.45 0.00 10000 -select_simple_join 3.00 
0.69 0.04 0.00 500 -update_big 1894.00 0.02 0.00 0.00 10 -update_of_key 2460.00 15.33 3.09 0.00 50000 -update_of_key_big 444.00 0.20 0.00 0.00 501 -update_of_primary_key_many_keys 1164.00 0.08 0.01 0.00 256 -update_with_key 14806.00 89.73 16.29 0.00 300000 -wisc_benchmark 18.00 3.04 0.25 0.00 114 -TOTALS 130055.00 832.98 125.55 0.00 1844991 ++++++++++ diff --git a/sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 1d07a79018e..00000000000 --- a/sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,102 +0,0 @@ -Benchmark DBD suite: 2.13 -Date of test: 2001-06-03 19:30:53 -Running tests on: Linux 2.4.0-64GB-SMP i686 -Arguments: -Comments: Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F -Limits from: mysql,pg -Server version: PostgreSQL version 7.1.1 - -ATIS: Total time: 101 wallclock secs (11.93 usr 0.98 sys + 0.00 cusr 0.00 csys = 12.91 CPU) -alter-table: Total time: 52 wallclock secs ( 0.49 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.63 CPU) -big-tables: Total time: 1324 wallclock secs ( 8.10 usr 0.69 sys + 0.00 cusr 0.00 csys = 8.79 CPU) -connect: Total time: 555 wallclock secs (51.66 usr 14.01 sys + 0.00 cusr 0.00 csys = 65.67 CPU) -create: Total time: 10008 wallclock secs (27.97 usr 5.83 sys + 0.00 cusr 0.00 csys = 33.80 CPU) -insert: Estimated total time: 199360 wallclock secs (879.85 usr 202.59 sys + 0.00 cusr 0.00 csys = 1082.45 CPU) -select: Estimated total time: 13197 wallclock secs (68.30 usr 8.18 sys + 0.00 cusr 0.00 csys = 76.48 CPU) -wisconsin: Total time: 52 wallclock secs (12.40 usr 2.23 sys + 0.00 cusr 0.00 csys = 14.63 CPU) - -All 8 test executed successfully -Tests with estimated time have a + at end of line - -Totals per operation: -Operation seconds usr sys cpu tests -alter_table_add 49.00 0.26 0.06 0.32 992 -connect 143.00 8.01 1.89 9.90 10000 -connect+select_1_row 195.00 10.94 2.31 13.25 10000 -connect+select_simple 157.00 10.42 2.41 12.83 10000 -count 131.00 0.03 0.00 0.03 100 -count_distinct 132.00 0.31 0.06 0.37 1000 -count_distinct_2 213.00 0.37 0.03 0.40 1000 -count_distinct_big 266.00 7.91 0.25 8.16 120 -count_distinct_group 384.00 1.07 0.08 1.15 1000 -count_distinct_group_on_key 488.00 0.41 0.03 0.44 1000 -count_distinct_group_on_key_parts 383.00 1.10 0.07 1.17 1000 -count_distinct_key_prefix 179.00 0.28 0.07 0.35 1000 -count_group_on_key_parts 331.00 1.13 0.06 1.19 1000 -count_on_key 1850.00 15.78 1.99 17.77 50100 + -create+drop 3280.00 10.74 1.89 12.63 10000 -create_MANY_tables 160.00 3.67 1.35 5.02 5000 -create_index 1.00 0.00 0.00 0.00 8 -create_key+drop 5781.00 10.70 1.53 12.23 10000 -create_table 1.00 0.01 0.00 0.01 31 -delete_all 2478.00 0.01 0.00 0.01 12 -delete_all_many_keys 94.00 0.05 0.00 0.05 1 -delete_big 0.00 0.01 0.00 0.01 1 -delete_big_many_keys 93.00 0.05 0.00 0.05 128 -delete_key 85.00 3.19 0.48 3.67 10000 -drop_index 0.00 0.01 0.00 0.01 8 -drop_table 1.00 0.01 0.00 0.01 28 -drop_table_when_MANY_tables 772.00 1.29 0.28 1.57 5000 -insert 353.00 104.09 24.32 128.41 350768 -insert_duplicates 120.00 30.53 10.61 41.14 100000 -insert_key 804.00 47.08 47.06 94.14 100000 -insert_many_fields 528.00 1.12 0.21 1.33 2000 -insert_select_1_key 86.00 0.00 0.00 0.00 1 -insert_select_2_keys 196.00 0.00 0.00 0.00 1 -min_max 60.00 0.02 0.00 0.02 60 -min_max_on_key 10543.00 25.38 4.37 29.75 85000 ++ -order_by_big 148.00 21.11 0.72 21.83 10 -order_by_big_key 145.00 24.01 1.27 25.28 10 -order_by_big_key2 132.00 21.28 0.64 21.92 10 
-order_by_big_key_desc 145.00 23.93 1.27 25.20 10 -order_by_big_key_diff 138.00 21.30 0.56 21.86 10 -order_by_big_key_prefix 133.00 21.16 0.80 21.96 10 -order_by_key2_diff 7.00 1.94 0.03 1.97 500 -order_by_key_prefix 4.00 1.04 0.08 1.12 500 -order_by_range 4.00 1.13 0.06 1.19 500 -outer_join 2539.00 0.00 0.01 0.01 10 -outer_join_found 2515.00 0.00 0.00 0.00 10 -outer_join_not_found 124666.00 0.00 0.00 0.00 500 + -outer_join_on_key 2307.00 0.00 0.00 0.00 10 -select_1_row 6.00 2.25 0.46 2.71 10000 -select_2_rows 7.00 2.77 0.38 3.15 10000 -select_big 93.00 33.23 9.79 43.02 10080 -select_column+column 8.00 2.78 0.41 3.19 10000 -select_diff_key 0.00 0.21 0.02 0.23 500 -select_distinct 22.00 1.95 0.18 2.13 800 -select_group 326.00 1.47 0.20 1.67 2711 -select_group_when_MANY_tables 15.00 1.57 0.78 2.35 5000 -select_join 4.00 0.52 0.13 0.65 100 -select_key 243.00 68.03 8.10 76.13 200000 -select_key2 208.00 66.48 8.68 75.16 200000 -select_key2_return_key 200.00 66.41 7.77 74.18 200000 -select_key2_return_prim 204.00 64.75 7.90 72.65 200000 -select_key_prefix 208.00 66.62 8.81 75.43 200000 -select_key_prefix_join 11.00 4.30 0.16 4.46 100 -select_key_return_key 239.00 66.86 8.37 75.23 200000 -select_many_fields 795.00 6.97 0.48 7.45 2000 -select_query_cache 2549.00 3.25 0.52 3.77 10000 -select_query_cache2 2547.00 3.04 0.53 3.57 10000 -select_range 465.00 10.41 0.63 11.04 410 -select_range_key2 20341.00 4.22 0.52 4.74 25010 ++ -select_range_prefix 20344.00 6.32 1.04 7.36 25010 ++ -select_simple 5.00 2.73 0.30 3.03 10000 -select_simple_join 3.00 0.74 0.03 0.77 500 -update_big 6046.00 0.01 0.00 0.01 10 -update_of_key 136.00 16.21 11.85 28.06 50000 -update_of_key_big 320.00 0.16 0.09 0.25 501 -update_of_primary_key_many_keys 5365.00 0.16 0.03 0.19 256 -update_with_key 518.00 89.50 33.03 122.53 300000 -update_with_key_prefix 186.00 30.32 15.83 46.15 100000 -wisc_benchmark 16.00 3.30 0.65 3.95 114 -TOTALS 224650.00 1060.42 234.52 1294.94 2551551 ++++++++ diff --git a/sql-bench/Results/RUN-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..4025af26afd --- /dev/null +++ b/sql-bench/Results/RUN-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,103 @@ +Benchmark DBD suite: 2.13 +Date of test: 2001-06-12 18:11:16 +Running tests on: Linux 2.4.2-64GB-SMP i686 +Arguments: +Comments: Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F +Limits from: mysql,pg +Server version: PostgreSQL version 7.1.2 + +ATIS: Total time: 104 wallclock secs (12.54 usr 1.17 sys + 0.00 cusr 0.00 csys = 13.71 CPU) +alter-table: Total time: 50 wallclock secs ( 0.58 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.66 CPU) +big-tables: Total time: 1355 wallclock secs ( 8.68 usr 0.69 sys + 0.00 cusr 0.00 csys = 9.37 CPU) +connect: Total time: 547 wallclock secs (50.45 usr 14.25 sys + 0.00 cusr 0.00 csys = 64.70 CPU) +create: Total time: 9195 wallclock secs (31.22 usr 11.10 sys + 0.00 cusr 0.00 csys = 42.32 CPU) +insert: Estimated total time: 288864 wallclock secs (887.56 usr 201.43 sys + 0.00 cusr 0.00 csys = 1088.99 CPU) +select: Estimated total time: 13160 wallclock secs (70.90 usr 7.35 sys + 0.00 cusr 0.00 csys = 78.25 CPU) +wisconsin: Total time: 55 wallclock secs (12.69 usr 2.29 sys + 0.00 cusr 0.00 csys = 14.98 CPU) + +All 8 test executed successfully +Tests with estimated time have a + at end of line + +Totals per operation: +Operation seconds usr sys cpu tests +alter_table_add 48.00 0.31 0.04 0.35 992 +connect 141.00 7.82 1.62 9.44 10000 
+connect+select_1_row 192.00 10.79 2.47 13.26 10000 +connect+select_simple 154.00 10.43 2.60 13.03 10000 +count 131.00 0.06 0.00 0.06 100 +count_distinct 131.00 0.29 0.02 0.31 1000 +count_distinct_2 213.00 0.43 0.06 0.49 1000 +count_distinct_big 268.00 8.51 0.17 8.68 120 +count_distinct_group 384.00 1.12 0.07 1.19 1000 +count_distinct_group_on_key 485.00 0.38 0.03 0.41 1000 +count_distinct_group_on_key_parts 381.00 1.23 0.05 1.28 1000 +count_distinct_key_prefix 188.00 0.33 0.05 0.38 1000 +count_group_on_key_parts 332.00 1.20 0.04 1.24 1000 +count_on_key 1809.00 15.49 2.15 17.65 50100 + +create+drop 2924.00 11.10 2.41 13.51 10000 +create_MANY_tables 194.00 6.27 5.72 11.99 5000 +create_index 1.00 0.00 0.00 0.00 8 +create_key+drop 5464.00 11.00 2.30 13.30 10000 +create_table 1.00 0.05 0.00 0.05 31 +delete_all 3191.00 0.01 0.00 0.01 12 +delete_all_many_keys 118.00 0.05 0.04 0.09 1 +delete_big 0.00 0.00 0.00 0.00 1 +delete_big_many_keys 118.00 0.05 0.04 0.09 128 +delete_key 136.00 3.08 0.59 3.67 10000 +drop_index 0.00 0.00 0.00 0.00 8 +drop_table 0.00 0.02 0.00 0.02 28 +drop_table_when_MANY_tables 599.00 1.39 0.38 1.77 5000 +insert 353.00 103.74 26.20 129.94 350768 +insert_duplicates 120.00 29.00 13.77 42.77 100000 +insert_key 907.00 45.53 60.49 106.02 100000 +insert_many_fields 529.00 1.04 0.19 1.23 2000 +insert_select_1_key 111.00 0.00 0.00 0.00 1 +insert_select_2_keys 180.00 0.00 0.00 0.00 1 +min_max 59.00 0.03 0.00 0.03 60 +min_max_on_key 9941.00 25.90 4.02 29.92 85000 ++ +order_by_big 146.00 22.57 0.64 23.21 10 +order_by_big_key 145.00 26.12 1.23 27.35 10 +order_by_big_key2 133.00 22.62 0.93 23.55 10 +order_by_big_key_desc 145.00 25.80 1.41 27.21 10 +order_by_big_key_diff 139.00 22.46 0.67 23.13 10 +order_by_big_key_prefix 132.00 22.46 0.83 23.29 10 +order_by_key2_diff 7.00 2.09 0.04 2.13 500 +order_by_key_prefix 4.00 1.12 0.06 1.18 500 +order_by_range 4.00 1.11 0.04 1.15 500 +outer_join 4093.00 0.00 0.00 0.00 10 +outer_join_found 4086.00 0.00 0.00 0.00 10 +outer_join_not_found 203500.00 0.00 0.00 0.00 500 + +outer_join_on_key 3961.00 0.00 0.00 0.00 10 +select_1_row 6.00 2.56 0.45 3.01 10000 +select_2_rows 6.00 2.68 0.45 3.13 10000 +select_big 62.00 22.48 3.33 25.81 80 +select_big_str 35.00 10.82 5.73 16.55 10000 +select_column+column 8.00 2.73 0.39 3.12 10000 +select_diff_key 0.00 0.16 0.02 0.18 500 +select_distinct 22.00 2.10 0.26 2.36 800 +select_group 327.00 1.78 0.06 1.84 2711 +select_group_when_MANY_tables 14.00 1.46 0.28 1.74 5000 +select_join 4.00 0.67 0.09 0.76 100 +select_key 245.00 69.03 8.64 77.67 200000 +select_key2 209.00 67.94 8.08 76.02 200000 +select_key2_return_key 201.00 63.19 8.05 71.24 200000 +select_key2_return_prim 204.00 64.84 7.89 72.73 200000 +select_key_prefix 210.00 67.51 8.60 76.11 200000 +select_key_prefix_join 12.00 4.43 0.15 4.58 100 +select_key_return_key 240.00 67.26 8.61 75.87 200000 +select_many_fields 825.00 7.63 0.50 8.13 2000 +select_query_cache 2623.00 3.22 0.37 3.59 10000 +select_query_cache2 2622.00 2.73 0.47 3.20 10000 +select_range 491.00 11.40 0.50 11.90 410 +select_range_key2 21975.00 5.82 0.10 5.92 25010 ++ +select_range_prefix 21993.00 6.20 0.48 6.68 25010 ++ +select_simple 5.00 2.59 0.54 3.13 10000 +select_simple_join 3.00 0.73 0.10 0.83 500 +update_big 6612.00 0.00 0.00 0.00 10 +update_of_key 119.00 16.20 10.81 27.01 50000 +update_of_key_big 333.00 0.21 0.21 0.42 501 +update_of_primary_key_many_keys 6813.00 0.13 0.02 0.15 256 +update_with_key 567.00 90.20 25.08 115.28 300000 +update_with_key_prefix 244.00 29.03 5.64 34.67 100000 
+wisc_benchmark 16.00 3.54 1.02 4.56 114
+TOTALS 313344.00 1074.27 238.29 1312.57 2551551 ++++++++
diff --git a/sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 97f6abfa8a7..00000000000
--- a/sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,77 +0,0 @@
-Benchmark DBD suite: 2.8
-Date of test: 2000-08-17 11:51:48
-Running tests on: Linux 2.2.14-my-SMP i686
-Arguments: --fast
-Comments: Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F
-Limits from: mysql,pg
-Server version: PostgreSQL version 7.0.2
-
-ATIS: Total time: 87 wallclock secs (12.00 usr 0.99 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-alter-table: Total time: 29 wallclock secs ( 0.58 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-big-tables: Total time: 1247 wallclock secs ( 8.78 usr 0.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-connect: Total time: 484 wallclock secs (47.96 usr 17.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-create: Total time: 8745 wallclock secs (32.62 usr 4.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-insert: Estimated total time: 16506 wallclock secs (446.80 usr 59.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-select: Estimated total time: 5187 wallclock secs (127.12 usr 9.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-wisconsin: Total time: 60 wallclock secs (12.14 usr 1.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-All 8 test executed successfully
-Tests with estimated time have a + at end of line
-
-Totals per operation:
-Operation seconds usr sys cpu tests
-alter_table_add 27.00 0.32 0.05 0.00 992
-book-keeping 2680.00 0.04 0.03 0.00 25
-connect 129.00 9.33 3.59 0.00 10000
-connect+select_1_row 176.00 12.21 5.95 0.00 10000
-connect+select_simple 142.00 11.69 5.72 0.00 10000
-count 119.00 0.00 0.00 0.00 100
-count_distinct 185.00 0.71 0.16 0.00 1000
-count_distinct_big 667.00 82.38 2.86 0.00 1020
-count_distinct_group 246.00 1.12 0.06 0.00 1000
-count_distinct_group_on_key 145.00 0.33 0.07 0.00 1000
-count_distinct_group_on_key_parts 246.00 1.09 0.05 0.00 1000
-count_group_on_key_parts 216.00 1.37 0.02 0.00 1000
-count_on_key 1213.00 15.61 2.51 0.00 50100 +
-create+drop 3022.00 10.18 1.71 0.00 10000
-create_MANY_tables 455.00 8.09 1.12 0.00 10000
-create_index 1.00 0.00 0.00 0.00 8
-create_key+drop 3752.00 8.40 1.09 0.00 10000
-create_table 1.00 0.01 0.00 0.00 31
-delete_big 102.00 0.00 0.00 0.00 13
-delete_big_many_keys 2.00 0.00 0.00 0.00 2
-delete_key 15.00 2.84 0.49 0.00 10000
-drop_index 0.00 0.00 0.00 0.00 8
-drop_table 0.00 0.00 0.00 0.00 28
-drop_table_when_MANY_tables 1328.00 2.91 0.56 0.00 10000
-insert 375.00 103.83 16.23 0.00 350768
-insert_duplicates 321.00 88.94 13.94 0.00 300000
-insert_key 1367.00 32.13 5.30 0.00 100000
-insert_many_fields 356.00 1.12 0.19 0.00 2000
-min_max 53.00 0.02 0.00 0.00 60
-min_max_on_key 8723.00 25.11 3.76 0.00 85000 ++
-order_by 103.00 22.63 0.73 0.00 10
-order_by_key 103.00 22.46 0.65 0.00 10
-select_1_row 6.00 2.47 0.51 0.00 10000
-select_2_rows 7.00 3.12 0.44 0.00 10000
-select_big 61.00 26.33 1.34 0.00 10080
-select_column+column 8.00 2.78 0.39 0.00 10000
-select_diff_key 1.00 0.23 0.02 0.00 500
-select_distinct 17.00 2.06 0.13 0.00 800
-select_group 264.00 1.55 0.15 0.00 2711
-select_group_when_MANY_tables 188.00 3.03 0.46 0.00 10000
-select_join 13.00 4.77 0.26 0.00 200
-select_key 188.00 65.70 9.45 0.00 200000
-select_key_prefix 188.00 65.88 9.55 0.00 200000
-select_many_fields 886.00 7.63 0.72 0.00 2000
-select_range 66.00 7.49 0.74 0.00 25420
-select_range_prefix 44.00 6.28 0.79 0.00 25010
-select_simple 4.00 2.62 0.47 0.00 10000
-select_simple_join 3.00 0.79 0.07 0.00 500
-update_big 1832.00 0.00 0.00 0.00 500
-update_of_key 97.00 14.01 2.17 0.00 500
-update_of_key_big 559.00 0.21 0.01 0.00 501
-update_of_primary_key_many_keys 1491.00 0.07 0.01 0.00 256
-update_with_key 449.00 91.48 14.02 0.00 100000
-wisc_benchmark 15.00 3.21 0.28 0.00 114
-TOTALS 32657.00 776.58 108.82 0.00 1594267 +++
diff --git a/sql-bench/Results/RUN-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..8326a1864c3
--- /dev/null
+++ b/sql-bench/Results/RUN-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,104 @@
+Benchmark DBD suite: 2.13
+Date of test: 2001-06-12 18:14:29
+Running tests on: Linux 2.4.2-64GB-SMP i686
+Arguments: --fast
+Comments: Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F
+Limits from: mysql,pg
+Server version: PostgreSQL version 7.1.2
+
+ATIS: Total time: 105 wallclock secs (12.13 usr 1.14 sys + 0.00 cusr 0.00 csys = 13.27 CPU)
+alter-table: Total time: 51 wallclock secs ( 0.63 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.69 CPU)
+big-tables: Total time: 1356 wallclock secs ( 8.41 usr 0.76 sys + 0.00 cusr 0.00 csys = 9.17 CPU)
+connect: Total time: 550 wallclock secs (52.92 usr 14.30 sys + 0.00 cusr 0.00 csys = 67.22 CPU)
+create: Total time: 9195 wallclock secs (31.22 usr 11.10 sys + 0.00 cusr 0.00 csys = 42.32 CPU)
+insert: Estimated total time: 21187 wallclock secs (884.26 usr 225.15 sys + 0.00 cusr 0.00 csys = 1109.40 CPU)
+select: Estimated total time: 12852 wallclock secs (74.09 usr 9.62 sys + 0.00 cusr 0.00 csys = 83.71 CPU)
+wisconsin: Total time: 64 wallclock secs (13.06 usr 3.32 sys + 0.00 cusr 0.00 csys = 16.38 CPU)
+
+All 8 test executed successfully
+Tests with estimated time have a + at end of line
+
+Totals per operation:
+Operation seconds usr sys cpu tests
+alter_table_add 48.00 0.32 0.03 0.35 992
+book-keeping 3262.00 0.03 0.00 0.03 25
+connect 140.00 7.94 1.85 9.79 10000
+connect+select_1_row 190.00 10.78 2.23 13.01 10000
+connect+select_simple 155.00 10.57 2.71 13.28 10000
+count 132.00 0.04 0.00 0.04 100
+count_distinct 131.00 0.34 0.05 0.39 1000
+count_distinct_2 213.00 0.38 0.03 0.41 1000
+count_distinct_big 269.00 8.53 0.27 8.80 120
+count_distinct_group 385.00 1.14 0.09 1.23 1000
+count_distinct_group_on_key 209.00 0.35 0.09 0.44 1000
+count_distinct_group_on_key_parts 382.00 1.16 0.06 1.22 1000
+count_distinct_key_prefix 188.00 0.38 0.02 0.40 1000
+count_group_on_key_parts 332.00 1.14 0.03 1.17 1000
+count_on_key 1774.00 14.24 1.80 16.04 50100 +
+create+drop 2924.00 11.10 2.41 13.51 10000
+create_MANY_tables 194.00 6.27 5.72 11.99 5000
+create_index 0.00 0.00 0.00 0.00 8
+create_key+drop 5464.00 11.00 2.30 13.30 10000
+create_table 1.00 0.03 0.00 0.03 31
+delete_all 11.00 0.01 0.01 0.02 12
+delete_all_many_keys 3.00 0.05 0.00 0.05 1
+delete_big 2.00 0.01 0.00 0.01 1
+delete_big_many_keys 3.00 0.05 0.00 0.05 128
+delete_key 11.00 3.02 0.37 3.39 10000
+drop_index 1.00 0.00 0.00 0.00 8
+drop_table 0.00 0.00 0.00 0.00 28
+drop_table_when_MANY_tables 599.00 1.39 0.38 1.77 5000
+insert 359.00 104.39 28.15 132.54 350768
+insert_duplicates 111.00 28.41 9.26 37.67 100000
+insert_key 895.00 45.94 68.46 114.40 100000
+insert_many_fields 525.00 1.01 0.18 1.19 2000
+insert_select_1_key 45.00 0.00 0.00 0.00 1
+insert_select_2_keys 77.00 0.01 0.00 0.01 1
+min_max 58.00 0.01 0.00 0.01 60
+min_max_on_key 9948.00 29.82 5.49 35.30 85000 ++
+order_by_big 147.00 22.48 0.61 23.09 10
+order_by_big_key 150.00 25.91 1.24 27.15 10
+order_by_big_key2 137.00 22.59 0.71 23.30 10
+order_by_big_key_desc 147.00 25.81 1.23 27.04 10
+order_by_big_key_diff 143.00 22.68 0.55 23.23 10
+order_by_big_key_prefix 133.00 22.64 0.62 23.26 10
+order_by_key2_diff 7.00 2.07 0.04 2.11 500
+order_by_key_prefix 3.00 1.48 0.03 1.51 500
+order_by_range 4.00 1.04 0.04 1.08 500
+outer_join 253.00 0.00 0.00 0.00 10
+outer_join_found 243.00 0.00 0.00 0.00 10
+outer_join_not_found 242.00 0.00 0.01 0.01 500
+outer_join_on_key 238.00 0.00 0.00 0.00 10
+select_1_row 6.00 2.65 0.43 3.08 10000
+select_2_rows 7.00 2.81 0.40 3.21 10000
+select_big 56.00 22.70 2.29 24.99 80
+select_big_str 37.00 12.51 5.97 18.48 10000
+select_column+column 8.00 2.90 0.33 3.23 10000
+select_diff_key 1.00 0.21 0.00 0.21 500
+select_distinct 23.00 1.91 0.34 2.25 800
+select_group 318.00 1.54 0.12 1.66 2711
+select_group_when_MANY_tables 14.00 1.46 0.28 1.74 5000
+select_join 4.00 0.59 0.10 0.69 100
+select_key 213.00 67.07 8.38 75.45 200000
+select_key2 192.00 67.06 8.24 75.30 200000
+select_key2_return_key 183.00 63.93 8.32 72.25 200000
+select_key2_return_prim 188.00 64.56 8.71 73.27 200000
+select_key_prefix 192.00 67.39 7.56 74.95 200000
+select_key_prefix_join 12.00 4.47 0.12 4.59 100
+select_key_return_key 208.00 65.98 8.96 74.94 200000
+select_many_fields 823.00 7.36 0.55 7.91 2000
+select_query_cache 2643.00 3.20 0.43 3.63 10000
+select_query_cache2 2642.00 3.26 0.43 3.69 10000
+select_range 481.00 11.87 1.04 12.91 410
+select_range_key2 47.00 6.56 0.67 7.23 25010
+select_range_prefix 48.00 6.63 0.65 7.28 25010
+select_simple 5.00 2.74 0.38 3.12 10000
+select_simple_join 4.00 0.71 0.18 0.89 500
+update_big 3883.00 0.01 0.00 0.01 10
+update_of_key 90.00 14.87 5.98 20.85 50000
+update_of_key_big 647.00 0.12 0.06 0.18 501
+update_of_primary_key_many_keys 835.00 0.10 0.09 0.19 256
+update_with_key 470.00 87.85 41.80 129.65 300000
+update_with_key_prefix 170.00 31.13 15.28 46.41 100000
+wisc_benchmark 18.00 3.58 0.20 3.78 114
+TOTALS 45356.00 1076.29 265.36 1341.64 2551576 +++
diff --git a/sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 9a85f9a1754..00000000000
--- a/sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:11:37
-
-Testing of ALTER TABLE
-Testing with 1000 columns and 1000 rows in 20 steps
-Insert data into the table
-Time for insert (1000) 0 wallclock secs ( 0.05 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for alter_table_add (992): 252 wallclock secs ( 0.20 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for create_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for drop_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 260 wallclock secs ( 0.26 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index d225f1fddaa..00000000000
--- a/sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'PostgreSQL version ???'
at 2000-12-05 5:20:15 - -Testing of ALTER TABLE -Testing with 1000 columns and 1000 rows in 20 steps -Insert data into the table -Time for insert (1000) 0 wallclock secs ( 0.28 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for alter_table_add (992): 28 wallclock secs ( 0.41 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for drop_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 29 wallclock secs ( 0.71 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 62a8ccfdb01..00000000000 --- a/sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,14 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2001-06-03 4:42:04 - -Testing of ALTER TABLE -Testing with 1000 columns and 1000 rows in 20 steps -Insert data into the table -Time for insert (1000) 1 wallclock secs ( 0.21 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.29 CPU) - -Time for alter_table_add (992): 49 wallclock secs ( 0.26 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.32 CPU) - -Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for drop_index (8): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) - -Total time: 52 wallclock secs ( 0.49 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.63 CPU) diff --git a/sql-bench/Results/alter-table-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..06c5236dca0 --- /dev/null +++ b/sql-bench/Results/alter-table-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,14 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:16:18 + +Testing of ALTER TABLE +Testing with 1000 columns and 1000 rows in 20 steps +Insert data into the table +Time for insert (1000) 0 wallclock secs ( 0.27 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.31 CPU) + +Time for alter_table_add (992): 48 wallclock secs ( 0.31 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.35 CPU) + +Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Time for drop_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Total time: 50 wallclock secs ( 0.58 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.66 CPU) diff --git a/sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 18b03b3ae0a..00000000000 --- a/sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,14 +0,0 @@ -Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:09:56 - -Testing of ALTER TABLE -Testing with 1000 columns and 1000 rows in 20 steps -Insert data into the table -Time for insert (1000) 1 wallclock secs ( 0.26 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for alter_table_add (992): 27 wallclock secs ( 0.32 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for drop_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 29 wallclock secs ( 0.58 usr 0.10 sys + 0.00 
cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/alter-table-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..bb492b6b989 --- /dev/null +++ b/sql-bench/Results/alter-table-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,14 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 17:54:48 + +Testing of ALTER TABLE +Testing with 1000 columns and 1000 rows in 20 steps +Insert data into the table +Time for insert (1000) 1 wallclock secs ( 0.30 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.33 CPU) + +Time for alter_table_add (992): 48 wallclock secs ( 0.32 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.35 CPU) + +Time for create_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Time for drop_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Total time: 51 wallclock secs ( 0.63 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.69 CPU) diff --git a/sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index b5532123a9b..00000000000 --- a/sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,19 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:15:57 - -Testing of some unusual tables -All tests are done 1000 times with 1000 fields - -Testing table with 1000 fields -Testing select * from table with 1 record -Time to select_many_fields(1000): 9 wallclock secs ( 3.76 usr 3.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select all_fields from table with 1 record -Time to select_many_fields(1000): 13 wallclock secs ( 4.09 usr 3.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert VALUES() -Time to insert_many_fields(1000): 3 wallclock secs ( 0.41 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert (all_fields) VALUES() -Time to insert_many_fields(1000): 6 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 31 wallclock secs ( 8.32 usr 6.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 10e17dea64a..00000000000 --- a/sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,19 +0,0 @@ -Testing server 'PostgreSQL version ???' 
at 2000-12-05 5:20:45 - -Testing of some unusual tables -All tests are done 1000 times with 1000 fields - -Testing table with 1000 fields -Testing select * from table with 1 record -Time to select_many_fields(1000): 402 wallclock secs ( 3.75 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select all_fields from table with 1 record -Time to select_many_fields(1000): 489 wallclock secs ( 4.32 usr 0.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert VALUES() -Time to insert_many_fields(1000): 144 wallclock secs ( 0.38 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert (all_fields) VALUES() -Time to insert_many_fields(1000): 213 wallclock secs ( 0.80 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 1248 wallclock secs ( 9.27 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 3ac4af4f5ea..00000000000 --- a/sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,19 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2001-06-03 4:42:56 - -Testing of some unusual tables -All tests are done 1000 times with 1000 fields - -Testing table with 1000 fields -Testing select * from table with 1 record -Time to select_many_fields(1000): 338 wallclock secs ( 3.28 usr 0.22 sys + 0.00 cusr 0.00 csys = 3.50 CPU) - -Testing select all_fields from table with 1 record -Time to select_many_fields(1000): 457 wallclock secs ( 3.69 usr 0.26 sys + 0.00 cusr 0.00 csys = 3.95 CPU) - -Testing insert VALUES() -Time to insert_many_fields(1000): 229 wallclock secs ( 0.40 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.47 CPU) - -Testing insert (all_fields) VALUES() -Time to insert_many_fields(1000): 299 wallclock secs ( 0.72 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.86 CPU) - -Total time: 1324 wallclock secs ( 8.10 usr 0.69 sys + 0.00 cusr 0.00 csys = 8.79 CPU) diff --git a/sql-bench/Results/big-tables-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..4ae51da87c6 --- /dev/null +++ b/sql-bench/Results/big-tables-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,19 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:17:10 + +Testing of some unusual tables +All tests are done 1000 times with 1000 fields + +Testing table with 1000 fields +Testing select * from table with 1 record +Time to select_many_fields(1000): 354 wallclock secs ( 3.70 usr 0.19 sys + 0.00 cusr 0.00 csys = 3.89 CPU) + +Testing select all_fields from table with 1 record +Time to select_many_fields(1000): 471 wallclock secs ( 3.93 usr 0.31 sys + 0.00 cusr 0.00 csys = 4.24 CPU) + +Testing insert VALUES() +Time to insert_many_fields(1000): 230 wallclock secs ( 0.34 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.46 CPU) + +Testing insert (all_fields) VALUES() +Time to insert_many_fields(1000): 299 wallclock secs ( 0.70 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.77 CPU) + +Total time: 1355 wallclock secs ( 8.68 usr 0.69 sys + 0.00 cusr 0.00 csys = 9.37 CPU) diff --git a/sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index a5c7613be55..00000000000 --- a/sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,28 +0,0 @@ -Testing server 'PostgreSQL 
version 7.0.2' at 2000-08-17 3:10:25 - -Testing of some unusual tables -All tests are done 1000 times with 1000 fields - -Testing table with 1000 fields -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select * from table with 1 record -Time to select_many_fields(1000): 398 wallclock secs ( 3.66 usr 0.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select all_fields from table with 1 record -Time to select_many_fields(1000): 488 wallclock secs ( 3.97 usr 0.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert VALUES() -Time to insert_many_fields(1000): 143 wallclock secs ( 0.41 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert (all_fields) VALUES() -Time to insert_many_fields(1000): 213 wallclock secs ( 0.71 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 1247 wallclock secs ( 8.78 usr 0.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/big-tables-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..1758aac5e38 --- /dev/null +++ b/sql-bench/Results/big-tables-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,25 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 17:55:39 + +Testing of some unusual tables +All tests are done 1000 times with 1000 fields + +Testing table with 1000 fields +Time for book-keeping (1): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Testing select * from table with 1 record +Time to select_many_fields(1000): 353 wallclock secs ( 3.56 usr 0.31 sys + 0.00 cusr 0.00 csys = 3.87 CPU) + +Testing select all_fields from table with 1 record +Time to select_many_fields(1000): 470 wallclock secs ( 3.80 usr 0.24 sys + 0.00 cusr 0.00 csys = 4.04 CPU) + +Testing insert VALUES() +Time to insert_many_fields(1000): 229 wallclock secs ( 0.38 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.45 CPU) + +Time for book-keeping (1): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing insert (all_fields) VALUES() +Time to insert_many_fields(1000): 296 wallclock secs ( 0.63 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.74 CPU) + +Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Total time: 1356 wallclock secs ( 8.41 usr 0.76 sys + 0.00 cusr 0.00 csys = 9.17 CPU) diff --git a/sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 6084f81343f..00000000000 --- a/sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,30 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:16:28 - -Testing the speed of connecting to the server and sending of data -All tests are done 10000 times - -Testing connection/disconnect -Time to connect (10000): 11 wallclock secs ( 6.50 usr 1.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test connect/simple select/disconnect -Time for connect+select_simple (10000): 12 wallclock secs ( 7.41 usr 2.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test simple select -Time for 
select_simple (10000): 2 wallclock secs ( 0.47 usr 0.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing connect/select 1 row from table/disconnect -Time to connect+select_1_row (10000): 14 wallclock secs ( 7.49 usr 2.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select 1 row from table -Time to select_1_row (10000): 2 wallclock secs ( 0.54 usr 0.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select 2 rows from table -Time to select_2_rows (10000): 4 wallclock secs ( 0.55 usr 0.65 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test select with aritmetic (+) -Time for select_column+column (10000): 3 wallclock secs ( 0.51 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing retrieval of big records (7000 bytes) -Time to select_big (10000): 6 wallclock secs ( 3.12 usr 1.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 54 wallclock secs (26.60 usr 10.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 31a92939c56..00000000000 --- a/sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,30 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2000-12-05 5:41:34 - -Testing the speed of connecting to the server and sending of data -All tests are done 10000 times - -Testing connection/disconnect -Time to connect (10000): 125 wallclock secs ( 9.11 usr 3.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test connect/simple select/disconnect -Time for connect+select_simple (10000): 140 wallclock secs (12.15 usr 5.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test simple select -Time for select_simple (10000): 4 wallclock secs ( 2.96 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing connect/select 1 row from table/disconnect -Time to connect+select_1_row (10000): 173 wallclock secs (12.56 usr 5.56 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select 1 row from table -Time to select_1_row (10000): 7 wallclock secs ( 3.10 usr 0.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select 2 rows from table -Time to select_2_rows (10000): 6 wallclock secs ( 2.75 usr 0.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test select with aritmetic (+) -Time for select_column+column (10000): 9 wallclock secs ( 2.41 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing retrieval of big records (7000 bytes) -Time to select_big (10000): 8 wallclock secs ( 3.74 usr 0.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 472 wallclock secs (48.80 usr 17.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 86af371cbb9..00000000000 --- a/sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,30 +0,0 @@ -Testing server 'PostgreSQL version ???' 
at 2001-06-03 5:05:01 - -Testing the speed of connecting to the server and sending of data -All tests are done 10000 times - -Testing connection/disconnect -Time to connect (10000): 143 wallclock secs ( 8.01 usr 1.89 sys + 0.00 cusr 0.00 csys = 9.90 CPU) - -Test connect/simple select/disconnect -Time for connect+select_simple (10000): 157 wallclock secs (10.42 usr 2.41 sys + 0.00 cusr 0.00 csys = 12.83 CPU) - -Test simple select -Time for select_simple (10000): 5 wallclock secs ( 2.73 usr 0.30 sys + 0.00 cusr 0.00 csys = 3.03 CPU) - -Testing connect/select 1 row from table/disconnect -Time to connect+select_1_row (10000): 195 wallclock secs (10.94 usr 2.31 sys + 0.00 cusr 0.00 csys = 13.25 CPU) - -Testing select 1 row from table -Time to select_1_row (10000): 6 wallclock secs ( 2.25 usr 0.46 sys + 0.00 cusr 0.00 csys = 2.71 CPU) - -Testing select 2 rows from table -Time to select_2_rows (10000): 7 wallclock secs ( 2.77 usr 0.38 sys + 0.00 cusr 0.00 csys = 3.15 CPU) - -Test select with aritmetic (+) -Time for select_column+column (10000): 8 wallclock secs ( 2.78 usr 0.41 sys + 0.00 cusr 0.00 csys = 3.19 CPU) - -Testing retrieval of big records (65000 bytes) -Time to select_big (10000): 34 wallclock secs (11.75 usr 5.84 sys + 0.00 cusr 0.00 csys = 17.59 CPU) - -Total time: 555 wallclock secs (51.66 usr 14.01 sys + 0.00 cusr 0.00 csys = 65.67 CPU) diff --git a/sql-bench/Results/connect-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..2cdf15596f5 --- /dev/null +++ b/sql-bench/Results/connect-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,30 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:39:45 + +Testing the speed of connecting to the server and sending of data +All tests are done 10000 times + +Testing connection/disconnect +Time to connect (10000): 141 wallclock secs ( 7.82 usr 1.62 sys + 0.00 cusr 0.00 csys = 9.44 CPU) + +Test connect/simple select/disconnect +Time for connect+select_simple (10000): 154 wallclock secs (10.43 usr 2.60 sys + 0.00 cusr 0.00 csys = 13.03 CPU) + +Test simple select +Time for select_simple (10000): 5 wallclock secs ( 2.59 usr 0.54 sys + 0.00 cusr 0.00 csys = 3.13 CPU) + +Testing connect/select 1 row from table/disconnect +Time to connect+select_1_row (10000): 192 wallclock secs (10.79 usr 2.47 sys + 0.00 cusr 0.00 csys = 13.26 CPU) + +Testing select 1 row from table +Time to select_1_row (10000): 6 wallclock secs ( 2.56 usr 0.45 sys + 0.00 cusr 0.00 csys = 3.01 CPU) + +Testing select 2 rows from table +Time to select_2_rows (10000): 6 wallclock secs ( 2.68 usr 0.45 sys + 0.00 cusr 0.00 csys = 3.13 CPU) + +Test select with aritmetic (+) +Time for select_column+column (10000): 8 wallclock secs ( 2.73 usr 0.39 sys + 0.00 cusr 0.00 csys = 3.12 CPU) + +Testing retrieval of big records (65000 bytes) +Time to select_big_str (10000): 35 wallclock secs (10.82 usr 5.73 sys + 0.00 cusr 0.00 csys = 16.55 CPU) + +Total time: 547 wallclock secs (50.45 usr 14.25 sys + 0.00 cusr 0.00 csys = 64.70 CPU) diff --git a/sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 21556d6f7ce..00000000000 --- a/sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,42 +0,0 @@ -Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:31:12 - -Testing the speed of connecting to the server and sending of data -All 
tests are done 10000 times - -Testing connection/disconnect -Time to connect (10000): 129 wallclock secs ( 9.33 usr 3.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test connect/simple select/disconnect -Time for connect+select_simple (10000): 142 wallclock secs (11.69 usr 5.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test simple select -Time for select_simple (10000): 4 wallclock secs ( 2.62 usr 0.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing connect/select 1 row from table/disconnect -Time to connect+select_1_row (10000): 176 wallclock secs (12.21 usr 5.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select 1 row from table -Time to select_1_row (10000): 6 wallclock secs ( 2.47 usr 0.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing select 2 rows from table -Time to select_2_rows (10000): 7 wallclock secs ( 3.12 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test select with aritmetic (+) -Time for select_column+column (10000): 8 wallclock secs ( 2.78 usr 0.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing retrieval of big records (7000 bytes) -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time to select_big (10000): 8 wallclock secs ( 3.71 usr 0.70 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 484 wallclock secs (47.96 usr 17.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/connect-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..00ea04c49a3 --- /dev/null +++ b/sql-bench/Results/connect-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,38 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 18:18:15 + +Testing the speed of connecting to the server and sending of data +All tests are done 10000 times + +Testing connection/disconnect +Time to connect (10000): 140 wallclock secs ( 7.94 usr 1.85 sys + 0.00 cusr 0.00 csys = 9.79 CPU) + +Test connect/simple select/disconnect +Time for connect+select_simple (10000): 155 wallclock secs (10.57 usr 2.71 sys + 0.00 cusr 0.00 csys = 13.28 CPU) + +Test simple select +Time for select_simple (10000): 5 wallclock secs ( 2.74 usr 0.38 sys + 0.00 cusr 0.00 csys = 3.12 CPU) + +Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing connect/select 1 row from table/disconnect +Time to connect+select_1_row (10000): 190 wallclock secs (10.78 usr 2.23 sys + 0.00 cusr 0.00 csys = 13.01 CPU) + +Testing select 1 row from table +Time to select_1_row (10000): 6 wallclock secs ( 2.65 usr 0.43 sys + 0.00 cusr 0.00 csys = 3.08 CPU) + +Testing select 2 rows from table +Time to select_2_rows (10000): 7 wallclock secs ( 2.81 usr 0.40 sys + 0.00 cusr 0.00 csys = 3.21 CPU) + +Test select with aritmetic (+) +Time for select_column+column (10000): 8 wallclock secs ( 2.90 usr 0.33 sys + 0.00 cusr 0.00 csys = 3.23 CPU) + +Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing retrieval of big records (65000 bytes) +Time for 
book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Time to select_big_str (10000): 37 wallclock secs (12.51 usr 5.97 sys + 0.00 cusr 0.00 csys = 18.48 CPU) + +Time for book-keeping (1): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Total time: 550 wallclock secs (52.92 usr 14.30 sys + 0.00 cusr 0.00 csys = 67.22 CPU) diff --git a/sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 5b90c4778c6..00000000000 --- a/sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,18 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:17:23 - -Testing the speed of creating and droping tables -Testing with 10000 tables and 10000 loop count - -Testing create of tables -Time for create_MANY_tables (10000): 35 wallclock secs ( 1.84 usr 0.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Accessing tables -Time to select_group_when_MANY_tables (10000): 6 wallclock secs ( 0.71 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing drop -Time for drop_table_when_MANY_tables (10000): 15 wallclock secs ( 0.10 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing create+drop -Time for create+drop (10000): 26 wallclock secs ( 2.17 usr 1.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for create_key+drop (10000): 40 wallclock secs ( 3.68 usr 0.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 122 wallclock secs ( 8.51 usr 3.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 1272418baab..00000000000 --- a/sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,18 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2000-12-05 5:49:26 - -Testing the speed of creating and droping tables -Testing with 10000 tables and 10000 loop count - -Testing create of tables -Time for create_MANY_tables (10000): 448 wallclock secs ( 7.42 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Accessing tables -Time to select_group_when_MANY_tables (10000): 187 wallclock secs ( 2.71 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing drop -Time for drop_table_when_MANY_tables (10000): 1324 wallclock secs ( 3.41 usr 0.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing create+drop -Time for create+drop (10000): 2954 wallclock secs (11.24 usr 1.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for create_key+drop (10000): 4055 wallclock secs (10.98 usr 1.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 8968 wallclock secs (35.76 usr 5.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 7e33d155a20..00000000000 --- a/sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,18 +0,0 @@ -Testing server 'PostgreSQL version ???' 
at 2001-06-03 5:14:17 - -Testing the speed of creating and droping tables -Testing with 5000 tables and 10000 loop count - -Testing create of tables -Time for create_MANY_tables (5000): 160 wallclock secs ( 3.67 usr 1.35 sys + 0.00 cusr 0.00 csys = 5.02 CPU) - -Accessing tables -Time to select_group_when_MANY_tables (5000): 15 wallclock secs ( 1.57 usr 0.78 sys + 0.00 cusr 0.00 csys = 2.35 CPU) - -Testing drop -Time for drop_table_when_MANY_tables (5000): 772 wallclock secs ( 1.29 usr 0.28 sys + 0.00 cusr 0.00 csys = 1.57 CPU) - -Testing create+drop -Time for create+drop (10000): 3280 wallclock secs (10.74 usr 1.89 sys + 0.00 cusr 0.00 csys = 12.63 CPU) -Time for create_key+drop (10000): 5781 wallclock secs (10.70 usr 1.53 sys + 0.00 cusr 0.00 csys = 12.23 CPU) -Total time: 10008 wallclock secs (27.97 usr 5.83 sys + 0.00 cusr 0.00 csys = 33.80 CPU) diff --git a/sql-bench/Results/create-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..d4ed9d43980 --- /dev/null +++ b/sql-bench/Results/create-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,18 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:48:52 + +Testing the speed of creating and droping tables +Testing with 5000 tables and 10000 loop count + +Testing create of tables +Time for create_MANY_tables (5000): 194 wallclock secs ( 6.27 usr 5.72 sys + 0.00 cusr 0.00 csys = 11.99 CPU) + +Accessing tables +Time to select_group_when_MANY_tables (5000): 14 wallclock secs ( 1.46 usr 0.28 sys + 0.00 cusr 0.00 csys = 1.74 CPU) + +Testing drop +Time for drop_table_when_MANY_tables (5000): 599 wallclock secs ( 1.39 usr 0.38 sys + 0.00 cusr 0.00 csys = 1.77 CPU) + +Testing create+drop +Time for create+drop (10000): 2924 wallclock secs (11.10 usr 2.41 sys + 0.00 cusr 0.00 csys = 13.51 CPU) +Time for create_key+drop (10000): 5464 wallclock secs (11.00 usr 2.30 sys + 0.00 cusr 0.00 csys = 13.30 CPU) +Total time: 9195 wallclock secs (31.22 usr 11.10 sys + 0.00 cusr 0.00 csys = 42.32 CPU) diff --git a/sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index c22ceeb6781..00000000000 --- a/sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,18 +0,0 @@ -Testing server 'PostgreSQL version 7.0.2' at 2000-08-15 17:09:50 - -Testing the speed of creating and droping tables -Testing with 10000 tables and 10000 loop count - -Testing create of tables -Time for create_MANY_tables (10000): 455 wallclock secs ( 8.09 usr 1.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Accessing tables -Time to select_group_when_MANY_tables (10000): 188 wallclock secs ( 3.03 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing drop -Time for drop_table_when_MANY_tables (10000): 1328 wallclock secs ( 2.91 usr 0.56 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing create+drop -Time for create+drop (10000): 3022 wallclock secs (10.18 usr 1.71 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for create_key+drop (10000): 3752 wallclock secs ( 8.40 usr 1.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 8745 wallclock secs (32.62 usr 4.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/create-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..d4ed9d43980 --- /dev/null +++ 
b/sql-bench/Results/create-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,18 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:48:52 + +Testing the speed of creating and droping tables +Testing with 5000 tables and 10000 loop count + +Testing create of tables +Time for create_MANY_tables (5000): 194 wallclock secs ( 6.27 usr 5.72 sys + 0.00 cusr 0.00 csys = 11.99 CPU) + +Accessing tables +Time to select_group_when_MANY_tables (5000): 14 wallclock secs ( 1.46 usr 0.28 sys + 0.00 cusr 0.00 csys = 1.74 CPU) + +Testing drop +Time for drop_table_when_MANY_tables (5000): 599 wallclock secs ( 1.39 usr 0.38 sys + 0.00 cusr 0.00 csys = 1.77 CPU) + +Testing create+drop +Time for create+drop (10000): 2924 wallclock secs (11.10 usr 2.41 sys + 0.00 cusr 0.00 csys = 13.51 CPU) +Time for create_key+drop (10000): 5464 wallclock secs (11.00 usr 2.30 sys + 0.00 cusr 0.00 csys = 13.30 CPU) +Total time: 9195 wallclock secs (31.22 usr 11.10 sys + 0.00 cusr 0.00 csys = 42.32 CPU) diff --git a/sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 2ecb653e7f7..00000000000 --- a/sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,58 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:19:26 - -Testing the speed of inserting data into 1 table and do some selects on it. -The tests are done with a table that has 100000 rows. - -Generating random keys -Creating tables -Inserting 100000 rows in order -Inserting 100000 rows in reverse order -Inserting 100000 rows in random order -Time for insert (300000): 82 wallclock secs (17.82 usr 12.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for insert_duplicates (300000): 82 wallclock secs (17.82 usr 12.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Retrieving data from the table -Time for select_big (10:3000000): 31 wallclock secs (19.92 usr 10.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_key (10:3000000): 31 wallclock secs (20.12 usr 10.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by (10:3000000): 48 wallclock secs (20.14 usr 16.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_diff_key (500:1000): 205 wallclock secs ( 0.19 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range_prefix (5010:42084): 11 wallclock secs ( 2.64 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (5010:42084): 10 wallclock secs ( 2.77 usr 0.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key_prefix (200000): 127 wallclock secs (65.92 usr 14.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key (200000): 125 wallclock secs (67.84 usr 14.15 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test of compares with simple ranges -Time for select_range_prefix (20000:43500): 8 wallclock secs ( 3.45 usr 1.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (20000:43500): 7 wallclock secs ( 3.71 usr 0.90 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (111): 58 wallclock secs ( 0.04 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max_on_key (15000): 8 wallclock secs ( 4.68 usr 1.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max (60): 32 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_on_key (100): 56 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count (100): 46 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 
0.00 CPU) -Time for count_distinct_big (20): 63 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of keys with functions -Time for update_of_key (500): 23 wallclock secs ( 2.80 usr 2.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for update_of_key_big (501): 33 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update with key -Time for update_with_key (100000): 97 wallclock secs (14.16 usr 13.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of all rows -Time for update_big (500): 65 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing delete -Time for delete_key (10000): 3 wallclock secs ( 0.44 usr 0.38 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for delete_big (12): 17 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Insert into table with 16 keys and with a primary key with 16 parts -Time for insert_key (100000): 91 wallclock secs ( 8.12 usr 4.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of keys -Time for update_of_primary_key_many_keys (256): 54 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Deleting everything from table -Time for delete_big_many_keys (2): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 1332 wallclock secs (254.96 usr 103.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index da6ee641174..00000000000 --- a/sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,98 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2000-12-05 8:18:54 - -Testing the speed of inserting data into 1 table and do some selects on it. -The tests are done with a table that has 100000 rows. 
- -Generating random keys -Creating tables -Inserting 100000 rows in order -Inserting 100000 rows in reverse order -Inserting 100000 rows in random order -Time for insert (300000): 7486 wallclock secs (94.98 usr 16.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing insert of duplicates -Time for insert_duplicates (100000): 3055 wallclock secs (60.75 usr 8.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Retrieving data from the table -Time for select_big (10:3000000): 54 wallclock secs (21.95 usr 0.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key (10:3000000): 115 wallclock secs (22.06 usr 0.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key_desc (10:3000000): 116 wallclock secs (22.15 usr 0.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key2 (10:3000000): 118 wallclock secs (22.07 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key_diff (10:3000000): 126 wallclock secs (22.20 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big (10:3000000): 121 wallclock secs (21.92 usr 0.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_range (500:125750): 16 wallclock secs ( 1.21 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_key (500:125750): 15 wallclock secs ( 1.09 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_key2_diff (500:250500): 19 wallclock secs ( 2.00 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_diff_key (500:1000): 13 wallclock secs ( 0.24 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -180 queries in 180 loops of 5000 loops took 653 seconds -Estimated time for select_range_prefix (5000:1512): 18138 wallclock secs ( 5.00 usr 0.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -165 queries in 165 loops of 5000 loops took 614 seconds -Estimated time for select_range_key2 (5000:1386): 18606 wallclock secs ( 3.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -24340 queries in 12170 loops of 100000 loops took 601 seconds -Estimated time for select_key_prefix (200000): 4938 wallclock secs (67.63 usr 10.85 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -24198 queries in 12099 loops of 100000 loops took 601 seconds -Estimated time for select_key (200000): 4967 wallclock secs (68.44 usr 12.65 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -24362 queries in 12181 loops of 100000 loops took 601 seconds -Estimated time for select_key2 (200000): 4933 wallclock secs (67.48 usr 11.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test of compares with simple ranges -Note: Query took longer then time-limit: 600 -Estimating end time based on: -1920 queries in 48 loops of 500 loops took 603 seconds -Estimated time for select_range_prefix (20000:4176): 6281 wallclock secs ( 4.69 usr 0.52 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -1480 queries in 37 loops of 500 loops took 611 seconds -Estimated time for select_range_key2 (20000:3219): 8256 wallclock secs ( 4.59 usr 1.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (111): 240 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then 
time-limit: 600 -Estimating end time based on: -1314 queries in 219 loops of 2500 loops took 603 seconds -Estimated time for min_max_on_key (15000): 6883 wallclock secs ( 4.00 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max (60): 58 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_on_key (100): 120 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count (100): 130 wallclock secs ( 0.01 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (20): 143 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of keys with functions -Time for update_of_key (50000): 2460 wallclock secs (15.33 usr 3.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for update_of_key_big (501): 444 wallclock secs ( 0.20 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update with key -Time for update_with_key (300000): 14806 wallclock secs (89.73 usr 16.29 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of all rows -Time for update_big (10): 1894 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing INSERT INTO ... SELECT -Time for insert_select_1_key (1): 49 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for insert_select_2_keys (1): 43 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for drop table(2): 20 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing delete -Time for delete_key (10000): 283 wallclock secs ( 2.91 usr 0.52 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for delete_all (12): 341 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Insert into table with 16 keys and with a primary key with 16 parts -Time for insert_key (100000): 3693 wallclock secs (33.29 usr 5.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of keys -Time for update_of_primary_key_many_keys (256): 1164 wallclock secs ( 0.08 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Deleting rows from the table -Time for delete_big_many_keys (128): 30 wallclock secs ( 0.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Deleting everything from table -Time for delete_all_many_keys (1): 31 wallclock secs ( 0.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Estimated total time: 110214 wallclock secs (659.27 usr 91.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 68d7052ef6e..00000000000 --- a/sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,103 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2001-06-03 8:01:05 - -Testing the speed of inserting data into 1 table and do some selects on it. -The tests are done with a table that has 100000 rows. 
- -Generating random keys -Creating tables -Inserting 100000 rows in order -Inserting 100000 rows in reverse order -Inserting 100000 rows in random order -Time for insert (300000): 302 wallclock secs (89.07 usr 22.07 sys + 0.00 cusr 0.00 csys = 111.14 CPU) - -Testing insert of duplicates -Time for insert_duplicates (100000): 120 wallclock secs (30.53 usr 10.61 sys + 0.00 cusr 0.00 csys = 41.14 CPU) - -Retrieving data from the table -Time for select_big (10:3000000): 58 wallclock secs (21.31 usr 3.95 sys + 0.00 cusr 0.00 csys = 25.26 CPU) -Time for order_by_big_key (10:3000000): 145 wallclock secs (24.01 usr 1.27 sys + 0.00 cusr 0.00 csys = 25.28 CPU) -Time for order_by_big_key_desc (10:3000000): 145 wallclock secs (23.93 usr 1.27 sys + 0.00 cusr 0.00 csys = 25.20 CPU) -Time for order_by_big_key_prefix (10:3000000): 133 wallclock secs (21.16 usr 0.80 sys + 0.00 cusr 0.00 csys = 21.96 CPU) -Time for order_by_big_key2 (10:3000000): 132 wallclock secs (21.28 usr 0.64 sys + 0.00 cusr 0.00 csys = 21.92 CPU) -Time for order_by_big_key_diff (10:3000000): 138 wallclock secs (21.30 usr 0.56 sys + 0.00 cusr 0.00 csys = 21.86 CPU) -Time for order_by_big (10:3000000): 148 wallclock secs (21.11 usr 0.72 sys + 0.00 cusr 0.00 csys = 21.83 CPU) -Time for order_by_range (500:125750): 4 wallclock secs ( 1.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.19 CPU) -Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 1.04 usr 0.08 sys + 0.00 cusr 0.00 csys = 1.12 CPU) -Time for order_by_key2_diff (500:250500): 7 wallclock secs ( 1.94 usr 0.03 sys + 0.00 cusr 0.00 csys = 1.97 CPU) -Time for select_diff_key (500:1000): 0 wallclock secs ( 0.21 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.23 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -210 queries in 210 loops of 5010 loops took 616 seconds -Estimated time for select_range_prefix (5010:1764): 14696 wallclock secs ( 2.62 usr 0.48 sys + 0.00 cusr 0.00 csys = 3.10 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -210 queries in 210 loops of 5010 loops took 615 seconds -Estimated time for select_range_key2 (5010:1764): 14672 wallclock secs ( 1.67 usr 0.24 sys + 0.00 cusr 0.00 csys = 1.91 CPU) -Time for select_key_prefix (200000): 208 wallclock secs (66.62 usr 8.81 sys + 0.00 cusr 0.00 csys = 75.43 CPU) -Time for select_key (200000): 243 wallclock secs (68.03 usr 8.10 sys + 0.00 cusr 0.00 csys = 76.13 CPU) -Time for select_key_return_key (200000): 239 wallclock secs (66.86 usr 8.37 sys + 0.00 cusr 0.00 csys = 75.23 CPU) -Time for select_key2 (200000): 208 wallclock secs (66.48 usr 8.68 sys + 0.00 cusr 0.00 csys = 75.16 CPU) -Time for select_key2_return_key (200000): 200 wallclock secs (66.41 usr 7.77 sys + 0.00 cusr 0.00 csys = 74.18 CPU) -Time for select_key2_return_prim (200000): 204 wallclock secs (64.75 usr 7.90 sys + 0.00 cusr 0.00 csys = 72.65 CPU) - -Test of compares with simple ranges -Note: Query took longer then time-limit: 600 -Estimating end time based on: -2160 queries in 54 loops of 500 loops took 610 seconds -Estimated time for select_range_prefix (20000:4698): 5648 wallclock secs ( 3.70 usr 0.56 sys + 0.00 cusr 0.00 csys = 4.26 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -2120 queries in 53 loops of 500 loops took 601 seconds -Estimated time for select_range_key2 (20000:4611): 5669 wallclock secs ( 2.55 usr 0.28 sys + 0.00 cusr 0.00 csys = 2.83 CPU) -Time for select_group (111): 274 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.05 CPU) 
-Note: Query took longer then time-limit: 600 -Estimating end time based on: -1320 queries in 220 loops of 2500 loops took 601 seconds -Estimated time for min_max_on_key (15000): 6829 wallclock secs ( 5.23 usr 0.91 sys + 0.00 cusr 0.00 csys = 6.14 CPU) -Time for min_max (60): 60 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.02 CPU) -Time for count_on_key (100): 116 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.02 CPU) -Time for count (100): 131 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU) -Time for count_distinct_big (20): 201 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU) - -Testing update of keys with functions -Time for update_of_key (50000): 136 wallclock secs (16.21 usr 11.85 sys + 0.00 cusr 0.00 csys = 28.06 CPU) -Time for update_of_key_big (501): 320 wallclock secs ( 0.16 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.25 CPU) - -Testing update with key -Time for update_with_key (300000): 518 wallclock secs (89.50 usr 33.03 sys + 0.00 cusr 0.00 csys = 122.53 CPU) -Time for update_with_key_prefix (100000): 186 wallclock secs (30.32 usr 15.83 sys + 0.00 cusr 0.00 csys = 46.15 CPU) - -Testing update of all rows -Time for update_big (10): 6046 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) - -Testing left outer join -Time for outer_join_on_key (10:10): 2307 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for outer_join (10:10): 2539 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU) -Time for outer_join_found (10:10): 2515 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -3 queries in 3 loops of 500 loops took 748 seconds -Estimated time for outer_join_not_found (500:500): 124666 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing INSERT INTO ... 
SELECT -Time for insert_select_1_key (1): 86 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for insert_select_2_keys (1): 196 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for drop table(2): 22 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing delete -Time for delete_key (10000): 85 wallclock secs ( 3.19 usr 0.48 sys + 0.00 cusr 0.00 csys = 3.67 CPU) -Time for delete_all (12): 2478 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) - -Insert into table with 16 keys and with a primary key with 16 parts -Time for insert_key (100000): 804 wallclock secs (47.08 usr 47.06 sys + 0.00 cusr 0.00 csys = 94.14 CPU) - -Testing update of keys -Time for update_of_primary_key_many_keys (256): 5365 wallclock secs ( 0.16 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.19 CPU) - -Deleting rows from the table -Time for delete_big_many_keys (128): 93 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU) - -Deleting everything from table -Time for delete_all_many_keys (1): 94 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU) - -Estimated total time: 199360 wallclock secs (879.85 usr 202.59 sys + 0.00 cusr 0.00 csys = 1082.45 CPU) diff --git a/sql-bench/Results/insert-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..5cbb52e1ddc --- /dev/null +++ b/sql-bench/Results/insert-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,103 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 4:22:08 + +Testing the speed of inserting data into 1 table and do some selects on it. +The tests are done with a table that has 100000 rows. + +Generating random keys +Creating tables +Inserting 100000 rows in order +Inserting 100000 rows in reverse order +Inserting 100000 rows in random order +Time for insert (300000): 304 wallclock secs (88.91 usr 24.12 sys + 0.00 cusr 0.00 csys = 113.03 CPU) + +Testing insert of duplicates +Time for insert_duplicates (100000): 120 wallclock secs (29.00 usr 13.77 sys + 0.00 cusr 0.00 csys = 42.77 CPU) + +Retrieving data from the table +Time for select_big (10:3000000): 61 wallclock secs (22.36 usr 3.32 sys + 0.00 cusr 0.00 csys = 25.68 CPU) +Time for order_by_big_key (10:3000000): 145 wallclock secs (26.12 usr 1.23 sys + 0.00 cusr 0.00 csys = 27.35 CPU) +Time for order_by_big_key_desc (10:3000000): 145 wallclock secs (25.80 usr 1.41 sys + 0.00 cusr 0.00 csys = 27.21 CPU) +Time for order_by_big_key_prefix (10:3000000): 132 wallclock secs (22.46 usr 0.83 sys + 0.00 cusr 0.00 csys = 23.29 CPU) +Time for order_by_big_key2 (10:3000000): 133 wallclock secs (22.62 usr 0.93 sys + 0.00 cusr 0.00 csys = 23.55 CPU) +Time for order_by_big_key_diff (10:3000000): 139 wallclock secs (22.46 usr 0.67 sys + 0.00 cusr 0.00 csys = 23.13 CPU) +Time for order_by_big (10:3000000): 146 wallclock secs (22.57 usr 0.64 sys + 0.00 cusr 0.00 csys = 23.21 CPU) +Time for order_by_range (500:125750): 4 wallclock secs ( 1.11 usr 0.04 sys + 0.00 cusr 0.00 csys = 1.15 CPU) +Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 1.12 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.18 CPU) +Time for order_by_key2_diff (500:250500): 7 wallclock secs ( 2.09 usr 0.04 sys + 0.00 cusr 0.00 csys = 2.13 CPU) +Time for select_diff_key (500:1000): 0 wallclock secs ( 0.16 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.18 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +195 queries in 
195 loops of 5010 loops took 627 seconds +Estimated time for select_range_prefix (5010:1638): 16109 wallclock secs ( 2.83 usr 0.00 sys + 0.00 cusr 0.00 csys = 2.83 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +195 queries in 195 loops of 5010 loops took 626 seconds +Estimated time for select_range_key2 (5010:1638): 16083 wallclock secs ( 1.80 usr 0.00 sys + 0.00 cusr 0.00 csys = 1.80 CPU) +Time for select_key_prefix (200000): 210 wallclock secs (67.51 usr 8.60 sys + 0.00 cusr 0.00 csys = 76.11 CPU) +Time for select_key (200000): 245 wallclock secs (69.03 usr 8.64 sys + 0.00 cusr 0.00 csys = 77.67 CPU) +Time for select_key_return_key (200000): 240 wallclock secs (67.26 usr 8.61 sys + 0.00 cusr 0.00 csys = 75.87 CPU) +Time for select_key2 (200000): 209 wallclock secs (67.94 usr 8.08 sys + 0.00 cusr 0.00 csys = 76.02 CPU) +Time for select_key2_return_key (200000): 201 wallclock secs (63.19 usr 8.05 sys + 0.00 cusr 0.00 csys = 71.24 CPU) +Time for select_key2_return_prim (200000): 204 wallclock secs (64.84 usr 7.89 sys + 0.00 cusr 0.00 csys = 72.73 CPU) + +Test of compares with simple ranges +Note: Query took longer then time-limit: 600 +Estimating end time based on: +2080 queries in 52 loops of 500 loops took 612 seconds +Estimated time for select_range_prefix (20000:4524): 5884 wallclock secs ( 3.37 usr 0.48 sys + 0.00 cusr 0.00 csys = 3.85 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +2040 queries in 51 loops of 500 loops took 601 seconds +Estimated time for select_range_key2 (20000:4437): 5892 wallclock secs ( 4.02 usr 0.10 sys + 0.00 cusr 0.00 csys = 4.12 CPU) +Time for select_group (111): 272 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +1410 queries in 235 loops of 2500 loops took 602 seconds +Estimated time for min_max_on_key (15000): 6404 wallclock secs ( 4.36 usr 0.96 sys + 0.00 cusr 0.00 csys = 5.32 CPU) +Time for min_max (60): 59 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU) +Time for count_on_key (100): 114 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU) +Time for count (100): 131 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.06 CPU) +Time for count_distinct_big (20): 203 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Testing update of keys with functions +Time for update_of_key (50000): 119 wallclock secs (16.20 usr 10.81 sys + 0.00 cusr 0.00 csys = 27.01 CPU) +Time for update_of_key_big (501): 333 wallclock secs ( 0.21 usr 0.21 sys + 0.00 cusr 0.00 csys = 0.42 CPU) + +Testing update with key +Time for update_with_key (300000): 567 wallclock secs (90.20 usr 25.08 sys + 0.00 cusr 0.00 csys = 115.28 CPU) +Time for update_with_key_prefix (100000): 244 wallclock secs (29.03 usr 5.64 sys + 0.00 cusr 0.00 csys = 34.67 CPU) + +Testing update of all rows +Time for update_big (10): 6612 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing left outer join +Time for outer_join_on_key (10:10): 3961 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join (10:10): 4093 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join_found (10:10): 4086 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +2 queries in 2 loops of 500 loops took 814 seconds 
+Estimated time for outer_join_not_found (500:500): 203500 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing INSERT INTO ... SELECT +Time for insert_select_1_key (1): 111 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for insert_select_2_keys (1): 180 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for drop table(2): 18 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing delete +Time for delete_key (10000): 136 wallclock secs ( 3.08 usr 0.59 sys + 0.00 cusr 0.00 csys = 3.67 CPU) +Time for delete_all (12): 3191 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Insert into table with 16 keys and with a primary key with 16 parts +Time for insert_key (100000): 907 wallclock secs (45.53 usr 60.49 sys + 0.00 cusr 0.00 csys = 106.02 CPU) + +Testing update of keys +Time for update_of_primary_key_many_keys (256): 6813 wallclock secs ( 0.13 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.15 CPU) + +Deleting rows from the table +Time for delete_big_many_keys (128): 118 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU) + +Deleting everything from table +Time for delete_all_many_keys (1): 118 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU) + +Estimated total time: 288864 wallclock secs (887.56 usr 201.43 sys + 0.00 cusr 0.00 csys = 1088.99 CPU) diff --git a/sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index d38cc95311e..00000000000 --- a/sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,85 +0,0 @@ -Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:39:16 - -Testing the speed of inserting data into 1 table and do some selects on it. -The tests are done with a table that has 100000 rows. 
- -Generating random keys -Creating tables -Inserting 100000 rows in order -Inserting 100000 rows in reverse order -Inserting 100000 rows in random order -Time for insert (300000): 315 wallclock secs (88.93 usr 13.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 6 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for insert_duplicates (300000): 321 wallclock secs (88.94 usr 13.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 3 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Retrieving data from the table -Time for select_big (10:3000000): 52 wallclock secs (22.48 usr 0.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_key (10:3000000): 103 wallclock secs (22.46 usr 0.65 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by (10:3000000): 103 wallclock secs (22.63 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_diff_key (500:1000): 1 wallclock secs ( 0.23 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range_prefix (5010:42084): 30 wallclock secs ( 2.82 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (5010:42084): 29 wallclock secs ( 3.04 usr 0.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key_prefix (200000): 188 wallclock secs (65.88 usr 9.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key (200000): 188 wallclock secs (65.70 usr 9.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Test of compares with simple ranges -Time for select_range_prefix (20000:43500): 14 wallclock secs ( 3.46 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (20000:43500): 13 wallclock secs ( 3.53 usr 0.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (111): 223 wallclock secs ( 0.04 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -1446 queries in 241 loops of 2500 loops took 602 seconds -Estimated time for min_max_on_key (15000): 6244 wallclock secs ( 4.77 usr 0.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max (60): 53 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_on_key (100): 112 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count (100): 119 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (20): 138 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of keys with functions -Time for update_of_key (500): 97 wallclock secs (14.01 usr 2.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -NOTICE: Vacuum: table not found -Time for book-keeping (1): 41 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for update_of_key_big (501): 559 wallclock secs ( 0.21 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 115 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update with key -Time for update_with_key (100000): 449 wallclock secs (91.48 usr 14.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of all rows -Time for update_big (500): 1832 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing delete -Time for delete_key (10000): 15 wallclock secs ( 2.84 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for delete_big (12): 100 wallclock secs ( 0.00 usr 0.00 sys + 0.00 
cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Insert into table with 16 keys and with a primary key with 16 parts -Time for insert_key (100000): 1367 wallclock secs (32.13 usr 5.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 8 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing update of keys -Time for update_of_primary_key_many_keys (256): 1491 wallclock secs ( 0.07 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 2489 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Deleting everything from table -Time for delete_big_many_keys (2): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Estimated total time: 16506 wallclock secs (446.80 usr 59.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/insert-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..7cf90e5b34b --- /dev/null +++ b/sql-bench/Results/insert-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,104 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 18:27:26 + +Testing the speed of inserting data into 1 table and do some selects on it. +The tests are done with a table that has 100000 rows. + +Generating random keys +Creating tables +Inserting 100000 rows in order +Inserting 100000 rows in reverse order +Inserting 100000 rows in random order +Time for insert (300000): 296 wallclock secs (89.01 usr 24.43 sys + 0.00 cusr 0.00 csys = 113.44 CPU) + +Time for book-keeping (1): 8 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing insert of duplicates +Time for insert_duplicates (100000): 111 wallclock secs (28.41 usr 9.26 sys + 0.00 cusr 0.00 csys = 37.67 CPU) + +Retrieving data from the table +Time for select_big (10:3000000): 55 wallclock secs (22.58 usr 2.28 sys + 0.00 cusr 0.00 csys = 24.86 CPU) +Time for order_by_big_key (10:3000000): 150 wallclock secs (25.91 usr 1.24 sys + 0.00 cusr 0.00 csys = 27.15 CPU) +Time for order_by_big_key_desc (10:3000000): 147 wallclock secs (25.81 usr 1.23 sys + 0.00 cusr 0.00 csys = 27.04 CPU) +Time for order_by_big_key_prefix (10:3000000): 133 wallclock secs (22.64 usr 0.62 sys + 0.00 cusr 0.00 csys = 23.26 CPU) +Time for order_by_big_key2 (10:3000000): 137 wallclock secs (22.59 usr 0.71 sys + 0.00 cusr 0.00 csys = 23.30 CPU) +Time for order_by_big_key_diff (10:3000000): 143 wallclock secs (22.68 usr 0.55 sys + 0.00 cusr 0.00 csys = 23.23 CPU) +Time for order_by_big (10:3000000): 147 wallclock secs (22.48 usr 0.61 sys + 0.00 cusr 0.00 csys = 23.09 CPU) +Time for order_by_range (500:125750): 4 wallclock secs ( 1.04 usr 0.04 sys + 0.00 cusr 0.00 csys = 1.08 CPU) +Time for order_by_key_prefix (500:125750): 3 wallclock secs ( 1.48 usr 0.03 sys + 0.00 cusr 0.00 csys = 1.51 CPU) +Time for order_by_key2_diff (500:250500): 7 wallclock secs ( 2.07 usr 0.04 sys + 0.00 cusr 0.00 csys = 2.11 CPU) +Time for select_diff_key (500:1000): 1 wallclock secs ( 0.21 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.21 CPU) +Time for select_range_prefix (5010:42084): 34 wallclock secs ( 2.90 usr 0.27 sys + 0.00 cusr 0.00 csys 
= 3.17 CPU) +Time for select_range_key2 (5010:42084): 33 wallclock secs ( 2.72 usr 0.30 sys + 0.00 cusr 0.00 csys = 3.02 CPU) +Time for select_key_prefix (200000): 192 wallclock secs (67.39 usr 7.56 sys + 0.00 cusr 0.00 csys = 74.95 CPU) +Time for select_key (200000): 213 wallclock secs (67.07 usr 8.38 sys + 0.00 cusr 0.00 csys = 75.45 CPU) +Time for select_key_return_key (200000): 208 wallclock secs (65.98 usr 8.96 sys + 0.00 cusr 0.00 csys = 74.94 CPU) +Time for select_key2 (200000): 192 wallclock secs (67.06 usr 8.24 sys + 0.00 cusr 0.00 csys = 75.30 CPU) +Time for select_key2_return_key (200000): 183 wallclock secs (63.93 usr 8.32 sys + 0.00 cusr 0.00 csys = 72.25 CPU) +Time for select_key2_return_prim (200000): 188 wallclock secs (64.56 usr 8.71 sys + 0.00 cusr 0.00 csys = 73.27 CPU) + +Test of compares with simple ranges +Time for select_range_prefix (20000:43500): 14 wallclock secs ( 3.73 usr 0.38 sys + 0.00 cusr 0.00 csys = 4.11 CPU) +Time for select_range_key2 (20000:43500): 14 wallclock secs ( 3.84 usr 0.37 sys + 0.00 cusr 0.00 csys = 4.21 CPU) +Time for select_group (111): 267 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.06 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +1398 queries in 233 loops of 2500 loops took 601 seconds +Estimated time for min_max_on_key (15000): 6448 wallclock secs ( 4.83 usr 0.54 sys + 0.00 cusr 0.00 csys = 5.36 CPU) +Time for min_max (60): 58 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) +Time for count_on_key (100): 115 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU) +Time for count (100): 132 wallclock secs ( 0.04 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.04 CPU) +Time for count_distinct_big (20): 204 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Testing update of keys with functions +Time for update_of_key (50000): 90 wallclock secs (14.87 usr 5.98 sys + 0.00 cusr 0.00 csys = 20.85 CPU) +Time for book-keeping (1): 58 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Time for update_of_key_big (501): 647 wallclock secs ( 0.12 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.18 CPU) + +Time for book-keeping (1): 236 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing update with key +Time for update_with_key (300000): 470 wallclock secs (87.85 usr 41.80 sys + 0.00 cusr 0.00 csys = 129.65 CPU) +Time for update_with_key_prefix (100000): 170 wallclock secs (31.13 usr 15.28 sys + 0.00 cusr 0.00 csys = 46.41 CPU) + +Testing update of all rows +Time for update_big (10): 3883 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Testing left outer join +Time for outer_join_on_key (10:10): 238 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join (10:10): 253 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join_found (10:10): 243 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join_not_found (500:10): 242 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Testing INSERT INTO ... 
SELECT +Time for insert_select_1_key (1): 45 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for insert_select_2_keys (1): 77 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) +Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for book-keeping (1): 1626 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + + +Testing delete +Time for delete_key (10000): 11 wallclock secs ( 3.02 usr 0.37 sys + 0.00 cusr 0.00 csys = 3.39 CPU) +Time for delete_all (12): 11 wallclock secs ( 0.01 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.02 CPU) + +Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Insert into table with 16 keys and with a primary key with 16 parts +Time for insert_key (100000): 895 wallclock secs (45.94 usr 68.46 sys + 0.00 cusr 0.00 csys = 114.40 CPU) + +Time for book-keeping (1): 16 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Testing update of keys +Time for update_of_primary_key_many_keys (256): 835 wallclock secs ( 0.10 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.19 CPU) + +Time for book-keeping (1): 1298 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Deleting rows from the table +Time for delete_big_many_keys (128): 3 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU) + +Deleting everything from table +Time for delete_all_many_keys (1): 3 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU) + +Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Estimated total time: 21187 wallclock secs (884.26 usr 225.15 sys + 0.00 cusr 0.00 csys = 1109.40 CPU) diff --git a/sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 966d3010631..00000000000 --- a/sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,23 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:41:39 - -Testing the speed of selecting on keys that consist of many parts -The test-table has 10000 rows and the test is done with 12 ranges. 
- -Creating table -Inserting 10000 rows -Time to insert (10000): 4 wallclock secs ( 0.85 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing big selects on the table -Time for select_big (70:17207): 0 wallclock secs ( 0.10 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (410:75949): 3 wallclock secs ( 0.79 usr 0.18 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max_on_key (70000): 202 wallclock secs (20.23 usr 4.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_on_key (50000): 517 wallclock secs (16.44 usr 3.18 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for count_group_on_key_parts (1000:0): 61 wallclock secs ( 1.03 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Testing count(distinct) on the table -Time for count_distinct (1000:2000): 124 wallclock secs ( 0.57 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key (1000:6000): 65 wallclock secs ( 0.35 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key_parts (1000:100000): 77 wallclock secs ( 1.07 usr 0.35 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group (1000:100000): 77 wallclock secs ( 1.14 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (1000:10000000): 566 wallclock secs (70.60 usr 55.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 1696 wallclock secs (113.17 usr 64.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 60e5348568b..00000000000 --- a/sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,29 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2000-12-05 20:00:31 - -Testing the speed of selecting on keys that consist of many parts -The test-table has 10000 rows and the test is done with 12 ranges. 
- -Creating table -Inserting 10000 rows -Time to insert (10000): 254 wallclock secs ( 3.11 usr 0.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing big selects on the table -Time for select_big (70:17207): 2 wallclock secs ( 0.17 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (410:75949): 35 wallclock secs ( 0.87 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -9807 queries in 1401 loops of 10000 loops took 601 seconds -Estimated time for min_max_on_key (70000): 4289 wallclock secs (20.56 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -12395 queries in 2479 loops of 10000 loops took 601 seconds -Estimated time for count_on_key (50000): 2424 wallclock secs (16.70 usr 2.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for count_group_on_key_parts (1000:100000): 242 wallclock secs ( 1.19 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Testing count(distinct) on the table -Time for count_distinct (2000:2000): 235 wallclock secs ( 0.76 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key (1000:6000): 174 wallclock secs ( 0.44 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key_parts (1000:100000): 270 wallclock secs ( 1.43 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group (1000:100000): 271 wallclock secs ( 1.27 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (100:1000000): 57 wallclock secs ( 8.24 usr 0.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Estimated total time: 8255 wallclock secs (54.76 usr 6.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index ea359e81a2b..00000000000 --- a/sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,36 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2001-06-03 16:37:16 - -Testing the speed of selecting on keys that consist of many parts -The test-table has 10000 rows and the test is done with 500 ranges. 
- -Creating table -Inserting 10000 rows -Time to insert (10000): 10 wallclock secs ( 2.96 usr 0.39 sys + 0.00 cusr 0.00 csys = 3.35 CPU) - -Test if the database has a query cache -Time for select_query_cache (10000): 2549 wallclock secs ( 3.25 usr 0.52 sys + 0.00 cusr 0.00 csys = 3.77 CPU) - -Time for select_query_cache2 (10000): 2547 wallclock secs ( 3.04 usr 0.53 sys + 0.00 cusr 0.00 csys = 3.57 CPU) - -Testing big selects on the table -Time for select_big (70:17207): 1 wallclock secs ( 0.17 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.17 CPU) -Time for select_range (410:1057904): 465 wallclock secs (10.41 usr 0.63 sys + 0.00 cusr 0.00 csys = 11.04 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -11326 queries in 1618 loops of 10000 loops took 601 seconds -Estimated time for min_max_on_key (70000): 3714 wallclock secs (20.15 usr 3.46 sys + 0.00 cusr 0.00 csys = 23.61 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -17320 queries in 3464 loops of 10000 loops took 601 seconds -Estimated time for count_on_key (50000): 1734 wallclock secs (15.76 usr 1.99 sys + 0.00 cusr 0.00 csys = 17.75 CPU) - -Time for count_group_on_key_parts (1000:100000): 331 wallclock secs ( 1.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.19 CPU) -Testing count(distinct) on the table -Time for count_distinct_key_prefix (1000:1000): 179 wallclock secs ( 0.28 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.35 CPU) -Time for count_distinct (1000:1000): 132 wallclock secs ( 0.31 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.37 CPU) -Time for count_distinct_2 (1000:1000): 213 wallclock secs ( 0.37 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.40 CPU) -Time for count_distinct_group_on_key (1000:6000): 488 wallclock secs ( 0.41 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.44 CPU) -Time for count_distinct_group_on_key_parts (1000:100000): 383 wallclock secs ( 1.10 usr 0.07 sys + 0.00 cusr 0.00 csys = 1.17 CPU) -Time for count_distinct_group (1000:100000): 384 wallclock secs ( 1.07 usr 0.08 sys + 0.00 cusr 0.00 csys = 1.15 CPU) -Time for count_distinct_big (100:1000000): 65 wallclock secs ( 7.88 usr 0.25 sys + 0.00 cusr 0.00 csys = 8.13 CPU) -Estimated total time: 13197 wallclock secs (68.30 usr 8.18 sys + 0.00 cusr 0.00 csys = 76.48 CPU) diff --git a/sql-bench/Results/select-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..c53058af7bf --- /dev/null +++ b/sql-bench/Results/select-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,36 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 15:10:04 + +Testing the speed of selecting on keys that consist of many parts +The test-table has 10000 rows and the test is done with 500 ranges. 
+ +Creating table +Inserting 10000 rows +Time to insert (10000): 9 wallclock secs ( 2.91 usr 0.30 sys + 0.00 cusr 0.00 csys = 3.21 CPU) + +Test if the database has a query cache +Time for select_query_cache (10000): 2623 wallclock secs ( 3.22 usr 0.37 sys + 0.00 cusr 0.00 csys = 3.59 CPU) + +Time for select_query_cache2 (10000): 2622 wallclock secs ( 2.73 usr 0.47 sys + 0.00 cusr 0.00 csys = 3.20 CPU) + +Testing big selects on the table +Time for select_big (70:17207): 1 wallclock secs ( 0.12 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.13 CPU) +Time for select_range (410:1057904): 491 wallclock secs (11.40 usr 0.50 sys + 0.00 cusr 0.00 csys = 11.90 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +11893 queries in 1699 loops of 10000 loops took 601 seconds +Estimated time for min_max_on_key (70000): 3537 wallclock secs (21.54 usr 3.06 sys + 0.00 cusr 0.00 csys = 24.60 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +17720 queries in 3544 loops of 10000 loops took 601 seconds +Estimated time for count_on_key (50000): 1695 wallclock secs (15.49 usr 2.14 sys + 0.00 cusr 0.00 csys = 17.64 CPU) + +Time for count_group_on_key_parts (1000:100000): 332 wallclock secs ( 1.20 usr 0.04 sys + 0.00 cusr 0.00 csys = 1.24 CPU) +Testing count(distinct) on the table +Time for count_distinct_key_prefix (1000:1000): 188 wallclock secs ( 0.33 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.38 CPU) +Time for count_distinct (1000:1000): 131 wallclock secs ( 0.29 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.31 CPU) +Time for count_distinct_2 (1000:1000): 213 wallclock secs ( 0.43 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.49 CPU) +Time for count_distinct_group_on_key (1000:6000): 485 wallclock secs ( 0.38 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.41 CPU) +Time for count_distinct_group_on_key_parts (1000:100000): 381 wallclock secs ( 1.23 usr 0.05 sys + 0.00 cusr 0.00 csys = 1.28 CPU) +Time for count_distinct_group (1000:100000): 384 wallclock secs ( 1.12 usr 0.07 sys + 0.00 cusr 0.00 csys = 1.19 CPU) +Time for count_distinct_big (100:1000000): 65 wallclock secs ( 8.50 usr 0.17 sys + 0.00 cusr 0.00 csys = 8.67 CPU) +Estimated total time: 13160 wallclock secs (70.90 usr 7.35 sys + 0.00 cusr 0.00 csys = 78.25 CPU) diff --git a/sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 14c86c54550..00000000000 --- a/sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,38 +0,0 @@ -Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 6:40:22 - -Testing the speed of selecting on keys that consist of many parts -The test-table has 10000 rows and the test is done with 12 ranges. 
- -Creating table -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting 10000 rows -Time to insert (10000): 12 wallclock secs ( 3.13 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Testing big selects on the table -Time for select_big (70:17207): 1 wallclock secs ( 0.14 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (410:75949): 24 wallclock secs ( 0.92 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -16968 queries in 2424 loops of 10000 loops took 601 seconds -Estimated time for min_max_on_key (70000): 2479 wallclock secs (20.34 usr 2.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Note: Query took longer then time-limit: 600 -Estimating end time based on: -27270 queries in 5454 loops of 10000 loops took 601 seconds -Estimated time for count_on_key (50000): 1101 wallclock secs (15.60 usr 2.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time for count_group_on_key_parts (1000:0): 216 wallclock secs ( 1.37 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Testing count(distinct) on the table -Time for count_distinct (1000:2000): 185 wallclock secs ( 0.71 usr 0.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key (1000:6000): 145 wallclock secs ( 0.33 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key_parts (1000:100000): 246 wallclock secs ( 1.09 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group (1000:100000): 246 wallclock secs ( 1.12 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (1000:10000000): 529 wallclock secs (82.37 usr 2.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Estimated total time: 5187 wallclock secs (127.12 usr 9.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/select-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..4f624d80112 --- /dev/null +++ b/sql-bench/Results/select-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,42 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 22:43:08 + +Testing the speed of selecting on keys that consist of many parts +The test-table has 10000 rows and the test is done with 500 ranges. 
+ +Creating table +Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Inserting 10000 rows +Time to insert (10000): 16 wallclock secs ( 3.01 usr 0.33 sys + 0.00 cusr 0.00 csys = 3.34 CPU) + +Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Test if the database has a query cache +Time for select_query_cache (10000): 2643 wallclock secs ( 3.20 usr 0.43 sys + 0.00 cusr 0.00 csys = 3.63 CPU) + +Time for select_query_cache2 (10000): 2642 wallclock secs ( 3.26 usr 0.43 sys + 0.00 cusr 0.00 csys = 3.69 CPU) + +Testing big selects on the table +Time for select_big (70:17207): 1 wallclock secs ( 0.12 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.13 CPU) +Time for select_range (410:1057904): 481 wallclock secs (11.87 usr 1.04 sys + 0.00 cusr 0.00 csys = 12.91 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +12019 queries in 1717 loops of 10000 loops took 601 seconds +Estimated time for min_max_on_key (70000): 3500 wallclock secs (24.99 usr 4.95 sys + 0.00 cusr 0.00 csys = 29.94 CPU) +Note: Query took longer then time-limit: 600 +Estimating end time based on: +18105 queries in 3621 loops of 10000 loops took 601 seconds +Estimated time for count_on_key (50000): 1659 wallclock secs (14.19 usr 1.80 sys + 0.00 cusr 0.00 csys = 15.99 CPU) + +Time for count_group_on_key_parts (1000:100000): 332 wallclock secs ( 1.14 usr 0.03 sys + 0.00 cusr 0.00 csys = 1.17 CPU) +Testing count(distinct) on the table +Time for count_distinct_key_prefix (1000:1000): 188 wallclock secs ( 0.38 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.40 CPU) +Time for count_distinct (1000:1000): 131 wallclock secs ( 0.34 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.39 CPU) +Time for count_distinct_2 (1000:1000): 213 wallclock secs ( 0.38 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.41 CPU) +Time for count_distinct_group_on_key (1000:6000): 209 wallclock secs ( 0.35 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.44 CPU) +Time for count_distinct_group_on_key_parts (1000:100000): 382 wallclock secs ( 1.16 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.22 CPU) +Time for count_distinct_group (1000:100000): 385 wallclock secs ( 1.14 usr 0.09 sys + 0.00 cusr 0.00 csys = 1.23 CPU) +Time for count_distinct_big (100:1000000): 65 wallclock secs ( 8.53 usr 0.26 sys + 0.00 cusr 0.00 csys = 8.79 CPU) +Time for book-keeping (1): 2 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Estimated total time: 12852 wallclock secs (74.09 usr 9.62 sys + 0.00 cusr 0.00 csys = 83.71 CPU) diff --git a/sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 6c2f9506a2c..00000000000 --- a/sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,14 +0,0 @@ -Testing server 'MySQL 3.23.22 beta' at 2000-08-17 20:09:56 - -Wisconsin benchmark test - -Time for create_table (3): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (31000): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Running actual benchmark -Time for wisc_benchmark (114): 4 wallclock secs ( 1.66 usr 0.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 6 wallclock secs ( 1.67 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git 
a/sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 23f8f49f12c..00000000000 --- a/sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,14 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2000-12-05 20:46:15 - -Wisconsin benchmark test - -Time for create_table (3): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (31000): 793 wallclock secs ( 8.99 usr 1.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time to delete_big (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Running actual benchmark -Time for wisc_benchmark (114): 18 wallclock secs ( 3.04 usr 0.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 813 wallclock secs (12.05 usr 2.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 079272b708e..00000000000 --- a/sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,14 +0,0 @@ -Testing server 'PostgreSQL version ???' at 2001-06-03 19:06:27 - -Wisconsin benchmark test - -Time for create_table (3): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (31000): 33 wallclock secs ( 9.09 usr 1.58 sys + 0.00 cusr 0.00 csys = 10.67 CPU) -Time to delete_big (1): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) - -Running actual benchmark -Time for wisc_benchmark (114): 16 wallclock secs ( 3.30 usr 0.65 sys + 0.00 cusr 0.00 csys = 3.95 CPU) - -Total time: 52 wallclock secs (12.40 usr 2.23 sys + 0.00 cusr 0.00 csys = 14.63 CPU) diff --git a/sql-bench/Results/wisconsin-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..9e5dceb2b76 --- /dev/null +++ b/sql-bench/Results/wisconsin-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,14 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 17:42:14 + +Wisconsin benchmark test + +Time for create_table (3): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Inserting data +Time to insert (31000): 32 wallclock secs ( 9.14 usr 1.27 sys + 0.00 cusr 0.00 csys = 10.41 CPU) +Time to delete_big (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Running actual benchmark +Time for wisc_benchmark (114): 16 wallclock secs ( 3.54 usr 1.02 sys + 0.00 cusr 0.00 csys = 4.56 CPU) + +Total time: 55 wallclock secs (12.69 usr 2.29 sys + 0.00 cusr 0.00 csys = 14.98 CPU) diff --git a/sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg deleted file mode 100644 index 58cc9c98970..00000000000 --- a/sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg +++ /dev/null @@ -1,26 +0,0 @@ -Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 7:27:10 - -Wisconsin benchmark test - -Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Inserting data -Time to insert (31000): 39 wallclock secs ( 8.92 usr 1.47 sys + 
0.00 cusr 0.00 csys = 0.00 CPU) -NOTICE: Vacuum: table not found -Time for book-keeping (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Time to delete_big (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Running actual benchmark -Time for wisc_benchmark (114): 15 wallclock secs ( 3.21 usr 0.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -NOTICE: Vacuum: table not found -Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) - -Total time: 60 wallclock secs (12.14 usr 1.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/wisconsin-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg new file mode 100644 index 00000000000..a58c22fc6f2 --- /dev/null +++ b/sql-bench/Results/wisconsin-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg @@ -0,0 +1,22 @@ +Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:11:23 + +Wisconsin benchmark test + +Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Inserting data +Time to insert (31000): 39 wallclock secs ( 9.47 usr 3.11 sys + 0.00 cusr 0.00 csys = 12.58 CPU) +Time for book-keeping (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Time to delete_big (1): 2 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU) + +Time for book-keeping (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Running actual benchmark +Time for wisc_benchmark (114): 18 wallclock secs ( 3.58 usr 0.20 sys + 0.00 cusr 0.00 csys = 3.78 CPU) + +Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Total time: 64 wallclock secs (13.06 usr 3.32 sys + 0.00 cusr 0.00 csys = 16.38 CPU) diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt index 7a86a4368e7..2f41fbf30c2 100644 --- a/sql/share/german/errmsg.txt +++ b/sql/share/german/errmsg.txt @@ -2,7 +2,7 @@ This file is public domain and comes with NO WARRANTY of any kind Dirk Munzinger (dmun@4t2.com) - Version: 17.03.1999 */ + Version: 07.06.2001 */ "hashchk", "isamchk", @@ -196,19 +196,19 @@ "Netzfehler beim Lesen vom Master", "Netzfehler beim Schreiben zum Master", "Kann keinen FULLTEXT-Index finden der der Spaltenliste entspricht", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again', -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, check permisions on master.info", -"Could not create slave thread, check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", +"Kann das aktuelle Kommando wegen aktiver Tabellensperre oder aktiver Transaktion nicht ausführen", +"Unbekannte System-Variabel '%-.64'", +"Tabelle '%-.64s' ist als defekt makiert und sollte repariert werden", +"Tabelle '%-.64s' ist als defekt makiert und der letzte (automatische) Reparaturversuch schlug fehl.", +"Warnung: Das Rollback konnte bei einigen Tabellen, die nicht mittels Transaktionen geändert wurden, nicht ausgeführt werden.", +"Multi-Statement Transaktionen benötigen mehr als 'max_binlog_cache_size' Bytes An Speicher. Diese mysqld-Variabel vergrössern und nochmal versuchen.', +"Diese Operation kann nicht bei einem aktiven Slave durchgeführt werden. Das Kommand SLAVE STOP muss zuerst ausgeführt werden.", +"Diese Operationbenötigt einen aktiven Slave. Slave konfigurieren und mittels SLAVE START aktivieren.", +"Der Server ist nicht als Slave konfigiriert. Im Konfigurations-File oder mittel CHANGE MASTER TO beheben.", +"Konnte Master-Info-Struktur nicht initialisieren; Berechtigungen von master.info prüfen.", +"Konnte keinen Slave-Thread starten. System-Resourcen überprüfen.", +"Benutzer %-.64s hat mehr als 'max_user_connections' aktive Verbindungen", +"Bei der Verwendung mit SET dürfen nur Constante Ausdrücke verwendet werden", "Lock wait timeout exceeded", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", From 043c49aade6113bd01f564880854b0071a88d953 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 12 Jun 2001 18:00:36 +0300 Subject: [PATCH 33/33] Added --skip-stack to mysql-test-run Docs/manual.texi: Changelog mysql-test/mysql-test-run.sh: Added --skip-stack when using gdb sql/mysqld.cc: Changed reference to bugs@lists.mysql.com to manual reference. --- Docs/manual.texi | 22 +++++++++++++--------- mysql-test/mysql-test-run.sh | 9 +++++---- sql/mysqld.cc | 4 ++-- 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/Docs/manual.texi b/Docs/manual.texi index 6c096b9831c..1de38a43aa6 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -954,14 +954,9 @@ How MySQL Compares to @code{mSQL} How MySQL Compares to PostgreSQL -* MySQL-PostgreSQL goals:: -* MySQL-PostgreSQL features:: -* MySQL-PostgreSQL benchmarks:: - -MySQL and PostgreSQL development goals - -* MySQL-PostgreSQL features:: -* MySQL-PostgreSQL benchmarks:: +* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development strategies +* MySQL-PostgreSQL features:: Featurevise Comparison of MySQL and PostgreSQL +* MySQL-PostgreSQL benchmarks:: Benchmarking MySQL and PostgreSQL MySQL Internals @@ -995,6 +990,7 @@ Changes in release 4.0.x (Development; Alpha) Changes in release 3.23.x (Stable) +* News-3.23.39a:: * News-3.23.39:: Changes in release 3.23.39 * News-3.23.38:: Changes in release 3.23.38 * News-3.23.37:: Changes in release 3.23.37 @@ -46314,6 +46310,7 @@ users use this code as the rest of the code and because of this we are not yet 100% confident in this code. 
@menu +* News-3.23.39a:: * News-3.23.39:: Changes in release 3.23.39 * News-3.23.38:: Changes in release 3.23.38 * News-3.23.37:: Changes in release 3.23.37 @@ -46357,7 +46354,14 @@ not yet 100% confident in this code. * News-3.23.0:: Changes in release 3.23.0 @end menu -@node News-3.23.39, News-3.23.38, News-3.23.x, News-3.23.x +@node News-3.23.39a, News-3.23.39, News-3.23.x, News-3.23.x +@appendixsubsec Changes in release 3.23.39a +@itemize @bullet +@item +Fixed a small bug in the mysql-test benchmark suite +@end itemize + +@node News-3.23.39, News-3.23.38, News-3.23.39a, News-3.23.x @appendixsubsec Changes in release 3.23.39 @itemize @bullet @item diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 6634d8696e6..4d987e591e0 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -144,8 +144,9 @@ while test $# -gt 0; do SLEEP_TIME=`$ECHO "$1" | $SED -e "s;--sleep=;;"` ;; --mysqld=*) - TMP=`$ECHO "$1" | $SED -e "s;--mysqld-=;"` - EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $TMP" + TMP=`$ECHO "$1" | $SED -e "s;--mysqld=;;"` + EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT $TMP" + EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT $TMP" ;; --gcov ) if [ x$BINARY_DIST = x1 ] ; then @@ -163,12 +164,14 @@ while test $# -gt 0; do fi DO_GDB=1 USE_RUNNING_SERVER="" + EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-stack" ;; --ddd ) if [ x$BINARY_DIST = x1 ] ; then $ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with -gdb option" fi DO_DDD=1 + EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-stack" USE_RUNNING_SERVER="" ;; --skip-*) @@ -436,12 +439,10 @@ start_master() --server-id=1 \ --basedir=$MY_BASEDIR \ --port=$MASTER_MYPORT \ - --exit-info=256 \ --datadir=$MASTER_MYDDIR \ --pid-file=$MASTER_MYPID \ --socket=$MASTER_MYSOCK \ --log=$MASTER_MYLOG --default-character-set=latin1 \ - --core \ --tmpdir=$MYSQL_TMP_DIR \ --language=english \ --innodb_data_file_path=ibdata1:50M \ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index ffef39964da..b155b313e8b 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1194,8 +1194,8 @@ bad corruption, the above values may be invalid\n\n", thd->thread_id); } fprintf(stderr, "\ -Please use the information above to create a repeatable test case for the\n\ -crash, and send it to bugs@lists.mysql.com\n"); +The manual page at http://www.mysql.com/doc/C/r/Crashing.html contains\n\ +information that should help you find out what is causing the crash\n"); fflush(stderr); #endif /* HAVE_STACKTRACE */
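Several of the result files above contain "Estimated time" figures, emitted whenever a single test ran past the 600-second time limit and the benchmark extrapolated from the loops it had completed. The sketch below illustrates that arithmetic in Python; it is an illustrative reconstruction only — the sql-bench driver code is not part of this patch and the helper name is hypothetical — but the formula reproduces every estimate reported in these files (truncating, not rounding, the result).

# Illustrative sketch only: the actual sql-bench driver is not included in
# this patch, and estimate_total_time() is a hypothetical helper name.
# The arithmetic matches the "Estimated time" figures in the results above.

def estimate_total_time(elapsed_secs, loops_done, loops_total):
    # Extrapolate the full-run wallclock time from a partial run that was
    # cut off at the 600-second limit. int() truncates for positive values,
    # which is what the reported figures are consistent with.
    return int(elapsed_secs * loops_total / loops_done)

# Worked examples taken from the result files in this patch:
print(estimate_total_time(814, 2, 500))      # 203500  (outer_join_not_found)
print(estimate_total_time(627, 195, 5010))   # 16109   (select_range_prefix)
print(estimate_total_time(601, 233, 2500))   # 6448    (min_max_on_key)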