Merge 10.4 into 10.5
This commit is contained in commit 28fad39de7
117 changed files with 4134 additions and 1715 deletions
@@ -37,7 +37,7 @@ IF(CMAKE_VERSION VERSION_LESS "3.6.0")
SET(CPACK_PACKAGE_FILE_NAME "${CPACK_RPM_PACKAGE_NAME}-${VERSION}-${RPM}-${CMAKE_SYSTEM_PROCESSOR}")
ELSE()
SET(CPACK_RPM_FILE_NAME "RPM-DEFAULT")
SET(CPACK_RPM_DEBUGINFO_PACKAGE ON)
SET(CPACK_RPM_DEBUGINFO_PACKAGE ON CACHE INTERNAL "")
ENDIF()

SET(CPACK_RPM_PACKAGE_RELEASE "1%{?dist}")
4 debian/po/ca.po vendored
@@ -203,13 +203,13 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
#~ "returns \"myhostname\" then there must be a line like \"10.5.0.1 "
#~ "returns \"myhostname\" then there must be a line like \"10.0.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "El MySQL nom<6F>s s'instal<61>la en cas de tenir un nom d'ordinador central que "
#~ "no sigui num<75>ric i que es pugui resoldre a trav<61>s del fitxer /etc/hosts. "
#~ "Ex. si l'ordre \"hostname\" retorna \"myhostname\", llavors hi ha d'haver "
#~ "una l<EFBFBD>nia com la seg<65>ent \"10.5.0.1 myhostname\"."
#~ "una línia com la següent \"10.0.0.1 myhostname\"."

#, fuzzy
#~ msgid ""
4 debian/po/cs.po vendored
@@ -310,13 +310,13 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
#~ "returns \"myhostname\" then there must be a line like \"10.5.0.1 "
#~ "returns \"myhostname\" then there must be a line like \"10.0.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "MySQL se nainstaluje pouze v případě, že používáte nenumerické jméno "
#~ "počítače, které se dá přeložit přes soubor /etc/hosts. Např. když příkaz "
#~ "\"hostname\" vrátí \"diamond\", tak v /etc/hosts musí existovat obdobný "
#~ "řádek jako \"10.5.0.1 diamond\"."
#~ "řádek jako \"10.0.0.1 diamond\"."

#~ msgid ""
#~ "A new mysql user \"debian-sys-maint\" will be created. This mysql account "
72 debian/po/da.po vendored
|
@ -80,7 +80,7 @@ msgid ""
|
|||
"You should also check the permissions and ownership of the /var/lib/mysql "
|
||||
"directory:"
|
||||
msgstr ""
|
||||
"Du b<EFBFBD>r ogs<67> tjekke filrettighederne og ejerskabet af mappen /var/lib/mysql:"
|
||||
"Du bør også tjekke filrettighederne og ejerskabet af mappen /var/lib/mysql:"
|
||||
|
||||
#. Type: boolean
|
||||
#. Description
|
||||
|
@ -107,7 +107,7 @@ msgid ""
|
|||
"the data should be kept."
|
||||
msgstr ""
|
||||
"Hvis du fjerner MariaDB-pakken for senere at installere en nyere version, "
|
||||
"eller hvis en anden mariadb-server-pakke allerede benytter den, b<EFBFBD>r dataene "
|
||||
"eller hvis en anden mariadb-server-pakke allerede benytter den, bør dataene "
|
||||
"bevares."
|
||||
|
||||
#. Type: password
|
||||
|
@ -123,7 +123,7 @@ msgid ""
|
|||
"While not mandatory, it is highly recommended that you set a password for "
|
||||
"the MariaDB administrative \"root\" user."
|
||||
msgstr ""
|
||||
"Selvom det ikke kr<EFBFBD>ves, anbefales det kraftigt, at du s<>tter en adgangskode "
|
||||
"Selvom det ikke kræves, anbefales det kraftigt, at du sætter en adgangskode "
|
||||
"for MariaDB's administrationsbruger \"root\"."
|
||||
|
||||
#. Type: password
|
||||
|
@ -132,7 +132,7 @@ msgstr ""
|
|||
#, fuzzy
|
||||
#| msgid "If that field is left blank, the password will not be changed."
|
||||
msgid "If this field is left blank, the password will not be changed."
|
||||
msgstr "Hvis du lader dette felt st<EFBFBD> tomt, vil adgangskoden ikke blive <20>ndret."
|
||||
msgstr "Hvis du lader dette felt stå tomt, vil adgangskoden ikke blive ændret."
|
||||
|
||||
#. Type: password
|
||||
#. Description
|
||||
|
@ -146,7 +146,7 @@ msgstr "Ny adgangskode for MariaDB's \"root\"-bruger:"
|
|||
#. Description
|
||||
#: ../mariadb-server-10.5.templates:7001
|
||||
msgid "Unable to set password for the MariaDB \"root\" user"
|
||||
msgstr "Kunne ikke s<EFBFBD>tte adgangskoden for MariaDB's \"root\"-bruger"
|
||||
msgstr "Kunne ikke sætte adgangskoden for MariaDB's \"root\"-bruger"
|
||||
|
||||
#. Type: error
|
||||
#. Description
|
||||
|
@ -157,7 +157,7 @@ msgid ""
|
|||
"because of a communication problem with the MariaDB server."
|
||||
msgstr ""
|
||||
"Der opstod en fejl, da adgangskoden for MariaDB's administrationsbruger blev "
|
||||
"fors<EFBFBD>gt <20>ndret. Dette kan v<>re sket, fordi brugeren allerede har en "
|
||||
"forsøgt ændret. Dette kan være sket, fordi brugeren allerede har en "
|
||||
"adgangskode, eller fordi der var problemer med at kommunikere med MariaDB-"
|
||||
"serveren."
|
||||
|
||||
|
@ -165,7 +165,7 @@ msgstr ""
|
|||
#. Description
|
||||
#: ../mariadb-server-10.5.templates:7001
|
||||
msgid "You should check the account's password after the package installation."
|
||||
msgstr "Du b<EFBFBD>r tjekke kontoens adgangskode efter pakkeinstallationen."
|
||||
msgstr "Du bør tjekke kontoens adgangskode efter pakkeinstallationen."
|
||||
|
||||
#. Type: error
|
||||
#. Description
|
||||
|
@ -194,11 +194,11 @@ msgid "The two passwords you entered were not the same. Please try again."
|
|||
msgstr ""
|
||||
|
||||
#~ msgid "Really proceed with downgrade?"
|
||||
#~ msgstr "<EFBFBD>nsker du virkelig at forts<74>tte nedgraderingen?"
|
||||
#~ msgstr "Ønsker du virkelig at fortsætte nedgraderingen?"
|
||||
|
||||
#~ msgid "A file named /var/lib/mysql/debian-*.flag exists on this system."
|
||||
#~ msgstr ""
|
||||
#~ "Der er en fil med navnet /var/lib/mysql/debian-*.flag p<EFBFBD> dette system."
|
||||
#~ "Der er en fil med navnet /var/lib/mysql/debian-*.flag på dette system."
|
||||
|
||||
#, fuzzy
|
||||
#~| msgid ""
|
||||
|
@ -208,7 +208,7 @@ msgstr ""
|
|||
#~ "Such a file is an indication that a mariadb-server package with a higher "
|
||||
#~ "version has been installed previously."
|
||||
#~ msgstr ""
|
||||
#~ "S<EFBFBD>dan en fil tyder p<> at der tidligere har v<>ret installeret en h<>jere "
|
||||
#~ "Sådan en fil tyder på at der tidligere har været installeret en højere "
|
||||
#~ "version af mariadb-server-pakken."
|
||||
|
||||
#~ msgid ""
|
||||
|
@ -232,7 +232,7 @@ msgstr ""
|
|||
#~ "To use MariaDB, the following entries for users and groups should be "
|
||||
#~ "added to the system:"
|
||||
#~ msgstr ""
|
||||
#~ "Nedenst<EFBFBD>ende linjer for brugere og grupper skal tilf<6C>jes dette system for "
|
||||
#~ "Nedenstående linjer for brugere og grupper skal tilføjes dette system for "
|
||||
#~ "at benytte MariaDB:"
|
||||
|
||||
#~ msgid "Cannot upgrade if ISAM tables are present!"
|
||||
|
@ -246,18 +246,18 @@ msgstr ""
|
|||
#~ "mysql-server-4.1 gets removed nevertheless just reinstall it to convert "
|
||||
#~ "those tables."
|
||||
#~ msgstr ""
|
||||
#~ "Nyere versioner af MySQL kan ikke l<EFBFBD>ngere benytte det gamle ISAM-"
|
||||
#~ "tabelformat, og det er derfor n<EFBFBD>dvendigt at konvertere dine tabeller til "
|
||||
#~ "Nyere versioner af MySQL kan ikke længere benytte det gamle ISAM-"
|
||||
#~ "tabelformat, og det er derfor nødvendigt at konvertere dine tabeller til "
|
||||
#~ "f.eks. MyISAM forud for opgraderingen med \"mysql_convert_table_format\" "
|
||||
#~ "eller \"ALTER TABLE x ENGINE=MyISAM\". Installationen af mysql-server-5.1 "
|
||||
#~ "afbrydes nu. Skulle din gamle mysql-server-4.1 alligevel bliver "
|
||||
#~ "afinstalleret, s<EFBFBD> geninstall<6C>r den blot og konverter tabellerne."
|
||||
#~ "afinstalleret, så geninstallér den blot og konverter tabellerne."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Support MySQL connections from hosts running Debian \"sarge\" or older?"
|
||||
#~ msgstr ""
|
||||
#~ "Underst<EFBFBD>t MySQL-forbindelser fra maskiner, der k<>rer Debian \"Sarge\" "
|
||||
#~ "eller <EFBFBD>ldre?"
|
||||
#~ "Understøt MySQL-forbindelser fra maskiner, der kører Debian \"Sarge\" "
|
||||
#~ "eller ældre?"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "In old versions of MySQL clients on Debian, passwords were not stored "
|
||||
|
@ -265,10 +265,10 @@ msgstr ""
|
|||
#~ "PHP) from hosts running Debian 3.1 Sarge will not be able to connect to "
|
||||
#~ "recent accounts or accounts whose password have been changed."
|
||||
#~ msgstr ""
|
||||
#~ "Gamle udgaver af MySQL-klienter p<EFBFBD> Debian gemte ikke adgangskoderne "
|
||||
#~ "Gamle udgaver af MySQL-klienter på Debian gemte ikke adgangskoderne "
|
||||
#~ "sikkert. Dette er blevet forbedret siden da, men klienter (f.eks. PHP) "
|
||||
#~ "fra maskiner, der k<EFBFBD>rer Debian 3.1 Sarge vil ikke kunne forbinde til "
|
||||
#~ "nyere konti eller konti, hvis adgangskode er blevet <EFBFBD>ndret."
|
||||
#~ "fra maskiner, der kører Debian 3.1 Sarge vil ikke kunne forbinde til "
|
||||
#~ "nyere konti eller konti, hvis adgangskode er blevet ændret."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "To use mysql you must install an equivalent user and group to the "
|
||||
|
@ -276,7 +276,7 @@ msgstr ""
|
|||
#~ "permissions (the uid/gid may be different)."
|
||||
#~ msgstr ""
|
||||
#~ "For at kunne bruge mysql skal du installere en bruger og en gruppe, der "
|
||||
#~ "svarer til nedenst<EFBFBD>ende, og sikre dig at /var/lib/mysql har de rigtige "
|
||||
#~ "svarer til nedenstående, og sikre dig at /var/lib/mysql har de rigtige "
|
||||
#~ "adgangsrettigheder (uid/gid kan afvige)."
|
||||
|
||||
#~ msgid ""
|
||||
|
@ -295,14 +295,14 @@ msgstr ""
|
|||
|
||||
#~ msgid ""
|
||||
#~ "If you do not provide a password no changes will be made to the account."
|
||||
#~ msgstr "Hvis du ikke angiver en adgangskode, vil kontoen ikke blive <EFBFBD>ndret."
|
||||
#~ msgstr "Hvis du ikke angiver en adgangskode, vil kontoen ikke blive ændret."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "When installation finishes, you should verify that the account is "
|
||||
#~ "properly protected with a password (see README.Debian for more "
|
||||
#~ "information)."
|
||||
#~ msgstr ""
|
||||
#~ "N<EFBFBD>r installationen afsluttes, b<>r du tjekke at kontoen er ordentligt "
|
||||
#~ "Når installationen afsluttes, bør du tjekke at kontoen er ordentligt "
|
||||
#~ "beskyttet med en adgangskode (se README.Debian for yderligere "
|
||||
#~ "oplysninger)."
|
||||
|
||||
|
@ -314,12 +314,12 @@ msgstr ""
|
|||
#~ "corrupted! This script also enhances the privilege tables but is not "
|
||||
#~ "supposed to give any user more rights that he had before,"
|
||||
#~ msgstr ""
|
||||
#~ "Du skal k<EFBFBD>re \"mysql_upgrade\" efter opgraderingen, da tabellerne eller "
|
||||
#~ "kan blive <EFBFBD>delagt! Dette script forbedrer ogs<67> rettighedstabellerne, men "
|
||||
#~ "Du skal køre \"mysql_upgrade\" efter opgraderingen, da tabellerne eller "
|
||||
#~ "kan blive ødelagt! Dette script forbedrer også rettighedstabellerne, men "
|
||||
#~ "burde ikke give nogen bruger flere rettigheder, end han havde tidligere,"
|
||||
|
||||
#~ msgid "Please also read http://www.mysql.com/doc/en/Upgrade.html"
|
||||
#~ msgstr "L<EFBFBD>s ogs<67> http://www.mysql.com/doc/en/Upgrade.html"
|
||||
#~ msgstr "Læs også http://www.mysql.com/doc/en/Upgrade.html"
|
||||
|
||||
#~ msgid "Install Hints"
|
||||
#~ msgstr "Installationstips"
|
||||
|
@ -331,17 +331,17 @@ msgstr ""
|
|||
#~ msgstr ""
|
||||
#~ "Ved opgraderinger fra MySQL 3.23, der fulgte med Debian Woody, kan de "
|
||||
#~ "symbolske /var/lib/mysql or /var/log/mysql blive fjernet ved et uheld, og "
|
||||
#~ "m<EFBFBD> genskabes manuelt."
|
||||
#~ "må genskabes manuelt."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "MySQL will only install if you have a non-numeric hostname that is "
|
||||
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
|
||||
#~ "returns \"myhostname\" then there must be a line like \"10.5.0.1 "
|
||||
#~ "returns \"myhostname\" then there must be a line like \"10.0.0.1 "
|
||||
#~ "myhostname\"."
|
||||
#~ msgstr ""
|
||||
#~ "MySQL vil kun blive installeret, hvis du har et ikke-numerisk v<EFBFBD>rtsnavn, "
|
||||
#~ "som kan sl<EFBFBD>s op i filen /ets/hosts. Hvis f.eks. kommandoen \"hostname\" "
|
||||
#~ "svarer med \"mitvaertsnavn\", skal du have en linje a'la \"10.5.0.1 "
|
||||
#~ "MySQL vil kun blive installeret, hvis du har et ikke-numerisk værtsnavn, "
|
||||
#~ "som kan slås op i filen /ets/hosts. Hvis f.eks. kommandoen \"hostname\" "
|
||||
#~ "svarer med \"mitvaertsnavn\", skal du have en linje a'la \"10.0.0.1 "
|
||||
#~ "mitvaertsnavn\" i /etc/hosts."
|
||||
|
||||
#~ msgid ""
|
||||
|
@ -356,8 +356,8 @@ msgstr ""
|
|||
#~ "root/.my.cnf, always write the \"user\" and the \"password\" lines in "
|
||||
#~ "there, never only the password!"
|
||||
#~ msgstr ""
|
||||
#~ "Husk at s<EFBFBD>tte en ADGANGSKODE for MySQLs root-bruger! Hvis du bruger en /"
|
||||
#~ "etc/.my.cnf, s<EFBFBD> skriv altid \"user\"- og \"password\"-linjer ind her, "
|
||||
#~ "Husk at sætte en ADGANGSKODE for MySQLs root-bruger! Hvis du bruger en /"
|
||||
#~ "etc/.my.cnf, så skriv altid \"user\"- og \"password\"-linjer ind her, "
|
||||
#~ "ikke kun adgangskoden!"
|
||||
|
||||
#~ msgid ""
|
||||
|
@ -365,7 +365,7 @@ msgstr ""
|
|||
#~ "by all MySQL versions, not necessarily only the one you are about to "
|
||||
#~ "purge?"
|
||||
#~ msgstr ""
|
||||
#~ "Skal jeg fjerne hele mappetr<EFBFBD>et /var/lib/mysql, som benyttes af alle "
|
||||
#~ "Skal jeg fjerne hele mappetræet /var/lib/mysql, som benyttes af alle "
|
||||
#~ "MySQL-versioner, ikke kun den version, du er ved at slette?"
|
||||
|
||||
#~ msgid ""
|
||||
|
@ -373,7 +373,7 @@ msgstr ""
|
|||
#~ "make use of it mysql_fix_privilege_tables must be executed manually. The "
|
||||
#~ "script is not supposed to give any user more rights that he had before,"
|
||||
#~ msgstr ""
|
||||
#~ "En sj<EFBFBD>lden gang imellem, f.eks. ved nye hovedversioner, sker det at "
|
||||
#~ "rettighedssystemet forbedres. For at g<EFBFBD>re brug af dette, skal "
|
||||
#~ "mysql_fix_privilege_tables k<EFBFBD>res manuelt. Scriptet vil ikke give nogen "
|
||||
#~ "En sjælden gang imellem, f.eks. ved nye hovedversioner, sker det at "
|
||||
#~ "rettighedssystemet forbedres. For at gøre brug af dette, skal "
|
||||
#~ "mysql_fix_privilege_tables køres manuelt. Scriptet vil ikke give nogen "
|
||||
#~ "bruger flere rettigheder, end vedkommende havde tidligere,"
|
||||
|
|
4 debian/po/es.po vendored
@@ -354,13 +354,13 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
#~ "returns \"myhostname\" then there must be a line like \"10.5.0.1 "
#~ "returns \"myhostname\" then there must be a line like \"10.0.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "Sólo se instalará MySQL si tiene un nombre de equipo que no sea una "
#~ "dirección IP y pueda resolverse a través del archivo /etc/hosts. Por "
#~ "ejemplo, si la orden «hostname» devuelve «MiNombreEquipo» entonces deberá "
#~ "existir una línea «10.5.0.1 MiNombreEquipo» en dicho archivo."
#~ "existir una línea «10.0.0.1 MiNombreEquipo» en dicho archivo."

#~ msgid ""
#~ "A new mysql user \"debian-sys-maint\" will be created. This mysql account "
4 debian/po/pt_BR.po vendored
@@ -322,13 +322,13 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
#~ "returns \"myhostname\" then there must be a line like \"10.5.0.1 "
#~ "returns \"myhostname\" then there must be a line like \"10.0.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "O MySQL será instalado somente caso você possua um nome de host NÃO "
#~ "NUMÉRICO que possa ser resolvido através do arquivo /etc/hosts, ou seja, "
#~ "caso o comando \"hostname\" retorne \"myhostname\", uma linha como "
#~ "\"10.5.0.1 myhostname\" deverá existir no arquivo /etc/hosts."
#~ "\"10.0.0.1 myhostname\" deverá existir no arquivo /etc/hosts."

#~ msgid ""
#~ "A new mysql user \"debian-sys-maint\" will be created. This mysql account "
4 debian/po/tr.po vendored
@@ -202,12 +202,12 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
#~ "returns \"myhostname\" then there must be a line like \"10.5.0.1 "
#~ "returns \"myhostname\" then there must be a line like \"10.0.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "MySQL sadece /etc/hosts dosyası yoluyla çözülebilir NUMERİK OLMAYAN bir "
#~ "makine adına sahipseniz kurulacaktır. Örneğin, eğer \"hostname\" komutu "
#~ "\"makinem\" ismini döndürüyorsa, bu dosya içinde \"10.5.0.1 makinem\" "
#~ "\"makinem\" ismini döndürüyorsa, bu dosya içinde \"10.0.0.1 makinem\" "
#~ "gibi bir satır olmalıdır."

#, fuzzy
@@ -4134,9 +4134,12 @@ reread_log_header:
log_header_read(max_cp_field);

if (checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) {
if (checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)
|| checkpoint_lsn_start
!= mach_read_from_8(buf + LOG_CHECKPOINT_LSN)
|| log_sys.log.get_lsn_offset()
!= mach_read_from_8(buf + LOG_CHECKPOINT_OFFSET))
goto reread_log_header;
}

log_mutex_exit();

@@ -4156,42 +4159,39 @@ reread_log_header:
}

/* label it */
byte MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE) log_hdr[OS_FILE_LOG_BLOCK_SIZE];
memset(log_hdr, 0, sizeof log_hdr);
mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys.log.format);
mach_write_to_4(LOG_HEADER_SUBFORMAT + log_hdr, log_sys.log.subformat);
mach_write_to_8(LOG_HEADER_START_LSN + log_hdr, checkpoint_lsn_start);
strcpy(reinterpret_cast<char*>(LOG_HEADER_CREATOR + log_hdr),
"Backup " MYSQL_SERVER_VERSION);
log_block_set_checksum(log_hdr,
log_block_calc_checksum_crc32(log_hdr));
byte MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE) log_hdr_buf[LOG_FILE_HDR_SIZE];
memset(log_hdr_buf, 0, sizeof log_hdr_buf);

/* Write the log header. */
if (ds_write(dst_log_file, log_hdr, sizeof log_hdr)) {
log_write_fail:
msg("error: write to logfile failed");
goto fail;
}
/* Adjust the checkpoint page. */
memcpy(log_hdr, buf, OS_FILE_LOG_BLOCK_SIZE);
mach_write_to_8(log_hdr + LOG_CHECKPOINT_OFFSET,
(checkpoint_lsn_start & (OS_FILE_LOG_BLOCK_SIZE - 1))
| LOG_FILE_HDR_SIZE);
byte *log_hdr_field = log_hdr_buf;
mach_write_to_4(LOG_HEADER_FORMAT + log_hdr_field, log_sys.log.format);
mach_write_to_4(LOG_HEADER_SUBFORMAT + log_hdr_field, log_sys.log.subformat);
mach_write_to_8(LOG_HEADER_START_LSN + log_hdr_field, checkpoint_lsn_start);
strcpy(reinterpret_cast<char*>(LOG_HEADER_CREATOR + log_hdr_field),
"Backup " MYSQL_SERVER_VERSION);
log_block_set_checksum(log_hdr_field,
log_block_calc_checksum_crc32(log_hdr_field));

/* copied from log_group_checkpoint() */
log_hdr_field +=
(log_sys.next_checkpoint_no & 1) ? LOG_CHECKPOINT_2 : LOG_CHECKPOINT_1;
/* The least significant bits of LOG_CHECKPOINT_OFFSET must be
stored correctly in the copy of the ib_logfile. The most significant
bits, which identify the start offset of the log block in the file,
we did choose freely, as LOG_FILE_HDR_SIZE. */
ut_ad(!((log_sys.log.get_lsn() ^ checkpoint_lsn_start)
& (OS_FILE_LOG_BLOCK_SIZE - 1)));
log_block_set_checksum(log_hdr,
log_block_calc_checksum_crc32(log_hdr));
/* Write checkpoint page 1 and two empty log pages before the
payload. */
if (ds_write(dst_log_file, log_hdr, OS_FILE_LOG_BLOCK_SIZE)
|| !memset(log_hdr, 0, sizeof log_hdr)
|| ds_write(dst_log_file, log_hdr, sizeof log_hdr)
|| ds_write(dst_log_file, log_hdr, sizeof log_hdr)) {
goto log_write_fail;
/* Adjust the checkpoint page. */
memcpy(log_hdr_field, log_sys.checkpoint_buf, OS_FILE_LOG_BLOCK_SIZE);
mach_write_to_8(log_hdr_field + LOG_CHECKPOINT_OFFSET,
(checkpoint_lsn_start & (OS_FILE_LOG_BLOCK_SIZE - 1))
| LOG_FILE_HDR_SIZE);
log_block_set_checksum(log_hdr_field,
log_block_calc_checksum_crc32(log_hdr_field));

/* Write log header*/
if (ds_write(dst_log_file, log_hdr_buf, sizeof(log_hdr_buf))) {
msg("error: write to logfile failed");
goto fail;
}

log_copying_running = true;
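As an illustration of the checkpoint adjustment above (not part of the commit), here is a minimal C sketch of what the expression (checkpoint_lsn_start & (OS_FILE_LOG_BLOCK_SIZE - 1)) | LOG_FILE_HDR_SIZE evaluates to. The constant values used here are assumptions (512-byte log blocks and a file header of four blocks); only the expression itself comes from the hunk. The low bits of the checkpoint LSN, its offset inside a log block, are preserved, while the high bits are replaced so the copied checkpoint page points just past the log file header in the backup copy.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only; the real constants live in the
   InnoDB headers. */
#define OS_FILE_LOG_BLOCK_SIZE 512U
#define LOG_FILE_HDR_SIZE (4U * OS_FILE_LOG_BLOCK_SIZE)

int main(void)
{
  uint64_t checkpoint_lsn_start = 0x2d4a7; /* example LSN = 185511 */

  /* Same expression as in the hunk above: keep the LSN's offset within
     its 512-byte log block, but anchor it right after the file header. */
  uint64_t offset = (checkpoint_lsn_start & (OS_FILE_LOG_BLOCK_SIZE - 1))
                    | LOG_FILE_HDR_SIZE;

  /* Prints: lsn=185511 -> offset=2215 (167 within the block, plus 2048). */
  printf("lsn=%" PRIu64 " -> offset=%" PRIu64 "\n",
         checkpoint_lsn_start, offset);
  return 0;
}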
@@ -1,4 +1,4 @@
/* Copyright (C) 2010, 2017, MariaDB Corporation.
/* Copyright (C) 2010, 2019, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,6 +13,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */

#ifndef MY_VALGRIND_INCLUDED
#define MY_VALGRIND_INCLUDED

/* clang -> gcc */
#ifndef __has_feature
# define __has_feature(x) 0
@@ -33,6 +36,7 @@
# define MEM_NOACCESS(a,len) VALGRIND_MAKE_MEM_NOACCESS(a,len)
# define MEM_CHECK_ADDRESSABLE(a,len) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(a,len)
# define MEM_CHECK_DEFINED(a,len) VALGRIND_CHECK_MEM_IS_DEFINED(a,len)
# define REDZONE_SIZE 8
#elif defined(__SANITIZE_ADDRESS__)
# include <sanitizer/asan_interface.h>
/* How to do manual poisoning:
@@ -41,11 +45,13 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */
# define MEM_NOACCESS(a,len) ASAN_POISON_MEMORY_REGION(a,len)
# define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0)
# define MEM_CHECK_DEFINED(a,len) ((void) 0)
# define REDZONE_SIZE 8
#else
# define MEM_UNDEFINED(a,len) ((void) (a), (void) (len))
# define MEM_NOACCESS(a,len) ((void) 0)
# define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0)
# define MEM_CHECK_DEFINED(a,len) ((void) 0)
# define REDZONE_SIZE 0
#endif /* HAVE_VALGRIND_MEMCHECK_H */

#if defined(TRASH_FREED_MEMORY)
@@ -56,3 +62,5 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */

#define TRASH_ALLOC(A,B) do { TRASH_FILL(A,B,0xA5); MEM_UNDEFINED(A,B); } while(0)
#define TRASH_FREE(A,B) do { TRASH_FILL(A,B,0x8F); MEM_NOACCESS(A,B); } while(0)

#endif /* MY_VALGRIND_INCLUDED */
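For context on how poisoning macros like TRASH_ALLOC and TRASH_FREE above are typically used, here is a minimal, hypothetical C sketch (not part of this commit). The my_cache_get/my_cache_put names are invented for the example, and the stand-in macros mirror only the no-op branch of the header; under valgrind or ASAN the real macros would mark the block undefined or inaccessible instead.

#include <string.h>

/* Stand-ins for the instrumentation hooks from my_valgrind.h; in a plain
   build they compile to nothing, matching the #else branch above. */
#define MEM_UNDEFINED(a,len) ((void) (a), (void) (len))
#define MEM_NOACCESS(a,len)  ((void) 0)

/* Fill a block with a recognizable byte pattern, as TRASH_FILL does. */
#define TRASH_FILL(p,len,c) memset((p), (c), (len))

/* Hypothetical wrappers mirroring TRASH_ALLOC / TRASH_FREE usage. */
static void *my_cache_get(void *block, size_t len)
{
  TRASH_FILL(block, len, 0xA5);  /* make reads of uninitialized memory obvious */
  MEM_UNDEFINED(block, len);     /* valgrind: must be written before it is read */
  return block;
}

static void my_cache_put(void *block, size_t len)
{
  TRASH_FILL(block, len, 0x8F);  /* make use-after-free visible in dumps */
  MEM_NOACCESS(block, len);      /* ASAN/valgrind: any later access is an error */
}

int main(void)
{
  char buf[64];
  char *p = my_cache_get(buf, sizeof buf);
  p[0] = 'x';                    /* legitimate use while the block is live */
  my_cache_put(buf, sizeof buf); /* block is now trashed and poisoned */
  return 0;
}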
@@ -1,6 +1,6 @@
'\" t
.\"
.TH "\FBMYSQLIMPORT\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.TH "\FBMYSQLIMPORT\FR" "1" "21 May 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -346,6 +346,22 @@ option\&.
.sp -1
.IP \(bu 2.3
.\}
.\" mysqlimport: ignore-foreign-keys option
.\" ignore-foreign-keys option: mysqlimport
\fB\-\-ignore\-foreign\-keys\fR,
\fB\-k\fR
.sp
Disable foreign key checks while importing the data\&.
.RE
.sp
.RS 4
.ie n \{\
\h'-04'\(bu\h'+03'\c
.\}
.el \{\
.sp -1
.IP \(bu 2.3
.\}
.\" mysqlimport: ignore-lines option
.\" ignore-lines option: mysqlimport
\fB\-\-ignore\-lines=\fR\fB\fIN\fR\fR
@@ -1,5 +1,7 @@
--source include/have_sequence.inc
--source include/have_innodb.inc
# This test is slow on buildbot.
--source include/big_test.inc
create table t1(c1 integer not null,c2 integer not null, key (c1)) engine=InnoDb;
create view v1 as select * from t1 where c1 in (0,1);
@ -3021,3 +3021,404 @@ DROP VIEW v1,v2,v3;
|
|||
DROP TABLE t1;
|
||||
set optimizer_switch=@exit_optimizer_switch;
|
||||
set join_cache_level=@exit_join_cache_level;
|
||||
#
|
||||
# Bug mdev-12812: EXPLAIN for query with many expensive derived
|
||||
#
|
||||
CREATE TABLE t1
|
||||
(id int auto_increment primary key,
|
||||
uid int NOT NULL,
|
||||
gp_id int NOT NULL,
|
||||
r int NOT NULL
|
||||
);
|
||||
INSERT INTO t1(uid,gp_id,r) VALUES
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1);
|
||||
CREATE TABLE t2 (id int) ;
|
||||
INSERT INTO t2 VALUES (1);
|
||||
explain SELECT 1 FROM t2 JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_1 ON gp_1.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_2 ON gp_2.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_3 ON gp_3.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_4 ON gp_4.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_5 ON gp_5.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_6 ON gp_6.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
WHERE p1.gp_id=7) gp_7 ON gp_7.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_8 ON gp_8.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_9 ON gp_9.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_14 ON gp_14.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_15 ON gp_15.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_16 ON gp_16.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
)gp_17 ON gp_17.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
)gp_18 ON gp_18.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
)gp_19 ON gp_19.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
) gp_20 ON gp_20.id=t2.id ;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY t2 system NULL NULL NULL NULL 1
|
||||
1 PRIMARY t2 system NULL NULL NULL NULL 1
|
||||
1 PRIMARY t2 system NULL NULL NULL NULL 1
|
||||
1 PRIMARY t2 system NULL NULL NULL NULL 1
|
||||
1 PRIMARY t2 system NULL NULL NULL NULL 1
|
||||
1 PRIMARY t2 system NULL NULL NULL NULL 1
|
||||
1 PRIMARY p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
1 PRIMARY p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
1 PRIMARY p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p4 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p1 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p3 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p4 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p1 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p3 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p4 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p1 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p3 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p4 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived17> ALL NULL NULL NULL NULL 50328437500000 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived14> ALL NULL NULL NULL NULL 27680640625000000 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived7> ALL NULL NULL NULL NULL 7798774269472204800 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived8> ALL NULL NULL NULL NULL 7798774269472204800 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived9> ALL NULL NULL NULL NULL 15224352343750000640 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived10> ALL NULL NULL NULL NULL 15224352343750000640 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived11> ALL NULL NULL NULL NULL 15224352343750000640 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived12> ALL NULL NULL NULL NULL 15224352343750000640 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived13> ALL NULL NULL NULL NULL 15224352343750000640 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived15> ALL NULL NULL NULL NULL 15224352343750000640 Using where; Using join buffer (incremental, BNL join)
|
||||
1 PRIMARY <derived16> ALL NULL NULL NULL NULL 15224352343750000640 Using where; Using join buffer (incremental, BNL join)
|
||||
17 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
17 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
17 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
17 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
17 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
17 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
16 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
16 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
16 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
16 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
16 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
16 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
16 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
16 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
15 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
15 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
15 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
15 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
15 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
15 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
15 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
15 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
14 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
14 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
14 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
14 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
14 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
14 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
14 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
13 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
13 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
13 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
13 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
13 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
13 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
13 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
13 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
12 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
12 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
12 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
12 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
12 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
12 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
12 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
12 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
11 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
11 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
11 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
11 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
11 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
11 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
11 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
11 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
10 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
10 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
10 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
10 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
10 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
10 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
10 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
10 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
9 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
9 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where
|
||||
9 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
9 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
9 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
9 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
9 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
9 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
8 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
8 DERIVED p1 ALL NULL NULL NULL NULL 550 Using where
|
||||
8 DERIVED p3 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
8 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
8 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
8 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
8 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
8 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
8 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
8 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
7 DERIVED t2 system NULL NULL NULL NULL 1
|
||||
7 DERIVED p1 ALL NULL NULL NULL NULL 550 Using where
|
||||
7 DERIVED p3 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (flat, BNL join)
|
||||
7 DERIVED p4 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
7 DERIVED p5 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
7 DERIVED p6 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
7 DERIVED p7 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
7 DERIVED p8 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
7 DERIVED p9 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
7 DERIVED p10 ALL NULL NULL NULL NULL 550 Using where; Using join buffer (incremental, BNL join)
|
||||
DROP TABLE t1, t2;
|
||||
|
|
|
@ -1976,3 +1976,263 @@ DROP TABLE t1;
|
|||
# The following command must be the last one the file
|
||||
set optimizer_switch=@exit_optimizer_switch;
|
||||
set join_cache_level=@exit_join_cache_level;
|
||||
|
||||
--echo #
|
||||
--echo # Bug mdev-12812: EXPLAIN for query with many expensive derived
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1
|
||||
(id int auto_increment primary key,
|
||||
uid int NOT NULL,
|
||||
gp_id int NOT NULL,
|
||||
r int NOT NULL
|
||||
);
|
||||
|
||||
INSERT INTO t1(uid,gp_id,r) VALUES
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,1,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),(1,2,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),(1,3,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),(1,4,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),(1,5,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),(1,6,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),(1,7,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),(1,8,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),(1,9,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),
|
||||
(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,10,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),(1,11,1),
|
||||
(1,11,1);
|
||||
|
||||
CREATE TABLE t2 (id int) ;
|
||||
INSERT INTO t2 VALUES (1);
|
||||
|
||||
explain SELECT 1 FROM t2 JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_1 ON gp_1.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_2 ON gp_2.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_3 ON gp_3.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_4 ON gp_4.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_5 ON gp_5.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_6 ON gp_6.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p1 ON p1.r=1 AND p1.uid=t2.id
|
||||
JOIN t1 p3 ON p3.r=3 AND p3.uid=t2.id
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
WHERE p1.gp_id=7) gp_7 ON gp_7.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_8 ON gp_8.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_9 ON gp_9.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_14 ON gp_14.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_15 ON gp_15.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
) gp_16 ON gp_16.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
)gp_17 ON gp_17.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
)gp_18 ON gp_18.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
JOIN t1 p9 ON p9.r=9 AND p9.uid=t2.id
|
||||
JOIN t1 p10 ON p10.r=10 AND p10.uid=t2.id
|
||||
)gp_19 ON gp_19.id=t2.id
|
||||
JOIN
|
||||
(SELECT t2.id
|
||||
FROM t2
|
||||
JOIN t1 p4 ON p4.r=4 AND p4.uid=t2.id
|
||||
JOIN t1 p5 ON p5.r=5 AND p5.uid=t2.id
|
||||
JOIN t1 p6 ON p6.r=6 AND p6.uid=t2.id
|
||||
JOIN t1 p7 ON p7.r=7 AND p7.uid=t2.id
|
||||
JOIN t1 p8 ON p8.r=8 AND p8.uid=t2.id
|
||||
) gp_20 ON gp_20.id=t2.id ;

DROP TABLE t1, t2;

@ -19,4 +19,3 @@ innodb_bug12902967 : broken upstream
file_contents : MDEV-6526 these files are not installed anymore
max_statement_time : cannot possibly work, depends on timing
partition_open_files_limit : open_files_limit check broken by MDEV-18360
join_cache : enable after MDEV-17752 is fixed

File diff suppressed because it is too large
@ -49,6 +49,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -62,6 +63,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -78,6 +80,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -91,6 +94,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -107,6 +111,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -121,6 +126,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -138,6 +144,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -151,6 +158,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -217,6 +225,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -230,6 +239,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -246,6 +256,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -259,6 +270,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -275,6 +287,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -288,6 +301,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -304,6 +318,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -317,6 +332,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -357,6 +373,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -370,6 +387,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -409,6 +427,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -422,6 +441,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -491,6 +511,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -504,6 +525,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -542,6 +564,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -555,6 +578,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -593,6 +617,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -606,6 +631,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -644,6 +670,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -657,6 +684,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -698,6 +726,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -711,6 +740,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -736,6 +766,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -749,6 +780,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -774,6 +806,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -787,6 +820,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -812,6 +846,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -825,6 +860,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -850,6 +886,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -863,6 +900,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -888,6 +926,7 @@ SELECT City.Name, Country.Name FROM City,Country
|
|||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -901,6 +940,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
|
|||
CountryLanguage.Percentage > 50 AND
|
||||
LENGTH(Language) < LENGTH(City.Name) - 2;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, CountryLanguage.Language
|
||||
FROM City,Country,CountryLanguage
|
||||
WHERE City.Country=Country.Code AND
|
||||
|
@ -926,6 +966,7 @@ show variables like 'join_cache_level';
|
|||
|
||||
set join_cache_level=1;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND City.Population > 3000000;
|
||||
|
||||
|
@ -947,12 +988,14 @@ set join_cache_level=6;
|
|||
|
||||
ALTER TABLE Country MODIFY Name varchar(52) NOT NULL default '';
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
ALTER TABLE Country MODIFY Name varchar(300) NOT NULL default '';
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
@ -961,12 +1004,14 @@ ALTER TABLE Country ADD COLUMN PopulationBar text;
|
|||
UPDATE Country
|
||||
SET PopulationBar=REPEAT('x', CAST(Population/100000 AS unsigned int));
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, Country.PopulationBar FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
|
||||
|
||||
set join_buffer_size=256;
|
||||
|
||||
--sorted_result
|
||||
SELECT City.Name, Country.Name, Country.PopulationBar FROM City,Country
|
||||
WHERE City.Country=Country.Code AND
|
||||
Country.Name LIKE 'L%' AND City.Population > 100000;
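
The point of the tiny buffer set above is to force many join-buffer refills against the now much wider Country rows. A minimal illustrative check (assumed interactive session, not part of the recorded test):

# join_buffer_size is a per-buffer limit in bytes; a value this small makes
# the BNL/BNLH cache flush and refill after only a handful of rows.
set join_buffer_size=256;
show variables like 'join_buffer_size';
set join_buffer_size=default;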
|
||||
|
@ -974,6 +1019,40 @@ SELECT City.Name, Country.Name, Country.PopulationBar FROM City,Country
|
|||
set join_cache_level=default;
|
||||
set join_buffer_size=default;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-17752: Plan changes from hash_index_merge to index_merge with new optimizer defaults
|
||||
--echo #
|
||||
|
||||
set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
|
||||
set @save_use_stat_tables=@@use_stat_tables;
|
||||
set optimizer_use_condition_selectivity=4;
|
||||
set use_stat_tables='preferably';
|
||||
|
||||
use world;
|
||||
set join_cache_level=4;
|
||||
CREATE INDEX City_Name ON City(Name);
|
||||
|
||||
--disable_result_log
|
||||
ANALYZE TABLE City, Country;
|
||||
--enable_result_log
|
||||
|
||||
EXPLAIN
|
||||
SELECT Country.Name, Country.Population, City.Name, City.Population
|
||||
FROM Country LEFT JOIN City
|
||||
ON City.Country=Country.Code AND City.Population > 5000000
|
||||
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
|
||||
|
||||
EXPLAIN
|
||||
SELECT Country.Name, Country.Population, City.Name, City.Population
|
||||
FROM Country LEFT JOIN City
|
||||
ON City.Country=Country.Code AND
|
||||
(City.Population > 5000000 OR City.Name LIKE 'Za%')
|
||||
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
|
||||
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
|
||||
set @@use_stat_tables=@save_use_stat_tables;
|
||||
set join_cache_level=default;
|
||||
|
||||
DROP DATABASE world;
|
||||
|
||||
use test;
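
For anyone replaying the MDEV-17752 fragment above by hand: the plan change hinges on engine-independent statistics. A minimal sketch of collecting and inspecting them, assuming the world.City table from earlier in this test still exists; the mysql.column_stats query is only illustrative:

# use_stat_tables='preferably' makes ANALYZE TABLE fill mysql.column_stats and
# mysql.index_stats, which optimizer_use_condition_selectivity=4 then reads.
set use_stat_tables='preferably';
set optimizer_use_condition_selectivity=4;
ANALYZE TABLE world.City;
SELECT db_name, table_name, column_name, avg_frequency
FROM mysql.column_stats WHERE table_name='City';
set use_stat_tables=default;
set optimizer_use_condition_selectivity=default;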

@ -1342,7 +1342,7 @@ EXPLAIN SELECT * FROM t1 LEFT JOIN (t2 LEFT JOIN t3 ON c21=c31) ON c11=c21;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
1 SIMPLE t2 hash_ALL NULL #hash#$hj 5 test.t1.c11 0 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE t3 ALL NULL NULL NULL NULL 0 Using where; Using join buffer (incremental, BNL join)
1 SIMPLE t3 hash_ALL NULL #hash#$hj 5 test.t1.c11 0 Using where; Using join buffer (incremental, BNLH join)
DROP TABLE t1,t2,t3;
CREATE TABLE t1 (goods int(12) NOT NULL, price varchar(128) NOT NULL);
INSERT INTO t1 VALUES (23, 2340), (26, 9900);

@ -2003,6 +2003,7 @@ INSERT INTO t2 VALUES
|
|||
CREATE TABLE t3 ( a int, b int NOT NULL , PRIMARY KEY (b)) ;
|
||||
INSERT INTO t3 VALUES
|
||||
(0,6),(0,7),(0,8),(2,9),(0,10),(2,21),(0,22),(2,23),(2,24),(2,25);
|
||||
set @save_join_cache_level= @@join_cache_level;
|
||||
SET SESSION join_cache_level=4;
|
||||
EXPLAIN EXTENDED
|
||||
SELECT * FROM (t2 LEFT JOIN t1 ON t1.b = t2.b) JOIN t3 ON t1.b = t3.b;
|
||||
|
@ -2021,7 +2022,7 @@ EXECUTE stmt;
|
|||
b b a b
|
||||
10 10 0 10
|
||||
DEALLOCATE PREPARE stmt;
|
||||
SET SESSION join_cache_level=default;
|
||||
SET SESSION join_cache_level=@save_join_cache_level;
|
||||
DROP TABLE t1,t2,t3;
|
||||
#
|
||||
# LP bug #943543: LEFT JOIN converted to JOIN with
|
||||
|
@ -2448,7 +2449,7 @@ t1.b1+'0' t2.b2 + '0'
|
|||
0 0
|
||||
1 1
|
||||
DROP TABLE t1, t2;
|
||||
set @join_cache_level= @save_join_cache_level;
|
||||
set @@join_cache_level= @save_join_cache_level;
|
||||
#
|
||||
# MDEV-14779: using left join causes incorrect results with materialization and derived tables
|
||||
#
|
||||
|
@ -2515,6 +2516,143 @@ v2
|
|||
DROP TABLE t1,t2;
|
||||
# end of 5.5 tests
|
||||
#
|
||||
# MDEV-19258: chained right joins all converted to inner joins
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
id int NOT NULL AUTO_INCREMENT,
|
||||
timestamp bigint NOT NULL,
|
||||
modifiedBy varchar(255) DEFAULT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
CREATE TABLE t2 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
REVTYPE tinyint DEFAULT NULL,
|
||||
profile_id int DEFAULT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
CREATE TABLE t3 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
person_id int DEFAULT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
CREATE TABLE t4 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
INSERT INTO t1 VALUES
|
||||
(1,1294391193890,'Cxqy$*9.kKeE'),(2,1294643906883,'rE4wqGV0gif@'),
|
||||
(3,1294643927456,'L?3yt(%dY$Br'),(4,1294644343525,'WH&ObiZ$#2S4'),
|
||||
(5,1294644616416,'YXnCbt?olUZ0'),(6,1294644954537,'8Npe4!(#lU@k'),
|
||||
(7,1294645046659,'knc0GhXB1#ib'),(8,1294645183829,'w*oPpVfuS8^m'),
|
||||
(9,1294645386701,'hwXR@3qVzrbU'),(10,1294645525982,'BeLW*Y9ndP0l'),
|
||||
(11,1294645627723,'nTegib^)qZ$I'),(12,1294650860266,'u62C^Kzx3wH8'),
|
||||
(13,1294657613745,'4&BkFjGa!qLg'),(14,1294660627161,')anpt312SCoh'),
|
||||
(15,1294661023336,'LtJ2PX?*kTmx'),(16,1294662838066,'POGRr@?#ofpl'),
|
||||
(17,1294663020989,'o.)1EOT2jnF7'),(18,1294663308065,'&TZ0F0LHE6.h'),
|
||||
(19,1294664900039,'j)kSC%^In$9d'),(20,1294668904556,'97glN50)cAo.'),
|
||||
(21,1294728056853,'lrKZxmw?I.Ek'),(22,1294728157174,'@P*SRg!pT.q?'),
|
||||
(23,1294728327099,'W9gPrptF.)8n'),(24,1294728418481,'$q*c^sM&URd#'),
|
||||
(25,1294728729620,'9*f4&bTPRtHo'),(26,1294728906014,')4VtTEnS7$oI'),
|
||||
(27,1294732190003,'8dkNSPq2u3AQ'),(28,1294733205065,'SV2N6IoEf438'),
|
||||
(29,1294741984927,'rBKj.0S^Ey%*'),(30,1294751748352,'j$2DvlBqk)Fw'),
|
||||
(31,1294753902212,'C$N6OrEw8elz'),(32,1294758120598,'DCSVZw!rnxXq'),
|
||||
(33,1294761769556,'OTS@QU8a6s5c'),(34,1294816845305,'IUE2stG0D3L5'),
|
||||
(35,1294816966909,'Xd16yka.9nHe'),(36,1294817116302,'lOQHZpm%!8qb'),
|
||||
(37,1294817374775,'^&pE3IhNf7ey'),(38,1294817538907,'oEn4#7C0Vhfp'),
|
||||
(39,1294818482950,'bx54J*O0Va&?'),(40,1294819047024,'J%@a&1.qgdb?'),
|
||||
(41,1294821826077,'C9kojr$L3Phz'),(42,1294825454458,'gG#BOnM80ZPi'),
|
||||
(43,1294904129918,'F^!TrjM#zdvc'),(44,1294904254166,'Va&Tb)k0RvlM'),
|
||||
(45,1294904414964,'dJjq0M6HvhR#'),(46,1294904505784,'nJmxg)ELqY(b'),
|
||||
(47,1294904602835,'dhF#or$Vge!7'),(48,1294904684728,'?bIh5E3l!0em'),
|
||||
(49,1294904877898,'Y*WflOdcxnk.'),(50,1294905002390,'*?H!lUgez5A.'),
|
||||
(51,1294905096043,'wlEIY3n9uz!p'),(52,1294905404621,'T?qv3H6&hlQD'),
|
||||
(53,1294905603922,'S@Bhys^Ti7bt'),(54,1294905788416,'KR?a5NVukz#l'),
|
||||
(55,1294905993190,'A*&q4kWhED!o'),(56,1294906205254,'fT0%7z0DF6h*'),
|
||||
(57,1294906319680,'LhzdW4?ivjR0'),(58,1294906424296,'h0KDlns%U*6T'),
|
||||
(59,1294906623844,'b$CfB1noI6Ax'),(60,1294911258896,'#T1*LP!3$Oys');
|
||||
INSERT INTO t2 VALUES
|
||||
(1,1,0,10209),(1,42480,1,10209),(1,61612,1,10209),(1,257545,1,10209),
|
||||
(1,385332,1,10209),(1,1687999,1,10209),(3,1,0,10210),(3,617411,2,10210),
|
||||
(4,11,0,14),(4,95149,1,10211),(4,607890,2,10211),(5,1,0,10212),
|
||||
(6,1,0,10213),(6,93344,1,10213),(6,295578,1,10213),(6,295579,1,10213),
|
||||
(6,295644,1,10213),(7,1,0,10214),(7,12,1,7),(7,688796,1,10214),
|
||||
(7,1140433,1,10214),(7,1715227,1,10214),(8,1,0,10215),(8,74253,1,10215),
|
||||
(8,93345,1,10215),(8,12,2,2),(9,1,0,10216),(9,93342,1,10216),
|
||||
(9,122354,1,10216),(9,301499,2,10216),(10,11,0,5),(10,93343,1,10217),
|
||||
(10,122355,1,10217),(10,123050,1,10217),(10,301500,2,10217),(11,1,0,10218),
|
||||
(11,87852,1,10218),(11,605499,2,10218),(12,1,0,10219),(12,88024,1,10219),
|
||||
(12,605892,2,10219),(13,1,0,10220);
|
||||
INSERT INTO t3 VALUES
|
||||
(1,1,300003),(1,117548,NULL),(2,1,300003),(2,117548,300006),
|
||||
(3,1,300153),(3,117548,NULL),(4,1,300153),(4,117548,NULL),
|
||||
(5,1,300153),(5,117548,NULL),(6,1,300182),(6,117548,NULL),
|
||||
(7,1,300205),(7,117548,NULL),(8,1,300217),(8,117548,NULL),
|
||||
(9,1,300290),(9,117548,NULL),(10,1,300290),(10,117548,NULL),
|
||||
(11,1,300405),(11,117548,NULL),(12,1,300670),(12,117548,NULL),
|
||||
(13,1,300670),(13,117548,NULL),(14,1,300006),(14,117548,NULL),
|
||||
(15,1,300671),(15,117548,NULL),(16,1,300732),(16,117548,NULL);
|
||||
INSERT INTO t4 VALUES
|
||||
(300000,1),(300001,1),(300003,1),(300004,1),
|
||||
(300005,1),(300005,688796),(300006,1),(300006,97697),
|
||||
(300009,1),(300010,1),(300011,1),(300012,1),(300013,1),
|
||||
(300014,1),(300015,1),(300016,1),(300017,1),(300018,1),
|
||||
(300019,1),(300020,1),(300021,1),(300022,1),(300023,1),
|
||||
(300024,1),(300025,1),(300026,1),(300027,1),(300028,1);
|
||||
# This should have join order of t2,t3,t4,t1
|
||||
EXPLAIN EXTENDED SELECT *
|
||||
FROM t1 INNER JOIN t2 ON t2.REV=t1.id
|
||||
INNER JOIN t3 ON t3.id=t2.profile_id
|
||||
INNER JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416 AND
|
||||
t2.REVTYPE=2;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 42 100.00 Using where
|
||||
1 SIMPLE t3 ref PRIMARY PRIMARY 4 test.t2.profile_id 1 100.00 Using where
|
||||
1 SIMPLE t4 ref PRIMARY PRIMARY 4 test.t3.person_id 1 100.00 Using index
|
||||
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.REV 1 100.00 Using where
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`timestamp` AS `timestamp`,`test`.`t1`.`modifiedBy` AS `modifiedBy`,`test`.`t2`.`id` AS `id`,`test`.`t2`.`REV` AS `REV`,`test`.`t2`.`REVTYPE` AS `REVTYPE`,`test`.`t2`.`profile_id` AS `profile_id`,`test`.`t3`.`id` AS `id`,`test`.`t3`.`REV` AS `REV`,`test`.`t3`.`person_id` AS `person_id`,`test`.`t4`.`id` AS `id`,`test`.`t4`.`REV` AS `REV` from `test`.`t1` join `test`.`t2` join `test`.`t3` join `test`.`t4` where `test`.`t2`.`REVTYPE` = 2 and `test`.`t4`.`id` = `test`.`t3`.`person_id` and `test`.`t3`.`id` = `test`.`t2`.`profile_id` and `test`.`t1`.`id` = `test`.`t2`.`REV` and `test`.`t1`.`timestamp` < 1294664900039 and `test`.`t1`.`timestamp` > 1294644616416
|
||||
SELECT *
|
||||
FROM t1 INNER JOIN t2 ON t2.REV=t1.id
|
||||
INNER JOIN t3 ON t3.id=t2.profile_id
|
||||
INNER JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416 AND
|
||||
t2.REVTYPE=2;
|
||||
id timestamp modifiedBy id REV REVTYPE profile_id id REV person_id id REV
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 1 300003 300003 1
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 117548 300006 300006 1
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 117548 300006 300006 97697
|
||||
# This should have join order of t2,t3,t4,t1 with the same plan as above
|
||||
# because all RIGHT JOIN operations are converted into INNER JOIN
|
||||
EXPLAIN EXTENDED SELECT *
|
||||
FROM t1 RIGHT JOIN t2 ON t2.REV=t1.id
|
||||
RIGHT JOIN t3 ON t3.id=t2.profile_id
|
||||
RIGHT JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416
|
||||
AND t2.REVTYPE=2;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 42 100.00 Using where
|
||||
1 SIMPLE t3 ref PRIMARY PRIMARY 4 test.t2.profile_id 1 100.00 Using where
|
||||
1 SIMPLE t4 ref PRIMARY PRIMARY 4 test.t3.person_id 1 100.00 Using index
|
||||
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.REV 1 100.00 Using where
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`timestamp` AS `timestamp`,`test`.`t1`.`modifiedBy` AS `modifiedBy`,`test`.`t2`.`id` AS `id`,`test`.`t2`.`REV` AS `REV`,`test`.`t2`.`REVTYPE` AS `REVTYPE`,`test`.`t2`.`profile_id` AS `profile_id`,`test`.`t3`.`id` AS `id`,`test`.`t3`.`REV` AS `REV`,`test`.`t3`.`person_id` AS `person_id`,`test`.`t4`.`id` AS `id`,`test`.`t4`.`REV` AS `REV` from `test`.`t4` join `test`.`t3` join `test`.`t2` join `test`.`t1` where `test`.`t2`.`REVTYPE` = 2 and `test`.`t1`.`id` = `test`.`t2`.`REV` and `test`.`t3`.`id` = `test`.`t2`.`profile_id` and `test`.`t4`.`id` = `test`.`t3`.`person_id` and `test`.`t1`.`timestamp` < 1294664900039 and `test`.`t1`.`timestamp` > 1294644616416
|
||||
SELECT *
|
||||
FROM t1 RIGHT JOIN t2 ON t2.REV=t1.id
|
||||
RIGHT JOIN t3 ON t3.id=t2.profile_id
|
||||
RIGHT JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416
|
||||
AND t2.REVTYPE=2;
|
||||
id timestamp modifiedBy id REV REVTYPE profile_id id REV person_id id REV
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 1 300003 300003 1
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 117548 300006 300006 1
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 117548 300006 300006 97697
|
||||
DROP TABLE t1,t2,t3,t4;
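
The conversion verified above happens because the WHERE clause is null-rejecting for the inner sides of the RIGHT JOINs. A self-contained sketch with hypothetical tables a and b shows the same rewrite in miniature:

CREATE TABLE a (id int PRIMARY KEY);
CREATE TABLE b (id int PRIMARY KEY, a_id int);
# The condition a.id > 0 is false for NULL-extended rows, so the outer join
# can be treated as an inner join and the join order chosen freely.
EXPLAIN EXTENDED
SELECT * FROM a RIGHT JOIN b ON b.a_id = a.id WHERE a.id > 0;
DROP TABLE a, b;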
|
||||
# end of 10.1 tests
|
||||
#
|
||||
# MDEV-17518: Range optimization doesn't use ON expressions from nested outer joins
|
||||
#
|
||||
create table t1(a int);
@ -1575,6 +1575,7 @@ CREATE TABLE t3 ( a int, b int NOT NULL , PRIMARY KEY (b)) ;
|
|||
INSERT INTO t3 VALUES
|
||||
(0,6),(0,7),(0,8),(2,9),(0,10),(2,21),(0,22),(2,23),(2,24),(2,25);
|
||||
|
||||
set @save_join_cache_level= @@join_cache_level;
|
||||
SET SESSION join_cache_level=4;
|
||||
|
||||
EXPLAIN EXTENDED
|
||||
|
@ -1588,7 +1589,7 @@ EXECUTE stmt;
|
|||
|
||||
DEALLOCATE PREPARE stmt;
|
||||
|
||||
SET SESSION join_cache_level=default;
|
||||
SET SESSION join_cache_level=@save_join_cache_level;
|
||||
|
||||
DROP TABLE t1,t2,t3;
|
||||
|
||||
|
@ -1976,7 +1977,7 @@ set @save_join_cache_level= @@join_cache_level;
|
|||
SET @@join_cache_level = 3;
|
||||
SELECT t1.b1+'0' , t2.b2 + '0' FROM t1 LEFT JOIN t2 ON b1 = b2;
|
||||
DROP TABLE t1, t2;
|
||||
set @join_cache_level= @save_join_cache_level;
|
||||
set @@join_cache_level= @save_join_cache_level;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-14779: using left join causes incorrect results with materialization and derived tables
|
||||
|
@ -2042,6 +2043,131 @@ DROP TABLE t1,t2;
|
|||
|
||||
--echo # end of 5.5 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-19258: chained right joins all converted to inner joins
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (
|
||||
id int NOT NULL AUTO_INCREMENT,
|
||||
timestamp bigint NOT NULL,
|
||||
modifiedBy varchar(255) DEFAULT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
CREATE TABLE t2 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
REVTYPE tinyint DEFAULT NULL,
|
||||
profile_id int DEFAULT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
|
||||
CREATE TABLE t3 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
person_id int DEFAULT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
|
||||
CREATE TABLE t4 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
|
||||
INSERT INTO t1 VALUES
|
||||
(1,1294391193890,'Cxqy$*9.kKeE'),(2,1294643906883,'rE4wqGV0gif@'),
|
||||
(3,1294643927456,'L?3yt(%dY$Br'),(4,1294644343525,'WH&ObiZ$#2S4'),
|
||||
(5,1294644616416,'YXnCbt?olUZ0'),(6,1294644954537,'8Npe4!(#lU@k'),
|
||||
(7,1294645046659,'knc0GhXB1#ib'),(8,1294645183829,'w*oPpVfuS8^m'),
|
||||
(9,1294645386701,'hwXR@3qVzrbU'),(10,1294645525982,'BeLW*Y9ndP0l'),
|
||||
(11,1294645627723,'nTegib^)qZ$I'),(12,1294650860266,'u62C^Kzx3wH8'),
|
||||
(13,1294657613745,'4&BkFjGa!qLg'),(14,1294660627161,')anpt312SCoh'),
|
||||
(15,1294661023336,'LtJ2PX?*kTmx'),(16,1294662838066,'POGRr@?#ofpl'),
|
||||
(17,1294663020989,'o.)1EOT2jnF7'),(18,1294663308065,'&TZ0F0LHE6.h'),
|
||||
(19,1294664900039,'j)kSC%^In$9d'),(20,1294668904556,'97glN50)cAo.'),
|
||||
(21,1294728056853,'lrKZxmw?I.Ek'),(22,1294728157174,'@P*SRg!pT.q?'),
|
||||
(23,1294728327099,'W9gPrptF.)8n'),(24,1294728418481,'$q*c^sM&URd#'),
|
||||
(25,1294728729620,'9*f4&bTPRtHo'),(26,1294728906014,')4VtTEnS7$oI'),
|
||||
(27,1294732190003,'8dkNSPq2u3AQ'),(28,1294733205065,'SV2N6IoEf438'),
|
||||
(29,1294741984927,'rBKj.0S^Ey%*'),(30,1294751748352,'j$2DvlBqk)Fw'),
|
||||
(31,1294753902212,'C$N6OrEw8elz'),(32,1294758120598,'DCSVZw!rnxXq'),
|
||||
(33,1294761769556,'OTS@QU8a6s5c'),(34,1294816845305,'IUE2stG0D3L5'),
|
||||
(35,1294816966909,'Xd16yka.9nHe'),(36,1294817116302,'lOQHZpm%!8qb'),
|
||||
(37,1294817374775,'^&pE3IhNf7ey'),(38,1294817538907,'oEn4#7C0Vhfp'),
|
||||
(39,1294818482950,'bx54J*O0Va&?'),(40,1294819047024,'J%@a&1.qgdb?'),
|
||||
(41,1294821826077,'C9kojr$L3Phz'),(42,1294825454458,'gG#BOnM80ZPi'),
|
||||
(43,1294904129918,'F^!TrjM#zdvc'),(44,1294904254166,'Va&Tb)k0RvlM'),
|
||||
(45,1294904414964,'dJjq0M6HvhR#'),(46,1294904505784,'nJmxg)ELqY(b'),
|
||||
(47,1294904602835,'dhF#or$Vge!7'),(48,1294904684728,'?bIh5E3l!0em'),
|
||||
(49,1294904877898,'Y*WflOdcxnk.'),(50,1294905002390,'*?H!lUgez5A.'),
|
||||
(51,1294905096043,'wlEIY3n9uz!p'),(52,1294905404621,'T?qv3H6&hlQD'),
|
||||
(53,1294905603922,'S@Bhys^Ti7bt'),(54,1294905788416,'KR?a5NVukz#l'),
|
||||
(55,1294905993190,'A*&q4kWhED!o'),(56,1294906205254,'fT0%7z0DF6h*'),
|
||||
(57,1294906319680,'LhzdW4?ivjR0'),(58,1294906424296,'h0KDlns%U*6T'),
|
||||
(59,1294906623844,'b$CfB1noI6Ax'),(60,1294911258896,'#T1*LP!3$Oys');
|
||||
|
||||
|
||||
INSERT INTO t2 VALUES
|
||||
(1,1,0,10209),(1,42480,1,10209),(1,61612,1,10209),(1,257545,1,10209),
|
||||
(1,385332,1,10209),(1,1687999,1,10209),(3,1,0,10210),(3,617411,2,10210),
|
||||
(4,11,0,14),(4,95149,1,10211),(4,607890,2,10211),(5,1,0,10212),
|
||||
(6,1,0,10213),(6,93344,1,10213),(6,295578,1,10213),(6,295579,1,10213),
|
||||
(6,295644,1,10213),(7,1,0,10214),(7,12,1,7),(7,688796,1,10214),
|
||||
(7,1140433,1,10214),(7,1715227,1,10214),(8,1,0,10215),(8,74253,1,10215),
|
||||
(8,93345,1,10215),(8,12,2,2),(9,1,0,10216),(9,93342,1,10216),
|
||||
(9,122354,1,10216),(9,301499,2,10216),(10,11,0,5),(10,93343,1,10217),
|
||||
(10,122355,1,10217),(10,123050,1,10217),(10,301500,2,10217),(11,1,0,10218),
|
||||
(11,87852,1,10218),(11,605499,2,10218),(12,1,0,10219),(12,88024,1,10219),
|
||||
(12,605892,2,10219),(13,1,0,10220);
|
||||
|
||||
INSERT INTO t3 VALUES
|
||||
(1,1,300003),(1,117548,NULL),(2,1,300003),(2,117548,300006),
|
||||
(3,1,300153),(3,117548,NULL),(4,1,300153),(4,117548,NULL),
|
||||
(5,1,300153),(5,117548,NULL),(6,1,300182),(6,117548,NULL),
|
||||
(7,1,300205),(7,117548,NULL),(8,1,300217),(8,117548,NULL),
|
||||
(9,1,300290),(9,117548,NULL),(10,1,300290),(10,117548,NULL),
|
||||
(11,1,300405),(11,117548,NULL),(12,1,300670),(12,117548,NULL),
|
||||
(13,1,300670),(13,117548,NULL),(14,1,300006),(14,117548,NULL),
|
||||
(15,1,300671),(15,117548,NULL),(16,1,300732),(16,117548,NULL);
|
||||
|
||||
INSERT INTO t4 VALUES
|
||||
(300000,1),(300001,1),(300003,1),(300004,1),
|
||||
(300005,1),(300005,688796),(300006,1),(300006,97697),
|
||||
(300009,1),(300010,1),(300011,1),(300012,1),(300013,1),
|
||||
(300014,1),(300015,1),(300016,1),(300017,1),(300018,1),
|
||||
(300019,1),(300020,1),(300021,1),(300022,1),(300023,1),
|
||||
(300024,1),(300025,1),(300026,1),(300027,1),(300028,1);
|
||||
|
||||
let $q1=
|
||||
SELECT *
|
||||
FROM t1 INNER JOIN t2 ON t2.REV=t1.id
|
||||
INNER JOIN t3 ON t3.id=t2.profile_id
|
||||
INNER JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416 AND
|
||||
t2.REVTYPE=2;
|
||||
|
||||
--echo # This should have join order of t2,t3,t4,t1
|
||||
eval EXPLAIN EXTENDED $q1;
|
||||
eval $q1;
|
||||
|
||||
let $q2=
|
||||
SELECT *
|
||||
FROM t1 RIGHT JOIN t2 ON t2.REV=t1.id
|
||||
RIGHT JOIN t3 ON t3.id=t2.profile_id
|
||||
RIGHT JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416
|
||||
AND t2.REVTYPE=2;
|
||||
|
||||
--echo # This should have join order of t2,t3,t4,t1 with the same plan as above
|
||||
--echo # because all RIGHT JOIN operations are converted into INNER JOIN
|
||||
eval EXPLAIN EXTENDED $q2;
|
||||
eval $q2;
|
||||
|
||||
DROP TABLE t1,t2,t3,t4;
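
The let/eval idiom used above keeps a single copy of each query for both the EXPLAIN and the execution. A minimal mysqltest sketch of the same pattern, with a placeholder query:

# $q is a mysqltest variable; eval expands it before sending the statement.
let $q= SELECT 1 AS answer;
eval EXPLAIN EXTENDED $q;
eval $q;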
|
||||
|
||||
--echo # end of 10.1 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-17518: Range optimization doesn't use ON expressions from nested outer joins
|
||||
--echo #
@ -2014,6 +2014,7 @@ INSERT INTO t2 VALUES
|
|||
CREATE TABLE t3 ( a int, b int NOT NULL , PRIMARY KEY (b)) ;
|
||||
INSERT INTO t3 VALUES
|
||||
(0,6),(0,7),(0,8),(2,9),(0,10),(2,21),(0,22),(2,23),(2,24),(2,25);
|
||||
set @save_join_cache_level= @@join_cache_level;
|
||||
SET SESSION join_cache_level=4;
|
||||
EXPLAIN EXTENDED
|
||||
SELECT * FROM (t2 LEFT JOIN t1 ON t1.b = t2.b) JOIN t3 ON t1.b = t3.b;
|
||||
|
@ -2032,7 +2033,7 @@ EXECUTE stmt;
|
|||
b b a b
|
||||
10 10 0 10
|
||||
DEALLOCATE PREPARE stmt;
|
||||
SET SESSION join_cache_level=default;
|
||||
SET SESSION join_cache_level=@save_join_cache_level;
|
||||
DROP TABLE t1,t2,t3;
|
||||
#
|
||||
# LP bug #943543: LEFT JOIN converted to JOIN with
|
||||
|
@ -2089,7 +2090,7 @@ WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
|
|||
ORDER BY t1.b;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 ref idx idx 4 const 2 100.00 Using where
|
||||
1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00
|
||||
1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where `test`.`t2`.`c` = `test`.`t1`.`a` and `test`.`t1`.`b` = 5 order by `test`.`t1`.`b`
|
||||
SELECT t1.b, t2.c, t2.d FROM t2 JOIN t1 ON t2.c = t1.a
|
||||
|
@ -2097,16 +2098,16 @@ WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
|
|||
ORDER BY t1.b;
|
||||
b c d
|
||||
5 8 88
|
||||
5 8 81
|
||||
5 8 88
|
||||
5 8 81
|
||||
5 8 81
|
||||
EXPLAIN EXTENDED
|
||||
SELECT t1.b, t2.c, t2.d FROM t2 LEFT JOIN t1 ON t2.c = t1.a
|
||||
WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
|
||||
ORDER BY t1.b;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 ref PRIMARY,idx idx 4 const 2 100.00 Using where
|
||||
1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00
|
||||
1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where `test`.`t2`.`c` = `test`.`t1`.`a` and `test`.`t1`.`b` = 5 order by `test`.`t1`.`b`
|
||||
SELECT t1.b, t2.c, t2.d FROM t2 LEFT JOIN t1 ON t2.c = t1.a
|
||||
|
@ -2114,9 +2115,9 @@ WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
|
|||
ORDER BY t1.b;
|
||||
b c d
|
||||
5 8 88
|
||||
5 8 81
|
||||
5 8 88
|
||||
5 8 81
|
||||
5 8 81
|
||||
DROP TABLE t1,t2;
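
BKA plans like the ones above only appear under specific settings. A minimal sketch of what has to be enabled (illustrative session only, assuming the t1/t2 defined for this test are still present):

# join_cache_level 5-8 allows the BKA/BKAH buffer variants; the MRR switches
# drive the "Key-ordered Rowid-ordered scan" shown in the Extra column.
set join_cache_level=6;
set optimizer_switch='mrr=on,mrr_sort_keys=on';
EXPLAIN SELECT t1.b, t2.c, t2.d FROM t2 JOIN t1 ON t2.c = t1.a WHERE t1.b = 5;
set optimizer_switch=default;
set join_cache_level=default;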
|
||||
#
|
||||
# Bug mdev-4336: LEFT JOIN with disjunctive
|
||||
|
@ -2232,10 +2233,10 @@ SELECT * FROM t1 LEFT JOIN t2 LEFT JOIN t3 ON i2 = i3 ON i1 = i3
|
|||
WHERE d3 IS NULL;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
|
||||
1 SIMPLE t2 hash_ALL NULL #hash#$hj 5 test.t1.i1 2 100.00 Using where; Using join buffer (flat, BNLH join)
|
||||
1 SIMPLE t3 hash_ALL NULL #hash#$hj 5 test.t1.i1 2 100.00 Using where; Using join buffer (incremental, BNLH join)
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t2`.`i2` AS `i2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`d3` AS `d3` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`i2` = `test`.`t1`.`i1` and `test`.`t3`.`i3` = `test`.`t1`.`i1`) where `test`.`t3`.`d3` = 0 or `test`.`t3`.`d3` is null
|
||||
Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t2`.`i2` AS `i2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`d3` AS `d3` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`i2` = `test`.`t1`.`i1` and `test`.`t3`.`i3` = `test`.`t1`.`i1` and `test`.`t1`.`i1` is not null and `test`.`t1`.`i1` is not null) where `test`.`t3`.`d3` = 0 or `test`.`t3`.`d3` is null
|
||||
DROP TABLE t1,t2,t3;
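
The Note 1003 lines above are the optimizer's rewritten statement, including the injected "is not null" guards for hash-join keys. A self-contained sketch, with hypothetical tables w1/w2, of reading that note back interactively:

CREATE TABLE w1 (i1 int);
CREATE TABLE w2 (i2 int);
# EXPLAIN EXTENDED leaves the rewritten query in the warning stack as Note 1003.
EXPLAIN EXTENDED SELECT * FROM w1 LEFT JOIN w2 ON w2.i2 = w1.i1 WHERE w2.i2 IS NULL;
SHOW WARNINGS;
DROP TABLE w1, w2;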
|
||||
#
|
||||
# Bug mdev-6705: wrong on expression after constant row substitution
|
||||
|
@ -2253,9 +2254,9 @@ WHERE b IN (1,2,3) OR b = d;
|
|||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 system NULL NULL NULL NULL 1 100.00
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
|
||||
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t3 hash_ALL NULL #hash#$hj 5 const 2 100.00 Using where; Using join buffer (flat, BNLH join)
|
||||
Warnings:
|
||||
Note 1003 select 10 AS `a`,8 AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t3`.`d` AS `d` from `test`.`t2` left join `test`.`t3` on(`test`.`t3`.`d` = 10) where `test`.`t2`.`c` = 8 and `test`.`t3`.`d` = 8
|
||||
Note 1003 select 10 AS `a`,8 AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t3`.`d` AS `d` from `test`.`t2` left join `test`.`t3` on(`test`.`t3`.`d` = 10 and 10 is not null) where `test`.`t2`.`c` = 8 and `test`.`t3`.`d` = 8
|
||||
SELECT * FROM t1 INNER JOIN t2 ON c = b LEFT JOIN t3 ON d = a
|
||||
WHERE b IN (1,2,3) OR b = d;
|
||||
a b c d
|
||||
|
@ -2282,11 +2283,11 @@ id select_type table type possible_keys key key_len ref rows Extra
|
|||
explain select * from t1 left join t2 on t2.b is null;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 9
|
||||
1 SIMPLE t2 ref b b 5 const 780 Using where
|
||||
1 SIMPLE t2 ref b b 5 const 780 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
explain select * from t1 left join t2 on t2.c is null;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 9
|
||||
1 SIMPLE t2 ref c c 5 const 393 Using where
|
||||
1 SIMPLE t2 ref c c 5 const 393 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
drop table t1,t2;
|
||||
#
|
||||
# MDEV-10006: optimizer doesn't convert outer join to inner on views with WHERE clause
|
||||
|
@ -2328,8 +2329,8 @@ LEFT JOIN t3 on t2.i2 = t3.i3
|
|||
WHERE v3 = 4;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t3 ref PRIMARY,v3 v3 5 const 1 100.00
|
||||
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t3.i3 1 100.00
|
||||
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t3.i3 1 100.00
|
||||
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t3.i3 1 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t3.i3 1 100.00 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t1`.`v1` AS `v1`,`test`.`t2`.`i2` AS `i2`,`test`.`t2`.`v2` AS `v2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`v3` AS `v3` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t3`.`v3` = 4 and `test`.`t1`.`i1` = `test`.`t3`.`i3` and `test`.`t2`.`i2` = `test`.`t3`.`i3`
|
||||
# This should have the same join order like the query above:
|
||||
|
@ -2346,8 +2347,8 @@ AND 1 = 1
|
|||
WHERE v3 = 4;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t3 ref PRIMARY,v3 v3 5 const 1 100.00
|
||||
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t3.i3 1 100.00
|
||||
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t3.i3 1 100.00
|
||||
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t3.i3 1 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t3.i3 1 100.00 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t1`.`v1` AS `v1`,`test`.`t2`.`i2` AS `i2`,`test`.`t2`.`v2` AS `v2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`v3` AS `v3` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t3`.`v3` = 4 and `test`.`t1`.`i1` = `test`.`t3`.`i3` and `test`.`t2`.`i2` = `test`.`t3`.`i3`
|
||||
drop table t1,t2,t3;
|
||||
|
@ -2370,9 +2371,9 @@ ON t1.x = t2.x
|
|||
WHERE IFNULL(t2.x,0)=0;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t2 hash_ALL NULL #hash#$hj 5 test.t1.x 2 100.00 Using where; Using join buffer (flat, BNLH join)
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where ifnull(`test`.`t2`.`x`,0) = 0
|
||||
Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x` and `test`.`t1`.`x` is not null) where ifnull(`test`.`t2`.`x`,0) = 0
|
||||
SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
|
||||
FROM t t1 LEFT JOIN t t2
|
||||
ON t1.x = t2.x
|
||||
|
@ -2386,9 +2387,9 @@ ON t1.x = t2.x
|
|||
WHERE f(t2.x,0)=0;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t2 hash_ALL NULL #hash#$hj 5 test.t1.x 2 100.00 Using where; Using join buffer (flat, BNLH join)
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where `f`(`test`.`t2`.`x`,0) = 0
|
||||
Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x` and `test`.`t1`.`x` is not null) where `f`(`test`.`t2`.`x`,0) = 0
|
||||
drop function f;
|
||||
drop table t;
|
||||
CREATE TABLE t1 (
|
||||
|
@ -2426,9 +2427,9 @@ FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
|
|||
WHERE IFNULL(t2.col3,0) = 0;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t2 hash_ALL NULL #hash#$hj 17 test.t1.col1 2 100.00 Using where; Using join buffer (flat, BNLH join)
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where ifnull(`test`.`t2`.`col3`,0) = 0
|
||||
Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1` and `test`.`t1`.`col1` is not null) where ifnull(`test`.`t2`.`col3`,0) = 0
|
||||
SELECT t1.col1, t2.col1, t2.col3
|
||||
FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
|
||||
WHERE f1(t2.col3,0) = 0;
|
||||
|
@ -2440,9 +2441,9 @@ FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
|
|||
WHERE f1(t2.col3,0) = 0;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t2 hash_ALL NULL #hash#$hj 17 test.t1.col1 2 100.00 Using where; Using join buffer (flat, BNLH join)
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where `f1`(`test`.`t2`.`col3`,0) = 0
|
||||
Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1` and `test`.`t1`.`col1` is not null) where `f1`(`test`.`t2`.`col3`,0) = 0
|
||||
DROP FUNCTION f1;
|
||||
DROP TABLE t1,t2;
|
||||
#
|
||||
|
@ -2459,7 +2460,7 @@ t1.b1+'0' t2.b2 + '0'
|
|||
0 0
|
||||
1 1
|
||||
DROP TABLE t1, t2;
|
||||
set @join_cache_level= @save_join_cache_level;
|
||||
set @@join_cache_level= @save_join_cache_level;
|
||||
#
|
||||
# MDEV-14779: using left join causes incorrect results with materialization and derived tables
|
||||
#
|
||||
|
@ -2526,6 +2527,143 @@ v2
|
|||
DROP TABLE t1,t2;
|
||||
# end of 5.5 tests
|
||||
#
|
||||
# MDEV-19258: chained right joins all converted to inner joins
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
id int NOT NULL AUTO_INCREMENT,
|
||||
timestamp bigint NOT NULL,
|
||||
modifiedBy varchar(255) DEFAULT NULL,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
CREATE TABLE t2 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
REVTYPE tinyint DEFAULT NULL,
|
||||
profile_id int DEFAULT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
CREATE TABLE t3 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
person_id int DEFAULT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
CREATE TABLE t4 (
|
||||
id int NOT NULL,
|
||||
REV int NOT NULL,
|
||||
PRIMARY KEY (id,REV)
|
||||
);
|
||||
INSERT INTO t1 VALUES
|
||||
(1,1294391193890,'Cxqy$*9.kKeE'),(2,1294643906883,'rE4wqGV0gif@'),
|
||||
(3,1294643927456,'L?3yt(%dY$Br'),(4,1294644343525,'WH&ObiZ$#2S4'),
|
||||
(5,1294644616416,'YXnCbt?olUZ0'),(6,1294644954537,'8Npe4!(#lU@k'),
|
||||
(7,1294645046659,'knc0GhXB1#ib'),(8,1294645183829,'w*oPpVfuS8^m'),
|
||||
(9,1294645386701,'hwXR@3qVzrbU'),(10,1294645525982,'BeLW*Y9ndP0l'),
|
||||
(11,1294645627723,'nTegib^)qZ$I'),(12,1294650860266,'u62C^Kzx3wH8'),
|
||||
(13,1294657613745,'4&BkFjGa!qLg'),(14,1294660627161,')anpt312SCoh'),
|
||||
(15,1294661023336,'LtJ2PX?*kTmx'),(16,1294662838066,'POGRr@?#ofpl'),
|
||||
(17,1294663020989,'o.)1EOT2jnF7'),(18,1294663308065,'&TZ0F0LHE6.h'),
|
||||
(19,1294664900039,'j)kSC%^In$9d'),(20,1294668904556,'97glN50)cAo.'),
|
||||
(21,1294728056853,'lrKZxmw?I.Ek'),(22,1294728157174,'@P*SRg!pT.q?'),
|
||||
(23,1294728327099,'W9gPrptF.)8n'),(24,1294728418481,'$q*c^sM&URd#'),
|
||||
(25,1294728729620,'9*f4&bTPRtHo'),(26,1294728906014,')4VtTEnS7$oI'),
|
||||
(27,1294732190003,'8dkNSPq2u3AQ'),(28,1294733205065,'SV2N6IoEf438'),
|
||||
(29,1294741984927,'rBKj.0S^Ey%*'),(30,1294751748352,'j$2DvlBqk)Fw'),
|
||||
(31,1294753902212,'C$N6OrEw8elz'),(32,1294758120598,'DCSVZw!rnxXq'),
|
||||
(33,1294761769556,'OTS@QU8a6s5c'),(34,1294816845305,'IUE2stG0D3L5'),
|
||||
(35,1294816966909,'Xd16yka.9nHe'),(36,1294817116302,'lOQHZpm%!8qb'),
|
||||
(37,1294817374775,'^&pE3IhNf7ey'),(38,1294817538907,'oEn4#7C0Vhfp'),
|
||||
(39,1294818482950,'bx54J*O0Va&?'),(40,1294819047024,'J%@a&1.qgdb?'),
|
||||
(41,1294821826077,'C9kojr$L3Phz'),(42,1294825454458,'gG#BOnM80ZPi'),
|
||||
(43,1294904129918,'F^!TrjM#zdvc'),(44,1294904254166,'Va&Tb)k0RvlM'),
|
||||
(45,1294904414964,'dJjq0M6HvhR#'),(46,1294904505784,'nJmxg)ELqY(b'),
|
||||
(47,1294904602835,'dhF#or$Vge!7'),(48,1294904684728,'?bIh5E3l!0em'),
|
||||
(49,1294904877898,'Y*WflOdcxnk.'),(50,1294905002390,'*?H!lUgez5A.'),
|
||||
(51,1294905096043,'wlEIY3n9uz!p'),(52,1294905404621,'T?qv3H6&hlQD'),
|
||||
(53,1294905603922,'S@Bhys^Ti7bt'),(54,1294905788416,'KR?a5NVukz#l'),
|
||||
(55,1294905993190,'A*&q4kWhED!o'),(56,1294906205254,'fT0%7z0DF6h*'),
|
||||
(57,1294906319680,'LhzdW4?ivjR0'),(58,1294906424296,'h0KDlns%U*6T'),
|
||||
(59,1294906623844,'b$CfB1noI6Ax'),(60,1294911258896,'#T1*LP!3$Oys');
|
||||
INSERT INTO t2 VALUES
|
||||
(1,1,0,10209),(1,42480,1,10209),(1,61612,1,10209),(1,257545,1,10209),
|
||||
(1,385332,1,10209),(1,1687999,1,10209),(3,1,0,10210),(3,617411,2,10210),
|
||||
(4,11,0,14),(4,95149,1,10211),(4,607890,2,10211),(5,1,0,10212),
|
||||
(6,1,0,10213),(6,93344,1,10213),(6,295578,1,10213),(6,295579,1,10213),
|
||||
(6,295644,1,10213),(7,1,0,10214),(7,12,1,7),(7,688796,1,10214),
|
||||
(7,1140433,1,10214),(7,1715227,1,10214),(8,1,0,10215),(8,74253,1,10215),
|
||||
(8,93345,1,10215),(8,12,2,2),(9,1,0,10216),(9,93342,1,10216),
|
||||
(9,122354,1,10216),(9,301499,2,10216),(10,11,0,5),(10,93343,1,10217),
|
||||
(10,122355,1,10217),(10,123050,1,10217),(10,301500,2,10217),(11,1,0,10218),
|
||||
(11,87852,1,10218),(11,605499,2,10218),(12,1,0,10219),(12,88024,1,10219),
|
||||
(12,605892,2,10219),(13,1,0,10220);
|
||||
INSERT INTO t3 VALUES
|
||||
(1,1,300003),(1,117548,NULL),(2,1,300003),(2,117548,300006),
|
||||
(3,1,300153),(3,117548,NULL),(4,1,300153),(4,117548,NULL),
|
||||
(5,1,300153),(5,117548,NULL),(6,1,300182),(6,117548,NULL),
|
||||
(7,1,300205),(7,117548,NULL),(8,1,300217),(8,117548,NULL),
|
||||
(9,1,300290),(9,117548,NULL),(10,1,300290),(10,117548,NULL),
|
||||
(11,1,300405),(11,117548,NULL),(12,1,300670),(12,117548,NULL),
|
||||
(13,1,300670),(13,117548,NULL),(14,1,300006),(14,117548,NULL),
|
||||
(15,1,300671),(15,117548,NULL),(16,1,300732),(16,117548,NULL);
|
||||
INSERT INTO t4 VALUES
|
||||
(300000,1),(300001,1),(300003,1),(300004,1),
|
||||
(300005,1),(300005,688796),(300006,1),(300006,97697),
|
||||
(300009,1),(300010,1),(300011,1),(300012,1),(300013,1),
|
||||
(300014,1),(300015,1),(300016,1),(300017,1),(300018,1),
|
||||
(300019,1),(300020,1),(300021,1),(300022,1),(300023,1),
|
||||
(300024,1),(300025,1),(300026,1),(300027,1),(300028,1);
|
||||
# This should have join order of t2,t3,t4,t1
|
||||
EXPLAIN EXTENDED SELECT *
|
||||
FROM t1 INNER JOIN t2 ON t2.REV=t1.id
|
||||
INNER JOIN t3 ON t3.id=t2.profile_id
|
||||
INNER JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416 AND
|
||||
t2.REVTYPE=2;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 42 100.00 Using where
|
||||
1 SIMPLE t3 ref PRIMARY PRIMARY 4 test.t2.profile_id 1 100.00 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
1 SIMPLE t4 ref PRIMARY PRIMARY 4 test.t3.person_id 1 100.00 Using index
|
||||
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.REV 1 100.00 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`timestamp` AS `timestamp`,`test`.`t1`.`modifiedBy` AS `modifiedBy`,`test`.`t2`.`id` AS `id`,`test`.`t2`.`REV` AS `REV`,`test`.`t2`.`REVTYPE` AS `REVTYPE`,`test`.`t2`.`profile_id` AS `profile_id`,`test`.`t3`.`id` AS `id`,`test`.`t3`.`REV` AS `REV`,`test`.`t3`.`person_id` AS `person_id`,`test`.`t4`.`id` AS `id`,`test`.`t4`.`REV` AS `REV` from `test`.`t1` join `test`.`t2` join `test`.`t3` join `test`.`t4` where `test`.`t2`.`REVTYPE` = 2 and `test`.`t4`.`id` = `test`.`t3`.`person_id` and `test`.`t3`.`id` = `test`.`t2`.`profile_id` and `test`.`t1`.`id` = `test`.`t2`.`REV` and `test`.`t1`.`timestamp` < 1294664900039 and `test`.`t1`.`timestamp` > 1294644616416
|
||||
SELECT *
|
||||
FROM t1 INNER JOIN t2 ON t2.REV=t1.id
|
||||
INNER JOIN t3 ON t3.id=t2.profile_id
|
||||
INNER JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416 AND
|
||||
t2.REVTYPE=2;
|
||||
id timestamp modifiedBy id REV REVTYPE profile_id id REV person_id id REV
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 1 300003 300003 1
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 117548 300006 300006 1
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 117548 300006 300006 97697
|
||||
# This should have join order of t2,t3,t4,t1 with the same plan as above
|
||||
# because all RIGHT JOIN operations are converted into INNER JOIN
|
||||
EXPLAIN EXTENDED SELECT *
|
||||
FROM t1 RIGHT JOIN t2 ON t2.REV=t1.id
|
||||
RIGHT JOIN t3 ON t3.id=t2.profile_id
|
||||
RIGHT JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416
|
||||
AND t2.REVTYPE=2;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 42 100.00 Using where
|
||||
1 SIMPLE t3 ref PRIMARY PRIMARY 4 test.t2.profile_id 1 100.00 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
1 SIMPLE t4 ref PRIMARY PRIMARY 4 test.t3.person_id 1 100.00 Using index
|
||||
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.REV 1 100.00 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`timestamp` AS `timestamp`,`test`.`t1`.`modifiedBy` AS `modifiedBy`,`test`.`t2`.`id` AS `id`,`test`.`t2`.`REV` AS `REV`,`test`.`t2`.`REVTYPE` AS `REVTYPE`,`test`.`t2`.`profile_id` AS `profile_id`,`test`.`t3`.`id` AS `id`,`test`.`t3`.`REV` AS `REV`,`test`.`t3`.`person_id` AS `person_id`,`test`.`t4`.`id` AS `id`,`test`.`t4`.`REV` AS `REV` from `test`.`t4` join `test`.`t3` join `test`.`t2` join `test`.`t1` where `test`.`t2`.`REVTYPE` = 2 and `test`.`t1`.`id` = `test`.`t2`.`REV` and `test`.`t3`.`id` = `test`.`t2`.`profile_id` and `test`.`t4`.`id` = `test`.`t3`.`person_id` and `test`.`t1`.`timestamp` < 1294664900039 and `test`.`t1`.`timestamp` > 1294644616416
|
||||
SELECT *
|
||||
FROM t1 RIGHT JOIN t2 ON t2.REV=t1.id
|
||||
RIGHT JOIN t3 ON t3.id=t2.profile_id
|
||||
RIGHT JOIN t4 ON t4.id=t3.person_id
|
||||
WHERE t1.timestamp < 1294664900039 AND t1.timestamp > 1294644616416
|
||||
AND t2.REVTYPE=2;
|
||||
id timestamp modifiedBy id REV REVTYPE profile_id id REV person_id id REV
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 1 300003 300003 1
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 117548 300006 300006 1
|
||||
12 1294650860266 u62C^Kzx3wH8 8 12 2 2 2 117548 300006 300006 97697
|
||||
DROP TABLE t1,t2,t3,t4;
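A minimal sketch of the conversion this MDEV-19258 test exercises, using hypothetical table names that are not part of the suite: a WHERE predicate that can never hold for a NULL-complemented row makes the outer join equivalent to an inner join, which is what allows the same t2,t3,t4,t1 join order in both plans above.
# Hypothetical illustration (parent/child are made-up names): parent is the
# NULL-complemented side of the RIGHT JOIN, so the null-rejecting predicate
# parent.id > 0 in WHERE lets the optimizer run the query as an INNER JOIN
# and freely reorder the tables.
CREATE TABLE parent (id INT PRIMARY KEY);
CREATE TABLE child (id INT PRIMARY KEY, parent_id INT);
EXPLAIN EXTENDED
SELECT * FROM parent RIGHT JOIN child ON parent.id = child.parent_id
WHERE parent.id > 0;
# The Note 1003 warning is expected to print "... join ..." rather than
# "... right join ...", confirming the conversion.
DROP TABLE parent, child;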
|
||||
# end of 10.1 tests
|
||||
#
|
||||
# MDEV-17518: Range optimization doesn't use ON expressions from nested outer joins
|
||||
#
|
||||
create table t1(a int);
|
||||
|
@ -2538,13 +2676,13 @@ insert into t3 select A.a + B.a* 10 + C.a * 100, 12345 from t1 A, t1 B, t1 C;
|
|||
explain select * from t1 left join t3 on t1.a=t3.b and t3.a<5;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 10
|
||||
1 SIMPLE t3 range a a 5 NULL 5 Using where; Rowid-ordered scan; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t3 hash_range a #hash#$hj:a 5:5 test.t1.a 5 Using where; Rowid-ordered scan; Using join buffer (flat, BNLH join)
|
||||
# This must use range for table t3, too:
|
||||
explain select * from t1 left join (t3 join t2) on t1.a=t3.b and t3.a<5;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 10
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t3 range a a 5 NULL 5 Using where; Rowid-ordered scan; Using join buffer (incremental, BNL join)
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t3 hash_range a #hash#$hj:a 5:5 test.t1.a 5 Using where; Rowid-ordered scan; Using join buffer (incremental, BNLH join)
|
||||
#
|
||||
# .. part 2: make sure condition selectivity can use the condition too.
|
||||
#
|
||||
|
@ -2570,8 +2708,8 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` A
|
|||
explain extended select * from t1 left join (t3 join t2) on t1.a=t3.b and t3.a<5;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
|
||||
1 SIMPLE t3 ALL NULL NULL NULL NULL 1000 1.96 Using where
|
||||
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
|
||||
1 SIMPLE t3 hash_ALL NULL #hash#$hj 5 test.t1.a 1000 1.96 Using where; Using join buffer (incremental, BNLH join)
|
||||
Warnings:
|
||||
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t2`.`a` AS `a` from `test`.`t1` left join (`test`.`t3` join `test`.`t2`) on(`test`.`t3`.`b` = `test`.`t1`.`a` and `test`.`t3`.`a` < 5 and `test`.`t1`.`a` is not null) where 1
|
||||
drop table t1,t2,t3;
|
||||
@ -1,5 +1,7 @@
|
|||
--source include/have_case_insensitive_file_system.inc
|
||||
--source include/have_innodb.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
--echo #
|
||||
--echo # Bug#46941 crash with lower_case_table_names=2 and
|
||||
@ -2,6 +2,8 @@
|
|||
-- source include/not_embedded.inc
|
||||
# need to have the dynamic loading turned on for the client plugin tests
|
||||
--source include/have_plugin_auth.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
SET @old_general_log= @@global.general_log;
|
||||
SET @old_slow_query_log= @@global.slow_query_log;
|
||||
@ -2,6 +2,8 @@
|
|||
|
||||
# The non-blocking API is not supported in the embedded server.
|
||||
-- source include/not_embedded.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
SET @old_general_log= @@global.general_log;
|
||||
SET @old_slow_query_log= @@global.slow_query_log;
|
||||
@ -19,6 +19,9 @@ let collation=utf8_unicode_ci;
|
|||
# There are tables in 'mysql' database of type innodb
|
||||
--source include/have_innodb.inc
|
||||
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
disable_query_log;
|
||||
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
|
||||
enable_query_log;
|
||||
@ -1248,7 +1248,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
|
|||
{
|
||||
"index": "a",
|
||||
"covering": true,
|
||||
"ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
|
||||
"ranges": ["(2,3) <= (b,c) <= (2,3)"],
|
||||
"rows": 8,
|
||||
"cost": 2.2
|
||||
}
|
||||
|
@ -1264,7 +1264,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
|
|||
"rows": 8,
|
||||
"cost": 2.2,
|
||||
"key_parts_used_for_access": ["a", "b", "c"],
|
||||
"ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
|
||||
"ranges": ["(2,3) <= (b,c) <= (2,3)"],
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
},
|
||||
|
@ -1446,7 +1446,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
|
|||
{
|
||||
"index": "id",
|
||||
"covering": true,
|
||||
"ranges": ["0x24a20f <= a"],
|
||||
"ranges": ["(0x24a20f) <= (a)"],
|
||||
"rows": 9,
|
||||
"cost": 2.35
|
||||
}
|
||||
|
@ -1462,7 +1462,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
|
|||
"rows": 9,
|
||||
"cost": 2.35,
|
||||
"key_parts_used_for_access": ["id"],
|
||||
"ranges": ["0x24a20f <= a"],
|
||||
"ranges": ["(0x24a20f) <= (a)"],
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
},
|
||||
|
@ -1624,7 +1624,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
|
|||
{
|
||||
"index": "id",
|
||||
"covering": true,
|
||||
"ranges": ["0x24a20f <= a <= 0x24a20f"],
|
||||
"ranges": ["(0x24a20f) <= (a) <= (0x24a20f)"],
|
||||
"rows": 9,
|
||||
"cost": 2.35
|
||||
}
|
||||
|
@ -1640,7 +1640,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
|
|||
"rows": 9,
|
||||
"cost": 2.35,
|
||||
"key_parts_used_for_access": ["id", "a"],
|
||||
"ranges": ["0x24a20f <= a <= 0x24a20f"],
|
||||
"ranges": ["(0x24a20f) <= (a) <= (0x24a20f)"],
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
},
|
||||
|
@ -1856,7 +1856,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a_c",
|
||||
"ranges": ["1 <= a <= 1"],
|
||||
"ranges": ["(1) <= (a) <= (1)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
|
@ -1866,7 +1866,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
|||
},
|
||||
{
|
||||
"index": "a_b",
|
||||
"ranges": ["1 <= a <= 1 AND 2 <= b <= 2"],
|
||||
"ranges": ["(1,2) <= (a,b) <= (1,2)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
|
@ -1885,7 +1885,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
|||
"type": "range_scan",
|
||||
"index": "a_b",
|
||||
"rows": 21,
|
||||
"ranges": ["1 <= a <= 1 AND 2 <= b <= 2"]
|
||||
"ranges": ["(1,2) <= (a,b) <= (1,2)"]
|
||||
},
|
||||
"rows_for_plan": 21,
|
||||
"cost_for_plan": 27.445,
|
||||
|
@ -2025,7 +2025,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a_c",
|
||||
"ranges": ["1 <= a <= 1"],
|
||||
"ranges": ["(1) <= (a) <= (1)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
|
@ -2044,7 +2044,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
|||
"type": "range_scan",
|
||||
"index": "a_c",
|
||||
"rows": 180,
|
||||
"ranges": ["1 <= a <= 1"]
|
||||
"ranges": ["(1) <= (a) <= (1)"]
|
||||
},
|
||||
"rows_for_plan": 180,
|
||||
"cost_for_plan": 231.72,
|
||||
|
@ -2895,7 +2895,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "pk",
|
||||
"ranges": ["2 <= pk <= 2"],
|
||||
"ranges": ["(2) <= (pk) <= (2)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
|
@ -2906,7 +2906,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
|
|||
},
|
||||
{
|
||||
"index": "pk_a",
|
||||
"ranges": ["2 <= pk <= 2 AND 5 <= a <= 5"],
|
||||
"ranges": ["(2,5) <= (pk,a) <= (2,5)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
|
@ -2917,7 +2917,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
|
|||
},
|
||||
{
|
||||
"index": "pk_a_b",
|
||||
"ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"],
|
||||
"ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
|
@ -2964,7 +2964,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
|
|||
"type": "range_scan",
|
||||
"index": "pk_a_b",
|
||||
"rows": 1,
|
||||
"ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"]
|
||||
"ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"]
|
||||
},
|
||||
"rows_for_plan": 1,
|
||||
"cost_for_plan": 1.1793,
|
||||
|
@ -3338,7 +3338,7 @@ explain delete from t0 where t0.a<3 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a",
|
||||
"ranges": ["NULL < a < 3"],
|
||||
"ranges": ["(NULL) < (a) < (3)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
|
@ -3354,7 +3354,7 @@ explain delete from t0 where t0.a<3 {
|
|||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 3,
|
||||
"ranges": ["NULL < a < 3"]
|
||||
"ranges": ["(NULL) < (a) < (3)"]
|
||||
},
|
||||
"rows_for_plan": 3,
|
||||
"cost_for_plan": 5.007,
|
||||
|
@ -3481,7 +3481,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a",
|
||||
"ranges": ["NULL < a < 3"],
|
||||
"ranges": ["(NULL) < (a) < (3)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
|
@ -3500,7 +3500,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
|
|||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 3,
|
||||
"ranges": ["NULL < a < 3"]
|
||||
"ranges": ["(NULL) < (a) < (3)"]
|
||||
},
|
||||
"rows_for_plan": 3,
|
||||
"cost_for_plan": 1.407,
|
||||
|
@ -3546,7 +3546,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a",
|
||||
"ranges": ["NULL < a < 3"],
|
||||
"ranges": ["(NULL) < (a) < (3)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
|
@ -3565,7 +3565,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
|
|||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 3,
|
||||
"ranges": ["NULL < a < 3"]
|
||||
"ranges": ["(NULL) < (a) < (3)"]
|
||||
},
|
||||
"rows_for_plan": 3,
|
||||
"cost_for_plan": 1.407,
|
||||
|
@ -6034,4 +6034,238 @@ COUNT(*)
|
|||
1
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly.
|
||||
#
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table one_k (a int);
|
||||
insert into one_k select A.a + B.a*10 + C.a*100 from t0 A, t0 B, t0 C;
|
||||
create table t1 ( a int, b int, key a_b(a,b));
|
||||
insert into t1 select a,a from one_k;
|
||||
set optimizer_trace='enabled=on';
|
||||
explain select * from t1 force index (a_b) where a=2 and b=4;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref a_b a_b 10 const,const 1 Using index
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "a_b",
|
||||
"ranges":
|
||||
[
|
||||
"(2,4) <= (a,b) <= (2,4)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 1,
|
||||
"cost": 1.1783,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
explain select * from t1 where a >= 900 and b between 10 and 20;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range a_b a_b 10 NULL 107 Using where; Using index
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "a_b",
|
||||
"ranges":
|
||||
[
|
||||
"(900,10) <= (a,b)"
|
||||
],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 107,
|
||||
"cost": 10.955,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
drop table t0,t1;
|
||||
create table t1 (start_date date, end_date date, filler char(100), key(start_date, end_date)) ;
|
||||
insert into t1 select date_add(now(), interval a day), date_add(now(), interval (a+7) day), 'data' from one_k;
|
||||
explain select * from t1 force index(start_date) where start_date >= '2019-02-10' and end_date <'2019-04-01';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range start_date start_date 8 NULL 1000 Using index condition
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "start_date",
|
||||
"ranges":
|
||||
[
|
||||
"(0x4ac60f,NULL) < (start_date,end_date)"
|
||||
],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 1000,
|
||||
"cost": 1282.2,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
drop table t1,one_k;
|
||||
create table ten(a int);
|
||||
insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t1 (
|
||||
a int not null,
|
||||
b int not null,
|
||||
c int not null,
|
||||
d int not null,
|
||||
key a_b_c(a,b,c)
|
||||
);
|
||||
insert into t1 select a,a, a,a from ten;
|
||||
explain select * from t1 force index(a_b_c) where a between 1 and 4 and b < 50;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range a_b_c a_b_c 8 NULL 4 Using index condition
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "a_b_c",
|
||||
"ranges":
|
||||
[
|
||||
"(1) <= (a,b) < (4,50)"
|
||||
],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 4,
|
||||
"cost": 6.2648,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
drop table ten,t1;
|
||||
# Ported test from MySQL for ranges involving a BINARY column
|
||||
CREATE TABLE t1(i INT PRIMARY KEY, b BINARY(16), INDEX i_b(b));
|
||||
INSERT INTO t1 VALUES (1, x'D95B94336A9946A39CF5B58CFE772D8C');
|
||||
INSERT INTO t1 VALUES (2, NULL);
|
||||
EXPLAIN SELECT * FROM t1 WHERE b IN (0xD95B94336A9946A39CF5B58CFE772D8C);
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref i_b i_b 17 const 1 Using index condition
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "i_b",
|
||||
"ranges":
|
||||
[
|
||||
"(0xd95b94336a9946a39cf5b58cfe772d8c) <= (b) <= (0xd95b94336a9946a39cf5b58cfe772d8c)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 1,
|
||||
"cost": 2.3797,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref i_b i_b 17 const 1 Using index condition
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "i_b",
|
||||
"ranges":
|
||||
[
|
||||
"(NULL) <= (b) <= (NULL)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 1,
|
||||
"cost": 2.3797,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
drop table t1;
|
||||
set optimizer_trace='enabled=off';
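For reference, the MDEV-18741 fix verified above only changes how the optimizer trace prints ranges over multi-part keys: the per-keypart form "2 <= a <= 2 AND 4 <= b <= 4" becomes a single tuple comparison "(2,4) <= (a,b) <= (2,4)". A rough session sketch that should reproduce the new notation on a throwaway table (the name demo is illustrative, not part of the suite):
# Hypothetical throwaway table with a composite key.
CREATE TABLE demo (a INT, b INT, KEY a_b (a, b));
INSERT INTO demo VALUES (1, 1), (2, 4), (3, 9);
SET optimizer_trace = 'enabled=on';
SELECT * FROM demo FORCE INDEX (a_b) WHERE a = 2 AND b = 4;
# The analyzing_range_alternatives node is expected to contain a tuple-style
# range such as "(2,4) <= (a,b) <= (2,4)".
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
SET optimizer_trace = 'enabled=off';
DROP TABLE demo;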
|
||||
@ -387,4 +387,61 @@ SELECT COUNT(*) FROM v1 WHERE MATCH (f) AGAINST ('fooba');
|
|||
DROP VIEW v1;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly.
|
||||
--echo #
|
||||
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table one_k (a int);
|
||||
insert into one_k select A.a + B.a*10 + C.a*100 from t0 A, t0 B, t0 C;
|
||||
create table t1 ( a int, b int, key a_b(a,b));
|
||||
insert into t1 select a,a from one_k;
|
||||
set optimizer_trace='enabled=on';
|
||||
|
||||
explain select * from t1 force index (a_b) where a=2 and b=4;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
explain select * from t1 where a >= 900 and b between 10 and 20;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
drop table t0,t1;
|
||||
|
||||
create table t1 (start_date date, end_date date, filler char(100), key(start_date, end_date)) ;
|
||||
--disable_warnings
|
||||
insert into t1 select date_add(now(), interval a day), date_add(now(), interval (a+7) day), 'data' from one_k;
|
||||
--enable_warnings
|
||||
explain select * from t1 force index(start_date) where start_date >= '2019-02-10' and end_date <'2019-04-01';
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
drop table t1,one_k;
|
||||
|
||||
create table ten(a int);
|
||||
insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t1 (
|
||||
a int not null,
|
||||
b int not null,
|
||||
c int not null,
|
||||
d int not null,
|
||||
key a_b_c(a,b,c)
|
||||
);
|
||||
|
||||
insert into t1 select a,a, a,a from ten;
|
||||
explain select * from t1 force index(a_b_c) where a between 1 and 4 and b < 50;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
drop table ten,t1;
|
||||
|
||||
--echo # Ported test from MySQL for ranges involving a BINARY column
|
||||
|
||||
CREATE TABLE t1(i INT PRIMARY KEY, b BINARY(16), INDEX i_b(b));
|
||||
INSERT INTO t1 VALUES (1, x'D95B94336A9946A39CF5B58CFE772D8C');
|
||||
INSERT INTO t1 VALUES (2, NULL);
|
||||
|
||||
EXPLAIN SELECT * FROM t1 WHERE b IN (0xD95B94336A9946A39CF5B58CFE772D8C);
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
drop table t1;
|
||||
|
||||
set optimizer_trace='enabled=off';
|
||||
@ -110,7 +110,7 @@ explain select * from t1 where a=1 or b=1 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a",
|
||||
"ranges": ["1 <= a <= 1"],
|
||||
"ranges": ["(1) <= (a) <= (1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
|
@ -126,7 +126,7 @@ explain select * from t1 where a=1 or b=1 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "b",
|
||||
"ranges": ["1 <= b <= 1"],
|
||||
"ranges": ["(1) <= (b) <= (1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
|
@ -147,7 +147,7 @@ explain select * from t1 where a=1 or b=1 {
|
|||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= a <= 1"],
|
||||
"ranges": ["(1) <= (a) <= (1)"],
|
||||
"analyzing_roworder_intersect": {
|
||||
"cause": "too few roworder scans"
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ explain select * from t1 where a=1 or b=1 {
|
|||
"type": "range_scan",
|
||||
"index": "b",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= b <= 1"],
|
||||
"ranges": ["(1) <= (b) <= (1)"],
|
||||
"analyzing_roworder_intersect": {
|
||||
"cause": "too few roworder scans"
|
||||
}
|
||||
|
@ -176,13 +176,13 @@ explain select * from t1 where a=1 or b=1 {
|
|||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= a <= 1"]
|
||||
"ranges": ["(1) <= (a) <= (1)"]
|
||||
},
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "b",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= b <= 1"]
|
||||
"ranges": ["(1) <= (b) <= (1)"]
|
||||
}
|
||||
]
|
||||
},
|
||||
|
@ -243,3 +243,500 @@ explain select * from t1 where a=1 or b=1 {
|
|||
drop table t0,t1;
|
||||
set optimizer_trace="enabled=off";
|
||||
set @@optimizer_switch= @tmp_opt_switch;
|
||||
# More tests added for index_merge access
|
||||
create table t1
|
||||
(
|
||||
/* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */
|
||||
st_a int not null default 0,
|
||||
swt1a int not null default 0,
|
||||
swt2a int not null default 0,
|
||||
st_b int not null default 0,
|
||||
swt1b int not null default 0,
|
||||
swt2b int not null default 0,
|
||||
/* fields/keys for row retrieval tests */
|
||||
key1 int,
|
||||
key2 int,
|
||||
key3 int,
|
||||
key4 int,
|
||||
/* make rows much bigger than keys */
|
||||
filler1 char (200),
|
||||
filler2 char (200),
|
||||
filler3 char (200),
|
||||
filler4 char (200),
|
||||
filler5 char (200),
|
||||
filler6 char (200),
|
||||
/* order of keys is important */
|
||||
key sta_swt12a(st_a,swt1a,swt2a),
|
||||
key sta_swt1a(st_a,swt1a),
|
||||
key sta_swt2a(st_a,swt2a),
|
||||
key sta_swt21a(st_a,swt2a,swt1a),
|
||||
key st_a(st_a),
|
||||
key stb_swt1a_2b(st_b,swt1b,swt2a),
|
||||
key stb_swt1b(st_b,swt1b),
|
||||
key st_b(st_b),
|
||||
key(key1),
|
||||
key(key2),
|
||||
key(key3),
|
||||
key(key4)
|
||||
) ;
|
||||
create table t0 as select * from t1;
|
||||
# Printing of many insert into t0 values (....) disabled.
|
||||
alter table t1 disable keys;
|
||||
# Printing of many insert into t1 select .... from t0 disabled.
|
||||
# Printing of many insert into t1 (...) values (....) disabled.
|
||||
alter table t1 enable keys;
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2');
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3');
|
||||
set optimizer_trace='enabled=on';
|
||||
# 3-way ROR-intersection
|
||||
explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 2 Using intersect(key1,key2,key3); Using where; Using index
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key1",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 2243,
|
||||
"cost": 2862.1,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key2",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key2) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 2243,
|
||||
"cost": 2862.1,
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key3",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 2243,
|
||||
"cost": 2862.1,
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"intersecting_indexes":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key1",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 58.252,
|
||||
"disk_sweep_cost": 1923.1,
|
||||
"cumulative_total_cost": 1981.4,
|
||||
"usable": true,
|
||||
"matching_rows_now": 2243,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key2",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 116.5,
|
||||
"disk_sweep_cost": 84.518,
|
||||
"cumulative_total_cost": 201.02,
|
||||
"usable": true,
|
||||
"matching_rows_now": 77.636,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key3",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 174.76,
|
||||
"disk_sweep_cost": 0,
|
||||
"cumulative_total_cost": 174.76,
|
||||
"usable": true,
|
||||
"matching_rows_now": 2.6872,
|
||||
"intersect_covering_with_this_index": true,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"clustered_pk":
|
||||
{
|
||||
"clustered_pk_added_to_intersect": false,
|
||||
"cause": "no clustered pk index"
|
||||
},
|
||||
"rows": 2,
|
||||
"cost": 174.76,
|
||||
"covering": true,
|
||||
"chosen": true
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_access_plan":
|
||||
{
|
||||
"type": "index_roworder_intersect",
|
||||
"rows": 2,
|
||||
"cost": 174.76,
|
||||
"covering": true,
|
||||
"clustered_pk_scan": false,
|
||||
"intersect_of":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key1",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key2",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key2) <= (100)"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key3",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"rows_for_plan": 2,
|
||||
"cost_for_plan": 174.76,
|
||||
"chosen": true
|
||||
}
|
||||
]
|
||||
# ROR-union(ROR-intersection, ROR-range)
|
||||
explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
|
||||
{
|
||||
"indexes_to_merge":
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key1",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 2243,
|
||||
"cost": 170.53,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key2",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key2) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 2243,
|
||||
"cost": 170.53,
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
}
|
||||
],
|
||||
"index_to_merge": "key1",
|
||||
"cumulated_cost": 170.53
|
||||
},
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key3",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 2243,
|
||||
"cost": 170.53,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key4",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key4) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 2243,
|
||||
"cost": 170.53,
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
}
|
||||
],
|
||||
"index_to_merge": "key3",
|
||||
"cumulated_cost": 341.05
|
||||
}
|
||||
],
|
||||
"cost_of_reading_ranges": 341.05,
|
||||
"use_roworder_union": true,
|
||||
"cause": "always cheaper than non roworder retrieval",
|
||||
"analyzing_roworder_scans":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key1",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"intersecting_indexes":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key1",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 58.252,
|
||||
"disk_sweep_cost": 1923.1,
|
||||
"cumulative_total_cost": 1981.4,
|
||||
"usable": true,
|
||||
"matching_rows_now": 2243,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key2",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 116.5,
|
||||
"disk_sweep_cost": 84.518,
|
||||
"cumulative_total_cost": 201.02,
|
||||
"usable": true,
|
||||
"matching_rows_now": 77.636,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"clustered_pk":
|
||||
{
|
||||
"clustered_pk_added_to_intersect": false,
|
||||
"cause": "no clustered pk index"
|
||||
},
|
||||
"rows": 77,
|
||||
"cost": 201.02,
|
||||
"covering": false,
|
||||
"chosen": true
|
||||
}
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key3",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"intersecting_indexes":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key3",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 58.252,
|
||||
"disk_sweep_cost": 1923.1,
|
||||
"cumulative_total_cost": 1981.4,
|
||||
"usable": true,
|
||||
"matching_rows_now": 2243,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key4",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 116.5,
|
||||
"disk_sweep_cost": 84.518,
|
||||
"cumulative_total_cost": 201.02,
|
||||
"usable": true,
|
||||
"matching_rows_now": 77.636,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"clustered_pk":
|
||||
{
|
||||
"clustered_pk_added_to_intersect": false,
|
||||
"cause": "no clustered pk index"
|
||||
},
|
||||
"rows": 77,
|
||||
"cost": 201.02,
|
||||
"covering": false,
|
||||
"chosen": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"index_roworder_union_cost": 386.73,
|
||||
"members": 2,
|
||||
"chosen": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_access_plan":
|
||||
{
|
||||
"type": "index_roworder_union",
|
||||
"union_of":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "index_roworder_intersect",
|
||||
"rows": 77,
|
||||
"cost": 201.02,
|
||||
"covering": false,
|
||||
"clustered_pk_scan": false,
|
||||
"intersect_of":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key1",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key2",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key2) <= (100)"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "index_roworder_intersect",
|
||||
"rows": 77,
|
||||
"cost": 201.02,
|
||||
"covering": false,
|
||||
"clustered_pk_scan": false,
|
||||
"intersect_of":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key3",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key4",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key4) <= (100)"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"rows_for_plan": 154,
|
||||
"cost_for_plan": 386.73,
|
||||
"chosen": true
|
||||
}
|
||||
]
|
||||
drop table t0,t1;
|
||||
set optimizer_trace="enabled=off";
|
||||
@ -19,3 +19,115 @@ select * from information_schema.OPTIMIZER_TRACE;
|
|||
drop table t0,t1;
|
||||
set optimizer_trace="enabled=off";
|
||||
set @@optimizer_switch= @tmp_opt_switch;
|
||||
|
||||
--echo # More tests added for index_merge access
|
||||
|
||||
--enable_warnings
|
||||
create table t1
|
||||
(
|
||||
/* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */
|
||||
st_a int not null default 0,
|
||||
swt1a int not null default 0,
|
||||
swt2a int not null default 0,
|
||||
|
||||
st_b int not null default 0,
|
||||
swt1b int not null default 0,
|
||||
swt2b int not null default 0,
|
||||
|
||||
/* fields/keys for row retrieval tests */
|
||||
key1 int,
|
||||
key2 int,
|
||||
key3 int,
|
||||
key4 int,
|
||||
|
||||
/* make rows much bigger than keys */
|
||||
filler1 char (200),
|
||||
filler2 char (200),
|
||||
filler3 char (200),
|
||||
filler4 char (200),
|
||||
filler5 char (200),
|
||||
filler6 char (200),
|
||||
|
||||
/* order of keys is important */
|
||||
key sta_swt12a(st_a,swt1a,swt2a),
|
||||
key sta_swt1a(st_a,swt1a),
|
||||
key sta_swt2a(st_a,swt2a),
|
||||
key sta_swt21a(st_a,swt2a,swt1a),
|
||||
key st_a(st_a),
|
||||
key stb_swt1a_2b(st_b,swt1b,swt2a),
|
||||
key stb_swt1b(st_b,swt1b),
|
||||
key st_b(st_b),
|
||||
|
||||
key(key1),
|
||||
key(key2),
|
||||
key(key3),
|
||||
key(key4)
|
||||
) ;
|
||||
# Fill table
|
||||
create table t0 as select * from t1;
|
||||
--disable_query_log
|
||||
--echo # Printing of many insert into t0 values (....) disabled.
|
||||
let $cnt=1000;
|
||||
while ($cnt)
|
||||
{
|
||||
eval insert into t0 values (1, 2, 3, 1, 2, 3, 0, 0, 0, 0, 'data1', 'data2', 'data3', 'data4', 'data5', 'data6');
|
||||
dec $cnt;
|
||||
}
|
||||
--enable_query_log
|
||||
|
||||
alter table t1 disable keys;
|
||||
--disable_query_log
|
||||
--echo # Printing of many insert into t1 select .... from t0 disabled.
|
||||
let $1=4;
|
||||
while ($1)
|
||||
{
|
||||
let $2=4;
|
||||
while ($2)
|
||||
{
|
||||
let $3=4;
|
||||
while ($3)
|
||||
{
|
||||
eval insert into t1 select $1, $2, $3, $1 ,$2, $3, key1, key2, key3, key4, filler1, filler2, filler3, filler4, filler5, filler6 from t0;
|
||||
dec $3;
|
||||
}
|
||||
dec $2;
|
||||
}
|
||||
dec $1;
|
||||
}
|
||||
|
||||
--echo # Printing of many insert into t1 (...) values (....) disabled.
|
||||
# Row retrieval tests
|
||||
# -1 is used for values 'out of any range we are using'
|
||||
# insert enough rows for index intersection to be used for (key1,key2)
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 100, 100,'key1-key2-key3-key4');
|
||||
let $cnt=400;
|
||||
while ($cnt)
|
||||
{
|
||||
eval insert into t1 (key1, key2, key3, key4, filler1) values (100, -1, 100, -1,'key1-key3');
|
||||
dec $cnt;
|
||||
}
|
||||
let $cnt=400;
|
||||
while ($cnt)
|
||||
{
|
||||
eval insert into t1 (key1, key2, key3, key4, filler1) values (-1, 100, -1, 100,'key2-key4');
|
||||
dec $cnt;
|
||||
}
|
||||
--enable_query_log
|
||||
alter table t1 enable keys;
|
||||
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2');
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3');
|
||||
set optimizer_trace='enabled=on';
|
||||
|
||||
--echo # 3-way ROR-intersection
|
||||
explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
--echo # ROR-union(ROR-intersection, ROR-range)
|
||||
explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
drop table t0,t1;
|
||||
set optimizer_trace="enabled=off";
|
||||
@ -116,7 +116,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
|
|||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "PRIMARY",
|
||||
"ranges": ["pk1 < 0", "0 < pk1"],
|
||||
"ranges": ["(pk1) < (0)", "(0) < (pk1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
|
@ -127,7 +127,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
|
|||
},
|
||||
{
|
||||
"index": "key1",
|
||||
"ranges": ["1 <= key1 <= 1"],
|
||||
"ranges": ["(1) <= (key1) <= (1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
|
@ -164,7 +164,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
|
|||
"type": "range_scan",
|
||||
"index": "key1",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= key1 <= 1"]
|
||||
"ranges": ["(1) <= (key1) <= (1)"]
|
||||
},
|
||||
"rows_for_plan": 1,
|
||||
"cost_for_plan": 2.3751,
|
||||
@ -1,4 +1,6 @@
|
|||
--source include/have_innodb.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
SET SESSION STORAGE_ENGINE='InnoDB';
|
||||
|
||||
@ -3197,6 +3197,7 @@ drop table t1,t2,t3;
|
|||
#
|
||||
# MDEV-18896: IN subquery in WHERE of a table-less query used for INSERT
|
||||
#
|
||||
set @@optimizer_switch= @subselect_sj_tmp;
|
||||
create table t1 (a1 varchar(25));
|
||||
create table t2 (a2 varchar(25)) ;
|
||||
insert into t1 select 'xxx' from dual where 'xxx' in (select a2 from t2);
|
||||
@ -2880,6 +2880,8 @@ drop table t1,t2,t3;
|
|||
--echo # MDEV-18896: IN subquery in WHERE of a table-less query used for INSERT
|
||||
--echo #
|
||||
|
||||
set @@optimizer_switch= @subselect_sj_tmp;
|
||||
|
||||
create table t1 (a1 varchar(25));
|
||||
create table t2 (a2 varchar(25)) ;
|
||||
insert into t1 select 'xxx' from dual where 'xxx' in (select a2 from t2);
|
||||
@ -3211,6 +3211,7 @@ drop table t1,t2,t3;
|
|||
#
|
||||
# MDEV-18896: IN subquery in WHERE of a table-less query used for INSERT
|
||||
#
|
||||
set @@optimizer_switch= @subselect_sj_tmp;
|
||||
create table t1 (a1 varchar(25));
|
||||
create table t2 (a2 varchar(25)) ;
|
||||
insert into t1 select 'xxx' from dual where 'xxx' in (select a2 from t2);
|
||||
@ -149,6 +149,7 @@ EOF
|
|||
load data infile 'mdev8605.txt' into table t1 fields terminated by ',';
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
--remove_file $datadir/test/mdev8605.txt
|
||||
|
||||
# timestamps (on NULL = NOW())
|
||||
create table t1 (a timestamp, b int auto_increment primary key);
|
||||
|
@ -218,6 +219,7 @@ load data infile 'sep8605.txt' into table t1 fields terminated by ','
|
|||
(@a,a2,a3,b,c) set a1=100-@a;
|
||||
select 100-a1,a2,a3,b,c from t1;
|
||||
delete from t1;
|
||||
--remove_file $datadir/test/sep8605.txt
|
||||
|
||||
--write_file $datadir/test/fix8605.txt
|
||||
00012010-11-12 01:02:030010000000000000000
|
||||
|
@ -231,6 +233,7 @@ set statement timestamp=777777777 for
|
|||
load data infile 'fix8605.txt' into table t1 fields terminated by '';
|
||||
select * from t1;
|
||||
delete from t1;
|
||||
--remove_file $datadir/test/fix8605.txt
|
||||
|
||||
--write_file $datadir/test/xml8605.txt
|
||||
<data>
|
||||
|
@ -300,6 +303,7 @@ set statement timestamp=777777777 for
|
|||
load xml infile 'xml8605.txt' into table t1 rows identified by '<row>';
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
--remove_file $datadir/test/xml8605.txt
|
||||
|
||||
# explicit DEFAULT
|
||||
create table t1 (a int not null default 5, b int, c int);
|
||||
@ -11,14 +11,11 @@ INSERT INTO t2 VALUES(2);
|
|||
SELECT * FROM t1;
|
||||
ERROR 42000: Unknown storage engine 'InnoDB'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
1
|
||||
2
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
SELECT * FROM t2;
|
||||
a
|
||||
2
|
||||
CHECK TABLE t1,t2;
|
||||
CHECK TABLE t2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 check status OK
|
||||
test.t2 check status OK
|
||||
DROP TABLE t1, t2;
|
||||
@ -1,6 +1,7 @@
|
|||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=");
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` is corrupted");
|
||||
call mtr.add_suppression("\\[ERROR\\] InnoDB: We detected index corruption in an InnoDB type table");
|
||||
call mtr.add_suppression("\\[ERROR\\] mysqld.*: Index for table 't2' is corrupt; try to repair it");
|
||||
SET GLOBAL innodb_file_per_table = ON;
|
||||
set global innodb_compression_algorithm = 1;
|
||||
# Create and populate tables to be corrupted
|
||||
|
@ -19,7 +20,7 @@ COMMIT;
|
|||
SELECT * FROM t1;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
SELECT * FROM t2;
|
||||
ERROR HY000: Got error 192 'Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.' from InnoDB
|
||||
Got one of the listed errors
|
||||
SELECT * FROM t3;
|
||||
ERROR 42S02: Table 'test.t3' doesn't exist in engine
|
||||
# Restore the original tables
|
||||
@ -0,0 +1,5 @@
|
|||
[strict_crc32]
|
||||
--innodb-checksum-algorithm=strict_crc32
|
||||
|
||||
[strict_full_crc32]
|
||||
--innodb-checksum-algorithm=strict_full_crc32
@ -11,6 +11,7 @@ call mtr.add_suppression("InnoDB: The page \\[page id: space=\\d+, page number=3
|
|||
call mtr.add_suppression("InnoDB: Table in tablespace \\d+ encrypted. However key management plugin or used key_version \\d+ is not found or used encryption algorithm or method does not match. Can't continue opening the table.");
|
||||
--enable_query_log
|
||||
|
||||
let INNODB_CHECKSUM_ALGORITHM = `SELECT @@innodb_checksum_algorithm`;
|
||||
let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
|
||||
--echo # Work around MDEV-19541
|
||||
SET GLOBAL innodb_checksum_algorithm=crc32;
|
||||
|
@ -36,7 +37,8 @@ my $ps = $ENV{INNODB_PAGE_SIZE};
|
|||
my $file = "$ENV{MYSQLD_DATADIR}/test/t1.ibd";
|
||||
open(FILE, "+<$file") || die "Unable to open $file";
|
||||
binmode FILE;
|
||||
seek (FILE, $ENV{INNODB_PAGE_SIZE} * 3, SEEK_SET) or die "seek";
|
||||
my $offset = ($ENV{INNODB_CHECKSUM_ALGORITHM} =~ /full_crc32/) ? 26 : 0;
|
||||
seek (FILE, $ENV{INNODB_PAGE_SIZE} * 3 + $offset, SEEK_SET) or die "seek";
|
||||
print FILE "junk";
|
||||
close FILE or die "close";
|
||||
|
||||
|
@ -53,11 +55,16 @@ EOF
|
|||
--source include/start_mysqld.inc
|
||||
--error ER_UNKNOWN_STORAGE_ENGINE
|
||||
SELECT * FROM t1;
|
||||
let $restart_parameters=--innodb_force_recovery=1;
|
||||
--disable_query_log
|
||||
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=[1-9][0-9]*, page number=3\\] in file .*test.t[1].ibd looks corrupted; key_version=");
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t1` is corrupted. Please drop the table and recreate.");
|
||||
--enable_query_log
|
||||
let $restart_parameters=--innodb_force_recovery=1 --skip-innodb-buffer-pool-load-at-startup;
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
CHECK TABLE t1,t2;
|
||||
CHECK TABLE t2;
|
||||
|
||||
DROP TABLE t1, t2;
|
||||
@ -9,7 +9,8 @@
|
|||
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=");
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` is corrupted");
|
||||
call mtr.add_suppression("\\[ERROR\\] InnoDB: We detected index corruption in an InnoDB type table");
|
||||
call mtr.add_suppression("\\[ERROR\\] mysqld.*: Index for table 't2' is corrupt; try to repair it");
|
||||
|
||||
SET GLOBAL innodb_file_per_table = ON;
|
||||
set global innodb_compression_algorithm = 1;
|
||||
|
@ -69,7 +70,7 @@ EOF
|
|||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
SELECT * FROM t1;
|
||||
--error ER_GET_ERRMSG
|
||||
--error ER_GET_ERRMSG,ER_NOT_KEYFILE
|
||||
SELECT * FROM t2;
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
SELECT * FROM t3;
|
||||
@ -33,7 +33,6 @@ galera_concurrent_ctas : MDEV-18180 Galera test failure on galera.galera_concurr
|
|||
galera_encrypt_tmp_files : Get error failed to enable encryption of temporary files
|
||||
galera_flush : MariaDB does not have global.thread_statistics
|
||||
galera_gcache_recover_manytrx : MDEV-18834 Galera test failure
|
||||
galera_gcs_fc_limit : MDEV-17061 Timeout in wait_condition.inc for PROCESSLIST
|
||||
galera_ist_mariabackup : MDEV-18829 test leaves port open
|
||||
galera_ist_progress: MDEV-15236 fails when trying to read transfer status
|
||||
galera_kill_applier : race condition at the start of the test
|
||||
@ -7,11 +7,13 @@ SET GLOBAL wsrep_slave_threads = 10;
|
|||
SET GLOBAL wsrep_slave_threads = 1;
|
||||
# Wait 10 slave threads to start 1
|
||||
connection node_2;
|
||||
# Generate 12 replication events
|
||||
SET SESSION wsrep_sync_wait=15;
|
||||
# Generate 100 replication events
|
||||
connection node_1;
|
||||
SET SESSION wsrep_sync_wait=15;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
13
|
||||
101
|
||||
# Wait 9 slave threads to exit 1
|
||||
SET GLOBAL wsrep_slave_threads = 10;
|
||||
# Wait 10 slave threads to start 2
|
||||
|
@ -19,23 +21,21 @@ SET GLOBAL wsrep_slave_threads = 20;
|
|||
# Wait 20 slave threads to start 3
|
||||
SET GLOBAL wsrep_slave_threads = 1;
|
||||
connection node_2;
|
||||
# Generate 40 replication events
|
||||
# Generate 100 replication events
|
||||
connection node_1;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
53
|
||||
201
|
||||
# Wait 10 slave threads to exit 3
|
||||
SET GLOBAL wsrep_slave_threads = 10;
|
||||
SET GLOBAL wsrep_slave_threads = 0;
|
||||
Warnings:
|
||||
Warning 1292 Truncated incorrect wsrep_slave_threads value: '0'
|
||||
SET GLOBAL wsrep_slave_threads = 1;
|
||||
# Wait 10 slave threads to start 3
|
||||
connection node_2;
|
||||
# Generate 12 replication events
|
||||
# Generate 100 replication events
|
||||
connection node_1;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
65
|
||||
301
|
||||
# Wait 10 slave threads to exit 4
|
||||
connection node_1;
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -1,25 +1,28 @@
|
|||
connection node_2;
|
||||
connection node_1;
|
||||
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
CREATE TABLE t1 (f1 INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,B INTEGER) ENGINE=InnoDB;
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
connection node_2;
|
||||
SELECT COUNT(*) = 1 FROM t1;
|
||||
COUNT(*) = 1
|
||||
SET SESSION wsrep_sync_wait=15;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
1
|
||||
SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1';
|
||||
LOCK TABLE t1 WRITE;
|
||||
connection node_1;
|
||||
INSERT INTO t1 VALUES (2);
|
||||
INSERT INTO t1 VALUES (3);
|
||||
INSERT INTO t1 VALUES (4);
|
||||
INSERT INTO t1 VALUES (5);
|
||||
FLUSH STATUS;
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
INSERT INTO t1 VALUES (3,3);
|
||||
INSERT INTO t1 VALUES (4,4);
|
||||
INSERT INTO t1(B) SELECT B FROM t1;
|
||||
connection node_1a;
|
||||
# In node_1 either insert or commit should be stuck
|
||||
connection node_2;
|
||||
UNLOCK TABLES;
|
||||
connection node_1;
|
||||
INSERT INTO t1 VALUES (6);
|
||||
INSERT INTO t1 VALUES (NULL,6);
|
||||
connection node_2;
|
||||
SELECT COUNT(*) = 6 FROM t1;
|
||||
COUNT(*) = 6
|
||||
1
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
9
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -19,13 +19,14 @@ SET GLOBAL wsrep_slave_threads = 1;
|
|||
--source include/wait_condition.inc
|
||||
|
||||
--connection node_2
|
||||
SET SESSION wsrep_sync_wait=15;
|
||||
# Wait until inserts are replicated
|
||||
--let $wait_condition = SELECT COUNT(*) = 1 FROM t1;
|
||||
--source include/wait_condition.inc
|
||||
--echo # Generate 12 replication events
|
||||
--echo # Generate 100 replication events
|
||||
--disable_query_log
|
||||
--disable_result_log
|
||||
--let $count = 12
|
||||
--let $count = 100
|
||||
while ($count)
|
||||
{
|
||||
INSERT INTO t1 VALUES (1);
|
||||
|
@ -35,9 +36,7 @@ while ($count)
|
|||
--enable_query_log
|
||||
|
||||
--connection node_1
|
||||
# Wait until inserts are replicated
|
||||
--let $wait_condition = SELECT COUNT(*) = 13 FROM t1;
|
||||
--source include/wait_condition.inc
|
||||
SET SESSION wsrep_sync_wait=15;
|
||||
|
||||
SELECT COUNT(*) FROM t1;
|
||||
|
||||
|
@ -61,10 +60,10 @@ SET GLOBAL wsrep_slave_threads = 20;
|
|||
SET GLOBAL wsrep_slave_threads = 1;
|
||||
|
||||
--connection node_2
|
||||
--echo # Generate 40 replication events
|
||||
--echo # Generate 100 replication events
|
||||
--disable_query_log
|
||||
--disable_result_log
|
||||
--let $count = 40
|
||||
--let $count = 100
|
||||
while ($count)
|
||||
{
|
||||
INSERT INTO t1 VALUES (1);
|
||||
|
@ -74,11 +73,6 @@ while ($count)
|
|||
--enable_result_log
|
||||
|
||||
--connection node_1
|
||||
|
||||
# Wait until inserts are replicated
|
||||
--let $wait_condition = SELECT COUNT(*) = 53 FROM t1;
|
||||
--source include/wait_condition.inc
|
||||
|
||||
SELECT COUNT(*) FROM t1;
|
||||
|
||||
--echo # Wait 10 slave threads to exit 3
|
||||
|
@ -87,7 +81,7 @@ SELECT COUNT(*) FROM t1;
|
|||
--source include/wait_condition.inc
|
||||
|
||||
SET GLOBAL wsrep_slave_threads = 10;
|
||||
SET GLOBAL wsrep_slave_threads = 0;
|
||||
SET GLOBAL wsrep_slave_threads = 1;
|
||||
|
||||
--echo # Wait 10 slave threads to start 3
|
||||
--let $wait_timeout=600
|
||||
|
@ -95,10 +89,10 @@ SET GLOBAL wsrep_slave_threads = 0;
|
|||
--source include/wait_condition.inc
|
||||
|
||||
--connection node_2
|
||||
--echo # Generate 12 replication events
|
||||
--echo # Generate 100 replication events
|
||||
--disable_query_log
|
||||
--disable_result_log
|
||||
--let $count = 12
|
||||
--let $count = 100
|
||||
while ($count)
|
||||
{
|
||||
INSERT INTO t1 VALUES (1);
|
||||
|
@ -108,10 +102,6 @@ while ($count)
|
|||
--enable_query_log
|
||||
|
||||
--connection node_1
|
||||
# Wait until inserts are replicated
|
||||
--let $wait_condition = SELECT COUNT(*) = 65 FROM t1;
|
||||
--source include/wait_condition.inc
|
||||
|
||||
SELECT COUNT(*) FROM t1;
|
||||
|
||||
--echo # Wait 10 slave threads to exit 4
|
||||
|
|
|
@ -5,12 +5,12 @@
|
|||
--source include/galera_cluster.inc
|
||||
--source include/have_innodb.inc
|
||||
|
||||
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
CREATE TABLE t1 (f1 INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,B INTEGER) ENGINE=InnoDB;
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
|
||||
--connection node_2
|
||||
SELECT COUNT(*) = 1 FROM t1;
|
||||
--sleep 1
|
||||
SET SESSION wsrep_sync_wait=15;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
|
||||
--let $wsrep_provider_options_orig = `SELECT @@wsrep_provider_options`
|
||||
SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1';
|
||||
|
@ -19,24 +19,22 @@ SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1';
|
|||
LOCK TABLE t1 WRITE;
|
||||
|
||||
--connection node_1
|
||||
--sleep 1
|
||||
INSERT INTO t1 VALUES (2);
|
||||
--sleep 2
|
||||
INSERT INTO t1 VALUES (3);
|
||||
--sleep 2
|
||||
INSERT INTO t1 VALUES (4);
|
||||
--sleep 2
|
||||
FLUSH STATUS;
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
INSERT INTO t1 VALUES (3,3);
|
||||
INSERT INTO t1 VALUES (4,4);
|
||||
|
||||
# This query will hang because flow control will kick in
|
||||
--send
|
||||
INSERT INTO t1 VALUES (5);
|
||||
--sleep 2
|
||||
INSERT INTO t1(B) SELECT B FROM t1;
|
||||
|
||||
--let $galera_connection_name = node_1a
|
||||
--let $galera_server_number = 1
|
||||
--source include/galera_connect.inc
|
||||
--connection node_1a
|
||||
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'Commit' AND INFO = 'INSERT INTO t1 VALUES (5)';
|
||||
|
||||
--echo # In node_1 either insert or commit should be stuck
|
||||
--let $wait_condition = SELECT VARIABLE_VALUE > 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_flow_control_paused';
|
||||
--source include/wait_condition.inc
|
||||
|
||||
--connection node_2
|
||||
|
@ -46,11 +44,11 @@ UNLOCK TABLES;
|
|||
--connection node_1
|
||||
--reap
|
||||
|
||||
INSERT INTO t1 VALUES (6);
|
||||
INSERT INTO t1 VALUES (NULL,6);
|
||||
|
||||
--connection node_2
|
||||
# Replication catches up and continues normally
|
||||
SELECT COUNT(*) = 6 FROM t1;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
|
||||
--disable_query_log
|
||||
--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_orig';
|
||||
|
|
|
@ -1,28 +1,37 @@
|
|||
connection node_2;
|
||||
connection node_1;
|
||||
connection node_1;
|
||||
connection node_2;
|
||||
connection node_3;
|
||||
connection node_1;
|
||||
SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_timeout=PT1S';
|
||||
connection node_2;
|
||||
SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_timeout=PT1S';
|
||||
connection node_3;
|
||||
Suspending node ...
|
||||
connection node_1;
|
||||
SET SESSION wsrep_sync_wait = 0;
|
||||
SET SESSION wsrep_sync_wait=0;
|
||||
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
|
||||
VARIABLE_VALUE = 2
|
||||
1
|
||||
CREATE TABLE t1 (f1 INTEGER);
|
||||
CREATE TABLE t1 (f1 INTEGER) engine=InnoDB;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
connection node_2;
|
||||
SET SESSION wsrep_sync_wait = 0;
|
||||
SET SESSION wsrep_sync_wait=0;
|
||||
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
|
||||
VARIABLE_VALUE = 2
|
||||
1
|
||||
SET SESSION wsrep_sync_wait = DEFAULT;
|
||||
SELECT COUNT(*) = 1 FROM t1;
|
||||
COUNT(*) = 1
|
||||
SET SESSION wsrep_sync_wait = 15;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
1
|
||||
DROP TABLE t1;
|
||||
connection node_3;
|
||||
Resuming node ...
|
||||
CALL mtr.add_suppression("WSREP: gcs_caused() returned -1 \\(Operation not permitted\\)");
|
||||
CALL mtr.add_suppression("WSREP: gcs_caused");
|
||||
CALL mtr.add_suppression("WSREP: gcs/src/gcs_core.cpp:core_handle_uuid_msg");
|
||||
SET SESSION wsrep_sync_wait = 15;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
1
|
||||
connection node_1;
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -7,24 +7,28 @@
|
|||
--source include/galera_cluster.inc
|
||||
--source include/have_innodb.inc
|
||||
|
||||
--let $galera_connection_name = node_3
|
||||
--let $galera_server_number = 3
|
||||
--source include/galera_connect.inc
|
||||
--let $wsrep_cluster_address_node3 = `SELECT @@wsrep_cluster_address`
|
||||
|
||||
# Save original auto_increment_offset values.
|
||||
--let $node_1=node_1
|
||||
--let $node_2=node_2
|
||||
--let $node_3=node_3
|
||||
--source ../galera/include/auto_increment_offset_save.inc
|
||||
|
||||
--connection node_1
|
||||
--let $wsrep_provider_options_node1 = `SELECT @@wsrep_provider_options`
|
||||
SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_timeout=PT1S';
|
||||
|
||||
--connection node_2
|
||||
--source include/wait_until_connected_again.inc
|
||||
--let $wsrep_provider_options_node2 = `SELECT @@wsrep_provider_options`
|
||||
SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_timeout=PT1S';
|
||||
|
||||
--let $galera_connection_name = node_3
|
||||
--let $galera_server_number = 3
|
||||
--source include/galera_connect.inc
|
||||
--connection node_3
|
||||
--source include/wait_until_connected_again.inc
|
||||
--let $wsrep_cluster_address_node3 = `SELECT @@wsrep_cluster_address`
|
||||
|
||||
# Suspend node #3
|
||||
|
||||
--connection node_3
|
||||
--source include/galera_suspend.inc
|
||||
--sleep 5
|
||||
|
||||
|
@ -32,27 +36,27 @@ SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_ti
|
|||
|
||||
--connection node_1
|
||||
--source include/wait_until_connected_again.inc
|
||||
SET SESSION wsrep_sync_wait = 0;
|
||||
SET SESSION wsrep_sync_wait=0;
|
||||
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
|
||||
--disable_query_log
|
||||
--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node1';
|
||||
--enable_query_log
|
||||
|
||||
--source include/wait_until_connected_again.inc
|
||||
CREATE TABLE t1 (f1 INTEGER);
|
||||
|
||||
CREATE TABLE t1 (f1 INTEGER) engine=InnoDB;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
|
||||
--connection node_2
|
||||
SET SESSION wsrep_sync_wait = 0;
|
||||
--source include/wait_until_connected_again.inc
|
||||
SET SESSION wsrep_sync_wait=0;
|
||||
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
|
||||
--disable_query_log
|
||||
--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node2';
|
||||
--enable_query_log
|
||||
|
||||
--source include/wait_until_connected_again.inc
|
||||
SET SESSION wsrep_sync_wait = DEFAULT;
|
||||
SELECT COUNT(*) = 1 FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
SET SESSION wsrep_sync_wait = 15;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
|
||||
# Reconnect node #3 so that MTR's end-of-test checks can run
|
||||
|
||||
|
@ -65,4 +69,13 @@ DROP TABLE t1;
|
|||
--enable_query_log
|
||||
--source include/galera_wait_ready.inc
|
||||
|
||||
CALL mtr.add_suppression("WSREP: gcs_caused() returned -1 \\(Operation not permitted\\)");
|
||||
CALL mtr.add_suppression("WSREP: gcs_caused");
|
||||
CALL mtr.add_suppression("WSREP: gcs/src/gcs_core.cpp:core_handle_uuid_msg");
|
||||
|
||||
SET SESSION wsrep_sync_wait = 15;
|
||||
SELECT COUNT(*) FROM t1;
|
||||
|
||||
--connection node_1
|
||||
DROP TABLE t1;
|
||||
# Restore original auto_increment_offset values.
|
||||
--source ../galera/include/auto_increment_offset_restore.inc
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
create table innodb_normal (c1 int not null auto_increment primary key, b char(200)) engine=innodb;
|
||||
create table innodb_page_compressed1 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=1;
|
||||
create table innodb_page_compressed2 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=2;
|
||||
|
|
|
@ -9,14 +9,11 @@ INSERT INTO t2 VALUES(1);
|
|||
SELECT * FROM t1;
|
||||
ERROR 42000: Unknown storage engine 'InnoDB'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
0
|
||||
2
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
SELECT * FROM t2;
|
||||
a
|
||||
1
|
||||
CHECK TABLE t1,t2;
|
||||
CHECK TABLE t2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 check status OK
|
||||
test.t2 check status OK
|
||||
DROP TABLE t1, t2;
|
||||
|
|
|
@ -59,6 +59,8 @@ truncate table t2;
|
|||
ERROR HY000: Table 't2' is read only
|
||||
drop table t2;
|
||||
ERROR HY000: Table 't2' is read only
|
||||
create schema db;
|
||||
drop schema db;
|
||||
show tables;
|
||||
Tables_in_test
|
||||
t1
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
CREATE TABLE t1 (pk INT PRIMARY KEY, c CHAR(255))ENGINE=InnoDB STATS_PERSISTENT=0;
|
||||
SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG = 2;
|
||||
INSERT INTO t1 VALUES(1, 'sql'), (2, 'server'), (3, 'mariadb'),
|
||||
(4, 'mariadb'), (5, 'test1'), (6, 'test2'), (7, 'test3'),
|
||||
(8, 'test4'), (9, 'test5'), (10, 'test6'), (11, 'test7'),
|
||||
(12, 'test8');
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
12
|
||||
UPDATE t1 SET c='best8' WHERE pk=12;
|
||||
# Kill the server
|
||||
# Corrupt the pages
|
||||
SELECT * FROM t1 WHERE PK = 1;
|
||||
ERROR 42000: Unknown storage engine 'InnoDB'
|
||||
SELECT * FROM t1 WHERE PK = 1;
|
||||
pk c
|
||||
1 sql
|
||||
SELECT * FROM t1 WHERE pk = 12;
|
||||
ERROR HY000: Index for table 't1' is corrupt; try to repair it
|
||||
DROP TABLE t1;
|
|
@ -1 +1,2 @@
|
|||
--innodb_doublewrite=0
|
||||
--skip-innodb-doublewrite
|
||||
--skip-innodb-buffer-pool-load-at-startup
|
||||
|
|
|
@ -6,6 +6,8 @@ call mtr.add_suppression("Plugin 'InnoDB' init function returned error");
|
|||
call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed");
|
||||
call mtr.add_suppression("InnoDB: Database page corruption on disk or a failed file read of tablespace test/t1 page");
|
||||
call mtr.add_suppression("InnoDB: Failed to read file '.*test.t1\\.ibd' at offset 3: Page read from tablespace is corrupted.");
|
||||
call mtr.add_suppression("InnoDB: Background Page read failed to read or decrypt \\[page id: space=\\d+, page number=3\\]");
|
||||
call mtr.add_suppression("InnoDB: Table `test`.`t1` is corrupted. Please drop the table and recreate.");
|
||||
--enable_query_log
|
||||
|
||||
let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
|
||||
|
@ -54,8 +56,10 @@ EOF
|
|||
SELECT * FROM t1;
|
||||
let $restart_parameters=--innodb_force_recovery=1;
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
CHECK TABLE t1,t2;
|
||||
CHECK TABLE t2;
|
||||
|
||||
DROP TABLE t1, t2;
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
--source include/innodb_page_size.inc
|
||||
--source include/have_debug.inc
|
||||
--source include/not_embedded.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
# Slow shutdown and restart to make sure ibuf merge is finished
|
||||
SET GLOBAL innodb_fast_shutdown = 0;
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
-- source include/have_innodb.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
FLUSH TABLES;
|
||||
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
--source include/innodb_page_size.inc
|
||||
--source include/have_partition.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
#
|
||||
# MDEV-8386: MariaDB creates very big tmp file and hangs on xtradb
|
||||
|
|
|
@ -9,6 +9,8 @@
|
|||
--source include/not_embedded.inc
|
||||
# DBUG_SUICIDE() hangs under valgrind
|
||||
--source include/not_valgrind.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
CREATE TABLE t1(
|
||||
a INT AUTO_INCREMENT PRIMARY KEY,
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
-- source include/have_innodb.inc
|
||||
-- source include/count_sessions.inc
|
||||
-- source include/have_debug_sync.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
let $MYSQLD_DATADIR= `select @@datadir`;
|
||||
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
--source include/have_innodb.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
SET @saved_page_cleaners = @@GLOBAL.innodb_page_cleaners;
|
||||
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
--source include/have_innodb.inc
|
||||
--source include/not_embedded.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
# zlib
|
||||
set global innodb_compression_algorithm = 1;
|
||||
|
|
|
@ -91,6 +91,9 @@ truncate table t2;
|
|||
|
||||
--error ER_OPEN_AS_READONLY
|
||||
drop table t2;
|
||||
|
||||
create schema db;
|
||||
drop schema db;
|
||||
show tables;
|
||||
|
||||
--echo # Restart the server with innodb_force_recovery as 6.
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
[strict_crc32]
|
||||
--innodb-checksum-algorithm=strict_crc32
|
||||
|
||||
[strict_full_crc32]
|
||||
--innodb-checksum-algorithm=strict_full_crc32
|
|
@ -0,0 +1,2 @@
|
|||
--skip-innodb-doublewrite
|
||||
--skip-innodb-buffer-pool-load-at-startup
|
|
@ -0,0 +1,59 @@
|
|||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
|
||||
--disable_query_log
|
||||
call mtr.add_suppression("InnoDB: Database page corruption on disk or a failed file read of tablespace test/t1 page ");
|
||||
call mtr.add_suppression("InnoDB: Background Page read failed to read or decrypt \\[page id: space=\\d+, page number=19\\]");
|
||||
call mtr.add_suppression("\\[ERROR\\] InnoDB: Failed to read file '.*test.t1\\.ibd' at offset 19: Page read from tablespace is corrupted\\.");
|
||||
call mtr.add_suppression("\\[ERROR\\] InnoDB: Plugin initialization aborted at srv0start\\.cc.* with error Data structure corruption");
|
||||
call mtr.add_suppression("\\[ERROR\\] Plugin 'InnoDB' (init function|registration)");
|
||||
call mtr.add_suppression("\\[ERROR\\] InnoDB: We detected index corruption");
|
||||
call mtr.add_suppression("\\[ERROR\\] mysqld.*: Index for table 't1' is corrupt; try to repair it");
|
||||
--enable_query_log
|
||||
CREATE TABLE t1 (pk INT PRIMARY KEY, c CHAR(255))ENGINE=InnoDB STATS_PERSISTENT=0;
|
||||
|
||||
SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG = 2;
|
||||
|
||||
INSERT INTO t1 VALUES(1, 'sql'), (2, 'server'), (3, 'mariadb'),
|
||||
(4, 'mariadb'), (5, 'test1'), (6, 'test2'), (7, 'test3'),
|
||||
(8, 'test4'), (9, 'test5'), (10, 'test6'), (11, 'test7'),
|
||||
(12, 'test8');
|
||||
|
||||
let $restart_noprint=2;
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
|
||||
let MYSQLD_DATADIR=`select @@datadir`;
|
||||
|
||||
SELECT COUNT(*) FROM t1;
|
||||
--source ../include/no_checkpoint_start.inc
|
||||
UPDATE t1 SET c='best8' WHERE pk=12;
|
||||
|
||||
--let CLEANUP_IF_CHECKPOINT=DROP TABLE t1;
|
||||
--source ../include/no_checkpoint_end.inc
|
||||
--echo # Corrupt the pages
|
||||
|
||||
perl;
|
||||
my $file = "$ENV{MYSQLD_DATADIR}/test/t1.ibd";
|
||||
open(FILE, "+<$file") || die "Unable to open $file";
|
||||
binmode FILE;
|
||||
seek (FILE, $ENV{INNODB_PAGE_SIZE} * 19 + 38, SEEK_SET) or die "seek";
|
||||
print FILE "junk";
|
||||
close FILE or die "close";
|
||||
EOF
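# Assumption (not stated in the test itself): innodb_page_size * 19 + 38
# seeks just past the 38-byte FIL page header of page 19, so the "junk"
# bytes land in the page payload and the stored checksum no longer matches
# on the next read.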
|
||||
|
||||
--source include/start_mysqld.inc
|
||||
--error ER_UNKNOWN_STORAGE_ENGINE
|
||||
SELECT * FROM t1 WHERE PK = 1;
|
||||
|
||||
let $restart_parameters=--innodb-force-recovery=1;
|
||||
# Work around MDEV-19435 to avoid crash in row_purge_reset_trx_id()
|
||||
let $restart_parameters=--innodb-force-recovery=2;
|
||||
--source include/restart_mysqld.inc
|
||||
SELECT * FROM t1 WHERE PK = 1;
|
||||
--error ER_NOT_KEYFILE
|
||||
SELECT * FROM t1 WHERE pk = 12;
|
||||
|
||||
DROP TABLE t1;
|
||||
let $restart_parameters=;
|
||||
--source include/restart_mysqld.inc
|
|
@ -4,6 +4,8 @@
|
|||
--source include/not_embedded.inc
|
||||
# DBUG_EXECUTE_IF is needed
|
||||
--source include/have_debug.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
if (`SELECT @@innodb_log_file_size = 1048576`) {
|
||||
--skip Test requires innodb_log_file_size>1M.
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
--source include/windows.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
# Deadlock in conjunction with the innodb change buffering.
|
||||
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
--source include/have_debug_sync.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
call mtr.add_suppression("InnoDB: innodb_open_files=13 is exceeded");
|
||||
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
-- source include/have_innodb_max_16k.inc
|
||||
# restart does not work with embedded
|
||||
-- source include/not_embedded.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
|
||||
CREATE TABLE t (a INT) ENGINE=INNODB;
|
||||
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
--source include/have_debug.inc
|
||||
# This test is slow on buildbot.
|
||||
--source include/big_test.inc
|
||||
call mtr.add_suppression("InnoDB: New log files created");
|
||||
|
||||
let $basedir=$MYSQLTEST_VARDIR/tmp/backup;
|
||||
|
|
|
@ -271,6 +271,7 @@ TIME,HOSTNAME,root,localhost,ID,0,CONNECT,mysql,,0
|
|||
TIME,HOSTNAME,root,localhost,ID,0,DISCONNECT,mysql,,0
|
||||
TIME,HOSTNAME,no_such_user,localhost,ID,0,FAILED_CONNECT,,,ID
|
||||
TIME,HOSTNAME,no_such_user,localhost,ID,0,DISCONNECT,,,0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_incl_users=\'odin, dva, tri\'',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_incl_users=\'odin, root, dva, tri\'',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,CREATE,test,t2,
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'create table t2 (id int)',0
|
||||
|
@ -393,6 +394,7 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE USER u3 IDENTIFIED BY ***
|
|||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop user u1, u2, u3',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'insert into t1 values (1), (2)',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_logging= off',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_logging= on',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_events=\'\'',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global serv',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'select (1), (2)',0
|
||||
|
|
|
@ -244,6 +244,7 @@ TIME,HOSTNAME,root,localhost,ID,0,CONNECT,mysql,,0
|
|||
TIME,HOSTNAME,root,localhost,ID,0,DISCONNECT,mysql,,0
|
||||
TIME,HOSTNAME,no_such_user,localhost,ID,0,FAILED_CONNECT,,,ID
|
||||
TIME,HOSTNAME,no_such_user,localhost,ID,0,DISCONNECT,,,0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_incl_users=\'odin, dva, tri\'',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_incl_users=\'odin, root, dva, tri\'',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,CREATE,test,t2,
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'create table t2 (id int)',0
|
||||
|
|
|
@ -7,24 +7,27 @@ SET @@GLOBAL.rpl_semi_sync_master_timeout=100;
|
|||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@GLOBAL.replicate_events_marked_for_skip=FILTER_ON_MASTER;
|
||||
SET @@GLOBAL. rpl_semi_sync_slave_enabled = 1;
|
||||
SET @@GLOBAL.rpl_semi_sync_slave_enabled = 1;
|
||||
include/start_slave.inc
|
||||
connection master;
|
||||
CREATE TABLE t1 (a INT) ENGINE=innodb;
|
||||
SET @@GLOBAL.debug_dbug= "d,dbug_master_binlog_over_2GB";
|
||||
SET @@GLOBAL.debug_dbug="d,dbug_master_binlog_over_2GB";
|
||||
SET @@SESSION.skip_replication=1;
|
||||
INSERT INTO t1 SET a=1;
|
||||
SET @@SESSION.skip_replication=0;
|
||||
INSERT INTO t1 SET a=0;
|
||||
SET @@GLOBAL.debug_dbug="";
|
||||
FLUSH LOGS;
|
||||
connection slave;
|
||||
connection master;
|
||||
SET @@GLOBAL.debug_dbug="";
|
||||
SET @@GLOBAL. rpl_semi_sync_master_timeout = 10000;
|
||||
SET @@GLOBAL. rpl_semi_sync_master_enabled = 0;
|
||||
SET @@GLOBAL.debug_dbug=@@GLOBAL.debug_dbug;
|
||||
SET @@GLOBAL.rpl_semi_sync_master_timeout = 10000;
|
||||
SET @@GLOBAL.rpl_semi_sync_master_enabled = 0;
|
||||
connection master;
|
||||
DROP TABLE t1;
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@GLOBAL. rpl_semi_sync_slave_enabled = 0;
|
||||
SET @@GLOBAL.rpl_semi_sync_slave_enabled = 0;
|
||||
SET @@GLOBAL.replicate_events_marked_for_skip = REPLICATE;
|
||||
include/start_slave.inc
|
||||
include/rpl_end.inc
|
||||
|
|
|
@ -15,29 +15,34 @@ source include/master-slave.inc;
|
|||
# Suppress warnings that might be generated during the test
|
||||
call mtr.add_suppression("Timeout waiting for reply of binlog");
|
||||
|
||||
--let $sav_enabled_master=`SELECT @@GLOBAL.rpl_semi_sync_master_enabled `
|
||||
--let $sav_timeout_master=`SELECT @@GLOBAL.rpl_semi_sync_master_timeout `
|
||||
--let $sav_enabled_master=`SELECT @@GLOBAL.rpl_semi_sync_master_enabled`
|
||||
--let $sav_timeout_master=`SELECT @@GLOBAL.rpl_semi_sync_master_timeout`
|
||||
SET @@GLOBAL.rpl_semi_sync_master_enabled = 1;
|
||||
SET @@GLOBAL.rpl_semi_sync_master_timeout=100;
|
||||
|
||||
--connection slave
|
||||
--let $sav_skip_marked_slave=`SELECT @@GLOBAL.replicate_events_marked_for_skip`
|
||||
--let $sav_enabled_slave=`SELECT @@GLOBAL.rpl_semi_sync_slave_enabled`
|
||||
source include/stop_slave.inc;
|
||||
--let $sav_skip_marked_slave=`SELECT @@GLOBAL.replicate_events_marked_for_skip `
|
||||
SET @@GLOBAL.replicate_events_marked_for_skip=FILTER_ON_MASTER;
|
||||
--let $sav_enabled_slave=`SELECT @@GLOBAL.rpl_semi_sync_slave_enabled `
|
||||
SET @@GLOBAL. rpl_semi_sync_slave_enabled = 1;
|
||||
|
||||
SET @@GLOBAL.rpl_semi_sync_slave_enabled = 1;
|
||||
source include/start_slave.inc;
|
||||
|
||||
--connection master
|
||||
CREATE TABLE t1 (a INT) ENGINE=innodb;
|
||||
|
||||
# Make the following events appear as if they are offset by more than 2GB from the beginning of the binlog
|
||||
SET @@GLOBAL.debug_dbug= "d,dbug_master_binlog_over_2GB";
|
||||
--let $sav_debug_dbug=@@GLOBAL.debug_dbug
|
||||
SET @@GLOBAL.debug_dbug="d,dbug_master_binlog_over_2GB";
|
||||
SET @@SESSION.skip_replication=1;
|
||||
INSERT INTO t1 SET a=1;
|
||||
SET @@SESSION.skip_replication=0;
|
||||
INSERT INTO t1 SET a=0;
|
||||
SET @@GLOBAL.debug_dbug="";
|
||||
|
||||
# The current binlog is inconsistent so let's rotate it away
|
||||
# to clean up simulation results.
|
||||
FLUSH LOGS;
|
||||
|
||||
--sync_slave_with_master
|
||||
|
||||
|
@ -45,17 +50,17 @@ INSERT INTO t1 SET a=0;
|
|||
# Clean up
|
||||
#
|
||||
--connection master
|
||||
SET @@GLOBAL.debug_dbug="";
|
||||
--eval SET @@GLOBAL. rpl_semi_sync_master_timeout = $sav_timeout_master
|
||||
--eval SET @@GLOBAL. rpl_semi_sync_master_enabled = $sav_enabled_master
|
||||
--eval SET @@GLOBAL.debug_dbug=$sav_debug_dbug
|
||||
--eval SET @@GLOBAL.rpl_semi_sync_master_timeout = $sav_timeout_master
|
||||
--eval SET @@GLOBAL.rpl_semi_sync_master_enabled = $sav_enabled_master
|
||||
|
||||
--connection master
|
||||
DROP TABLE t1;
|
||||
|
||||
--sync_slave_with_master
|
||||
source include/stop_slave.inc;
|
||||
--eval SET @@GLOBAL. rpl_semi_sync_slave_enabled = $sav_enabled_slave
|
||||
--eval SET @@GLOBAL.rpl_semi_sync_slave_enabled = $sav_enabled_slave
|
||||
--eval SET @@GLOBAL.replicate_events_marked_for_skip = $sav_skip_marked_slave
|
||||
|
||||
--let $rpl_only_running_threads= 1
|
||||
source include/start_slave.inc;
|
||||
--source include/rpl_end.inc
|
||||
|
|
|
@ -5,7 +5,4 @@
|
|||
SELECT @@wsrep_on;
|
||||
@@wsrep_on
|
||||
0
|
||||
SELECT @@GLOBAL.wsrep_provider;
|
||||
@@GLOBAL.wsrep_provider
|
||||
libgalera_smm.so
|
||||
SET @@GLOBAL.wsrep_cluster_address='gcomm://';
|
||||
|
|
|
@ -8,7 +8,5 @@
|
|||
--echo #
|
||||
|
||||
SELECT @@wsrep_on;
|
||||
--replace_regex /.*libgalera_smm.so/libgalera_smm.so/
|
||||
SELECT @@GLOBAL.wsrep_provider;
|
||||
SET @@GLOBAL.wsrep_cluster_address='gcomm://';
|
||||
|
||||
|
|
|
@ -225,7 +225,7 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
|
|||
DBUG_SET("-d,simulate_out_of_memory");
|
||||
DBUG_RETURN((void*) 0); /* purecov: inspected */
|
||||
});
|
||||
length= ALIGN_SIZE(length);
|
||||
length= ALIGN_SIZE(length) + REDZONE_SIZE;
|
||||
if ((*(prev= &mem_root->free)) != NULL)
|
||||
{
|
||||
if ((*prev)->left < length &&
|
||||
|
@ -274,6 +274,7 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
|
|||
mem_root->used= next;
|
||||
mem_root->first_block_usage= 0;
|
||||
}
|
||||
point+= REDZONE_SIZE;
|
||||
TRASH_ALLOC(point, original_length);
|
||||
DBUG_PRINT("exit",("ptr: %p", point));
|
||||
DBUG_RETURN((void*) point);
|
||||
|
|
|
@ -60,6 +60,16 @@ public:
|
|||
}
|
||||
virtual int update(const uchar *src, uint slen, uchar *dst, uint *dlen)
|
||||
{
|
||||
#ifdef HAVE_WOLFSSL
|
||||
// WolfSSL validates its parameters and does not accept a NULL source pointer in the call below.
|
||||
if (!src)
|
||||
{
|
||||
static uchar dummy[MY_AES_BLOCK_SIZE];
|
||||
DBUG_ASSERT(!slen);
|
||||
src=dummy;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (EVP_CipherUpdate(ctx, dst, (int*)dlen, src, slen) != 1)
|
||||
return MY_AES_OPENSSL_ERROR;
|
||||
return MY_AES_OK;
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
|
||||
|
||||
#define PLUGIN_VERSION 0x104
|
||||
#define PLUGIN_STR_VERSION "1.4.5"
|
||||
#define PLUGIN_STR_VERSION "1.4.6"
|
||||
|
||||
#define _my_thread_var loc_thread_var
|
||||
|
||||
|
@ -2024,10 +2024,14 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev)
|
|||
update_connection_info(cn, event_class, ev, &after_action);
|
||||
|
||||
if (!logging)
|
||||
{
|
||||
if (cn)
|
||||
cn->log_always= 0;
|
||||
goto exit_func;
|
||||
}
|
||||
|
||||
if (event_class == MYSQL_AUDIT_GENERAL_CLASS && FILTER(EVENT_QUERY) &&
|
||||
cn && do_log_user(cn->user))
|
||||
cn && (cn->log_always || do_log_user(cn->user)))
|
||||
{
|
||||
const struct mysql_event_general *event =
|
||||
(const struct mysql_event_general *) ev;
|
||||
|
@ -2040,6 +2044,7 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev)
|
|||
{
|
||||
log_statement(cn, event, "QUERY");
|
||||
cn->query_length= 0; /* So the log_current_query() won't log this again. */
|
||||
cn->log_always= 0;
|
||||
}
|
||||
}
|
||||
else if (event_class == MYSQL_AUDIT_TABLE_CLASS && FILTER(EVENT_TABLE) && cn)
|
||||
|
@ -2110,8 +2115,6 @@ exit_func:
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (cn)
|
||||
cn->log_always= 0;
|
||||
flogger_mutex_unlock(&lock_operations);
|
||||
}
|
||||
|
||||
|
@ -2556,8 +2559,7 @@ static void log_current_query(MYSQL_THD thd)
|
|||
if (!thd)
|
||||
return;
|
||||
cn= get_loc_info(thd);
|
||||
if (!ci_needs_setup(cn) && cn->query_length &&
|
||||
FILTER(EVENT_QUERY) && do_log_user(cn->user))
|
||||
if (!ci_needs_setup(cn) && cn->query_length)
|
||||
{
|
||||
cn->log_always= 1;
|
||||
log_statement_ex(cn, cn->query_time, thd_get_thread_id(thd),
|
||||
|
@ -2817,6 +2819,7 @@ static void update_logging(MYSQL_THD thd,
|
|||
{
|
||||
CLIENT_ERROR(1, "Logging was disabled.", MYF(ME_WARNING));
|
||||
}
|
||||
mark_always_logged(thd);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -834,8 +834,11 @@ int Log_event_writer::encrypt_and_write(const uchar *pos, size_t len)
|
|||
return 1;
|
||||
|
||||
uint dstlen;
|
||||
if (encryption_ctx_update(ctx, pos, (uint)len, dst, &dstlen))
|
||||
if (len == 0)
|
||||
dstlen= 0;
|
||||
else if (encryption_ctx_update(ctx, pos, (uint)len, dst, &dstlen))
|
||||
goto err;
|
||||
|
||||
if (maybe_write_event_len(dst, dstlen))
|
||||
return 1;
|
||||
pos= dst;
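/*
  A zero-length chunk skips the cipher call entirely: dstlen is forced to 0
  instead of handing an empty buffer to encryption_ctx_update(). Presumably
  this sidesteps backends such as WolfSSL that reject empty input, matching
  the HAVE_WOLFSSL guard added to the crypt update() method earlier in this
  change.
*/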
|
||||
|
@ -3196,6 +3199,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
|
|||
flags2((standalone ? FL_STANDALONE : 0) | (commit_id_arg ? FL_GROUP_COMMIT_ID : 0))
|
||||
{
|
||||
cache_type= Log_event::EVENT_NO_CACHE;
|
||||
bool is_tmp_table= thd_arg->lex->stmt_accessed_temp_table();
|
||||
if (thd_arg->transaction.stmt.trans_did_wait() ||
|
||||
thd_arg->transaction.all.trans_did_wait())
|
||||
flags2|= FL_WAITED;
|
||||
|
@ -3204,7 +3208,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
|
|||
thd_arg->transaction.all.trans_did_ddl() ||
|
||||
thd_arg->transaction.all.has_created_dropped_temp_table())
|
||||
flags2|= FL_DDL;
|
||||
else if (is_transactional)
|
||||
else if (is_transactional && !is_tmp_table)
|
||||
flags2|= FL_TRANSACTIONAL;
|
||||
if (!(thd_arg->variables.option_bits & OPTION_RPL_SKIP_PARALLEL))
|
||||
flags2|= FL_ALLOW_PARALLEL;
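/*
  FL_TRANSACTIONAL is withheld when the statement accessed a temporary table
  (is_tmp_table above). The rationale is stated here as an assumption:
  changes to session-private temporary tables cannot be safely rolled back
  and retried by an optimistic parallel-replication slave, so such GTIDs
  should not be scheduled as ordinary transactional events.
*/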
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* Copyright (c) 2006, 2016, Oracle and/or its affiliates.
|
||||
Copyright (c) 2010, 2017, MariaDB Corporation.
|
||||
Copyright (c) 2010, 2019, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
|
@ -612,10 +612,10 @@ extern mysql_mutex_t
|
|||
LOCK_item_func_sleep, LOCK_status,
|
||||
LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator,
|
||||
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
|
||||
LOCK_active_mi, LOCK_manager,
|
||||
LOCK_global_system_variables, LOCK_user_conn,
|
||||
LOCK_active_mi, LOCK_manager, LOCK_user_conn,
|
||||
LOCK_prepared_stmt_count, LOCK_error_messages,
|
||||
LOCK_slave_background;
|
||||
extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_global_system_variables;
|
||||
extern mysql_rwlock_t LOCK_all_status_vars;
|
||||
extern mysql_mutex_t LOCK_start_thread;
|
||||
#ifdef HAVE_OPENSSL
|
||||
|
|
365
sql/opt_range.cc
|
@ -328,8 +328,6 @@ public:
|
|||
uint *imerge_cost_buff; /* buffer for index_merge cost estimates */
|
||||
uint imerge_cost_buff_size; /* size of the buffer */
|
||||
|
||||
/* TRUE if last checked tree->key can be used for ROR-scan */
|
||||
bool is_ror_scan;
|
||||
/* Number of ranges in the last checked tree->key */
|
||||
uint n_ranges;
|
||||
uint8 first_null_comp; /* first null component if any, 0 - otherwise */
|
||||
|
@ -351,7 +349,7 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts);
|
|||
static ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
||||
SEL_ARG *tree, bool update_tbl_stats,
|
||||
uint *mrr_flags, uint *bufsize,
|
||||
Cost_estimate *cost);
|
||||
Cost_estimate *cost, bool *is_ror_scan);
|
||||
|
||||
QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index,
|
||||
SEL_ARG *key_tree, uint mrr_flags,
|
||||
|
@ -431,16 +429,18 @@ static int and_range_trees(RANGE_OPT_PARAM *param,
|
|||
static bool remove_nonrange_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree);
|
||||
|
||||
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
const uchar *key);
|
||||
const uchar* key, uint length);
|
||||
static void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
|
||||
uint n_keypart, key_part_map keypart_map);
|
||||
|
||||
static void append_range_all_keyparts(Json_writer_array *range_trace,
|
||||
String *range_string,
|
||||
String *range_so_far, const SEL_ARG *keypart,
|
||||
const KEY_PART_INFO *key_parts);
|
||||
static void trace_ranges(Json_writer_array *range_trace,
|
||||
PARAM *param, uint idx,
|
||||
SEL_ARG *keypart,
|
||||
const KEY_PART_INFO *key_parts);
|
||||
|
||||
static
|
||||
void append_range(String *out, const KEY_PART_INFO *key_parts,
|
||||
const uchar *min_key, const uchar *max_key, const uint flag);
|
||||
void print_range(String *out, const KEY_PART_INFO *key_part,
|
||||
KEY_MULTI_RANGE *range, uint n_key_parts);
|
||||
|
||||
|
||||
/*
|
||||
|
@ -2208,7 +2208,7 @@ public:
|
|||
@param param Parameters for range analysis of this table
|
||||
@param trace_object The optimizer trace object the info is appended to
|
||||
*/
|
||||
virtual void trace_basic_info(const PARAM *param,
|
||||
virtual void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const= 0;
|
||||
|
||||
};
|
||||
|
@ -2251,11 +2251,11 @@ public:
|
|||
}
|
||||
DBUG_RETURN(quick);
|
||||
}
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
void TRP_RANGE::trace_basic_info(const PARAM *param,
|
||||
void TRP_RANGE::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
DBUG_ASSERT(param->using_real_indexes);
|
||||
|
@ -2273,10 +2273,7 @@ void TRP_RANGE::trace_basic_info(const PARAM *param,
|
|||
// TRP_RANGE should not be created if there are no range intervals
|
||||
DBUG_ASSERT(key);
|
||||
|
||||
String range_info;
|
||||
range_info.length(0);
|
||||
range_info.set_charset(system_charset_info);
|
||||
append_range_all_keyparts(&trace_range, NULL, &range_info, key, key_part);
|
||||
trace_ranges(&trace_range, param, key_idx, key, key_part);
|
||||
}
|
||||
|
||||
|
||||
|
@ -2296,7 +2293,7 @@ public:
|
|||
struct st_ror_scan_info *cpk_scan; /* Clustered PK scan, if there is one */
|
||||
bool is_covering; /* TRUE if no row retrieval phase is necessary */
|
||||
double index_scan_costs; /* SUM(cost(index_scan)) */
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
|
@ -2317,11 +2314,11 @@ public:
|
|||
MEM_ROOT *parent_alloc);
|
||||
TABLE_READ_PLAN **first_ror; /* array of ptrs to plans for merged scans */
|
||||
TABLE_READ_PLAN **last_ror; /* end of the above array */
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
void TRP_ROR_UNION::trace_basic_info(const PARAM *param,
|
||||
void TRP_ROR_UNION::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
|
@ -2351,12 +2348,12 @@ public:
|
|||
TRP_RANGE **range_scans_end; /* end of the array */
|
||||
/* keys whose scans are to be filtered by cpk conditions */
|
||||
key_map filtered_scans;
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
|
||||
};
|
||||
|
||||
void TRP_INDEX_INTERSECT::trace_basic_info(const PARAM *param,
|
||||
void TRP_INDEX_INTERSECT::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
|
@ -2385,11 +2382,11 @@ public:
|
|||
MEM_ROOT *parent_alloc);
|
||||
TRP_RANGE **range_scans; /* array of ptrs to plans of merged scans */
|
||||
TRP_RANGE **range_scans_end; /* end of the array */
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
void TRP_INDEX_MERGE::trace_basic_info(const PARAM *param,
|
||||
void TRP_INDEX_MERGE::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
|
@ -2452,12 +2449,12 @@ public:
|
|||
QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows,
|
||||
MEM_ROOT *parent_alloc);
|
||||
void use_index_scan() { is_index_scan= TRUE; }
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
|
||||
void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
|
||||
void TRP_GROUP_MIN_MAX::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
|
@ -2489,10 +2486,8 @@ void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
|
|||
// can have group quick without ranges
|
||||
if (index_tree)
|
||||
{
|
||||
String range_info;
|
||||
range_info.set_charset(system_charset_info);
|
||||
append_range_all_keyparts(&trace_range, NULL, &range_info, index_tree,
|
||||
key_part);
|
||||
trace_ranges(&trace_range, param, param_idx,
|
||||
index_tree, key_part);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3176,6 +3171,7 @@ double records_in_column_ranges(PARAM *param, uint idx,
|
|||
seq.real_keyno= MAX_KEY;
|
||||
seq.param= param;
|
||||
seq.start= tree;
|
||||
seq.is_ror_scan= FALSE;
|
||||
|
||||
seq_it= seq_if.init((void *) &seq, 0, flags);
|
||||
|
||||
|
@ -3395,7 +3391,6 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
|
|||
param.mem_root= &alloc;
|
||||
param.old_root= thd->mem_root;
|
||||
param.table= table;
|
||||
param.is_ror_scan= FALSE;
|
||||
param.remove_false_where_parts= true;
|
||||
|
||||
if (create_key_parts_for_pseudo_indexes(¶m, used_fields))
|
||||
|
@ -6374,7 +6369,7 @@ typedef struct st_ror_scan_info : INDEX_SCAN_INFO
|
|||
{
|
||||
} ROR_SCAN_INFO;
|
||||
|
||||
void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
|
||||
void TRP_ROR_INTERSECT::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
|
@ -6397,20 +6392,9 @@ void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
|
|||
trace_isect_idx.add("rows", (*cur_scan)->records);
|
||||
|
||||
Json_writer_array trace_range(thd, "ranges");
|
||||
for (const SEL_ARG *current= (*cur_scan)->sel_arg->first(); current;
|
||||
current= current->next)
|
||||
{
|
||||
String range_info;
|
||||
range_info.set_charset(system_charset_info);
|
||||
for (const SEL_ARG *part= current; part;
|
||||
part= part->next_key_part ? part->next_key_part : nullptr)
|
||||
{
|
||||
const KEY_PART_INFO *cur_key_part= key_part + part->part;
|
||||
append_range(&range_info, cur_key_part, part->min_value,
|
||||
part->max_value, part->min_flag | part->max_flag);
|
||||
}
|
||||
trace_range.add(range_info.ptr(), range_info.length());
|
||||
}
|
||||
|
||||
trace_ranges(&trace_range, param, (*cur_scan)->idx,
|
||||
(*cur_scan)->sel_arg, key_part);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -7363,6 +7347,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
|
|||
Cost_estimate cost;
|
||||
double found_read_time;
|
||||
uint mrr_flags, buf_size;
|
||||
bool is_ror_scan= FALSE;
|
||||
INDEX_SCAN_INFO *index_scan;
|
||||
uint keynr= param->real_keynr[idx];
|
||||
if (key->type == SEL_ARG::MAYBE_KEY ||
|
||||
|
@ -7377,7 +7362,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
|
|||
|
||||
found_records= check_quick_select(param, idx, read_index_only, key,
|
||||
update_tbl_stats, &mrr_flags,
|
||||
&buf_size, &cost);
|
||||
&buf_size, &cost, &is_ror_scan);
|
||||
|
||||
if (found_records != HA_POS_ERROR && tree->index_scans &&
|
||||
(index_scan= (INDEX_SCAN_INFO *)alloc_root(param->mem_root,
|
||||
|
@ -7388,9 +7373,6 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
|
|||
const KEY &cur_key= param->table->key_info[keynr];
|
||||
const KEY_PART_INFO *key_part= cur_key.key_part;
|
||||
|
||||
String range_info;
|
||||
range_info.set_charset(system_charset_info);
|
||||
|
||||
index_scan->idx= idx;
|
||||
index_scan->keynr= keynr;
|
||||
index_scan->key_info= ¶m->table->key_info[keynr];
|
||||
|
@ -7401,17 +7383,16 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
|
|||
*tree->index_scans_end++= index_scan;
|
||||
|
||||
if (unlikely(thd->trace_started()))
|
||||
append_range_all_keyparts(&trace_range, NULL, &range_info, key,
|
||||
key_part);
|
||||
trace_ranges(&trace_range, param, idx, key, key_part);
|
||||
trace_range.end();
|
||||
|
||||
trace_idx.add("rowid_ordered", param->is_ror_scan)
|
||||
trace_idx.add("rowid_ordered", is_ror_scan)
|
||||
.add("using_mrr", !(mrr_flags & HA_MRR_USE_DEFAULT_IMPL))
|
||||
.add("index_only", read_index_only)
|
||||
.add("rows", found_records)
|
||||
.add("cost", cost.total_cost());
|
||||
}
|
||||
if ((found_records != HA_POS_ERROR) && param->is_ror_scan)
|
||||
if ((found_records != HA_POS_ERROR) && is_ror_scan)
|
||||
{
|
||||
tree->n_ror_scans++;
|
||||
tree->ror_scans_map.set_bit(idx);
|
||||
|
@ -11026,7 +11007,8 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
|
|||
static
|
||||
ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
||||
SEL_ARG *tree, bool update_tbl_stats,
|
||||
uint *mrr_flags, uint *bufsize, Cost_estimate *cost)
|
||||
uint *mrr_flags, uint *bufsize, Cost_estimate *cost,
|
||||
bool *is_ror_scan)
|
||||
{
|
||||
SEL_ARG_RANGE_SEQ seq;
|
||||
RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init, sel_arg_range_seq_next, 0, 0};
|
||||
|
@ -11051,9 +11033,9 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
|||
param->range_count=0;
|
||||
param->max_key_part=0;
|
||||
|
||||
param->is_ror_scan= TRUE;
|
||||
seq.is_ror_scan= TRUE;
|
||||
if (file->index_flags(keynr, 0, TRUE) & HA_KEY_SCAN_NOT_ROR)
|
||||
param->is_ror_scan= FALSE;
|
||||
seq.is_ror_scan= FALSE;
|
||||
|
||||
*mrr_flags= param->force_default_mrr? HA_MRR_USE_DEFAULT_IMPL: 0;
|
||||
/*
|
||||
|
@ -11106,12 +11088,12 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
|||
TODO: Don't have this logic here, make table engines return
|
||||
appropriate flags instead.
|
||||
*/
|
||||
param->is_ror_scan= FALSE;
|
||||
seq.is_ror_scan= FALSE;
|
||||
}
|
||||
else if (param->table->s->primary_key == keynr && pk_is_clustered)
|
||||
{
|
||||
/* Clustered PK scan is always a ROR scan (TODO: same as above) */
|
||||
param->is_ror_scan= TRUE;
|
||||
seq.is_ror_scan= TRUE;
|
||||
}
|
||||
else if (param->range_count > 1)
|
||||
{
|
||||
|
@ -11121,8 +11103,9 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
|||
(1,3)" returns ROR order for all records with x=1, then ROR
|
||||
order for records with x=3
|
||||
*/
|
||||
param->is_ror_scan= FALSE;
|
||||
seq.is_ror_scan= FALSE;
|
||||
}
|
||||
*is_ror_scan= seq.is_ror_scan;
|
||||
|
||||
DBUG_PRINT("exit", ("Records: %lu", (ulong) rows));
|
||||
DBUG_RETURN(rows); //psergey-merge:todo: maintain first_null_comp.
|
||||
|
@ -13547,21 +13530,19 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
|
|||
Cost_estimate dummy_cost;
|
||||
uint mrr_flags= HA_MRR_USE_DEFAULT_IMPL;
|
||||
uint mrr_bufsize=0;
|
||||
bool is_ror_scan= FALSE;
|
||||
cur_quick_prefix_records= check_quick_select(param, cur_param_idx,
|
||||
FALSE /*don't care*/,
|
||||
cur_index_tree, TRUE,
|
||||
&mrr_flags, &mrr_bufsize,
|
||||
&dummy_cost);
|
||||
&dummy_cost, &is_ror_scan);
|
||||
if (unlikely(cur_index_tree && thd->trace_started()))
|
||||
{
|
||||
Json_writer_array trace_range(thd, "ranges");
|
||||
|
||||
const KEY_PART_INFO *key_part= cur_index_info->key_part;
|
||||
|
||||
String range_info;
|
||||
range_info.set_charset(system_charset_info);
|
||||
append_range_all_keyparts(&trace_range, NULL, &range_info,
|
||||
cur_index_tree, key_part);
|
||||
trace_ranges(&trace_range, param, cur_param_idx,
|
||||
cur_index_tree, key_part);
|
||||
}
|
||||
}
|
||||
cost_group_min_max(table, cur_index_info, cur_used_key_parts,
|
||||
|
@ -15733,12 +15714,16 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
|
|||
}
|
||||
|
||||
#endif /* !DBUG_OFF */
|
||||
|
||||
static
|
||||
void append_range(String *out, const KEY_PART_INFO *key_part,
|
||||
const uchar *min_key, const uchar *max_key, const uint flag)
|
||||
void print_range(String *out, const KEY_PART_INFO *key_part,
|
||||
KEY_MULTI_RANGE *range, uint n_key_parts)
|
||||
{
|
||||
if (out->length() > 0)
|
||||
out->append(STRING_WITH_LEN(" AND "));
|
||||
uint flag= range->range_flag;
|
||||
String key_name;
|
||||
key_name.set_charset(system_charset_info);
|
||||
key_part_map keypart_map= range->start_key.keypart_map |
|
||||
range->end_key.keypart_map;
|
||||
|
||||
if (flag & GEOM_FLAG)
|
||||
{
|
||||
|
@ -15747,22 +15732,24 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
|
|||
range types, so printing "col < some_geom" doesn't make sense.
|
||||
Just print the column name, not operator.
|
||||
*/
|
||||
out->append(key_part->field->field_name);
|
||||
print_keyparts_name(out, key_part, n_key_parts, keypart_map);
|
||||
out->append(STRING_WITH_LEN(" "));
|
||||
print_key_value(out, key_part, min_key);
|
||||
print_key_value(out, key_part, range->start_key.key,
|
||||
range->start_key.length);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(flag & NO_MIN_RANGE))
|
||||
{
|
||||
print_key_value(out, key_part, min_key);
|
||||
print_key_value(out, key_part, range->start_key.key,
|
||||
range->start_key.length);
|
||||
if (flag & NEAR_MIN)
|
||||
out->append(STRING_WITH_LEN(" < "));
|
||||
else
|
||||
out->append(STRING_WITH_LEN(" <= "));
|
||||
}
|
||||
|
||||
out->append(key_part->field->field_name);
|
||||
print_keyparts_name(out, key_part, n_key_parts, keypart_map);
|
||||
|
||||
if (!(flag & NO_MAX_RANGE))
|
||||
{
|
||||
|
@ -15770,7 +15757,8 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
|
|||
out->append(STRING_WITH_LEN(" < "));
|
||||
else
|
||||
out->append(STRING_WITH_LEN(" <= "));
|
||||
print_key_value(out, key_part, max_key);
|
||||
print_key_value(out, key_part, range->end_key.key,
|
||||
range->end_key.length);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -15778,60 +15766,43 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
|
|||
|
||||
Add ranges to the trace
For example:
query: select * from t1 where a=2;
and we have an index on a, so we create the range
2 <= a <= 2
Now let's say we have an index a_b(a,b) and the
query: select * from t1 where a=2 and b=4;
then we create the range:
(2,4) <= (a,b) <= (2,4)
and this is what is added to the trace
*/
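/*
  As a concrete illustration of the example above (and assuming the default
  optimizer-trace formatting), trace_ranges() below is expected to add
  entries of the form
    "ranges": ["(2,4) <= (a,b) <= (2,4)"]
  to the range-analysis part of the optimizer trace, one string per
  KEY_MULTI_RANGE returned by sel_arg_range_seq_next().
*/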
|
||||
|
||||
static void append_range_all_keyparts(Json_writer_array *range_trace,
|
||||
String *range_string,
|
||||
String *range_so_far, const SEL_ARG *keypart,
|
||||
const KEY_PART_INFO *key_parts)
|
||||
static void trace_ranges(Json_writer_array *range_trace,
|
||||
PARAM *param, uint idx,
|
||||
SEL_ARG *keypart,
|
||||
const KEY_PART_INFO *key_parts)
|
||||
{
|
||||
|
||||
DBUG_ASSERT(keypart);
|
||||
DBUG_ASSERT(keypart && keypart != &null_element);
|
||||
|
||||
// Navigate to first interval in red-black tree
|
||||
SEL_ARG_RANGE_SEQ seq;
|
||||
KEY_MULTI_RANGE range;
|
||||
range_seq_t seq_it;
|
||||
uint flags= 0;
|
||||
RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init,
|
||||
sel_arg_range_seq_next, 0, 0};
|
||||
KEY *keyinfo= param->table->key_info + param->real_keynr[idx];
|
||||
uint n_key_parts= param->table->actual_n_key_parts(keyinfo);
|
||||
seq.keyno= idx;
|
||||
seq.real_keyno= param->real_keynr[idx];
|
||||
seq.param= param;
|
||||
seq.start= keypart;
|
||||
/*
|
||||
is_ror_scan is set to FALSE here, because we are only interested
|
||||
in iterating over all the ranges and printing them.
|
||||
*/
|
||||
seq.is_ror_scan= FALSE;
|
||||
const KEY_PART_INFO *cur_key_part= key_parts + keypart->part;
|
||||
const SEL_ARG *keypart_range= keypart->first();
|
||||
const size_t save_range_so_far_length= range_so_far->length();
|
||||
seq_it= seq_if.init((void *) &seq, 0, flags);
|
||||
|
||||
|
||||
while (keypart_range)
|
||||
while (!seq_if.next(seq_it, &range))
|
||||
{
|
||||
// Append the current range predicate to the range String
|
||||
switch (keypart->type)
|
||||
{
|
||||
case SEL_ARG::Type::KEY_RANGE:
|
||||
append_range(range_so_far, cur_key_part, keypart_range->min_value,
|
||||
keypart_range->max_value,
|
||||
keypart_range->min_flag | keypart_range->max_flag);
|
||||
break;
|
||||
case SEL_ARG::Type::MAYBE_KEY:
|
||||
range_so_far->append("MAYBE_KEY");
|
||||
break;
|
||||
case SEL_ARG::Type::IMPOSSIBLE:
|
||||
range_so_far->append("IMPOSSIBLE");
|
||||
break;
|
||||
default:
|
||||
DBUG_ASSERT(false);
|
||||
break;
|
||||
}
|
||||
|
||||
if (keypart_range->next_key_part &&
|
||||
keypart_range->next_key_part->part ==
|
||||
keypart_range->part + 1 &&
|
||||
keypart_range->is_singlepoint())
|
||||
{
|
||||
append_range_all_keyparts(range_trace, range_string, range_so_far,
|
||||
keypart_range->next_key_part, key_parts);
|
||||
}
|
||||
else
|
||||
range_trace->add(range_so_far->c_ptr_safe(), range_so_far->length());
|
||||
keypart_range= keypart_range->next;
|
||||
range_so_far->length(save_range_so_far_length);
|
||||
StringBuffer<128> range_info(system_charset_info);
|
||||
print_range(&range_info, cur_key_part, &range, n_key_parts);
|
||||
range_trace->add(range_info.c_ptr_safe(), range_info.length());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -15841,70 +15812,110 @@ static void append_range_all_keyparts(Json_writer_array *range_trace,
|
|||
@param[out] out String the key is appended to
|
||||
@param[in] key_part Index components description
|
||||
@param[in] key Key tuple
|
||||
@param[in] used_length length of the key tuple
|
||||
*/
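/*
  The used_length argument bounds the loop below: print_key_value() now walks
  every keypart stored in the tuple (key .. key + used_length) and prints a
  comma-separated "(v1,v2,...)" list, instead of printing only the first
  keypart as the previous single-keypart version did.
*/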
|
||||
|
||||
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
const uchar *key)
|
||||
                         const uchar* key, uint used_length)
{
  out->append(STRING_WITH_LEN("("));
  Field *field= key_part->field;

  if (field->flags & BLOB_FLAG)
  {
    // Byte 0 of a nullable key is the null-byte. If set, key is NULL.
    if (field->real_maybe_null() && *key)
      out->append(STRING_WITH_LEN("NULL"));
    else
      (field->type() == MYSQL_TYPE_GEOMETRY)
        ? out->append(STRING_WITH_LEN("unprintable_geometry_value"))
        : out->append(STRING_WITH_LEN("unprintable_blob_value"));
    return;
  }

  uint store_length= key_part->store_length;

  if (field->real_maybe_null())
  {
    /*
      Byte 0 of key is the null-byte. If set, key is NULL.
      Otherwise, print the key value starting immediately after the
      null-byte
    */
    if (*key)
    {
      out->append(STRING_WITH_LEN("NULL"));
      return;
    }
    key++;                                    // Skip null byte
    store_length--;
  }

  /*
    Binary data cannot be converted to UTF8 which is what the
    optimizer trace expects. If the column is binary, the hex
    representation is printed to the trace instead.
  */
  if (field->flags & BINARY_FLAG)
  {
    out->append("0x");
    for (uint i = 0; i < store_length; i++)
    {
      out->append(_dig_vec_lower[*(key + i) >> 4]);
      out->append(_dig_vec_lower[*(key + i) & 0x0F]);
    }
    return;
  }

  StringBuffer<128> tmp(system_charset_info);
  TABLE *table= field->table;
  uint store_length;
  my_bitmap_map *old_sets[2];

  dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
  const uchar *key_end= key+used_length;

  field->set_key_image(key, key_part->length);
  if (field->type() == MYSQL_TYPE_BIT)
    (void)field->val_int_as_str(&tmp, 1); // may change tmp's charset
  else
    field->val_str(&tmp); // may change tmp's charset
  out->append(tmp.ptr(), tmp.length(), tmp.charset());
  for (; key < key_end; key+=store_length, key_part++)
  {
    field= key_part->field;
    store_length= key_part->store_length;

    if (field->flags & BLOB_FLAG)
    {
      // Byte 0 of a nullable key is the null-byte. If set, key is NULL.
      if (field->real_maybe_null() && *key)
        out->append(STRING_WITH_LEN("NULL"));
      else
        (field->type() == MYSQL_TYPE_GEOMETRY)
          ? out->append(STRING_WITH_LEN("unprintable_geometry_value"))
          : out->append(STRING_WITH_LEN("unprintable_blob_value"));
      goto next;
    }

    if (field->real_maybe_null())
    {
      /*
        Byte 0 of key is the null-byte. If set, key is NULL.
        Otherwise, print the key value starting immediately after the
        null-byte
      */
      if (*key)
      {
        out->append(STRING_WITH_LEN("NULL"));
        goto next;
      }
      key++;                                  // Skip null byte
      store_length--;
    }

    /*
      Binary data cannot be converted to UTF8 which is what the
      optimizer trace expects. If the column is binary, the hex
      representation is printed to the trace instead.
    */
    if (field->flags & BINARY_FLAG)
    {
      out->append("0x");
      for (uint i = 0; i < store_length; i++)
      {
        out->append(_dig_vec_lower[*(key + i) >> 4]);
        out->append(_dig_vec_lower[*(key + i) & 0x0F]);
      }
      goto next;
    }

    field->set_key_image(key, key_part->length);
    if (field->type() == MYSQL_TYPE_BIT)
      (void)field->val_int_as_str(&tmp, 1); // may change tmp's charset
    else
      field->val_str(&tmp); // may change tmp's charset
    out->append(tmp.ptr(), tmp.length(), tmp.charset());

  next:
    if (key + store_length < key_end)
      out->append(STRING_WITH_LEN(","));
  }
  dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
  out->append(STRING_WITH_LEN(")"));
}

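/*
  Illustrative sketch (not part of the server source): the BINARY_FLAG branch
  above hex-encodes the raw key bytes for the optimizer trace, high nibble
  first, via the server's _dig_vec_lower digit table. A minimal standalone
  version of that encoding, with std::string standing in for the server's
  String class and a local digit table assumed to match _dig_vec_lower:
*/
#include <cstdio>
#include <string>

static const char dig_lower[]= "0123456789abcdefghijklmnopqrstuvwxyz";

static std::string key_to_hex(const unsigned char *key, unsigned store_length)
{
  std::string out("0x");
  for (unsigned i= 0; i < store_length; i++)
  {
    out+= dig_lower[key[i] >> 4];      // high nibble first
    out+= dig_lower[key[i] & 0x0F];    // then low nibble
  }
  return out;
}

int main()
{
  const unsigned char key[]= { 0x00, 0x1a, 0xff };
  std::printf("%s\n", key_to_hex(key, sizeof key).c_str());  // prints 0x001aff
  return 0;
}
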
/**
  Print key parts involved in a range
  @param[out] out          String the key is appended to
  @param[in]  key_part     Index components description
  @param[in]  n_keypart    Number of keyparts in index
  @param[in]  keypart_map  map for keyparts involved in the range
*/

void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
                         uint n_keypart, key_part_map keypart_map)
{
  uint i;
  out->append(STRING_WITH_LEN("("));
  bool first_keypart= TRUE;
  for (i=0; i < n_keypart; key_part++, i++)
  {
    if (keypart_map & (1 << i))
    {
      if (first_keypart)
        first_keypart= FALSE;
      else
        out->append(STRING_WITH_LEN(","));
      out->append(key_part->field->field_name);
    }
    else
      break;
  }
  out->append(STRING_WITH_LEN(")"));
}
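/*
  Illustrative sketch (not part of the server source): print_keyparts_name()
  walks keypart_map from the lowest bit and stops at the first unset bit, so
  for an index (a,b,c) a map of 0x3 yields "(a,b)". A simplified standalone
  version, with plain std::string names in place of KEY_PART_INFO:
*/
#include <cstdio>
#include <string>
#include <vector>

typedef unsigned long key_part_map_sketch;       // stand-in for key_part_map

static std::string keyparts_name(const std::vector<std::string> &parts,
                                 key_part_map_sketch map)
{
  std::string out("(");
  bool first= true;
  for (size_t i= 0; i < parts.size(); i++)
  {
    if (!(map & (1UL << i)))
      break;                    // key parts are used left-to-right, no gaps
    if (!first)
      out+= ",";
    first= false;
    out+= parts[i];
  }
  out+= ")";
  return out;
}

int main()
{
  std::vector<std::string> index_parts= { "a", "b", "c" };
  std::printf("%s\n", keyparts_name(index_parts, 0x3).c_str()); // prints (a,b)
  return 0;
}
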
@ -458,7 +458,9 @@ public:
    SEL_ARG *key_tree= first();
    uint res= key_tree->store_min(key[key_tree->part].store_length,
                                  range_key, *range_key_flag);
    *range_key_flag|= key_tree->min_flag;
    // add flags only if a key_part is written to the buffer
    if (res)
      *range_key_flag|= key_tree->min_flag;
    if (key_tree->next_key_part &&
        key_tree->next_key_part->type == SEL_ARG::KEY_RANGE &&
        key_tree->part != last_part &&

@ -480,7 +482,8 @@ public:
    SEL_ARG *key_tree= last();
    uint res=key_tree->store_max(key[key_tree->part].store_length,
                                 range_key, *range_key_flag);
    (*range_key_flag)|= key_tree->max_flag;
    if (res)
      (*range_key_flag)|= key_tree->max_flag;
    if (key_tree->next_key_part &&
        key_tree->next_key_part->type == SEL_ARG::KEY_RANGE &&
        key_tree->part != last_part &&

@ -53,6 +53,11 @@ typedef struct st_sel_arg_range_seq
|
|||
int i; /* Index of last used element in the above array */
|
||||
|
||||
bool at_start; /* TRUE <=> The traversal has just started */
|
||||
/*
|
||||
Iteration functions will set this to FALSE
|
||||
if ranges being traversed do not allow to construct a ROR-scan"
|
||||
*/
|
||||
bool is_ror_scan;
|
||||
} SEL_ARG_RANGE_SEQ;
|
||||
|
||||
|
||||
|
@ -165,7 +170,7 @@ bool sel_arg_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
|
|||
seq->i--;
|
||||
step_down_to(seq, key_tree->next);
|
||||
key_tree= key_tree->next;
|
||||
seq->param->is_ror_scan= FALSE;
|
||||
seq->is_ror_scan= FALSE;
|
||||
goto walk_right_n_up;
|
||||
}
|
||||
|
||||
|
@ -207,7 +212,7 @@ walk_right_n_up:
|
|||
!memcmp(cur[-1].min_key, cur[-1].max_key, len) &&
|
||||
!key_tree->min_flag && !key_tree->max_flag))
|
||||
{
|
||||
seq->param->is_ror_scan= FALSE;
|
||||
seq->is_ror_scan= FALSE;
|
||||
if (!key_tree->min_flag)
|
||||
cur->min_key_parts +=
|
||||
key_tree->next_key_part->store_min_key(seq->param->key[seq->keyno],
|
||||
|
@ -312,7 +317,7 @@ walk_up_n_right:
|
|||
range->range_flag |= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);
|
||||
}
|
||||
|
||||
if (seq->param->is_ror_scan)
|
||||
if (seq->is_ror_scan)
|
||||
{
|
||||
/*
|
||||
If we get here, the condition on the key was converted to form
|
||||
|
@ -327,7 +332,7 @@ walk_up_n_right:
|
|||
(range->start_key.length == range->end_key.length) &&
|
||||
!memcmp(range->start_key.key, range->end_key.key, range->start_key.length) &&
|
||||
is_key_scan_ror(seq->param, seq->real_keyno, key_tree->part + 1)))
|
||||
seq->param->is_ror_scan= FALSE;
|
||||
seq->is_ror_scan= FALSE;
|
||||
}
|
||||
}
|
||||
seq->param->range_count++;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright (c) 2010, 2015, MariaDB
|
||||
Copyright (c) 2010, 2019, MariaDB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
|
@ -28,6 +28,7 @@
|
|||
|
||||
#include "mariadb.h"
|
||||
#include "sql_base.h"
|
||||
#include "sql_const.h"
|
||||
#include "sql_select.h"
|
||||
#include "filesort.h"
|
||||
#include "opt_subselect.h"
|
||||
|
@ -522,7 +523,7 @@ bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs,
|
|||
!child_select->is_part_of_union() && // 1
|
||||
parent_unit->first_select()->leaf_tables.elements && // 2
|
||||
child_select->outer_select() &&
|
||||
child_select->outer_select()->leaf_tables.elements && // 2A
|
||||
child_select->outer_select()->table_list.first && // 2A
|
||||
subquery_types_allow_materialization(thd, in_subs) &&
|
||||
(in_subs->is_top_level_item() || //3
|
||||
optimizer_flag(thd,
|
||||
|
@ -1418,8 +1419,8 @@ void get_delayed_table_estimates(TABLE *table,
|
|||
*startup_cost= item->jtbm_read_time;
|
||||
|
||||
/* Calculate cost of scanning the temptable */
|
||||
double data_size= item->jtbm_record_count *
|
||||
hash_sj_engine->tmp_table->s->reclength;
|
||||
double data_size= COST_MULT(item->jtbm_record_count,
|
||||
hash_sj_engine->tmp_table->s->reclength);
|
||||
/* Do like in handler::read_time */
|
||||
*scan_time= data_size/IO_SIZE + 2;
|
||||
}
|
||||
|
@ -2494,7 +2495,8 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
|
|||
int tableno;
|
||||
double rows= 1.0;
|
||||
while ((tableno = tm_it.next_bit()) != Table_map_iterator::BITMAP_END)
|
||||
rows *= join->map2table[tableno]->table->quick_condition_rows;
|
||||
rows= COST_MULT(rows,
|
||||
join->map2table[tableno]->table->quick_condition_rows);
|
||||
sjm->rows= MY_MIN(sjm->rows, rows);
|
||||
}
|
||||
memcpy((uchar*) sjm->positions,
|
||||
|
@ -2607,7 +2609,7 @@ static uint get_tmp_table_rec_length(Ref_ptr_array p_items, uint elements)
double
get_tmp_table_lookup_cost(THD *thd, double row_count, uint row_size)
{
  if (row_count * row_size > thd->variables.max_heap_table_size)
  if (row_count > thd->variables.max_heap_table_size / (double) row_size)
    return (double) DISK_TEMPTABLE_LOOKUP_COST;
  else
    return (double) HEAP_TEMPTABLE_LOOKUP_COST;

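/*
  Illustrative sketch (not part of the server source): the rewritten test in
  get_tmp_table_lookup_cost() compares row_count against
  max_heap_table_size / row_size instead of forming row_count * row_size, so
  the decision is made without computing a product that may grow out of range.
  A standalone version of the same guard; the numbers below are made up:
*/
#include <cstdio>

static bool heap_table_too_big(double row_count, unsigned row_size,
                               double max_heap_table_size)
{
  return row_count > max_heap_table_size / (double) row_size;
}

int main()
{
  const double max_heap= 16.0 * 1024 * 1024;      // made-up 16M limit
  std::printf("%d\n", heap_table_too_big(1e9, 512, max_heap));   // 1 (too big)
  std::printf("%d\n", heap_table_too_big(1000, 512, max_heap));  // 0 (fits)
  return 0;
}
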
@ -3014,8 +3016,11 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
|
|||
}
|
||||
|
||||
double mat_read_time= prefix_cost.total_cost();
|
||||
mat_read_time += mat_info->materialization_cost.total_cost() +
|
||||
prefix_rec_count * mat_info->lookup_cost.total_cost();
|
||||
mat_read_time=
|
||||
COST_ADD(mat_read_time,
|
||||
COST_ADD(mat_info->materialization_cost.total_cost(),
|
||||
COST_MULT(prefix_rec_count,
|
||||
mat_info->lookup_cost.total_cost())));
|
||||
|
||||
/*
|
||||
NOTE: When we pick to use SJM[-Scan] we don't memcpy its POSITION
|
||||
|
@ -3055,9 +3060,12 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
|
|||
}
|
||||
|
||||
/* Add materialization cost */
|
||||
prefix_cost += mat_info->materialization_cost.total_cost() +
|
||||
prefix_rec_count * mat_info->scan_cost.total_cost();
|
||||
prefix_rec_count *= mat_info->rows;
|
||||
prefix_cost=
|
||||
COST_ADD(prefix_cost,
|
||||
COST_ADD(mat_info->materialization_cost.total_cost(),
|
||||
COST_MULT(prefix_rec_count,
|
||||
mat_info->scan_cost.total_cost())));
|
||||
prefix_rec_count= COST_MULT(prefix_rec_count, mat_info->rows);
|
||||
|
||||
uint i;
|
||||
table_map rem_tables= remaining_tables;
|
||||
|
@ -3072,8 +3080,8 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
|
|||
{
|
||||
best_access_path(join, join->positions[i].table, rem_tables, i,
|
||||
disable_jbuf, prefix_rec_count, &curpos, &dummy);
|
||||
prefix_rec_count *= curpos.records_read;
|
||||
prefix_cost += curpos.read_time;
|
||||
prefix_rec_count= COST_MULT(prefix_rec_count, curpos.records_read);
|
||||
prefix_cost= COST_ADD(prefix_cost, curpos.read_time);
|
||||
}
|
||||
|
||||
*strategy= SJ_OPT_MATERIALIZE_SCAN;
|
||||
|
@ -3380,16 +3388,18 @@ bool Duplicate_weedout_picker::check_qep(JOIN *join,
|
|||
for (uint j= first_dupsweedout_table; j <= idx; j++)
|
||||
{
|
||||
POSITION *p= join->positions + j;
|
||||
current_fanout *= p->records_read;
|
||||
dups_cost += p->read_time + current_fanout / TIME_FOR_COMPARE;
|
||||
current_fanout= COST_MULT(current_fanout, p->records_read);
|
||||
dups_cost= COST_ADD(dups_cost,
|
||||
COST_ADD(p->read_time,
|
||||
current_fanout / TIME_FOR_COMPARE));
|
||||
if (p->table->emb_sj_nest)
|
||||
{
|
||||
sj_inner_fanout *= p->records_read;
|
||||
sj_inner_fanout= COST_MULT(sj_inner_fanout, p->records_read);
|
||||
dups_removed_fanout |= p->table->table->map;
|
||||
}
|
||||
else
|
||||
{
|
||||
sj_outer_fanout *= p->records_read;
|
||||
sj_outer_fanout= COST_MULT(sj_outer_fanout, p->records_read);
|
||||
temptable_rec_size += p->table->table->file->ref_length;
|
||||
}
|
||||
}
|
||||
|
@ -3408,12 +3418,13 @@ bool Duplicate_weedout_picker::check_qep(JOIN *join,
|
|||
sj_outer_fanout,
|
||||
temptable_rec_size);
|
||||
|
||||
double write_cost= join->positions[first_tab].prefix_record_count*
|
||||
sj_outer_fanout * one_write_cost;
|
||||
double full_lookup_cost= join->positions[first_tab].prefix_record_count*
|
||||
sj_outer_fanout* sj_inner_fanout *
|
||||
one_lookup_cost;
|
||||
dups_cost += write_cost + full_lookup_cost;
|
||||
double write_cost= COST_MULT(join->positions[first_tab].prefix_record_count,
|
||||
sj_outer_fanout * one_write_cost);
|
||||
double full_lookup_cost=
|
||||
COST_MULT(join->positions[first_tab].prefix_record_count,
|
||||
COST_MULT(sj_outer_fanout,
|
||||
sj_inner_fanout * one_lookup_cost));
|
||||
dups_cost= COST_ADD(dups_cost, COST_ADD(write_cost, full_lookup_cost));
|
||||
|
||||
*read_time= dups_cost;
|
||||
*record_count= prefix_rec_count * sj_outer_fanout;
|
||||
|
@ -3560,8 +3571,8 @@ static void recalculate_prefix_record_count(JOIN *join, uint start, uint end)
|
|||
if (j == join->const_tables)
|
||||
prefix_count= 1.0;
|
||||
else
|
||||
prefix_count= join->best_positions[j-1].prefix_record_count *
|
||||
join->best_positions[j-1].records_read;
|
||||
prefix_count= COST_MULT(join->best_positions[j-1].prefix_record_count,
|
||||
join->best_positions[j-1].records_read);
|
||||
|
||||
join->best_positions[j].prefix_record_count= prefix_count;
|
||||
}
|
||||
|
@ -6387,14 +6398,16 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
|
|||
The cost of executing the subquery and storing its result in an indexed
|
||||
temporary table.
|
||||
*/
|
||||
double materialization_cost= inner_read_time_1 +
|
||||
write_cost * inner_record_count_1;
|
||||
double materialization_cost= COST_ADD(inner_read_time_1,
|
||||
COST_MULT(write_cost,
|
||||
inner_record_count_1));
|
||||
|
||||
materialize_strategy_cost= materialization_cost +
|
||||
outer_lookup_keys * lookup_cost;
|
||||
materialize_strategy_cost= COST_ADD(materialization_cost,
|
||||
COST_MULT(outer_lookup_keys,
|
||||
lookup_cost));
|
||||
|
||||
/* C.2 Compute the cost of the IN=>EXISTS strategy. */
|
||||
in_exists_strategy_cost= outer_lookup_keys * inner_read_time_2;
|
||||
in_exists_strategy_cost= COST_MULT(outer_lookup_keys, inner_read_time_2);
|
||||
|
||||
/* C.3 Compare the costs and choose the cheaper strategy. */
|
||||
if (materialize_strategy_cost >= in_exists_strategy_cost)
|
||||
|
|
|
@ -194,7 +194,6 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
|
|||
bzero((char*) info,sizeof(*info));
|
||||
info->thd=thd;
|
||||
info->table=table;
|
||||
info->forms= &info->table; /* Only one table */
|
||||
info->addon_field= addon_field;
|
||||
|
||||
if ((table->s->tmp_table == INTERNAL_TMP_TABLE ||
|
||||
|
@ -583,33 +582,34 @@ static int rr_unpack_from_buffer(READ_RECORD *info)
|
|||
}
|
||||
/* cacheing of records from a database */
|
||||
|
||||
static const uint STRUCT_LENGTH= 3 + MAX_REFLENGTH;
|
||||
|
||||
static int init_rr_cache(THD *thd, READ_RECORD *info)
|
||||
{
|
||||
uint rec_cache_size;
|
||||
uint rec_cache_size, cache_records;
|
||||
DBUG_ENTER("init_rr_cache");
|
||||
|
||||
info->struct_length= 3+MAX_REFLENGTH;
|
||||
info->reclength= ALIGN_SIZE(info->table->s->reclength+1);
|
||||
if (info->reclength < info->struct_length)
|
||||
info->reclength= ALIGN_SIZE(info->struct_length);
|
||||
if (info->reclength < STRUCT_LENGTH)
|
||||
info->reclength= ALIGN_SIZE(STRUCT_LENGTH);
|
||||
|
||||
info->error_offset= info->table->s->reclength;
|
||||
info->cache_records= (thd->variables.read_rnd_buff_size /
|
||||
(info->reclength+info->struct_length));
|
||||
rec_cache_size= info->cache_records*info->reclength;
|
||||
info->rec_cache_size= info->cache_records*info->ref_length;
|
||||
cache_records= thd->variables.read_rnd_buff_size /
|
||||
(info->reclength + STRUCT_LENGTH);
|
||||
rec_cache_size= cache_records * info->reclength;
|
||||
info->rec_cache_size= cache_records * info->ref_length;
|
||||
|
||||
// We have to allocate one more byte to use uint3korr (see comments for it)
|
||||
if (info->cache_records <= 2 ||
|
||||
!(info->cache=(uchar*) my_malloc_lock(rec_cache_size+info->cache_records*
|
||||
info->struct_length+1,
|
||||
MYF(MY_THREAD_SPECIFIC))))
|
||||
if (cache_records <= 2 ||
|
||||
!(info->cache= (uchar*) my_malloc_lock(rec_cache_size + cache_records *
|
||||
STRUCT_LENGTH + 1,
|
||||
MYF(MY_THREAD_SPECIFIC))))
|
||||
DBUG_RETURN(1);
|
||||
#ifdef HAVE_valgrind
|
||||
// Avoid warnings in qsort
|
||||
bzero(info->cache,rec_cache_size+info->cache_records* info->struct_length+1);
|
||||
bzero(info->cache, rec_cache_size + cache_records * STRUCT_LENGTH + 1);
|
||||
#endif
|
||||
DBUG_PRINT("info",("Allocated buffert for %d records",info->cache_records));
|
||||
DBUG_PRINT("info", ("Allocated buffer for %d records", cache_records));
|
||||
info->read_positions=info->cache+rec_cache_size;
|
||||
info->cache_pos=info->cache_end=info->cache;
|
||||
DBUG_RETURN(0);
|
||||
|
@ -664,8 +664,7 @@ static int rr_from_cache(READ_RECORD *info)
|
|||
int3store(ref_position,(long) i);
|
||||
ref_position+=3;
|
||||
}
|
||||
my_qsort(info->read_positions, length, info->struct_length,
|
||||
(qsort_cmp) rr_cmp);
|
||||
my_qsort(info->read_positions, length, STRUCT_LENGTH, (qsort_cmp) rr_cmp);
|
||||
|
||||
position=info->read_positions;
|
||||
for (i=0 ; i < length ; i++)
|
||||
|
|
|
@ -52,15 +52,11 @@ struct READ_RECORD
|
|||
typedef int (*Setup_func)(struct st_join_table*);
|
||||
|
||||
TABLE *table; /* Head-form */
|
||||
//handler *file;
|
||||
TABLE **forms; /* head and ref forms */
|
||||
Unlock_row_func unlock_row;
|
||||
Read_func read_record_func;
|
||||
THD *thd;
|
||||
SQL_SELECT *select;
|
||||
uint cache_records;
|
||||
uint ref_length,struct_length,reclength,rec_cache_size,error_offset;
|
||||
uint index;
|
||||
uint ref_length, reclength, rec_cache_size, error_offset;
|
||||
uchar *ref_pos; /* pointer to form->refpos */
|
||||
uchar *record;
|
||||
uchar *rec_buf; /* to read field values after filesort */
|
||||
|
|
|
@ -247,6 +247,14 @@
#define DISK_TEMPTABLE_LOOKUP_COST 1.0
#define SORT_INDEX_CMP_COST 0.02


#define COST_MAX (DBL_MAX * (1.0 - DBL_EPSILON))

#define COST_ADD(c,d) (COST_MAX - (d) > (c) ? (c) + (d) : COST_MAX)

#define COST_MULT(c,f) (COST_MAX / (f) > (c) ? (c) * (f) : COST_MAX)


#define MY_CHARSET_BIN_MB_MAXLEN 1

/** Don't pack string keys shorter than this (if PACK_KEYS=1 isn't used). */

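/*
  Illustrative sketch (not part of the server source): COST_ADD and COST_MULT
  above clamp join-cost arithmetic at COST_MAX instead of letting a double
  grow without bound; this is the guard used throughout the optimizer hunks in
  this diff. The same checks written as functions, exercised with made-up
  inputs:
*/
#include <cfloat>
#include <cstdio>

static const double COST_MAX_SKETCH= DBL_MAX * (1.0 - DBL_EPSILON);

static double cost_add(double c, double d)
{
  // add only while the sum stays below COST_MAX, otherwise saturate
  return COST_MAX_SKETCH - d > c ? c + d : COST_MAX_SKETCH;
}

static double cost_mult(double c, double f)
{
  // multiply only while the product stays below COST_MAX, otherwise saturate
  return COST_MAX_SKETCH / f > c ? c * f : COST_MAX_SKETCH;
}

int main()
{
  std::printf("%g\n", cost_add(12.5, 7.25));       // ordinary case: 19.75
  std::printf("%g\n", cost_mult(1e200, 1e200));    // saturates at COST_MAX
  return 0;
}
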
@ -2142,6 +2142,38 @@ public:
|
|||
DBUG_RETURN((stmt_accessed_table_flag & (1U << accessed_table)) != 0);
|
||||
}
|
||||
|
||||
/**
|
||||
Checks either a trans/non trans temporary table is being accessed while
|
||||
executing a statement.
|
||||
|
||||
@return
|
||||
@retval TRUE if a temporary table is being accessed
|
||||
@retval FALSE otherwise
|
||||
*/
|
||||
inline bool stmt_accessed_temp_table()
|
||||
{
|
||||
DBUG_ENTER("THD::stmt_accessed_temp_table");
|
||||
DBUG_RETURN(stmt_accessed_non_trans_temp_table() ||
|
||||
stmt_accessed_trans_temp_table());
|
||||
}
|
||||
|
||||
/**
|
||||
Checks if a temporary transactional table is being accessed while executing
|
||||
a statement.
|
||||
|
||||
@return
|
||||
@retval TRUE if a temporary transactional table is being accessed
|
||||
@retval FALSE otherwise
|
||||
*/
|
||||
inline bool stmt_accessed_trans_temp_table()
|
||||
{
|
||||
DBUG_ENTER("THD::stmt_accessed_trans_temp_table");
|
||||
|
||||
DBUG_RETURN((stmt_accessed_table_flag &
|
||||
((1U << STMT_READS_TEMP_TRANS_TABLE) |
|
||||
(1U << STMT_WRITES_TEMP_TRANS_TABLE))) != 0);
|
||||
}
|
||||
|
||||
/**
|
||||
Checks if a temporary non-transactional table is about to be accessed
|
||||
while executing a statement.
|
||||
|
|
|
@ -4878,6 +4878,13 @@ end_with_restore_list:
|
|||
*/
|
||||
/* Skip first table, which is the table we are inserting in */
|
||||
TABLE_LIST *second_table= first_table->next_local;
|
||||
/*
|
||||
This is a hack: this leaves select_lex->table_list in an inconsistent
|
||||
state as 'elements' does not contain number of elements in the list.
|
||||
Moreover, if second_table == NULL then 'next' becomes invalid.
|
||||
TODO: fix it by removing the front element (restoring of it should
|
||||
be done properly as well)
|
||||
*/
|
||||
select_lex->table_list.first= second_table;
|
||||
select_lex->context.table_list=
|
||||
select_lex->context.first_name_resolution_table= second_table;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* Copyright (c) 2000, 2016 Oracle and/or its affiliates.
|
||||
Copyright (c) 2009, 2019 MariaDB Corporation
|
||||
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
|
||||
Copyright (c) 2009, 2019, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
|
@ -7451,7 +7451,7 @@ best_access_path(JOIN *join,
|
|||
else
|
||||
tmp= table->file->read_time(key, 1,
|
||||
(ha_rows) MY_MIN(tmp,s->worst_seeks));
|
||||
tmp*= record_count;
|
||||
tmp= COST_MULT(tmp, record_count);
|
||||
}
|
||||
}
|
||||
else
|
||||
|
@ -7632,7 +7632,7 @@ best_access_path(JOIN *join,
|
|||
else
|
||||
tmp= table->file->read_time(key, 1,
|
||||
(ha_rows) MY_MIN(tmp,s->worst_seeks));
|
||||
tmp*= record_count;
|
||||
tmp= COST_MULT(tmp, record_count);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -7642,7 +7642,7 @@ best_access_path(JOIN *join,
|
|||
}
|
||||
}
|
||||
|
||||
tmp += s->startup_cost;
|
||||
tmp= COST_ADD(tmp, s->startup_cost);
|
||||
loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp);
|
||||
} /* not ft_key */
|
||||
|
||||
|
@ -7665,7 +7665,7 @@ best_access_path(JOIN *join,
|
|||
if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
|
||||
{
|
||||
trace_access_idx.add("chosen", true);
|
||||
best_time= tmp + records/(double) TIME_FOR_COMPARE;
|
||||
best_time= COST_ADD(tmp, records/(double) TIME_FOR_COMPARE);
|
||||
best= tmp;
|
||||
best_records= records;
|
||||
best_key= start_key;
|
||||
|
@ -7707,14 +7707,18 @@ best_access_path(JOIN *join,
|
|||
use_cond_selectivity);
|
||||
|
||||
tmp= s->quick ? s->quick->read_time : s->scan_time();
|
||||
tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
|
||||
double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
|
||||
tmp= COST_ADD(tmp, cmp_time);
|
||||
|
||||
/* We read the table as many times as join buffer becomes full. */
|
||||
tmp*= (1.0 + floor((double) cache_record_length(join,idx) *
|
||||
record_count /
|
||||
(double) thd->variables.join_buff_size));
|
||||
best_time= tmp +
|
||||
(record_count*join_sel) / TIME_FOR_COMPARE * rnd_records;
|
||||
|
||||
double refills= (1.0 + floor((double) cache_record_length(join,idx) *
|
||||
record_count /
|
||||
(double) thd->variables.join_buff_size));
|
||||
tmp= COST_MULT(tmp, refills);
|
||||
best_time= COST_ADD(tmp,
|
||||
COST_MULT((record_count*join_sel) / TIME_FOR_COMPARE,
|
||||
rnd_records));
|
||||
best= tmp;
|
||||
records= rnd_records;
|
||||
best_key= hj_start_key;
|
||||
|
@ -7746,7 +7750,8 @@ best_access_path(JOIN *join,
|
|||
'range' access using index IDX, and the best way to perform 'ref'
|
||||
access is to use the same index IDX, with the same or more key parts.
|
||||
(note: it is not clear how this rule is/should be extended to
|
||||
index_merge quick selects)
|
||||
index_merge quick selects). Also if we have a hash join we prefer that
|
||||
over a table scan
|
||||
(3) See above note about InnoDB.
|
||||
(4) NOT ("FORCE INDEX(...)" is used for table and there is 'ref' access
|
||||
path, but there is no quick select)
|
||||
|
@ -7763,6 +7768,7 @@ best_access_path(JOIN *join,
|
|||
*/
|
||||
Json_writer_object trace_access_scan(thd);
|
||||
if ((records >= s->found_records || best > s->read_time) && // (1)
|
||||
!(best_key && best_key->key == MAX_KEY) && // (2)
|
||||
!(s->quick && best_key && s->quick->index == best_key->key && // (2)
|
||||
best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2)
|
||||
!((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
|
||||
|
@ -7795,9 +7801,9 @@ best_access_path(JOIN *join,
|
|||
access (see first else-branch below), but we don't take it into
|
||||
account here for range/index_merge access. Find out why this is so.
|
||||
*/
|
||||
tmp= record_count *
|
||||
(s->quick->read_time +
|
||||
(s->found_records - rnd_records)/(double) TIME_FOR_COMPARE);
|
||||
double cmp_time= (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE;
|
||||
tmp= COST_MULT(record_count,
|
||||
COST_ADD(s->quick->read_time, cmp_time));
|
||||
|
||||
if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
|
||||
{
|
||||
|
@ -7836,16 +7842,15 @@ best_access_path(JOIN *join,
|
|||
- read the whole table record
|
||||
- skip rows which does not satisfy join condition
|
||||
*/
|
||||
tmp= record_count *
|
||||
(tmp +
|
||||
(s->records - rnd_records)/(double) TIME_FOR_COMPARE);
|
||||
double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
|
||||
tmp= COST_MULT(record_count, COST_ADD(tmp,cmp_time));
|
||||
}
|
||||
else
|
||||
{
|
||||
/* We read the table as many times as join buffer becomes full. */
|
||||
tmp*= (1.0 + floor((double) cache_record_length(join,idx) *
|
||||
record_count /
|
||||
(double) thd->variables.join_buff_size));
|
||||
double refills= (1.0 + floor((double) cache_record_length(join,idx) *
|
||||
(record_count /
|
||||
(double) thd->variables.join_buff_size)));
|
||||
tmp= COST_MULT(tmp, refills);
|
||||
/*
|
||||
We don't make full cartesian product between rows in the scanned
|
||||
table and existing records because we skip all rows from the
|
||||
|
@ -7853,7 +7858,8 @@ best_access_path(JOIN *join,
|
|||
we read the table (see flush_cached_records for details). Here we
|
||||
take into account cost to read and skip these records.
|
||||
*/
|
||||
tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
|
||||
double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
|
||||
tmp= COST_ADD(tmp, cmp_time);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -7869,19 +7875,17 @@ best_access_path(JOIN *join,
|
|||
tmp give us total cost of using TABLE SCAN
|
||||
*/
|
||||
|
||||
double best_filter_cmp_gain= 0;
|
||||
if (best_filter)
|
||||
{
|
||||
best_filter_cmp_gain= best_filter->get_cmp_gain(record_count * records);
|
||||
}
|
||||
const double best_filter_cmp_gain= best_filter
|
||||
? best_filter->get_cmp_gain(record_count * records)
|
||||
: 0;
|
||||
trace_access_scan.add("resulting_rows", rnd_records);
|
||||
trace_access_scan.add("cost", tmp);
|
||||
|
||||
if (best == DBL_MAX ||
|
||||
(tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records <
|
||||
COST_ADD(tmp, record_count/(double) TIME_FOR_COMPARE*rnd_records) <
|
||||
(best_key->is_for_hash_join() ? best_time :
|
||||
best + record_count/(double) TIME_FOR_COMPARE*records -
|
||||
best_filter_cmp_gain)))
|
||||
COST_ADD(best - best_filter_cmp_gain,
|
||||
record_count/(double) TIME_FOR_COMPARE*records)))
|
||||
{
|
||||
/*
|
||||
If the table has a range (s->quick is set) make_join_select()
|
||||
|
@ -8420,16 +8424,13 @@ optimize_straight_join(JOIN *join, table_map join_tables)
|
|||
position, &loose_scan_pos);
|
||||
|
||||
/* compute the cost of the new plan extended with 's' */
|
||||
record_count*= position->records_read;
|
||||
double filter_cmp_gain= 0;
|
||||
if (position->range_rowid_filter_info)
|
||||
{
|
||||
filter_cmp_gain=
|
||||
position->range_rowid_filter_info->get_cmp_gain(record_count);
|
||||
}
|
||||
read_time+= position->read_time +
|
||||
record_count / (double) TIME_FOR_COMPARE -
|
||||
filter_cmp_gain;
|
||||
record_count= COST_MULT(record_count, position->records_read);
|
||||
const double filter_cmp_gain= position->range_rowid_filter_info
|
||||
? position->range_rowid_filter_info->get_cmp_gain(record_count)
|
||||
: 0;
|
||||
read_time+= COST_ADD(read_time - filter_cmp_gain,
|
||||
COST_ADD(position->read_time,
|
||||
record_count / (double) TIME_FOR_COMPARE));
|
||||
advance_sj_state(join, join_tables, idx, &record_count, &read_time,
|
||||
&loose_scan_pos);
|
||||
|
||||
|
@ -8619,9 +8620,10 @@ greedy_search(JOIN *join,
|
|||
swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]);
|
||||
|
||||
/* compute the cost of the new plan extended with 'best_table' */
|
||||
record_count*= join->positions[idx].records_read;
|
||||
read_time+= join->positions[idx].read_time +
|
||||
record_count / (double) TIME_FOR_COMPARE;
|
||||
record_count= COST_MULT(record_count, join->positions[idx].records_read);
|
||||
read_time= COST_ADD(read_time,
|
||||
COST_ADD(join->positions[idx].read_time,
|
||||
record_count / (double) TIME_FOR_COMPARE));
|
||||
|
||||
remaining_tables&= ~(best_table->table->map);
|
||||
--size_remain;
|
||||
|
@ -8728,11 +8730,13 @@ void JOIN::get_partial_cost_and_fanout(int end_tab_idx,
|
|||
}
|
||||
if (tab->records_read && (cur_table_map & filter_map))
|
||||
{
|
||||
record_count *= tab->records_read;
|
||||
read_time += tab->read_time + record_count / (double) TIME_FOR_COMPARE;
|
||||
record_count= COST_MULT(record_count, tab->records_read);
|
||||
read_time= COST_ADD(read_time,
|
||||
COST_ADD(tab->read_time,
|
||||
record_count / (double) TIME_FOR_COMPARE));
|
||||
if (tab->emb_sj_nest)
|
||||
sj_inner_fanout *= tab->records_read;
|
||||
}
|
||||
sj_inner_fanout= COST_MULT(sj_inner_fanout, tab->records_read);
|
||||
}
|
||||
|
||||
if (i == last_sj_table)
|
||||
{
|
||||
|
@ -8770,8 +8774,8 @@ void JOIN::get_prefix_cost_and_fanout(uint n_tables,
|
|||
{
|
||||
if (best_positions[i].records_read)
|
||||
{
|
||||
record_count *= best_positions[i].records_read;
|
||||
read_time += best_positions[i].read_time;
|
||||
record_count= COST_MULT(record_count, best_positions[i].records_read);
|
||||
read_time= COST_ADD(read_time, best_positions[i].read_time);
|
||||
}
|
||||
}
|
||||
*read_time_arg= read_time;// + record_count / TIME_FOR_COMPARE;
|
||||
|
@ -9351,20 +9355,16 @@ best_extension_by_limited_search(JOIN *join,
|
|||
best_access_path(join, s, remaining_tables, idx, disable_jbuf,
|
||||
record_count, position, &loose_scan_pos);
|
||||
|
||||
/* Compute the cost of extending the plan with 's', avoid overflow */
|
||||
if (position->records_read < DBL_MAX / record_count)
|
||||
current_record_count= record_count * position->records_read;
|
||||
else
|
||||
current_record_count= DBL_MAX;
|
||||
double filter_cmp_gain= 0;
|
||||
if (position->range_rowid_filter_info)
|
||||
{
|
||||
filter_cmp_gain=
|
||||
position->range_rowid_filter_info->get_cmp_gain(current_record_count);
|
||||
}
|
||||
current_read_time=read_time + position->read_time +
|
||||
current_record_count / (double) TIME_FOR_COMPARE -
|
||||
filter_cmp_gain;
|
||||
/* Compute the cost of extending the plan with 's' */
|
||||
current_record_count= COST_MULT(record_count, position->records_read);
|
||||
const double filter_cmp_gain= position->range_rowid_filter_info
|
||||
? position->range_rowid_filter_info->get_cmp_gain(current_record_count)
|
||||
: 0;
|
||||
current_read_time=COST_ADD(read_time,
|
||||
COST_ADD(position->read_time -
|
||||
filter_cmp_gain,
|
||||
current_record_count /
|
||||
(double) TIME_FOR_COMPARE));
|
||||
|
||||
advance_sj_state(join, remaining_tables, idx, ¤t_record_count,
|
||||
¤t_read_time, &loose_scan_pos);
|
||||
|
@ -9449,12 +9449,12 @@ best_extension_by_limited_search(JOIN *join,
|
|||
if (join->sort_by_table &&
|
||||
join->sort_by_table !=
|
||||
join->positions[join->const_tables].table->table)
|
||||
/*
|
||||
We may have to make a temp table, note that this is only a
|
||||
heuristic since we cannot know for sure at this point.
|
||||
/*
|
||||
We may have to make a temp table, note that this is only a
|
||||
heuristic since we cannot know for sure at this point.
|
||||
Hence it may be wrong.
|
||||
*/
|
||||
current_read_time+= current_record_count;
|
||||
current_read_time= COST_ADD(current_read_time, current_record_count);
|
||||
if (current_read_time < join->best_read)
|
||||
{
|
||||
memcpy((uchar*) join->best_positions, (uchar*) join->positions,
|
||||
|
@ -9772,8 +9772,8 @@ prev_record_reads(POSITION *positions, uint idx, table_map found_ref)
|
|||
#max_nested_outer_joins=64-1) will not make it any more precise.
|
||||
*/
|
||||
if (pos->records_read)
|
||||
found*= pos->records_read;
|
||||
}
|
||||
found= COST_MULT(found, pos->records_read);
|
||||
}
|
||||
}
|
||||
return found;
|
||||
}
|
||||
|
@ -11353,8 +11353,16 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
|
|||
/*
|
||||
We plan to scan all rows.
|
||||
Check again if we should use an index.
|
||||
We could have used an column from a previous table in
|
||||
the index if we are using limit and this is the first table
|
||||
|
||||
There are two cases:
|
||||
1) There could be an index usage the refers to a previous
|
||||
table that we didn't consider before, but could be consider
|
||||
now as a "last resort". For example
|
||||
SELECT * from t1,t2 where t1.a between t2.a and t2.b;
|
||||
2) If the current table is the first non const table
|
||||
and there is a limit it still possibly beneficial
|
||||
to use the index even if the index range is big as
|
||||
we can stop when we've found limit rows.
|
||||
|
||||
(1) - Don't switch the used index if we are using semi-join
|
||||
LooseScan on this table. Using different index will not
|
||||
|
@ -16049,8 +16057,20 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
|
|||
table->table->maybe_null= FALSE;
|
||||
table->outer_join= 0;
|
||||
if (!(straight_join || table->straight))
|
||||
table->dep_tables= table->embedding && !table->embedding->sj_subq_pred ?
|
||||
table->embedding->dep_tables : 0;
|
||||
{
|
||||
table->dep_tables= 0;
|
||||
TABLE_LIST *embedding= table->embedding;
|
||||
while (embedding)
|
||||
{
|
||||
if (embedding->nested_join->join_list.head()->outer_join)
|
||||
{
|
||||
if (!embedding->sj_subq_pred)
|
||||
table->dep_tables= embedding->dep_tables;
|
||||
break;
|
||||
}
|
||||
embedding= embedding->embedding;
|
||||
}
|
||||
}
|
||||
if (table->on_expr)
|
||||
{
|
||||
/* Add ON expression to the WHERE or upper-level ON condition. */
|
||||
|
@ -16575,11 +16595,12 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
|
|||
pos= loose_scan_pos;
|
||||
|
||||
reopt_remaining_tables &= ~rs->table->map;
|
||||
rec_count *= pos.records_read;
|
||||
cost += pos.read_time;
|
||||
rec_count= COST_MULT(rec_count, pos.records_read);
|
||||
cost= COST_ADD(cost, pos.read_time);
|
||||
|
||||
|
||||
if (!rs->emb_sj_nest)
|
||||
*outer_rec_count *= pos.records_read;
|
||||
*outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
|
||||
}
|
||||
join->cur_sj_inner_tables= save_cur_sj_inner_tables;
|
||||
|
||||
|
@ -21163,7 +21184,6 @@ join_read_first(JOIN_TAB *tab)
|
|||
tab->table->status=0;
|
||||
tab->read_record.read_record_func= join_read_next;
|
||||
tab->read_record.table=table;
|
||||
tab->read_record.index=tab->index;
|
||||
tab->read_record.record=table->record[0];
|
||||
if (!table->file->inited)
|
||||
error= table->file->ha_index_init(tab->index, tab->sorted);
|
||||
|
@ -21204,7 +21224,6 @@ join_read_last(JOIN_TAB *tab)
|
|||
tab->table->status=0;
|
||||
tab->read_record.read_record_func= join_read_prev;
|
||||
tab->read_record.table=table;
|
||||
tab->read_record.index=tab->index;
|
||||
tab->read_record.record=table->record[0];
|
||||
if (!table->file->inited)
|
||||
error= table->file->ha_index_init(tab->index, 1);
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
#define SQL_WINDOW_INCLUDED
|
||||
|
||||
#include "filesort.h"
|
||||
#include "records.h"
|
||||
|
||||
class Item_window_func;
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright (c) 2016 MariaDB Corporation
|
||||
Copyright (c) 2016, 2019, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
|
@ -377,6 +377,19 @@ bool THD::open_temporary_table(TABLE_LIST *tl)
|
|||
if (!table && (share= find_tmp_table_share(tl)))
|
||||
{
|
||||
table= open_temporary_table(share, tl->get_table_name());
|
||||
/*
|
||||
Temporary tables are not safe for parallel replication. They were
|
||||
designed to be visible to one thread only, so have no table locking.
|
||||
Thus there is no protection against two conflicting transactions
|
||||
committing in parallel and things like that.
|
||||
|
||||
So for now, anything that uses temporary tables will be serialised
|
||||
with anything before it, when using parallel replication.
|
||||
*/
|
||||
if (table && rgi_slave &&
|
||||
rgi_slave->is_parallel_exec &&
|
||||
wait_for_prior_commit())
|
||||
DBUG_RETURN(true);
|
||||
}
|
||||
|
||||
if (!table)
|
||||
|
|
|
@ -4411,6 +4411,11 @@ loop:
|
|||
return (NULL);
|
||||
}
|
||||
|
||||
if (local_err == DB_PAGE_CORRUPTED
|
||||
&& srv_force_recovery) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Try to set table as corrupted instead of
|
||||
asserting. */
|
||||
if (page_id.space() == TRX_SYS_SPACE) {
|
||||
|
@ -5774,10 +5779,27 @@ buf_page_monitor(
|
|||
}
|
||||
|
||||
/** Mark a table corrupted.
|
||||
Also remove the bpage from LRU list.
|
||||
@param[in] bpage Corrupted page. */
|
||||
static void buf_mark_space_corrupt(buf_page_t* bpage, const fil_space_t* space)
|
||||
{
|
||||
/* If block is not encrypted find the table with specified
|
||||
space id, and mark it corrupted. Encrypted tables
|
||||
are marked unusable later e.g. in ::open(). */
|
||||
if (!bpage->encrypted) {
|
||||
dict_set_corrupted_by_space(space);
|
||||
} else {
|
||||
dict_set_encrypted_by_space(space);
|
||||
}
|
||||
}
|
||||
|
||||
/** Mark a table corrupted.
|
||||
@param[in] bpage Corrupted page
|
||||
@param[in] space Corrupted page belongs to tablespace
|
||||
Also remove the bpage from LRU list. */
|
||||
static
|
||||
void
|
||||
buf_mark_space_corrupt(buf_page_t* bpage, const fil_space_t* space)
|
||||
buf_corrupt_page_release(buf_page_t* bpage, const fil_space_t* space)
|
||||
{
|
||||
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
|
||||
const ibool uncompressed = (buf_page_get_state(bpage)
|
||||
|
@ -5801,13 +5823,8 @@ buf_mark_space_corrupt(buf_page_t* bpage, const fil_space_t* space)
|
|||
|
||||
mutex_exit(buf_page_get_mutex(bpage));
|
||||
|
||||
/* If block is not encrypted find the table with specified
|
||||
space id, and mark it corrupted. Encrypted tables
|
||||
are marked unusable later e.g. in ::open(). */
|
||||
if (!bpage->encrypted) {
|
||||
dict_set_corrupted_by_space(space);
|
||||
} else {
|
||||
dict_set_encrypted_by_space(space);
|
||||
if (!srv_force_recovery) {
|
||||
buf_mark_space_corrupt(bpage, space);
|
||||
}
|
||||
|
||||
/* After this point bpage can't be referenced. */
|
||||
|
@ -5869,7 +5886,7 @@ static dberr_t buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space)
|
|||
not decrypted and it could be either encrypted and corrupted
|
||||
or corrupted or good page. If we decrypted, there page could
|
||||
still be corrupted if used key does not match. */
|
||||
const bool still_encrypted = key_version
|
||||
const bool still_encrypted = (!space->full_crc32() && key_version)
|
||||
&& space->crypt_data
|
||||
&& space->crypt_data->type != CRYPT_SCHEME_UNENCRYPTED
|
||||
&& !bpage->encrypted
|
||||
|
@ -6039,7 +6056,7 @@ database_corrupted:
|
|||
"buf_page_import_corrupt_failure",
|
||||
if (!is_predefined_tablespace(
|
||||
bpage->id.space())) {
|
||||
buf_mark_space_corrupt(bpage, space);
|
||||
buf_corrupt_page_release(bpage, space);
|
||||
ib::info() << "Simulated IMPORT "
|
||||
"corruption";
|
||||
space->release_for_io();
|
||||
|
@ -6073,7 +6090,7 @@ database_corrupted:
|
|||
<< FORCE_RECOVERY_MSG;
|
||||
}
|
||||
|
||||
if (srv_force_recovery < SRV_FORCE_IGNORE_CORRUPT) {
|
||||
if (!srv_force_recovery) {
|
||||
|
||||
/* If page space id is larger than TRX_SYS_SPACE
|
||||
(0), we will attempt to mark the corresponding
|
||||
|
@ -6083,7 +6100,7 @@ database_corrupted:
|
|||
" a corrupt database page.";
|
||||
}
|
||||
|
||||
buf_mark_space_corrupt(bpage, space);
|
||||
buf_corrupt_page_release(bpage, space);
|
||||
space->release_for_io();
|
||||
return(err);
|
||||
}
|
||||
|
@ -6092,6 +6109,18 @@ database_corrupted:
|
|||
DBUG_EXECUTE_IF("buf_page_import_corrupt_failure",
|
||||
page_not_corrupt: bpage = bpage; );
|
||||
|
||||
if (err == DB_PAGE_CORRUPTED
|
||||
|| err == DB_DECRYPTION_FAILED) {
|
||||
buf_corrupt_page_release(bpage, space);
|
||||
|
||||
if (recv_recovery_is_on()) {
|
||||
recv_recover_corrupt_page(bpage);
|
||||
}
|
||||
|
||||
space->release_for_io();
|
||||
return err;
|
||||
}
|
||||
|
||||
if (recv_recovery_is_on()) {
|
||||
recv_recover_page(bpage);
|
||||
}
|
||||
|
|
|
@ -2176,8 +2176,8 @@ buf_LRU_old_ratio_update_instance(
|
|||
buf_pool_t* buf_pool,/*!< in: buffer pool instance */
|
||||
uint old_pct,/*!< in: Reserve this percentage of
|
||||
the buffer pool for "old" blocks. */
|
||||
ibool adjust) /*!< in: TRUE=adjust the LRU list;
|
||||
FALSE=just assign buf_pool->LRU_old_ratio
|
||||
bool adjust) /*!< in: true=adjust the LRU list;
|
||||
false=just assign buf_pool->LRU_old_ratio
|
||||
during the initialization of InnoDB */
|
||||
{
|
||||
uint ratio;
|
||||
|
@ -2219,8 +2219,8 @@ buf_LRU_old_ratio_update(
|
|||
/*=====================*/
|
||||
uint old_pct,/*!< in: Reserve this percentage of
|
||||
the buffer pool for "old" blocks. */
|
||||
ibool adjust) /*!< in: TRUE=adjust the LRU list;
|
||||
FALSE=just assign buf_pool->LRU_old_ratio
|
||||
bool adjust) /*!< in: true=adjust the LRU list;
|
||||
false=just assign buf_pool->LRU_old_ratio
|
||||
during the initialization of InnoDB */
|
||||
{
|
||||
uint new_ratio = 0;
|
||||
|
|
|
@ -1215,7 +1215,8 @@ dict_create_table_step(
|
|||
ut_ad(node->col_no == v_col->v_pos);
|
||||
dict_build_v_col_def_step(node);
|
||||
|
||||
if (node->base_col_no < v_col->num_base - 1) {
|
||||
if (node->base_col_no
|
||||
< unsigned{v_col->num_base} - 1) {
|
||||
/* move on to next base column */
|
||||
node->base_col_no++;
|
||||
} else {
|
||||
|
|
|
@ -2147,44 +2147,6 @@ add_field_size:
|
|||
return(FALSE);
|
||||
}
|
||||
|
||||
/** Clears the virtual column's index list before index is
|
||||
being freed.
|
||||
@param[in] index Index being freed */
|
||||
void dict_index_remove_from_v_col_list(dict_index_t* index)
|
||||
{
|
||||
/* Index is not completely formed */
|
||||
if (!index->cached) {
|
||||
return;
|
||||
}
|
||||
if (dict_index_has_virtual(index)) {
|
||||
const dict_col_t* col;
|
||||
const dict_v_col_t* vcol;
|
||||
|
||||
for (ulint i = 0; i < dict_index_get_n_fields(index); i++) {
|
||||
col = dict_index_get_nth_col(index, i);
|
||||
if (col->is_virtual()) {
|
||||
vcol = reinterpret_cast<const dict_v_col_t*>(
|
||||
col);
|
||||
/* This could be NULL, when we do add
|
||||
virtual column, add index together. We do not
|
||||
need to track this virtual column's index */
|
||||
if (vcol->v_indexes == NULL) {
|
||||
continue;
|
||||
}
|
||||
dict_v_idx_list::iterator it;
|
||||
for (it = vcol->v_indexes->begin();
|
||||
it != vcol->v_indexes->end(); ++it) {
|
||||
dict_v_idx_t v_index = *it;
|
||||
if (v_index.index == index) {
|
||||
vcol->v_indexes->erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Adds an index to the dictionary cache, with possible indexing newly
|
||||
added column.
|
||||
@param[in] index index; NOTE! The index memory
|
||||
|
@ -2534,18 +2496,9 @@ dict_index_add_col(
|
|||
|
||||
if (col->is_virtual()) {
|
||||
dict_v_col_t* v_col = reinterpret_cast<dict_v_col_t*>(col);
|
||||
|
||||
/* When v_col->v_indexes==NULL,
|
||||
ha_innobase::commit_inplace_alter_table(commit=true)
|
||||
will evict and reload the table definition, and
|
||||
v_col->v_indexes will not be NULL for the new table. */
|
||||
if (v_col->v_indexes != NULL) {
|
||||
/* Register the index with the virtual column index
|
||||
list */
|
||||
v_col->v_indexes->push_back(
|
||||
dict_v_idx_t(index, index->n_def));
|
||||
}
|
||||
|
||||
/* Register the index with the virtual column index list */
|
||||
v_col->n_v_indexes++;
|
||||
v_col->v_indexes.push_front(dict_v_idx_t(index, index->n_def));
|
||||
col_name = dict_table_get_v_col_name_mysql(
|
||||
table, dict_col_get_no(col));
|
||||
} else {
|
||||
|
|
|
@ -1940,7 +1940,7 @@ dict_load_virtual_one_col(
|
|||
btr_pcur_open_on_user_rec(sys_virtual_index, tuple, PAGE_CUR_GE,
|
||||
BTR_SEARCH_LEAF, &pcur, &mtr);
|
||||
|
||||
for (i = 0; i < v_col->num_base + skipped; i++) {
|
||||
for (i = 0; i < unsigned{v_col->num_base} + skipped; i++) {
|
||||
const char* err_msg;
|
||||
ulint pos;
|
||||
|
||||
|
|
|
@ -167,6 +167,9 @@ dict_mem_table_create(
|
|||
mem_heap_alloc(heap, table->n_cols * sizeof(dict_col_t)));
|
||||
table->v_cols = static_cast<dict_v_col_t*>(
|
||||
mem_heap_alloc(heap, n_v_cols * sizeof(*table->v_cols)));
|
||||
for (ulint i = n_v_cols; i--; ) {
|
||||
new (&table->v_cols[i]) dict_v_col_t();
|
||||
}
|
||||
|
||||
/* true means that the stats latch will be enabled -
|
||||
dict_table_stats_lock() will not be noop. */
|
||||
|
@ -227,15 +230,10 @@ dict_mem_table_free(
|
|||
/* Clean up virtual index info structures that are registered
|
||||
with virtual columns */
|
||||
for (ulint i = 0; i < table->n_v_def; i++) {
|
||||
dict_v_col_t* vcol
|
||||
= dict_table_get_nth_v_col(table, i);
|
||||
|
||||
UT_DELETE(vcol->v_indexes);
|
||||
dict_table_get_nth_v_col(table, i)->~dict_v_col_t();
|
||||
}
|
||||
|
||||
if (table->s_cols != NULL) {
|
||||
UT_DELETE(table->s_cols);
|
||||
}
|
||||
UT_DELETE(table->s_cols);
|
||||
|
||||
mem_heap_free(table->heap);
|
||||
}
|
||||
|
@ -414,7 +412,8 @@ dict_mem_table_add_v_col(
|
|||
v_col->num_base = num_base;
|
||||
|
||||
/* Initialize the index list for virtual columns */
|
||||
v_col->v_indexes = UT_NEW_NOKEY(dict_v_idx_list());
|
||||
ut_ad(v_col->v_indexes.empty());
|
||||
v_col->n_v_indexes = 0;
|
||||
|
||||
return(v_col);
|
||||
}
|
||||
|
@ -448,7 +447,7 @@ dict_mem_table_add_s_col(
|
|||
}
|
||||
|
||||
s_col.num_base = num_base;
|
||||
table->s_cols->push_back(s_col);
|
||||
table->s_cols->push_front(s_col);
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
|
@ -749,13 +748,11 @@ dict_mem_index_create(
|
|||
|
||||
if (type & DICT_SPATIAL) {
|
||||
mutex_create(LATCH_ID_RTR_SSN_MUTEX, &index->rtr_ssn.mutex);
|
||||
index->rtr_track = static_cast<rtr_info_track_t*>(
|
||||
mem_heap_alloc(
|
||||
heap,
|
||||
sizeof(*index->rtr_track)));
|
||||
index->rtr_track = new
|
||||
(mem_heap_alloc(heap, sizeof *index->rtr_track))
|
||||
rtr_info_track_t();
|
||||
mutex_create(LATCH_ID_RTR_ACTIVE_MUTEX,
|
||||
&index->rtr_track->rtr_active_mutex);
|
||||
index->rtr_track->rtr_active = UT_NEW_NOKEY(rtr_info_active());
|
||||
}
|
||||
|
||||
return(index);
|
||||
|
@ -863,11 +860,7 @@ dict_mem_fill_vcol_has_index(
|
|||
continue;
|
||||
}
|
||||
|
||||
dict_v_idx_list::iterator it;
|
||||
for (it = v_col->v_indexes->begin();
|
||||
it != v_col->v_indexes->end(); ++it) {
|
||||
dict_v_idx_t v_idx = *it;
|
||||
|
||||
for (const auto& v_idx : v_col->v_indexes) {
|
||||
if (v_idx.index != index) {
|
||||
continue;
|
||||
}
|
||||
|
@ -940,7 +933,7 @@ dict_mem_fill_vcol_set_for_base_col(
|
|||
continue;
|
||||
}
|
||||
|
||||
for (ulint j = 0; j < v_col->num_base; j++) {
|
||||
for (ulint j = 0; j < unsigned{v_col->num_base}; j++) {
|
||||
if (strcmp(col_name, dict_table_get_col_name(
|
||||
table,
|
||||
v_col->base_col[j]->ind)) == 0) {
|
||||
|
@ -1064,22 +1057,15 @@ dict_mem_index_free(
|
|||
dict_index_zip_pad_mutex_destroy(index);
|
||||
|
||||
if (dict_index_is_spatial(index)) {
|
||||
rtr_info_active::iterator it;
|
||||
rtr_info_t* rtr_info;
|
||||
|
||||
for (it = index->rtr_track->rtr_active->begin();
|
||||
it != index->rtr_track->rtr_active->end(); ++it) {
|
||||
rtr_info = *it;
|
||||
|
||||
for (auto& rtr_info : index->rtr_track->rtr_active) {
|
||||
rtr_info->index = NULL;
|
||||
}
|
||||
|
||||
mutex_destroy(&index->rtr_ssn.mutex);
|
||||
mutex_destroy(&index->rtr_track->rtr_active_mutex);
|
||||
UT_DELETE(index->rtr_track->rtr_active);
|
||||
index->rtr_track->~rtr_info_track_t();
|
||||
}
|
||||
|
||||
dict_index_remove_from_v_col_list(index);
|
||||
index->detach_columns();
|
||||
mem_heap_free(index->heap);
|
||||
}
|
||||
|
||||
|
|
|
@ -1162,14 +1162,10 @@ fil_crypt_read_crypt_data(fil_space_t* space)
|
|||
mtr.commit();
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Start encrypting a space
|
||||
/** Start encrypting a space
|
||||
@param[in,out] space Tablespace
|
||||
@return true if a recheck is needed */
|
||||
static
|
||||
bool
|
||||
fil_crypt_start_encrypting_space(
|
||||
fil_space_t* space)
|
||||
@return true if a recheck of tablespace is needed by encryption thread. */
|
||||
static bool fil_crypt_start_encrypting_space(fil_space_t* space)
|
||||
{
|
||||
bool recheck = false;
|
||||
|
||||
|
@ -1627,14 +1623,12 @@ fil_crypt_return_iops(
|
|||
fil_crypt_update_total_stat(state);
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Search for a space needing rotation
|
||||
@param[in,out] key_state Key state
|
||||
@param[in,out] state Rotation state
|
||||
@param[in,out] recheck recheck ? */
|
||||
static
|
||||
bool
|
||||
fil_crypt_find_space_to_rotate(
|
||||
/** Search for a space needing rotation
|
||||
@param[in,out] key_state Key state
|
||||
@param[in,out] state Rotation state
|
||||
@param[in,out] recheck recheck of the tablespace is needed or
|
||||
still encryption thread does write page 0 */
|
||||
static bool fil_crypt_find_space_to_rotate(
|
||||
key_state_t* key_state,
|
||||
rotate_thread_t* state,
|
||||
bool* recheck)
|
||||
|
@ -1664,11 +1658,10 @@ fil_crypt_find_space_to_rotate(
|
|||
/* If key rotation is enabled (default) we iterate all tablespaces.
|
||||
If key rotation is not enabled we iterate only the tablespaces
|
||||
added to keyrotation list. */
|
||||
if (srv_fil_crypt_rotate_key_age) {
|
||||
state->space = fil_space_next(state->space);
|
||||
} else {
|
||||
state->space = fil_space_keyrotate_next(state->space);
|
||||
}
|
||||
state->space = srv_fil_crypt_rotate_key_age
|
||||
? fil_space_next(state->space)
|
||||
: fil_system.keyrotate_next(state->space, *recheck,
|
||||
key_state->key_version);
|
||||
|
||||
while (!state->should_shutdown() && state->space) {
|
||||
/* If there is no crypt data and we have not yet read
|
||||
|
@ -1686,11 +1679,10 @@ fil_crypt_find_space_to_rotate(
|
|||
return true;
|
||||
}
|
||||
|
||||
if (srv_fil_crypt_rotate_key_age) {
|
||||
state->space = fil_space_next(state->space);
|
||||
} else {
|
||||
state->space = fil_space_keyrotate_next(state->space);
|
||||
}
|
||||
state->space = srv_fil_crypt_rotate_key_age
|
||||
? fil_space_next(state->space)
|
||||
: fil_system.keyrotate_next(state->space, *recheck,
|
||||
key_state->key_version);
|
||||
}
|
||||
|
||||
/* if we didn't find any space return iops */
|
||||
|
@ -2315,13 +2307,8 @@ A thread which monitors global key state and rotates tablespaces accordingly
|
|||
@return a dummy parameter */
|
||||
extern "C" UNIV_INTERN
|
||||
os_thread_ret_t
|
||||
DECLARE_THREAD(fil_crypt_thread)(
|
||||
/*=============================*/
|
||||
void* arg __attribute__((unused))) /*!< in: a dummy parameter required
|
||||
* by os_thread_create */
|
||||
DECLARE_THREAD(fil_crypt_thread)(void*)
|
||||
{
|
||||
UT_NOT_USED(arg);
|
||||
|
||||
mutex_enter(&fil_crypt_threads_mutex);
|
||||
uint thread_no = srv_n_fil_crypt_threads_started;
|
||||
srv_n_fil_crypt_threads_started++;
|
||||
|
|
|
@ -5029,25 +5029,29 @@ fil_space_t::acquire() and fil_space_t::release() are invoked here which
|
|||
blocks a concurrent operation from dropping the tablespace.
|
||||
@param[in] prev_space Pointer to the previous fil_space_t.
|
||||
If NULL, use the first fil_space_t on fil_system.space_list.
|
||||
@param[in] recheck recheck of the tablespace is needed or
|
||||
still encryption thread does write page0 for it
|
||||
@param[in] key_version key version of the key state thread
|
||||
@return pointer to the next fil_space_t.
|
||||
@retval NULL if this was the last*/
|
||||
@retval NULL if this was the last */
|
||||
fil_space_t*
|
||||
fil_space_keyrotate_next(
|
||||
fil_space_t* prev_space)
|
||||
fil_system_t::keyrotate_next(
|
||||
fil_space_t* prev_space,
|
||||
bool recheck,
|
||||
uint key_version)
|
||||
{
|
||||
fil_space_t* space = prev_space;
|
||||
fil_space_t* old = NULL;
|
||||
|
||||
mutex_enter(&fil_system.mutex);
|
||||
|
||||
if (UT_LIST_GET_LEN(fil_system.rotation_list) == 0) {
|
||||
if (space) {
|
||||
space->release();
|
||||
fil_space_remove_from_keyrotation(space);
|
||||
}
|
||||
mutex_exit(&fil_system.mutex);
|
||||
return(NULL);
|
||||
}
|
||||
/* If one of the encryption threads already started the encryption
|
||||
of the table then don't remove the unencrypted spaces from
|
||||
rotation list
|
||||
|
||||
If there is a change in innodb_encrypt_tables variables value then
|
||||
don't remove the last processed tablespace from the rotation list. */
|
||||
const bool remove = ((!recheck || prev_space->crypt_data)
|
||||
&& (!key_version == !srv_encrypt_tables));
|
||||
|
||||
fil_space_t* space = prev_space;
|
||||
|
||||
if (prev_space == NULL) {
|
||||
space = UT_LIST_GET_FIRST(fil_system.rotation_list);
|
||||
|
@ -5058,22 +5062,17 @@ fil_space_keyrotate_next(
|
|||
/* Move on to the next fil_space_t */
|
||||
space->release();
|
||||
|
||||
old = space;
|
||||
space = UT_LIST_GET_NEXT(rotation_list, space);
|
||||
|
||||
fil_space_remove_from_keyrotation(old);
|
||||
}
|
||||
while (space != NULL
|
||||
&& (UT_LIST_GET_LEN(space->chain) == 0
|
||||
|| space->is_stopping())) {
|
||||
space = UT_LIST_GET_NEXT(rotation_list, space);
|
||||
}
|
||||
|
||||
/* Skip spaces that are being created by fil_ibd_create(),
|
||||
or dropped. Note that rotation_list contains only
|
||||
space->purpose == FIL_TYPE_TABLESPACE. */
|
||||
while (space != NULL
|
||||
&& (UT_LIST_GET_LEN(space->chain) == 0
|
||||
|| space->is_stopping())) {
|
||||
|
||||
old = space;
|
||||
space = UT_LIST_GET_NEXT(rotation_list, space);
|
||||
fil_space_remove_from_keyrotation(old);
|
||||
if (remove) {
|
||||
fil_space_remove_from_keyrotation(prev_space);
|
||||
}
|
||||
}
|
||||
|
||||
if (space != NULL) {
|
||||
|
@ -5081,7 +5080,6 @@ fil_space_keyrotate_next(
|
|||
}
|
||||
|
||||
mutex_exit(&fil_system.mutex);
|
||||
|
||||
return(space);
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2017, 2018, MariaDB Corporation.
|
||||
Copyright (c) 2017, 2019, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -972,7 +972,7 @@ rtr_create_rtr_info(
|
|||
&rtr_info->rtr_path_mutex);
|
||||
|
||||
mutex_enter(&index->rtr_track->rtr_active_mutex);
|
||||
index->rtr_track->rtr_active->push_back(rtr_info);
|
||||
index->rtr_track->rtr_active.push_front(rtr_info);
|
||||
mutex_exit(&index->rtr_track->rtr_active_mutex);
|
||||
return(rtr_info);
|
||||
}
|
||||
|
@ -1045,7 +1045,7 @@ rtr_init_rtr_info(
|
|||
rtr_info->index = index;
|
||||
|
||||
mutex_enter(&index->rtr_track->rtr_active_mutex);
|
||||
index->rtr_track->rtr_active->push_back(rtr_info);
|
||||
index->rtr_track->rtr_active.push_front(rtr_info);
|
||||
mutex_exit(&index->rtr_track->rtr_active_mutex);
|
||||
}
|
||||
|
||||
|
@ -1097,7 +1097,7 @@ rtr_clean_rtr_info(
|
|||
}
|
||||
|
||||
if (index) {
|
||||
index->rtr_track->rtr_active->remove(rtr_info);
|
||||
index->rtr_track->rtr_active.remove(rtr_info);
|
||||
mutex_exit(&index->rtr_track->rtr_active_mutex);
|
||||
}
|
||||
|
||||
|
@ -1202,36 +1202,22 @@ rtr_check_discard_page(
|
|||
the root page */
|
||||
buf_block_t* block) /*!< in: block of page to be discarded */
|
||||
{
|
||||
ulint pageno = block->page.id.page_no();
|
||||
rtr_info_t* rtr_info;
|
||||
rtr_info_active::iterator it;
|
||||
const ulint pageno = block->page.id.page_no();
|
||||
|
||||
mutex_enter(&index->rtr_track->rtr_active_mutex);
|
||||
|
||||
for (it = index->rtr_track->rtr_active->begin();
|
||||
it != index->rtr_track->rtr_active->end(); ++it) {
|
||||
rtr_info = *it;
|
||||
rtr_node_path_t::iterator rit;
|
||||
bool found = false;
|
||||
|
||||
for (const auto& rtr_info : index->rtr_track->rtr_active) {
|
||||
if (cursor && rtr_info == cursor->rtr_info) {
|
||||
continue;
|
||||
}
|
||||
|
||||
mutex_enter(&rtr_info->rtr_path_mutex);
|
||||
for (rit = rtr_info->path->begin();
|
||||
rit != rtr_info->path->end(); ++rit) {
|
||||
node_visit_t node = *rit;
|
||||
|
||||
for (const node_visit_t& node : *rtr_info->path) {
|
||||
if (node.page_no == pageno) {
|
||||
found = true;
|
||||
rtr_rebuild_path(rtr_info, pageno);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (found) {
|
||||
rtr_rebuild_path(rtr_info, pageno);
|
||||
}
|
||||
mutex_exit(&rtr_info->rtr_path_mutex);
|
||||
|
||||
if (rtr_info->matches) {
|