/* Copyright (C) Olivier Bertrand 2004 - 2016

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

/**
  @file ha_connect.cc

  @brief
  The ha_connect engine is a stubbed storage engine that enables creating tables
  based on external data. Principally they are based on plain files of many
  different types, but also on collections of such files, collections of tables,
  local or remote MySQL/MariaDB tables retrieved via the MySQL API,
  ODBC tables retrieving data from other DBMSs having an ODBC server, and even
  virtual tables.

  @details
  ha_connect will let you create/open/delete tables; the created table can be
  made by specifying an already existing file, and the drop table command will
  just suppress the table definition but not the eventual data file.
  Indexes are not supported for all table types but data can be inserted,
  updated or deleted.

  You can enable the CONNECT storage engine in your build by doing the
  following during your build process:<br> ./configure
  --with-connect-storage-engine

  You can install the CONNECT handler as all other storage handlers.

  Once this is done, MySQL will let you create tables with:<br>
  CREATE TABLE <table name> (...) ENGINE=CONNECT;

  The example storage engine does not use table locks. It
  implements an example "SHARE" that is inserted into a hash by table
  name. This is not used yet.

  Please read the object definition in ha_connect.h before reading the rest
  of this file.

  @note
  This MariaDB CONNECT handler is currently an adaptation of the XDB handler
  that was written for MySQL version 4.1.2-alpha. Its overall design should
  be enhanced in the future to meet MariaDB requirements.

  @note
  It was also written from Brian's ha_example handler and contains parts
  of it that are there, such as table and system variables.

  @note
  When you create a CONNECT table, the MySQL Server creates a table .frm
  (format) file in the database directory, using the table name as the file
  name as is customary with MySQL.
  For file based tables, if a file name is not specified, this is an inward
  table. An empty file is made in the current data directory that you can
  populate later like for other engine tables. This file is modified on ALTER
  and is deleted when dropping the table.
  If a file name is specified, this is an outward table. The specified file
  will be used as representing the table data and will not be modified or
  deleted on commands such as ALTER or DROP.
  To get an idea of what occurs, here is an example select that would do
  a scan of an entire table:

  @code
  ha_connect::open
  ha_connect::store_lock
  ha_connect::external_lock
  ha_connect::info
  ha_connect::rnd_init
  ha_connect::extra
  ENUM HA_EXTRA_CACHE        Cache record in HA_rrnd()
  ha_connect::rnd_next
  ha_connect::rnd_next
  ha_connect::rnd_next
  ha_connect::rnd_next
  ha_connect::rnd_next
  ha_connect::rnd_next
  ha_connect::rnd_next
  ha_connect::rnd_next
  ha_connect::rnd_next
  ha_connect::extra
  ENUM HA_EXTRA_NO_CACHE     End caching of records (def)
  ha_connect::external_lock
  ha_connect::extra
  ENUM HA_EXTRA_RESET        Reset database to after open
  @endcode

  Here you see that the CONNECT storage engine has 9 rows returned by rnd_next
  before rnd_next signals that it has reached the end of its data. Calls to
  ha_connect::extra() are hints as to what will be occurring to the request.

  Happy use!<br>
    -Olivier
*/
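
/*
  Illustrative example (added for clarity, not part of the original comment):
  an outward table bound to an existing CSV file could be created as below.
  The file name and column layout are assumptions made up for the example;
  only the option keywords (TABLE_TYPE, FILE_NAME, HEADER, SEP_CHAR) come from
  connect_table_option_list defined further down in this file.

    CREATE TABLE emp (
      name   CHAR(16) NOT NULL,
      salary DOUBLE(8,2))
    ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='emp.csv' HEADER=1 SEP_CHAR=';';

  Dropping such an outward table removes only the definition; emp.csv is kept.
*/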

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation        // gcc: Class implementation
#endif

#define MYSQL_SERVER 1
#define DONT_DEFINE_VOID
#include "sql_class.h"
#include "create_options.h"
#include "mysql_com.h"
#include "field.h"
#include "sql_parse.h"
#include "sql_base.h"
#include <sys/stat.h>
#if defined(NEW_WAY)
#include "sql_table.h"
#endif   // NEW_WAY
#include "sql_partition.h"
#undef  OFFSET

#define NOPARSE
#if defined(UNIX)
#include "osutil.h"
#endif   // UNIX
#include "global.h"
#include "plgdbsem.h"
#if defined(ODBC_SUPPORT)
#include "odbccat.h"
#endif   // ODBC_SUPPORT
#include "xtable.h"
#include "tabmysql.h"
#include "filamdbf.h"
#include "tabxcl.h"
#include "tabfmt.h"
#include "reldef.h"
#include "tabcol.h"
#include "xindex.h"
#if defined(__WIN__)
#include <io.h>
#include "tabwmi.h"
#endif   // __WIN__
#include "connect.h"
#include "user_connect.h"
#include "ha_connect.h"
#include "myutil.h"
#include "preparse.h"
#include "inihandl.h"
#if defined(LIBXML2_SUPPORT)
#include "libdoc.h"
#endif   // LIBXML2_SUPPORT
#include "taboccur.h"
#include "tabpivot.h"
#include "tabfix.h"

#define my_strupr(p)    my_caseup_str(default_charset_info, (p));
#define my_strlwr(p)    my_casedn_str(default_charset_info, (p));
#define my_stricmp(a,b) my_strcasecmp(default_charset_info, (a), (b))

/***********************************************************************/
/*  Initialize the ha_connect static members.                          */
/***********************************************************************/
#define SZCONV     8192
#define SZWORK 67108864             // Default work area size 64M
#define SZWMIN  4194304             // Minimum work area size  4M
#define JSONMAX      10             // JSON Default max grp size

extern "C" {
       char version[]= "Version 1.04.0006 March 12, 2016";
#if defined(__WIN__)
       char compver[]= "Version 1.04.0006 " __DATE__ " "  __TIME__;
       char slash= '\\';
#else   // !__WIN__
       char slash= '/';
#endif  // !__WIN__
} // extern "C"

#if defined(XMAP)
       my_bool xmap= false;
#endif   // XMAP

ulong  ha_connect::num= 0;

#if defined(XMSG)
extern "C" {
       char *msg_path;
} // extern "C"
#endif   // XMSG

/***********************************************************************/
/*  Utility functions.                                                 */
/***********************************************************************/
PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
PQRYRES VirColumns(PGLOBAL g, bool info);
PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info);
PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info);
void    PushWarning(PGLOBAL g, THD *thd, int level);
bool    CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host,
                  const char *db, char *tab, const char *src, int port);
bool    ExactInfo(void);
USETEMP UseTemp(void);
int     GetConvSize(void);
TYPCONV GetTypeConv(void);
uint    GetJsonGrpSize(void);
uint    GetWorkSize(void);
void    SetWorkSize(uint);
extern "C" const char *msglang(void);

static PCONNECT GetUser(THD *thd, PCONNECT xp);
static PGLOBAL  GetPlug(THD *thd, PCONNECT& lxp);

static handler *connect_create_handler(handlerton *hton,
                                       TABLE_SHARE *table,
                                       MEM_ROOT *mem_root);

static int connect_assisted_discovery(handlerton *hton, THD* thd,
                                      TABLE_SHARE *table_s,
                                      HA_CREATE_INFO *info);

/****************************************************************************/
/*  Return str as a zero terminated string.                                 */
/****************************************************************************/
static char *strz(PGLOBAL g, LEX_STRING &ls)
{
  char *str= (char*)PlugSubAlloc(g, NULL, ls.length + 1);

  memcpy(str, ls.str, ls.length);
  str[ls.length]= 0;
  return str;
} // end of strz

/***********************************************************************/
/*  CONNECT session variables definitions.                             */
/***********************************************************************/
// Tracing: 0 no, 1 yes, >1 more tracing
static MYSQL_THDVAR_INT(xtrace,
       PLUGIN_VAR_RQCMDARG, "Console trace value.",
       NULL, NULL, 0, 0, INT_MAX, 1);

// Getting exact info values
static MYSQL_THDVAR_BOOL(exact_info, PLUGIN_VAR_RQCMDARG,
       "Getting exact info values",
       NULL, NULL, 0);

/**
  Temporary file usage:
    no:    Not using temporary file
    auto:  Using temporary file when needed
    yes:   Always using temporary file
    force: Force using temporary file (no MAP)
    test:  Reserved
*/
const char *usetemp_names[]=
{
  "NO", "AUTO", "YES", "FORCE", "TEST", NullS
};

TYPELIB usetemp_typelib=
{
  array_elements(usetemp_names) - 1, "usetemp_typelib",
  usetemp_names, NULL
};

static MYSQL_THDVAR_ENUM(
  use_tempfile,                    // name
  PLUGIN_VAR_RQCMDARG,             // opt
  "Temporary file use.",           // comment
  NULL,                            // check
  NULL,                            // update function
  1,                               // def (AUTO)
  &usetemp_typelib);               // typelib

// Size used for g->Sarea_Size
static MYSQL_THDVAR_UINT(work_size,
       PLUGIN_VAR_RQCMDARG,
       "Size of the CONNECT work area.",
       NULL, NULL, SZWORK, SZWMIN, UINT_MAX, 1);

// Size used when converting TEXT columns to VARCHAR
static MYSQL_THDVAR_INT(conv_size,
       PLUGIN_VAR_RQCMDARG,             // opt
       "Size used when converting TEXT columns.",
       NULL, NULL, SZCONV, 0, 65500, 1);

/**
  Type conversion:
    no:   Unsupported types -> TYPE_ERROR
    yes:  TEXT -> VARCHAR
    skip: skip unsupported type columns in Discovery
*/
const char *xconv_names[]=
{
  "NO", "YES", "SKIP", NullS
};

TYPELIB xconv_typelib=
{
  array_elements(xconv_names) - 1, "xconv_typelib",
  xconv_names, NULL
};

static MYSQL_THDVAR_ENUM(
  type_conv,                       // name
  PLUGIN_VAR_RQCMDARG,             // opt
  "Unsupported types conversion.", // comment
  NULL,                            // check
  NULL,                            // update function
  0,                               // def (no)
  &xconv_typelib);                 // typelib

// Estimate max number of rows for JSON aggregate functions
static MYSQL_THDVAR_UINT(json_grp_size,
       PLUGIN_VAR_RQCMDARG,             // opt
       "max number of rows for JSON aggregate functions.",
       NULL, NULL, JSONMAX, 1, INT_MAX, 1);

#if defined(XMSG) || defined(NEWMSG)
const char *language_names[]=
{
  "default", "english", "french", NullS
};

TYPELIB language_typelib=
{
  array_elements(language_names) - 1, "language_typelib",
  language_names, NULL
};

static MYSQL_THDVAR_ENUM(
  msg_lang,                        // name
  PLUGIN_VAR_RQCMDARG,             // opt
  "Message language",              // comment
  NULL,                            // check
  NULL,                            // update
  1,                               // def (ENGLISH)
  &language_typelib);              // typelib
#endif   // XMSG || NEWMSG
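
/*
  Usage note (added, not in the original source): each THDVAR above is exposed
  to SQL as a session variable prefixed with the plugin name, so under the
  usual naming convention (an assumption about the deployed plugin name) they
  can be set per session, for example:

    SET connect_xtrace= 1;
    SET connect_use_tempfile= 'FORCE';
    SET connect_type_conv= 'YES';
    SET connect_work_size= 8388608;
*/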

/***********************************************************************/
/*  The CONNECT handlerton object.                                     */
/***********************************************************************/
handlerton *connect_hton= NULL;

/***********************************************************************/
/*  Function to export session variable values to other source files.  */
/***********************************************************************/
extern "C" int GetTraceValue(void)
  {return connect_hton ? THDVAR(current_thd, xtrace) : 0;}
bool ExactInfo(void) {return THDVAR(current_thd, exact_info);}
USETEMP UseTemp(void) {return (USETEMP)THDVAR(current_thd, use_tempfile);}
int GetConvSize(void) {return THDVAR(current_thd, conv_size);}
TYPCONV GetTypeConv(void) {return (TYPCONV)THDVAR(current_thd, type_conv);}
uint GetJsonGrpSize(void)
  {return connect_hton ? THDVAR(current_thd, json_grp_size) : 10;}
uint GetWorkSize(void) {return THDVAR(current_thd, work_size);}
void SetWorkSize(uint)
{
  // Changing the session variable value seems to be impossible here
  // and should be done in a check function
  push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0,
               "Work size too big, try setting a smaller value");
} // end of SetWorkSize

#if defined(XMSG) || defined(NEWMSG)
extern "C" const char *msglang(void)
{
  return language_names[THDVAR(current_thd, msg_lang)];
} // end of msglang
#else   // !XMSG && !NEWMSG
extern "C" const char *msglang(void)
{
#if defined(FRENCH)
  return "french";
#else  // DEFAULT
  return "english";
#endif // DEFAULT
} // end of msglang
#endif  // !XMSG && !NEWMSG

#if 0
/***********************************************************************/
/*  Global variables update functions.                                 */
/***********************************************************************/
static void update_connect_zconv(MYSQL_THD thd,
                                 struct st_mysql_sys_var *var,
                                 void *var_ptr, const void *save)
{
  zconv= *(int *)var_ptr= *(int *)save;
} // end of update_connect_zconv

static void update_connect_xconv(MYSQL_THD thd,
                                 struct st_mysql_sys_var *var,
                                 void *var_ptr, const void *save)
{
  xconv= (int)(*(ulong *)var_ptr= *(ulong *)save);
} // end of update_connect_xconv

#if defined(XMAP)
static void update_connect_xmap(MYSQL_THD thd,
                                struct st_mysql_sys_var *var,
                                void *var_ptr, const void *save)
{
  xmap= (my_bool)(*(my_bool *)var_ptr= *(my_bool *)save);
} // end of update_connect_xmap
#endif   // XMAP
#endif // 0

#if 0 // (was XMSG) Unuseful because not called for default value
static void update_msg_path(MYSQL_THD thd,
                            struct st_mysql_sys_var *var,
                            void *var_ptr, const void *save)
{
  char *value= *(char**)save;
  char *old= *(char**)var_ptr;

  if (value)
    *(char**)var_ptr= my_strdup(value, MYF(0));
  else
    *(char**)var_ptr= 0;

  my_free(old);
} // end of update_msg_path

static int check_msg_path (MYSQL_THD thd, struct st_mysql_sys_var *var,
                           void *save, struct st_mysql_value *value)
{
  const char *path;
  char        buff[512];
  int         len= sizeof(buff);

  path= value->val_str(value, buff, &len);

  if (path && *path != '*') {
    /* Save a pointer to the name in the
       'file_format_name_map' constant array. */
    *(char**)save= my_strdup(path, MYF(0));
    return(0);
  } else {
    push_warning_printf(thd,
                        Sql_condition::WARN_LEVEL_WARN,
                        ER_WRONG_ARGUMENTS,
                        "CONNECT: invalid message path");
  } // endif path

  *(char**)save= NULL;
  return(1);
} // end of check_msg_path
#endif   // 0

/**
  CREATE TABLE option list (table options)

  These can be specified in the CREATE TABLE:
  CREATE TABLE ( ... ) {...here...}
*/
ha_create_table_option connect_table_option_list[]=
{
  HA_TOPTION_STRING("TABLE_TYPE", type),
  HA_TOPTION_STRING("FILE_NAME", filename),
  HA_TOPTION_STRING("XFILE_NAME", optname),
//HA_TOPTION_STRING("CONNECT_STRING", connect),
  HA_TOPTION_STRING("TABNAME", tabname),
  HA_TOPTION_STRING("TABLE_LIST", tablist),
  HA_TOPTION_STRING("DBNAME", dbname),
  HA_TOPTION_STRING("SEP_CHAR", separator),
  HA_TOPTION_STRING("QCHAR", qchar),
  HA_TOPTION_STRING("MODULE", module),
  HA_TOPTION_STRING("SUBTYPE", subtype),
  HA_TOPTION_STRING("CATFUNC", catfunc),
  HA_TOPTION_STRING("SRCDEF", srcdef),
  HA_TOPTION_STRING("COLIST", colist),
  HA_TOPTION_STRING("OPTION_LIST", oplist),
  HA_TOPTION_STRING("DATA_CHARSET", data_charset),
  HA_TOPTION_NUMBER("LRECL", lrecl, 0, 0, INT_MAX32, 1),
  HA_TOPTION_NUMBER("BLOCK_SIZE", elements, 0, 0, INT_MAX32, 1),
//HA_TOPTION_NUMBER("ESTIMATE", estimate, 0, 0, INT_MAX32, 1),
  HA_TOPTION_NUMBER("MULTIPLE", multiple, 0, 0, 2, 1),
  HA_TOPTION_NUMBER("HEADER", header, 0, 0, 3, 1),
  HA_TOPTION_NUMBER("QUOTED", quoted, (ulonglong) -1, 0, 3, 1),
  HA_TOPTION_NUMBER("ENDING", ending, (ulonglong) -1, 0, INT_MAX32, 1),
  HA_TOPTION_NUMBER("COMPRESS", compressed, 0, 0, 2, 1),
//HA_TOPTION_BOOL("COMPRESS", compressed, 0),
  HA_TOPTION_BOOL("MAPPED", mapped, 0),
  HA_TOPTION_BOOL("HUGE", huge, 0),
  HA_TOPTION_BOOL("SPLIT", split, 0),
  HA_TOPTION_BOOL("READONLY", readonly, 0),
  HA_TOPTION_BOOL("SEPINDEX", sepindex, 0),
  HA_TOPTION_END
};
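
/*
  Illustrative example (added): table options from the list above appear after
  the ENGINE clause. The host, database, and table names below are assumptions
  made up for the example; only the option keywords come from the list.

    CREATE TABLE remote_emp ENGINE=CONNECT TABLE_TYPE=MYSQL
      DBNAME='hr' TABNAME='emp'
      OPTION_LIST='host=localhost,user=reader,port=3306';

  Options that are not declared above can still be passed through OPTION_LIST
  as comma separated name=value pairs (see GetListOption further down).
*/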

/**
  CREATE TABLE option list (field options)

  These can be specified in the CREATE TABLE per field:
  CREATE TABLE ( field ... {...here...}, ... )
*/
ha_create_table_option connect_field_option_list[]=
{
  HA_FOPTION_NUMBER("FLAG", offset, (ulonglong) -1, 0, INT_MAX32, 1),
  HA_FOPTION_NUMBER("MAX_DIST", freq, 0, 0, INT_MAX32, 1), // BLK_INDX
//HA_FOPTION_NUMBER("DISTRIB", opt, 0, 0, 2, 1),  // used for BLK_INDX
  HA_FOPTION_NUMBER("FIELD_LENGTH", fldlen, 0, 0, INT_MAX32, 1),
  HA_FOPTION_STRING("DATE_FORMAT", dateformat),
  HA_FOPTION_STRING("FIELD_FORMAT", fieldformat),
  HA_FOPTION_STRING("SPECIAL", special),
  HA_FOPTION_ENUM("DISTRIB", opt, "scattered,clustered,sorted", 0),
  HA_FOPTION_END
};
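
/*
  Illustrative example (added): field options are written after the column
  definition they apply to. The column names and formats are assumptions for
  the example; the option keywords (FLAG, FIELD_LENGTH, DATE_FORMAT) are those
  declared just above.

    CREATE TABLE people (
      name  CHAR(12) FLAG=1,
      birth DATE FIELD_LENGTH=10 DATE_FORMAT='DD/MM/YYYY')
    ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='people.csv';
*/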

/*
  CREATE TABLE option list (index options)

  These can be specified in the CREATE TABLE per index:
  CREATE TABLE ( field ..., .., INDEX .... *here*, ... )
*/
ha_create_table_option connect_index_option_list[]=
{
  HA_IOPTION_BOOL("DYNAM", dynamic, 0),
  HA_IOPTION_BOOL("MAPPED", mapped, 0),
  HA_IOPTION_END
};

/***********************************************************************/
/*  Push G->Message as a MySQL warning.                                */
/***********************************************************************/
bool PushWarning(PGLOBAL g, PTDBASE tdbp, int level)
{
  PHC    phc;
  THD   *thd;
  MYCAT *cat= (MYCAT*)tdbp->GetDef()->GetCat();

  if (!cat || !(phc= cat->GetHandler()) || !phc->GetTable() ||
      !(thd= (phc->GetTable())->in_use))
    return true;

  PushWarning(g, thd, level);
  return false;
} // end of PushWarning

void PushWarning(PGLOBAL g, THD *thd, int level)
{
  if (thd) {
    Sql_condition::enum_warning_level wlvl;

    wlvl= (Sql_condition::enum_warning_level)level;
    push_warning(thd, wlvl, 0, g->Message);
  } else
    htrc("%s\n", g->Message);

} // end of PushWarning

#ifdef HAVE_PSI_INTERFACE
static PSI_mutex_key con_key_mutex_CONNECT_SHARE_mutex;

static PSI_mutex_info all_connect_mutexes[]=
{
  { &con_key_mutex_CONNECT_SHARE_mutex, "CONNECT_SHARE::mutex", 0}
};

static void init_connect_psi_keys()
{
  const char* category= "connect";
  int count;

  if (PSI_server == NULL)
    return;

  count= array_elements(all_connect_mutexes);
  PSI_server->register_mutex(category, all_connect_mutexes, count);
}
#else
static void init_connect_psi_keys() {}
#endif


DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR name, LPCSTR dir)
{
  const char *res= PlugSetPath(to, mysql_data_home, name, dir);
  return res;
}

/**
  @brief
  If frm_error() is called then we will use this to determine
  the file extensions that exist for the storage engine. This is also
  used by the default rename_table and delete_table method in
  handler.cc.

  For engines that have two file name extensions (separate meta/index file
  and data file), the order of elements is relevant. First element of engine
  file name extensions array should be meta/index file extension. Second
  element - data file extension. This order is assumed by
  prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued.

  @see
  rename_table method in handler.cc and
  delete_table method in handler.cc
*/
static const char *ha_connect_exts[]= {
  ".dos", ".fix", ".csv", ".bin", ".fmt", ".dbf", ".xml", ".json", ".ini",
  ".vec", ".dnx", ".fnx", ".bnx", ".vnx", ".dbx", ".dop", ".fop", ".bop",
  ".vop", NULL};

/**
  @brief
  Plugin initialization
*/
static int connect_init_func(void *p)
{
  DBUG_ENTER("connect_init_func");

// added from Sergei mail
#if 0 // (defined(LINUX))
  Dl_info dl_info;
  if (dladdr(&connect_hton, &dl_info))
  {
    if (dlopen(dl_info.dli_fname, RTLD_NOLOAD | RTLD_NOW | RTLD_GLOBAL) == 0)
    {
      sql_print_information("CONNECT: dlopen() failed, OEM table type is not supported");
      sql_print_information("CONNECT: %s", dlerror());
    }
  }
  else
  {
    sql_print_information("CONNECT: dladdr() failed, OEM table type is not supported");
    sql_print_information("CONNECT: %s", dlerror());
  }
#endif   // 0 (LINUX)

#if defined(__WIN__)
  sql_print_information("CONNECT: %s", compver);
#else   // !__WIN__
  sql_print_information("CONNECT: %s", version);
#endif  // !__WIN__

#ifdef LIBXML2_SUPPORT
  XmlInitParserLib();
#endif   // LIBXML2_SUPPORT

  init_connect_psi_keys();

  connect_hton= (handlerton *)p;
  connect_hton->state= SHOW_OPTION_YES;
  connect_hton->create= connect_create_handler;
//connect_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | HTON_NO_PARTITION;
  connect_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED;
  connect_hton->table_options= connect_table_option_list;
  connect_hton->field_options= connect_field_option_list;
  connect_hton->index_options= connect_index_option_list;
  connect_hton->tablefile_extensions= ha_connect_exts;
  connect_hton->discover_table_structure= connect_assisted_discovery;

  if (trace)
    sql_print_information("connect_init: hton=%p", p);

  DTVAL::SetTimeShift();      // Initialize time zone shift once for all
  BINCOL::SetEndian();        // Initialize host endian setting
  DBUG_RETURN(0);
} // end of connect_init_func

/**
  @brief
  Plugin clean up
*/
static int connect_done_func(void *)
{
  int error= 0;
  PCONNECT pc, pn;
  DBUG_ENTER("connect_done_func");

#ifdef LIBXML2_SUPPORT
  XmlCleanupParserLib();
#endif   // LIBXML2_SUPPORT

#if !defined(__WIN__)
//PROFILE_End();              Causes signal 11
#endif   // !__WIN__

  for (pc= user_connect::to_users; pc; pc= pn) {
    if (pc->g)
      PlugCleanup(pc->g, true);

    pn= pc->next;
    delete pc;
  } // endfor pc

  connect_hton= NULL;
  DBUG_RETURN(error);
} // end of connect_done_func

/**
  @brief
  Example of simple lock controls. The "share" it creates is a
  structure we will pass to each CONNECT handler. Do you have to have
  one of these? Well, you have pieces that are used for locking, and
  they are needed to function.
*/
CONNECT_SHARE *ha_connect::get_share()
{
  CONNECT_SHARE *tmp_share;

  lock_shared_ha_data();

  if (!(tmp_share= static_cast<CONNECT_SHARE*>(get_ha_share_ptr()))) {
    tmp_share= new CONNECT_SHARE;
    if (!tmp_share)
      goto err;
    mysql_mutex_init(con_key_mutex_CONNECT_SHARE_mutex,
                     &tmp_share->mutex, MY_MUTEX_INIT_FAST);
    set_ha_share_ptr(static_cast<Handler_share*>(tmp_share));
  } // endif tmp_share

 err:
  unlock_shared_ha_data();
  return tmp_share;
} // end of get_share

static handler* connect_create_handler(handlerton *hton,
                                       TABLE_SHARE *table,
                                       MEM_ROOT *mem_root)
{
  handler *h= new (mem_root) ha_connect(hton, table);

  if (trace)
    htrc("New CONNECT %p, table: %.*s\n", h,
         table ? table->table_name.length : 6,
         table ? table->table_name.str : "<null>");

  return h;
} // end of connect_create_handler

/****************************************************************************/
/*  ha_connect constructor.                                                 */
/****************************************************************************/
ha_connect::ha_connect(handlerton *hton, TABLE_SHARE *table_arg)
       :handler(hton, table_arg)
{
  hnum= ++num;
  xp= (table) ? GetUser(ha_thd(), NULL) : NULL;
  if (xp)
    xp->SetHandler(this);
#if defined(__WIN__)
  datapath= ".\\";
#else   // !__WIN__
  datapath= "./";
#endif  // !__WIN__
  tdbp= NULL;
  sdvalin1= sdvalin2= sdvalin3= sdvalin4= NULL;
  sdvalout= NULL;
  xmod= MODE_ANY;
  istable= false;
  *partname= 0;
  bzero((char*) &xinfo, sizeof(XINFO));
  valid_info= false;
  valid_query_id= 0;
  creat_query_id= (table && table->in_use) ? table->in_use->query_id : 0;
  stop= false;
  alter= false;
  mrr= false;
  nox= true;
  abort= false;
  indexing= -1;
  locked= 0;
  part_id= NULL;
  data_file_name= NULL;
  index_file_name= NULL;
  enable_activate_all_index= 0;
  int_table_flags= (HA_NO_TRANSACTIONS | HA_NO_PREFIX_CHAR_KEYS);
  ref_length= sizeof(int);
  share= NULL;
  tshp= NULL;
} // end of ha_connect constructor

/****************************************************************************/
/*  ha_connect destructor.                                                  */
/****************************************************************************/
ha_connect::~ha_connect(void)
{
  if (trace)
    htrc("Delete CONNECT %p, table: %.*s, xp=%p count=%d\n", this,
         table ? table->s->table_name.length : 6,
         table ? table->s->table_name.str : "<null>",
         xp, xp ? xp->count : 0);

  if (xp) {
    PCONNECT p;

    xp->count--;

    for (p= user_connect::to_users; p; p= p->next)
      if (p == xp)
        break;

    if (p && !p->count) {
      if (p->next)
        p->next->previous= p->previous;

      if (p->previous)
        p->previous->next= p->next;
      else
        user_connect::to_users= p->next;

    } // endif p

    if (!xp->count) {
      PlugCleanup(xp->g, true);
      delete xp;
    } // endif count

  } // endif xp

} // end of ha_connect destructor

/****************************************************************************/
/*  Get a pointer to the user of this handler.                              */
/****************************************************************************/
static PCONNECT GetUser(THD *thd, PCONNECT xp)
{
  if (!thd)
    return NULL;

  if (xp && thd == xp->thdp)
    return xp;

  for (xp= user_connect::to_users; xp; xp= xp->next)
    if (thd == xp->thdp)
      break;

  if (!xp) {
    xp= new user_connect(thd);

    if (xp->user_init()) {
      delete xp;
      xp= NULL;
    } // endif user_init

  } else
    xp->count++;

  return xp;
} // end of GetUser

/****************************************************************************/
/*  Get the global pointer of the user of this handler.                     */
/****************************************************************************/
static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp)
{
  lxp= GetUser(thd, lxp);
  return (lxp) ? lxp->g : NULL;
} // end of GetPlug

/****************************************************************************/
/*  Get the implied table type.                                             */
/****************************************************************************/
TABTYPE ha_connect::GetRealType(PTOS pos)
{
  TABTYPE type;

  if (pos || (pos= GetTableOptionStruct())) {
    type= GetTypeID(pos->type);

    if (type == TAB_UNDEF)
      type= pos->srcdef ? TAB_MYSQL : pos->tabname ? TAB_PRX : TAB_DOS;

  } else
    type= TAB_UNDEF;

  return type;
} // end of GetRealType
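
/*
  Added note: when TABLE_TYPE is omitted, the implied type logic above means
  that a table created with only SRCDEF='select ...' behaves as TAB_MYSQL, one
  created with only TABNAME='t' behaves as a proxy (TAB_PRX), and a bare
  "CREATE TABLE t (...) ENGINE=CONNECT" defaults to TAB_DOS. The SQL snippets
  are illustrative assumptions, not taken from this source file.
*/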

/** @brief
  The name of the index type that will be used for display.
  Don't implement this method unless you really have indexes.
*/
const char *ha_connect::index_type(uint inx)
{
  switch (GetIndexType(GetRealType())) {
    case 1:
      if (table_share)
        return (GetIndexOption(&table_share->key_info[inx], "Dynamic"))
             ? "KINDEX" : "XINDEX";
      else
        return "XINDEX";

    case 2: return "REMOTE";
    case 3: return "VIRTUAL";
  } // endswitch

  return "Unknown";
} // end of index_type

/** @brief
  This is a bitmap of flags that indicates how the storage engine
  implements indexes. The current index flags are documented in
  handler.h. If you do not implement indexes, just return zero here.

  @details
  part is the key part to check. First key part is 0.
  If all_parts is set, MySQL wants to know the flags for the combined
  index, up to and including 'part'.
*/
//ong ha_connect::index_flags(uint inx, uint part, bool all_parts) const
ulong ha_connect::index_flags(uint, uint, bool) const
{
  ulong       flags= HA_READ_NEXT | HA_READ_RANGE |
                     HA_KEYREAD_ONLY | HA_KEY_SCAN_NOT_ROR;
  ha_connect *hp= (ha_connect*)this;
  PTOS        pos= hp->GetTableOptionStruct();

  if (pos) {
    TABTYPE type= hp->GetRealType(pos);

    switch (GetIndexType(type)) {
      case 1: flags|= (HA_READ_ORDER | HA_READ_PREV); break;
      case 2: flags|= HA_READ_AFTER_KEY;              break;
    } // endswitch

  } // endif pos

  return flags;
} // end of index_flags

/** @brief
  This is a list of flags that indicate what functionality the storage
  engine implements. The current table flags are documented in handler.h
*/
ulonglong ha_connect::table_flags() const
{
  ulonglong   flags= HA_CAN_VIRTUAL_COLUMNS | HA_REC_NOT_IN_SEQ |
                     HA_NO_AUTO_INCREMENT | HA_NO_PREFIX_CHAR_KEYS |
                     HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
                     HA_PARTIAL_COLUMN_READ | HA_FILE_BASED |
//                   HA_NULL_IN_KEY |    not implemented yet
//                   HA_FAST_KEY_READ |  causes error when sorting (???)
                     HA_NO_TRANSACTIONS | HA_DUPLICATE_KEY_NOT_IN_ORDER |
                     HA_NO_BLOBS | HA_MUST_USE_TABLE_CONDITION_PUSHDOWN;
  ha_connect *hp= (ha_connect*)this;
  PTOS        pos= hp->GetTableOptionStruct();

  if (pos) {
    TABTYPE type= hp->GetRealType(pos);

    if (IsFileType(type))
      flags|= HA_FILE_BASED;

    if (IsExactType(type))
      flags|= (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT);

    // No data change on ALTER for outward tables
    if (!IsFileType(type) || hp->FileExists(pos->filename, true))
      flags|= HA_NO_COPY_ON_ALTER;

  } // endif pos

  return flags;
} // end of table_flags

/****************************************************************************/
/*  Return the value of an option specified in an option list.              */
/****************************************************************************/
char *GetListOption(PGLOBAL g, const char *opname,
                    const char *oplist, const char *def)
{
  if (!oplist)
    return (char*)def;

  char  key[16], val[256];
  char *pk, *pv, *pn;
  char *opval= (char*)def;
  int   n;

  for (pk= (char*)oplist; pk; pk= ++pn) {
    pn= strchr(pk, ',');
    pv= strchr(pk, '=');

    if (pv && (!pn || pv < pn)) {
      n= pv - pk;
      memcpy(key, pk, n);
      key[n]= 0;
      pv++;

      if (pn) {
        n= pn - pv;
        memcpy(val, pv, n);
        val[n]= 0;
      } else
        strcpy(val, pv);

    } else {
      if (pn) {
        n= MY_MIN(pn - pk, 15);
        memcpy(key, pk, n);
        key[n]= 0;
      } else
        strcpy(key, pk);

      val[0]= 0;
    } // endif pv

    if (!stricmp(opname, key)) {
      opval= PlugDup(g, val);
      break;
    } else if (!pn)
      break;

  } // endfor pk

  return opval;
} // end of GetListOption
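
/*
  Usage note (added): OPTION_LIST values are parsed here as comma separated
  name=value pairs, matched case-insensitively, with the supplied default
  returned when the key is absent. For instance (values are made-up):

    oplist= "host=localhost,port=3306,Readonly"
    GetListOption(g, "port", oplist, "0")    -> "3306"
    GetListOption(g, "user", oplist, "root") -> "root"  (default, key missing)
    GetListOption(g, "readonly", oplist, "") -> ""      (key without a value)
*/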
|
|
|
|
|
2015-05-26 01:02:33 +02:00
|
|
|
/****************************************************************************/
|
|
|
|
/* Return the value of a string option or NULL if not specified. */
|
|
|
|
/****************************************************************************/
|
|
|
|
char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef)
|
|
|
|
{
|
|
|
|
const char *opval= NULL;
|
|
|
|
|
|
|
|
if (!options)
|
|
|
|
return sdef;
|
|
|
|
else if (!stricmp(opname, "Type"))
|
|
|
|
opval= options->type;
|
|
|
|
else if (!stricmp(opname, "Filename"))
|
|
|
|
opval= options->filename;
|
|
|
|
else if (!stricmp(opname, "Optname"))
|
|
|
|
opval= options->optname;
|
|
|
|
else if (!stricmp(opname, "Tabname"))
|
|
|
|
opval= options->tabname;
|
|
|
|
else if (!stricmp(opname, "Tablist"))
|
|
|
|
opval= options->tablist;
|
|
|
|
else if (!stricmp(opname, "Database") ||
|
|
|
|
!stricmp(opname, "DBname"))
|
|
|
|
opval= options->dbname;
|
|
|
|
else if (!stricmp(opname, "Separator"))
|
|
|
|
opval= options->separator;
|
|
|
|
else if (!stricmp(opname, "Qchar"))
|
|
|
|
opval= options->qchar;
|
|
|
|
else if (!stricmp(opname, "Module"))
|
|
|
|
opval= options->module;
|
|
|
|
else if (!stricmp(opname, "Subtype"))
|
|
|
|
opval= options->subtype;
|
|
|
|
else if (!stricmp(opname, "Catfunc"))
|
|
|
|
opval= options->catfunc;
|
|
|
|
else if (!stricmp(opname, "Srcdef"))
|
|
|
|
opval= options->srcdef;
|
|
|
|
else if (!stricmp(opname, "Colist"))
|
|
|
|
opval= options->colist;
|
|
|
|
else if (!stricmp(opname, "Data_charset"))
|
|
|
|
opval= options->data_charset;
|
|
|
|
|
|
|
|
if (!opval && options && options->oplist)
|
|
|
|
opval= GetListOption(g, opname, options->oplist);
|
|
|
|
|
|
|
|
return opval ? (char*)opval : sdef;
|
|
|
|
} // end of GetStringTableOption
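/****************************************************************************/
/*  Usage sketch (illustrative only, not compiled): named options such as  */
/*  "Filename" or "Database" are read from the option structure; any other */
/*  name is searched in the OPTION_LIST string and sdef is returned when   */
/*  nothing matches. "Myopt" below is a hypothetical option list entry.    */
/****************************************************************************/
#if 0 // example only
  {
  char *fn= GetStringTableOption(g, options, (char*)"Filename", NULL);
  char *mo= GetStringTableOption(g, options, (char*)"Myopt", (char*)"none");
  } // end of example
#endif // 0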
|
|
|
|
|
|
|
|
/****************************************************************************/
|
|
|
|
/* Return the value of a Boolean option or bdef if not specified. */
|
|
|
|
/****************************************************************************/
|
|
|
|
bool GetBooleanTableOption(PGLOBAL g, PTOS options, char *opname, bool bdef)
|
|
|
|
{
|
|
|
|
bool opval= bdef;
|
|
|
|
char *pv;
|
|
|
|
|
|
|
|
if (!options)
|
|
|
|
return bdef;
|
|
|
|
else if (!stricmp(opname, "Mapped"))
|
|
|
|
opval= options->mapped;
|
|
|
|
else if (!stricmp(opname, "Huge"))
|
|
|
|
opval= options->huge;
|
|
|
|
else if (!stricmp(opname, "Split"))
|
|
|
|
opval= options->split;
|
|
|
|
else if (!stricmp(opname, "Readonly"))
|
|
|
|
opval= options->readonly;
|
|
|
|
else if (!stricmp(opname, "SepIndex"))
|
|
|
|
opval= options->sepindex;
|
|
|
|
else if (!stricmp(opname, "Header"))
|
|
|
|
opval= (options->header != 0); // Is Boolean for some table types
|
|
|
|
else if (options->oplist)
|
|
|
|
if ((pv= GetListOption(g, opname, options->oplist)))
|
|
|
|
opval= (!*pv || *pv == 'y' || *pv == 'Y' || atoi(pv) != 0);
|
|
|
|
|
|
|
|
return opval;
|
|
|
|
} // end of GetBooleanTableOption
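/****************************************************************************/
/*  Usage sketch (illustrative only, not compiled): for names that are not */
/*  known structure members, the option list text is interpreted; an empty */
/*  value, a value beginning with 'y' or 'Y', or a non-zero number means   */
/*  true. Assuming a hypothetical OPTION_LIST='zipped=yes,cached=0':       */
/****************************************************************************/
#if 0 // example only
  {
  bool zipped= GetBooleanTableOption(g, options, (char*)"zipped", false); // true
  bool cached= GetBooleanTableOption(g, options, (char*)"cached", true);  // false
  } // end of example
#endif // 0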
|
|
|
|
|
|
|
|
/****************************************************************************/
|
|
|
|
/*  Return the value of an integer option or idef if not specified.        */
|
|
|
|
/****************************************************************************/
|
|
|
|
int GetIntegerTableOption(PGLOBAL g, PTOS options, char *opname, int idef)
|
|
|
|
{
|
2015-06-02 10:34:51 +02:00
|
|
|
ulonglong opval= NO_IVAL;
|
2015-05-26 01:02:33 +02:00
|
|
|
|
|
|
|
if (!options)
|
|
|
|
return idef;
|
|
|
|
else if (!stricmp(opname, "Lrecl"))
|
|
|
|
opval= options->lrecl;
|
|
|
|
else if (!stricmp(opname, "Elements"))
|
|
|
|
opval= options->elements;
|
|
|
|
else if (!stricmp(opname, "Multiple"))
|
|
|
|
opval= options->multiple;
|
|
|
|
else if (!stricmp(opname, "Header"))
|
|
|
|
opval= options->header;
|
|
|
|
else if (!stricmp(opname, "Quoted"))
|
|
|
|
opval= options->quoted;
|
|
|
|
else if (!stricmp(opname, "Ending"))
|
|
|
|
opval= options->ending;
|
|
|
|
else if (!stricmp(opname, "Compressed"))
|
|
|
|
opval= (options->compressed);
|
|
|
|
|
2015-10-09 12:01:07 +02:00
|
|
|
if ((ulonglong) opval == (ulonglong)NO_IVAL) {
|
2015-05-26 01:02:33 +02:00
|
|
|
char *pv;
|
|
|
|
|
|
|
|
if ((pv= GetListOption(g, opname, options->oplist)))
|
|
|
|
opval= CharToNumber(pv, strlen(pv), ULONGLONG_MAX, true);
|
|
|
|
else
|
|
|
|
return idef;
|
|
|
|
|
|
|
|
} // endif opval
|
|
|
|
|
|
|
|
return (int)opval;
|
|
|
|
} // end of GetIntegerTableOption
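/****************************************************************************/
/*  Usage sketch (illustrative only, not compiled): known names (Lrecl,    */
/*  Elements, Multiple, Header, Quoted, Ending, Compressed) come from the  */
/*  option structure; any other name is searched in the option list and    */
/*  converted with CharToNumber, else idef is returned. "Port" below is a  */
/*  hypothetical option list entry.                                        */
/****************************************************************************/
#if 0 // example only
  {
  int lrecl= GetIntegerTableOption(g, options, (char*)"Lrecl", 0);
  int port=  GetIntegerTableOption(g, options, (char*)"Port", 3306);
  } // end of example
#endif // 0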
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
/****************************************************************************/
|
|
|
|
/* Return the table option structure. */
|
|
|
|
/****************************************************************************/
|
2014-04-22 19:15:08 +02:00
|
|
|
PTOS ha_connect::GetTableOptionStruct(TABLE_SHARE *s)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
2014-04-22 19:15:08 +02:00
|
|
|
TABLE_SHARE *tsp= (tshp) ? tshp : (s) ? s : table_share;
|
|
|
|
|
2015-10-20 18:49:33 +02:00
|
|
|
return (tsp && (!tsp->db_plugin ||
|
|
|
|
!stricmp(plugin_name(tsp->db_plugin)->str, "connect") ||
|
|
|
|
!stricmp(plugin_name(tsp->db_plugin)->str, "partition")))
|
|
|
|
? tsp->option_struct : NULL;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // end of GetTableOptionStruct
|
|
|
|
|
This is a new version of the CONNECT storage engine. It was developed in
a sub-branch of this one and merged by pushing all the changes from it.
This version adds the following to CONNECT:
- MRR support (similar to the MyISAM one)
- Block, Remote and dynamic indexing
- Partitioning support (using the PARTITION engine)
Here is a list of the committed changes made in the sub-branch:
========================================================================
------------------------------------------------------------
revno: 4009
committer: Olivier Bertrand <bertrandop@gmail.com>
branch nick: 10.0-connect
timestamp: Thu 2014-07-17 18:13:51 +0200
message:
This commit brings many changes, in particular two important ones:
1) Support of partitioning by connect. A table can be partitioned
by files, this is an enhanced MULTIPLE table. It can be also
partitioned by sub-tables like TBL and this enables table sharding.
2) Handling a CONNECT bug that causes in some cases extraneous rows
to remain in the table after an UPDATE or DELETE when the command
uses indexing (for not fixed file tables). Until a real fix is
    done, CONNECT tries to ignore indexing and, if it cannot,
    aborts the command with an error message.
- Add tests on partitioning
added:
storage/connect/mysql-test/connect/r/part_file.result
storage/connect/mysql-test/connect/r/part_table.result
storage/connect/mysql-test/connect/t/part_file.test
storage/connect/mysql-test/connect/t/part_table.test
- Temporary fix
modified:
sql/sql_partition.cc
- Add partition support
modified:
storage/connect/ha_connect.cc
storage/connect/ha_connect.h
storage/connect/reldef.cpp
storage/connect/reldef.h
storage/connect/tabdos.cpp
- Add functions ha_connect::IsUnique and ha_connect::CheckColumnList
modified:
storage/connect/ha_connect.cc
storage/connect/ha_connect.h
- Prevent updating a partition table column that is part of
the partition function (outward tables only)
modified:
storage/connect/ha_connect.cc
- Support INSERT/UPDATE/DELETE for PROXY tables
modified:
storage/connect/tabutil.cpp
- Handle the bug on updating rows via indexing. Waiting for a real fix,
  avoid indexing when possible, else raise an error and abort the command.
modified:
storage/connect/ha_connect.cc
- dbuserp->UseTemp set to TMP_AUTO
modified:
storage/connect/connect.cc
- Add members nox, abort and only
modified:
storage/connect/ha_connect.cc
storage/connect/ha_connect.h
- Add arguments nox and abort to CntCloseTable
modified:
storage/connect/connect.cc
storage/connect/connect.h
storage/connect/filamap.cpp
storage/connect/filamap.h
storage/connect/filamdbf.cpp
storage/connect/filamdbf.h
storage/connect/filamfix.cpp
storage/connect/filamfix.h
storage/connect/filamtxt.cpp
storage/connect/filamtxt.h
storage/connect/filamvct.cpp
storage/connect/filamvct.h
storage/connect/filamzip.cpp
storage/connect/filamzip.h
storage/connect/ha_connect.cc
- Add arguments abort to CloseTableFile and RenameTempFile
modified:
storage/connect/filamap.cpp
storage/connect/filamap.h
storage/connect/filamdbf.cpp
storage/connect/filamdbf.h
storage/connect/filamfix.cpp
storage/connect/filamfix.h
storage/connect/filamtxt.cpp
storage/connect/filamtxt.h
storage/connect/filamvct.cpp
storage/connect/filamvct.h
storage/connect/filamzip.cpp
storage/connect/filamzip.h
storage/connect/tabdos.cpp
storage/connect/tabdos.h
storage/connect/tabvct.cpp
storage/connect/xtable.h
- Fix info->records when the file does not exist
modified:
storage/connect/connect.cc
- Close XML table when opened for info
modified:
storage/connect/connect.cc
- Add function VCTFAM::GetFileLength
modified:
storage/connect/filamvct.cpp
storage/connect/filamvct.h
- Column option DISTRIB -> ENUM
modified:
storage/connect/ha_connect.cc
- Options connect, query_string and partname always available
modified:
storage/connect/ha_connect.cc
- Add function MYSQLC::GetTableSize
modified:
storage/connect/myconn.cpp
storage/connect/myconn.h
- Add new special columns (PARTNAME, FNAME, FPATH, FTYPE and FDISK)
modified:
storage/connect/colblk.cpp
storage/connect/colblk.h
storage/connect/plgdbsem.h
storage/connect/table.cpp
- Add function ExtractFromPath
modified:
storage/connect/colblk.cpp
storage/connect/plgdbsem.h
storage/connect/plgdbutl.cpp
- Enhance Cardinality for some table types
modified:
storage/connect/tabdos.cpp
storage/connect/tabmysql.cpp
storage/connect/tabmysql.h
storage/connect/tabodbc.cpp
storage/connect/tabodbc.h
storage/connect/tabsys.cpp
storage/connect/tabsys.h
storage/connect/xindex.cpp
storage/connect/xindex.h
storage/connect/xtable.h
- Add test on special column
modified:
storage/connect/tabfmt.cpp
- Add new files (added for block indexing)
modified:
storage/connect/CMakeLists.txt
------------------------------------------------------------
revno: 4007 [merge]
committer: Olivier Bertrand <bertrandop@gmail.com>
branch nick: 10.0-connect
timestamp: Sat 2014-05-31 12:31:26 +0200
message:
- Begin adding support of partition tables
modified:
storage/connect/ha_connect.cc
storage/connect/ha_connect.h
storage/connect/reldef.cpp
- Add INSERT/UPDATE support to PROXY tables
modified:
storage/connect/tabutil.cpp
storage/connect/tabutil.h
- Take care of SPECIAL columns
modified:
storage/connect/filamdbf.cpp
storage/connect/reldef.h
storage/connect/tabfmt.cpp
-Typo and misc
modified:
storage/connect/odbconn.cpp
storage/connect/tabfix.cpp
storage/connect/xindex.cpp
------------------------------------------------------------
revno: 4006
committer: Olivier Bertrand <bertrandop@gmail.com>
branch nick: 10.0-connect
timestamp: Sat 2014-05-10 12:21:08 +0200
message:
- FIX some MAP and XMAP errors (such as mapped indexes not closed)
Do not put version in XML files header
Remove HTON_NO_PARTITION for testing
Fix a wrong return (instead of DBUG_RETURN) in index_init
Plus a few typos
modified:
storage/connect/connect.cc
storage/connect/filter.cpp
storage/connect/ha_connect.cc
storage/connect/maputil.cpp
storage/connect/mysql-test/connect/r/alter_xml.result
storage/connect/mysql-test/connect/r/xml.result
storage/connect/table.cpp
storage/connect/tabxml.cpp
storage/connect/xindex.cpp
storage/connect/xindex.h
storage/connect/xtable.h
------------------------------------------------------------
revno: 4005
committer: Olivier Bertrand <bertrandop@gmail.com>
branch nick: 10.0-connect
timestamp: Fri 2014-05-02 15:55:45 +0200
message:
- Adding fetched columns to Dynamic index key (unique only)
Fix two bugs concerning added KXYCOL's:
1 - Not set during reading
2 - Val_K not set in FastFind
modified:
storage/connect/connect.cc
storage/connect/filamtxt.h
storage/connect/tabdos.cpp
storage/connect/tabfix.cpp
storage/connect/table.cpp
storage/connect/valblk.h
storage/connect/xindex.cpp
storage/connect/xindex.h
storage/connect/xtable.h
------------------------------------------------------------
revno: 4003
committer: Olivier Bertrand <bertrandop@gmail.com>
branch nick: 10.0-connect
timestamp: Wed 2014-04-30 10:48:29 +0200
message:
- Implementation of adding selected columns to dynamic indexes.
modified:
storage/connect/connect.cc
storage/connect/ha_connect.cc
storage/connect/ha_connect.h
storage/connect/tabdos.cpp
storage/connect/tabdos.h
storage/connect/tabvct.cpp
storage/connect/tabvct.h
storage/connect/xindex.cpp
storage/connect/xindex.h
------------------------------------------------------------
revno: 4001
committer: Olivier Bertrand <bertrandop@gmail.com>
branch nick: 10.0-connect
timestamp: Sat 2014-04-26 00:17:26 +0200
message:
- Implement dynamic indexing
modified:
storage/connect/connect.cc
storage/connect/filter.cpp
storage/connect/filter.h
storage/connect/ha_connect.cc
storage/connect/ha_connect.h
storage/connect/tabdos.cpp
storage/connect/tabdos.h
storage/connect/table.cpp
storage/connect/xindex.cpp
storage/connect/xindex.h
storage/connect/xtable.h
------------------------------------------------------------
revno: 3995
committer: Olivier Bertrand <bertrandop@gmail.com>
branch nick: 10.0-connect
timestamp: Sun 2014-03-23 18:49:19 +0100
message:
- Work in progress
modified:
storage/connect/filter.h
storage/connect/ha_connect.cc
storage/connect/ha_connect.h
storage/connect/mysql-test/connect/r/alter.result
storage/connect/mysql-test/connect/r/xml.result
------------------------------------------------------------
revno: 3991
committer: Olivier Bertrand <bertrandop@gmail.com>
branch nick: 10.0-connect
timestamp: Mon 2014-03-10 18:59:36 +0100
message:
- Adding files needed for block indexing
added:
storage/connect/array.cpp
storage/connect/array.h
storage/connect/blkfil.cpp
storage/connect/blkfil.h
storage/connect/filter.cpp
storage/connect/filter.h
========================================================================
This commit of the main branch adds:
- A change needed to have the engine function check_if_supported_inplace_alter
called for partition tables (was done manually in the sub-branch) by adding
the preparser define: PARTITION_SUPPORTS_INPLACE_ALTER
modified:
sql/CMakeLists.txt
- A fix concerning the FileExists function. It was needed to force the function
table_flags to return the same flags for all partitions. This is tested by
the partition engine and raises an error if flags are not equal.
The way file name, table name and connection string are retrieved has been
modified to cope with it.
modified:
storage/connect/ha_connect.cc
storage/connect/ha_connect.h
storage/connect/reldef.cpp
- A few typos, such as the version string.
modified:
storage/connect/ha_connect.cc
- Updating some test result files because some warnings are no longer raised.
modified:
storage/connect/mysql-test/connect/r/occur.result
storage/connect/mysql-test/connect/r/part_file.result
storage/connect/mysql-test/connect/r/pivot.result
2014-07-20 12:31:42 +02:00
|
|
|
/****************************************************************************/
|
|
|
|
/*  Return the string, possibly formatted with the partition name.         */
|
|
|
|
/****************************************************************************/
|
|
|
|
char *ha_connect::GetRealString(const char *s)
|
|
|
|
{
|
|
|
|
char *sv;
|
|
|
|
|
|
|
|
if (IsPartitioned() && s) {
|
2015-01-30 10:57:00 +01:00
|
|
|
sv= (char*)PlugSubAlloc(xp->g, NULL, 0);
|
2014-07-20 12:31:42 +02:00
|
|
|
sprintf(sv, s, partname);
|
2015-01-30 10:57:00 +01:00
|
|
|
PlugSubAlloc(xp->g, NULL, strlen(sv) + 1);
|
2014-07-20 12:31:42 +02:00
|
|
|
} else
|
|
|
|
sv= (char*)s;
|
|
|
|
|
|
|
|
return sv;
|
|
|
|
} // end of GetRealString
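/****************************************************************************/
/*  Usage sketch (illustrative only, not compiled): for a partitioned      */
/*  table, a "%s" marker in the string is replaced by the current          */
/*  partition name via sprintf; otherwise the string is returned as is.    */
/*  The file name pattern below is hypothetical.                           */
/****************************************************************************/
#if 0 // example only
  {
  // With partname == "p1" this returns "emp_p1.csv".
  char *fn= GetRealString("emp_%s.csv");
  } // end of example
#endif // 0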
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
/****************************************************************************/
|
2015-05-26 01:02:33 +02:00
|
|
|
/* Return the value of a string option or sdef if not specified. */
|
2013-02-07 10:34:27 +01:00
|
|
|
/****************************************************************************/
|
|
|
|
char *ha_connect::GetStringOption(char *opname, char *sdef)
|
|
|
|
{
|
|
|
|
char *opval= NULL;
|
2014-04-22 19:15:08 +02:00
|
|
|
PTOS options= GetTableOptionStruct();
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-07-17 18:13:51 +02:00
|
|
|
if (!stricmp(opname, "Connect")) {
|
2014-10-21 17:29:51 +02:00
|
|
|
LEX_STRING cnc= (tshp) ? tshp->connect_string
|
|
|
|
: table->s->connect_string;
|
2014-07-17 18:13:51 +02:00
|
|
|
|
|
|
|
if (cnc.length)
|
2014-11-08 13:35:03 +01:00
|
|
|
opval= GetRealString(strz(xp->g, cnc));
|
2014-07-17 18:13:51 +02:00
|
|
|
|
|
|
|
} else if (!stricmp(opname, "Query_String"))
|
|
|
|
opval= thd_query_string(table->in_use)->str;
|
|
|
|
else if (!stricmp(opname, "Partname"))
|
|
|
|
opval= partname;
|
2015-01-06 10:18:04 +01:00
|
|
|
else if (!stricmp(opname, "Table_charset")) {
|
|
|
|
const CHARSET_INFO *chif= (tshp) ? tshp->table_charset
|
|
|
|
: table->s->table_charset;
|
|
|
|
|
|
|
|
if (chif)
|
|
|
|
opval= (char*)chif->csname;
|
|
|
|
|
2015-05-26 01:02:33 +02:00
|
|
|
} else
|
|
|
|
opval= GetStringTableOption(xp->g, options, opname, NULL);
|
2015-01-27 12:50:50 +01:00
|
|
|
|
2015-05-26 01:02:33 +02:00
|
|
|
if (opval && (!stricmp(opname, "connect")
|
|
|
|
|| !stricmp(opname, "tabname")
|
|
|
|
|| !stricmp(opname, "filename")))
|
|
|
|
opval = GetRealString(opval);
|
2015-01-27 12:50:50 +01:00
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
if (!opval) {
|
|
|
|
if (sdef && !strcmp(sdef, "*")) {
|
|
|
|
// Return the handler default value
|
2013-02-22 17:26:08 +01:00
|
|
|
if (!stricmp(opname, "Dbname") || !stricmp(opname, "Database"))
|
2013-02-07 10:34:27 +01:00
|
|
|
opval= (char*)GetDBName(NULL); // Current database
|
2013-05-24 00:19:26 +02:00
|
|
|
else if (!stricmp(opname, "Type")) // Default type
|
2014-04-19 11:11:30 +02:00
|
|
|
opval= (!options) ? NULL :
|
2013-06-03 14:43:47 +02:00
|
|
|
(options->srcdef) ? (char*)"MYSQL" :
|
|
|
|
(options->tabname) ? (char*)"PROXY" : (char*)"DOS";
|
2013-04-29 13:50:20 +02:00
|
|
|
else if (!stricmp(opname, "User")) // Connected user
|
2013-05-24 09:31:43 +02:00
|
|
|
opval= (char *) "root";
|
2013-04-29 13:50:20 +02:00
|
|
|
else if (!stricmp(opname, "Host")) // Connected user host
|
2013-05-24 09:31:43 +02:00
|
|
|
opval= (char *) "localhost";
|
2013-02-22 17:26:08 +01:00
|
|
|
else
|
|
|
|
opval= sdef; // Caller default
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
} else
|
|
|
|
opval= sdef; // Caller default
|
|
|
|
|
|
|
|
} // endif !opval
|
|
|
|
|
|
|
|
return opval;
|
|
|
|
} // end of GetStringOption
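/****************************************************************************/
/*  Usage sketch (illustrative only, not compiled): passing "*" as the     */
/*  default requests the handler built-in default value for some options.  */
/****************************************************************************/
#if 0 // example only
  {
  char *db=   GetStringOption((char*)"Database", (char*)"*"); // current DB name
  char *user= GetStringOption((char*)"User", (char*)"*");     // "root"
  char *type= GetStringOption((char*)"Type", (char*)"*");     // "MYSQL", "PROXY" or "DOS"
  } // end of example
#endif // 0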
|
|
|
|
|
|
|
|
/****************************************************************************/
|
|
|
|
/* Return the value of a Boolean option or bdef if not specified. */
|
|
|
|
/****************************************************************************/
|
|
|
|
bool ha_connect::GetBooleanOption(char *opname, bool bdef)
|
|
|
|
{
|
2015-05-26 01:02:33 +02:00
|
|
|
bool opval;
|
2014-04-22 19:15:08 +02:00
|
|
|
PTOS options= GetTableOptionStruct();
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-05-19 19:25:06 +02:00
|
|
|
if (!stricmp(opname, "View"))
|
2014-04-22 19:15:08 +02:00
|
|
|
opval= (tshp) ? tshp->is_view : table_share->is_view;
|
2015-05-26 01:02:33 +02:00
|
|
|
else
|
|
|
|
opval= GetBooleanTableOption(xp->g, options, opname, bdef);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
return opval;
|
|
|
|
} // end of GetBooleanOption
|
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
/****************************************************************************/
|
|
|
|
/* Set the value of the opname option (does not work for oplist options) */
|
|
|
|
/* Currently used only to set the Sepindex value. */
|
|
|
|
/****************************************************************************/
|
|
|
|
bool ha_connect::SetBooleanOption(char *opname, bool b)
|
|
|
|
{
|
2014-04-22 19:15:08 +02:00
|
|
|
PTOS options= GetTableOptionStruct();
|
2013-04-09 23:14:45 +02:00
|
|
|
|
|
|
|
if (!options)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (!stricmp(opname, "SepIndex"))
|
|
|
|
options->sepindex= b;
|
|
|
|
else
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
} // end of SetBooleanOption
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
/****************************************************************************/
|
|
|
|
/* Return the value of an integer option or NO_IVAL if not specified. */
|
|
|
|
/****************************************************************************/
|
|
|
|
int ha_connect::GetIntegerOption(char *opname)
|
|
|
|
{
|
2015-05-26 01:02:33 +02:00
|
|
|
int opval;
|
2014-07-22 15:51:21 +02:00
|
|
|
PTOS options= GetTableOptionStruct();
|
|
|
|
TABLE_SHARE *tsp= (tshp) ? tshp : table_share;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-07-22 15:51:21 +02:00
|
|
|
if (!stricmp(opname, "Avglen"))
|
2015-05-26 01:02:33 +02:00
|
|
|
opval= (int)tsp->avg_row_length;
|
2014-07-22 15:51:21 +02:00
|
|
|
else if (!stricmp(opname, "Estimate"))
|
2015-05-26 01:02:33 +02:00
|
|
|
opval= (int)tsp->max_rows;
|
|
|
|
else
|
|
|
|
opval= GetIntegerTableOption(xp->g, options, opname, NO_IVAL);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2015-05-26 01:02:33 +02:00
|
|
|
return opval;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // end of GetIntegerOption
|
|
|
|
|
|
|
|
/****************************************************************************/
|
|
|
|
/* Set the value of the opname option (does not work for oplist options) */
|
|
|
|
/* Currently used only to set the Lrecl value. */
|
|
|
|
/****************************************************************************/
|
|
|
|
bool ha_connect::SetIntegerOption(char *opname, int n)
|
|
|
|
{
|
2014-04-22 19:15:08 +02:00
|
|
|
PTOS options= GetTableOptionStruct();
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
if (!options)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (!stricmp(opname, "Lrecl"))
|
|
|
|
options->lrecl= n;
|
|
|
|
else if (!stricmp(opname, "Elements"))
|
|
|
|
options->elements= n;
|
|
|
|
//else if (!stricmp(opname, "Estimate"))
|
|
|
|
// options->estimate= n;
|
|
|
|
else if (!stricmp(opname, "Multiple"))
|
|
|
|
options->multiple= n;
|
|
|
|
else if (!stricmp(opname, "Header"))
|
|
|
|
options->header= n;
|
|
|
|
else if (!stricmp(opname, "Quoted"))
|
|
|
|
options->quoted= n;
|
|
|
|
else if (!stricmp(opname, "Ending"))
|
|
|
|
options->ending= n;
|
|
|
|
else if (!stricmp(opname, "Compressed"))
|
|
|
|
options->compressed= n;
|
|
|
|
else
|
|
|
|
return true;
|
|
|
|
//else if (options->oplist)
|
|
|
|
// SetListOption(opname, options->oplist, n);
|
|
|
|
|
|
|
|
return false;
|
|
|
|
} // end of SetIntegerOption
|
|
|
|
|
|
|
|
/****************************************************************************/
|
|
|
|
/* Return a field option structure. */
|
|
|
|
/****************************************************************************/
|
|
|
|
PFOS ha_connect::GetFieldOptionStruct(Field *fdp)
|
|
|
|
{
|
|
|
|
return fdp->option_struct;
|
|
|
|
} // end of GetFieldOptionStruct
|
|
|
|
|
|
|
|
/****************************************************************************/
|
|
|
|
/* Returns the column description structure used to make the column. */
|
|
|
|
/****************************************************************************/
|
2013-07-05 13:13:45 +02:00
|
|
|
void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
const char *cp;
|
2014-03-30 22:52:54 +02:00
|
|
|
char *chset, v;
|
2013-02-07 10:34:27 +01:00
|
|
|
ha_field_option_struct *fop;
|
|
|
|
Field* fp;
|
|
|
|
Field* *fldp;
|
|
|
|
|
|
|
|
// Double test to be on the safe side
|
|
|
|
if (!table)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
// Find the column to describe
|
|
|
|
if (field) {
|
|
|
|
fldp= (Field**)field;
|
|
|
|
fldp++;
|
|
|
|
} else
|
|
|
|
fldp= (tshp) ? tshp->field : table->field;
|
|
|
|
|
2013-05-19 19:25:06 +02:00
|
|
|
if (!fldp || !(fp= *fldp))
|
2013-02-07 10:34:27 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
// Get the CONNECT field options structure
|
|
|
|
fop= GetFieldOptionStruct(fp);
|
|
|
|
pcf->Flags= 0;
|
|
|
|
|
|
|
|
// Now get column information
|
2013-08-09 18:02:47 +02:00
|
|
|
pcf->Name= (char*)fp->field_name;
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
if (fop && fop->special) {
|
2013-08-09 18:02:47 +02:00
|
|
|
pcf->Fieldfmt= (char*)fop->special;
|
|
|
|
pcf->Flags= U_SPECIAL;
|
2013-02-07 10:34:27 +01:00
|
|
|
return fldp;
|
2013-08-09 18:02:47 +02:00
|
|
|
} // endif special
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-12-28 15:46:49 +01:00
|
|
|
pcf->Scale= 0;
|
2014-04-19 11:11:30 +02:00
|
|
|
pcf->Opt= (fop) ? (int)fop->opt : 0;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
if ((pcf->Length= fp->field_length) < 0)
|
|
|
|
pcf->Length= 256; // BLOB?
|
|
|
|
|
2013-12-28 15:46:49 +01:00
|
|
|
pcf->Precision= pcf->Length;
|
|
|
|
|
2013-03-07 18:53:41 +01:00
|
|
|
if (fop) {
|
2013-04-29 13:50:20 +02:00
|
|
|
pcf->Offset= (int)fop->offset;
|
2014-04-19 11:11:30 +02:00
|
|
|
pcf->Freq= (int)fop->freq;
|
2013-03-07 18:53:41 +01:00
|
|
|
pcf->Datefmt= (char*)fop->dateformat;
|
|
|
|
pcf->Fieldfmt= (char*)fop->fieldformat;
|
|
|
|
} else {
|
|
|
|
pcf->Offset= -1;
|
2014-04-19 11:11:30 +02:00
|
|
|
pcf->Freq= 0;
|
2013-03-07 18:53:41 +01:00
|
|
|
pcf->Datefmt= NULL;
|
|
|
|
pcf->Fieldfmt= NULL;
|
|
|
|
} // endif fop
|
|
|
|
|
2014-03-30 22:52:54 +02:00
|
|
|
chset = (char *)fp->charset()->name;
|
|
|
|
v = (!strcmp(chset, "binary")) ? 'B' : 0;
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
switch (fp->type()) {
|
|
|
|
case MYSQL_TYPE_BLOB:
|
|
|
|
case MYSQL_TYPE_VARCHAR:
|
2013-04-29 13:50:20 +02:00
|
|
|
case MYSQL_TYPE_VAR_STRING:
|
2013-02-07 10:34:27 +01:00
|
|
|
pcf->Flags |= U_VAR;
|
2013-06-29 01:10:31 +02:00
|
|
|
/* no break */
|
2013-08-11 14:21:38 +02:00
|
|
|
default:
|
2014-03-30 22:52:54 +02:00
|
|
|
pcf->Type= MYSQLtoPLG(fp->type(), &v);
|
2013-08-11 14:21:38 +02:00
|
|
|
break;
|
|
|
|
} // endswitch SQL type
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-08-11 14:21:38 +02:00
|
|
|
switch (pcf->Type) {
|
|
|
|
case TYPE_STRING:
|
2013-02-07 10:34:27 +01:00
|
|
|
      // Check whether the collation is case insensitive
|
|
|
|
cp= fp->charset()->name;
|
|
|
|
|
|
|
|
      // Check whether the collation name ends with _ci
|
|
|
|
if (!strcmp(cp + strlen(cp) - 3, "_ci")) {
|
2013-12-28 15:46:49 +01:00
|
|
|
pcf->Scale= 1; // Case insensitive
|
2014-04-19 11:11:30 +02:00
|
|
|
pcf->Opt= 0; // Prevent index opt until it is safe
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endif ci
|
|
|
|
|
|
|
|
break;
|
2013-12-28 15:46:49 +01:00
|
|
|
case TYPE_DOUBLE:
|
2014-04-21 12:57:10 +02:00
|
|
|
pcf->Scale= MY_MAX(MY_MIN(fp->decimals(), ((unsigned)pcf->Length - 2)), 0);
|
2013-12-28 15:46:49 +01:00
|
|
|
break;
|
|
|
|
case TYPE_DECIM:
|
|
|
|
pcf->Precision= ((Field_new_decimal*)fp)->precision;
|
2014-05-30 14:53:15 +02:00
|
|
|
pcf->Length= pcf->Precision;
|
2013-12-28 15:46:49 +01:00
|
|
|
pcf->Scale= fp->decimals();
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
2013-08-11 14:21:38 +02:00
|
|
|
case TYPE_DATE:
|
2013-03-05 19:30:40 +01:00
|
|
|
// Field_length is only used for DATE columns
|
2014-03-18 19:25:50 +01:00
|
|
|
if (fop && fop->fldlen)
|
2013-04-29 13:50:20 +02:00
|
|
|
pcf->Length= (int)fop->fldlen;
|
2014-04-19 11:11:30 +02:00
|
|
|
else {
|
2013-03-07 21:38:00 +01:00
|
|
|
int len;
|
|
|
|
|
|
|
|
if (pcf->Datefmt) {
|
|
|
|
// Find the (max) length produced by the date format
|
|
|
|
char buf[256];
|
2013-04-19 20:35:43 +02:00
|
|
|
PGLOBAL g= GetPlug(table->in_use, xp);
|
2013-03-07 21:38:00 +01:00
|
|
|
PDTP pdtp= MakeDateFormat(g, pcf->Datefmt, false, true, 0);
|
2013-04-21 16:38:08 +02:00
|
|
|
struct tm datm;
|
|
|
|
bzero(&datm, sizeof(datm));
|
|
|
|
datm.tm_mday= 12;
|
|
|
|
datm.tm_mon= 11;
|
|
|
|
datm.tm_year= 112;
|
2014-11-24 18:32:44 +01:00
|
|
|
        mktime(&datm); // set the other fields to get the proper day name
|
2013-03-07 21:38:00 +01:00
|
|
|
len= strftime(buf, 256, pdtp->OutFmt, &datm);
|
|
|
|
} else
|
|
|
|
len= 0;
|
2013-03-07 18:53:41 +01:00
|
|
|
|
2013-03-07 21:38:00 +01:00
|
|
|
// 11 is for signed numeric representation of the date
|
|
|
|
pcf->Length= (len) ? len : 11;
|
|
|
|
} // endelse
|
2013-03-05 19:30:40 +01:00
|
|
|
|
2014-10-09 17:23:37 +02:00
|
|
|
// For Value setting
|
|
|
|
pcf->Precision= MY_MAX(pcf->Precision, pcf->Length);
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
default:
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endswitch type
|
|
|
|
|
2013-12-03 22:59:40 +01:00
|
|
|
if (fp->flags & UNSIGNED_FLAG)
|
|
|
|
pcf->Flags |= U_UNSIGNED;
|
|
|
|
|
|
|
|
if (fp->flags & ZEROFILL_FLAG)
|
|
|
|
pcf->Flags |= U_ZEROFILL;
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
// This is used to skip null bit
|
|
|
|
if (fp->real_maybe_null())
|
|
|
|
pcf->Flags |= U_NULLS;
|
|
|
|
|
|
|
|
// Mark virtual columns as such
|
2015-11-24 22:20:32 +01:00
|
|
|
if (!fp->stored_in_db())
|
2013-02-07 10:34:27 +01:00
|
|
|
pcf->Flags |= U_VIRTUAL;
|
|
|
|
|
|
|
|
pcf->Key= 0; // Not used when called from MySQL
|
2013-07-04 23:13:07 +02:00
|
|
|
|
2013-07-05 13:13:45 +02:00
|
|
|
// Get the comment if any
|
2014-11-08 13:35:03 +01:00
|
|
|
if (fp->comment.str && fp->comment.length)
|
|
|
|
pcf->Remark= strz(g, fp->comment);
|
|
|
|
else
|
2013-07-05 13:13:45 +02:00
|
|
|
pcf->Remark= NULL;
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
return fldp;
|
|
|
|
} // end of GetColumnOption
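/****************************************************************************/
/*  Minimal sketch (illustrative only, not compiled) of how the display    */
/*  length of a DATE column is derived above when no FIELD_LENGTH is       */
/*  given: a fixed sample date is formatted with the output format made by */
/*  MakeDateFormat and the produced length is used, with 11 as fallback.   */
/*  DateFmtLength is a hypothetical helper written only for illustration.  */
/****************************************************************************/
#if 0 // example only
static int DateFmtLength(const char *outfmt)
  {
  char      buf[256];
  struct tm datm;
  int       len= 0;

  bzero(&datm, sizeof(datm));
  datm.tm_mday= 12;
  datm.tm_mon= 11;
  datm.tm_year= 112;                 // i.e. December 12, 2012
  mktime(&datm);                     // fill in the remaining fields

  if (outfmt)
    len= (int)strftime(buf, sizeof(buf), outfmt, &datm);

  return (len) ? len : 11;           // 11 = signed numeric date length
  } // end of DateFmtLength
#endif // 0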
|
|
|
|
|
2014-04-26 00:17:26 +02:00
|
|
|
/****************************************************************************/
|
|
|
|
/* Return an index option structure. */
|
|
|
|
/****************************************************************************/
|
|
|
|
PXOS ha_connect::GetIndexOptionStruct(KEY *kp)
|
|
|
|
{
|
|
|
|
return kp->option_struct;
|
|
|
|
} // end of GetIndexOptionStruct
|
|
|
|
|
2014-04-30 10:48:29 +02:00
|
|
|
/****************************************************************************/
|
|
|
|
/* Return a Boolean index option or false if not specified. */
|
|
|
|
/****************************************************************************/
|
|
|
|
bool ha_connect::GetIndexOption(KEY *kp, char *opname)
|
|
|
|
{
|
|
|
|
bool opval= false;
|
|
|
|
PXOS options= GetIndexOptionStruct(kp);
|
|
|
|
|
|
|
|
if (options) {
|
|
|
|
if (!stricmp(opname, "Dynamic"))
|
|
|
|
opval= options->dynamic;
|
|
|
|
else if (!stricmp(opname, "Mapped"))
|
|
|
|
opval= options->mapped;
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
} else if (kp->comment.str && kp->comment.length) {
|
2014-11-08 13:35:03 +01:00
|
|
|
char *pv, *oplist= strz(xp->g, kp->comment);
|
2014-04-30 10:48:29 +02:00
|
|
|
|
|
|
|
if ((pv= GetListOption(xp->g, opname, oplist)))
|
|
|
|
opval= (!*pv || *pv == 'y' || *pv == 'Y' || atoi(pv) != 0);
|
|
|
|
|
|
|
|
} // endif comment
|
|
|
|
|
|
|
|
return opval;
|
|
|
|
} // end of GetIndexOption
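/****************************************************************************/
/*  Note (illustrative only): when the index has no CONNECT option         */
/*  structure, Boolean index options can be given as an option list in the */
/*  index comment and are parsed by GetListOption. A hypothetical example: */
/****************************************************************************/
#if 0 // example only
//  CREATE TABLE t1 (a INT NOT NULL, KEY k1 (a) COMMENT 'dynamic=1')
//  ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='t1.txt';
//  GetIndexOption(&kp, (char*)"Dynamic") then returns true for index k1.
#endif // 0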
|
|
|
|
|
2014-07-17 18:13:51 +02:00
|
|
|
/****************************************************************************/
|
|
|
|
/*  Returns true if the nth key of the table is a unique key.              */
|
|
|
|
/****************************************************************************/
|
|
|
|
bool ha_connect::IsUnique(uint n)
|
|
|
|
{
|
|
|
|
TABLE_SHARE *s= (table) ? table->s : NULL;
|
|
|
|
KEY kp= s->key_info[n];
|
|
|
|
|
|
|
|
return (kp.flags & 1) != 0;
|
|
|
|
} // end of IsUnique
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
/****************************************************************************/
|
|
|
|
/* Returns the index description structure used to make the index. */
|
|
|
|
/****************************************************************************/
|
2014-02-03 16:14:13 +01:00
|
|
|
PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
char *name, *pn;
|
|
|
|
bool unique;
|
2013-04-09 23:14:45 +02:00
|
|
|
PIXDEF xdp, pxd=NULL, toidx= NULL;
|
|
|
|
PKPDEF kpp, pkp;
|
2013-02-07 10:34:27 +01:00
|
|
|
KEY kp;
|
2014-02-03 16:14:13 +01:00
|
|
|
PGLOBAL& g= xp->g;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
if (!s)
|
|
|
|
s= table->s;
|
|
|
|
|
|
|
|
for (int n= 0; (unsigned)n < s->keynames.count; n++) {
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Getting created index %d info\n", n + 1);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
// Find the index to describe
|
2014-02-03 16:14:13 +01:00
|
|
|
kp= s->key_info[n];
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
// Now get index information
|
2014-02-03 16:14:13 +01:00
|
|
|
pn= (char*)s->keynames.type_names[n];
|
2015-03-18 13:30:14 +01:00
|
|
|
name= PlugDup(g, pn);
|
2013-04-09 23:14:45 +02:00
|
|
|
unique= (kp.flags & 1) != 0;
|
|
|
|
pkp= NULL;
|
|
|
|
|
|
|
|
// Allocate the index description block
|
|
|
|
xdp= new(g) INDEXDEF(name, unique, n);
|
|
|
|
|
|
|
|
    // Get the key parts info
|
2013-07-23 16:29:16 +02:00
|
|
|
for (int k= 0; (unsigned)k < kp.user_defined_key_parts; k++) {
|
2013-04-09 23:14:45 +02:00
|
|
|
pn= (char*)kp.key_part[k].field->field_name;
|
2015-03-18 13:30:14 +01:00
|
|
|
name= PlugDup(g, pn);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
// Allocate the key part description block
|
|
|
|
kpp= new(g) KPARTDEF(name, k + 1);
|
|
|
|
kpp->SetKlen(kp.key_part[k].length);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-03 21:54:02 +02:00
|
|
|
#if 0 // NIY
|
|
|
|
// Index on auto increment column can be an XXROW index
|
2014-04-19 11:11:30 +02:00
|
|
|
if (kp.key_part[k].field->flags & AUTO_INCREMENT_FLAG &&
|
2013-07-23 16:29:16 +02:00
|
|
|
          kp.user_defined_key_parts == 1) {
|
2013-04-03 21:54:02 +02:00
|
|
|
char *type= GetStringOption("Type", "DOS");
|
|
|
|
TABTYPE typ= GetTypeID(type);
|
|
|
|
|
|
|
|
xdp->SetAuto(IsTypeFixed(typ));
|
|
|
|
} // endif AUTO_INCREMENT
|
|
|
|
#endif // 0
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
if (pkp)
|
|
|
|
pkp->SetNext(kpp);
|
|
|
|
else
|
|
|
|
xdp->SetToKeyParts(kpp);
|
|
|
|
|
|
|
|
pkp= kpp;
|
|
|
|
} // endfor k
|
|
|
|
|
2013-07-23 16:29:16 +02:00
|
|
|
xdp->SetNParts(kp.user_defined_key_parts);
|
2014-04-30 10:48:29 +02:00
|
|
|
xdp->Dynamic= GetIndexOption(&kp, "Dynamic");
|
|
|
|
xdp->Mapped= GetIndexOption(&kp, "Mapped");
|
2013-04-09 23:14:45 +02:00
|
|
|
|
|
|
|
if (pxd)
|
|
|
|
pxd->SetNext(xdp);
|
2013-02-07 10:34:27 +01:00
|
|
|
else
|
2013-04-09 23:14:45 +02:00
|
|
|
toidx= xdp;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
pxd= xdp;
|
|
|
|
} // endfor n
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
return toidx;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // end of GetIndexInfo
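/****************************************************************************/
/*  Sketch (illustrative only) of the chain built by GetIndexInfo for a    */
/*  hypothetical table having PRIMARY KEY (id) and KEY k2 (name, dept):    */
/****************************************************************************/
#if 0 // example only
//  INDEXDEF("PRIMARY", true,  0) -> KPARTDEF("id", 1)
//  INDEXDEF("k2",      false, 1) -> KPARTDEF("name", 1) -> KPARTDEF("dept", 2)
//  INDEXDEF blocks are linked with SetNext; each one holds its key part
//  list, NParts, and the Dynamic and Mapped index options.
#endif // 0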
|
|
|
|
|
2014-10-31 12:28:07 +01:00
|
|
|
/****************************************************************************/
|
|
|
|
/*  Check that a virtual table is only indexed on ROWID/ROWNUM columns.    */
|
|
|
|
/****************************************************************************/
|
|
|
|
bool ha_connect::CheckVirtualIndex(TABLE_SHARE *s)
|
|
|
|
{
|
|
|
|
|
|
|
|
char *rid;
|
|
|
|
KEY kp;
|
|
|
|
Field *fp;
|
|
|
|
PGLOBAL& g= xp->g;
|
|
|
|
|
|
|
|
if (!s)
|
|
|
|
s= table->s;
|
|
|
|
|
|
|
|
for (int n= 0; (unsigned)n < s->keynames.count; n++) {
|
|
|
|
kp= s->key_info[n];
|
|
|
|
|
|
|
|
// Now get index information
|
|
|
|
|
|
|
|
    // Get the key parts info
|
|
|
|
for (int k= 0; (unsigned)k < kp.user_defined_key_parts; k++) {
|
|
|
|
fp= kp.key_part[k].field;
|
|
|
|
rid= (fp->option_struct) ? fp->option_struct->special : NULL;
|
|
|
|
|
|
|
|
if (!rid || (stricmp(rid, "ROWID") && stricmp(rid, "ROWNUM"))) {
|
|
|
|
strcpy(g->Message, "Invalid virtual index");
|
|
|
|
return true;
|
|
|
|
} // endif rowid
|
|
|
|
|
|
|
|
} // endfor k
|
|
|
|
|
|
|
|
} // endfor n
|
|
|
|
|
|
|
|
return false;
|
|
|
|
} // end of CheckVirtualIndex
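/****************************************************************************/
/*  Note (illustrative only): a virtual table index is accepted only when  */
/*  every indexed column is a ROWID or ROWNUM special column; any other    */
/*  indexed column fails with "Invalid virtual index". A hypothetical      */
/*  valid definition could look like:                                      */
/****************************************************************************/
#if 0 // example only
//  CREATE TABLE v1 (n INT KEY SPECIAL=ROWID) ENGINE=CONNECT
//  TABLE_TYPE=VIR BLOCK_SIZE=10;
#endif // 0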
|
|
|
|
|
2014-05-31 12:31:26 +02:00
|
|
|
bool ha_connect::IsPartitioned(void)
|
|
|
|
{
|
|
|
|
if (tshp)
|
|
|
|
return tshp->partition_info_str_len > 0;
|
|
|
|
else if (table && table->part_info)
|
|
|
|
return true;
|
|
|
|
else
|
|
|
|
return false;
|
2014-07-17 18:13:51 +02:00
|
|
|
|
2014-05-31 12:31:26 +02:00
|
|
|
} // end of IsPartitioned
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
const char *ha_connect::GetDBName(const char* name)
|
|
|
|
{
|
|
|
|
return (name) ? name : table->s->db.str;
|
|
|
|
} // end of GetDBName
|
|
|
|
|
|
|
|
const char *ha_connect::GetTableName(void)
|
|
|
|
{
|
2014-11-08 13:35:03 +01:00
|
|
|
return tshp ? tshp->table_name.str : table_share->table_name.str;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // end of GetTableName
|
|
|
|
|
2014-05-31 12:31:26 +02:00
|
|
|
char *ha_connect::GetPartName(void)
|
|
|
|
{
|
|
|
|
return (IsPartitioned()) ? partname : (char*)GetTableName();
|
|
|
|
} // end of GetPartName
|
|
|
|
|
2013-08-09 18:02:47 +02:00
|
|
|
#if 0
|
2013-02-07 10:34:27 +01:00
|
|
|
/****************************************************************************/
|
|
|
|
/* Returns the column real or special name length of a field. */
|
|
|
|
/****************************************************************************/
|
|
|
|
int ha_connect::GetColNameLen(Field *fp)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
PFOS fop= GetFieldOptionStruct(fp);
|
|
|
|
|
|
|
|
// Now get the column name length
|
|
|
|
if (fop && fop->special)
|
|
|
|
n= strlen(fop->special) + 1;
|
|
|
|
else
|
2013-08-09 18:02:47 +02:00
|
|
|
n= strlen(fp->field_name);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
return n;
|
|
|
|
} // end of GetColNameLen
|
|
|
|
|
|
|
|
/****************************************************************************/
|
|
|
|
/* Returns the column real or special name of a field. */
|
|
|
|
/****************************************************************************/
|
|
|
|
char *ha_connect::GetColName(Field *fp)
|
|
|
|
{
|
|
|
|
PFOS fop= GetFieldOptionStruct(fp);
|
|
|
|
|
|
|
|
return (fop && fop->special) ? fop->special : (char*)fp->field_name;
|
|
|
|
} // end of GetColName
|
|
|
|
|
|
|
|
/****************************************************************************/
|
|
|
|
/* Adds the column real or special name of a field to a string. */
|
|
|
|
/****************************************************************************/
|
|
|
|
void ha_connect::AddColName(char *cp, Field *fp)
|
|
|
|
{
|
|
|
|
PFOS fop= GetFieldOptionStruct(fp);
|
|
|
|
|
|
|
|
// Now add the column name
|
|
|
|
if (fop && fop->special)
|
|
|
|
    // The prefix * marks the column as "special"
|
|
|
|
strcat(strcpy(cp, "*"), strupr(fop->special));
|
|
|
|
else
|
2013-02-25 22:44:42 +01:00
|
|
|
strcpy(cp, (char*)fp->field_name);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
} // end of AddColName
|
2013-08-09 18:02:47 +02:00
|
|
|
#endif // 0

/***********************************************************************/
/*  This function sets the current database path.                     */
/***********************************************************************/
void ha_connect::SetDataPath(PGLOBAL g, const char *path)
{
  datapath= SetPath(g, path);
} // end of SetDataPath

/****************************************************************************/
/*  Get the table description block of a CONNECT table.                    */
/****************************************************************************/
PTDB ha_connect::GetTDB(PGLOBAL g)
{
  const char *table_name;
  PTDB        tp;

  // Double test to be on the safe side
  if (!g || !table)
    return NULL;

  table_name= GetTableName();

  if (!xp->CheckQuery(valid_query_id) && tdbp
                     && !stricmp(tdbp->GetName(), table_name)
                     && (tdbp->GetMode() == xmod
                      || (tdbp->GetMode() == MODE_READ && xmod == MODE_READX)
                      || tdbp->GetAmType() == TYPE_AM_XML)) {
    tp= tdbp;
    tp->SetMode(xmod);
  } else if ((tp= CntGetTDB(g, table_name, xmod, this))) {
    valid_query_id= xp->last_query_id;
//  tp->SetMode(xmod);
  } else
    htrc("GetTDB: %s\n", g->Message);

  return tp;
} // end of GetTDB

/****************************************************************************/
|
|
|
|
/* Open a CONNECT table, restricting column list if cols is true. */
|
|
|
|
/****************************************************************************/
|
2013-11-11 13:00:39 +01:00
|
|
|
int ha_connect::OpenTable(PGLOBAL g, bool del)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
bool rc= false;
|
|
|
|
char *c1= NULL, *c2=NULL;
|
|
|
|
|
|
|
|
// Double test to be on the safe side
|
|
|
|
if (!g || !table) {
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("OpenTable logical error; g=%p table=%p\n", g, table);
|
2013-11-11 13:00:39 +01:00
|
|
|
return HA_ERR_INITIALIZATION;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endif g
|
|
|
|
|
|
|
|
if (!(tdbp= GetTDB(g)))
|
2013-11-11 13:00:39 +01:00
|
|
|
return RC_FX;
|
2013-02-13 00:51:41 +01:00
|
|
|
else if (tdbp->IsReadOnly())
|
|
|
|
switch (xmod) {
|
|
|
|
case MODE_WRITE:
|
|
|
|
case MODE_INSERT:
|
|
|
|
case MODE_UPDATE:
|
|
|
|
case MODE_DELETE:
|
|
|
|
strcpy(g->Message, MSG(READ_ONLY));
|
2013-11-11 13:00:39 +01:00
|
|
|
return HA_ERR_TABLE_READONLY;
|
2013-02-13 00:51:41 +01:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
} // endswitch xmode
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-10-11 13:57:56 +02:00
|
|
|
if (xmod != MODE_INSERT || tdbp->GetAmType() == TYPE_AM_ODBC
|
|
|
|
|| tdbp->GetAmType() == TYPE_AM_MYSQL) {
|
2013-03-19 18:45:05 +01:00
|
|
|
// Get the list of used fields (columns)
|
|
|
|
char *p;
|
|
|
|
unsigned int k1, k2, n1, n2;
|
|
|
|
Field* *field;
|
2013-08-09 18:02:47 +02:00
|
|
|
Field* fp;
|
2013-10-11 13:57:56 +02:00
|
|
|
MY_BITMAP *map= (xmod == MODE_INSERT) ? table->write_set : table->read_set;
|
2013-03-19 18:45:05 +01:00
|
|
|
MY_BITMAP *ump= (xmod == MODE_UPDATE) ? table->write_set : NULL;
|
|
|
|
|
|
|
|
k1= k2= 0;
|
|
|
|
n1= n2= 1; // 1 is space for final null character
|
|
|
|
|
2013-08-09 18:02:47 +02:00
|
|
|
for (field= table->field; fp= *field; field++) {
|
|
|
|
if (bitmap_is_set(map, fp->field_index)) {
|
|
|
|
n1+= (strlen(fp->field_name) + 1);
|
2013-03-19 18:45:05 +01:00
|
|
|
k1++;
|
|
|
|
} // endif
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-08-09 18:02:47 +02:00
|
|
|
if (ump && bitmap_is_set(ump, fp->field_index)) {
|
|
|
|
n2+= (strlen(fp->field_name) + 1);
|
2013-03-19 18:45:05 +01:00
|
|
|
k2++;
|
|
|
|
} // endif
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-03-19 18:45:05 +01:00
|
|
|
} // endfor field
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-03-19 18:45:05 +01:00
|
|
|
if (k1) {
|
|
|
|
p= c1= (char*)PlugSubAlloc(g, NULL, n1);
|
|
|
|
|
2013-08-09 18:02:47 +02:00
|
|
|
for (field= table->field; fp= *field; field++)
|
|
|
|
if (bitmap_is_set(map, fp->field_index)) {
|
|
|
|
strcpy(p, (char*)fp->field_name);
|
2013-03-19 18:45:05 +01:00
|
|
|
p+= (strlen(p) + 1);
|
|
|
|
} // endif used field
|
|
|
|
|
|
|
|
*p= '\0'; // mark end of list
|
|
|
|
} // endif k1
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-03-19 18:45:05 +01:00
|
|
|
if (k2) {
|
|
|
|
p= c2= (char*)PlugSubAlloc(g, NULL, n2);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-08-09 18:02:47 +02:00
|
|
|
for (field= table->field; fp= *field; field++)
|
|
|
|
if (bitmap_is_set(ump, fp->field_index)) {
|
|
|
|
strcpy(p, (char*)fp->field_name);
|
2014-07-17 18:13:51 +02:00
|
|
|
|
|
|
|
if (part_id && bitmap_is_set(part_id, fp->field_index)) {
|
|
|
|
// Trying to update a column used for partitioning
|
|
|
|
// This cannot be currently done because it may require
|
|
|
|
// a row to be moved in another partition.
|
|
|
|
sprintf(g->Message,
|
|
|
|
"Cannot update column %s because it is used for partitioning",
|
|
|
|
p);
|
|
|
|
return HA_ERR_INTERNAL_ERROR;
|
|
|
|
} // endif part_id
|
|
|
|
|
2013-03-19 18:45:05 +01:00
|
|
|
p+= (strlen(p) + 1);
|
|
|
|
} // endif used field
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-03-19 18:45:05 +01:00
|
|
|
*p= '\0'; // mark end of list
|
|
|
|
} // endif k2
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-03-19 18:45:05 +01:00
|
|
|
} // endif xmod
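
  /* Editorial sketch (not part of the engine code): c1 and c2 built
     above are flat lists of null-terminated column names closed by an
     extra null byte, e.g. for used columns "id" and "name":
       c1 -> "id\0name\0\0"
     A consumer such as CntOpenTable presumably walks such a list with
     something like:
       for (char *p= c1; *p; p+= strlen(p) + 1)
         ;   // use the column named by p
     The column names here are made up for illustration. */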
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// Open the table
|
|
|
|
if (!(rc= CntOpenTable(g, tdbp, xmod, c1, c2, del, this))) {
|
|
|
|
istable= true;
|
|
|
|
// strmake(tname, table_name, sizeof(tname)-1);
|
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
// We may be in a create index query
|
|
|
|
if (xmod == MODE_ANY && *tdbp->GetName() != '#') {
|
|
|
|
// The current indexes
|
|
|
|
PIXDEF oldpix= GetIndexInfo();
|
|
|
|
} // endif xmod
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
} else
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("OpenTable: %s\n", g->Message);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
if (rc) {
|
|
|
|
tdbp= NULL;
|
|
|
|
valid_info= false;
|
|
|
|
} // endif rc
|
|
|
|
|
2013-11-11 13:00:39 +01:00
|
|
|
return (rc) ? HA_ERR_INITIALIZATION : 0;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // end of OpenTable
|
|
|
|
|
|
|
|
|
2014-07-17 18:13:51 +02:00
|
|
|
/****************************************************************************/
|
|
|
|
/* CheckColumnList: check that all bitmap columns do exist. */
|
|
|
|
/****************************************************************************/
|
|
|
|
bool ha_connect::CheckColumnList(PGLOBAL g)
|
|
|
|
{
|
|
|
|
// Check the list of used fields (columns)
|
|
|
|
int rc;
|
|
|
|
bool brc= false;
|
|
|
|
PCOL colp;
|
|
|
|
Field* *field;
|
|
|
|
Field* fp;
|
|
|
|
MY_BITMAP *map= table->read_set;
|
|
|
|
|
|
|
|
// Save stack and allocation environment and prepare error return
|
|
|
|
if (g->jump_level == MAX_JUMP) {
|
|
|
|
strcpy(g->Message, MSG(TOO_MANY_JUMPS));
|
|
|
|
return true;
|
|
|
|
} // endif jump_level
|
|
|
|
|
|
|
|
if ((rc= setjmp(g->jumper[++g->jump_level])) == 0) {
|
|
|
|
for (field= table->field; fp= *field; field++)
|
|
|
|
if (bitmap_is_set(map, fp->field_index)) {
|
|
|
|
if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name, 0))) {
|
|
|
|
sprintf(g->Message, "Column %s not found in %s",
|
|
|
|
fp->field_name, tdbp->GetName());
|
|
|
|
brc= true;
|
|
|
|
goto fin;
|
|
|
|
} // endif colp
|
|
|
|
|
|
|
|
if ((brc= colp->InitValue(g)))
|
|
|
|
goto fin;
|
|
|
|
|
|
|
|
colp->AddColUse(U_P); // For PLG tables
|
|
|
|
} // endif
|
|
|
|
|
|
|
|
} else
|
|
|
|
brc= true;
|
|
|
|
|
|
|
|
fin:
|
|
|
|
g->jump_level--;
|
|
|
|
return brc;
|
|
|
|
} // end of CheckColumnList
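
/* Editorial note: the setjmp guard used above is the usual CONNECT
   error-recovery pattern; restated in isolation (no new API assumed):
     if (g->jump_level == MAX_JUMP)
       return true;                          // guard stack exhausted
     if (setjmp(g->jumper[++g->jump_level]) == 0) {
       ;                                     // protected CONNECT calls
     } else
       brc= true;                            // a longjmp landed here
     g->jump_level--;
*/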

/****************************************************************************/
/*  IsOpened: returns true if the table is already opened.                 */
/****************************************************************************/
bool ha_connect::IsOpened(void)
{
  return (!xp->CheckQuery(valid_query_id) && tdbp
                                          && tdbp->GetUse() == USE_OPEN);
} // end of IsOpened

/****************************************************************************/
/*  Close a CONNECT table.                                                  */
/****************************************************************************/
int ha_connect::CloseTable(PGLOBAL g)
{
  int rc= CntCloseTable(g, tdbp, nox, abort);
  tdbp= NULL;
  sdvalin1= sdvalin2= sdvalin3= sdvalin4= NULL;
  sdvalout= NULL;
  valid_info= false;
  indexing= -1;
  nox= true;
  abort= false;
  return rc;
} // end of CloseTable

/***********************************************************************/
|
|
|
|
/* Make a pseudo record from current row values. Specific to MySQL. */
|
|
|
|
/***********************************************************************/
|
|
|
|
int ha_connect::MakeRecord(char *buf)
|
|
|
|
{
|
2013-11-09 17:32:57 +01:00
|
|
|
char *p, *fmt, val[32];
|
|
|
|
int rc= 0;
|
|
|
|
Field* *field;
|
|
|
|
Field *fp;
|
|
|
|
my_bitmap_map *org_bitmap;
|
|
|
|
CHARSET_INFO *charset= tdbp->data_charset();
|
|
|
|
//MY_BITMAP readmap;
|
|
|
|
MY_BITMAP *map;
|
|
|
|
PVAL value;
|
|
|
|
PCOL colp= NULL;
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_ENTER("ha_connect::MakeRecord");
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace > 1)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Maps: read=%08X write=%08X vcol=%08X defr=%08X defw=%08X\n",
|
2013-02-07 10:34:27 +01:00
|
|
|
*table->read_set->bitmap, *table->write_set->bitmap,
|
|
|
|
*table->vcol_set->bitmap,
|
|
|
|
*table->def_read_set.bitmap, *table->def_write_set.bitmap);
|
|
|
|
|
|
|
|
// Avoid asserts in field::store() for columns that are not updated
|
|
|
|
org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
|
|
|
|
|
|
|
|
// This is for variable_length rows
|
|
|
|
memset(buf, 0, table->s->null_bytes);
|
|
|
|
|
|
|
|
// When sorting read_set selects all columns, so we use def_read_set
|
2013-11-09 17:32:57 +01:00
|
|
|
map= (MY_BITMAP *)&table->def_read_set;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// Make the pseudo record from field values
|
|
|
|
for (field= table->field; *field && !rc; field++) {
|
|
|
|
fp= *field;
|
|
|
|
|
2015-11-24 22:20:32 +01:00
|
|
|
if (!fp->stored_in_db())
|
2013-02-07 10:34:27 +01:00
|
|
|
continue; // This is a virtual column
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
if (bitmap_is_set(map, fp->field_index) || alter) {
|
2013-02-07 10:34:27 +01:00
|
|
|
// This is a used field, fill the buffer with value
|
|
|
|
for (colp= tdbp->GetColumns(); colp; colp= colp->GetNext())
|
2014-04-19 11:11:30 +02:00
|
|
|
if ((!mrr || colp->GetKcol()) &&
|
|
|
|
!stricmp(colp->GetName(), (char*)fp->field_name))
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
|
|
|
|
if (!colp) {
|
2014-04-19 11:11:30 +02:00
|
|
|
if (mrr)
|
|
|
|
continue;
|
|
|
|
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Column %s not found\n", fp->field_name);
|
2013-02-07 10:34:27 +01:00
|
|
|
dbug_tmp_restore_column_map(table->write_set, org_bitmap);
|
|
|
|
DBUG_RETURN(HA_ERR_WRONG_IN_RECORD);
|
|
|
|
} // endif colp
|
|
|
|
|
|
|
|
value= colp->GetValue();
|
2014-04-08 11:15:08 +02:00
|
|
|
p= NULL;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-04-08 11:15:08 +02:00
|
|
|
// All this was better optimized
|
2013-02-24 01:23:18 +01:00
|
|
|
if (!value->IsNull()) {
|
|
|
|
switch (value->GetType()) {
|
|
|
|
case TYPE_DATE:
|
2013-03-03 15:37:27 +01:00
|
|
|
if (!sdvalout)
|
|
|
|
sdvalout= AllocateValue(xp->g, TYPE_STRING, 20);
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2013-02-24 01:23:18 +01:00
|
|
|
switch (fp->type()) {
|
|
|
|
case MYSQL_TYPE_DATE:
|
|
|
|
fmt= "%Y-%m-%d";
|
|
|
|
break;
|
|
|
|
case MYSQL_TYPE_TIME:
|
|
|
|
fmt= "%H:%M:%S";
|
|
|
|
break;
|
2013-11-22 16:03:54 +01:00
|
|
|
case MYSQL_TYPE_YEAR:
|
|
|
|
fmt= "%Y";
|
|
|
|
break;
|
2013-02-24 01:23:18 +01:00
|
|
|
default:
|
|
|
|
fmt= "%Y-%m-%d %H:%M:%S";
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-24 01:23:18 +01:00
|
|
|
} // endswitch type
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2013-02-24 01:23:18 +01:00
|
|
|
// Get date in the format required by MySQL fields
|
2013-03-03 15:37:27 +01:00
|
|
|
value->FormatValue(sdvalout, fmt);
|
|
|
|
p= sdvalout->GetCharValue();
|
2014-04-08 11:15:08 +02:00
|
|
|
rc= fp->store(p, strlen(p), charset, CHECK_FIELD_WARN);
|
|
|
|
break;
|
|
|
|
case TYPE_STRING:
|
|
|
|
case TYPE_DECIM:
|
|
|
|
p= value->GetCharString(val);
|
|
|
|
charset= tdbp->data_charset();
|
|
|
|
rc= fp->store(p, strlen(p), charset, CHECK_FIELD_WARN);
|
2013-02-24 01:23:18 +01:00
|
|
|
break;
|
2013-12-28 15:46:49 +01:00
|
|
|
case TYPE_DOUBLE:
|
2014-04-08 11:15:08 +02:00
|
|
|
rc= fp->store(value->GetFloatValue());
|
2013-02-24 01:23:18 +01:00
|
|
|
break;
|
|
|
|
default:
|
2014-04-08 11:15:08 +02:00
|
|
|
rc= fp->store(value->GetBigintValue(), value->IsUnsigned());
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-24 01:23:18 +01:00
|
|
|
} // endswitch Type
|
|
|
|
|
|
|
|
        // Store functions return 1 on overflow and -1 on a fatal error
|
|
|
|
if (rc > 0) {
|
|
|
|
char buf[256];
|
|
|
|
THD *thd= ha_thd();
|
2013-12-03 22:59:40 +01:00
|
|
|
|
2014-09-05 14:18:31 +02:00
|
|
|
sprintf(buf, "Out of range value %.140s for column '%s' at row %ld",
|
2014-04-08 11:15:08 +02:00
|
|
|
value->GetCharString(val),
|
|
|
|
fp->field_name,
|
|
|
|
thd->get_stmt_da()->current_row_for_warning());
|
2013-12-03 22:59:40 +01:00
|
|
|
|
2014-04-08 11:15:08 +02:00
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, buf);
|
|
|
|
DBUG_PRINT("MakeRecord", ("%s", buf));
|
|
|
|
rc= 0;
|
|
|
|
} else if (rc < 0)
|
|
|
|
rc= HA_ERR_WRONG_IN_RECORD;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-02-24 01:23:18 +01:00
|
|
|
fp->set_notnull();
|
2013-02-07 10:34:27 +01:00
|
|
|
} else
|
2013-02-24 01:23:18 +01:00
|
|
|
fp->set_null();
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
} // endif bitmap
|
|
|
|
|
|
|
|
} // endfor field
|
|
|
|
|
2014-07-17 18:13:51 +02:00
|
|
|
// This is sometimes required for partition tables because the buf
|
|
|
|
// can be different from the table->record[0] buffer
|
|
|
|
if (buf != (char*)table->record[0])
|
|
|
|
memcpy(buf, table->record[0], table->s->stored_rec_length);
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
// This is copied from ha_tina and is necessary to avoid asserts
|
|
|
|
dbug_tmp_restore_column_map(table->write_set, org_bitmap);
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of MakeRecord
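
/* Editorial note (values are made up): for a DATE column the code
   above formats the CONNECT value before handing it to the field,
   roughly:
     value->FormatValue(sdvalout, "%Y-%m-%d");
     p= sdvalout->GetCharValue();             // e.g. "2016-01-25"
     rc= fp->store(p, strlen(p), charset, CHECK_FIELD_WARN);
   MYSQL_TYPE_TIME and MYSQL_TYPE_YEAR use "%H:%M:%S" and "%Y". */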
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************/
|
|
|
|
/* Set row values from a MySQL pseudo record. Specific to MySQL. */
|
|
|
|
/***********************************************************************/
|
2015-05-10 12:14:21 +02:00
|
|
|
int ha_connect::ScanRecord(PGLOBAL g, uchar *)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
char attr_buffer[1024];
|
2013-02-18 16:21:52 +01:00
|
|
|
char data_buffer[1024];
|
2013-03-05 19:30:40 +01:00
|
|
|
char *fmt;
|
2013-02-07 10:34:27 +01:00
|
|
|
int rc= 0;
|
|
|
|
PCOL colp;
|
2015-01-09 23:36:50 +01:00
|
|
|
PVAL value, sdvalin;
|
2013-02-07 10:34:27 +01:00
|
|
|
Field *fp;
|
|
|
|
PTDBASE tp= (PTDBASE)tdbp;
|
|
|
|
String attribute(attr_buffer, sizeof(attr_buffer),
|
|
|
|
table->s->table_charset);
|
|
|
|
my_bitmap_map *bmap= dbug_tmp_use_all_columns(table, table->read_set);
|
2013-02-18 16:21:52 +01:00
|
|
|
const CHARSET_INFO *charset= tdbp->data_charset();
|
|
|
|
String data_charset_value(data_buffer, sizeof(data_buffer), charset);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// Scan the pseudo record for field values and set column values
|
|
|
|
for (Field **field=table->field ; *field ; field++) {
|
|
|
|
fp= *field;
|
|
|
|
|
2015-11-24 22:20:32 +01:00
|
|
|
if (!fp->stored_in_db() || fp->option_struct->special)
|
2013-02-07 10:34:27 +01:00
|
|
|
continue; // Is a virtual column possible here ???
|
|
|
|
|
2013-10-11 13:57:56 +02:00
|
|
|
if ((xmod == MODE_INSERT && tdbp->GetAmType() != TYPE_AM_MYSQL
|
|
|
|
&& tdbp->GetAmType() != TYPE_AM_ODBC) ||
|
2013-03-19 18:45:05 +01:00
|
|
|
bitmap_is_set(table->write_set, fp->field_index)) {
|
2013-02-07 10:34:27 +01:00
|
|
|
for (colp= tp->GetSetCols(); colp; colp= colp->GetNext())
|
|
|
|
if (!stricmp(colp->GetName(), fp->field_name))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (!colp) {
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Column %s not found\n", fp->field_name);
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= HA_ERR_WRONG_IN_RECORD;
|
|
|
|
goto err;
|
|
|
|
} else
|
|
|
|
value= colp->GetValue();
|
|
|
|
|
|
|
|
// This is a used field, fill the value from the row buffer
|
|
|
|
// All this could be better optimized
|
2013-02-24 01:23:18 +01:00
|
|
|
if (fp->is_null()) {
|
|
|
|
if (colp->IsNullable())
|
|
|
|
value->SetNull(true);
|
|
|
|
|
|
|
|
value->Reset();
|
|
|
|
} else switch (value->GetType()) {
|
2013-12-28 15:46:49 +01:00
|
|
|
case TYPE_DOUBLE:
|
2013-02-07 10:34:27 +01:00
|
|
|
value->SetValue(fp->val_real());
|
|
|
|
break;
|
|
|
|
case TYPE_DATE:
|
2013-11-22 16:03:54 +01:00
|
|
|
// Get date in the format produced by MySQL fields
|
|
|
|
switch (fp->type()) {
|
|
|
|
case MYSQL_TYPE_DATE:
|
2015-01-09 23:36:50 +01:00
|
|
|
if (!sdvalin2) {
|
|
|
|
sdvalin2= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
|
|
|
|
fmt= "YYYY-MM-DD";
|
|
|
|
((DTVAL*)sdvalin2)->SetFormat(g, fmt, strlen(fmt));
|
|
|
|
            } // endif sdvalin2
|
|
|
|
|
|
|
|
sdvalin= sdvalin2;
|
2013-11-22 16:03:54 +01:00
|
|
|
break;
|
|
|
|
case MYSQL_TYPE_TIME:
|
2015-01-09 23:36:50 +01:00
|
|
|
if (!sdvalin3) {
|
|
|
|
sdvalin3= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
|
|
|
|
fmt= "hh:mm:ss";
|
|
|
|
((DTVAL*)sdvalin3)->SetFormat(g, fmt, strlen(fmt));
|
|
|
|
            } // endif sdvalin3
|
|
|
|
|
|
|
|
sdvalin= sdvalin3;
|
2013-11-22 16:03:54 +01:00
|
|
|
break;
|
|
|
|
case MYSQL_TYPE_YEAR:
|
2015-01-09 23:36:50 +01:00
|
|
|
if (!sdvalin4) {
|
|
|
|
sdvalin4= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
|
|
|
|
fmt= "YYYY";
|
|
|
|
((DTVAL*)sdvalin4)->SetFormat(g, fmt, strlen(fmt));
|
|
|
|
            } // endif sdvalin4
|
|
|
|
|
|
|
|
sdvalin= sdvalin4;
|
2013-11-22 16:03:54 +01:00
|
|
|
break;
|
|
|
|
default:
|
2015-01-09 23:36:50 +01:00
|
|
|
if (!sdvalin1) {
|
|
|
|
sdvalin1= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
|
|
|
|
fmt= "YYYY-MM-DD hh:mm:ss";
|
|
|
|
((DTVAL*)sdvalin1)->SetFormat(g, fmt, strlen(fmt));
|
|
|
|
} // endif sdvalin1
|
|
|
|
|
|
|
|
sdvalin= sdvalin1;
|
2013-11-22 16:03:54 +01:00
|
|
|
} // endswitch type
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-10-12 12:05:05 +02:00
|
|
|
sdvalin->SetNullable(colp->IsNullable());
|
2013-02-07 10:34:27 +01:00
|
|
|
fp->val_str(&attribute);
|
2013-03-03 15:37:27 +01:00
|
|
|
sdvalin->SetValue_psz(attribute.c_ptr_safe());
|
|
|
|
value->SetValue_pval(sdvalin);
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
fp->val_str(&attribute);
|
2014-02-03 16:14:13 +01:00
|
|
|
|
|
|
|
if (charset != &my_charset_bin) {
|
2013-02-18 16:21:52 +01:00
|
|
|
// Convert from SQL field charset to DATA_CHARSET
|
|
|
|
uint cnv_errors;
|
2014-02-03 16:14:13 +01:00
|
|
|
|
2013-02-18 16:21:52 +01:00
|
|
|
data_charset_value.copy(attribute.ptr(), attribute.length(),
|
|
|
|
attribute.charset(), charset, &cnv_errors);
|
2013-02-20 13:05:53 +01:00
|
|
|
value->SetValue_psz(data_charset_value.c_ptr_safe());
|
2014-02-03 16:14:13 +01:00
|
|
|
} else
|
|
|
|
value->SetValue_psz(attribute.c_ptr_safe());
|
|
|
|
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endswitch Type
|
|
|
|
|
|
|
|
#ifdef NEWCHANGE
|
|
|
|
} else if (xmod == MODE_UPDATE) {
|
|
|
|
PCOL cp;
|
|
|
|
|
|
|
|
for (cp= tp->GetColumns(); cp; cp= cp->GetNext())
|
|
|
|
if (!stricmp(colp->GetName(), cp->GetName()))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (!cp) {
|
|
|
|
rc= HA_ERR_WRONG_IN_RECORD;
|
|
|
|
goto err;
|
|
|
|
} // endif cp
|
|
|
|
|
|
|
|
value->SetValue_pval(cp->GetValue());
|
|
|
|
} else // mode Insert
|
|
|
|
value->Reset();
|
|
|
|
#else
|
|
|
|
} // endif bitmap_is_set
|
|
|
|
#endif
|
|
|
|
|
|
|
|
} // endfor field
|
|
|
|
|
|
|
|
err:
|
|
|
|
dbug_tmp_restore_column_map(table->read_set, bmap);
|
|
|
|
return rc;
|
|
|
|
} // end of ScanRecord
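
/* Editorial note (value is made up): sdvalin1..sdvalin4 above cache one
   DTVAL input converter per temporal field type, so text coming from a
   MySQL field is parsed with the matching picture, e.g. for DATE:
     ((DTVAL*)sdvalin2)->SetFormat(g, "YYYY-MM-DD", 10);
     sdvalin2->SetValue_psz("2016-01-25");
     value->SetValue_pval(sdvalin2);
   DATETIME, TIME and YEAR use "YYYY-MM-DD hh:mm:ss", "hh:mm:ss" and
   "YYYY" respectively, as set above. */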

/***********************************************************************/
/*  Check change in index column. Specific to MySQL.                  */
/*  Should be elaborated to check for real changes.                    */
/***********************************************************************/
int ha_connect::CheckRecord(PGLOBAL g, const uchar *, uchar *newbuf)
{
  return ScanRecord(g, newbuf);
} // end of dummy CheckRecord

/***********************************************************************/
/*  Return true if this field is used in current indexing.            */
/***********************************************************************/
bool ha_connect::IsIndexed(Field *fp)
{
  if (active_index < MAX_KEY) {
    KEY_PART_INFO *kpart;
    KEY           *kfp= &table->key_info[active_index];
    uint           rem= kfp->user_defined_key_parts;

    for (kpart= kfp->key_part; rem; rem--, kpart++)
      if (kpart->field == fp)
        return true;

  } // endif active_index

  return false;
} // end of IsIndexed

/***********************************************************************/
|
|
|
|
/* Return the where clause for remote indexed read. */
|
|
|
|
/***********************************************************************/
|
|
|
|
bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
|
|
|
|
const key_range *kr)
|
|
|
|
{
|
|
|
|
const uchar *ptr;
|
2015-10-25 21:11:04 +01:00
|
|
|
//uint i, rem, len, klen, stlen;
|
|
|
|
uint i, rem, len, stlen;
|
2015-07-16 11:05:20 +02:00
|
|
|
bool nq, both, oom= false;
|
|
|
|
OPVAL op;
|
|
|
|
Field *fp;
|
|
|
|
const key_range *ranges[2];
|
|
|
|
my_bitmap_map *old_map;
|
|
|
|
KEY *kfp;
|
|
|
|
KEY_PART_INFO *kpart;
|
2014-04-19 17:02:53 +02:00
|
|
|
|
2015-07-16 11:05:20 +02:00
|
|
|
if (active_index == MAX_KEY)
|
|
|
|
return false;
|
2014-04-19 17:02:53 +02:00
|
|
|
|
2015-07-16 11:05:20 +02:00
|
|
|
ranges[0]= kr;
|
|
|
|
ranges[1]= (end_range && !eq_range) ? &save_end_range : NULL;
|
|
|
|
|
|
|
|
if (!ranges[0] && !ranges[1]) {
|
|
|
|
strcpy(g->Message, "MakeKeyWhere: No key");
|
|
|
|
return true;
|
|
|
|
} else
|
|
|
|
both= ranges[0] && ranges[1];
|
|
|
|
|
|
|
|
kfp= &table->key_info[active_index];
|
|
|
|
old_map= dbug_tmp_use_all_columns(table, table->write_set);
|
|
|
|
|
|
|
|
for (i = 0; i <= 1; i++) {
|
|
|
|
if (ranges[i] == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (both && i > 0)
|
|
|
|
oom|= qry->Append(") AND (");
|
|
|
|
else
|
|
|
|
oom|= qry->Append(" WHERE (");
|
|
|
|
|
2015-10-25 21:11:04 +01:00
|
|
|
// klen= len= ranges[i]->length;
|
|
|
|
len= ranges[i]->length;
|
2015-07-16 11:05:20 +02:00
|
|
|
rem= kfp->user_defined_key_parts;
|
|
|
|
ptr= ranges[i]->key;
|
|
|
|
|
|
|
|
for (kpart= kfp->key_part; rem; rem--, kpart++) {
|
|
|
|
fp= kpart->field;
|
|
|
|
stlen= kpart->store_length;
|
|
|
|
nq= fp->str_needs_quotes();
|
|
|
|
|
|
|
|
if (kpart != kfp->key_part)
|
|
|
|
oom|= qry->Append(" AND ");
|
|
|
|
|
|
|
|
if (q) {
|
|
|
|
oom|= qry->Append(q);
|
|
|
|
oom|= qry->Append((PSZ)fp->field_name);
|
|
|
|
oom|= qry->Append(q);
|
|
|
|
} else
|
|
|
|
oom|= qry->Append((PSZ)fp->field_name);
|
|
|
|
|
|
|
|
switch (ranges[i]->flag) {
|
|
|
|
case HA_READ_KEY_EXACT:
|
|
|
|
// op= (stlen >= len || !nq || fp->result_type() != STRING_RESULT)
|
|
|
|
// ? OP_EQ : OP_LIKE;
|
|
|
|
op= OP_EQ;
|
|
|
|
break;
|
|
|
|
case HA_READ_AFTER_KEY:
|
|
|
|
op= (stlen >= len) ? (!i ? OP_GT : OP_LE) : OP_GE;
|
|
|
|
break;
|
|
|
|
case HA_READ_KEY_OR_NEXT:
|
|
|
|
op= OP_GE;
|
|
|
|
break;
|
|
|
|
case HA_READ_BEFORE_KEY:
|
|
|
|
op= (stlen >= len) ? OP_LT : OP_LE;
|
|
|
|
break;
|
|
|
|
case HA_READ_KEY_OR_PREV:
|
|
|
|
op= OP_LE;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
sprintf(g->Message, "cannot handle flag %d", ranges[i]->flag);
|
|
|
|
goto err;
|
|
|
|
} // endswitch flag
|
|
|
|
|
|
|
|
oom|= qry->Append((PSZ)GetValStr(op, false));
|
|
|
|
|
|
|
|
if (nq)
|
|
|
|
oom|= qry->Append('\'');
|
|
|
|
|
|
|
|
if (kpart->key_part_flag & HA_VAR_LENGTH_PART) {
|
|
|
|
String varchar;
|
|
|
|
uint var_length= uint2korr(ptr);
|
|
|
|
|
|
|
|
varchar.set_quick((char*)ptr + HA_KEY_BLOB_LENGTH,
|
|
|
|
var_length, &my_charset_bin);
|
|
|
|
oom|= qry->Append(varchar.ptr(), varchar.length(), nq);
|
|
|
|
} else {
|
|
|
|
char strbuff[MAX_FIELD_WIDTH];
|
|
|
|
String str(strbuff, sizeof(strbuff), kpart->field->charset()), *res;
|
|
|
|
|
|
|
|
res= fp->val_str(&str, ptr);
|
|
|
|
oom|= qry->Append(res->ptr(), res->length(), nq);
|
|
|
|
} // endif flag
|
|
|
|
|
|
|
|
if (nq)
|
|
|
|
oom |= qry->Append('\'');
|
|
|
|
|
|
|
|
if (stlen >= len)
|
|
|
|
break;
|
|
|
|
|
|
|
|
len-= stlen;
|
|
|
|
|
|
|
|
/* For nullable columns, null-byte is already skipped before, that is
|
|
|
|
ptr was incremented by 1. Since store_length still counts null-byte,
|
|
|
|
we need to subtract 1 from store_length. */
|
|
|
|
ptr+= stlen - MY_TEST(kpart->null_bit);
|
|
|
|
} // endfor kpart
|
|
|
|
|
|
|
|
} // endfor i
|
2014-04-19 17:02:53 +02:00
|
|
|
|
2015-04-04 19:29:34 +02:00
|
|
|
if ((oom|= qry->Append(")")))
|
|
|
|
strcpy(g->Message, "Out of memory");
|
|
|
|
|
2015-07-16 11:05:20 +02:00
|
|
|
dbug_tmp_restore_column_map(table->write_set, old_map);
|
|
|
|
return oom;
|
|
|
|
|
|
|
|
err:
|
|
|
|
dbug_tmp_restore_column_map(table->write_set, old_map);
|
|
|
|
return true;
|
2014-04-19 17:02:53 +02:00
|
|
|
} // end of MakeKeyWhere
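
/* Editorial example (table, key and values are made up): for an index
   on column id, an exact read of id = 3 with quoting character '`'
   makes this routine append roughly:
     WHERE (`id` = 3)
   and a range scan id > 3 AND id <= 7 gives:
     WHERE (`id` > 3) AND (`id` <= 7)
   The exact operators depend on the key_range flags handled above. */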
|
|
|
|
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
/***********************************************************************/
|
|
|
|
/* Return the string representing an operator. */
|
|
|
|
/***********************************************************************/
|
2013-02-07 13:37:44 +01:00
|
|
|
const char *ha_connect::GetValStr(OPVAL vop, bool neg)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
2013-02-07 13:37:44 +01:00
|
|
|
const char *val;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
switch (vop) {
|
|
|
|
case OP_EQ:
|
|
|
|
val= " = ";
|
|
|
|
break;
|
|
|
|
case OP_NE:
|
|
|
|
val= " <> ";
|
|
|
|
break;
|
|
|
|
case OP_GT:
|
|
|
|
val= " > ";
|
|
|
|
break;
|
|
|
|
case OP_GE:
|
|
|
|
val= " >= ";
|
|
|
|
break;
|
|
|
|
case OP_LT:
|
|
|
|
val= " < ";
|
|
|
|
break;
|
|
|
|
case OP_LE:
|
|
|
|
val= " <= ";
|
|
|
|
break;
|
|
|
|
case OP_IN:
|
|
|
|
val= (neg) ? " NOT IN (" : " IN (";
|
|
|
|
break;
|
|
|
|
case OP_NULL:
|
2014-03-10 18:29:04 +01:00
|
|
|
val= (neg) ? " IS NOT NULL" : " IS NULL";
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
case OP_LIKE:
|
|
|
|
val= " LIKE ";
|
|
|
|
break;
|
|
|
|
case OP_XX:
|
2014-03-10 18:29:04 +01:00
|
|
|
val= (neg) ? " NOT BETWEEN " : " BETWEEN ";
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
case OP_EXIST:
|
2014-03-10 18:29:04 +01:00
|
|
|
val= (neg) ? " NOT EXISTS " : " EXISTS ";
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
case OP_AND:
|
|
|
|
val= " AND ";
|
|
|
|
break;
|
|
|
|
case OP_OR:
|
|
|
|
val= " OR ";
|
|
|
|
break;
|
|
|
|
case OP_NOT:
|
|
|
|
val= " NOT ";
|
|
|
|
break;
|
|
|
|
case OP_CNC:
|
|
|
|
val= " || ";
|
|
|
|
break;
|
|
|
|
case OP_ADD:
|
|
|
|
val= " + ";
|
|
|
|
break;
|
|
|
|
case OP_SUB:
|
|
|
|
val= " - ";
|
|
|
|
break;
|
|
|
|
case OP_MULT:
|
|
|
|
val= " * ";
|
|
|
|
break;
|
|
|
|
case OP_DIV:
|
|
|
|
val= " / ";
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
val= " ? ";
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
} /* endswitch */
|
|
|
|
|
|
|
|
return val;
|
|
|
|
} // end of GetValStr
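
/* Editorial note: GetValStr only maps an operator code to its SQL text,
   e.g. GetValStr(OP_EQ, false) yields " = " and GetValStr(OP_IN, true)
   yields " NOT IN (" (the closing parenthesis is appended later by the
   caller that builds the filter string). */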
|
|
|
|
|
2014-04-30 10:48:29 +02:00
|
|
|
#if 0
|
2014-04-26 00:17:26 +02:00
|
|
|
/***********************************************************************/
|
|
|
|
/* Check the WHERE condition and return a CONNECT filter. */
|
|
|
|
/***********************************************************************/
|
|
|
|
PFIL ha_connect::CheckFilter(PGLOBAL g)
|
|
|
|
{
|
|
|
|
return CondFilter(g, (Item *)pushed_cond);
|
|
|
|
} // end of CheckFilter
|
2014-04-30 10:48:29 +02:00
|
|
|
#endif // 0
|
2014-04-26 00:17:26 +02:00
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
/***********************************************************************/
|
|
|
|
/* Check the WHERE condition and return a CONNECT filter. */
|
|
|
|
/***********************************************************************/
|
|
|
|
PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
bool ismul= false;
|
|
|
|
OPVAL vop= OP_XX;
|
|
|
|
PFIL filp= NULL;
|
|
|
|
|
|
|
|
if (!cond)
|
|
|
|
return NULL;
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("Cond type=%d\n", cond->type());
|
|
|
|
|
|
|
|
if (cond->type() == COND::COND_ITEM) {
|
|
|
|
PFIL fp;
|
|
|
|
Item_cond *cond_item= (Item_cond *)cond;
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(),
|
|
|
|
cond_item->func_name());
|
|
|
|
|
|
|
|
switch (cond_item->functype()) {
|
|
|
|
case Item_func::COND_AND_FUNC: vop= OP_AND; break;
|
|
|
|
case Item_func::COND_OR_FUNC: vop= OP_OR; break;
|
|
|
|
default: return NULL;
|
|
|
|
} // endswitch functype
|
|
|
|
|
|
|
|
List<Item>* arglist= cond_item->argument_list();
|
|
|
|
List_iterator<Item> li(*arglist);
|
|
|
|
Item *subitem;
|
|
|
|
|
|
|
|
for (i= 0; i < arglist->elements; i++)
|
|
|
|
if ((subitem= li++)) {
|
|
|
|
if (!(fp= CondFilter(g, subitem))) {
|
|
|
|
if (vop == OP_OR)
|
|
|
|
return NULL;
|
|
|
|
} else
|
|
|
|
filp= (filp) ? MakeFilter(g, filp, vop, fp) : fp;
|
|
|
|
|
|
|
|
} else
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
} else if (cond->type() == COND::FUNC_ITEM) {
|
|
|
|
unsigned int i;
|
|
|
|
bool iscol, neg= FALSE;
|
|
|
|
PCOL colp[2]= {NULL,NULL};
|
|
|
|
PPARM pfirst= NULL, pprec= NULL;
|
|
|
|
POPER pop;
|
|
|
|
Item_func *condf= (Item_func *)cond;
|
|
|
|
Item* *args= condf->arguments();
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("Func type=%d argnum=%d\n", condf->functype(),
|
2014-10-24 19:22:05 +02:00
|
|
|
condf->argument_count());
|
2014-04-19 11:11:30 +02:00
|
|
|
|
|
|
|
switch (condf->functype()) {
|
|
|
|
case Item_func::EQUAL_FUNC:
|
|
|
|
case Item_func::EQ_FUNC: vop= OP_EQ; break;
|
|
|
|
case Item_func::NE_FUNC: vop= OP_NE; break;
|
|
|
|
case Item_func::LT_FUNC: vop= OP_LT; break;
|
|
|
|
case Item_func::LE_FUNC: vop= OP_LE; break;
|
|
|
|
case Item_func::GE_FUNC: vop= OP_GE; break;
|
|
|
|
case Item_func::GT_FUNC: vop= OP_GT; break;
|
|
|
|
case Item_func::IN_FUNC: vop= OP_IN;
|
|
|
|
case Item_func::BETWEEN:
|
|
|
|
ismul= true;
|
|
|
|
neg= ((Item_func_opt_neg *)condf)->negated;
|
|
|
|
break;
|
|
|
|
default: return NULL;
|
|
|
|
} // endswitch functype
|
|
|
|
|
|
|
|
pop= (POPER)PlugSubAlloc(g, NULL, sizeof(OPER));
|
|
|
|
pop->Name= NULL;
|
|
|
|
pop->Val=vop;
|
|
|
|
pop->Mod= 0;
|
|
|
|
|
|
|
|
if (condf->argument_count() < 2)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
for (i= 0; i < condf->argument_count(); i++) {
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("Argtype(%d)=%d\n", i, args[i]->type());
|
|
|
|
|
|
|
|
if (i >= 2 && !ismul) {
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("Unexpected arg for vop=%d\n", vop);
|
|
|
|
|
|
|
|
continue;
|
|
|
|
} // endif i
|
|
|
|
|
|
|
|
if ((iscol= args[i]->type() == COND::FIELD_ITEM)) {
|
|
|
|
Item_field *pField= (Item_field *)args[i];
|
|
|
|
|
|
|
|
// IN and BETWEEN clauses should be col VOP list
|
|
|
|
if (i && ismul)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (pField->field->table != table ||
|
|
|
|
!(colp[i]= tdbp->ColDB(g, (PSZ)pField->field->field_name, 0)))
|
|
|
|
return NULL; // Column does not belong to this table
|
|
|
|
|
2015-10-18 15:03:45 +02:00
|
|
|
// These types are not yet implemented (buggy)
|
|
|
|
switch (pField->field->type()) {
|
|
|
|
case MYSQL_TYPE_TIMESTAMP:
|
|
|
|
case MYSQL_TYPE_DATE:
|
|
|
|
case MYSQL_TYPE_TIME:
|
|
|
|
case MYSQL_TYPE_DATETIME:
|
|
|
|
case MYSQL_TYPE_YEAR:
|
|
|
|
case MYSQL_TYPE_NEWDATE:
|
|
|
|
return NULL;
|
2015-10-25 21:11:04 +01:00
|
|
|
default:
|
|
|
|
break;
|
2015-10-18 15:03:45 +02:00
|
|
|
} // endswitch type
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace) {
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("Field index=%d\n", pField->field->field_index);
|
|
|
|
htrc("Field name=%s\n", pField->field->field_name);
|
2014-10-21 17:29:51 +02:00
|
|
|
} // endif trace
|
2014-04-19 11:11:30 +02:00
|
|
|
|
|
|
|
} else {
|
|
|
|
char buff[256];
|
|
|
|
String *res, tmp(buff, sizeof(buff), &my_charset_bin);
|
|
|
|
Item_basic_constant *pval= (Item_basic_constant *)args[i];
|
|
|
|
PPARM pp= (PPARM)PlugSubAlloc(g, NULL, sizeof(PARM));
|
|
|
|
|
|
|
|
// IN and BETWEEN clauses should be col VOP list
|
|
|
|
if (!i && (ismul))
|
|
|
|
return NULL;
|
|
|
|
|
2015-09-16 12:11:28 +02:00
|
|
|
switch (args[i]->real_type()) {
|
2014-04-19 11:11:30 +02:00
|
|
|
case COND::STRING_ITEM:
|
2015-09-16 12:11:28 +02:00
|
|
|
res= pval->val_str(&tmp);
|
|
|
|
pp->Value= PlugSubAllocStr(g, NULL, res->ptr(), res->length());
|
2014-10-21 17:29:51 +02:00
|
|
|
pp->Type= (pp->Value) ? TYPE_STRING : TYPE_ERROR;
|
2014-04-19 11:11:30 +02:00
|
|
|
break;
|
|
|
|
case COND::INT_ITEM:
|
|
|
|
pp->Type= TYPE_INT;
|
|
|
|
pp->Value= PlugSubAlloc(g, NULL, sizeof(int));
|
|
|
|
*((int*)pp->Value)= (int)pval->val_int();
|
|
|
|
break;
|
|
|
|
case COND::DATE_ITEM:
|
|
|
|
pp->Type= TYPE_DATE;
|
|
|
|
pp->Value= PlugSubAlloc(g, NULL, sizeof(int));
|
|
|
|
*((int*)pp->Value)= (int)pval->val_int_from_date();
|
|
|
|
break;
|
|
|
|
case COND::REAL_ITEM:
|
|
|
|
pp->Type= TYPE_DOUBLE;
|
|
|
|
pp->Value= PlugSubAlloc(g, NULL, sizeof(double));
|
|
|
|
*((double*)pp->Value)= pval->val_real();
|
|
|
|
break;
|
|
|
|
case COND::DECIMAL_ITEM:
|
|
|
|
pp->Type= TYPE_DOUBLE;
|
|
|
|
pp->Value= PlugSubAlloc(g, NULL, sizeof(double));
|
|
|
|
*((double*)pp->Value)= pval->val_real_from_decimal();
|
|
|
|
break;
|
|
|
|
case COND::CACHE_ITEM: // Possible ???
|
|
|
|
case COND::NULL_ITEM: // TODO: handle this
|
|
|
|
default:
|
|
|
|
return NULL;
|
|
|
|
} // endswitch type
|
|
|
|
|
2015-09-09 01:26:00 +02:00
|
|
|
if (trace)
|
2015-09-16 12:11:28 +02:00
|
|
|
htrc("Value type=%hd\n", pp->Type);
|
2014-04-19 11:11:30 +02:00
|
|
|
|
|
|
|
// Append the value to the argument list
|
|
|
|
if (pprec)
|
|
|
|
pprec->Next= pp;
|
|
|
|
else
|
|
|
|
pfirst= pp;
|
|
|
|
|
|
|
|
pp->Domain= i;
|
|
|
|
pp->Next= NULL;
|
|
|
|
pprec= pp;
|
|
|
|
} // endif type
|
|
|
|
|
|
|
|
} // endfor i
|
|
|
|
|
|
|
|
filp= MakeFilter(g, colp, pop, pfirst, neg);
|
|
|
|
} else {
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("Unsupported condition\n");
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
} // endif's type
|
|
|
|
|
|
|
|
return filp;
|
|
|
|
} // end of CondFilter
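
/* Editorial example (column names and values are made up): a pushed
   condition such as
     WHERE price > 10 AND qty IN (1, 2, 3)
   arrives as a COND_ITEM (COND_AND_FUNC) whose FUNC_ITEM arguments are
   converted recursively, ending in roughly:
     filp= MakeFilter(g, filter_of(price > 10), OP_AND,
                         filter_of(qty IN (1,2,3)));
   An OR branch that cannot be converted makes CondFilter give up and
   return NULL; an unconvertible AND branch is simply dropped, since
   MySQL re-evaluates the complete condition anyway. */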
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
/***********************************************************************/
|
2014-03-10 18:29:04 +01:00
|
|
|
/* Check the WHERE condition and return a MYSQL/ODBC/WQL filter. */
|
2013-02-07 10:34:27 +01:00
|
|
|
/***********************************************************************/
|
2015-07-16 11:05:20 +02:00
|
|
|
PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
2015-07-16 11:05:20 +02:00
|
|
|
AMT tty = filp->Type;
|
2013-11-06 18:22:09 +01:00
|
|
|
char *body= filp->Body;
|
2013-02-07 10:34:27 +01:00
|
|
|
unsigned int i;
|
2013-11-06 18:22:09 +01:00
|
|
|
bool ismul= false, x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC);
|
2015-02-08 18:17:29 +01:00
|
|
|
bool nonul= (tty == TYPE_AM_ODBC && (tdbp->GetMode() == MODE_INSERT ||
|
|
|
|
tdbp->GetMode() == MODE_DELETE));
|
2013-02-07 10:34:27 +01:00
|
|
|
OPVAL vop= OP_XX;
|
|
|
|
|
|
|
|
if (!cond)
|
|
|
|
return NULL;
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Cond type=%d\n", cond->type());
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
if (cond->type() == COND::COND_ITEM) {
|
|
|
|
char *p1, *p2;
|
|
|
|
Item_cond *cond_item= (Item_cond *)cond;
|
|
|
|
|
2013-11-06 18:22:09 +01:00
|
|
|
if (x)
|
|
|
|
return NULL;
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(),
|
2015-02-08 18:17:29 +01:00
|
|
|
cond_item->func_name());
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
switch (cond_item->functype()) {
|
|
|
|
case Item_func::COND_AND_FUNC: vop= OP_AND; break;
|
|
|
|
case Item_func::COND_OR_FUNC: vop= OP_OR; break;
|
|
|
|
default: return NULL;
|
|
|
|
} // endswitch functype
|
|
|
|
|
|
|
|
List<Item>* arglist= cond_item->argument_list();
|
|
|
|
List_iterator<Item> li(*arglist);
|
2015-07-16 11:05:20 +02:00
|
|
|
const Item *subitem;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-11-06 18:22:09 +01:00
|
|
|
p1= body + strlen(body);
|
2013-02-07 10:34:27 +01:00
|
|
|
strcpy(p1, "(");
|
|
|
|
p2= p1 + 1;
|
|
|
|
|
|
|
|
for (i= 0; i < arglist->elements; i++)
|
|
|
|
if ((subitem= li++)) {
|
2015-07-16 11:05:20 +02:00
|
|
|
if (!CheckCond(g, filp, subitem)) {
|
2015-02-08 18:17:29 +01:00
|
|
|
if (vop == OP_OR || nonul)
|
2013-02-07 10:34:27 +01:00
|
|
|
return NULL;
|
|
|
|
else
|
|
|
|
*p2= 0;
|
|
|
|
|
|
|
|
} else {
|
|
|
|
p1= p2 + strlen(p2);
|
2014-04-19 17:02:53 +02:00
|
|
|
strcpy(p1, GetValStr(vop, false));
|
2013-02-07 10:34:27 +01:00
|
|
|
p2= p1 + strlen(p1);
|
|
|
|
} // endif CheckCond
|
|
|
|
|
|
|
|
} else
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (*p1 != '(')
|
|
|
|
strcpy(p1, ")");
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
} else if (cond->type() == COND::FUNC_ITEM) {
|
|
|
|
unsigned int i;
|
2013-11-06 18:22:09 +01:00
|
|
|
bool iscol, neg= FALSE;
|
2013-02-07 10:34:27 +01:00
|
|
|
Item_func *condf= (Item_func *)cond;
|
|
|
|
Item* *args= condf->arguments();
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Func type=%d argnum=%d\n", condf->functype(),
|
2015-07-16 11:05:20 +02:00
|
|
|
condf->argument_count());
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
switch (condf->functype()) {
|
|
|
|
case Item_func::EQUAL_FUNC:
|
2015-07-16 11:05:20 +02:00
|
|
|
case Item_func::EQ_FUNC: vop= OP_EQ; break;
|
|
|
|
case Item_func::NE_FUNC: vop= OP_NE; break;
|
|
|
|
case Item_func::LT_FUNC: vop= OP_LT; break;
|
|
|
|
case Item_func::LE_FUNC: vop= OP_LE; break;
|
|
|
|
case Item_func::GE_FUNC: vop= OP_GE; break;
|
|
|
|
case Item_func::GT_FUNC: vop= OP_GT; break;
|
|
|
|
case Item_func::LIKE_FUNC: vop= OP_LIKE; break;
|
|
|
|
case Item_func::ISNOTNULL_FUNC:
|
|
|
|
neg = true;
|
|
|
|
case Item_func::ISNULL_FUNC: vop= OP_NULL; break;
|
|
|
|
case Item_func::IN_FUNC: vop= OP_IN;
|
2014-04-19 11:11:30 +02:00
|
|
|
case Item_func::BETWEEN:
|
2014-03-10 18:29:04 +01:00
|
|
|
ismul= true;
|
2013-02-07 10:34:27 +01:00
|
|
|
neg= ((Item_func_opt_neg *)condf)->negated;
|
2014-03-10 18:29:04 +01:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
default: return NULL;
|
|
|
|
} // endswitch functype
|
|
|
|
|
|
|
|
if (condf->argument_count() < 2)
|
|
|
|
return NULL;
|
|
|
|
else if (ismul && tty == TYPE_AM_WMI)
|
|
|
|
return NULL; // Not supported by WQL
|
|
|
|
|
2015-07-16 11:05:20 +02:00
|
|
|
if (x && (neg || !(vop == OP_EQ || vop == OP_IN || vop == OP_NULL)))
|
2013-11-06 18:22:09 +01:00
|
|
|
return NULL;
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
for (i= 0; i < condf->argument_count(); i++) {
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Argtype(%d)=%d\n", i, args[i]->type());
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
if (i >= 2 && !ismul) {
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Unexpected arg for vop=%d\n", vop);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
continue;
|
|
|
|
} // endif i
|
|
|
|
|
|
|
|
if ((iscol= args[i]->type() == COND::FIELD_ITEM)) {
|
|
|
|
const char *fnm;
|
|
|
|
ha_field_option_struct *fop;
|
|
|
|
Item_field *pField= (Item_field *)args[i];
|
|
|
|
|
2013-11-06 18:22:09 +01:00
|
|
|
if (x && i)
|
|
|
|
return NULL;
|
2015-07-16 11:05:20 +02:00
|
|
|
else if (pField->field->table != table)
|
|
|
|
return NULL; // Field does not belong to this table
|
|
|
|
else if (tty != TYPE_AM_WMI && IsIndexed(pField->field))
|
|
|
|
return NULL; // Will be handled by ReadKey
|
2013-02-07 10:34:27 +01:00
|
|
|
else
|
|
|
|
fop= GetFieldOptionStruct(pField->field);
|
|
|
|
|
|
|
|
if (fop && fop->special) {
|
|
|
|
if (tty == TYPE_AM_TBL && !stricmp(fop->special, "TABID"))
|
|
|
|
fnm= "TABID";
|
2014-02-03 16:14:13 +01:00
|
|
|
else if (tty == TYPE_AM_PLG)
|
|
|
|
fnm= fop->special;
|
2013-02-07 10:34:27 +01:00
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
} else if (tty == TYPE_AM_TBL)
|
|
|
|
return NULL;
|
|
|
|
else
|
|
|
|
fnm= pField->field->field_name;
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace) {
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Field index=%d\n", pField->field->field_index);
|
|
|
|
htrc("Field name=%s\n", pField->field->field_name);
|
2015-02-08 18:17:29 +01:00
|
|
|
htrc("Field type=%d\n", pField->field->type());
|
|
|
|
htrc("Field_type=%d\n", args[i]->field_type());
|
2014-10-21 17:29:51 +02:00
|
|
|
} // endif trace
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// IN and BETWEEN clauses should be col VOP list
|
|
|
|
if (i && ismul)
|
|
|
|
return NULL;
|
|
|
|
|
2013-11-06 18:22:09 +01:00
|
|
|
strcat(body, fnm);
|
2014-02-03 16:14:13 +01:00
|
|
|
} else if (args[i]->type() == COND::FUNC_ITEM) {
|
|
|
|
if (tty == TYPE_AM_MYSQL) {
|
2015-07-16 11:05:20 +02:00
|
|
|
if (!CheckCond(g, filp, args[i]))
|
2014-02-03 16:14:13 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
} else
|
|
|
|
return NULL;
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
} else {
|
2013-11-06 18:22:09 +01:00
|
|
|
char buff[256];
|
|
|
|
String *res, tmp(buff, sizeof(buff), &my_charset_bin);
|
2013-02-07 10:34:27 +01:00
|
|
|
Item_basic_constant *pval= (Item_basic_constant *)args[i];
|
2015-02-08 18:17:29 +01:00
|
|
|
Item::Type type= args[i]->real_type();
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2015-02-08 18:17:29 +01:00
|
|
|
switch (type) {
|
2014-02-03 16:14:13 +01:00
|
|
|
case COND::STRING_ITEM:
|
|
|
|
case COND::INT_ITEM:
|
|
|
|
case COND::REAL_ITEM:
|
|
|
|
case COND::NULL_ITEM:
|
|
|
|
case COND::DECIMAL_ITEM:
|
|
|
|
case COND::DATE_ITEM:
|
|
|
|
case COND::CACHE_ITEM:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return NULL;
|
|
|
|
} // endswitch type
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
if ((res= pval->val_str(&tmp)) == NULL)
|
|
|
|
return NULL; // To be clarified
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Value=%.*s\n", res->length(), res->ptr());
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// IN and BETWEEN clauses should be col VOP list
|
2013-11-06 18:22:09 +01:00
|
|
|
if (!i && (x || ismul))
|
2013-02-07 10:34:27 +01:00
|
|
|
return NULL;
|
|
|
|
|
2013-11-06 18:22:09 +01:00
|
|
|
if (!x) {
|
|
|
|
// Append the value to the filter
|
2015-02-08 18:17:29 +01:00
|
|
|
switch (args[i]->field_type()) {
|
|
|
|
case MYSQL_TYPE_TIMESTAMP:
|
|
|
|
case MYSQL_TYPE_DATETIME:
|
|
|
|
if (tty == TYPE_AM_ODBC) {
|
|
|
|
strcat(body, "{ts '");
|
2015-03-30 19:03:57 +02:00
|
|
|
strncat(body, res->ptr(), res->length());
|
|
|
|
|
|
|
|
if (res->length() < 19)
|
|
|
|
strcat(body, "1970-01-01 00:00:00" + res->length());
|
|
|
|
|
|
|
|
strcat(body, "'}");
|
2015-02-08 18:17:29 +01:00
|
|
|
break;
|
|
|
|
} // endif ODBC
|
|
|
|
|
|
|
|
case MYSQL_TYPE_DATE:
|
|
|
|
if (tty == TYPE_AM_ODBC) {
|
|
|
|
strcat(body, "{d '");
|
|
|
|
strcat(strncat(body, res->ptr(), res->length()), "'}");
|
|
|
|
break;
|
|
|
|
} // endif ODBC
|
|
|
|
|
|
|
|
case MYSQL_TYPE_TIME:
|
|
|
|
if (tty == TYPE_AM_ODBC) {
|
|
|
|
strcat(body, "{t '");
|
|
|
|
strcat(strncat(body, res->ptr(), res->length()), "'}");
|
|
|
|
break;
|
|
|
|
} // endif ODBC
|
|
|
|
|
|
|
|
case MYSQL_TYPE_VARCHAR:
|
|
|
|
if (tty == TYPE_AM_ODBC && i) {
|
|
|
|
switch (args[0]->field_type()) {
|
|
|
|
case MYSQL_TYPE_TIMESTAMP:
|
|
|
|
case MYSQL_TYPE_DATETIME:
|
|
|
|
strcat(body, "{ts '");
|
|
|
|
strncat(body, res->ptr(), res->length());
|
2015-03-28 20:18:46 +01:00
|
|
|
|
|
|
|
if (res->length() < 19)
|
|
|
|
strcat(body, "1970-01-01 00:00:00" + res->length());
|
|
|
|
|
2015-02-08 18:17:29 +01:00
|
|
|
strcat(body, "'}");
|
|
|
|
break;
|
|
|
|
case MYSQL_TYPE_DATE:
|
|
|
|
strcat(body, "{d '");
|
|
|
|
strncat(body, res->ptr(), res->length());
|
|
|
|
strcat(body, "'}");
|
|
|
|
break;
|
|
|
|
case MYSQL_TYPE_TIME:
|
|
|
|
strcat(body, "{t '");
|
|
|
|
strncat(body, res->ptr(), res->length());
|
|
|
|
strcat(body, "'}");
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
strcat(body, "'");
|
|
|
|
strncat(body, res->ptr(), res->length());
|
|
|
|
strcat(body, "'");
|
|
|
|
} // endswitch field type
|
|
|
|
|
|
|
|
} else {
|
|
|
|
strcat(body, "'");
|
|
|
|
strncat(body, res->ptr(), res->length());
|
|
|
|
strcat(body, "'");
|
|
|
|
} // endif tty
|
|
|
|
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
strncat(body, res->ptr(), res->length());
|
|
|
|
} // endswitch field type
|
2013-11-06 18:22:09 +01:00
|
|
|
|
|
|
|
} else {
|
|
|
|
if (args[i]->field_type() == MYSQL_TYPE_VARCHAR) {
|
|
|
|
// Add the command to the list
|
2014-10-01 09:13:11 +02:00
|
|
|
PCMD *ncp, cmdp= new(g) CMD(g, (char*)res->c_ptr());
|
2013-11-06 18:22:09 +01:00
|
|
|
|
|
|
|
for (ncp= &filp->Cmds; *ncp; ncp= &(*ncp)->Next) ;
|
|
|
|
|
|
|
|
*ncp= cmdp;
|
|
|
|
} else
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
} // endif x
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
} // endif
|
|
|
|
|
2013-11-06 18:22:09 +01:00
|
|
|
if (!x) {
|
|
|
|
if (!i)
|
|
|
|
strcat(body, GetValStr(vop, neg));
|
|
|
|
else if (vop == OP_XX && i == 1)
|
|
|
|
strcat(body, " AND ");
|
|
|
|
else if (vop == OP_IN)
|
|
|
|
strcat(body, (i == condf->argument_count() - 1) ? ")" : ",");
|
|
|
|
|
|
|
|
} // endif x
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
} // endfor i
|
|
|
|
|
2013-11-06 18:22:09 +01:00
|
|
|
if (x)
|
|
|
|
filp->Op= vop;
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
} else {
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Unsupported condition\n");
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
return NULL;
|
|
|
|
} // endif's type
|
|
|
|
|
|
|
|
return filp;
|
|
|
|
} // end of CheckCond
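
/* Editorial example (column and value are made up): for an ODBC table,
   a pushed condition like
     WHERE hired = '2015-01-01'
   ends up in filp->Body using the ODBC date escape built above:
     hired = {d '2015-01-01'}
   while plain character comparisons are emitted as col = 'value'. */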
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
Push condition down to the table handler.
|
|
|
|
|
|
|
|
@param cond Condition to be pushed. The condition tree must not be
|
|
|
|
modified by the caller.
|
|
|
|
|
|
|
|
@return
|
|
|
|
The 'remainder' condition that caller must use to filter out records.
|
|
|
|
NULL means the handler will not return rows that do not match the
|
|
|
|
passed condition.
|
|
|
|
|
|
|
|
@note
|
|
|
|
CONNECT handles the filtering only for table types that construct
|
|
|
|
an SQL or WQL query, but still leaves it to MySQL because only some
|
|
|
|
parts of the filter may be relevant.
|
|
|
|
The first suballocate finds the position where the string will be
|
|
|
|
constructed in the sarea. The second one does make the suballocation
|
|
|
|
with the proper length.
|
|
|
|
*/
|
|
|
|
const COND *ha_connect::cond_push(const COND *cond)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_connect::cond_push");
|
|
|
|
|
|
|
|
if (tdbp) {
|
2014-10-21 17:29:51 +02:00
|
|
|
int rc;
|
2014-04-19 11:11:30 +02:00
|
|
|
PGLOBAL& g= xp->g;
|
|
|
|
AMT tty= tdbp->GetAmType();
|
|
|
|
bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC);
|
|
|
|
bool b= (tty == TYPE_AM_WMI || tty == TYPE_AM_ODBC ||
|
|
|
|
tty == TYPE_AM_TBL || tty == TYPE_AM_MYSQL ||
|
|
|
|
tty == TYPE_AM_PLG || x);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
// Save stack and allocation environment and prepare error return
|
|
|
|
if (g->jump_level == MAX_JUMP) {
|
|
|
|
strcpy(g->Message, MSG(TOO_MANY_JUMPS));
|
|
|
|
DBUG_RETURN(cond);
|
|
|
|
} // endif jump_level
|
|
|
|
|
|
|
|
// This should never happen but is done to avoid crashing
|
|
|
|
if ((rc= setjmp(g->jumper[++g->jump_level])) != 0)
|
|
|
|
goto fin;
|
|
|
|
|
2014-03-10 18:29:04 +01:00
|
|
|
if (b) {
|
2015-07-16 11:05:20 +02:00
|
|
|
PCFIL filp;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2015-07-16 11:05:20 +02:00
|
|
|
if ((filp= tdbp->GetCondFil()) && filp->Cond == cond &&
|
|
|
|
filp->Idx == active_index && filp->Type == tty)
|
|
|
|
goto fin; // Already done
|
|
|
|
|
|
|
|
filp= new(g) CONDFIL(cond, active_index, tty);
|
2013-11-06 18:22:09 +01:00
|
|
|
filp->Body= (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0);
|
|
|
|
*filp->Body= 0;
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2015-07-16 11:05:20 +02:00
|
|
|
if (CheckCond(g, filp, cond)) {
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("cond_push: %s\n", filp->Body);
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2013-11-06 18:22:09 +01:00
|
|
|
if (!x)
|
|
|
|
PlugSubAlloc(g, NULL, strlen(filp->Body) + 1);
|
|
|
|
else
|
2014-03-10 18:29:04 +01:00
|
|
|
cond= NULL; // Does this work?
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2014-03-10 18:29:04 +01:00
|
|
|
tdbp->SetCondFil(filp);
|
2013-11-06 18:22:09 +01:00
|
|
|
} else if (x && cond)
|
2014-03-10 18:29:04 +01:00
|
|
|
tdbp->SetCondFil(filp); // Wrong filter
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2015-02-11 21:39:41 +01:00
|
|
|
} else if (tty != TYPE_AM_JSN && tty != TYPE_AM_JSON)
|
2014-04-19 11:11:30 +02:00
|
|
|
tdbp->SetFilter(CondFilter(g, (Item *)cond));
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
fin:
|
|
|
|
g->jump_level--;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endif tdbp
|
|
|
|
|
|
|
|
// Let MySQL do the filtering
|
|
|
|
DBUG_RETURN(cond);
|
|
|
|
} // end of cond_push
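
/* Editorial sketch of the two-step suballocation mentioned in the
   comment above, assuming PlugSubAlloc with a zero length only returns
   the current position in the sarea:
     char *body= (char*)PlugSubAlloc(g, NULL, 0);   // find the position
     *body= 0;
     strcat(body, "price > 10");                    // made-up filter text
     PlugSubAlloc(g, NULL, strlen(body) + 1);       // commit real length
*/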

/**
  Number of rows in table. It will only be called if
  (table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
*/
ha_rows ha_connect::records()
{
  if (!valid_info)
    info(HA_STATUS_VARIABLE);

  if (tdbp)
    return stats.records;
  else
    return HA_POS_ERROR;

} // end of records

/**
|
|
|
|
Return an error message specific to this handler.
|
|
|
|
|
|
|
|
@param error error code previously returned by handler
|
|
|
|
@param buf pointer to String where to add error message
|
|
|
|
|
|
|
|
@return
|
|
|
|
Returns true if this is a temporary error
|
|
|
|
*/
|
|
|
|
bool ha_connect::get_error_message(int error, String* buf)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_connect::get_error_message");
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
if (xp && xp->g) {
|
|
|
|
PGLOBAL g= xp->g;
|
2014-04-05 19:26:32 +02:00
|
|
|
char msg[3072]; // MAX_STR * 3
|
2014-02-03 16:14:13 +01:00
|
|
|
uint dummy_errors;
|
|
|
|
uint32 len= copy_and_convert(msg, strlen(g->Message) * 3,
|
|
|
|
system_charset_info,
|
|
|
|
g->Message, strlen(g->Message),
|
|
|
|
&my_charset_latin1,
|
|
|
|
&dummy_errors);
|
2014-04-05 19:26:32 +02:00
|
|
|
|
|
|
|
if (trace)
|
2015-05-10 12:14:21 +02:00
|
|
|
htrc("GEM(%d): len=%u %s\n", error, len, g->Message);
|
2014-04-05 19:26:32 +02:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
msg[len]= '\0';
|
|
|
|
buf->copy(msg, (uint)strlen(msg), system_charset_info);
|
|
|
|
} else
|
|
|
|
buf->copy("Cannot retrieve msg", 19, system_charset_info);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(false);
|
|
|
|
} // end of get_error_message

/**
  Convert a filename-encoded partition name back to the system charset.
*/
static char *decode(PGLOBAL g, const char *pn)
{
  char  *buf= (char*)PlugSubAlloc(g, NULL, strlen(pn) + 1);
  uint   dummy_errors;
  uint32 len= copy_and_convert(buf, strlen(pn) + 1,
                               system_charset_info,
                               pn, strlen(pn),
                               &my_charset_filename,
                               &dummy_errors);

  buf[len]= '\0';
  return buf;
} // end of decode

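
/* Editorial note (path is made up): for an outward partitioned table,
   open() below calls decode() on the part of the table path that
   follows the last '#', e.g. for a partition data file named
     ./test/t1#P#p2016
   it stores the decoded partition name "p2016" in partname. Partition
   names written with my_charset_filename escapes are converted back to
   the system character set here. */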
/**
|
|
|
|
@brief
|
|
|
|
Used for opening tables. The name will be the name of the file.
|
|
|
|
|
|
|
|
@details
|
|
|
|
A table is opened when it needs to be opened; e.g. when a request comes in
|
|
|
|
for a SELECT on the table (tables are not open and closed for each request,
|
|
|
|
they are cached).
|
|
|
|
|
|
|
|
Called from handler.cc by handler::ha_open(). The server opens all tables by
|
|
|
|
calling ha_open() which then calls the handler specific open().
|
|
|
|
|
|
|
|
@note
|
|
|
|
For CONNECT no open can be done here because field information is not yet
|
|
|
|
updated. >>>>> TO BE CHECKED <<<<<
|
|
|
|
  (Thread information can be obtained by using 'ha_thd')
|
|
|
|
|
|
|
|
@see
|
|
|
|
handler::ha_open() in handler.cc
|
|
|
|
*/
|
|
|
|
int ha_connect::open(const char *name, int mode, uint test_if_locked)
|
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
DBUG_ENTER("ha_connect::open");
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("open: name=%s mode=%d test=%u\n", name, mode, test_if_locked);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-07-23 16:29:16 +02:00
|
|
|
if (!(share= get_share()))
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_RETURN(1);
|
|
|
|
|
|
|
|
thr_lock_data_init(&share->lock,&lock,NULL);
|
|
|
|
|
|
|
|
// Try to get the user if possible
|
2013-04-19 20:35:43 +02:00
|
|
|
xp= GetUser(ha_thd(), xp);
|
2013-05-19 19:25:06 +02:00
|
|
|
PGLOBAL g= (xp) ? xp->g : NULL;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-19 20:35:43 +02:00
|
|
|
// Try to set the database environment
|
2014-04-19 11:11:30 +02:00
|
|
|
if (g) {
|
2013-04-19 20:35:43 +02:00
|
|
|
rc= (CntCheckDB(g, this, name)) ? (-2) : 0;
|
2014-04-19 11:11:30 +02:00
|
|
|
|
|
|
|
if (g->Mrr) {
|
|
|
|
// This should only happen for the mrr secondary handler
|
|
|
|
mrr= true;
|
|
|
|
g->Mrr= false;
|
|
|
|
} else
|
|
|
|
mrr= false;
|
|
|
|
|
2014-05-31 12:31:26 +02:00
|
|
|
#if defined(WITH_PARTITION_STORAGE_ENGINE)
|
|
|
|
if (table->part_info) {
|
2014-07-17 18:13:51 +02:00
|
|
|
if (GetStringOption("Filename") || GetStringOption("Tabname")
|
|
|
|
|| GetStringOption("Connect")) {
|
|
|
|
strcpy(partname, decode(g, strrchr(name, '#') + 1));
|
|
|
|
// strcpy(partname, table->part_info->curr_part_elem->partition_name);
|
|
|
|
part_id= &table->part_info->full_part_field_set;
|
|
|
|
} else // Inward table
|
2014-05-31 12:31:26 +02:00
|
|
|
strcpy(partname, strrchr(name, slash) + 1);
|
2014-07-17 18:13:51 +02:00
|
|
|
part_id= &table->part_info->full_part_field_set; // Temporary
|
2014-05-31 12:31:26 +02:00
|
|
|
} // endif part_info
|
|
|
|
#endif // WITH_PARTITION_STORAGE_ENGINE
|
2014-04-19 11:11:30 +02:00
|
|
|
} else
|
2013-05-19 19:25:06 +02:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of open
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Make the indexes for this table
|
|
|
|
*/
|
2015-05-10 12:14:21 +02:00
|
|
|
int ha_connect::optimize(THD* thd, HA_CHECK_OPT*)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
PGLOBAL& g= xp->g;
|
|
|
|
PDBUSER dup= PlgGetUser(g);
|
|
|
|
|
|
|
|
// Ignore error on the opt file
|
|
|
|
dup->Check &= ~CHK_OPT;
|
|
|
|
tdbp= GetTDB(g);
|
|
|
|
dup->Check |= CHK_OPT;
|
|
|
|
|
2014-03-10 18:29:04 +01:00
|
|
|
if (tdbp) {
|
2014-08-22 17:30:22 +02:00
|
|
|
bool dop= IsTypeIndexable(GetRealType(NULL));
|
|
|
|
bool dox= (((PTDBASE)tdbp)->GetDef()->Indexable() == 1);
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2014-08-22 17:30:22 +02:00
|
|
|
if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) {
|
2013-04-10 23:38:27 +02:00
|
|
|
if (rc == RC_INFO) {
|
2013-07-23 16:29:16 +02:00
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
|
2013-04-10 23:38:27 +02:00
|
|
|
rc= 0;
|
|
|
|
} else
|
2013-08-09 18:02:47 +02:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
} // endif rc
|
2013-04-10 23:38:27 +02:00
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
} else
|
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
} // end of optimize

/**
  @brief
  Closes a table.

  @details
  Called from sql_base.cc, sql_select.cc, and table.cc. In sql_select.cc it is
  only used to close up temporary tables or during the process where a
  temporary table is converted over to being a myisam table.

  For sql_base.cc look at close_data_tables().

  @see
  sql_base.cc, sql_select.cc and table.cc
*/
int ha_connect::close(void)
|
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
DBUG_ENTER("ha_connect::close");
|
|
|
|
|
|
|
|
// If this is called by a later query, the table may have
|
|
|
|
// been already closed and the tdbp is not valid anymore.
|
|
|
|
if (tdbp && xp->last_query_id == valid_query_id)
|
|
|
|
rc= CloseTable(xp->g);
|
|
|
|
|
2013-07-23 16:29:16 +02:00
|
|
|
DBUG_RETURN(rc);
|
2013-02-07 10:34:27 +01:00
|
|
|
} // end of close

/**
  @brief
  write_row() inserts a row. No extra() hint is given currently if a bulk load
  is happening. buf is a byte array of data. You can use the field
  information to extract the data from the native byte array type.

  @details
  Example of this would be:
  @code
  for (Field **field=table->field ; *field ; field++)
  {
    ...
  }
  @endcode

  See ha_tina.cc for an example of extracting all of the data as strings.
  ha_berkeley.cc has an example of how to store it intact by "packing" it
  for ha_berkeley's own native storage type.

  See the note for update_row() on auto_increments and timestamps. This
  case also applies to write_row().

  Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
  sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.

  @see
  item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
  sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc and sql_update.cc
*/
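
/*
  A minimal sketch of the field loop mentioned above (illustrative only and
  not used by CONNECT, which does the equivalent work in ScanRecord below):

    for (Field **field= table->field; *field; field++) {
      if (bitmap_is_set(table->write_set, (*field)->field_index)) {
        char    buff[MAX_FIELD_WIDTH];
        String  str(buff, sizeof(buff), &my_charset_bin);

        (*field)->val_str(&str);      // Column value as a string
        // ... copy/convert str into the external data file format ...
      } // endif write_set

    } // endfor field
*/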
int ha_connect::write_row(uchar *buf)
|
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
PGLOBAL& g= xp->g;
|
|
|
|
DBUG_ENTER("ha_connect::write_row");
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
// This is not tested yet
|
2014-07-17 18:13:51 +02:00
|
|
|
if (xmod == MODE_ALTER) {
|
|
|
|
if (IsPartitioned() && GetStringOption("Filename", NULL))
|
|
|
|
// Why does this happen now that check_if_supported_inplace_alter is called?
|
|
|
|
DBUG_RETURN(0); // Alter table on an outward partition table
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
xmod= MODE_INSERT;
|
2014-08-08 19:46:02 +02:00
|
|
|
} else if (xmod == MODE_ANY)
|
|
|
|
DBUG_RETURN(0); // Probably never met
|
2014-02-03 16:14:13 +01:00
|
|
|
|
2013-08-12 21:51:56 +02:00
|
|
|
// Open the table if it was not opened yet (locked)
|
|
|
|
if (!IsOpened() || xmod != tdbp->GetMode()) {
|
|
|
|
if (IsOpened())
|
|
|
|
CloseTable(g);
|
|
|
|
|
2013-11-11 13:00:39 +01:00
|
|
|
if ((rc= OpenTable(g)))
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_RETURN(rc);
|
|
|
|
|
2013-08-12 21:51:56 +02:00
|
|
|
} // endif isopened
|
|
|
|
|
2013-04-03 21:54:02 +02:00
|
|
|
#if 0 // AUTO_INCREMENT NIY
|
2013-04-04 15:36:42 +02:00
|
|
|
if (table->next_number_field && buf == table->record[0]) {
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if ((error= update_auto_increment()))
|
|
|
|
return error;
|
|
|
|
|
2013-04-03 21:54:02 +02:00
|
|
|
    } // endif next_number_field
|
|
|
|
#endif // 0
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
// Set column values from the passed pseudo record
|
|
|
|
if ((rc= ScanRecord(g, buf)))
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
|
|
|
|
// Return result code from write operation
|
|
|
|
if (CntWriteRow(g, tdbp)) {
|
2013-04-04 23:27:54 +02:00
|
|
|
DBUG_PRINT("write_row", ("%s", g->Message));
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("write_row: %s\n", g->Message);
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2014-08-22 17:30:22 +02:00
|
|
|
} else // Table is modified
|
|
|
|
nox= false; // Indexes to be remade
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of write_row

/**
  @brief
  Yes, update_row() does what you expect, it updates a row. old_data will have
  the previous row record in it, while new_data will have the newest data in it.
  Keep in mind that the server can do updates based on ordering if an ORDER BY
  clause was used. Consecutive ordering is not guaranteed.

  @details
  Currently new_data will not have an updated auto_increment record, or
  an updated timestamp field. You can do these for example by doing:
  @code
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
    table->timestamp_field->set_time();
  if (table->next_number_field && record == table->record[0])
    update_auto_increment();
  @endcode

  Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.

  @see
  sql_select.cc, sql_acl.cc, sql_update.cc and sql_insert.cc
*/
int ha_connect::update_row(const uchar *old_data, uchar *new_data)
|
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
PGLOBAL& g= xp->g;
|
|
|
|
DBUG_ENTER("ha_connect::update_row");
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace > 1)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("update_row: old=%s new=%s\n", old_data, new_data);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// Check values for possible change in indexed column
|
|
|
|
if ((rc= CheckRecord(g, old_data, new_data)))
|
2014-07-17 18:13:51 +02:00
|
|
|
DBUG_RETURN(rc);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
if (CntUpdateRow(g, tdbp)) {
|
2013-04-04 23:27:54 +02:00
|
|
|
DBUG_PRINT("update_row", ("%s", g->Message));
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("update_row CONNECT: %s\n", g->Message);
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2014-08-22 17:30:22 +02:00
|
|
|
} else
|
|
|
|
nox= false; // Table is modified
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of update_row

/**
  @brief
  This will delete a row. buf will contain a copy of the row to be deleted.
  The server will call this right after the current row has been called (from
  either a previous rnd_next() or index call).

  @details
  If you keep a pointer to the last row or can access a primary key it will
  make doing the deletion quite a bit easier. Keep in mind that the server does
  not guarantee consecutive deletions. ORDER BY clauses can be used.

  Called in sql_acl.cc and sql_udf.cc to manage internal table
  information. Called in sql_delete.cc, sql_insert.cc, and
  sql_select.cc. In sql_select it is used for removing duplicates
  while in insert it is used for REPLACE calls.

  @see
  sql_acl.cc, sql_udf.cc, sql_delete.cc, sql_insert.cc and sql_select.cc
*/
int ha_connect::delete_row(const uchar *)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
DBUG_ENTER("ha_connect::delete_row");
|
|
|
|
|
|
|
|
if (CntDeleteRow(xp->g, tdbp, false)) {
|
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("delete_row CONNECT: %s\n", xp->g->Message);
|
2014-08-22 17:30:22 +02:00
|
|
|
} else
|
|
|
|
nox= false; // To remake indexes
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of delete_row

/****************************************************************************/
/* We seem to come here at the beginning of an index use. */
/****************************************************************************/
int ha_connect::index_init(uint idx, bool sorted)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
PGLOBAL& g= xp->g;
|
|
|
|
DBUG_ENTER("index_init");
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("index_init: this=%p idx=%u sorted=%d\n", this, idx, sorted);
|
2014-02-03 16:14:13 +01:00
|
|
|
|
2014-04-19 17:02:53 +02:00
|
|
|
if (GetIndexType(GetRealType()) == 2) {
|
2014-07-17 18:13:51 +02:00
|
|
|
if (xmod == MODE_READ)
|
|
|
|
// This is a remote index
|
|
|
|
xmod= MODE_READX;
|
2014-04-19 17:02:53 +02:00
|
|
|
|
|
|
|
if (!(rc= rnd_init(0))) {
|
2014-08-07 17:59:21 +02:00
|
|
|
// if (xmod == MODE_READX) {
|
2014-07-17 18:13:51 +02:00
|
|
|
active_index= idx;
|
|
|
|
indexing= IsUnique(idx) ? 1 : 2;
|
2014-08-07 17:59:21 +02:00
|
|
|
// } else {
|
|
|
|
// active_index= MAX_KEY;
|
|
|
|
// indexing= 0;
|
|
|
|
// } // endif xmod
|
2014-07-17 18:13:51 +02:00
|
|
|
|
2014-04-19 17:02:53 +02:00
|
|
|
} //endif rc
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // endif index type
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
if ((rc= rnd_init(0)))
|
2014-05-05 17:36:16 +02:00
|
|
|
DBUG_RETURN(rc);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-08-12 21:51:56 +02:00
|
|
|
if (locked == 2) {
|
|
|
|
// Indexes are not updated in lock write mode
|
|
|
|
active_index= MAX_KEY;
|
|
|
|
indexing= 0;
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
} // endif locked
|
|
|
|
|
2014-08-22 17:30:22 +02:00
|
|
|
indexing= CntIndexInit(g, tdbp, (signed)idx, sorted);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
if (indexing <= 0) {
|
2013-04-04 23:27:54 +02:00
|
|
|
DBUG_PRINT("index_init", ("%s", g->Message));
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("index_init CONNECT: %s\n", g->Message);
|
2013-03-05 19:30:40 +01:00
|
|
|
active_index= MAX_KEY;
|
2013-03-10 19:48:45 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2014-04-26 00:17:26 +02:00
|
|
|
} else if (((PTDBDOX)tdbp)->To_Kindex) {
|
2013-02-07 10:34:27 +01:00
|
|
|
if (((PTDBDOX)tdbp)->To_Kindex->GetNum_K()) {
|
|
|
|
if (((PTDBASE)tdbp)->GetFtype() != RECFM_NAF)
|
|
|
|
((PTDBDOX)tdbp)->GetTxfp()->ResetBuffer(g);
|
|
|
|
|
|
|
|
active_index= idx;
|
2014-09-06 18:08:28 +02:00
|
|
|
// } else { // Void table
|
|
|
|
// active_index= MAX_KEY;
|
|
|
|
// indexing= 0;
|
|
|
|
} // endif Num
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
rc= 0;
|
|
|
|
} // endif indexing
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("index_init: rc=%d indexing=%d active_index=%d\n",
|
2014-02-03 16:14:13 +01:00
|
|
|
rc, indexing, active_index);
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of index_init

/****************************************************************************/
/* We seem to come here at the end of an index use. */
/****************************************************************************/
int ha_connect::index_end()
|
|
|
|
{
|
|
|
|
DBUG_ENTER("index_end");
|
2013-03-05 19:30:40 +01:00
|
|
|
active_index= MAX_KEY;
|
2014-04-19 11:11:30 +02:00
|
|
|
ds_mrr.dsmrr_close();
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_RETURN(rnd_end());
|
|
|
|
} // end of index_end

/****************************************************************************/
/* This is internally called by all indexed reading functions. */
/****************************************************************************/
int ha_connect::ReadIndexed(uchar *buf, OPVAL op, const key_range *kr)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
//statistic_increment(ha_read_key_count, &LOCK_status);
|
|
|
|
|
2015-07-16 11:05:20 +02:00
|
|
|
switch (CntIndexRead(xp->g, tdbp, op, kr, mrr)) {
|
2013-02-07 10:34:27 +01:00
|
|
|
case RC_OK:
|
|
|
|
xp->fnd++;
|
|
|
|
rc= MakeRecord((char*)buf);
|
|
|
|
break;
|
|
|
|
case RC_EF: // End of file
|
|
|
|
rc= HA_ERR_END_OF_FILE;
|
|
|
|
break;
|
|
|
|
case RC_NF: // Not found
|
|
|
|
xp->nfd++;
|
|
|
|
rc= (op == OP_SAME) ? HA_ERR_END_OF_FILE : HA_ERR_KEY_NOT_FOUND;
|
|
|
|
break;
|
|
|
|
default: // Read error
|
2013-04-04 23:27:54 +02:00
|
|
|
DBUG_PRINT("ReadIndexed", ("%s", xp->g->Message));
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("ReadIndexed: %s\n", xp->g->Message);
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endswitch RC
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace > 1)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("ReadIndexed: op=%d rc=%d\n", op, rc);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2015-03-19 12:21:08 +01:00
|
|
|
table->status= (rc == RC_OK) ? 0 : STATUS_NOT_FOUND;
|
2013-02-07 10:34:27 +01:00
|
|
|
return rc;
|
|
|
|
} // end of ReadIndexed

#ifdef NOT_USED
/**
  @brief
  Positions an index cursor to the index specified in the handle. Fetches the
  row if available. If the key value is null, begin at the first key of the
  index.
*/
int ha_connect::index_read_map(uchar *buf, const uchar *key,
                               key_part_map keypart_map __attribute__((unused)),
                               enum ha_rkey_function find_flag
                               __attribute__((unused)))
{
  DBUG_ENTER("ha_connect::index_read");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
#endif // NOT_USED


/****************************************************************************/
/* This is called by handler::index_read_map. */
/****************************************************************************/
int ha_connect::index_read(uchar * buf, const uchar * key, uint key_len,
|
|
|
|
enum ha_rkey_function find_flag)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
OPVAL op= OP_XX;
|
|
|
|
DBUG_ENTER("ha_connect::index_read");
|
|
|
|
|
|
|
|
switch(find_flag) {
|
|
|
|
case HA_READ_KEY_EXACT: op= OP_EQ; break;
|
|
|
|
case HA_READ_AFTER_KEY: op= OP_GT; break;
|
|
|
|
case HA_READ_KEY_OR_NEXT: op= OP_GE; break;
|
2014-04-19 11:11:30 +02:00
|
|
|
default: DBUG_RETURN(-1); break;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endswitch find_flag
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace > 1)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("%p index_read: op=%d\n", this, op);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-08-07 17:59:21 +02:00
|
|
|
if (indexing > 0) {
|
2015-07-16 11:05:20 +02:00
|
|
|
start_key.key= key;
|
|
|
|
start_key.length= key_len;
|
|
|
|
start_key.flag= find_flag;
|
|
|
|
start_key.keypart_map= 0;
|
|
|
|
|
|
|
|
rc= ReadIndexed(buf, op, &start_key);
|
2014-08-07 17:59:21 +02:00
|
|
|
|
|
|
|
if (rc == HA_ERR_INTERNAL_ERROR) {
|
|
|
|
nox= true; // To block making indexes
|
|
|
|
abort= true; // Don't rename temp file
|
|
|
|
} // endif rc
|
|
|
|
|
|
|
|
} else
|
2014-07-17 18:13:51 +02:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR; // HA_ERR_KEY_NOT_FOUND ?
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of index_read

/**
  @brief
  Used to read forward through the index.
*/
int ha_connect::index_next(uchar *buf)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
DBUG_ENTER("ha_connect::index_next");
|
|
|
|
//statistic_increment(ha_read_next_count, &LOCK_status);
|
|
|
|
|
|
|
|
if (indexing > 0)
|
|
|
|
rc= ReadIndexed(buf, OP_NEXT);
|
|
|
|
else if (!indexing)
|
|
|
|
rc= rnd_next(buf);
|
|
|
|
else
|
2013-03-10 19:48:45 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of index_next

/**
  @brief
  Used to read backwards through the index.
*/
int ha_connect::index_prev(uchar *buf)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_connect::index_prev");
|
2014-04-08 11:15:08 +02:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (indexing > 0) {
|
|
|
|
rc= ReadIndexed(buf, OP_PREV);
|
|
|
|
} else
|
|
|
|
rc= HA_ERR_WRONG_COMMAND;
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of index_prev
|

/**
  @brief
  index_first() asks for the first key in the index.

  @details
  Called from opt_range.cc, opt_sum.cc, sql_handler.cc, and sql_select.cc.

  @see
  opt_range.cc, opt_sum.cc, sql_handler.cc and sql_select.cc
*/
int ha_connect::index_first(uchar *buf)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
DBUG_ENTER("ha_connect::index_first");
|
|
|
|
|
|
|
|
if (indexing > 0)
|
|
|
|
rc= ReadIndexed(buf, OP_FIRST);
|
|
|
|
else if (indexing < 0)
|
2013-03-10 19:48:45 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2013-02-07 10:34:27 +01:00
|
|
|
else if (CntRewindTable(xp->g, tdbp)) {
|
2015-03-19 12:21:08 +01:00
|
|
|
table->status= STATUS_NOT_FOUND;
|
2013-03-10 19:48:45 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2013-02-07 10:34:27 +01:00
|
|
|
} else
|
|
|
|
rc= rnd_next(buf);
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of index_first

/**
  @brief
  index_last() asks for the last key in the index.

  @details
  Called from opt_range.cc, opt_sum.cc, sql_handler.cc, and sql_select.cc.

  @see
  opt_range.cc, opt_sum.cc, sql_handler.cc and sql_select.cc
*/
int ha_connect::index_last(uchar *buf)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_connect::index_last");
|
2014-04-01 18:14:57 +02:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (indexing <= 0) {
|
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
|
|
|
} else
|
|
|
|
rc= ReadIndexed(buf, OP_LAST);
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
2014-04-19 11:11:30 +02:00
|
|
|
}

/****************************************************************************/
/* This is called to get more rows having the same index value. */
/****************************************************************************/
//t ha_connect::index_next_same(uchar *buf, const uchar *key, uint keylen)
|
|
|
|
int ha_connect::index_next_same(uchar *buf, const uchar *, uint)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
DBUG_ENTER("ha_connect::index_next_same");
|
|
|
|
//statistic_increment(ha_read_next_count, &LOCK_status);
|
|
|
|
|
|
|
|
if (!indexing)
|
|
|
|
rc= rnd_next(buf);
|
|
|
|
else if (indexing > 0)
|
|
|
|
rc= ReadIndexed(buf, OP_SAME);
|
|
|
|
else
|
2013-03-10 19:48:45 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of index_next_same

/**
  @brief
  rnd_init() is called when the system wants the storage engine to do a table
  scan. See the example in the introduction at the top of this file to see when
  rnd_init() is called.

  @details
  Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc,
  and sql_update.cc.

  @note
  We always call open and external_lock/start_stmt before coming here.

  @see
  filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc and sql_update.cc
*/
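
/*
  For reference, a simplified sketch of the call sequence the server follows
  for a full table scan (standard handler protocol, not CONNECT specific):

    h->external_lock(thd, F_RDLCK);
    h->rnd_init(true);                        // scan= true
    while (h->rnd_next(buf) != HA_ERR_END_OF_FILE)
      ;                                       // process each returned row
    h->rnd_end();
    h->external_lock(thd, F_UNLCK);
*/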
int ha_connect::rnd_init(bool scan)
|
|
|
|
{
|
2013-04-19 20:35:43 +02:00
|
|
|
PGLOBAL g= ((table && table->in_use) ? GetPlug(table->in_use, xp) :
|
2013-02-07 10:34:27 +01:00
|
|
|
(xp) ? xp->g : NULL);
|
|
|
|
DBUG_ENTER("ha_connect::rnd_init");
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
// This is not tested yet
|
|
|
|
if (xmod == MODE_ALTER) {
|
|
|
|
xmod= MODE_READ;
|
|
|
|
alter= 1;
|
|
|
|
} // endif xmod
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-04-19 11:11:30 +02:00
|
|
|
htrc("rnd_init: this=%p scan=%d xmod=%d alter=%d\n",
|
2014-02-03 16:14:13 +01:00
|
|
|
this, scan, xmod, alter);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-11-09 17:32:57 +01:00
|
|
|
if (!g || !table || xmod == MODE_INSERT)
|
|
|
|
DBUG_RETURN(HA_ERR_INITIALIZATION);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-12-06 01:37:56 +01:00
|
|
|
  // Do not close the table if it was already opened (locked?)
|
2014-03-21 22:24:54 +01:00
|
|
|
if (IsOpened()) {
|
2014-07-17 18:13:51 +02:00
|
|
|
if (IsPartitioned() && xmod != MODE_INSERT)
|
|
|
|
if (CheckColumnList(g)) // map can have been changed
|
|
|
|
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
|
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
if (tdbp->OpenDB(g)) // Rewind table
|
2014-03-21 22:24:54 +01:00
|
|
|
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
|
|
|
|
else
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
|
|
|
|
} else if (xp->CheckQuery(valid_query_id))
|
2013-12-06 01:37:56 +01:00
|
|
|
tdbp= NULL; // Not valid anymore
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-11-09 17:32:57 +01:00
|
|
|
// When updating, to avoid skipped update, force the table
|
2014-04-19 11:11:30 +02:00
|
|
|
// handler to retrieve write-only fields to be able to compare
|
2013-11-09 17:32:57 +01:00
|
|
|
// records and detect data change.
|
|
|
|
if (xmod == MODE_UPDATE)
|
|
|
|
bitmap_union(table->read_set, table->write_set);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-05-09 12:35:19 +02:00
|
|
|
if (OpenTable(g, xmod == MODE_DELETE))
|
|
|
|
DBUG_RETURN(HA_ERR_INITIALIZATION);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
xp->nrd= xp->fnd= xp->nfd= 0;
|
|
|
|
xp->tb1= my_interval_timer();
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
} // end of rnd_init

/**
  @brief
  Not described.

  @note
  The previous version said:
  Stop scanning of table. Note that this may be called several times during
  execution of a sub select.
  =====> This has been moved to external lock to avoid closing subselect tables.
*/
int ha_connect::rnd_end()
|
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
DBUG_ENTER("ha_connect::rnd_end");
|
|
|
|
|
|
|
|
// If this is called by a later query, the table may have
|
|
|
|
// been already closed and the tdbp is not valid anymore.
|
|
|
|
// if (tdbp && xp->last_query_id == valid_query_id)
|
|
|
|
// rc= CloseTable(xp->g);
|
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
ds_mrr.dsmrr_close();
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of rnd_end

/**
  @brief
  This is called for each row of the table scan. When you run out of records
  you should return HA_ERR_END_OF_FILE. Fill buf up with the row information.
  The Field structure for the table is the key to getting data into buf
  in a manner that will allow the server to understand it.

  @details
  Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc,
  and sql_update.cc.

  @see
  filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc and sql_update.cc
*/
int ha_connect::rnd_next(uchar *buf)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
DBUG_ENTER("ha_connect::rnd_next");
|
|
|
|
//statistic_increment(ha_read_rnd_next_count, &LOCK_status);
|
|
|
|
|
|
|
|
if (tdbp->GetMode() == MODE_ANY) {
|
|
|
|
// We will stop on next read
|
|
|
|
if (!stop) {
|
|
|
|
stop= true;
|
|
|
|
DBUG_RETURN(RC_OK);
|
|
|
|
} else
|
|
|
|
DBUG_RETURN(HA_ERR_END_OF_FILE);
|
|
|
|
|
|
|
|
} // endif Mode
|
|
|
|
|
|
|
|
switch (CntReadNext(xp->g, tdbp)) {
|
|
|
|
case RC_OK:
|
|
|
|
rc= MakeRecord((char*)buf);
|
|
|
|
break;
|
|
|
|
case RC_EF: // End of file
|
|
|
|
rc= HA_ERR_END_OF_FILE;
|
|
|
|
break;
|
|
|
|
case RC_NF: // Not found
|
|
|
|
rc= HA_ERR_RECORD_DELETED;
|
|
|
|
break;
|
|
|
|
default: // Read error
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("rnd_next CONNECT: %s\n", xp->g->Message);
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= (records()) ? HA_ERR_INTERNAL_ERROR : HA_ERR_END_OF_FILE;
|
|
|
|
break;
|
|
|
|
} // endswitch RC
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace > 1 && (rc || !(xp->nrd++ % 16384))) {
|
2013-02-07 10:34:27 +01:00
|
|
|
ulonglong tb2= my_interval_timer();
|
|
|
|
double elapsed= (double) (tb2 - xp->tb1) / 1000000000ULL;
|
|
|
|
DBUG_PRINT("rnd_next", ("rc=%d nrd=%u fnd=%u nfd=%u sec=%.3lf\n",
|
2013-02-08 03:27:12 +01:00
|
|
|
rc, (uint)xp->nrd, (uint)xp->fnd,
|
|
|
|
(uint)xp->nfd, elapsed));
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("rnd_next: rc=%d nrd=%u fnd=%u nfd=%u sec=%.3lf\n",
|
|
|
|
rc, (uint)xp->nrd, (uint)xp->fnd,
|
|
|
|
(uint)xp->nfd, elapsed);
|
2013-02-07 10:34:27 +01:00
|
|
|
xp->tb1= tb2;
|
|
|
|
xp->fnd= xp->nfd= 0;
|
|
|
|
} // endif nrd
|
|
|
|
|
2015-03-19 12:21:08 +01:00
|
|
|
table->status= (!rc) ? 0 : STATUS_NOT_FOUND;
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of rnd_next

/**
  @brief
  position() is called after each call to rnd_next() if the data needs
  to be ordered. You can do something like the following to store
  the position:
  @code
  my_store_ptr(ref, ref_length, current_position);
  @endcode

  @details
  The server uses ref to store data. ref_length in the above case is
  the size needed to store current_position. ref is just a byte array
  that the server will maintain. If you are using offsets to mark rows, then
  current_position should be the offset. If it is a primary key like in
  BDB, then it needs to be a primary key.

  Called from filesort.cc, sql_select.cc, sql_delete.cc, and sql_update.cc.

  @see
  filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc
*/
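
/*
  Sketch of the store/retrieve round trip described above and in rnd_pos()
  below (illustrative only; the CONNECT implementations rely on GetRecpos and
  SetRecpos of the TDB object):

    // In position(): remember where the current row is
    my_store_ptr(ref, ref_length, (my_off_t)current_position);

    // Later, in rnd_pos(): go back to that row
    my_off_t saved_pos= my_get_ptr(pos, ref_length);
*/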
void ha_connect::position(const uchar *)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_connect::position");
|
2013-12-06 01:37:56 +01:00
|
|
|
//if (((PTDBASE)tdbp)->GetDef()->Indexable())
|
2013-02-07 10:34:27 +01:00
|
|
|
my_store_ptr(ref, ref_length, (my_off_t)((PTDBASE)tdbp)->GetRecpos());
|
2014-05-10 12:21:08 +02:00
|
|
|
|
2015-07-16 11:05:20 +02:00
|
|
|
if (trace > 1)
|
2014-05-10 12:21:08 +02:00
|
|
|
htrc("position: pos=%d\n", ((PTDBASE)tdbp)->GetRecpos());
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
} // end of position

/**
  @brief
  This is like rnd_next, but you are given a position to use
  to determine the row. The position will be of the type that you stored in
  ref. You can use my_get_ptr(pos,ref_length) to retrieve whatever key
  or position you saved when position() was called.

  @details
  Called from filesort.cc, records.cc, sql_insert.cc, sql_select.cc, and sql_update.cc.

  @note
  Is this really useful? It was never called even when sorting.

  @see
  filesort.cc, records.cc, sql_insert.cc, sql_select.cc and sql_update.cc
*/
int ha_connect::rnd_pos(uchar *buf, uchar *pos)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
PTDBASE tp= (PTDBASE)tdbp;
|
|
|
|
DBUG_ENTER("ha_connect::rnd_pos");
|
|
|
|
|
2014-05-10 12:21:08 +02:00
|
|
|
if (!tp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) {
|
|
|
|
if (trace)
|
|
|
|
htrc("rnd_pos: %d\n", tp->GetRecpos());
|
|
|
|
|
|
|
|
tp->SetFilter(NULL);
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= rnd_next(buf);
|
2014-05-10 12:21:08 +02:00
|
|
|
} else
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= HA_ERR_KEY_NOT_FOUND;
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of rnd_pos

/**
  @brief
  ::info() is used to return information to the optimizer. See my_base.h for
  the complete description.

  @details
  Currently this table handler doesn't implement most of the fields really needed.
  SHOW also makes use of this data.

  You will probably want to have the following in your code:
  @code
  if (records < 2)
    records= 2;
  @endcode
  The reason is that the server will optimize for cases of only a single
  record. If, in a table scan, you don't know the number of records, it
  will probably be better to set records to two so you can return as many
  records as you need. Along with records, a few more variables you may wish
  to set are:
    records
    deleted
    data_file_length
    index_file_length
    delete_length
    check_time
  Take a look at the public variables in handler.h for more information.

  Called in filesort.cc, ha_heap.cc, item_sum.cc, opt_sum.cc, sql_delete.cc,
  sql_delete.cc, sql_derived.cc, sql_select.cc, sql_select.cc, sql_select.cc,
  sql_select.cc, sql_select.cc, sql_show.cc, sql_show.cc, sql_show.cc, sql_show.cc,
  sql_table.cc, sql_union.cc, and sql_update.cc.

  @see
  filesort.cc, ha_heap.cc, item_sum.cc, opt_sum.cc, sql_delete.cc, sql_delete.cc,
  sql_derived.cc, sql_select.cc, sql_select.cc, sql_select.cc, sql_select.cc,
  sql_select.cc, sql_show.cc, sql_show.cc, sql_show.cc, sql_show.cc, sql_table.cc,
  sql_union.cc and sql_update.cc
*/
int ha_connect::info(uint flag)
|
|
|
|
{
|
|
|
|
bool pure= false;
|
2013-04-19 20:35:43 +02:00
|
|
|
PGLOBAL g= GetPlug((table) ? table->in_use : NULL, xp);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_ENTER("ha_connect::info");
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("%p In info: flag=%u valid_info=%d\n", this, flag, valid_info);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-07-17 18:13:51 +02:00
|
|
|
// tdbp must be available to get updated info
|
|
|
|
if (xp->CheckQuery(valid_query_id) || !tdbp) {
|
|
|
|
PDBUSER dup= PlgGetUser(g);
|
|
|
|
PCATLG cat= (dup) ? dup->Catalog : NULL;
|
2014-04-14 14:26:48 +02:00
|
|
|
|
2014-07-17 18:13:51 +02:00
|
|
|
if (xmod == MODE_ANY || xmod == MODE_ALTER) {
|
|
|
|
// Pure info, not a query
|
|
|
|
pure= true;
|
|
|
|
xp->CheckCleanup();
|
|
|
|
} // endif xmod
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-07-17 18:13:51 +02:00
|
|
|
// This is necessary for getting file length
|
2014-08-23 19:17:15 +02:00
|
|
|
if (table)
|
|
|
|
SetDataPath(g, table->s->db.str);
|
2014-07-17 18:13:51 +02:00
|
|
|
else
|
|
|
|
DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen
|
|
|
|
|
|
|
|
if (!(tdbp= GetTDB(g)))
|
|
|
|
DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen
|
|
|
|
|
|
|
|
valid_info = false;
|
|
|
|
} // endif tdbp
|
|
|
|
|
|
|
|
if (!valid_info) {
|
2013-02-07 10:34:27 +01:00
|
|
|
valid_info= CntInfo(g, tdbp, &xinfo);
|
2014-04-19 17:02:53 +02:00
|
|
|
|
|
|
|
if (((signed)xinfo.records) < 0)
|
|
|
|
DBUG_RETURN(HA_ERR_INITIALIZATION); // Error in Cardinality
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endif valid_info
|
|
|
|
|
|
|
|
if (flag & HA_STATUS_VARIABLE) {
|
|
|
|
stats.records= xinfo.records;
|
|
|
|
stats.deleted= 0;
|
|
|
|
stats.data_file_length= xinfo.data_file_length;
|
|
|
|
stats.index_file_length= 0;
|
|
|
|
stats.delete_length= 0;
|
|
|
|
stats.check_time= 0;
|
|
|
|
stats.mean_rec_length= xinfo.mean_rec_length;
|
|
|
|
} // endif HA_STATUS_VARIABLE
|
|
|
|
|
|
|
|
if (flag & HA_STATUS_CONST) {
|
|
|
|
// This is imported from the previous handler and must be reconsidered
|
2014-02-06 15:14:09 +01:00
|
|
|
stats.max_data_file_length= 4294967295LL;
|
|
|
|
stats.max_index_file_length= 4398046510080LL;
|
2013-02-07 10:34:27 +01:00
|
|
|
stats.create_time= 0;
|
|
|
|
data_file_name= xinfo.data_file_name;
|
|
|
|
index_file_name= NULL;
|
|
|
|
// sortkey= (uint) - 1; // Table is not sorted
|
|
|
|
ref_length= sizeof(int); // Pointer size to row
|
|
|
|
table->s->db_options_in_use= 03;
|
|
|
|
stats.block_size= 1024;
|
|
|
|
table->s->keys_in_use.set_prefix(table->s->keys);
|
|
|
|
table->s->keys_for_keyread= table->s->keys_in_use;
|
|
|
|
// table->s->keys_for_keyread.subtract(table->s->read_only_keys);
|
|
|
|
table->s->db_record_offset= 0;
|
|
|
|
} // endif HA_STATUS_CONST
|
|
|
|
|
|
|
|
if (flag & HA_STATUS_ERRKEY) {
|
|
|
|
errkey= 0;
|
|
|
|
} // endif HA_STATUS_ERRKEY
|
|
|
|
|
|
|
|
if (flag & HA_STATUS_TIME)
|
|
|
|
stats.update_time= 0;
|
|
|
|
|
|
|
|
if (flag & HA_STATUS_AUTO)
|
|
|
|
stats.auto_increment_value= 1;
|
|
|
|
|
|
|
|
if (tdbp && pure)
|
|
|
|
CloseTable(g); // Not used anymore
|
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
} // end of info

/**
  @brief
  extra() is called whenever the server wishes to send a hint to
  the storage engine. The myisam engine implements the most hints.
  ha_innodb.cc has the most exhaustive list of these hints.

  @note
  This is not yet implemented for CONNECT.

  @see
  ha_innodb.cc
*/
int ha_connect::extra(enum ha_extra_function /*operation*/)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_connect::extra");
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
} // end of extra

/**
  @brief
  Used to delete all rows in a table, including cases of truncate and cases where
  the optimizer realizes that all rows will be removed as a result of an SQL statement.

  @details
  Called from item_sum.cc by Item_func_group_concat::clear(),
  Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
  Called from sql_delete.cc by mysql_delete().
  Called from sql_select.cc by JOIN::reinit().
  Called from sql_union.cc by st_select_lex_unit::exec().

  @see
  Item_func_group_concat::clear(), Item_sum_count_distinct::clear() and
  Item_func_group_concat::clear() in item_sum.cc;
  mysql_delete() in sql_delete.cc;
  JOIN::reinit() in sql_select.cc and
  st_select_lex_unit::exec() in sql_union.cc.
*/
int ha_connect::delete_all_rows()
|
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
PGLOBAL g= xp->g;
|
|
|
|
DBUG_ENTER("ha_connect::delete_all_rows");
|
|
|
|
|
2013-11-11 13:00:39 +01:00
|
|
|
if (tdbp && tdbp->GetUse() == USE_OPEN &&
|
|
|
|
tdbp->GetAmType() != TYPE_AM_XML &&
|
|
|
|
((PTDBASE)tdbp)->GetFtype() != RECFM_NAF)
|
2013-09-22 13:40:31 +02:00
|
|
|
// Close and reopen the table so it will be deleted
|
|
|
|
rc= CloseTable(g);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-11-11 13:00:39 +01:00
|
|
|
if (!(rc= OpenTable(g))) {
|
2013-02-07 10:34:27 +01:00
|
|
|
if (CntDeleteRow(g, tdbp, true)) {
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("%s\n", g->Message);
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
2014-08-22 17:30:22 +02:00
|
|
|
} else
|
|
|
|
nox= false;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-11-11 13:00:39 +01:00
|
|
|
} // endif rc
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of delete_all_rows
|
|
|
|
|
2013-03-22 08:28:58 +01:00
|
|
|
|
2013-12-31 13:08:29 +01:00
|
|
|
bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn)
|
2013-03-22 08:28:58 +01:00
|
|
|
{
|
2013-12-31 13:08:29 +01:00
|
|
|
const char *db= (dbn && *dbn) ? dbn : NULL;
|
2014-02-03 16:14:13 +01:00
|
|
|
TABTYPE type=GetRealType(options);
|
2013-12-31 13:08:29 +01:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
switch (type) {
|
2013-03-22 08:28:58 +01:00
|
|
|
case TAB_UNDEF:
|
2013-04-29 13:50:20 +02:00
|
|
|
// case TAB_CATLG:
|
2013-03-22 08:28:58 +01:00
|
|
|
case TAB_PLG:
|
|
|
|
case TAB_JCT:
|
|
|
|
case TAB_DMY:
|
|
|
|
case TAB_NIY:
|
2013-04-19 20:35:43 +02:00
|
|
|
my_printf_error(ER_UNKNOWN_ERROR,
|
|
|
|
"Unsupported table type %s", MYF(0), options->type);
|
|
|
|
return true;
|
2013-03-22 08:28:58 +01:00
|
|
|
|
|
|
|
case TAB_DOS:
|
|
|
|
case TAB_FIX:
|
|
|
|
case TAB_BIN:
|
|
|
|
case TAB_CSV:
|
|
|
|
case TAB_FMT:
|
|
|
|
case TAB_DBF:
|
|
|
|
case TAB_XML:
|
|
|
|
case TAB_INI:
|
|
|
|
case TAB_VEC:
|
2015-01-19 18:55:25 +01:00
|
|
|
case TAB_JSON:
|
2014-02-03 16:14:13 +01:00
|
|
|
if (options->filename && *options->filename) {
|
|
|
|
char *s, path[FN_REFLEN], dbpath[FN_REFLEN];
|
2015-05-27 16:23:38 +02:00
|
|
|
#if defined(__WIN__)
|
2014-04-19 11:11:30 +02:00
|
|
|
s= "\\";
|
2015-05-27 16:23:38 +02:00
|
|
|
#else // !__WIN__
|
2014-04-19 11:11:30 +02:00
|
|
|
s= "/";
|
2015-05-27 16:23:38 +02:00
|
|
|
#endif // !__WIN__
|
2014-02-03 16:14:13 +01:00
|
|
|
strcpy(dbpath, mysql_real_data_home);
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
if (db)
|
|
|
|
strcat(strcat(dbpath, db), s);
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
(void) fn_format(path, options->filename, dbpath, "",
|
|
|
|
MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
|
|
|
|
|
|
|
|
if (!is_secure_file_path(path)) {
|
|
|
|
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv");
|
|
|
|
return true;
|
|
|
|
} // endif path
|
|
|
|
|
|
|
|
} else
|
2013-04-02 11:10:42 +02:00
|
|
|
return false;
|
2013-03-22 08:28:58 +01:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
/* Fall through to check FILE_ACL */
|
2013-03-22 08:28:58 +01:00
|
|
|
case TAB_ODBC:
|
|
|
|
case TAB_MYSQL:
|
|
|
|
case TAB_DIR:
|
|
|
|
case TAB_MAC:
|
|
|
|
case TAB_WMI:
|
|
|
|
case TAB_OEM:
|
2015-07-26 00:03:34 +02:00
|
|
|
#ifdef NO_EMBEDDED_ACCESS_CHECKS
|
|
|
|
return false;
|
|
|
|
#endif
|
|
|
|
      /*
        If table or table->mdl_ticket is NULL - it's a DDL, e.g. CREATE TABLE.
        If the table has an MDL_EXCLUSIVE lock - it's a DDL too, e.g. the
        insert step of CREATE ... SELECT.

        Otherwise it's a DML, the table was normally opened, locked,
        privileges were already checked, and table->grant.privilege is set.
        With SQL SECURITY DEFINER, table->grant.privilege has definer's privileges.
      */
|
|
|
|
if (!table || !table->mdl_ticket || table->mdl_ticket->get_type() == MDL_EXCLUSIVE)
|
|
|
|
return check_access(thd, FILE_ACL, db, NULL, NULL, 0, 0);
|
|
|
|
if (table->grant.privilege & FILE_ACL)
|
|
|
|
return false;
|
|
|
|
status_var_increment(thd->status_var.access_denied_errors);
|
|
|
|
my_error(access_denied_error_code(thd->password), MYF(0),
|
|
|
|
thd->security_ctx->priv_user, thd->security_ctx->priv_host,
|
|
|
|
(thd->password ? ER(ER_YES) : ER(ER_NO)));
|
|
|
|
return true;
|
2013-03-22 08:28:58 +01:00
|
|
|
|
2013-04-29 13:50:20 +02:00
|
|
|
// This is temporary until a solution is found
|
2013-03-22 08:28:58 +01:00
|
|
|
case TAB_TBL:
|
2013-04-29 13:50:20 +02:00
|
|
|
case TAB_XCL:
|
|
|
|
case TAB_PRX:
|
|
|
|
case TAB_OCCUR:
|
2013-05-10 20:22:21 +02:00
|
|
|
case TAB_PIVOT:
|
2014-10-31 12:28:07 +01:00
|
|
|
case TAB_VIR:
|
2013-03-22 08:28:58 +01:00
|
|
|
return false;
|
2014-02-03 16:14:13 +01:00
|
|
|
} // endswitch type
|
2013-03-22 08:28:58 +01:00
|
|
|
|
2013-04-19 20:35:43 +02:00
|
|
|
my_printf_error(ER_UNKNOWN_ERROR, "check_privileges failed", MYF(0));
|
|
|
|
return true;
|
2013-04-29 13:50:20 +02:00
|
|
|
} // end of check_privileges
|
2013-03-22 08:28:58 +01:00
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
// Check that two indexes are equivalent
|
2013-04-10 14:24:28 +02:00
|
|
|
bool ha_connect::IsSameIndex(PIXDEF xp1, PIXDEF xp2)
|
|
|
|
{
|
|
|
|
bool b= true;
|
|
|
|
PKPDEF kp1, kp2;
|
|
|
|
|
|
|
|
if (stricmp(xp1->Name, xp2->Name))
|
|
|
|
b= false;
|
|
|
|
else if (xp1->Nparts != xp2->Nparts ||
|
|
|
|
xp1->MaxSame != xp2->MaxSame ||
|
|
|
|
xp1->Unique != xp2->Unique)
|
|
|
|
b= false;
|
|
|
|
else for (kp1= xp1->ToKeyParts, kp2= xp2->ToKeyParts;
|
|
|
|
b && (kp1 || kp2);
|
|
|
|
kp1= kp1->Next, kp2= kp2->Next)
|
|
|
|
if (!kp1 || !kp2)
|
|
|
|
b= false;
|
|
|
|
else if (stricmp(kp1->Name, kp2->Name))
|
|
|
|
b= false;
|
|
|
|
else if (kp1->Klen != kp2->Klen)
|
|
|
|
b= false;
|
|
|
|
|
|
|
|
return b;
|
|
|
|
} // end of IsSameIndex
|
2013-03-22 08:28:58 +01:00
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
MODE ha_connect::CheckMode(PGLOBAL g, THD *thd,
|
2013-08-12 21:51:56 +02:00
|
|
|
MODE newmode, bool *chk, bool *cras)
|
|
|
|
{
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace) {
|
2013-08-12 21:51:56 +02:00
|
|
|
LEX_STRING *query_string= thd_query_string(thd);
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("%p check_mode: cmdtype=%d\n", this, thd_sql_command(thd));
|
|
|
|
htrc("Cmd=%.*s\n", (int) query_string->length, query_string->str);
|
2014-10-21 17:29:51 +02:00
|
|
|
} // endif trace
|
2013-08-12 21:51:56 +02:00
|
|
|
|
|
|
|
// Next code is temporarily replaced until sql_command is set
|
|
|
|
stop= false;
|
|
|
|
|
|
|
|
if (newmode == MODE_WRITE) {
|
|
|
|
switch (thd_sql_command(thd)) {
|
|
|
|
case SQLCOM_LOCK_TABLES:
|
|
|
|
locked= 2;
|
|
|
|
case SQLCOM_CREATE_TABLE:
|
|
|
|
case SQLCOM_INSERT:
|
|
|
|
case SQLCOM_LOAD:
|
|
|
|
case SQLCOM_INSERT_SELECT:
|
|
|
|
newmode= MODE_INSERT;
|
|
|
|
break;
|
|
|
|
// case SQLCOM_REPLACE:
|
|
|
|
// case SQLCOM_REPLACE_SELECT:
|
|
|
|
// newmode= MODE_UPDATE; // To be checked
|
|
|
|
// break;
|
|
|
|
case SQLCOM_DELETE:
|
|
|
|
case SQLCOM_DELETE_MULTI:
|
|
|
|
case SQLCOM_TRUNCATE:
|
|
|
|
newmode= MODE_DELETE;
|
|
|
|
break;
|
|
|
|
case SQLCOM_UPDATE:
|
|
|
|
case SQLCOM_UPDATE_MULTI:
|
|
|
|
newmode= MODE_UPDATE;
|
|
|
|
break;
|
|
|
|
case SQLCOM_SELECT:
|
|
|
|
case SQLCOM_OPTIMIZE:
|
|
|
|
newmode= MODE_READ;
|
|
|
|
break;
|
2014-11-01 17:08:39 +01:00
|
|
|
case SQLCOM_FLUSH:
|
|
|
|
locked= 0;
|
2013-08-12 21:51:56 +02:00
|
|
|
case SQLCOM_DROP_TABLE:
|
|
|
|
case SQLCOM_RENAME_TABLE:
|
|
|
|
newmode= MODE_ANY;
|
|
|
|
break;
|
|
|
|
case SQLCOM_CREATE_VIEW:
|
|
|
|
case SQLCOM_DROP_VIEW:
|
|
|
|
newmode= MODE_ANY;
|
|
|
|
break;
|
2014-02-03 16:14:13 +01:00
|
|
|
case SQLCOM_ALTER_TABLE:
|
|
|
|
newmode= MODE_ALTER;
|
|
|
|
break;
|
2014-07-17 18:13:51 +02:00
|
|
|
case SQLCOM_DROP_INDEX:
|
|
|
|
case SQLCOM_CREATE_INDEX:
|
|
|
|
// if (!IsPartitioned()) {
|
|
|
|
newmode= MODE_ANY;
|
|
|
|
break;
|
|
|
|
// } // endif partitioned
|
|
|
|
|
2013-08-12 21:51:56 +02:00
|
|
|
default:
|
2014-04-19 17:02:53 +02:00
|
|
|
htrc("Unsupported sql_command=%d\n", thd_sql_command(thd));
|
2013-08-12 21:51:56 +02:00
|
|
|
strcpy(g->Message, "CONNECT Unsupported command");
|
|
|
|
my_message(ER_NOT_ALLOWED_COMMAND, g->Message, MYF(0));
|
|
|
|
newmode= MODE_ERROR;
|
|
|
|
break;
|
|
|
|
} // endswitch newmode
|
|
|
|
|
|
|
|
} else if (newmode == MODE_READ) {
|
|
|
|
switch (thd_sql_command(thd)) {
|
|
|
|
case SQLCOM_CREATE_TABLE:
|
|
|
|
*chk= true;
|
|
|
|
*cras= true;
|
|
|
|
case SQLCOM_INSERT:
|
|
|
|
case SQLCOM_LOAD:
|
|
|
|
case SQLCOM_INSERT_SELECT:
|
|
|
|
// case SQLCOM_REPLACE:
|
|
|
|
// case SQLCOM_REPLACE_SELECT:
|
|
|
|
case SQLCOM_DELETE:
|
|
|
|
case SQLCOM_DELETE_MULTI:
|
|
|
|
case SQLCOM_TRUNCATE:
|
|
|
|
case SQLCOM_UPDATE:
|
|
|
|
case SQLCOM_UPDATE_MULTI:
|
|
|
|
case SQLCOM_SELECT:
|
|
|
|
case SQLCOM_OPTIMIZE:
|
2015-02-24 23:18:04 +01:00
|
|
|
case SQLCOM_SET_OPTION:
|
2013-08-12 21:51:56 +02:00
|
|
|
break;
|
|
|
|
case SQLCOM_LOCK_TABLES:
|
|
|
|
locked= 1;
|
|
|
|
break;
|
|
|
|
case SQLCOM_DROP_TABLE:
|
|
|
|
case SQLCOM_RENAME_TABLE:
|
|
|
|
newmode= MODE_ANY;
|
|
|
|
break;
|
|
|
|
case SQLCOM_CREATE_VIEW:
|
|
|
|
case SQLCOM_DROP_VIEW:
|
|
|
|
newmode= MODE_ANY;
|
|
|
|
break;
|
2014-02-03 16:14:13 +01:00
|
|
|
case SQLCOM_ALTER_TABLE:
|
|
|
|
*chk= true;
|
|
|
|
newmode= MODE_ALTER;
|
|
|
|
break;
|
2014-07-17 18:13:51 +02:00
|
|
|
case SQLCOM_DROP_INDEX:
|
|
|
|
case SQLCOM_CREATE_INDEX:
|
|
|
|
// if (!IsPartitioned()) {
|
|
|
|
*chk= true;
|
|
|
|
newmode= MODE_ANY;
|
|
|
|
break;
|
|
|
|
// } // endif partitioned
|
|
|
|
|
2015-03-28 20:18:46 +01:00
|
|
|
case SQLCOM_END:
|
|
|
|
// Met in procedures: IF(EXISTS(SELECT...
|
|
|
|
newmode= MODE_READ;
|
|
|
|
break;
|
2013-08-12 21:51:56 +02:00
|
|
|
default:
|
2014-04-19 17:02:53 +02:00
|
|
|
htrc("Unsupported sql_command=%d\n", thd_sql_command(thd));
|
2013-08-12 21:51:56 +02:00
|
|
|
strcpy(g->Message, "CONNECT Unsupported command");
|
|
|
|
my_message(ER_NOT_ALLOWED_COMMAND, g->Message, MYF(0));
|
|
|
|
newmode= MODE_ERROR;
|
|
|
|
break;
|
|
|
|
} // endswitch newmode
|
|
|
|
|
|
|
|
} // endif's newmode
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("New mode=%d\n", newmode);
|
2013-08-12 21:51:56 +02:00
|
|
|
|
|
|
|
return newmode;
|
|
|
|
} // end of check_mode
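
/*
  Examples of the mapping performed by CheckMode above, derived from the
  switch cases (listed only as an illustration):

    INSERT INTO t1 ...    thd_sql_command() == SQLCOM_INSERT       -> MODE_INSERT
    UPDATE t1 SET ...     thd_sql_command() == SQLCOM_UPDATE       -> MODE_UPDATE
    DELETE FROM t1 ...    thd_sql_command() == SQLCOM_DELETE       -> MODE_DELETE
    SELECT ... FROM t1    thd_sql_command() == SQLCOM_SELECT       -> MODE_READ
    ALTER TABLE t1 ...    thd_sql_command() == SQLCOM_ALTER_TABLE  -> MODE_ALTER
*/
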
int ha_connect::start_stmt(THD *thd, thr_lock_type lock_type)
|
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
bool chk=false, cras= false;
|
|
|
|
MODE newmode;
|
|
|
|
PGLOBAL g= GetPlug(thd, xp);
|
|
|
|
DBUG_ENTER("ha_connect::start_stmt");
|
|
|
|
|
|
|
|
// Action will depend on lock_type
|
|
|
|
switch (lock_type) {
|
|
|
|
case TL_WRITE_ALLOW_WRITE:
|
|
|
|
case TL_WRITE_CONCURRENT_INSERT:
|
|
|
|
case TL_WRITE_DELAYED:
|
|
|
|
case TL_WRITE_DEFAULT:
|
|
|
|
case TL_WRITE_LOW_PRIORITY:
|
|
|
|
case TL_WRITE:
|
|
|
|
case TL_WRITE_ONLY:
|
|
|
|
newmode= MODE_WRITE;
|
|
|
|
break;
|
|
|
|
case TL_READ:
|
|
|
|
case TL_READ_WITH_SHARED_LOCKS:
|
|
|
|
case TL_READ_HIGH_PRIORITY:
|
|
|
|
case TL_READ_NO_INSERT:
|
|
|
|
case TL_READ_DEFAULT:
|
|
|
|
newmode= MODE_READ;
|
|
|
|
break;
|
|
|
|
case TL_UNLOCK:
|
|
|
|
default:
|
|
|
|
newmode= MODE_ANY;
|
|
|
|
break;
|
|
|
|
} // endswitch mode
|
|
|
|
|
|
|
|
xmod= CheckMode(g, thd, newmode, &chk, &cras);
|
|
|
|
DBUG_RETURN((xmod == MODE_ERROR) ? HA_ERR_INTERNAL_ERROR : 0);
|
|
|
|
} // end of start_stmt

/**
  @brief
  This creates a lock on the table. If you are implementing a storage engine
  that can handle transactions look at ha_berkeley.cc to see how you will
  want to go about doing this. Otherwise you should consider calling flock()
  here. Hint: Read the section "locking functions for mysql" in lock.cc to understand
  this.

  @details
  Called from lock.cc by lock_external() and unlock_external(). Also called
  from sql_table.cc by copy_data_between_tables().

  @note
  Following what we did in the MySQL XDB handler, we use this call to actually
  physically open the table. This could be reconsidered when finalizing this handler
  design, once we have a better understanding of what MariaDB does.

  @see
  lock_external() and unlock_external() in lock.cc;
  the section "locking functions for mysql" in lock.cc;
  copy_data_between_tables() in sql_table.cc.
*/
int ha_connect::external_lock(THD *thd, int lock_type)
|
|
|
|
{
|
2013-03-22 12:49:41 +01:00
|
|
|
int rc= 0;
|
2013-06-03 14:43:47 +02:00
|
|
|
bool xcheck=false, cras= false;
|
2013-03-22 12:49:41 +01:00
|
|
|
MODE newmode;
|
2014-04-22 19:15:08 +02:00
|
|
|
PTOS options= GetTableOptionStruct();
|
2013-04-19 20:35:43 +02:00
|
|
|
PGLOBAL g= GetPlug(thd, xp);
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_ENTER("ha_connect::external_lock");
|
|
|
|
|
2013-05-24 07:56:04 +02:00
|
|
|
DBUG_ASSERT(thd == current_thd);
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("external_lock: this=%p thd=%p xp=%p g=%p lock_type=%d\n",
|
2014-02-03 16:14:13 +01:00
|
|
|
this, thd, xp, g, lock_type);
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
if (!g)
|
2013-03-10 19:48:45 +01:00
|
|
|
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// Action will depend on lock_type
|
|
|
|
switch (lock_type) {
|
|
|
|
case F_WRLCK:
|
|
|
|
newmode= MODE_WRITE;
|
|
|
|
break;
|
|
|
|
case F_RDLCK:
|
|
|
|
newmode= MODE_READ;
|
|
|
|
break;
|
|
|
|
case F_UNLCK:
|
|
|
|
default:
|
|
|
|
newmode= MODE_ANY;
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endswitch mode
|
|
|
|
|
|
|
|
if (newmode == MODE_ANY) {
|
2014-02-03 16:14:13 +01:00
|
|
|
int sqlcom= thd_sql_command(thd);
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
// This is unlocking, do it by closing the table
|
2014-02-03 16:14:13 +01:00
|
|
|
if (xp->CheckQueryID() && sqlcom != SQLCOM_UNLOCK_TABLES
|
2014-03-21 02:40:27 +01:00
|
|
|
&& sqlcom != SQLCOM_LOCK_TABLES
|
2014-11-01 17:08:39 +01:00
|
|
|
&& sqlcom != SQLCOM_FLUSH
|
|
|
|
&& sqlcom != SQLCOM_BEGIN
|
2014-03-21 02:40:27 +01:00
|
|
|
&& sqlcom != SQLCOM_DROP_TABLE) {
|
|
|
|
sprintf(g->Message, "external_lock: unexpected command %d", sqlcom);
|
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
|
|
|
|
DBUG_RETURN(0);
|
2014-04-19 11:11:30 +02:00
|
|
|
} else if (g->Xchk) {
|
2014-02-03 16:14:13 +01:00
|
|
|
if (!tdbp) {
|
|
|
|
if (!(tdbp= GetTDB(g)))
|
2013-12-04 23:53:30 +01:00
|
|
|
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
|
|
|
|
else if (!((PTDBASE)tdbp)->GetDef()->Indexable()) {
|
2013-12-11 16:52:01 +01:00
|
|
|
sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName());
|
2014-03-21 02:40:27 +01:00
|
|
|
// DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error
|
2013-12-04 23:53:30 +01:00
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
|
|
|
|
DBUG_RETURN(0);
|
2014-04-19 17:02:53 +02:00
|
|
|
} else if (((PTDBASE)tdbp)->GetDef()->Indexable() == 1) {
|
|
|
|
bool oldsep= ((PCHK)g->Xchk)->oldsep;
|
|
|
|
bool newsep= ((PCHK)g->Xchk)->newsep;
|
|
|
|
PTDBDOS tdp= (PTDBDOS)tdbp;
|
|
|
|
|
|
|
|
PDOSDEF ddp= (PDOSDEF)tdp->GetDef();
|
|
|
|
PIXDEF xp, xp1, xp2, drp=NULL, adp= NULL;
|
|
|
|
PIXDEF oldpix= ((PCHK)g->Xchk)->oldpix;
|
|
|
|
PIXDEF newpix= ((PCHK)g->Xchk)->newpix;
|
|
|
|
PIXDEF *xlst, *xprc;
|
|
|
|
|
|
|
|
ddp->SetIndx(oldpix);
|
|
|
|
|
|
|
|
if (oldsep != newsep) {
|
|
|
|
// All indexes have to be remade
|
|
|
|
ddp->DeleteIndexFile(g, NULL);
|
|
|
|
oldpix= NULL;
|
|
|
|
ddp->SetIndx(NULL);
|
|
|
|
SetBooleanOption("Sepindex", newsep);
|
|
|
|
} else if (newsep) {
|
|
|
|
// Make the list of dropped indexes
|
|
|
|
xlst= &drp; xprc= &oldpix;
|
|
|
|
|
|
|
|
for (xp2= oldpix; xp2; xp2= xp) {
|
|
|
|
for (xp1= newpix; xp1; xp1= xp1->Next)
|
|
|
|
if (IsSameIndex(xp1, xp2))
|
|
|
|
break; // Index not to drop
|
2013-04-09 23:14:45 +02:00
|
|
|
|
2014-04-19 17:02:53 +02:00
|
|
|
xp= xp2->GetNext();
|
|
|
|
|
|
|
|
if (!xp1) {
|
|
|
|
*xlst= xp2;
|
|
|
|
*xprc= xp;
|
|
|
|
*(xlst= &xp2->Next)= NULL;
|
|
|
|
} else
|
|
|
|
xprc= &xp2->Next;
|
|
|
|
|
|
|
|
} // endfor xp2
|
|
|
|
|
|
|
|
if (drp) {
|
|
|
|
// Here we erase the index files
|
|
|
|
ddp->DeleteIndexFile(g, drp);
|
|
|
|
} // endif xp1
|
|
|
|
|
|
|
|
} else if (oldpix) {
|
|
|
|
// TODO: optimize the case of just adding new indexes
|
|
|
|
if (!newpix)
|
|
|
|
ddp->DeleteIndexFile(g, NULL);
|
|
|
|
|
|
|
|
oldpix= NULL; // To remake all indexes
|
|
|
|
ddp->SetIndx(NULL);
|
|
|
|
} // endif sepindex
|
|
|
|
|
|
|
|
// Make the list of new created indexes
|
|
|
|
xlst= &adp; xprc= &newpix;
|
|
|
|
|
|
|
|
for (xp1= newpix; xp1; xp1= xp) {
|
|
|
|
for (xp2= oldpix; xp2; xp2= xp2->Next)
|
2013-04-10 14:24:28 +02:00
|
|
|
if (IsSameIndex(xp1, xp2))
|
2014-04-19 17:02:53 +02:00
|
|
|
break; // Index already made
|
2013-04-09 23:14:45 +02:00
|
|
|
|
2014-04-19 17:02:53 +02:00
|
|
|
xp= xp1->Next;
|
2013-04-09 23:14:45 +02:00
|
|
|
|
2014-04-19 17:02:53 +02:00
|
|
|
if (!xp2) {
|
|
|
|
*xlst= xp1;
|
2013-04-09 23:14:45 +02:00
|
|
|
*xprc= xp;
|
2014-04-19 17:02:53 +02:00
|
|
|
*(xlst= &xp1->Next)= NULL;
|
2013-04-09 23:14:45 +02:00
|
|
|
} else
|
2014-04-19 17:02:53 +02:00
|
|
|
xprc= &xp1->Next;
|
2013-04-09 23:14:45 +02:00
|
|
|
|
2014-04-19 17:02:53 +02:00
|
|
|
} // endfor xp1
|
2013-04-09 23:14:45 +02:00
|
|
|
|
2014-04-19 17:02:53 +02:00
|
|
|
if (adp)
|
|
|
|
// Here we do make the new indexes
|
|
|
|
if (tdp->MakeIndex(g, adp, true) == RC_FX) {
|
|
|
|
// Make it a warning to avoid crash
|
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
|
|
|
0, g->Message);
|
|
|
|
rc= 0;
|
|
|
|
} // endif MakeIndex
|
|
|
|
|
2014-10-31 12:28:07 +01:00
|
|
|
} else if (((PTDBASE)tdbp)->GetDef()->Indexable() == 3) {
|
|
|
|
if (CheckVirtualIndex(NULL)) {
|
|
|
|
// Make it a warning to avoid crash
|
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
|
|
|
0, g->Message);
|
|
|
|
rc= 0;
|
|
|
|
} // endif Check
|
|
|
|
|
2014-04-19 17:02:53 +02:00
|
|
|
} // endif indexable
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
} // endif Tdbp
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-04-09 23:14:45 +02:00
|
|
|
} // endelse Xchk
|
2013-02-20 01:30:37 +01:00
|
|
|
|
2013-06-12 20:48:55 +02:00
|
|
|
if (CloseTable(g)) {
|
|
|
|
      // This is an error while building index
|
2014-02-03 16:14:13 +01:00
|
|
|
// Make it a warning to avoid crash
|
2013-07-23 16:29:16 +02:00
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
|
2013-06-12 20:48:55 +02:00
|
|
|
rc= 0;
|
|
|
|
} // endif Close
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-08-12 21:51:56 +02:00
|
|
|
locked= 0;
|
2014-07-17 18:13:51 +02:00
|
|
|
xmod= MODE_ANY; // For info commands
|
2013-02-07 10:34:27 +01:00
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // endif MODE_ANY
|
2015-07-26 00:05:58 +02:00
|
|
|
else
|
2013-12-31 13:08:29 +01:00
|
|
|
if (check_privileges(thd, options, table->s->db.str)) {
|
|
|
|
strcpy(g->Message, "This operation requires the FILE privilege");
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("%s\n", g->Message);
|
2013-12-31 13:08:29 +01:00
|
|
|
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
|
|
|
|
} // endif check_privileges
|
|
|
|
|
2015-07-26 00:05:58 +02:00
|
|
|
|
|
|
|
DBUG_ASSERT(table && table->s);
|
|
|
|
|
2013-08-12 21:51:56 +02:00
|
|
|
// Table mode depends on the query type
|
|
|
|
newmode= CheckMode(g, thd, newmode, &xcheck, &cras);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-08-12 21:51:56 +02:00
|
|
|
if (newmode == MODE_ERROR)
|
|
|
|
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// If this is the start of a new query, cleanup the previous one
|
|
|
|
if (xp->CheckCleanup()) {
|
|
|
|
tdbp= NULL;
|
|
|
|
valid_info= false;
|
|
|
|
} // endif CheckCleanup
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
#if 0
|
2013-04-09 23:14:45 +02:00
|
|
|
if (xcheck) {
|
|
|
|
// This must occur after CheckCleanup
|
2014-02-03 16:14:13 +01:00
|
|
|
if (!g->Xchk) {
|
|
|
|
g->Xchk= new(g) XCHK;
|
|
|
|
((PCHK)g->Xchk)->oldsep= GetBooleanOption("Sepindex", false);
|
|
|
|
((PCHK)g->Xchk)->oldpix= GetIndexInfo();
|
|
|
|
} // endif Xchk
|
|
|
|
|
2013-12-11 16:52:01 +01:00
|
|
|
} else
|
2014-04-19 11:11:30 +02:00
|
|
|
g->Xchk= NULL;
|
2014-02-03 16:14:13 +01:00
|
|
|
#endif // 0
|
2013-04-09 23:14:45 +02:00
|
|
|
|
|
|
|
if (cras)
|
|
|
|
g->Createas= 1; // To tell created table to ignore FLAG
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace) {
|
2014-02-03 16:14:13 +01:00
|
|
|
#if 0
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("xcheck=%d cras=%d\n", xcheck, cras);
|
2014-02-03 16:14:13 +01:00
|
|
|
|
|
|
|
if (xcheck)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("oldsep=%d oldpix=%p\n",
|
2014-02-03 16:14:13 +01:00
|
|
|
((PCHK)g->Xchk)->oldsep, ((PCHK)g->Xchk)->oldpix);
|
|
|
|
#endif // 0
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("Calling CntCheckDB db=%s cras=%d\n", GetDBName(NULL), cras);
|
2014-10-21 17:29:51 +02:00
|
|
|
} // endif trace
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
// Set or reset the good database environment
|
|
|
|
if (CntCheckDB(g, this, GetDBName(NULL))) {
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("%p external_lock: %s\n", this, g->Message);
|
2013-02-07 10:34:27 +01:00
|
|
|
rc= HA_ERR_INTERNAL_ERROR;
|
|
|
|
// This can NOT be called without open called first, but
|
|
|
|
// the table can have been closed since then
|
|
|
|
} else if (!tdbp || xp->CheckQuery(valid_query_id) || xmod != newmode) {
|
2013-05-13 10:37:35 +02:00
|
|
|
if (tdbp) {
|
|
|
|
// If this is called by a later query, the table may have
|
|
|
|
// been already closed and the tdbp is not valid anymore.
|
|
|
|
if (xp->last_query_id == valid_query_id)
|
|
|
|
rc= CloseTable(g);
|
|
|
|
else
|
|
|
|
tdbp= NULL;
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
} // endif tdbp
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
xmod= newmode;
|
|
|
|
|
|
|
|
// Delay open until used fields are known
|
|
|
|
} // endif tdbp
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("external_lock: rc=%d\n", rc);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
} // end of external_lock
|
|
|
|
|
|
|
|
|
|
|
|
/**
  @brief
  The idea with handler::store_lock() is: The statement decides which locks
  should be needed for the table. For updates/deletes/inserts we get WRITE
  locks, for SELECT... we get read locks.

  @details
  Before adding the lock into the table lock handler (see thr_lock.c),
  mysqld calls store lock with the requested locks. Store lock can now
  modify a write lock to a read lock (or some other lock), ignore the
  lock (if we don't want to use MySQL table locks at all), or add locks
  for many tables (like we do when we are using a MERGE handler).

  Berkeley DB, for example, changes all WRITE locks to TL_WRITE_ALLOW_WRITE
  (which signals that we are doing WRITES, but are still allowing other
  readers and writers).

  When releasing locks, store_lock() is also called. In this case one
  usually doesn't have to do anything.

  In some exceptional cases MySQL may send a request for a TL_IGNORE;
  This means that we are requesting the same lock as last time and this
  should also be ignored. (This may happen when someone does a flush
  table when we have opened a part of the tables, in which case mysqld
  closes and reopens the tables and tries to get the same locks as last
  time). In the future we will probably try to remove this.

  Called from lock.cc by get_lock_data().

  @note
  In this method one should NEVER rely on table->in_use, it may, in fact,
  refer to a different thread! (this happens if get_lock_data() is called
  from mysql_lock_abort_for_thread() function)

  @see
  get_lock_data() in lock.cc
*/
THR_LOCK_DATA **ha_connect::store_lock(THD *,
                                       THR_LOCK_DATA **to,
                                       enum thr_lock_type lock_type)
{
  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
    lock.type=lock_type;

  *to++ = &lock;
  return to;
}

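// Illustrative sketch only, not CONNECT's behavior: an engine that wanted
// the downgrade described in the comment above (keep the table readable by
// others while writing, as Berkeley DB does) could do roughly this inside
// store_lock(), assuming a named THD *thd parameter; the exact policy shown
// here is hypothetical:
//
//   if (lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE &&
//       !thd_in_lock_tables(thd))
//     lock_type= TL_WRITE_ALLOW_WRITE;  // still allow other readers/writers
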
/**
  Searches for a pointer to the last occurrence of the
  character c in the string src.
  Returns true on failure, false on success.
*/
static bool
strnrchr(LEX_CSTRING *ls, const char *src, size_t length, int c)
{
  const char *srcend, *s;

  for (s= srcend= src + length; s > src; s--)
  {
    if (s[-1] == c)
    {
      ls->str= s;
      ls->length= srcend - s;
      return false;
    }
  }

  return true;
}

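// Example (illustrative): with src= "a/b/c", length= 5 and c= '/', the call
// returns false with ls->str pointing at "c" and ls->length= 1; when c does
// not occur in src it returns true and *ls is left untouched.
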
/**
  Split filename into database and table name.
*/
static bool
filename_to_dbname_and_tablename(const char *filename,
                                 char *database, size_t database_size,
                                 char *table, size_t table_size)
{
  LEX_CSTRING d, t;
  size_t length= strlen(filename);

  /* Find filename - the rightmost directory part */
  if (strnrchr(&t, filename, length, slash) || t.length + 1 > table_size)
    return true;

  memcpy(table, t.str, t.length);
  table[t.length]= '\0';

  if (!(length-= t.length))
    return true;

  length--; /* Skip slash */

  /* Find database name - the second rightmost directory part */
  if (strnrchr(&d, filename, length, slash) || d.length + 1 > database_size)
    return true;

  memcpy(database, d.str, d.length);
  database[d.length]= '\0';
  return false;
} // end of filename_to_dbname_and_tablename

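// Example (illustrative): for a handler path such as "./testdb/t1" the
// function fills database= "testdb" and table= "t1"; the names are made up,
// only the ".../<db>/<table>" layout matters.
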
/**
  @brief
  Used to delete or rename a table. By the time delete_table() has been
  called all opened references to this table will have been closed
  (and your globally shared references released) ===> too bad!!!
  The variable name will just be the name of the table.
  You will need to remove or rename any files you have created at
  this point.

  @details
  If you do not implement this, the default delete_table() is called from
  handler.cc and it will delete all files with the file extensions returned
  by bas_ext().

  Called from handler.cc by delete_table and ha_create_table(). Only used
  during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
  the storage engine.

  @see
  delete_table and ha_create_table() in handler.cc
*/
int ha_connect::delete_or_rename_table(const char *name, const char *to)
{
  DBUG_ENTER("ha_connect::delete_or_rename_table");
  char db[128], tabname[128];
  int  rc= 0;
  bool ok= false;
  THD *thd= current_thd;
  int  sqlcom= thd_sql_command(thd);

  if (trace) {
    if (to)
      htrc("rename_table: this=%p thd=%p sqlcom=%d from=%s to=%s\n",
           this, thd, sqlcom, name, to);
    else
      htrc("delete_table: this=%p thd=%p sqlcom=%d name=%s\n",
           this, thd, sqlcom, name);

    } // endif trace

  if (to && (filename_to_dbname_and_tablename(to, db, sizeof(db),
                                              tabname, sizeof(tabname))
      || (*tabname == '#' && sqlcom == SQLCOM_CREATE_INDEX)))
    DBUG_RETURN(0);

  if (filename_to_dbname_and_tablename(name, db, sizeof(db),
                                       tabname, sizeof(tabname))
      || (*tabname == '#' && sqlcom == SQLCOM_CREATE_INDEX))
    DBUG_RETURN(0);

  // If a temporary file exists, all the tests below were passed
  // successfully when making it, so they are not needed anymore
  // in particular because they sometimes cause DBUG_ASSERT crash.
  // Also, for partitioned tables, no test can be done because when
  // this function is called, the .par file is already deleted and
  // this causes the open_table_def function to fail.
  // Not having any other clues (table and table_share are NULL)
  // the only means we have to test for partitioning is this:
  if (*tabname != '#' && !strstr(tabname, "#P#")) {
    // We have to retrieve the information about this table's options.
    ha_table_option_struct *pos;
    char         key[MAX_DBKEY_LENGTH];
    uint         key_length;
    TABLE_SHARE *share;

//  if ((p= strstr(tabname, "#P#")))   won't work, see above
//    *p= 0;            // Get the main table name

    key_length= tdc_create_key(key, db, tabname);

    // share contains the option struct that we need
    if (!(share= alloc_table_share(db, tabname, key, key_length)))
      DBUG_RETURN(rc);

    // Get the share info from the .frm file
    if (!open_table_def(thd, share)) {
      // Now we can work
      if ((pos= share->option_struct)) {
        if (check_privileges(thd, pos, db))
          rc= HA_ERR_INTERNAL_ERROR;         // ???
        else
          if (IsFileType(GetRealType(pos)) && !pos->filename)
            ok= true;

        } // endif pos

      } // endif open_table_def

      // This below was done to avoid DBUG_ASSERT in some case that
      // we don't know anymore what they were. It was suppressed because
      // it did cause assertion in other cases (see MDEV-7935)
//  } else       // Avoid infamous DBUG_ASSERT
//    thd->get_stmt_da()->reset_diagnostics_area();

    free_table_share(share);
  } else              // Temporary file
    ok= true;

  if (ok) {
    // Let the base handler do the job
    if (to)
      rc= handler::rename_table(name, to);
    else if ((rc= handler::delete_table(name)) == ENOENT)
      rc= 0;        // No files is not an error for CONNECT

    } // endif ok

  DBUG_RETURN(rc);
} // end of delete_or_rename_table

int ha_connect::delete_table(const char *name)
{
  return delete_or_rename_table(name, NULL);
} // end of delete_table

int ha_connect::rename_table(const char *from, const char *to)
{
  return delete_or_rename_table(from, to);
} // end of rename_table

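// Usage note (illustrative): the server calls these entry points with
// path-style names, e.g. delete_table("./testdb/t1") for DROP TABLE and
// rename_table("./testdb/t1", "./testdb/t2") for RENAME TABLE; both simply
// funnel into delete_or_rename_table() above. The paths shown are made up.
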
/**
  @brief
  Given a starting key and an ending key, estimate the number of rows that
  will exist between the two keys.

  @details
  end_key may be empty, in which case determine if start_key matches any rows.

  Called from opt_range.cc by check_quick_keys().

  @see
  check_quick_keys() in opt_range.cc
*/
ha_rows ha_connect::records_in_range(uint inx, key_range *min_key,
                                     key_range *max_key)
{
  ha_rows rows;
  DBUG_ENTER("ha_connect::records_in_range");

  if (indexing < 0 || inx != active_index)
    if (index_init(inx, false))
      DBUG_RETURN(HA_POS_ERROR);

  if (trace)
    htrc("records_in_range: inx=%d indexing=%d\n", inx, indexing);

  if (indexing > 0) {
    int          nval;
    uint         len[2];
    const uchar *key[2];
    bool         incl[2];
    key_part_map kmap[2];

    key[0]= (min_key) ? min_key->key : NULL;
    key[1]= (max_key) ? max_key->key : NULL;
    len[0]= (min_key) ? min_key->length : 0;
    len[1]= (max_key) ? max_key->length : 0;
    incl[0]= (min_key) ? (min_key->flag == HA_READ_KEY_EXACT) : false;
    incl[1]= (max_key) ? (max_key->flag == HA_READ_AFTER_KEY) : false;
    kmap[0]= (min_key) ? min_key->keypart_map : 0;
    kmap[1]= (max_key) ? max_key->keypart_map : 0;

    if ((nval= CntIndexRange(xp->g, tdbp, key, len, incl, kmap)) < 0)
      rows= HA_POS_ERROR;
    else
      rows= (ha_rows)nval;

  } else if (indexing == 0)
    rows= 100000000;        // Don't use missing index
  else
    rows= HA_POS_ERROR;

  if (trace)
    htrc("records_in_range: rows=%llu\n", rows);

  DBUG_RETURN(rows);
} // end of records_in_range

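// Example (illustrative): for a query such as
//   SELECT * FROM t1 WHERE id BETWEEN 10 AND 20
// the optimizer calls records_in_range() with min_key/max_key built from the
// two bounds; CntIndexRange() then returns the estimated row count for that
// key interval, or a negative value when no estimate is available, which is
// mapped to HA_POS_ERROR above. Table and column names are invented.
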
// Used to check whether a MYSQL table is created on itself
bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host,
               const char *db, char *tab, const char *src, int port)
{
  if (src)
    return false;
  else if (host && stricmp(host, "localhost") && strcmp(host, "127.0.0.1"))
    return false;
  else if (db && stricmp(db, s->db.str))
    return false;
  else if (tab && stricmp(tab, s->table_name.str))
    return false;
  else if (port && port != (signed)GetDefaultPort())
    return false;

  strcpy(g->Message, "This MySQL table is defined on itself");
  return true;
} // end of CheckSelf

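// Example (illustrative): a MYSQL-type CONNECT table created inside database
// "test" as "t1", with TABNAME='t1', DBNAME='test', no SRCDEF, and pointing
// at localhost on the default port, satisfies none of the early returns and
// is therefore rejected with "This MySQL table is defined on itself". The
// database and table names here are hypothetical.
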
/**
  Convert an ISO-8859-1 column name to UTF-8
*/
static char *encode(PGLOBAL g, const char *cnm)
{
  char  *buf= (char*)PlugSubAlloc(g, NULL, strlen(cnm) * 3);
  uint   dummy_errors;
  uint32 len= copy_and_convert(buf, strlen(cnm) * 3,
                               &my_charset_utf8_general_ci,
                               cnm, strlen(cnm),
                               &my_charset_latin1,
                               &dummy_errors);
  buf[len]= '\0';
  return buf;
} // end of encode

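// Sizing note for encode() above: a single ISO-8859-1 character needs at most
// two bytes in UTF-8 (for instance 0xE9 'é' becomes the byte pair 0xC3 0xA9),
// so allocating strlen(cnm) * 3 leaves room for the worst-case expansion plus
// the terminating '\0' written afterwards.
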
/**
  Store field definition for create.

  @return
  Return 0 if ok
*/
static bool add_field(String *sql, const char *field_name, int typ, int len,
                      int dec, char *key, uint tm, const char *rem, char *dft,
                      char *xtra, char *fmt, int flag, bool dbf, char v)
{
  char var = (len > 255) ? 'V' : v;
  bool q, error= false;
  const char *type= PLGtoMYSQLtype(typ, dbf, var);

  error|= sql->append('`');
  error|= sql->append(field_name);
  error|= sql->append("` ");
  error|= sql->append(type);

  if (len && typ != TYPE_DATE && (typ != TYPE_DOUBLE || dec >= 0)) {
    error|= sql->append('(');
    error|= sql->append_ulonglong(len);

    if (typ == TYPE_DOUBLE) {
      error|= sql->append(',');
      // dec must be < len and < 31
      error|= sql->append_ulonglong(MY_MIN(dec, (MY_MIN(len, 31) - 1)));
    } else if (dec > 0 && !strcmp(type, "DECIMAL")) {
      error|= sql->append(',');
      // dec must be < len
      error|= sql->append_ulonglong(MY_MIN(dec, len - 1));
    } // endif dec

    error|= sql->append(')');
    } // endif len

  if (v == 'U')
    error|= sql->append(" UNSIGNED");
  else if (v == 'Z')
    error|= sql->append(" ZEROFILL");

  if (key && *key) {
    error|= sql->append(" ");
    error|= sql->append(key);
    } // endif key

  if (tm)
    error|= sql->append(STRING_WITH_LEN(" NOT NULL"), system_charset_info);

  if (dft && *dft) {
    error|= sql->append(" DEFAULT ");

    if (typ == TYPE_DATE)
      q = (strspn(dft, "0123456789 -:/") == strlen(dft));
    else
      q = !IsTypeNum(typ);

    if (q) {
      error|= sql->append("'");
      error|= sql->append_for_single_quote(dft, strlen(dft));
      error|= sql->append("'");
    } else
      error|= sql->append(dft);

    } // endif dft

  if (xtra && *xtra) {
    error|= sql->append(" ");
    error|= sql->append(xtra);
    } // endif xtra

  if (rem && *rem) {
    error|= sql->append(" COMMENT '");
    error|= sql->append_for_single_quote(rem, strlen(rem));
    error|= sql->append("'");
    } // endif rem

  if (fmt && *fmt) {
    error|= sql->append(" FIELD_FORMAT='");
    error|= sql->append_for_single_quote(fmt, strlen(fmt));
    error|= sql->append("'");
    } // endif fmt

  if (flag) {
    error|= sql->append(" FLAG=");
    error|= sql->append_ulonglong(flag);
    } // endif flag

  error|= sql->append(',');
  return error;
} // end of add_field

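// Example (illustrative): a call such as
//   add_field(&sql, "id", TYPE_INT, 11, 0, NULL, NOT_NULL_FLAG,
//             NULL, NULL, NULL, NULL, 2, false, 0);
// appends something like "`id` INT(11) NOT NULL FLAG=2," to the CREATE TABLE
// statement being built; the column name and flag value are made up.
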
/**
  Initialise the table share with the new columns.

  @return
  Return 0 if ok
*/
static int init_table_share(THD* thd,
                            TABLE_SHARE *table_s,
                            HA_CREATE_INFO *create_info,
                            String *sql)
{
  bool oom= false;
  PTOS topt= table_s->option_struct;

  sql->length(sql->length()-1); // remove the trailing comma
  sql->append(')');

  for (ha_create_table_option *opt= connect_table_option_list;
       opt->name; opt++) {
    ulonglong   vull;
    const char *vstr;

    switch (opt->type) {
      case HA_OPTION_TYPE_ULL:
        vull= *(ulonglong*)(((char*)topt) + opt->offset);

        if (vull != opt->def_value) {
          oom|= sql->append(' ');
          oom|= sql->append(opt->name);
          oom|= sql->append('=');
          oom|= sql->append_ulonglong(vull);
          } // endif vull

        break;
      case HA_OPTION_TYPE_STRING:
        vstr= *(char**)(((char*)topt) + opt->offset);

        if (vstr) {
          oom|= sql->append(' ');
          oom|= sql->append(opt->name);
          oom|= sql->append("='");
          oom|= sql->append_for_single_quote(vstr, strlen(vstr));
          oom|= sql->append('\'');
          } // endif vstr

        break;
      case HA_OPTION_TYPE_BOOL:
        vull= *(bool*)(((char*)topt) + opt->offset);

        if (vull != opt->def_value) {
          oom|= sql->append(' ');
          oom|= sql->append(opt->name);
          oom|= sql->append('=');
          oom|= sql->append(vull ? "YES" : "NO");
          } // endif vull

        break;
      default: // no enums here, good :)
        break;
      } // endswitch type

    if (oom)
      return HA_ERR_OUT_OF_MEM;

    } // endfor opt

  if (create_info->connect_string.length) {
    oom|= sql->append(' ');
    oom|= sql->append("CONNECTION='");
    oom|= sql->append_for_single_quote(create_info->connect_string.str,
                                       create_info->connect_string.length);
    oom|= sql->append('\'');

    if (oom)
      return HA_ERR_OUT_OF_MEM;

    } // endif string

  if (create_info->default_table_charset) {
    oom|= sql->append(' ');
    oom|= sql->append("CHARSET=");
    oom|= sql->append(create_info->default_table_charset->csname);

    if (oom)
      return HA_ERR_OUT_OF_MEM;

    } // endif charset

  if (trace)
    htrc("s_init: %.*s\n", sql->length(), sql->ptr());

  return table_s->init_from_sql_statement_string(thd, true,
                                                 sql->ptr(), sql->length());
} // end of init_table_share

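// Example (illustrative): after assisted discovery of a two-column CSV file
// the string handed to init_from_sql_statement_string() looks roughly like
//   CREATE TABLE whatever (`name` CHAR(12) NOT NULL, `age` INT(3) NOT NULL)
//     TABLE_TYPE=CSV FILE_NAME='people.csv' HEADER=1
// with the column list produced by add_field() and the trailing options by
// the loop above; the file and column names here are invented.
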
/**
  @brief
  connect_assisted_discovery() is called when creating a table with no columns.

  @details
  When assisted discovery is used the .frm file has not yet been
  created. You can overwrite some definitions at this point but the
  main purpose of it is to define the columns for some table types.

  @note
  This function is no longer called in case of CREATE .. SELECT
*/
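/* Example (illustrative): a statement such as
     CREATE TABLE people ENGINE=CONNECT TABLE_TYPE=CSV
       FILE_NAME='people.csv' HEADER=1;
   gives no column list, so this function is invoked to discover the columns
   from the data source itself; the table and file names are invented. */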
|
2015-05-10 12:14:21 +02:00
|
|
|
static int connect_assisted_discovery(handlerton *, THD* thd,
|
2013-04-19 20:35:43 +02:00
|
|
|
TABLE_SHARE *table_s,
|
|
|
|
HA_CREATE_INFO *create_info)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
2014-02-06 15:14:09 +01:00
|
|
|
char v=0, spc= ',', qch= 0;
|
2013-02-12 12:34:14 +01:00
|
|
|
const char *fncn= "?";
|
2013-10-27 10:37:12 +01:00
|
|
|
const char *user, *fn, *db, *host, *pwd, *sep, *tbl, *src;
|
2014-04-08 18:18:02 +02:00
|
|
|
const char *col, *ocl, *rnk, *pic, *fcl, *skc;
|
2015-05-26 01:02:33 +02:00
|
|
|
char *tab, *dsn, *shm, *dpath;
|
2015-05-27 16:23:38 +02:00
|
|
|
#if defined(__WIN__)
|
2013-02-12 12:34:14 +01:00
|
|
|
char *nsp= NULL, *cls= NULL;
|
2015-05-27 16:23:38 +02:00
|
|
|
#endif // __WIN__
|
2015-05-26 01:02:33 +02:00
|
|
|
int port= 0, hdr= 0, mxr= 0, mxe= 0, rc= 0;
|
2015-06-02 10:34:51 +02:00
|
|
|
int cop __attribute__((unused))= 0, lrecl= 0;
|
2015-01-13 17:24:31 +01:00
|
|
|
#if defined(ODBC_SUPPORT)
|
2015-02-01 12:16:30 +01:00
|
|
|
POPARM sop = NULL;
|
|
|
|
char *ucnc = NULL;
|
|
|
|
bool cnc= false;
|
2015-01-13 17:24:31 +01:00
|
|
|
int cto= -1, qto= -1;
|
|
|
|
#endif // ODBC_SUPPORT
|
2013-02-12 12:34:14 +01:00
|
|
|
uint tm, fnc= FNC_NO, supfnc= (FNC_NO | FNC_COL);
|
2013-05-24 00:19:26 +02:00
|
|
|
bool bif, ok= false, dbf= false;
|
2013-02-12 12:34:14 +01:00
|
|
|
TABTYPE ttp= TAB_UNDEF;
|
2013-07-08 19:03:15 +02:00
|
|
|
PQRYRES qrp= NULL;
|
2013-02-12 12:34:14 +01:00
|
|
|
PCOLRES crp;
|
2013-11-28 01:25:39 +01:00
|
|
|
PCONNECT xp= NULL;
|
|
|
|
PGLOBAL g= GetPlug(thd, xp);
|
2014-03-30 22:52:54 +02:00
|
|
|
PDBUSER dup= PlgGetUser(g);
|
|
|
|
PCATLG cat= (dup) ? dup->Catalog : NULL;
|
2013-04-19 20:35:43 +02:00
|
|
|
PTOS topt= table_s->option_struct;
|
2014-04-19 11:11:30 +02:00
|
|
|
#if defined(NEW_WAY)
|
|
|
|
//CHARSET_INFO *cs;
|
|
|
|
Alter_info alter_info;
|
|
|
|
#else // !NEW_WAY
|
2013-12-11 23:33:36 +01:00
|
|
|
char buf[1024];
|
2013-04-19 20:35:43 +02:00
|
|
|
String sql(buf, sizeof(buf), system_charset_info);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-07-25 19:05:57 +02:00
|
|
|
sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info);
|
2014-04-19 11:11:30 +02:00
|
|
|
#endif // !NEW_WAY
|
2013-07-25 19:05:57 +02:00
|
|
|
|
2013-02-09 01:08:15 +01:00
|
|
|
if (!g)
|
2013-04-19 20:35:43 +02:00
|
|
|
return HA_ERR_INTERNAL_ERROR;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2015-05-26 01:02:33 +02:00
|
|
|
user= host= pwd= tbl= src= col= ocl= pic= fcl= skc= rnk= dsn= NULL;
|
2013-02-09 01:08:15 +01:00
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
// Get the useful create options
|
2013-05-19 19:25:06 +02:00
|
|
|
ttp= GetTypeID(topt->type);
|
|
|
|
fn= topt->filename;
|
|
|
|
tab= (char*)topt->tabname;
|
|
|
|
src= topt->srcdef;
|
|
|
|
db= topt->dbname;
|
2013-04-19 20:35:43 +02:00
|
|
|
fncn= topt->catfunc;
|
|
|
|
fnc= GetFuncID(fncn);
|
2013-05-19 19:25:06 +02:00
|
|
|
sep= topt->separator;
|
2016-03-16 18:53:56 +01:00
|
|
|
spc= (!sep) ? ',' : *sep;
|
2013-06-26 19:52:38 +02:00
|
|
|
qch= topt->qchar ? *topt->qchar : (signed)topt->quoted >= 0 ? '"' : 0;
|
2013-04-29 13:50:20 +02:00
|
|
|
hdr= (int)topt->header;
|
|
|
|
tbl= topt->tablist;
|
2013-05-28 17:22:38 +02:00
|
|
|
col= topt->colist;
|
2013-04-29 13:50:20 +02:00
|
|
|
|
2013-04-19 20:35:43 +02:00
|
|
|
if (topt->oplist) {
|
2013-10-26 00:43:03 +02:00
|
|
|
host= GetListOption(g, "host", topt->oplist, "localhost");
|
2015-01-31 15:05:43 +01:00
|
|
|
user= GetListOption(g, "user", topt->oplist,
|
|
|
|
(ttp == TAB_ODBC ? NULL : "root"));
|
2013-04-29 13:50:20 +02:00
|
|
|
// Default value db can come from the DBNAME=xxx option.
|
2013-10-26 00:43:03 +02:00
|
|
|
db= GetListOption(g, "database", topt->oplist, db);
|
|
|
|
col= GetListOption(g, "colist", topt->oplist, col);
|
|
|
|
ocl= GetListOption(g, "occurcol", topt->oplist, NULL);
|
|
|
|
pic= GetListOption(g, "pivotcol", topt->oplist, NULL);
|
|
|
|
fcl= GetListOption(g, "fnccol", topt->oplist, NULL);
|
2014-04-08 18:18:02 +02:00
|
|
|
skc= GetListOption(g, "skipcol", topt->oplist, NULL);
|
2013-10-26 00:43:03 +02:00
|
|
|
rnk= GetListOption(g, "rankcol", topt->oplist, NULL);
|
|
|
|
pwd= GetListOption(g, "password", topt->oplist);
|
2015-05-27 16:23:38 +02:00
|
|
|
#if defined(__WIN__)
|
2013-10-26 00:43:03 +02:00
|
|
|
nsp= GetListOption(g, "namespace", topt->oplist);
|
|
|
|
cls= GetListOption(g, "class", topt->oplist);
|
2015-05-27 16:23:38 +02:00
|
|
|
#endif // __WIN__
|
2013-10-27 10:37:12 +01:00
|
|
|
port= atoi(GetListOption(g, "port", topt->oplist, "0"));
|
2014-01-02 10:19:19 +01:00
|
|
|
#if defined(ODBC_SUPPORT)
|
2013-12-16 01:32:47 +01:00
|
|
|
mxr= atoi(GetListOption(g,"maxres", topt->oplist, "0"));
|
2015-01-13 17:24:31 +01:00
|
|
|
cto= atoi(GetListOption(g,"ConnectTimeout", topt->oplist, "-1"));
|
|
|
|
qto= atoi(GetListOption(g,"QueryTimeout", topt->oplist, "-1"));
|
2015-02-01 12:16:30 +01:00
|
|
|
|
2015-01-31 15:05:43 +01:00
|
|
|
if ((ucnc= GetListOption(g, "UseDSN", topt->oplist)))
|
|
|
|
cnc= (!*ucnc || *ucnc == 'y' || *ucnc == 'Y' || atoi(ucnc) != 0);
|
2014-01-02 10:19:19 +01:00
|
|
|
#endif
|
2013-12-16 01:32:47 +01:00
|
|
|
mxe= atoi(GetListOption(g,"maxerr", topt->oplist, "0"));
|
2013-11-22 16:03:54 +01:00
|
|
|
#if defined(PROMPT_OK)
|
2013-10-27 10:37:12 +01:00
|
|
|
cop= atoi(GetListOption(g, "checkdsn", topt->oplist, "0"));
|
2013-11-22 16:03:54 +01:00
|
|
|
#endif // PROMPT_OK
|
2013-05-19 19:25:06 +02:00
|
|
|
} else {
|
|
|
|
host= "localhost";
|
2015-02-01 12:16:30 +01:00
|
|
|
user= (ttp == TAB_ODBC ? NULL : "root");
|
2013-05-19 19:25:06 +02:00
|
|
|
} // endif option_list
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-12-16 01:32:47 +01:00
|
|
|
if (!(shm= (char*)db))
|
2014-11-08 13:35:03 +01:00
|
|
|
db= table_s->db.str; // Default value
|
2013-02-21 17:48:35 +01:00
|
|
|
|
2013-02-15 01:33:23 +01:00
|
|
|
// Check table type
|
2013-03-20 23:42:23 +01:00
|
|
|
if (ttp == TAB_UNDEF) {
|
2013-05-19 19:25:06 +02:00
|
|
|
topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS";
|
|
|
|
ttp= GetTypeID(topt->type);
|
|
|
|
sprintf(g->Message, "No table_type. Was set to %s", topt->type);
|
2013-07-23 16:29:16 +02:00
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
|
2013-03-20 23:42:23 +01:00
|
|
|
} else if (ttp == TAB_NIY) {
|
2013-04-19 20:35:43 +02:00
|
|
|
sprintf(g->Message, "Unsupported table type %s", topt->type);
|
2013-03-20 23:42:23 +01:00
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
2013-04-19 20:35:43 +02:00
|
|
|
return HA_ERR_INTERNAL_ERROR;
|
2013-03-20 23:42:23 +01:00
|
|
|
} // endif ttp
|
2013-02-15 01:33:23 +01:00
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
// Save stack and allocation environment and prepare error return
|
|
|
|
if (g->jump_level == MAX_JUMP) {
|
|
|
|
strcpy(g->Message, MSG(TOO_MANY_JUMPS));
|
|
|
|
return HA_ERR_INTERNAL_ERROR;
|
|
|
|
} // endif jump_level
|
|
|
|
|
|
|
|
if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
|
|
|
goto err;
|
|
|
|
} // endif rc
|
|
|
|
|
2013-04-29 13:50:20 +02:00
|
|
|
if (!tab) {
|
|
|
|
if (ttp == TAB_TBL) {
|
|
|
|
// Make tab the first table of the list
|
|
|
|
char *p;
|
|
|
|
|
|
|
|
if (!tbl) {
|
|
|
|
strcpy(g->Message, "Missing table list");
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
2014-10-21 17:29:51 +02:00
|
|
|
goto err;
|
2013-04-29 13:50:20 +02:00
|
|
|
} // endif tbl
|
|
|
|
|
2015-03-18 13:30:14 +01:00
|
|
|
tab= PlugDup(g, tbl);
|
2013-04-29 13:50:20 +02:00
|
|
|
|
|
|
|
if ((p= strchr(tab, ',')))
|
|
|
|
*p= 0;
|
|
|
|
|
|
|
|
if ((p=strchr(tab, '.'))) {
|
|
|
|
*p= 0;
|
|
|
|
db= tab;
|
|
|
|
tab= p + 1;
|
|
|
|
} // endif p
|
|
|
|
|
|
|
|
} else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL)))
|
2014-11-08 13:35:03 +01:00
|
|
|
tab= table_s->table_name.str; // Default value
|
2013-04-29 13:50:20 +02:00
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
#if defined(NEW_WAY)
|
|
|
|
// add_option(thd, create_info, "tabname", tab);
|
|
|
|
#endif // NEW_WAY
|
2013-04-29 13:50:20 +02:00
|
|
|
} // endif tab
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
switch (ttp) {
|
|
|
|
#if defined(ODBC_SUPPORT)
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_ODBC:
|
2014-11-08 13:35:03 +01:00
|
|
|
dsn= strz(g, create_info->connect_string);
|
2013-10-26 00:43:03 +02:00
|
|
|
|
2013-11-22 16:03:54 +01:00
|
|
|
if (fnc & (FNC_DSN | FNC_DRIVER)) {
|
2013-10-26 00:43:03 +02:00
|
|
|
ok= true;
|
2013-11-22 16:03:54 +01:00
|
|
|
#if defined(PROMPT_OK)
|
|
|
|
} else if (!stricmp(thd->main_security_ctx.host, "localhost")
|
2013-10-27 10:37:12 +01:00
|
|
|
&& cop == 1) {
|
2013-10-26 00:43:03 +02:00
|
|
|
if ((dsn = ODBCCheckConnection(g, dsn, cop)) != NULL) {
|
2013-10-27 10:37:12 +01:00
|
|
|
thd->make_lex_string(&create_info->connect_string, dsn, strlen(dsn));
|
2013-10-26 00:43:03 +02:00
|
|
|
ok= true;
|
|
|
|
} // endif dsn
|
2013-11-22 16:03:54 +01:00
|
|
|
#endif // PROMPT_OK
|
2013-10-26 00:43:03 +02:00
|
|
|
|
2015-01-31 15:05:43 +01:00
|
|
|
} else if (!dsn) {
|
2013-04-19 20:35:43 +02:00
|
|
|
sprintf(g->Message, "Missing %s connection string", topt->type);
|
2015-01-31 15:05:43 +01:00
|
|
|
} else {
|
|
|
|
// Store ODBC additional parameters
|
|
|
|
sop= (POPARM)PlugSubAlloc(g, NULL, sizeof(ODBCPARM));
|
|
|
|
sop->User= (char*)user;
|
|
|
|
sop->Pwd= (char*)pwd;
|
|
|
|
sop->Cto= cto;
|
|
|
|
sop->Qto= qto;
|
2015-02-01 12:16:30 +01:00
|
|
|
sop->UseCnc= cnc;
|
2013-02-08 03:27:12 +01:00
|
|
|
ok= true;
|
2015-01-31 15:05:43 +01:00
|
|
|
} // endif's
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-02-12 12:34:14 +01:00
|
|
|
supfnc |= (FNC_TABLE | FNC_DSN | FNC_DRIVER);
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
#endif // ODBC_SUPPORT
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_DBF:
|
2013-02-09 01:08:15 +01:00
|
|
|
dbf= true;
|
|
|
|
// Passthru
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_CSV:
|
2013-03-12 01:20:52 +01:00
|
|
|
if (!fn && fnc != FNC_NO)
|
2013-04-19 20:35:43 +02:00
|
|
|
sprintf(g->Message, "Missing %s file name", topt->type);
|
2016-03-16 18:53:56 +01:00
|
|
|
else if (sep && strlen(sep) > 1)
|
|
|
|
sprintf(g->Message, "Invalid separator %s", sep);
|
|
|
|
else
|
|
|
|
ok= true;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
break;
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_MYSQL:
|
2013-02-22 17:26:08 +01:00
|
|
|
ok= true;
|
|
|
|
|
2014-11-08 13:35:03 +01:00
|
|
|
if (create_info->connect_string.str &&
|
|
|
|
create_info->connect_string.length) {
|
2013-04-09 23:14:45 +02:00
|
|
|
PMYDEF mydef= new(g) MYSQLDEF();
|
2013-02-22 17:26:08 +01:00
|
|
|
|
2014-11-08 13:35:03 +01:00
|
|
|
dsn= strz(g, create_info->connect_string);
|
2013-04-19 20:35:43 +02:00
|
|
|
mydef->SetName(create_info->alias);
|
2013-02-22 17:26:08 +01:00
|
|
|
|
2013-10-27 10:37:12 +01:00
|
|
|
if (!mydef->ParseURL(g, dsn, false)) {
|
|
|
|
if (mydef->GetHostname())
|
|
|
|
host= mydef->GetHostname();
|
|
|
|
|
|
|
|
if (mydef->GetUsername())
|
|
|
|
user= mydef->GetUsername();
|
|
|
|
|
|
|
|
if (mydef->GetPassword())
|
|
|
|
pwd= mydef->GetPassword();
|
|
|
|
|
|
|
|
if (mydef->GetDatabase())
|
|
|
|
db= mydef->GetDatabase();
|
|
|
|
|
|
|
|
if (mydef->GetTabname())
|
|
|
|
tab= mydef->GetTabname();
|
|
|
|
|
|
|
|
if (mydef->GetPortnumber())
|
|
|
|
port= mydef->GetPortnumber();
|
|
|
|
|
2013-02-22 17:26:08 +01:00
|
|
|
} else
|
|
|
|
ok= false;
|
|
|
|
|
|
|
|
} else if (!user)
|
2013-05-24 09:31:43 +02:00
|
|
|
user= "root";
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-08-08 19:46:02 +02:00
|
|
|
if (ok && CheckSelf(g, table_s, host, db, tab, src, port))
|
2013-10-27 10:37:12 +01:00
|
|
|
ok= false;
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
2015-05-27 16:23:38 +02:00
|
|
|
#if defined(__WIN__)
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_WMI:
|
2013-02-07 10:34:27 +01:00
|
|
|
ok= true;
|
|
|
|
break;
|
2015-05-27 16:23:38 +02:00
|
|
|
#endif // __WIN__
|
2015-03-22 11:34:29 +01:00
|
|
|
#if defined(PIVOT_SUPPORT)
|
2013-05-28 17:22:38 +02:00
|
|
|
case TAB_PIVOT:
|
2013-08-09 18:02:47 +02:00
|
|
|
supfnc= FNC_NO;
|
2015-03-22 11:34:29 +01:00
|
|
|
#endif // PIVOT_SUPPORT
|
2013-04-29 13:50:20 +02:00
|
|
|
case TAB_PRX:
|
|
|
|
case TAB_TBL:
|
|
|
|
case TAB_XCL:
|
2013-05-28 17:22:38 +02:00
|
|
|
case TAB_OCCUR:
|
2013-12-28 15:46:49 +01:00
|
|
|
if (!src && !stricmp(tab, create_info->alias) &&
|
2013-07-11 17:45:31 +02:00
|
|
|
(!db || !stricmp(db, table_s->db.str)))
|
|
|
|
sprintf(g->Message, "A %s table cannot refer to itself", topt->type);
|
|
|
|
else
|
|
|
|
ok= true;
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
break;
|
|
|
|
case TAB_OEM:
|
|
|
|
if (topt->module && topt->subtype)
|
|
|
|
ok= true;
|
|
|
|
else
|
|
|
|
strcpy(g->Message, "Missing OEM module or subtype");
|
|
|
|
|
2015-03-18 13:30:14 +01:00
|
|
|
break;
|
2015-04-17 20:05:41 +02:00
|
|
|
#if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT)
|
|
|
|
case TAB_XML:
|
|
|
|
#endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT
|
2015-03-18 13:30:14 +01:00
|
|
|
case TAB_JSON:
|
|
|
|
if (!fn)
|
|
|
|
sprintf(g->Message, "Missing %s file name", topt->type);
|
|
|
|
else
|
|
|
|
ok= true;
|
|
|
|
|
2014-10-31 12:28:07 +01:00
|
|
|
break;
|
|
|
|
case TAB_VIR:
|
|
|
|
ok= true;
|
2013-04-29 13:50:20 +02:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
default:
|
2013-04-19 20:35:43 +02:00
|
|
|
sprintf(g->Message, "Cannot get column info for table type %s", topt->type);
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endif ttp
|
|
|
|
|
2013-02-11 00:31:03 +01:00
|
|
|
// Check for supported catalog function
|
2013-02-12 12:34:14 +01:00
|
|
|
if (ok && !(supfnc & fnc)) {
|
|
|
|
sprintf(g->Message, "Unsupported catalog function %s for table type %s",
|
2013-04-19 20:35:43 +02:00
|
|
|
fncn, topt->type);
|
2013-02-11 00:31:03 +01:00
|
|
|
ok= false;
|
|
|
|
} // endif supfnc
|
|
|
|
|
2013-05-24 00:19:26 +02:00
|
|
|
if (src && fnc != FNC_NO) {
|
|
|
|
strcpy(g->Message, "Cannot make catalog table from srcdef");
|
|
|
|
ok= false;
|
|
|
|
} // endif src
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
if (ok) {
|
2015-03-18 13:30:14 +01:00
|
|
|
char *cnm, *rem, *dft, *xtra, *key, *fmt;
|
2013-12-03 22:59:40 +01:00
|
|
|
int i, len, prec, dec, typ, flg;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-08-23 19:17:15 +02:00
|
|
|
// if (cat)
|
|
|
|
// cat->SetDataPath(g, table_s->db.str);
|
|
|
|
// else
|
|
|
|
// return HA_ERR_INTERNAL_ERROR; // Should never happen
|
|
|
|
|
|
|
|
dpath= SetPath(g, table_s->db.str);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-10-11 13:57:56 +02:00
|
|
|
if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC) {
|
2013-05-19 19:25:06 +02:00
|
|
|
qrp= SrcColumns(g, host, db, user, pwd, src, port);
|
2013-05-28 17:22:38 +02:00
|
|
|
|
2013-05-28 21:06:15 +02:00
|
|
|
if (qrp && ttp == TAB_OCCUR)
|
2013-05-28 17:22:38 +02:00
|
|
|
if (OcrSrcCols(g, qrp, col, ocl, rnk)) {
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
2014-10-21 17:29:51 +02:00
|
|
|
goto err;
|
2013-05-28 17:22:38 +02:00
|
|
|
} // endif OcrSrcCols
|
|
|
|
|
|
|
|
} else switch (ttp) {
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_DBF:
|
2014-08-23 19:17:15 +02:00
|
|
|
qrp= DBFColumns(g, dpath, fn, fnc == FNC_COL);
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
#if defined(ODBC_SUPPORT)
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_ODBC:
|
2013-02-09 01:08:15 +01:00
|
|
|
switch (fnc) {
|
2013-02-12 12:34:14 +01:00
|
|
|
case FNC_NO:
|
|
|
|
case FNC_COL:
|
2013-10-11 13:57:56 +02:00
|
|
|
if (src) {
|
2015-01-31 15:05:43 +01:00
|
|
|
qrp= ODBCSrcCols(g, dsn, (char*)src, sop);
|
2013-10-11 13:57:56 +02:00
|
|
|
src= NULL; // for next tests
|
2014-04-19 11:11:30 +02:00
|
|
|
} else
|
2015-01-31 15:05:43 +01:00
|
|
|
qrp= ODBCColumns(g, dsn, shm, tab, NULL,
|
|
|
|
mxr, fnc == FNC_COL, sop);
|
2013-10-11 13:57:56 +02:00
|
|
|
|
2013-02-09 01:08:15 +01:00
|
|
|
break;
|
2013-02-12 12:34:14 +01:00
|
|
|
case FNC_TABLE:
|
2015-01-31 15:05:43 +01:00
|
|
|
qrp= ODBCTables(g, dsn, shm, tab, mxr, true, sop);
|
2013-02-09 01:08:15 +01:00
|
|
|
break;
|
2013-02-12 12:34:14 +01:00
|
|
|
case FNC_DSN:
|
2013-12-16 01:32:47 +01:00
|
|
|
qrp= ODBCDataSources(g, mxr, true);
|
2013-02-09 01:08:15 +01:00
|
|
|
break;
|
2013-02-12 12:34:14 +01:00
|
|
|
case FNC_DRIVER:
|
2013-12-16 01:32:47 +01:00
|
|
|
qrp= ODBCDrivers(g, mxr, true);
|
2013-02-09 01:08:15 +01:00
|
|
|
break;
|
|
|
|
default:
|
2013-02-15 01:33:23 +01:00
|
|
|
sprintf(g->Message, "invalid catfunc %s", fncn);
|
2013-06-29 01:10:31 +02:00
|
|
|
break;
|
2013-02-09 01:08:15 +01:00
|
|
|
} // endswitch info
|
2013-02-08 03:27:12 +01:00
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
|
|
|
#endif // ODBC_SUPPORT
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_MYSQL:
|
2014-04-19 11:11:30 +02:00
|
|
|
qrp= MyColumns(g, thd, host, db, user, pwd, tab,
|
2013-05-19 19:25:06 +02:00
|
|
|
NULL, port, fnc == FNC_COL);
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_CSV:
|
2014-08-23 19:17:15 +02:00
|
|
|
qrp= CSVColumns(g, dpath, fn, spc, qch, hdr, mxe, fnc == FNC_COL);
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
2015-05-27 16:23:38 +02:00
|
|
|
#if defined(__WIN__)
|
2013-02-12 12:34:14 +01:00
|
|
|
case TAB_WMI:
|
|
|
|
qrp= WMIColumns(g, nsp, cls, fnc == FNC_COL);
|
2013-02-07 10:34:27 +01:00
|
|
|
break;
|
2015-05-27 16:23:38 +02:00
|
|
|
#endif // __WIN__
|
2013-04-29 13:50:20 +02:00
|
|
|
case TAB_PRX:
|
|
|
|
case TAB_TBL:
|
|
|
|
case TAB_XCL:
|
2013-05-28 17:22:38 +02:00
|
|
|
case TAB_OCCUR:
|
2013-05-24 00:19:26 +02:00
|
|
|
bif= fnc == FNC_COL;
|
|
|
|
qrp= TabColumns(g, thd, db, tab, bif);
|
|
|
|
|
|
|
|
if (!qrp && bif && fnc != FNC_COL) // tab is a view
|
2014-03-30 22:52:54 +02:00
|
|
|
qrp= MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, false);
|
2013-05-24 00:19:26 +02:00
|
|
|
|
2013-05-28 21:06:15 +02:00
|
|
|
if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL)
|
2013-05-28 17:22:38 +02:00
|
|
|
if (OcrColumns(g, qrp, col, ocl, rnk)) {
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
2014-10-21 17:29:51 +02:00
|
|
|
goto err;
|
2013-05-28 17:22:38 +02:00
|
|
|
} // endif OcrColumns
|
|
|
|
|
|
|
|
break;
|
2015-03-22 11:34:29 +01:00
|
|
|
#if defined(PIVOT_SUPPORT)
|
2013-05-28 17:22:38 +02:00
|
|
|
case TAB_PIVOT:
|
2014-04-08 18:18:02 +02:00
|
|
|
qrp= PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port);
|
2013-04-29 13:50:20 +02:00
|
|
|
break;
|
2015-03-22 11:34:29 +01:00
|
|
|
#endif // PIVOT_SUPPORT
|
2014-10-31 12:28:07 +01:00
|
|
|
case TAB_VIR:
|
2015-05-09 17:30:20 +02:00
|
|
|
qrp= VirColumns(g, fnc == FNC_COL);
|
2014-10-31 12:28:07 +01:00
|
|
|
break;
|
2015-03-18 13:30:14 +01:00
|
|
|
case TAB_JSON:
|
2015-05-26 01:02:33 +02:00
|
|
|
qrp= JSONColumns(g, (char*)db, topt, fnc == FNC_COL);
|
2015-03-18 13:30:14 +01:00
|
|
|
break;
|
2015-04-17 20:05:41 +02:00
|
|
|
#if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT)
|
|
|
|
case TAB_XML:
|
|
|
|
qrp= XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL);
|
|
|
|
break;
|
|
|
|
#endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT
|
2014-02-03 16:14:13 +01:00
|
|
|
case TAB_OEM:
|
|
|
|
qrp= OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL);
|
|
|
|
break;
|
2013-02-12 12:34:14 +01:00
|
|
|
default:
|
2013-04-19 20:35:43 +02:00
|
|
|
strcpy(g->Message, "System error during assisted discovery");
|
2013-02-12 12:34:14 +01:00
|
|
|
break;
|
2013-02-07 10:34:27 +01:00
|
|
|
} // endswitch ttp
|
|
|
|
|
|
|
|
if (!qrp) {
|
2013-02-15 01:33:23 +01:00
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
2014-10-21 17:29:51 +02:00
|
|
|
goto err;
|
2014-04-25 15:34:02 +02:00
|
|
|
} // endif !qrp
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-05-28 17:22:38 +02:00
|
|
|
if (fnc != FNC_NO || src || ttp == TAB_PIVOT) {
|
|
|
|
// Catalog like table
|
2013-07-25 19:05:57 +02:00
|
|
|
for (crp= qrp->Colresp; !rc && crp; crp= crp->Next) {
|
2015-03-18 13:30:14 +01:00
|
|
|
cnm= (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name);
|
2013-07-25 19:05:57 +02:00
|
|
|
typ= crp->Type;
|
2013-02-08 03:27:12 +01:00
|
|
|
len= crp->Length;
|
2013-05-24 00:19:26 +02:00
|
|
|
dec= crp->Prec;
|
2013-05-28 17:22:38 +02:00
|
|
|
flg= crp->Flag;
|
2014-03-18 19:25:50 +01:00
|
|
|
v= crp->Var;
|
2016-02-15 23:41:59 +01:00
|
|
|
tm= (crp->Kdata->IsNullable()) ? 0 : NOT_NULL_FLAG;
|
2014-04-19 11:11:30 +02:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
if (!len && typ == TYPE_STRING)
|
|
|
|
len= 256; // STRBLK's have 0 length
|
|
|
|
|
2013-02-08 03:27:12 +01:00
|
|
|
// Now add the field
|
2014-04-19 11:11:30 +02:00
|
|
|
#if defined(NEW_WAY)
|
|
|
|
rc= add_fields(g, thd, &alter_info, cnm, typ, len, dec,
|
2016-02-15 23:41:59 +01:00
|
|
|
tm, "", flg, dbf, v);
|
2014-04-19 11:11:30 +02:00
|
|
|
#else // !NEW_WAY
|
2016-02-15 23:41:59 +01:00
|
|
|
if (add_field(&sql, cnm, typ, len, dec, NULL, tm,
|
2015-03-18 13:30:14 +01:00
|
|
|
NULL, NULL, NULL, NULL, flg, dbf, v))
|
2013-07-25 19:05:57 +02:00
|
|
|
rc= HA_ERR_OUT_OF_MEM;
|
2014-04-19 11:11:30 +02:00
|
|
|
#endif // !NEW_WAY
|
2013-07-25 19:05:57 +02:00
|
|
|
} // endfor crp
|
2013-02-08 03:27:12 +01:00
|
|
|
|
2015-04-17 20:05:41 +02:00
|
|
|
} else {
|
|
|
|
char *schem= NULL;
|
|
|
|
|
2014-04-25 15:34:02 +02:00
|
|
|
// Not a catalog table
|
|
|
|
if (!qrp->Nblin) {
|
|
|
|
if (tab)
|
|
|
|
sprintf(g->Message, "Cannot get columns from %s", tab);
|
|
|
|
else
|
|
|
|
strcpy(g->Message, "Fail to retrieve columns");
|
|
|
|
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
2014-10-21 17:29:51 +02:00
|
|
|
goto err;
|
2014-04-25 15:34:02 +02:00
|
|
|
} // endif !nblin
|
|
|
|
|
2013-07-25 19:05:57 +02:00
|
|
|
for (i= 0; !rc && i < qrp->Nblin; i++) {
|
2013-12-03 22:59:40 +01:00
|
|
|
typ= len= prec= dec= 0;
|
2013-02-11 00:31:03 +01:00
|
|
|
tm= NOT_NULL_FLAG;
|
2013-07-08 19:03:15 +02:00
|
|
|
cnm= (char*)"noname";
|
2015-03-18 13:30:14 +01:00
|
|
|
dft= xtra= key= fmt= NULL;
|
2014-11-20 11:00:02 +01:00
|
|
|
v= ' ';
|
2014-04-19 11:11:30 +02:00
|
|
|
#if defined(NEW_WAY)
|
|
|
|
rem= "";
|
|
|
|
// cs= NULL;
|
|
|
|
#else // !NEW_WAY
|
2013-07-25 19:05:57 +02:00
|
|
|
rem= NULL;
|
2014-04-19 11:11:30 +02:00
|
|
|
#endif // !NEW_WAY
|
2013-02-11 00:31:03 +01:00
|
|
|
|
|
|
|
for (crp= qrp->Colresp; crp; crp= crp->Next)
|
|
|
|
switch (crp->Fld) {
|
|
|
|
case FLD_NAME:
|
2015-02-11 21:39:41 +01:00
|
|
|
if (ttp == TAB_PRX ||
|
|
|
|
(ttp == TAB_CSV && topt->data_charset &&
|
2015-01-17 12:19:06 +01:00
|
|
|
(!stricmp(topt->data_charset, "UTF8") ||
|
2015-02-11 21:39:41 +01:00
|
|
|
!stricmp(topt->data_charset, "UTF-8"))))
|
2015-01-17 12:19:06 +01:00
|
|
|
cnm= crp->Kdata->GetCharValue(i);
|
|
|
|
else
|
|
|
|
cnm= encode(g, crp->Kdata->GetCharValue(i));
|
|
|
|
|
2013-02-11 00:31:03 +01:00
|
|
|
break;
|
|
|
|
case FLD_TYPE:
|
2013-02-14 00:32:29 +01:00
|
|
|
typ= crp->Kdata->GetIntValue(i);
|
2013-11-06 18:22:09 +01:00
|
|
|
v = (crp->Nulls) ? crp->Nulls[i] : 0;
|
2013-02-11 00:31:03 +01:00
|
|
|
break;
|
|
|
|
case FLD_PREC:
|
2013-12-03 22:59:40 +01:00
|
|
|
// PREC must be always before LENGTH
|
|
|
|
len= prec= crp->Kdata->GetIntValue(i);
|
|
|
|
break;
|
|
|
|
case FLD_LENGTH:
|
2013-02-11 00:31:03 +01:00
|
|
|
len= crp->Kdata->GetIntValue(i);
|
|
|
|
break;
|
|
|
|
case FLD_SCALE:
|
2016-02-15 23:41:59 +01:00
|
|
|
dec = (!crp->Kdata->IsNull(i)) ? crp->Kdata->GetIntValue(i) : -1;
|
2013-02-11 00:31:03 +01:00
|
|
|
break;
|
|
|
|
case FLD_NULL:
|
|
|
|
if (crp->Kdata->GetIntValue(i))
|
|
|
|
tm= 0; // Nullable
|
|
|
|
|
2015-03-18 13:30:14 +01:00
|
|
|
break;
|
|
|
|
case FLD_FORMAT:
|
|
|
|
fmt= (crp->Kdata) ? crp->Kdata->GetCharValue(i) : NULL;
|
2013-02-11 00:31:03 +01:00
|
|
|
break;
|
|
|
|
case FLD_REM:
|
|
|
|
rem= crp->Kdata->GetCharValue(i);
|
|
|
|
break;
|
2014-04-19 11:11:30 +02:00
|
|
|
// case FLD_CHARSET:
|
2013-02-11 00:31:03 +01:00
|
|
|
// No good because remote table is already translated
|
|
|
|
// if (*(csn= crp->Kdata->GetCharValue(i)))
|
|
|
|
// cs= get_charset_by_name(csn, 0);
|
|
|
|
|
|
|
|
// break;
|
2013-12-03 22:59:40 +01:00
|
|
|
case FLD_DEFAULT:
|
|
|
|
dft= crp->Kdata->GetCharValue(i);
|
|
|
|
break;
|
2013-12-19 12:56:06 +01:00
|
|
|
case FLD_EXTRA:
|
|
|
|
xtra= crp->Kdata->GetCharValue(i);
|
2014-03-30 22:52:54 +02:00
|
|
|
|
|
|
|
// Auto_increment is not supported yet
|
|
|
|
if (!stricmp(xtra, "AUTO_INCREMENT"))
|
|
|
|
xtra= NULL;
|
|
|
|
|
2014-10-31 12:28:07 +01:00
|
|
|
break;
|
|
|
|
case FLD_KEY:
|
|
|
|
if (ttp == TAB_VIR)
|
|
|
|
key= crp->Kdata->GetCharValue(i);
|
|
|
|
|
2013-12-19 12:56:06 +01:00
|
|
|
break;
|
2015-04-17 20:05:41 +02:00
|
|
|
case FLD_SCHEM:
|
|
|
|
#if defined(ODBC_SUPPORT)
|
|
|
|
if (ttp == TAB_ODBC && crp->Kdata) {
|
|
|
|
if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) {
|
|
|
|
sprintf(g->Message,
|
|
|
|
"Several %s tables found, specify DBNAME", tab);
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
|
|
|
goto err;
|
|
|
|
} else if (!schem)
|
|
|
|
schem= crp->Kdata->GetCharValue(i);
|
|
|
|
|
|
|
|
} // endif ttp
|
|
|
|
#endif // ODBC_SUPPORT
|
2013-02-12 12:34:14 +01:00
|
|
|
default:
|
|
|
|
break; // Ignore
|
2013-02-11 00:31:03 +01:00
|
|
|
} // endswitch Fld
|
|
|
|
|
2013-02-14 00:32:29 +01:00
|
|
|
#if defined(ODBC_SUPPORT)
|
|
|
|
if (ttp == TAB_ODBC) {
|
2015-05-30 10:59:34 +02:00
|
|
|
int plgtyp;
|
|
|
|
bool w= false; // Wide character type
|
2013-02-14 00:32:29 +01:00
|
|
|
|
|
|
|
// typ must be PLG type, not SQL type
|
2015-05-30 10:59:34 +02:00
|
|
|
if (!(plgtyp= TranslateSQLType(typ, dec, prec, v, w))) {
|
2015-02-07 11:33:52 +01:00
|
|
|
if (GetTypeConv() == TPC_SKIP) {
|
|
|
|
// Skip this column
|
|
|
|
sprintf(g->Message, "Column %s skipped (unsupported type %d)",
|
|
|
|
cnm, typ);
|
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
|
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
sprintf(g->Message, "Unsupported SQL type %d", typ);
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
|
|
|
goto err;
|
|
|
|
} // endif type_conv
|
|
|
|
|
2013-02-14 00:32:29 +01:00
|
|
|
} else
|
|
|
|
typ= plgtyp;
|
|
|
|
|
2013-12-28 15:46:49 +01:00
|
|
|
switch (typ) {
|
2015-05-30 10:59:34 +02:00
|
|
|
case TYPE_STRING:
|
|
|
|
if (w) {
|
|
|
|
sprintf(g->Message, "Column %s is wide characters", cnm);
|
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message);
|
|
|
|
} // endif w
|
|
|
|
|
|
|
|
break;
|
2013-12-28 15:46:49 +01:00
|
|
|
case TYPE_DOUBLE:
|
|
|
|
// Some data sources do not count dec in length (prec)
|
|
|
|
prec += (dec + 2); // To be safe
|
2015-02-28 23:01:55 +01:00
|
|
|
break;
|
2013-12-28 15:46:49 +01:00
|
|
|
case TYPE_DECIM:
|
2015-02-28 23:01:55 +01:00
|
|
|
prec= len;
|
2013-12-28 15:46:49 +01:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
dec= 0;
|
|
|
|
} // endswitch typ
|
2013-02-14 00:32:29 +01:00
|
|
|
|
2016-02-15 23:41:59 +01:00
|
|
|
} else
|
2013-02-14 00:32:29 +01:00
|
|
|
#endif // ODBC_SUPPORT
|
2013-04-29 13:50:20 +02:00
|
|
|
// Make the arguments as required by add_fields
|
2016-02-15 23:41:59 +01:00
|
|
|
if (typ == TYPE_DOUBLE)
|
|
|
|
prec= len;
|
|
|
|
|
|
|
|
if (typ == TYPE_DATE)
|
2013-12-03 22:59:40 +01:00
|
|
|
prec= 0;
|
2013-04-29 13:50:20 +02:00
|
|
|
|
|
|
|
// Now add the field
|
2014-04-19 11:11:30 +02:00
|
|
|
#if defined(NEW_WAY)
|
|
|
|
rc= add_fields(g, thd, &alter_info, cnm, typ, prec, dec,
|
|
|
|
tm, rem, 0, dbf, v);
|
|
|
|
#else // !NEW_WAY
|
2014-10-31 12:28:07 +01:00
|
|
|
if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra,
|
2015-03-18 13:30:14 +01:00
|
|
|
fmt, 0, dbf, v))
|
2013-07-25 19:05:57 +02:00
|
|
|
rc= HA_ERR_OUT_OF_MEM;
|
2014-04-19 11:11:30 +02:00
|
|
|
#endif // !NEW_WAY
|
2013-02-08 03:27:12 +01:00
|
|
|
} // endfor i
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-04-25 15:34:02 +02:00
|
|
|
} // endif fnc
|
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
#if defined(NEW_WAY)
|
|
|
|
rc= init_table_share(thd, table_s, create_info, &alter_info);
|
|
|
|
#else // !NEW_WAY
|
2013-07-25 19:05:57 +02:00
|
|
|
if (!rc)
|
|
|
|
rc= init_table_share(thd, table_s, create_info, &sql);
|
2013-10-26 00:43:03 +02:00
|
|
|
// rc= init_table_share(thd, table_s, create_info, dsn, &sql);
|
2014-04-19 11:11:30 +02:00
|
|
|
#endif // !NEW_WAY
|
2013-04-29 13:50:20 +02:00
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
g->jump_level--;
|
2013-07-25 19:05:57 +02:00
|
|
|
return rc;
|
2013-02-08 03:27:12 +01:00
|
|
|
} // endif ok
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-02-15 01:33:23 +01:00
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
2014-10-21 17:29:51 +02:00
|
|
|
|
|
|
|
err:
|
|
|
|
g->jump_level--;
|
2013-04-19 20:35:43 +02:00
|
|
|
return HA_ERR_INTERNAL_ERROR;
|
2013-04-29 13:50:20 +02:00
|
|
|
} // end of connect_assisted_discovery
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-12-31 13:08:29 +01:00
|
|
|
/**
  Get the database name from a qualified table name.
*/
char *ha_connect::GetDBfromName(const char *name)
{
  char *db, dbname[128], tbname[128];

  if (filename_to_dbname_and_tablename(name, dbname, sizeof(dbname),
                                       tbname, sizeof(tbname)))
    *dbname= 0;

  if (*dbname) {
    assert(xp && xp->g);
    db= (char*)PlugSubAlloc(xp->g, NULL, strlen(dbname) + 1);
    strcpy(db, dbname);
  } else
    db= NULL;

  return db;
} // end of GetDBfromName

/**
  @brief
  create() is called to create a table. The variable name will have the name
  of the table.

  @details
  When create() is called you do not need to worry about
  opening the table. Also, the .frm file will have already been
  created so adjusting create_info is not necessary. You can overwrite
  the .frm file at this point if you wish to change the table
  definition, but there are no methods currently provided for doing
  so.

  Called from handler.cc by ha_create_table().

  @note
  Currently we do some checking on the create definitions and stop
  creating if an error is found. We wish we could change the table
  definition such as providing a default table type. However, as said
  above, there are no methods to do so.

  @see
  ha_create_table() in handler.cc
*/
|
|
|
|
|
|
|
|
int ha_connect::create(const char *name, TABLE *table_arg,
|
|
|
|
HA_CREATE_INFO *create_info)
|
|
|
|
{
|
|
|
|
int rc= RC_OK;
|
2014-07-17 18:13:51 +02:00
|
|
|
bool dbf, inward;
|
2013-02-07 10:34:27 +01:00
|
|
|
Field* *field;
|
|
|
|
Field *fp;
|
2013-03-17 11:31:11 +01:00
|
|
|
TABTYPE type;
|
2013-02-07 10:34:27 +01:00
|
|
|
TABLE *st= table; // Probably unuseful
|
2014-04-19 11:11:30 +02:00
|
|
|
THD *thd= ha_thd();
|
2014-05-31 12:31:26 +02:00
|
|
|
#if defined(WITH_PARTITION_STORAGE_ENGINE)
|
|
|
|
partition_info *part_info= table_arg->part_info;
|
|
|
|
#endif // WITH_PARTITION_STORAGE_ENGINE
|
2013-05-02 16:33:15 +02:00
|
|
|
xp= GetUser(thd, xp);
|
2013-04-19 20:35:43 +02:00
|
|
|
PGLOBAL g= xp->g;
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
DBUG_ENTER("ha_connect::create");
|
2015-07-26 00:04:36 +02:00
|
|
|
/*
|
|
|
|
This assignment fixes test failures if some
|
|
|
|
"ALTER TABLE t1 ADD KEY(a)" query exits on ER_ACCESS_DENIED_ERROR
|
|
|
|
(e.g. on missing FILE_ACL). All following "CREATE TABLE" failed with
|
|
|
|
"ERROR 1105: CONNECT index modification should be in-place"
|
|
|
|
TODO: check with Olivier.
|
|
|
|
*/
|
|
|
|
g->Xchk= NULL;
|
2014-02-03 16:14:13 +01:00
|
|
|
int sqlcom= thd_sql_command(table_arg->in_use);
|
2014-04-22 19:15:08 +02:00
|
|
|
PTOS options= GetTableOptionStruct(table_arg->s);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
table= table_arg; // Used by called functions
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace)
|
2014-03-10 18:29:04 +01:00
|
|
|
htrc("create: this=%p thd=%p xp=%p g=%p sqlcom=%d name=%s\n",
|
2014-02-03 16:14:13 +01:00
|
|
|
this, thd, xp, g, sqlcom, GetTableName());
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
// CONNECT engine specific table options:
|
|
|
|
DBUG_ASSERT(options);
|
2013-03-17 11:31:11 +01:00
|
|
|
type= GetTypeID(options->type);
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2013-05-02 16:33:15 +02:00
|
|
|
// Check table type
|
|
|
|
if (type == TAB_UNDEF) {
|
2014-04-19 11:11:30 +02:00
|
|
|
options->type= (options->srcdef) ? "MYSQL" :
|
2013-05-24 00:19:26 +02:00
|
|
|
(options->tabname) ? "PROXY" : "DOS";
|
|
|
|
type= GetTypeID(options->type);
|
|
|
|
sprintf(g->Message, "No table_type. Will be set to %s", options->type);
|
2014-02-07 22:44:43 +01:00
|
|
|
|
|
|
|
    if (sqlcom == SQLCOM_CREATE_TABLE)
      push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);

  } else if (type == TAB_NIY) {
    sprintf(g->Message, "Unsupported table type %s", options->type);
    my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
    DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
  } // endif ttp

  if (check_privileges(thd, options, GetDBfromName(name)))
    DBUG_RETURN(HA_ERR_INTERNAL_ERROR);

  inward= IsFileType(type) && !options->filename;

  if (options->data_charset) {
    const CHARSET_INFO *data_charset;

    if (!(data_charset= get_charset_by_csname(options->data_charset,
                                              MY_CS_PRIMARY, MYF(0)))) {
      my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), options->data_charset);
      DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
    } // endif charset

    if (type == TAB_XML && data_charset != &my_charset_utf8_general_ci) {
      my_printf_error(ER_UNKNOWN_ERROR,
                      "DATA_CHARSET='%s' is not supported for TABLE_TYPE=XML",
                      MYF(0), options->data_charset);
      DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
    } // endif utf8

  } // endif charset

  if (!g) {
    rc= HA_ERR_INTERNAL_ERROR;
    DBUG_RETURN(rc);
  } else
    dbf= (GetTypeID(options->type) == TAB_DBF && !options->catfunc);

  // Can be null in ALTER TABLE
  if (create_info->alias)
    // Check whether a table is defined on itself
    switch (type) {
      case TAB_PRX:
      case TAB_XCL:
      case TAB_PIVOT:
      case TAB_OCCUR:
        if (options->srcdef) {
          strcpy(g->Message, "Cannot check looping reference");
          push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
        } else if (options->tabname) {
          if (!stricmp(options->tabname, create_info->alias) &&
             (!options->dbname ||
              !stricmp(options->dbname, table_arg->s->db.str))) {
            sprintf(g->Message, "A %s table cannot refer to itself",
                    options->type);
            my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
            DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
          } // endif tab

        } else {
          strcpy(g->Message, "Missing object table name or definition");
          my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
          DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
        } // endif tabname

      case TAB_MYSQL:
#if defined(WITH_PARTITION_STORAGE_ENGINE)
        if (!part_info)
#endif   // WITH_PARTITION_STORAGE_ENGINE
       {const char *src= options->srcdef;
        char *host, *db, *tab= (char*)options->tabname;
        int   port;

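        // Gather the connection parameters: host and port come from the
        // option list, the database from the DATABASE option; a CONNECTION
        // string, when given, overrides them below.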
        host= GetListOption(g, "host", options->oplist, NULL);
        db= GetStringOption("database", NULL);
        port= atoi(GetListOption(g, "port", options->oplist, "0"));

        if (create_info->connect_string.str &&
            create_info->connect_string.length) {
          char  *dsn= strz(g, create_info->connect_string);
          PMYDEF mydef= new(g) MYSQLDEF();

          mydef->SetName(create_info->alias);

          if (!mydef->ParseURL(g, dsn, false)) {
            if (mydef->GetHostname())
              host= mydef->GetHostname();

            if (mydef->GetDatabase())
              db= mydef->GetDatabase();

            if (mydef->GetTabname())
              tab= mydef->GetTabname();

            if (mydef->GetPortnumber())
              port= mydef->GetPortnumber();

          } else {
            my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
            DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
          } // endif ParseURL

        } // endif connect_string

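        // Refuse a remote definition that would loop back to the table
        // being created (same host, database and table name).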
        if (CheckSelf(g, table_arg->s, host, db, tab, src, port)) {
          my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
          DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
        } // endif CheckSelf

       }break;
      default: /* do nothing */;
        break;
     } // endswitch ttp

  if (type == TAB_XML) {
    bool  dom;                  // True: MS-DOM, False libxml2
    char *xsup= GetListOption(g, "Xmlsup", options->oplist, "*");

    // Note that if no support is specified, the default is MS-DOM
    // on Windows and libxml2 otherwise
    switch (*xsup) {
      case '*':
#if defined(__WIN__)
        dom= true;
#else   // !__WIN__
        dom= false;
#endif  // !__WIN__
        break;
      case 'M':
      case 'D':
        dom= true;
        break;
      default:
        dom= false;
        break;
      } // endswitch xsup

#if !defined(DOMDOC_SUPPORT)
    if (dom) {
      strcpy(g->Message, "MS-DOM not supported by this version");
      xsup= NULL;
    } // endif DomDoc
#endif   // !DOMDOC_SUPPORT

#if !defined(LIBXML2_SUPPORT)
    if (!dom) {
      strcpy(g->Message, "libxml2 not supported by this version");
      xsup= NULL;
    } // endif Libxml2
#endif   // !LIBXML2_SUPPORT

    if (!xsup) {
      my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
      rc= HA_ERR_INTERNAL_ERROR;
      DBUG_RETURN(rc);
    } // endif xsup

  } // endif type

  if (type == TAB_JSON) {
    int pretty= atoi(GetListOption(g, "Pretty", options->oplist, "2"));

    if (!options->lrecl && pretty != 2) {
      sprintf(g->Message, "LRECL must be specified for pretty=%d", pretty);
      my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
      rc= HA_ERR_INTERNAL_ERROR;
      DBUG_RETURN(rc);
    } // endif lrecl

  } // endif type JSON

  if (type == TAB_CSV) {
    const char *sep= options->separator;

    if (sep && strlen(sep) > 1) {
      sprintf(g->Message, "Invalid separator %s", sep);
      my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
      rc= HA_ERR_INTERNAL_ERROR;
      DBUG_RETURN(rc);
    } // endif sep

  } // endif type CSV

  // Check column types
  for (field= table_arg->field; *field; field++) {
    fp= *field;

    if (!fp->stored_in_db())
      continue;            // This is a virtual column

    if (fp->flags & AUTO_INCREMENT_FLAG) {
      strcpy(g->Message, "Auto_increment is not supported yet");
      my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
      rc= HA_ERR_INTERNAL_ERROR;
      DBUG_RETURN(rc);
    } // endif flags

    if (fp->flags & (BLOB_FLAG | ENUM_FLAG | SET_FLAG)) {
      sprintf(g->Message, "Unsupported type for column %s",
                          fp->field_name);
      my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
      rc= HA_ERR_INTERNAL_ERROR;
      DBUG_RETURN(rc);
    } // endif flags

    if (type == TAB_VIR)
      if (!fp->option_struct || !fp->option_struct->special) {
        strcpy(g->Message, "Virtual tables accept only special or virtual columns");
        my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
        rc= HA_ERR_INTERNAL_ERROR;
        DBUG_RETURN(rc);
      } // endif special

    switch (fp->type()) {
      case MYSQL_TYPE_SHORT:
      case MYSQL_TYPE_LONG:
      case MYSQL_TYPE_FLOAT:
      case MYSQL_TYPE_DOUBLE:
      case MYSQL_TYPE_TIMESTAMP:
      case MYSQL_TYPE_DATE:
      case MYSQL_TYPE_TIME:
      case MYSQL_TYPE_DATETIME:
      case MYSQL_TYPE_YEAR:
      case MYSQL_TYPE_NEWDATE:
      case MYSQL_TYPE_LONGLONG:
      case MYSQL_TYPE_TINY:
      case MYSQL_TYPE_DECIMAL:
      case MYSQL_TYPE_NEWDECIMAL:
      case MYSQL_TYPE_INT24:
        break;                     // Ok
      case MYSQL_TYPE_VARCHAR:
      case MYSQL_TYPE_VAR_STRING:
      case MYSQL_TYPE_STRING:
        if (!fp->field_length) {
          sprintf(g->Message, "Unsupported 0 length for column %s",
                              fp->field_name);
          rc= HA_ERR_INTERNAL_ERROR;
          my_printf_error(ER_UNKNOWN_ERROR,
                          "Unsupported 0 length for column %s",
                          MYF(0), fp->field_name);
          DBUG_RETURN(rc);
        } // endif fp

        break;                     // To be checked
      case MYSQL_TYPE_BIT:
      case MYSQL_TYPE_NULL:
      case MYSQL_TYPE_ENUM:
      case MYSQL_TYPE_SET:
      case MYSQL_TYPE_TINY_BLOB:
      case MYSQL_TYPE_MEDIUM_BLOB:
      case MYSQL_TYPE_LONG_BLOB:
      case MYSQL_TYPE_BLOB:
      case MYSQL_TYPE_GEOMETRY:
      default:
//      fprintf(stderr, "Unsupported type column %s\n", fp->field_name);
        sprintf(g->Message, "Unsupported type for column %s",
                            fp->field_name);
        rc= HA_ERR_INTERNAL_ERROR;
        my_printf_error(ER_UNKNOWN_ERROR, "Unsupported type for column %s",
                        MYF(0), fp->field_name);
        DBUG_RETURN(rc);
        break;
      } // endswitch type

    if ((fp)->real_maybe_null() && !IsTypeNullable(type)) {
      my_printf_error(ER_UNKNOWN_ERROR,
                      "Table type %s does not support nullable columns",
                      MYF(0), options->type);
      DBUG_RETURN(HA_ERR_UNSUPPORTED);
    } // endif !nullable

    if (dbf) {
      bool b= false;

      if ((b= strlen(fp->field_name) > 10))
        sprintf(g->Message, "DBF: Column name '%s' is too long (max=10)",
                            fp->field_name);
      else if ((b= fp->field_length > 255))
        sprintf(g->Message, "DBF: Column length too big for '%s' (max=255)",
                            fp->field_name);

      if (b) {
        my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
        rc= HA_ERR_INTERNAL_ERROR;
        DBUG_RETURN(rc);
      } // endif b

    } // endif dbf

  } // endfor field

  if ((sqlcom == SQLCOM_CREATE_TABLE || *GetTableName() == '#') && inward) {
    // The file name is not specified, create a default file in
    // the database directory named table_name.table_type.
    // (temporarily not done for XML because a void file causes
    // the XML parsers to report an error on the first Insert)
    char buf[_MAX_PATH], fn[_MAX_PATH], dbpath[_MAX_PATH], lwt[12];
    int  h;

    // Check for incompatible options
    if (options->sepindex) {
      my_message(ER_UNKNOWN_ERROR,
                 "SEPINDEX is incompatible with unspecified file name",
                 MYF(0));
      DBUG_RETURN(HA_ERR_UNSUPPORTED);
    } else if (GetTypeID(options->type) == TAB_VEC)
      if (!table->s->max_rows || options->split) {
        my_printf_error(ER_UNKNOWN_ERROR,
            "%s tables whose file name is unspecified cannot be split",
            MYF(0), options->type);
        DBUG_RETURN(HA_ERR_UNSUPPORTED);
      } else if (options->header == 2) {
        my_printf_error(ER_UNKNOWN_ERROR,
        "header=2 is not allowed for %s tables whose file name is unspecified",
            MYF(0), options->type);
        DBUG_RETURN(HA_ERR_UNSUPPORTED);
      } // endif's

    // Fold type to lower case
    for (int i= 0; i < 12; i++)
      if (!options->type[i]) {
        lwt[i]= 0;
        break;
      } else
        lwt[i]= tolower(options->type[i]);

#if defined(WITH_PARTITION_STORAGE_ENGINE)
    if (part_info) {
      char *p;

      strcpy(dbpath, name);
      p= strrchr(dbpath, slash);
      strcpy(partname, ++p);
      strcat(strcat(strcpy(buf, p), "."), lwt);
      *p= 0;
    } else {
#endif   // WITH_PARTITION_STORAGE_ENGINE
      strcat(strcat(strcpy(buf, GetTableName()), "."), lwt);
      sprintf(g->Message, "No file name. Table will use %s", buf);

      if (sqlcom == SQLCOM_CREATE_TABLE)
        push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);

      strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/");
#if defined(WITH_PARTITION_STORAGE_ENGINE)
    } // endif part_info
#endif   // WITH_PARTITION_STORAGE_ENGINE

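    // Resolve the default file name against the database directory and
    // create it as an empty placeholder; an already existing file only
    // raises a warning.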
    PlugSetPath(fn, buf, dbpath);

    if ((h= ::open(fn, O_CREAT | O_EXCL, 0666)) == -1) {
      if (errno == EEXIST)
        sprintf(g->Message, "Default file %s already exists", fn);
      else
        sprintf(g->Message, "Error %d creating file %s", errno, fn);

      push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
    } else
      ::close(h);

    if ((type == TAB_FMT || options->readonly) && sqlcom == SQLCOM_CREATE_TABLE)
      push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0,
        "Congratulation, you just created a read-only void table!");

  } // endif sqlcom

  if (trace)
    htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas);

  // To check whether indexes have to be made or remade
  if (!g->Xchk) {
    PIXDEF xdp;

    // We should be in CREATE TABLE, ALTER_TABLE or CREATE INDEX
    if (!(sqlcom == SQLCOM_CREATE_TABLE || sqlcom == SQLCOM_ALTER_TABLE ||
          sqlcom == SQLCOM_CREATE_INDEX || sqlcom == SQLCOM_DROP_INDEX))
//        (sqlcom == SQLCOM_CREATE_INDEX && part_info) ||
//        (sqlcom == SQLCOM_DROP_INDEX && part_info)))
      push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0,
        "Unexpected command in create, please contact CONNECT team");

#if defined(WITH_PARTITION_STORAGE_ENGINE)
    if (part_info && !inward)
      strcpy(partname, decode(g, strrchr(name, '#') + 1));
//      strcpy(partname, part_info->curr_part_elem->partition_name);
#endif   // WITH_PARTITION_STORAGE_ENGINE

    if (g->Alchecked == 0 &&
        (!IsFileType(type) || FileExists(options->filename, false))) {
      if (part_info) {
        sprintf(g->Message, "Data repartition in %s is unchecked", partname);
        push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
      } else if (sqlcom == SQLCOM_ALTER_TABLE) {
        // This is an ALTER to CONNECT from another engine.
        // It cannot be accepted because the table data would be modified
        // except when the target file does not exist.
        strcpy(g->Message, "Operation denied. Table data would be modified.");
        my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
        DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
      } // endif part_info

    } // endif outward

    // Get the index definitions
    if ((xdp= GetIndexInfo()) || sqlcom == SQLCOM_DROP_INDEX) {
      if (options->multiple) {
        strcpy(g->Message, "Multiple tables are not indexable");
        my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
        rc= HA_ERR_UNSUPPORTED;
      } else if (options->compressed) {
        strcpy(g->Message, "Compressed tables are not indexable");
        my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
        rc= HA_ERR_UNSUPPORTED;
      } else if (GetIndexType(type) == 1) {
        PDBUSER dup= PlgGetUser(g);
        PCATLG  cat= (dup) ? dup->Catalog : NULL;

        SetDataPath(g, table_arg->s->db.str);

        if (cat) {
//        cat->SetDataPath(g, table_arg->s->db.str);

#if defined(WITH_PARTITION_STORAGE_ENGINE)
          if (part_info)
            strcpy(partname,
                   decode(g, strrchr(name, (inward ? slash : '#')) + 1));
#endif   // WITH_PARTITION_STORAGE_ENGINE

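          // Make the indexes now via optimize(); on failure trace the
          // return code and raise an error.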
          if ((rc= optimize(table->in_use, NULL))) {
            htrc("Create rc=%d %s\n", rc, g->Message);
            my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
            rc= HA_ERR_INTERNAL_ERROR;
          } else
            CloseTable(g);

        } // endif cat

      } else if (GetIndexType(type) == 3) {
        if (CheckVirtualIndex(table_arg->s)) {
          my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
          rc= HA_ERR_UNSUPPORTED;
        } // endif Check

      } else if (!GetIndexType(type)) {
        sprintf(g->Message, "Table type %s is not indexable", options->type);
        my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
        rc= HA_ERR_UNSUPPORTED;
      } // endif index type

    } // endif xdp

  } else {
    // This should no longer happen with the new way of indexing
    my_message(ER_UNKNOWN_ERROR,
               "CONNECT index modification should be in-place", MYF(0));
    DBUG_RETURN(HA_ERR_UNSUPPORTED);
  } // endif Xchk

  table= st;
  DBUG_RETURN(rc);
} // end of create

/**
  Used to check whether a file-based outward table can be populated by
  an ALTER TABLE command. The conditions are:
  - the file does not exist or is void
  - the user has the file privilege
*/
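// Illustrative example (not taken from the original source): an ALTER TABLE
// t1 ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='t1.csv' is accepted when
// t1.csv is missing or empty, and refused otherwise, since the ALTER would
// modify existing table data.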
bool ha_connect::FileExists(const char *fn, bool bf)
{
  if (!fn || !*fn)
    return false;
  else if (IsPartitioned() && bf)
    return true;

  if (table) {
    char *s, tfn[_MAX_PATH], filename[_MAX_PATH], path[_MAX_PATH];
    bool  b= false;
    int   n;
    struct stat info;

#if defined(__WIN__)
    s= "\\";
#else   // !__WIN__
    s= "/";
#endif  // !__WIN__
    if (IsPartitioned()) {
      sprintf(tfn, fn, GetPartName());

      // This is to avoid an initialization error raised by the
      // test on check_table_flags made in ha_partition::open
      // that can fail if some partition files are empty.
      b= true;
    } else
      strcpy(tfn, fn);

strcat(strcat(strcat(strcpy(path, "."), s), table->s->db.str), s);
|
2014-07-17 18:13:51 +02:00
|
|
|
PlugSetPath(filename, tfn, path);
|
2014-02-03 16:14:13 +01:00
|
|
|
n= stat(filename, &info);
|
|
|
|
|
|
|
|
if (n < 0) {
|
|
|
|
if (errno != ENOENT) {
|
|
|
|
char buf[_MAX_PATH + 20];
|
|
|
|
|
|
|
|
sprintf(buf, "Error %d for file %s", errno, filename);
|
|
|
|
push_warning(table->in_use, Sql_condition::WARN_LEVEL_WARN, 0, buf);
|
|
|
|
return true;
|
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
|
|
|
|
} else
|
2014-07-17 18:13:51 +02:00
|
|
|
return (info.st_size || b) ? true : false;
|
2014-02-03 16:14:13 +01:00
|
|
|
|
|
|
|
} // endif table
|
|
|
|
|
|
|
|
return true;
|
|
|
|
} // end of FileExists
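The stat() based test above can be reduced to the following standalone sketch.
It is illustrative only: the helper name data_file_exists is hypothetical, the
warning and partition handling of FileExists is deliberately left out, and a
POSIX stat() is assumed to be available.

  #include <sys/stat.h>

  // Hypothetical, simplified equivalent of the core of FileExists above:
  // a data file "exists" only when stat() succeeds and the file is not empty.
  static bool data_file_exists(const char *filename)
  {
    struct stat info;

    if (stat(filename, &info) < 0)
      return false;             // missing file (or stat error): treat as absent

    return info.st_size > 0;    // empty files are reported as non-existent
  } // end of data_file_exists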
|
|
|
|
|
2014-02-07 22:44:43 +01:00
|
|
|
// Called by SameString and NoFieldOptionChange
|
|
|
|
bool ha_connect::CheckString(const char *str1, const char *str2)
|
|
|
|
{
|
|
|
|
bool b1= (!str1 || !*str1), b2= (!str2 || !*str2);
|
|
|
|
|
|
|
|
if (b1 && b2)
|
|
|
|
return true;
|
|
|
|
else if ((b1 && !b2) || (!b1 && b2) || stricmp(str1, str2))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
} // end of CheckString
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
/**
|
|
|
|
  check whether a string option has changed
|
|
|
|
*/
|
2014-02-07 22:44:43 +01:00
|
|
|
bool ha_connect::SameString(TABLE *tab, char *opn)
|
2014-02-03 16:14:13 +01:00
|
|
|
{
|
|
|
|
char *str1, *str2;
|
|
|
|
|
|
|
|
tshp= tab->s; // The altered table
|
|
|
|
str1= GetStringOption(opn);
|
|
|
|
tshp= NULL;
|
|
|
|
str2= GetStringOption(opn);
|
2014-02-07 22:44:43 +01:00
|
|
|
return CheckString(str1, str2);
|
|
|
|
} // end of SameString
|
2014-02-03 16:14:13 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
  check whether a Boolean option has changed
|
|
|
|
*/
|
|
|
|
bool ha_connect::SameBool(TABLE *tab, char *opn)
|
|
|
|
{
|
|
|
|
bool b1, b2;
|
|
|
|
|
|
|
|
tshp= tab->s; // The altered table
|
|
|
|
b1= GetBooleanOption(opn, false);
|
|
|
|
tshp= NULL;
|
|
|
|
b2= GetBooleanOption(opn, false);
|
|
|
|
return (b1 == b2);
|
|
|
|
} // end of SameBool
|
|
|
|
|
|
|
|
/**
|
|
|
|
  check whether an integer option has changed
|
|
|
|
*/
|
|
|
|
bool ha_connect::SameInt(TABLE *tab, char *opn)
|
|
|
|
{
|
|
|
|
int i1, i2;
|
|
|
|
|
|
|
|
tshp= tab->s; // The altered table
|
|
|
|
i1= GetIntegerOption(opn);
|
|
|
|
tshp= NULL;
|
|
|
|
i2= GetIntegerOption(opn);
|
|
|
|
|
|
|
|
if (!stricmp(opn, "lrecl"))
|
|
|
|
return (i1 == i2 || !i1 || !i2);
|
|
|
|
else if (!stricmp(opn, "ending"))
|
|
|
|
return (i1 == i2 || i1 <= 0 || i2 <= 0);
|
|
|
|
else
|
|
|
|
return (i1 == i2);
|
|
|
|
|
|
|
|
} // end of SameInt
|
|
|
|
|
2014-02-07 22:44:43 +01:00
|
|
|
/**
|
|
|
|
  check whether any field option has changed
|
|
|
|
*/
|
|
|
|
bool ha_connect::NoFieldOptionChange(TABLE *tab)
|
|
|
|
{
|
|
|
|
bool rc= true;
|
|
|
|
ha_field_option_struct *fop1, *fop2;
|
|
|
|
Field* *fld1= table->s->field;
|
|
|
|
Field* *fld2= tab->s->field;
|
|
|
|
|
|
|
|
for (; rc && *fld1 && *fld2; fld1++, fld2++) {
|
|
|
|
fop1= (*fld1)->option_struct;
|
|
|
|
fop2= (*fld2)->option_struct;
|
|
|
|
|
|
|
|
rc= (fop1->offset == fop2->offset &&
|
|
|
|
fop1->fldlen == fop2->fldlen &&
|
|
|
|
CheckString(fop1->dateformat, fop2->dateformat) &&
|
|
|
|
CheckString(fop1->fieldformat, fop2->fieldformat) &&
|
|
|
|
CheckString(fop1->special, fop2->special));
|
|
|
|
} // endfor fld
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
} // end of NoFieldOptionChange
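SameString, SameBool and SameInt above all follow the same pattern: point tshp
at the altered table share, read the option, restore tshp, read it again, then
compare the two values (SameString additionally treats a missing option and an
empty string as equal, which is what CheckString encodes). A minimal,
self-contained sketch of that pattern; the same_option helper is hypothetical
and not part of the CONNECT sources:

  #include <functional>

  // Hypothetical illustration of the option comparison pattern: fetch the
  // value once for the altered definition and once for the current one,
  // then report whether the two values match.
  template <class T>
  static bool same_option(const std::function<T(bool altered)> &get_option)
  {
    T v1= get_option(true);     // value seen through the altered table share
    T v2= get_option(false);    // value seen through the current table share
    return v1 == v2;
  } // end of same_option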
|
2014-02-03 16:14:13 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
Check if a storage engine supports a particular alter table in-place
|
|
|
|
|
|
|
|
@param altered_table TABLE object for new version of table.
|
|
|
|
@param ha_alter_info Structure describing changes to be done
|
|
|
|
by ALTER TABLE and holding data used
|
|
|
|
during in-place alter.
|
|
|
|
|
|
|
|
@retval HA_ALTER_ERROR Unexpected error.
|
|
|
|
@retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported, must use copy.
|
|
|
|
@retval HA_ALTER_INPLACE_EXCLUSIVE_LOCK Supported, but requires X lock.
|
|
|
|
@retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE
|
|
|
|
Supported, but requires SNW lock
|
|
|
|
during main phase. Prepare phase
|
|
|
|
requires X lock.
|
|
|
|
@retval HA_ALTER_INPLACE_SHARED_LOCK Supported, but requires SNW lock.
|
|
|
|
@retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE
|
|
|
|
Supported, concurrent reads/writes
|
|
|
|
allowed. However, prepare phase
|
|
|
|
requires X lock.
|
|
|
|
@retval HA_ALTER_INPLACE_NO_LOCK Supported, concurrent
|
|
|
|
reads/writes allowed.
|
|
|
|
|
|
|
|
@note The default implementation uses the old in-place ALTER API
|
|
|
|
to determine if the storage engine supports in-place ALTER or not.
|
|
|
|
|
|
|
|
@note Called without holding thr_lock.c lock.
|
|
|
|
*/
|
|
|
|
enum_alter_inplace_result
|
|
|
|
ha_connect::check_if_supported_inplace_alter(TABLE *altered_table,
|
2014-07-20 12:31:42 +02:00
|
|
|
Alter_inplace_info *ha_alter_info)
|
2014-02-03 16:14:13 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("check_if_supported_alter");
|
|
|
|
|
|
|
|
bool idx= false, outward= false;
|
|
|
|
THD *thd= ha_thd();
|
|
|
|
int sqlcom= thd_sql_command(thd);
|
|
|
|
TABTYPE newtyp, type= TAB_UNDEF;
|
|
|
|
HA_CREATE_INFO *create_info= ha_alter_info->create_info;
|
|
|
|
PTOS newopt, oldopt;
|
|
|
|
xp= GetUser(thd, xp);
|
|
|
|
PGLOBAL g= xp->g;
|
|
|
|
|
|
|
|
if (!g || !table) {
|
|
|
|
my_message(ER_UNKNOWN_ERROR, "Cannot check ALTER operations", MYF(0));
|
|
|
|
DBUG_RETURN(HA_ALTER_ERROR);
|
|
|
|
} // endif Xchk
|
|
|
|
|
|
|
|
newopt= altered_table->s->option_struct;
|
|
|
|
oldopt= table->s->option_struct;
|
|
|
|
|
|
|
|
// If this is the start of a new query, cleanup the previous one
|
|
|
|
if (xp->CheckCleanup()) {
|
|
|
|
tdbp= NULL;
|
|
|
|
valid_info= false;
|
|
|
|
} // endif CheckCleanup
|
|
|
|
|
|
|
|
g->Alchecked= 1; // Tested in create
|
|
|
|
g->Xchk= NULL;
|
|
|
|
type= GetRealType(oldopt);
|
|
|
|
newtyp= GetRealType(newopt);
|
|
|
|
|
|
|
|
// No copy algorithm for outward tables
|
|
|
|
outward= (!IsFileType(type) || (oldopt->filename && *oldopt->filename));
|
|
|
|
|
|
|
|
// Index operations
|
|
|
|
Alter_inplace_info::HA_ALTER_FLAGS index_operations=
|
2014-04-19 11:11:30 +02:00
|
|
|
Alter_inplace_info::ADD_INDEX |
|
2014-02-03 16:14:13 +01:00
|
|
|
Alter_inplace_info::DROP_INDEX |
|
2014-04-19 11:11:30 +02:00
|
|
|
Alter_inplace_info::ADD_UNIQUE_INDEX |
|
2014-02-03 16:14:13 +01:00
|
|
|
Alter_inplace_info::DROP_UNIQUE_INDEX |
|
2014-04-19 11:11:30 +02:00
|
|
|
Alter_inplace_info::ADD_PK_INDEX |
|
2014-02-03 16:14:13 +01:00
|
|
|
Alter_inplace_info::DROP_PK_INDEX;
|
|
|
|
|
|
|
|
Alter_inplace_info::HA_ALTER_FLAGS inplace_offline_operations=
|
|
|
|
Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH |
|
|
|
|
Alter_inplace_info::ALTER_COLUMN_NAME |
|
|
|
|
Alter_inplace_info::ALTER_COLUMN_DEFAULT |
|
|
|
|
Alter_inplace_info::CHANGE_CREATE_OPTION |
|
2014-08-05 17:01:41 +02:00
|
|
|
Alter_inplace_info::ALTER_RENAME |
|
|
|
|
Alter_inplace_info::ALTER_PARTITIONED | index_operations;
|
2014-02-03 16:14:13 +01:00
|
|
|
|
|
|
|
if (ha_alter_info->handler_flags & index_operations ||
|
2014-02-07 22:44:43 +01:00
|
|
|
!SameString(altered_table, "optname") ||
|
2014-02-03 16:14:13 +01:00
|
|
|
!SameBool(altered_table, "sepindex")) {
|
2014-04-25 19:14:33 +02:00
|
|
|
if (newopt->multiple) {
|
|
|
|
strcpy(g->Message, "Multiple tables are not indexable");
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
|
|
|
DBUG_RETURN(HA_ALTER_ERROR);
|
|
|
|
} else if (newopt->compressed) {
|
|
|
|
strcpy(g->Message, "Compressed tables are not indexable");
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
|
|
|
DBUG_RETURN(HA_ALTER_ERROR);
|
|
|
|
} else if (GetIndexType(type) == 1) {
|
2014-04-14 14:26:48 +02:00
|
|
|
g->Xchk= new(g) XCHK;
|
|
|
|
PCHK xcp= (PCHK)g->Xchk;
|
|
|
|
|
|
|
|
xcp->oldpix= GetIndexInfo(table->s);
|
|
|
|
xcp->newpix= GetIndexInfo(altered_table->s);
|
|
|
|
xcp->oldsep= GetBooleanOption("sepindex", false);
|
|
|
|
xcp->oldsep= xcp->SetName(g, GetStringOption("optname"));
|
|
|
|
tshp= altered_table->s;
|
|
|
|
xcp->newsep= GetBooleanOption("sepindex", false);
|
|
|
|
xcp->newsep= xcp->SetName(g, GetStringOption("optname"));
|
|
|
|
tshp= NULL;
|
|
|
|
|
2014-10-21 17:29:51 +02:00
|
|
|
if (trace && g->Xchk)
|
2014-04-14 14:26:48 +02:00
|
|
|
htrc(
|
|
|
|
"oldsep=%d newsep=%d oldopn=%s newopn=%s oldpix=%p newpix=%p\n",
|
|
|
|
xcp->oldsep, xcp->newsep,
|
|
|
|
SVP(xcp->oldopn), SVP(xcp->newopn),
|
|
|
|
xcp->oldpix, xcp->newpix);
|
|
|
|
|
|
|
|
if (sqlcom == SQLCOM_ALTER_TABLE)
|
|
|
|
idx= true;
|
|
|
|
else
|
|
|
|
DBUG_RETURN(HA_ALTER_INPLACE_EXCLUSIVE_LOCK);
|
|
|
|
|
2014-10-31 12:28:07 +01:00
|
|
|
} else if (GetIndexType(type) == 3) {
|
|
|
|
if (CheckVirtualIndex(altered_table->s)) {
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
|
|
|
DBUG_RETURN(HA_ALTER_ERROR);
|
|
|
|
} // endif Check
|
|
|
|
|
2014-04-14 14:26:48 +02:00
|
|
|
} else if (!GetIndexType(type)) {
|
2014-02-03 16:14:13 +01:00
|
|
|
sprintf(g->Message, "Table type %s is not indexable", oldopt->type);
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
|
|
|
DBUG_RETURN(HA_ALTER_ERROR);
|
2014-04-14 14:26:48 +02:00
|
|
|
} // endif index type
|
2014-02-03 16:14:13 +01:00
|
|
|
|
|
|
|
} // endif index operation
|
|
|
|
|
2014-02-07 22:44:43 +01:00
|
|
|
if (!SameString(altered_table, "filename")) {
|
|
|
|
if (!outward) {
|
|
|
|
// Conversion to outward table is only allowed for file based
|
|
|
|
// tables whose file does not exist.
|
|
|
|
tshp= altered_table->s;
|
|
|
|
char *fn= GetStringOption("filename");
|
|
|
|
tshp= NULL;
|
2014-02-03 16:14:13 +01:00
|
|
|
|
2014-07-20 12:31:42 +02:00
|
|
|
if (FileExists(fn, false)) {
|
2014-02-07 22:44:43 +01:00
|
|
|
strcpy(g->Message, "Operation denied. Table data would be lost.");
|
|
|
|
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
|
|
|
|
DBUG_RETURN(HA_ALTER_ERROR);
|
2014-02-03 16:14:13 +01:00
|
|
|
} else
|
|
|
|
goto fin;
|
|
|
|
|
2014-02-07 22:44:43 +01:00
|
|
|
} else
|
|
|
|
goto fin;
|
|
|
|
|
|
|
|
} // endif filename
|
2014-02-03 16:14:13 +01:00
|
|
|
|
|
|
|
/* Is there at least one operation that requires copy algorithm? */
|
|
|
|
if (ha_alter_info->handler_flags & ~inplace_offline_operations)
|
|
|
|
goto fin;
|
|
|
|
|
|
|
|
/*
|
|
|
|
ALTER TABLE tbl_name CONVERT TO CHARACTER SET .. and
|
|
|
|
ALTER TABLE table_name DEFAULT CHARSET = .. most likely
|
|
|
|
change column charsets and so not supported in-place through
|
|
|
|
old API.
|
|
|
|
|
|
|
|
Changing of PACK_KEYS, MAX_ROWS and ROW_FORMAT options were
|
|
|
|
not supported as in-place operations in old API either.
|
|
|
|
*/
|
|
|
|
if (create_info->used_fields & (HA_CREATE_USED_CHARSET |
|
|
|
|
HA_CREATE_USED_DEFAULT_CHARSET |
|
|
|
|
HA_CREATE_USED_PACK_KEYS |
|
|
|
|
HA_CREATE_USED_MAX_ROWS) ||
|
|
|
|
(table->s->row_type != create_info->row_type))
|
|
|
|
goto fin;
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
uint table_changes= (ha_alter_info->handler_flags &
|
|
|
|
Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH) ?
|
|
|
|
IS_EQUAL_PACK_LENGTH : IS_EQUAL_YES;
|
|
|
|
|
|
|
|
if (table->file->check_if_incompatible_data(create_info, table_changes)
|
|
|
|
== COMPATIBLE_DATA_YES)
|
|
|
|
DBUG_RETURN(HA_ALTER_INPLACE_EXCLUSIVE_LOCK);
|
|
|
|
#endif // 0
|
|
|
|
|
|
|
|
// This was in check_if_incompatible_data
|
2014-02-07 22:44:43 +01:00
|
|
|
if (NoFieldOptionChange(altered_table) &&
|
2014-04-19 11:11:30 +02:00
|
|
|
type == newtyp &&
|
2014-02-03 16:14:13 +01:00
|
|
|
SameInt(altered_table, "lrecl") &&
|
|
|
|
SameInt(altered_table, "elements") &&
|
|
|
|
SameInt(altered_table, "header") &&
|
|
|
|
SameInt(altered_table, "quoted") &&
|
|
|
|
SameInt(altered_table, "ending") &&
|
|
|
|
SameInt(altered_table, "compressed"))
|
|
|
|
DBUG_RETURN(HA_ALTER_INPLACE_EXCLUSIVE_LOCK);
|
|
|
|
|
|
|
|
fin:
|
|
|
|
if (idx) {
|
|
|
|
// Indexing is only supported inplace
|
2014-04-19 11:11:30 +02:00
|
|
|
my_message(ER_ALTER_OPERATION_NOT_SUPPORTED,
|
2014-02-03 16:14:13 +01:00
|
|
|
"Alter operations not supported together by CONNECT", MYF(0));
|
|
|
|
DBUG_RETURN(HA_ALTER_ERROR);
|
|
|
|
} else if (outward) {
|
2014-07-20 12:31:42 +02:00
|
|
|
if (IsFileType(type))
|
|
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0,
|
|
|
|
"This is an outward table, table data were not modified.");
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
DBUG_RETURN(HA_ALTER_INPLACE_EXCLUSIVE_LOCK);
|
|
|
|
} else
|
|
|
|
DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
|
|
|
|
|
|
|
|
} // end of check_if_supported_inplace_alter
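Stripped of the CONNECT-specific checks (indexing, outward tables, option
comparisons), the decision taken above reduces to the sketch below. It is a
simplification, not the actual CONNECT logic, and it assumes the server
handler API declarations (handler.h) are in scope:

  // Hypothetical simplification: operations entirely covered by the
  // "inplace offline" flag set are accepted in place under an exclusive
  // lock, anything else falls back to the copy algorithm.
  static enum_alter_inplace_result
  classify_alter(Alter_inplace_info *ha_alter_info,
                 Alter_inplace_info::HA_ALTER_FLAGS inplace_offline_operations)
  {
    if (ha_alter_info->handler_flags & ~inplace_offline_operations)
      return HA_ALTER_INPLACE_NOT_SUPPORTED;   // requires the copy algorithm

    return HA_ALTER_INPLACE_EXCLUSIVE_LOCK;    // supported in place, X lock
  } // end of classify_alter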
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
  check_if_incompatible_data() is called if ALTER TABLE cannot otherwise detect
|
|
|
|
  whether the new and old definitions are compatible
|
|
|
|
|
|
|
|
@details If there are no other explicit signs like changed number of
|
|
|
|
  fields, this function will be called by compare_tables()
|
|
|
|
  (sql/sql_table.cc) to decide whether we should rewrite the whole table or only the .frm
|
|
|
|
file.
|
|
|
|
|
2014-02-03 16:14:13 +01:00
|
|
|
  @note: This function is no longer called by check_if_supported_inplace_alter
|
2013-02-07 10:34:27 +01:00
|
|
|
*/
|
|
|
|
|
2015-05-10 12:14:21 +02:00
|
|
|
bool ha_connect::check_if_incompatible_data(HA_CREATE_INFO *, uint)
|
2013-02-07 10:34:27 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_connect::check_if_incompatible_data");
|
2013-04-09 23:14:45 +02:00
|
|
|
// TO DO: really implement and check it.
|
2014-04-19 11:11:30 +02:00
|
|
|
push_warning(ha_thd(), Sql_condition::WARN_LEVEL_WARN, 0,
|
2014-02-03 16:14:13 +01:00
|
|
|
"Unexpected call to check_if_incompatible_data.");
|
|
|
|
DBUG_RETURN(COMPATIBLE_DATA_NO);
|
2013-04-02 11:31:46 +02:00
|
|
|
} // end of check_if_incompatible_data
|
2013-02-07 10:34:27 +01:00
|
|
|
|
2014-04-19 11:11:30 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* CONNECT MRR implementation: use DS-MRR
|
|
|
|
   This is just copied from MyISAM
|
|
|
|
***************************************************************************/
|
|
|
|
|
|
|
|
int ha_connect::multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
|
|
|
|
uint n_ranges, uint mode,
|
|
|
|
HANDLER_BUFFER *buf)
|
|
|
|
{
|
|
|
|
return ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf);
|
|
|
|
} // end of multi_range_read_init
|
|
|
|
|
|
|
|
int ha_connect::multi_range_read_next(range_id_t *range_info)
|
|
|
|
{
|
|
|
|
return ds_mrr.dsmrr_next(range_info);
|
|
|
|
} // end of multi_range_read_next
|
|
|
|
|
|
|
|
ha_rows ha_connect::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
|
|
|
|
void *seq_init_param,
|
|
|
|
uint n_ranges, uint *bufsz,
|
|
|
|
uint *flags, Cost_estimate *cost)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
This call is here because there is no location where this->table would
|
|
|
|
already be known.
|
|
|
|
TODO: consider moving it into some per-query initialization call.
|
|
|
|
*/
|
|
|
|
ds_mrr.init(this, table);
|
|
|
|
|
|
|
|
  // MRR is implemented for "local" file based tables only
|
2014-04-23 12:34:24 +02:00
|
|
|
if (!IsFileType(GetRealType(GetTableOptionStruct())))
|
2014-04-19 11:11:30 +02:00
|
|
|
*flags|= HA_MRR_USE_DEFAULT_IMPL;
|
|
|
|
|
|
|
|
ha_rows rows= ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges,
|
|
|
|
bufsz, flags, cost);
|
|
|
|
xp->g->Mrr= !(*flags & HA_MRR_USE_DEFAULT_IMPL);
|
|
|
|
return rows;
|
|
|
|
} // end of multi_range_read_info_const
|
|
|
|
|
|
|
|
ha_rows ha_connect::multi_range_read_info(uint keyno, uint n_ranges, uint keys,
|
|
|
|
uint key_parts, uint *bufsz,
|
|
|
|
uint *flags, Cost_estimate *cost)
|
|
|
|
{
|
|
|
|
ds_mrr.init(this, table);
|
|
|
|
|
|
|
|
  // MRR is implemented for "local" file based tables only
|
2014-04-23 12:34:24 +02:00
|
|
|
if (!IsFileType(GetRealType(GetTableOptionStruct())))
|
2014-04-19 11:11:30 +02:00
|
|
|
*flags|= HA_MRR_USE_DEFAULT_IMPL;
|
|
|
|
|
|
|
|
ha_rows rows= ds_mrr.dsmrr_info(keyno, n_ranges, keys, key_parts, bufsz,
|
|
|
|
flags, cost);
|
|
|
|
xp->g->Mrr= !(*flags & HA_MRR_USE_DEFAULT_IMPL);
|
|
|
|
return rows;
|
|
|
|
} // end of multi_range_read_info
|
|
|
|
|
|
|
|
|
|
|
|
int ha_connect::multi_range_read_explain_info(uint mrr_mode, char *str,
|
|
|
|
size_t size)
|
|
|
|
{
|
|
|
|
return ds_mrr.dsmrr_explain_info(mrr_mode, str, size);
|
|
|
|
} // end of multi_range_read_explain_info
|
|
|
|
|
|
|
|
/* CONNECT MRR implementation ends */
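The two *_info methods above share one CONNECT-specific twist: DS-MRR is only
kept for local file-based tables, otherwise the default implementation is
forced and the outcome is remembered in xp->g->Mrr. A hedged sketch of just
that flag handling (the helper name choose_mrr_impl is hypothetical):

  // Illustrative only: force the default MRR implementation unless the
  // table is a local file-based one, and record whether DS-MRR was kept.
  static void choose_mrr_impl(bool is_file_table, unsigned *flags, bool *use_dsmrr)
  {
    if (!is_file_table)
      *flags|= HA_MRR_USE_DEFAULT_IMPL;        // fall back to default MRR

    *use_dsmrr= !(*flags & HA_MRR_USE_DEFAULT_IMPL);
  } // end of choose_mrr_impl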
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
  // Does this make sense for CONNECT?
|
|
|
|
Item *ha_connect::idx_cond_push(uint keyno_arg, Item* idx_cond_arg)
|
|
|
|
{
|
|
|
|
pushed_idx_cond_keyno= keyno_arg;
|
|
|
|
pushed_idx_cond= idx_cond_arg;
|
|
|
|
in_range_check_pushed_down= TRUE;
|
|
|
|
if (active_index == pushed_idx_cond_keyno)
|
|
|
|
mi_set_index_cond_func(file, handler_index_cond_check, this);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
#endif // 0
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
|
|
|
|
struct st_mysql_storage_engine connect_storage_engine=
|
|
|
|
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
|
|
|
|
|
2014-08-22 17:30:22 +02:00
|
|
|
/***********************************************************************/
|
|
|
|
/* CONNECT global variables definitions. */
|
|
|
|
/***********************************************************************/
|
2014-03-30 22:52:54 +02:00
|
|
|
#if defined(XMAP)
|
|
|
|
// Using file mapping for indexes if true
|
2014-11-15 18:28:24 +01:00
|
|
|
static MYSQL_SYSVAR_BOOL(indx_map, xmap, PLUGIN_VAR_RQCMDARG,
|
|
|
|
"Using file mapping for indexes", NULL, NULL, 0);
|
2014-03-30 22:52:54 +02:00
|
|
|
#endif // XMAP
|
|
|
|
|
2014-11-08 13:35:03 +01:00
|
|
|
#if defined(XMSG)
|
|
|
|
static MYSQL_SYSVAR_STR(errmsg_dir_path, msg_path,
|
2014-11-15 18:28:24 +01:00
|
|
|
// PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
|
2014-11-08 13:35:03 +01:00
|
|
|
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
|
|
|
|
"Path to the directory where are the message files",
|
2014-11-15 18:28:24 +01:00
|
|
|
// check_msg_path, update_msg_path,
|
|
|
|
NULL, NULL,
|
|
|
|
"../../../../storage/connect/"); // for testing
|
2014-11-08 13:35:03 +01:00
|
|
|
#endif // XMSG
|
2014-08-22 17:30:22 +02:00
|
|
|
|
2014-03-18 19:25:50 +01:00
|
|
|
static struct st_mysql_sys_var* connect_system_variables[]= {
|
|
|
|
MYSQL_SYSVAR(xtrace),
|
2014-03-30 22:52:54 +02:00
|
|
|
MYSQL_SYSVAR(conv_size),
|
|
|
|
MYSQL_SYSVAR(type_conv),
|
|
|
|
#if defined(XMAP)
|
|
|
|
MYSQL_SYSVAR(indx_map),
|
|
|
|
#endif // XMAP
|
2014-04-05 19:26:32 +02:00
|
|
|
MYSQL_SYSVAR(work_size),
|
2014-08-22 17:30:22 +02:00
|
|
|
MYSQL_SYSVAR(use_tempfile),
|
|
|
|
MYSQL_SYSVAR(exact_info),
|
2014-11-15 18:28:24 +01:00
|
|
|
#if defined(XMSG) || defined(NEWMSG)
|
2014-11-08 13:35:03 +01:00
|
|
|
MYSQL_SYSVAR(msg_lang),
|
2014-11-15 18:28:24 +01:00
|
|
|
#endif // XMSG || NEWMSG
|
|
|
|
#if defined(XMSG)
|
2014-11-08 13:35:03 +01:00
|
|
|
MYSQL_SYSVAR(errmsg_dir_path),
|
|
|
|
#endif // XMSG
|
2015-02-22 17:53:02 +01:00
|
|
|
MYSQL_SYSVAR(json_grp_size),
|
2014-03-18 19:25:50 +01:00
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
2013-02-07 10:34:27 +01:00
|
|
|
maria_declare_plugin(connect)
|
|
|
|
{
|
|
|
|
MYSQL_STORAGE_ENGINE_PLUGIN,
|
|
|
|
&connect_storage_engine,
|
|
|
|
"CONNECT",
|
|
|
|
"Olivier Bertrand",
|
2013-12-16 01:32:47 +01:00
|
|
|
"Management of External Data (SQL/MED), including many file formats",
|
2013-02-07 10:34:27 +01:00
|
|
|
PLUGIN_LICENSE_GPL,
|
|
|
|
connect_init_func, /* Plugin Init */
|
|
|
|
connect_done_func, /* Plugin Deinit */
|
2015-07-16 11:05:20 +02:00
|
|
|
0x0104, /* version number (1.04) */
|
2013-02-07 10:34:27 +01:00
|
|
|
NULL, /* status variables */
|
2014-03-18 19:25:50 +01:00
|
|
|
connect_system_variables, /* system variables */
|
2016-03-16 18:53:56 +01:00
|
|
|
"1.04.0006", /* string version */
|
2016-03-11 08:59:51 +01:00
|
|
|
MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */
|
2013-02-07 10:34:27 +01:00
|
|
|
}
|
|
|
|
maria_declare_plugin_end;
|