Mirror of https://github.com/MariaDB/server.git (synced 2025-01-19 13:32:33 +01:00)

Commit f3d8d60259: Merge mysql.com:/users/lthalmann/bkroot/mysql-5.1
into mysql.com:/users/lthalmann/bk/MERGE/mysql-5.1-merge

42 changed files with 593 additions and 179 deletions
@@ -38,3 +38,25 @@ AC_DEFUN([AC_SYS_OS_COMPILER_FLAG],
   fi
 ])
+
+AC_DEFUN([AC_CHECK_NOEXECSTACK],
+[
+ AC_CACHE_CHECK(whether --noexecstack is desirable for .S files,
+                mysql_cv_as_noexecstack, [dnl
+  cat > conftest.c <<EOF
+void foo (void) { }
+EOF
+  if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS
+                     -S -o conftest.s conftest.c 1>&AS_MESSAGE_LOG_FD]) \
+     && grep .note.GNU-stack conftest.s >/dev/null \
+     && AC_TRY_COMMAND([${CC-cc} $CCASFLAGS $CPPFLAGS -Wa,--noexecstack
+                       -c -o conftest.o conftest.s 1>&AS_MESSAGE_LOG_FD])
+  then
+    mysql_cv_as_noexecstack=yes
+  else
+    mysql_cv_as_noexecstack=no
+  fi
+  rm -f conftest*])
+ if test $mysql_cv_as_noexecstack = yes; then
+   CCASFLAGS="$CCASFLAGS -Wa,--noexecstack"
+ fi
+])
@@ -482,6 +482,10 @@ AM_PROG_CC_STDC
 
 # We need an assembler, too
 AM_PROG_AS
+CCASFLAGS="$CCASFLAGS $ASFLAGS"
+
+# Check if we need noexec stack for assembler
+AC_CHECK_NOEXECSTACK
 
 if test "$am_cv_prog_cc_stdc" = "no"
 then
@@ -690,3 +690,8 @@ CREATE TABLE t1 (a int PRIMARY KEY);
 INSERT INTO t1 values (1), (2);
 INSERT INTO t1 SELECT a + 2 FROM t1 LIMIT 1;
 DROP TABLE t1;
+CREATE TABLE t1 (x int, y int);
+CREATE TABLE t2 (z int, y int);
+CREATE TABLE t3 (a int, b int);
+INSERT INTO t3 (SELECT x, y FROM t1 JOIN t2 USING (y) WHERE z = 1);
+DROP TABLE IF EXISTS t1,t2,t3;
@@ -330,6 +330,16 @@ alter table t1 add key (c1,c1,c2);
 ERROR 42S21: Duplicate column name 'c1'
 drop table t1;
 create table t1 (
+i1 INT NOT NULL,
+i2 INT NOT NULL,
+UNIQUE i1idx (i1),
+UNIQUE i2idx (i2));
+desc t1;
+Field	Type	Null	Key	Default	Extra
+i1	int(11)	NO	UNI
+i2	int(11)	NO	UNI
+drop table t1;
+create table t1 (
 c1 int,
 c2 varchar(20) not null,
 primary key (c1),
@@ -188,6 +188,19 @@ ENGINE NDB;
 CREATE INDEX b_i on t1(b);
 CREATE INDEX bc_i on t1(b, c);
 DROP TABLE t1;
+CREATE TABLESPACE ts2
+ADD DATAFILE 'datafile3.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 1M
+ENGINE NDB;
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile3.dat'
+ENGINE NDB;
+ERROR HY000: Failed to alter: NO SUCH FILE
+ALTER TABLESPACE ts2
+DROP DATAFILE 'datafile2.dat'
+ENGINE NDB;
+ERROR HY000: Failed to alter: NO SUCH FILE
 ALTER TABLESPACE ts1
 DROP DATAFILE 'datafile2.dat'
 ENGINE NDB;
@@ -196,6 +209,11 @@ DROP DATAFILE 'datafile.dat'
 ENGINE NDB;
 DROP TABLESPACE ts1
 ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE 'datafile3.dat'
+ENGINE NDB;
+DROP TABLESPACE ts2
+ENGINE NDB;
 DROP LOGFILE GROUP lg1
 ENGINE NDB;
 **** End = And No = ****
@@ -649,3 +649,15 @@ DROP VIEW mysqltest_db1.view1;
 DROP TABLE mysqltest_db1.t1;
 DROP SCHEMA mysqltest_db1;
 DROP USER mysqltest_db1@localhost;
+CREATE DATABASE test1;
+CREATE DATABASE test2;
+CREATE TABLE test1.t0 (a VARCHAR(20));
+CREATE TABLE test2.t1 (a VARCHAR(20));
+CREATE VIEW test2.t3 AS SELECT * FROM test1.t0;
+CREATE OR REPLACE VIEW test.v1 AS
+SELECT ta.a AS col1, tb.a AS col2 FROM test2.t3 ta, test2.t1 tb;
+DROP VIEW test.v1;
+DROP VIEW test2.t3;
+DROP TABLE test2.t1, test1.t0;
+DROP DATABASE test2;
+DROP DATABASE test1;
@@ -238,3 +238,12 @@ INSERT INTO t1 SELECT a + 2 FROM t1 LIMIT 1;
 DROP TABLE t1;
 
 # End of 4.1 tests
+
+#
+# Bug #18080: INSERT ... SELECT ... JOIN results in ambiguous field list error
+#
+CREATE TABLE t1 (x int, y int);
+CREATE TABLE t2 (z int, y int);
+CREATE TABLE t3 (a int, b int);
+INSERT INTO t3 (SELECT x, y FROM t1 JOIN t2 USING (y) WHERE z = 1);
+DROP TABLE IF EXISTS t1,t2,t3;
@@ -325,6 +325,17 @@ alter table t1 add key (c1,c2,c1);
 alter table t1 add key (c1,c1,c2);
 drop table t1;
 
+#
+# Bug#11228: DESC shows arbitrary column as "PRI"
+#
+create table t1 (
+i1 INT NOT NULL,
+i2 INT NOT NULL,
+UNIQUE i1idx (i1),
+UNIQUE i2idx (i2));
+desc t1;
+drop table t1;
+
 #
 # Bug#12565 - ERROR 1034 when running simple UPDATE or DELETE
 # on large MyISAM table
@@ -280,6 +280,25 @@ CREATE INDEX bc_i on t1(b, c);
 
 DROP TABLE t1;
 
+# bug#20053
+
+CREATE TABLESPACE ts2
+ADD DATAFILE 'datafile3.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 1M
+ENGINE NDB;
+
+--error ER_ALTER_FILEGROUP_FAILED
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile3.dat'
+ENGINE NDB;
+
+--error ER_ALTER_FILEGROUP_FAILED
+ALTER TABLESPACE ts2
+DROP DATAFILE 'datafile2.dat'
+ENGINE NDB;
+# bug#20053
+
 ALTER TABLESPACE ts1
 DROP DATAFILE 'datafile2.dat'
 ENGINE NDB;
@@ -291,6 +310,13 @@ ENGINE NDB;
 DROP TABLESPACE ts1
 ENGINE NDB;
 
+ALTER TABLESPACE ts2
+DROP DATAFILE 'datafile3.dat'
+ENGINE NDB;
+
+DROP TABLESPACE ts2
+ENGINE NDB;
+
 DROP LOGFILE GROUP lg1
 ENGINE NDB;
 
@@ -852,3 +852,23 @@ DROP VIEW mysqltest_db1.view1;
 DROP TABLE mysqltest_db1.t1;
 DROP SCHEMA mysqltest_db1;
 DROP USER mysqltest_db1@localhost;
+#
+# BUG#20482: failure on Create join view with sources views/tables
+# in different schemas
+#
+--disable_warnings
+CREATE DATABASE test1;
+CREATE DATABASE test2;
+--enable_warnings
+
+CREATE TABLE test1.t0 (a VARCHAR(20));
+CREATE TABLE test2.t1 (a VARCHAR(20));
+CREATE VIEW test2.t3 AS SELECT * FROM test1.t0;
+CREATE OR REPLACE VIEW test.v1 AS
+SELECT ta.a AS col1, tb.a AS col2 FROM test2.t3 ta, test2.t1 tb;
+
+DROP VIEW test.v1;
+DROP VIEW test2.t3;
+DROP TABLE test2.t1, test1.t0;
+DROP DATABASE test2;
+DROP DATABASE test1;
@@ -186,8 +186,8 @@ static int update_status_variables(Ndb_cluster_connection *c)
 
 SHOW_VAR ndb_status_variables[]= {
   {"cluster_node_id", (char*) &ndb_cluster_node_id, SHOW_LONG},
-  {"connected_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR},
-  {"connected_port", (char*) &ndb_connected_port, SHOW_LONG},
+  {"config_from_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR},
+  {"config_from_port", (char*) &ndb_connected_port, SHOW_LONG},
 // {"number_of_replicas", (char*) &ndb_number_of_replicas, SHOW_LONG},
   {"number_of_storage_nodes",(char*) &ndb_number_of_storage_nodes, SHOW_LONG},
   {NullS, NullS, SHOW_LONG}
@@ -414,6 +414,37 @@ void ha_ndbcluster::set_rec_per_key()
   DBUG_VOID_RETURN;
 }
 
+ha_rows ha_ndbcluster::records()
+{
+  ha_rows retval;
+  DBUG_ENTER("ha_ndbcluster::records");
+  struct Ndb_local_table_statistics *info= m_table_info;
+  DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
+                      ((const NDBTAB *)m_table)->getTableId(),
+                      info->no_uncommitted_rows_count));
+
+  Ndb *ndb= get_ndb();
+  ndb->setDatabaseName(m_dbname);
+  struct Ndb_statistics stat;
+  if (ndb_get_table_statistics(ndb, m_table, &stat) == 0)
+  {
+    retval= stat.row_count;
+  }
+  else
+  {
+    /**
+     * Be consistent with BUG#19914 until we fix it properly
+     */
+    DBUG_RETURN(-1);
+  }
+
+  THD *thd= current_thd;
+  if (get_thd_ndb(thd)->error)
+    info->no_uncommitted_rows_count= 0;
+
+  DBUG_RETURN(retval + info->no_uncommitted_rows_count);
+}
+
 void ha_ndbcluster::records_update()
 {
   if (m_ha_not_exact_count)
@@ -5455,7 +5486,8 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
                 HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | \
                 HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | \
                 HA_PARTIAL_COLUMN_READ | \
-                HA_HAS_OWN_BINLOGGING
+                HA_HAS_OWN_BINLOGGING | \
+                HA_HAS_RECORDS
 
 ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
   handler(&ndbcluster_hton, table_arg),
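
The two ha_ndbcluster hunks above work together: records() supplies an exact row count from the engine, and HA_HAS_RECORDS advertises it in the table flags. As a minimal sketch (assuming the 5.1 handler interface, i.e. ha_table_flags() and stats.records; the helper name is hypothetical), a caller could choose between the exact and the estimated count like this:

    // Hypothetical helper, not part of this commit: prefer the engine's
    // exact count when HA_HAS_RECORDS is advertised, otherwise fall back
    // to the cached estimate refreshed by info().
    static ha_rows table_row_count(handler *file)
    {
      if (file->ha_table_flags() & HA_HAS_RECORDS)
        return file->records();          // exact count from the engine
      file->info(HA_STATUS_VARIABLE);    // refresh cached statistics
      return file->stats.records;        // possibly approximate
    }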
@@ -10006,7 +10038,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
   }
 
   NdbError err;
-  NDBDICT *dict = ndb->getDictionary();
+  NDBDICT *dict= ndb->getDictionary();
   int error;
   const char * errmsg;
   LINT_INIT(errmsg);
@@ -10070,9 +10102,12 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
   }
   else if(info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE)
   {
-    NdbDictionary::Datafile df = dict->getDatafile(0,
-                                                   info->data_file_name);
-    if (strcmp(df.getPath(), info->data_file_name) == 0)
+    NdbDictionary::Tablespace ts= dict->getTablespace(info->tablespace_name);
+    NdbDictionary::Datafile df= dict->getDatafile(0, info->data_file_name);
+    NdbDictionary::ObjectId objid;
+    df.getTablespaceId(&objid);
+    if (ts.getObjectId() == objid.getObjectId() &&
+        strcmp(df.getPath(), info->data_file_name) == 0)
     {
       errmsg= " DROP DATAFILE";
       if (dict->dropDatafile(df))
@@ -10401,10 +10436,12 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables,
     table->field[c++]->set_null(); // TABLE_NAME
 
     // LOGFILE_GROUP_NAME
+    NdbDictionary::ObjectId objid;
+    uf.getLogfileGroupId(&objid);
     table->field[c++]->store(uf.getLogfileGroup(),
                              strlen(uf.getLogfileGroup()),
                              system_charset_info);
-    table->field[c++]->store(uf.getLogfileGroupId()); // LOGFILE_GROUP_NUMBER
+    table->field[c++]->store(objid.getObjectId()); // LOGFILE_GROUP_NUMBER
     table->field[c++]->store(ndbcluster_hton_name,
                              ndbcluster_hton_name_length,
                              system_charset_info); // ENGINE
@@ -622,6 +622,7 @@ class ha_ndbcluster: public handler
   int read_multi_range_next(KEY_MULTI_RANGE **found_range_p);
 
   bool get_error_message(int error, String *buf);
+  ha_rows records();
   void info(uint);
   void get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id);
   int extra(enum ha_extra_function operation);
@@ -532,6 +532,8 @@ void cleanup_items(Item *item);
 class THD;
 void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0);
 bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables);
+bool check_single_table_access(THD *thd, ulong privilege,
+                               TABLE_LIST *tables);
 bool check_routine_access(THD *thd,ulong want_access,char *db,char *name,
                           bool is_proc, bool no_errors);
 bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table);
@@ -5471,7 +5471,7 @@ bool setup_tables_and_check_access(THD *thd,
   for (; leaves_tmp; leaves_tmp= leaves_tmp->next_leaf)
   {
     if (leaves_tmp->belong_to_view &&
-        check_one_table_access(thd, want_access, leaves_tmp))
+        check_single_table_access(thd, want_access, leaves_tmp))
     {
       tables->hide_view_error(thd);
       return TRUE;
@@ -1941,15 +1941,10 @@ bool select_dumpvar::send_data(List<Item> &items)
   Item_func_set_user_var *xx;
   Item_splocal *yy;
   my_var *zz;
-  DBUG_ENTER("send_data");
-  if (unit->offset_limit_cnt)
-  { // using limit offset,count
-    unit->offset_limit_cnt--;
-    DBUG_RETURN(0);
-  }
-
+  DBUG_ENTER("select_dumpvar::send_data");
   if (unit->offset_limit_cnt)
-  { // Using limit offset,count
+  { // using limit offset,count
     unit->offset_limit_cnt--;
     DBUG_RETURN(0);
   }
@@ -3382,15 +3382,6 @@ end_with_restore_list:
                                      &lex->value_list,
                                      lex->duplicates, lex->ignore)))
     {
-      /*
-        Skip first table, which is the table we are inserting in.
-        Below we set context.table_list again because the call above to
-        mysql_insert_select_prepare() calls resolve_in_table_list_only(),
-        which in turn resets context.table_list and
-        context.first_name_resolution_table.
-      */
-      select_lex->context.table_list=
-        select_lex->context.first_name_resolution_table= second_table;
       res= handle_select(thd, lex, result, OPTION_SETUP_TABLES_DONE);
       /*
         Invalidate the table in the query cache if something changed
@@ -5249,11 +5240,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
 
 
 /*
-  Check grants for commands which work only with one table and all other
-  tables belonging to subselects or implicitly opened tables.
+  Check grants for commands which work only with one table.
 
   SYNOPSIS
-    check_one_table_access()
+    check_single_table_access()
     thd             Thread handler
     privilege       requested privilege
    all_tables      global table list of query
@@ -5263,7 +5253,8 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
     1 - access denied, error is sent to client
 */
 
-bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
+bool check_single_table_access(THD *thd, ulong privilege,
+                               TABLE_LIST *all_tables)
 {
   Security_context * backup_ctx= thd->security_ctx;
 
@@ -5288,19 +5279,41 @@ bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
     goto deny;
 
   thd->security_ctx= backup_ctx;
+  return 0;
+
+deny:
+  thd->security_ctx= backup_ctx;
+  return 1;
+}
+
+/*
+  Check grants for commands which work only with one table and all other
+  tables belonging to subselects or implicitly opened tables.
+
+  SYNOPSIS
+    check_one_table_access()
+    thd             Thread handler
+    privilege       requested privilege
+    all_tables      global table list of query
+
+  RETURN
+    0 - OK
+    1 - access denied, error is sent to client
+*/
+
+bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
+{
+  if (check_single_table_access (thd,privilege,all_tables))
+    return 1;
+
   /* Check rights on tables of subselects and implictly opened tables */
   TABLE_LIST *subselects_tables;
   if ((subselects_tables= all_tables->next_global))
   {
     if ((check_table_access(thd, SELECT_ACL, subselects_tables, 0)))
-      goto deny;
+      return 1;
   }
   return 0;
-
-deny:
-  thd->security_ctx= backup_ctx;
-  return 1;
 }
sql/table.cc (21 changed lines)
@@ -1025,27 +1025,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
       if (share->key_info[key].flags & HA_FULLTEXT)
         share->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
 
-      if (primary_key >= MAX_KEY && (keyinfo->flags & HA_NOSAME))
-      {
-        /*
-          If the UNIQUE key doesn't have NULL columns and is not a part key
-          declare this as a primary key.
-        */
-        primary_key=key;
-        for (i=0 ; i < keyinfo->key_parts ;i++)
-        {
-          uint fieldnr= key_part[i].fieldnr;
-          if (!fieldnr ||
-              share->field[fieldnr-1]->null_ptr ||
-              share->field[fieldnr-1]->key_length() !=
-              key_part[i].length)
-          {
-            primary_key=MAX_KEY;  // Can't be used
-            break;
-          }
-        }
-      }
-
       for (i=0 ; i < keyinfo->key_parts ; key_part++,i++)
       {
         Field *field;
@@ -159,7 +159,8 @@ struct CreateFileRef {
     InvalidFilegroupVersion = 754,
     FilenameAlreadyExists = 760,
     OutOfFileRecords = 751,
-    InvalidFileType = 750
+    InvalidFileType = 750,
+    NotSupportedWhenDiskless = 775
   };
 
   Uint32 senderData;
@@ -45,7 +45,8 @@ public:
     CopyFragRefError = 5,
     TestStopOnError = 6,
     CopySubscriptionRef = 7,
-    CopySubscriberRef = 8
+    CopySubscriberRef = 8,
+    StartFragRefError = 9
   };
 
   Uint32 errorRef;
@@ -184,7 +184,7 @@ public:
     virtual int getObjectId() const;
 
   private:
-    friend class Dictionary;
+    friend class NdbDictObjectImpl;
     class NdbDictObjectImpl & m_impl;
   };
 
@@ -1469,11 +1469,11 @@ public:
     void setSize(Uint64);
     Uint64 getSize() const;
     Uint64 getFree() const;
 
     void setTablespace(const char * name);
     void setTablespace(const class Tablespace &);
     const char * getTablespace() const;
-    Uint32 getTablespaceId() const;
+    void getTablespaceId(ObjectId * dst) const;
 
     void setNode(Uint32 nodeId);
     Uint32 getNode() const;
@@ -1516,7 +1516,7 @@ public:
     void setLogfileGroup(const char * name);
     void setLogfileGroup(const class LogfileGroup &);
     const char * getLogfileGroup() const;
-    Uint32 getLogfileGroupId() const;
+    void getLogfileGroupId(ObjectId * dst) const;
 
     void setNode(Uint32 nodeId);
     Uint32 getNode() const;
@@ -42,7 +42,9 @@ public:
    * @param parallel No of fragments to scan in parallel (0=max)
    */
   virtual int readTuples(LockMode lock_mode = LM_Read,
-                         Uint32 scan_flags = 0, Uint32 parallel = 0);
+                         Uint32 scan_flags = 0,
+                         Uint32 parallel = 0,
+                         Uint32 batch = 0);
 
 #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
   /**
@@ -70,7 +72,7 @@ public:
       (SF_ReadRangeNo & -(Int32)read_range_no) |
       (SF_KeyInfo & -(Int32)keyinfo);
 
-    return readTuples(lock_mode, scan_flags, parallel);
+    return readTuples(lock_mode, scan_flags, parallel, batch);
   }
 #endif
 
@@ -58,7 +58,9 @@ public:
    */
   virtual
   int readTuples(LockMode lock_mode = LM_Read,
-                 Uint32 scan_flags = 0, Uint32 parallel = 0);
+                 Uint32 scan_flags = 0,
+                 Uint32 parallel = 0,
+                 Uint32 batch = 0);
 
 #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
   /**
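
The three scan-API hunks above extend readTuples() on both scan classes with an optional batch argument; 0 keeps the old automatic batch sizing. A usage sketch (error handling abbreviated), mirroring the test-program call later in this commit:

    // Sketch: request a specific first-batch size for a table scan.
    NdbScanOperation* pOp = pCon->getNdbScanOperation(pTab->getName());
    if (pOp == NULL ||
        pOp->readTuples(NdbOperation::LM_Read,
                        /* scan_flags */ 0,
                        /* parallel   */ 0,
                        /* batch      */ 64) != 0)
      return -1; // abbreviated error handling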
@@ -9537,7 +9537,14 @@ Dbdict::createEventComplete_RT_USER_GET(Signal* signal,
 
   NodeReceiverGroup rg(DBDICT, c_aliveNodes);
   RequestTracker & p = evntRecPtr.p->m_reqTracker;
-  p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, evntRecPtr.i);
+  if (!p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF,
+                             evntRecPtr.i))
+  {
+    jam();
+    evntRecPtr.p->m_errorCode = 701;
+    createEvent_sendReply(signal, evntRecPtr);
+    return;
+  }
 
   sendSignal(rg, GSN_CREATE_EVNT_REQ, signal, CreateEvntReq::SignalLength, JBB);
 }
@@ -9825,8 +9832,12 @@ void Dbdict::execSUB_START_REQ(Signal* signal)
     return;
   }
   OpSubEventPtr subbPtr;
+  Uint32 errCode = 0;
   if (!c_opSubEvent.seize(subbPtr)) {
+    errCode = SubStartRef::Busy;
+busy:
     SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
 
     { // fix
       Uint32 subcriberRef = ((SubStartReq*)signal->getDataPtr())->subscriberRef;
       ref->subscriberRef = subcriberRef;
|
||||||
// ret->setErrorLine(__LINE__);
|
// ret->setErrorLine(__LINE__);
|
||||||
// ret->setErrorNode(reference());
|
// ret->setErrorNode(reference());
|
||||||
ref->senderRef = reference();
|
ref->senderRef = reference();
|
||||||
ref->errorCode = SubStartRef::Busy;
|
ref->errorCode = errCode;
|
||||||
|
|
||||||
sendSignal(origSenderRef, GSN_SUB_START_REF, signal,
|
sendSignal(origSenderRef, GSN_SUB_START_REF, signal,
|
||||||
SubStartRef::SignalLength2, JBB);
|
SubStartRef::SignalLength2, JBB);
|
||||||
|
@ -9859,7 +9870,12 @@ void Dbdict::execSUB_START_REQ(Signal* signal)
|
||||||
subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
|
subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
|
||||||
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
|
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
|
||||||
RequestTracker & p = subbPtr.p->m_reqTracker;
|
RequestTracker & p = subbPtr.p->m_reqTracker;
|
||||||
p.init<SubStartRef>(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i);
|
if (!p.init<SubStartRef>(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i))
|
||||||
|
{
|
||||||
|
c_opSubEvent.release(subbPtr);
|
||||||
|
errCode = SubStartRef::Busy;
|
||||||
|
goto busy;
|
||||||
|
}
|
||||||
|
|
||||||
SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
|
SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
|
||||||
|
|
||||||
|
@ -10049,14 +10065,17 @@ void Dbdict::execSUB_STOP_REQ(Signal* signal)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
OpSubEventPtr subbPtr;
|
OpSubEventPtr subbPtr;
|
||||||
|
Uint32 errCode = 0;
|
||||||
if (!c_opSubEvent.seize(subbPtr)) {
|
if (!c_opSubEvent.seize(subbPtr)) {
|
||||||
|
errCode = SubStopRef::Busy;
|
||||||
|
busy:
|
||||||
SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
|
SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
|
||||||
jam();
|
jam();
|
||||||
// ret->setErrorCode(SubStartRef::SeizeError);
|
// ret->setErrorCode(SubStartRef::SeizeError);
|
||||||
// ret->setErrorLine(__LINE__);
|
// ret->setErrorLine(__LINE__);
|
||||||
// ret->setErrorNode(reference());
|
// ret->setErrorNode(reference());
|
||||||
ref->senderRef = reference();
|
ref->senderRef = reference();
|
||||||
ref->errorCode = SubStopRef::Busy;
|
ref->errorCode = errCode;
|
||||||
|
|
||||||
sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal,
|
sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal,
|
||||||
SubStopRef::SignalLength, JBB);
|
SubStopRef::SignalLength, JBB);
|
||||||
|
@@ -10081,10 +10100,16 @@ void Dbdict::execSUB_STOP_REQ(Signal* signal)
   subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
   NodeReceiverGroup rg(DBDICT, c_aliveNodes);
   RequestTracker & p = subbPtr.p->m_reqTracker;
-  p.init<SubStopRef>(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i);
+  if (!p.init<SubStopRef>(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i))
+  {
+    jam();
+    c_opSubEvent.release(subbPtr);
+    errCode = SubStopRef::Busy;
+    goto busy;
+  }
 
   SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
 
   req->senderRef = reference();
   req->senderData = subbPtr.i;
 
@@ -10374,9 +10399,14 @@ Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal,
 
   NodeReceiverGroup rg(DBDICT, c_aliveNodes);
   RequestTracker & p = evntRecPtr.p->m_reqTracker;
-  p.init<SubRemoveRef>(c_counterMgr, rg, GSN_SUB_REMOVE_REF,
-                       evntRecPtr.i);
+  if (!p.init<SubRemoveRef>(c_counterMgr, rg, GSN_SUB_REMOVE_REF,
+                            evntRecPtr.i))
+  {
+    evntRecPtr.p->m_errorCode = 701;
+    dropEvent_sendReply(signal, evntRecPtr);
+    return;
+  }
 
   SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
 
   req->senderRef = reference();
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
Uint32 dl;
|
||||||
|
const ndb_mgm_configuration_iterator * p =
|
||||||
|
m_ctx.m_config.getOwnConfigIterator();
|
||||||
|
if(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl)
|
||||||
|
{
|
||||||
|
op->m_errorCode = CreateFileRef::NotSupportedWhenDiskless;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Loop through all filenames...
|
// Loop through all filenames...
|
||||||
if(!c_obj_pool.seize(obj_ptr)){
|
if(!c_obj_pool.seize(obj_ptr)){
|
||||||
op->m_errorCode = CreateTableRef::NoMoreTableRecords;
|
op->m_errorCode = CreateTableRef::NoMoreTableRecords;
|
||||||
|
|
|
@@ -683,6 +683,7 @@ private:
   void execGETGCIREQ(Signal *);
   void execDIH_RESTARTREQ(Signal *);
   void execSTART_RECCONF(Signal *);
+  void execSTART_FRAGREF(Signal *);
   void execSTART_FRAGCONF(Signal *);
   void execADD_FRAGCONF(Signal *);
   void execADD_FRAGREF(Signal *);
@@ -257,6 +257,9 @@ Dbdih::Dbdih(Block_context& ctx):
   addRecSignal(GSN_DICT_LOCK_CONF, &Dbdih::execDICT_LOCK_CONF);
   addRecSignal(GSN_DICT_LOCK_REF, &Dbdih::execDICT_LOCK_REF);
 
+  addRecSignal(GSN_START_FRAGREF,
+               &Dbdih::execSTART_FRAGREF);
+
   apiConnectRecord = 0;
   connectRecord = 0;
   fileRecord = 0;
@@ -1107,6 +1107,26 @@ void Dbdih::execSTART_FRAGCONF(Signal* signal)
   return;
 }//Dbdih::execSTART_FRAGCONF()
 
+void Dbdih::execSTART_FRAGREF(Signal* signal)
+{
+  jamEntry();
+
+  /**
+   * Kill starting node
+   */
+  Uint32 errCode = signal->theData[1];
+  Uint32 nodeId = signal->theData[2];
+
+  SystemError * const sysErr = (SystemError*)&signal->theData[0];
+  sysErr->errorCode = SystemError::StartFragRefError;
+  sysErr->errorRef = reference();
+  sysErr->data1 = errCode;
+  sysErr->data2 = 0;
+  sendSignal(calcNdbCntrBlockRef(nodeId), GSN_SYSTEM_ERROR, signal,
+             SystemError::SignalLength, JBB);
+  return;
+}//Dbdih::execSTART_FRAGCONF()
+
 void Dbdih::execSTART_MEREF(Signal* signal)
 {
   jamEntry();
@@ -8073,15 +8073,15 @@ void Dblqh::scanLockReleasedLab(Signal* signal)
     scanptr.p->m_curr_batch_size_rows = 0;
     scanptr.p->m_curr_batch_size_bytes = 0;
     closeScanLab(signal);
+  } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
+    jam();
+    closeScanLab(signal);
+    return;
   } else if (scanptr.p->check_scan_batch_completed() &&
              scanptr.p->scanLockHold != ZTRUE) {
     jam();
     scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
     sendScanFragConf(signal, ZFALSE);
-  } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
-    jam();
-    closeScanLab(signal);
-    return;
   } else {
     jam();
     /*
@@ -205,6 +205,13 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* signal)
                          killingNode, data1);
     break;
 
+  case SystemError::StartFragRefError:
+    BaseString::snprintf(buf, sizeof(buf),
+                         "Node %d killed this node because "
+                         "it replied StartFragRef error code: %u.",
+                         killingNode, data1);
+    break;
+
   case SystemError::CopySubscriptionRef:
     BaseString::snprintf(buf, sizeof(buf),
                          "Node %d killed this node because "
@@ -2480,7 +2480,8 @@ Suma::execSUB_STOP_REQ(Signal* signal){
 
   TablePtr tabPtr;
   tabPtr.i = subPtr.p->m_table_ptrI;
-  if (!(tabPtr.p = c_tables.getPtr(tabPtr.i)) ||
+  if (tabPtr.i == RNIL ||
+      !(tabPtr.p = c_tables.getPtr(tabPtr.i)) ||
       tabPtr.p->m_tableId != subPtr.p->m_tableId)
   {
     jam();
@@ -26,12 +26,12 @@ public:
   void init() { m_confs.clear(); m_nRefs = 0; }
 
   template<typename SignalClass>
-  void init(SafeCounterManager& mgr,
+  bool init(SafeCounterManager& mgr,
             NodeReceiverGroup rg, Uint16 GSN, Uint32 senderData)
   {
     init();
     SafeCounter tmp(mgr, m_sc);
-    tmp.init<SignalClass>(rg, GSN, senderData);
+    return tmp.init<SignalClass>(rg, GSN, senderData);
   }
 
   bool ignoreRef(SafeCounterManager& mgr, Uint32 nodeId)
@@ -230,10 +230,13 @@ inline
 bool
 SafeCounter::init(NodeReceiverGroup rg, Uint16 GSN, Uint32 senderData){
 
-  bool b = init<Ref>(rg.m_block, GSN, senderData);
-  m_nodes = rg.m_nodes;
-  m_count = m_nodes.count();
-  return b;
+  if (init<Ref>(rg.m_block, GSN, senderData))
+  {
+    m_nodes = rg.m_nodes;
+    m_count = m_nodes.count();
+    return true;
+  }
+  return false;
 }
 
 template<typename Ref>
@@ -241,10 +244,13 @@ inline
 bool
 SafeCounter::init(NodeReceiverGroup rg, Uint32 senderData){
 
-  bool b = init<Ref>(rg.m_block, Ref::GSN, senderData);
-  m_nodes = rg.m_nodes;
-  m_count = m_nodes.count();
-  return b;
+  if (init<Ref>(rg.m_block, Ref::GSN, senderData))
+  {
+    m_nodes = rg.m_nodes;
+    m_count = m_nodes.count();
+    return true;
+  }
+  return false;
 }
 
 inline
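
With SafeCounter::init() above now reporting failure instead of discarding it, RequestTracker::init() and its DBDICT callers earlier in this commit can propagate the out-of-counters condition. A condensed, hypothetical generalization of that caller pattern; SomeRef, GSN_SOME_REF, sendReply and recPtr stand in for the concrete names used in the real hunks:

    RequestTracker & p = recPtr.p->m_reqTracker;
    if (!p.init<SomeRef>(c_counterMgr, rg, GSN_SOME_REF, recPtr.i))
    {
      recPtr.p->m_errorCode = 701;   // out of SafeCounters: fail the request
      sendReply(signal, recPtr);     // instead of silently hanging
      return;
    }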
@@ -30,6 +30,7 @@ extern my_bool opt_core;
 #define MAX_LINE_LENGTH 255
 #define KEY_INTERNAL 0
 #define MAX_INT_RNIL 0xfffffeff
+#define MAX_PORT_NO 65535
 
 #define _STR_VALUE(x) #x
 #define STR_VALUE(x) _STR_VALUE(x)
@@ -422,7 +423,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     UNDEFINED,
     "1",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },
 
   {
     CFG_DB_NO_REPLICAS,
@@ -877,7 +878,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     false,
     ConfigInfo::CI_INT,
     "8",
-    "1",
+    "3",
     STR_VALUE(MAX_INT_RNIL) },
 
   {
@@ -1510,7 +1511,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     NDB_PORT,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },
 
   {
     KEY_INTERNAL,
@@ -1522,7 +1523,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     UNDEFINED,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },
 
   {
     CFG_NODE_ARBIT_RANK,
@@ -1664,7 +1665,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     MANDATORY,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },
 
   {
     CFG_TCP_SEND_BUFFER_SIZE,
@@ -1770,7 +1771,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     MANDATORY,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },
 
   {
     CFG_SHM_SIGNUM,
@@ -1992,7 +1993,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     MANDATORY,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },
 
   {
     CFG_SCI_HOST1_ID_0,
@@ -1236,9 +1236,14 @@ NdbDictionary::Datafile::getTablespace() const {
   return m_impl.m_filegroup_name.c_str();
 }
 
-Uint32
-NdbDictionary::Datafile::getTablespaceId() const {
-  return m_impl.m_filegroup_id;
+void
+NdbDictionary::Datafile::getTablespaceId(NdbDictionary::ObjectId* dst) const
+{
+  if (dst)
+  {
+    NdbDictObjectImpl::getImpl(* dst).m_id = m_impl.m_filegroup_id;
+    NdbDictObjectImpl::getImpl(* dst).m_version = m_impl.m_filegroup_version;
+  }
 }
 
 NdbDictionary::Object::Status
@@ -1322,9 +1327,14 @@ NdbDictionary::Undofile::getLogfileGroup() const {
   return m_impl.m_filegroup_name.c_str();
 }
 
-Uint32
-NdbDictionary::Undofile::getLogfileGroupId() const {
-  return m_impl.m_filegroup_id;
+void
+NdbDictionary::Undofile::getLogfileGroupId(NdbDictionary::ObjectId * dst)const
+{
+  if (dst)
+  {
+    NdbDictObjectImpl::getImpl(* dst).m_id = m_impl.m_filegroup_id;
+    NdbDictObjectImpl::getImpl(* dst).m_version = m_impl.m_filegroup_version;
+  }
 }
 
 NdbDictionary::Object::Status
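
Both getters above switch from returning a bare Uint32 id to filling an ObjectId, which carries the object version as well. Call sites migrate as in the ndb_restore hunk later in this commit; in sketch form:

    // Before: Uint32 id = df.getTablespaceId();
    // After: the id (and version) travel in an ObjectId out-parameter.
    NdbDictionary::ObjectId objid;
    df.getTablespaceId(&objid);
    Uint32 id = objid.getObjectId();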
@@ -1841,7 +1851,8 @@ NdbDictionary::Dictionary::createLogfileGroup(const LogfileGroup & lg,
                                               ObjectId * obj)
 {
   return m_impl.createLogfileGroup(NdbLogfileGroupImpl::getImpl(lg),
-                                   obj ? &obj->m_impl : 0);
+                                   obj ?
+                                   & NdbDictObjectImpl::getImpl(* obj) : 0);
 }
 
 int
@@ -1864,7 +1875,8 @@ NdbDictionary::Dictionary::createTablespace(const Tablespace & lg,
                                             ObjectId * obj)
 {
   return m_impl.createTablespace(NdbTablespaceImpl::getImpl(lg),
-                                 obj ? &obj->m_impl : 0);
+                                 obj ?
+                                 & NdbDictObjectImpl::getImpl(* obj) : 0);
 }
 
 int
@@ -1899,7 +1911,7 @@ NdbDictionary::Dictionary::createDatafile(const Datafile & df,
 {
   return m_impl.createDatafile(NdbDatafileImpl::getImpl(df),
                                force,
-                               obj ? &obj->m_impl : 0);
+                               obj ? & NdbDictObjectImpl::getImpl(* obj) : 0);
 }
 
 int
@@ -1925,7 +1937,7 @@ NdbDictionary::Dictionary::createUndofile(const Undofile & df,
 {
   return m_impl.createUndofile(NdbUndofileImpl::getImpl(df),
                                force,
-                               obj ? & NdbDictObjectImpl::getImpl(* obj) : 0);
+                               obj ? & NdbDictObjectImpl::getImpl(* obj) : 0);
 }
 
 int
@@ -46,14 +46,22 @@ public:
   NdbDictionary::Object::Status m_status;
 
   bool change();
 
+  static NdbDictObjectImpl & getImpl(NdbDictionary::ObjectId & t) {
+    return t.m_impl;
+  }
+  static const NdbDictObjectImpl & getImpl(const NdbDictionary::ObjectId & t){
+    return t.m_impl;
+  }
+
 protected:
+  friend class NdbDictionary::ObjectId;
+
   NdbDictObjectImpl(NdbDictionary::Object::Type type) :
     m_type(type),
     m_status(NdbDictionary::Object::New) {
     m_id = -1;
   }
-
-  friend class NdbDictionary::ObjectId;
 };
 
 /**
@@ -121,7 +121,15 @@ NdbReceiver::calculate_batch_size(Uint32 key_size,
    * no more than MAX_SCAN_BATCH_SIZE is sent from all nodes in total per
    * batch.
    */
-  batch_byte_size= max_batch_byte_size;
+  if (batch_size == 0)
+  {
+    batch_byte_size= max_batch_byte_size;
+  }
+  else
+  {
+    batch_byte_size= batch_size * tot_size;
+  }
+
   if (batch_byte_size * parallelism > max_scan_batch_size) {
     batch_byte_size= max_scan_batch_size / parallelism;
   }
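
A worked example of the sizing rule above, on the assumption that tot_size is the estimated bytes per row: with batch_size = 64 and tot_size = 100, batch_byte_size becomes 6400; if parallelism = 4 and max_scan_batch_size = 16384, the product 6400 * 4 = 25600 exceeds the cap, so the value is clamped to 16384 / 4 = 4096 bytes per fragment.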
@@ -119,7 +119,8 @@ NdbScanOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection)
 int
 NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
                              Uint32 scan_flags,
-                             Uint32 parallel)
+                             Uint32 parallel,
+                             Uint32 batch)
 {
   m_ordered = m_descending = false;
   Uint32 fragCount = m_currentTable->m_fragmentCount;
@@ -191,8 +192,11 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
     tupScan = false;
   }
 
-  theParallelism = parallel;
+  if (rangeScan && (scan_flags & SF_OrderBy))
+    parallel = fragCount;
+
+  theParallelism = parallel;
 
   if(fix_receivers(parallel) == -1){
     setErrorCodeAbort(4000);
     return -1;
@@ -211,6 +215,7 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
   req->tableSchemaVersion = m_accessTable->m_version;
   req->storedProcId = 0xFFFF;
   req->buddyConPtr = theNdbCon->theBuddyConPtr;
+  req->first_batch_size = batch; // Save user specified batch size
 
   Uint32 reqInfo = 0;
   ScanTabReq::setParallelism(reqInfo, parallel);
@@ -768,13 +773,14 @@ int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
    * The number of records sent by each LQH is calculated and the kernel
    * is informed of this number by updating the SCAN_TABREQ signal
    */
-  Uint32 batch_size, batch_byte_size, first_batch_size;
+  ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
+  Uint32 batch_size = req->first_batch_size; // User specified
+  Uint32 batch_byte_size, first_batch_size;
   theReceiver.calculate_batch_size(key_size,
                                    theParallelism,
                                    batch_size,
                                    batch_byte_size,
                                    first_batch_size);
-  ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
   ScanTabReq::setScanBatch(req->requestInfo, batch_size);
   req->batch_byte_size= batch_byte_size;
   req->first_batch_size= first_batch_size;
|
||||||
int
|
int
|
||||||
NdbIndexScanOperation::readTuples(LockMode lm,
|
NdbIndexScanOperation::readTuples(LockMode lm,
|
||||||
Uint32 scan_flags,
|
Uint32 scan_flags,
|
||||||
Uint32 parallel)
|
Uint32 parallel,
|
||||||
|
Uint32 batch)
|
||||||
{
|
{
|
||||||
const bool order_by = scan_flags & SF_OrderBy;
|
const bool order_by = scan_flags & SF_OrderBy;
|
||||||
const bool order_desc = scan_flags & SF_Descending;
|
const bool order_desc = scan_flags & SF_Descending;
|
||||||
const bool read_range_no = scan_flags & SF_ReadRangeNo;
|
const bool read_range_no = scan_flags & SF_ReadRangeNo;
|
||||||
|
|
||||||
int res = NdbScanOperation::readTuples(lm, scan_flags, 0);
|
int res = NdbScanOperation::readTuples(lm, scan_flags, parallel, batch);
|
||||||
if(!res && read_range_no)
|
if(!res && read_range_no)
|
||||||
{
|
{
|
||||||
m_read_range_no = 1;
|
m_read_range_no = 1;
|
||||||
|
@ -1567,13 +1574,68 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend,
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool holdLock = false;
|
||||||
|
if (theSCAN_TABREQ)
|
||||||
|
{
|
||||||
|
ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
|
||||||
|
holdLock = ScanTabReq::getHoldLockFlag(req->requestInfo);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* When using locks, force close of scan directly
|
||||||
|
*/
|
||||||
|
if (holdLock && theError.code == 0 &&
|
||||||
|
(m_sent_receivers_count + m_conf_receivers_count + m_api_receivers_count))
|
||||||
|
{
|
||||||
|
NdbApiSignal tSignal(theNdb->theMyRef);
|
||||||
|
tSignal.setSignal(GSN_SCAN_NEXTREQ);
|
||||||
|
|
||||||
|
Uint32* theData = tSignal.getDataPtrSend();
|
||||||
|
Uint64 transId = theNdbCon->theTransactionId;
|
||||||
|
theData[0] = theNdbCon->theTCConPtr;
|
||||||
|
theData[1] = 1;
|
||||||
|
theData[2] = transId;
|
||||||
|
theData[3] = (Uint32) (transId >> 32);
|
||||||
|
|
||||||
|
tSignal.setLength(4);
|
||||||
|
int ret = tp->sendSignal(&tSignal, nodeId);
|
||||||
|
if (ret)
|
||||||
|
{
|
||||||
|
setErrorCode(4008);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If no receiver is outstanding...
|
||||||
|
* set it to 1 as execCLOSE_SCAN_REP resets it
|
||||||
|
*/
|
||||||
|
m_sent_receivers_count = m_sent_receivers_count ? m_sent_receivers_count : 1;
|
||||||
|
|
||||||
|
while(theError.code == 0 && (m_sent_receivers_count + m_conf_receivers_count))
|
||||||
|
{
|
||||||
|
int return_code = poll_guard->wait_scan(WAITFOR_SCAN_TIMEOUT, nodeId, forceSend);
|
||||||
|
switch(return_code){
|
||||||
|
case 0:
|
||||||
|
break;
|
||||||
|
case -1:
|
||||||
|
setErrorCode(4008);
|
||||||
|
case -2:
|
||||||
|
m_api_receivers_count = 0;
|
||||||
|
m_conf_receivers_count = 0;
|
||||||
|
m_sent_receivers_count = 0;
|
||||||
|
theNdbCon->theReleaseOnClose = true;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Wait for outstanding
|
* Wait for outstanding
|
||||||
*/
|
*/
|
||||||
while(theError.code == 0 && m_sent_receivers_count)
|
while(theError.code == 0 && m_sent_receivers_count)
|
||||||
{
|
{
|
||||||
int return_code= poll_guard->wait_scan(WAITFOR_SCAN_TIMEOUT, nodeId,
|
int return_code= poll_guard->wait_scan(WAITFOR_SCAN_TIMEOUT, nodeId, forceSend);
|
||||||
false);
|
|
||||||
switch(return_code){
|
switch(return_code){
|
||||||
case 0:
|
case 0:
|
||||||
break;
|
break;
|
||||||
|
|
|
@@ -420,6 +420,7 @@ ErrorBundle ErrorCodes[] = {
   { 1514, DMEC, SE, "Currently there is a limit of one logfile group" },
 
   { 773, DMEC, SE, "Out of string memory, please modify StringMemory config parameter" },
+  { 775, DMEC, SE, "Create file is not supported when Diskless=1" },
 
   /**
    * FunctionNotImplemented
@@ -1151,70 +1151,76 @@ runScanVariants(NDBT_Context* ctx, NDBT_Step* step)
   {
     for(int flags = 0; flags < 4; flags++)
     {
-      for (int par = 0; par < 16; par += 1 + (rand() % 3))
-      {
-        bool disk = flags & 1;
-        bool tups = flags & 2;
-        g_info << "lm: " << lm
-               << " disk: " << disk
-               << " tup scan: " << tups
-               << " par: " << par
-               << endl;
-
-        NdbConnection* pCon = pNdb->startTransaction();
-        NdbScanOperation* pOp = pCon->getNdbScanOperation(pTab->getName());
-        if (pOp == NULL) {
-          ERR(pCon->getNdbError());
-          return NDBT_FAILED;
-        }
-
-        if( pOp->readTuples((NdbOperation::LockMode)lm,
-                            tups ? NdbScanOperation::SF_TupScan : 0,
-                            par) != 0)
-        {
-          ERR(pCon->getNdbError());
-          return NDBT_FAILED;
-        }
-
-        int check = pOp->interpret_exit_ok();
-        if( check == -1 ) {
-          ERR(pCon->getNdbError());
-          return NDBT_FAILED;
-        }
-
-        // Define attributes to read
-        bool found_disk = false;
-        for(int a = 0; a<pTab->getNoOfColumns(); a++){
-          if (pTab->getColumn(a)->getStorageType() == NdbDictionary::Column::StorageTypeDisk)
-          {
-            found_disk = true;
-            if (!disk)
-              continue;
-          }
-
-          if((pOp->getValue(pTab->getColumn(a)->getName())) == 0) {
-            ERR(pCon->getNdbError());
-            return NDBT_FAILED;
-          }
-        }
-
-        if (! (disk && !found_disk))
-        {
-          check = pCon->execute(NoCommit);
-          if( check == -1 ) {
-            ERR(pCon->getNdbError());
-            return NDBT_FAILED;
-          }
-
-          int res;
-          int row = 0;
-          while((res = pOp->nextResult()) == 0);
-        }
-        pCon->close();
+      for (int batch = 0; batch < 100; batch += (1 + batch + (batch >> 3)))
+      {
+        for (int par = 0; par < 16; par += 1 + (rand() % 3))
+        {
+          bool disk = flags & 1;
+          bool tups = flags & 2;
+          g_info << "lm: " << lm
+                 << " disk: " << disk
+                 << " tup scan: " << tups
+                 << " par: " << par
+                 << " batch: " << batch
+                 << endl;
+
+          NdbConnection* pCon = pNdb->startTransaction();
+          NdbScanOperation* pOp = pCon->getNdbScanOperation(pTab->getName());
+          if (pOp == NULL) {
+            ERR(pCon->getNdbError());
+            return NDBT_FAILED;
+          }
+
+          if( pOp->readTuples((NdbOperation::LockMode)lm,
+                              tups ? NdbScanOperation::SF_TupScan : 0,
+                              par,
+                              batch) != 0)
+          {
+            ERR(pCon->getNdbError());
+            return NDBT_FAILED;
+          }
+
+          int check = pOp->interpret_exit_ok();
+          if( check == -1 ) {
+            ERR(pCon->getNdbError());
+            return NDBT_FAILED;
+          }
+
+          // Define attributes to read
+          bool found_disk = false;
+          for(int a = 0; a<pTab->getNoOfColumns(); a++){
+            if (pTab->getColumn(a)->getStorageType() ==
+                NdbDictionary::Column::StorageTypeDisk)
+            {
+              found_disk = true;
+              if (!disk)
+                continue;
+            }
+
+            if((pOp->getValue(pTab->getColumn(a)->getName())) == 0) {
+              ERR(pCon->getNdbError());
+              return NDBT_FAILED;
+            }
+          }
+
+          if (! (disk && !found_disk))
+          {
+            check = pCon->execute(NoCommit);
+            if( check == -1 ) {
+              ERR(pCon->getNdbError());
+              return NDBT_FAILED;
+            }
+
+            int res;
+            int row = 0;
+            while((res = pOp->nextResult()) == 0);
+          }
+          pCon->close();
+        }
       }
     }
   }
 
   return NDBT_OK;
 }
@@ -1559,6 +1559,56 @@ static int runCreateDropNR(NDBT_Context* ctx, NDBT_Step* step)
   DBUG_RETURN(result);
 }
 
+static
+int
+runSubscribeUnsubscribe(NDBT_Context* ctx, NDBT_Step* step)
+{
+  char buf[1024];
+  const NdbDictionary::Table & tab = * ctx->getTab();
+  sprintf(buf, "%s_EVENT", tab.getName());
+  Ndb* ndb = GETNDB(step);
+  int loops = 5 * ctx->getNumLoops();
+
+  while (--loops)
+  {
+    NdbEventOperation *pOp= ndb->createEventOperation(buf);
+    if (pOp == 0)
+    {
+      g_err << "createEventOperation: "
+            << ndb->getNdbError().code << " "
+            << ndb->getNdbError().message << endl;
+      return NDBT_FAILED;
+    }
+
+    int n_columns= tab.getNoOfColumns();
+    for (int j = 0; j < n_columns; j++)
+    {
+      pOp->getValue(tab.getColumn(j)->getName());
+      pOp->getPreValue(tab.getColumn(j)->getName());
+    }
+    if ( pOp->execute() )
+    {
+      g_err << "pOp->execute(): "
+            << pOp->getNdbError().code << " "
+            << pOp->getNdbError().message << endl;
+
+      ndb->dropEventOperation(pOp);
+
+      return NDBT_FAILED;
+    }
+
+    if (ndb->dropEventOperation(pOp))
+    {
+      g_err << "pOp->execute(): "
+            << ndb->getNdbError().code << " "
+            << ndb->getNdbError().message << endl;
+      return NDBT_FAILED;
+    }
+  }
+
+  return NDBT_OK;
+}
+
 NDBT_TESTSUITE(test_event);
 TESTCASE("BasicEventOperation",
          "Verify that we can listen to Events"
@@ -1673,6 +1723,13 @@ TESTCASE("CreateDropNR",
          "NOTE! No errors are allowed!" ){
   FINALIZER(runCreateDropNR);
 }
+TESTCASE("SubscribeUnsubscribe",
+         "A bunch of threads doing subscribe/unsubscribe in loop"
+         "NOTE! No errors are allowed!" ){
+  INITIALIZER(runCreateEvent);
+  STEPS(runSubscribeUnsubscribe, 16);
+  FINALIZER(runDropEvent);
+}
 NDBT_TESTSUITE_END(test_event);
 
 int main(int argc, const char** argv){
||||||
|
|
||||||
int main(int argc, const char** argv){
|
int main(int argc, const char** argv){
|
||||||
|
|
|
@ -533,9 +533,11 @@ BackupRestore::object(Uint32 type, const void * ptr)
|
||||||
if (!m_no_restore_disk)
|
if (!m_no_restore_disk)
|
||||||
{
|
{
|
||||||
NdbDictionary::Datafile old(*(NdbDictionary::Datafile*)ptr);
|
NdbDictionary::Datafile old(*(NdbDictionary::Datafile*)ptr);
|
||||||
NdbDictionary::Tablespace * ts = m_tablespaces[old.getTablespaceId()];
|
NdbDictionary::ObjectId objid;
|
||||||
|
old.getTablespaceId(&objid);
|
||||||
|
NdbDictionary::Tablespace * ts = m_tablespaces[objid.getObjectId()];
|
||||||
debug << "Connecting datafile " << old.getPath()
|
debug << "Connecting datafile " << old.getPath()
|
||||||
<< " to tablespace: oldid: " << old.getTablespaceId()
|
<< " to tablespace: oldid: " << objid.getObjectId()
|
||||||
<< " newid: " << ts->getObjectId() << endl;
|
<< " newid: " << ts->getObjectId() << endl;
|
||||||
old.setTablespace(* ts);
|
old.setTablespace(* ts);
|
||||||
info << "Creating datafile \"" << old.getPath() << "\"..." << flush;
|
info << "Creating datafile \"" << old.getPath() << "\"..." << flush;
|
||||||
|
@@ -554,10 +556,11 @@ BackupRestore::object(Uint32 type, const void * ptr)
     if (!m_no_restore_disk)
     {
       NdbDictionary::Undofile old(*(NdbDictionary::Undofile*)ptr);
-      NdbDictionary::LogfileGroup * lg =
-        m_logfilegroups[old.getLogfileGroupId()];
+      NdbDictionary::ObjectId objid;
+      old.getLogfileGroupId(&objid);
+      NdbDictionary::LogfileGroup * lg = m_logfilegroups[objid.getObjectId()];
       debug << "Connecting undofile " << old.getPath()
-            << " to logfile group: oldid: " << old.getLogfileGroupId()
+            << " to logfile group: oldid: " << objid.getObjectId()
             << " newid: " << lg->getObjectId()
             << " " << (void*)lg << endl;
       old.setLogfileGroup(* lg);
@@ -67,12 +67,6 @@ conf_to_src_LDFLAGS= @NOINST_LDFLAGS@
 #strtoull.o: @CHARSET_OBJS@
 
 
-if ASSEMBLER
-# On Linux gcc can compile the assembly files
-%.o : %.s
-	$(AS) $(ASFLAGS) -o $@ $<
-endif
-
 FLAGS=$(DEFS) $(INCLUDES) $(CPPFLAGS) $(CFLAGS) @NOINST_LDFLAGS@
 
 str_test: str_test.c $(pkglib_LIBRARIES)
@@ -148,6 +148,19 @@ They should be used with caution.
 
 %{see_base}
 
+%package bench
+Requires: %{name}-client perl-DBI perl
+Summary: MySQL - Benchmarks and test system
+Group: Applications/Databases
+Provides: mysql-bench
+Obsoletes: mysql-bench
+AutoReqProv: no
+
+%description bench
+This package contains MySQL benchmark scripts and data.
+
+%{see_base}
+
 %package devel
 Summary: MySQL - Development header files and libraries
 Group: Applications/Databases