Mirror of https://github.com/MariaDB/server.git
Merge mysql.com:/home/jonas/src/mysql-4.1
into mysql.com:/home/jonas/src/mysql-4.1-ndb
Commit: b931460546
192 changed files with 3467 additions and 3094 deletions
mysql-test/r/ndb_charset.result (new file, 191 lines)

@@ -0,0 +1,191 @@
drop table if exists t1;
create table t1 (
a char(3) character set latin1 collate latin1_bin primary key
) engine=ndb;
insert into t1 values('aAa');
insert into t1 values('aaa');
insert into t1 values('AAA');
select * from t1 order by a;
a
AAA
aAa
aaa
select * from t1 where a = 'aAa';
a
aAa
select * from t1 where a = 'aaa';
a
aaa
select * from t1 where a = 'AaA';
a
select * from t1 where a = 'AAA';
a
AAA
drop table t1;
create table t1 (
a char(3) character set latin1 collate latin1_swedish_ci primary key
) engine=ndb;
insert into t1 values('aAa');
insert into t1 values('aaa');
ERROR 23000: Duplicate entry 'aaa' for key 1
insert into t1 values('AAA');
ERROR 23000: Duplicate entry 'AAA' for key 1
select * from t1 order by a;
a
aAa
select * from t1 where a = 'aAa';
a
aAa
select * from t1 where a = 'aaa';
a
aAa
select * from t1 where a = 'AaA';
a
aAa
select * from t1 where a = 'AAA';
a
aAa
drop table t1;
create table t1 (
p int primary key,
a char(3) character set latin1 collate latin1_bin not null,
unique key(a)
) engine=ndb;
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
insert into t1 values(3, 'AAA');
select * from t1 order by p;
p a
1 aAa
2 aaa
3 AAA
select * from t1 where a = 'aAa';
p a
1 aAa
select * from t1 where a = 'aaa';
p a
2 aaa
select * from t1 where a = 'AaA';
p a
select * from t1 where a = 'AAA';
p a
3 AAA
drop table t1;
create table t1 (
p int primary key,
a char(3) character set latin1 collate latin1_swedish_ci not null,
unique key(a)
) engine=ndb;
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
ERROR 23000: Can't write, because of unique constraint, to table 't1'
insert into t1 values(3, 'AAA');
ERROR 23000: Can't write, because of unique constraint, to table 't1'
select * from t1 order by p;
p a
1 aAa
select * from t1 where a = 'aAa';
p a
1 aAa
select * from t1 where a = 'aaa';
p a
1 aAa
select * from t1 where a = 'AaA';
p a
1 aAa
select * from t1 where a = 'AAA';
p a
1 aAa
drop table t1;
create table t1 (
p int primary key,
a char(3) character set latin1 collate latin1_bin not null,
index(a)
) engine=ndb;
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
insert into t1 values(3, 'AAA');
insert into t1 values(4, 'aAa');
insert into t1 values(5, 'aaa');
insert into t1 values(6, 'AAA');
select * from t1 order by p;
p a
1 aAa
2 aaa
3 AAA
4 aAa
5 aaa
6 AAA
explain select * from t1 where a = 'zZz' order by p;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort
select * from t1 where a = 'aAa' order by p;
p a
1 aAa
4 aAa
select * from t1 where a = 'aaa' order by p;
p a
2 aaa
5 aaa
select * from t1 where a = 'AaA' order by p;
p a
select * from t1 where a = 'AAA' order by p;
p a
3 AAA
6 AAA
drop table t1;
create table t1 (
p int primary key,
a char(3) character set latin1 collate latin1_swedish_ci not null,
index(a)
) engine=ndb;
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
insert into t1 values(3, 'AAA');
insert into t1 values(4, 'aAa');
insert into t1 values(5, 'aaa');
insert into t1 values(6, 'AAA');
select * from t1 order by p;
p a
1 aAa
2 aaa
3 AAA
4 aAa
5 aaa
6 AAA
explain select * from t1 where a = 'zZz' order by p;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort
select * from t1 where a = 'aAa' order by p;
p a
1 aAa
2 aaa
3 AAA
4 aAa
5 aaa
6 AAA
select * from t1 where a = 'aaa' order by p;
p a
1 aAa
2 aaa
3 AAA
4 aAa
5 aaa
6 AAA
select * from t1 where a = 'AaA' order by p;
p a
1 aAa
2 aaa
3 AAA
4 aAa
5 aaa
6 AAA
select * from t1 where a = 'AAA' order by p;
p a
1 aAa
2 aaa
3 AAA
4 aAa
5 aaa
6 AAA
drop table t1;
@@ -4,7 +4,7 @@ PORT varchar(16) NOT NULL,
ACCESSNODE varchar(16) NOT NULL,
POP varchar(48) NOT NULL,
ACCESSTYPE int unsigned NOT NULL,
CUSTOMER_ID varchar(20) NOT NULL,
CUSTOMER_ID varchar(20) collate latin1_bin NOT NULL,
PROVIDER varchar(16),
TEXPIRE int unsigned,
NUM_IP int unsigned,

mysql-test/t/ndb_charset.test (new file, 159 lines)

@@ -0,0 +1,159 @@
--source include/have_ndb.inc

--disable_warnings
drop table if exists t1;
--enable_warnings

#
# Minimal NDB charset test.
#

# pk - binary

create table t1 (
a char(3) character set latin1 collate latin1_bin primary key
) engine=ndb;
# ok
insert into t1 values('aAa');
insert into t1 values('aaa');
insert into t1 values('AAA');
# 3
select * from t1 order by a;
# 1
select * from t1 where a = 'aAa';
# 1
select * from t1 where a = 'aaa';
# 0
select * from t1 where a = 'AaA';
# 1
select * from t1 where a = 'AAA';
drop table t1;

# pk - case insensitive

create table t1 (
a char(3) character set latin1 collate latin1_swedish_ci primary key
) engine=ndb;
# ok
insert into t1 values('aAa');
# fail
--error 1062
insert into t1 values('aaa');
--error 1062
insert into t1 values('AAA');
# 1
select * from t1 order by a;
# 1
select * from t1 where a = 'aAa';
# 1
select * from t1 where a = 'aaa';
# 1
select * from t1 where a = 'AaA';
# 1
select * from t1 where a = 'AAA';
drop table t1;

# unique hash index - binary

create table t1 (
p int primary key,
a char(3) character set latin1 collate latin1_bin not null,
unique key(a)
) engine=ndb;
# ok
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
insert into t1 values(3, 'AAA');
# 3
select * from t1 order by p;
# 1
select * from t1 where a = 'aAa';
# 1
select * from t1 where a = 'aaa';
# 0
select * from t1 where a = 'AaA';
# 1
select * from t1 where a = 'AAA';
drop table t1;

# unique hash index - case insensitive

create table t1 (
p int primary key,
a char(3) character set latin1 collate latin1_swedish_ci not null,
unique key(a)
) engine=ndb;
# ok
insert into t1 values(1, 'aAa');
# fail
--error 1169
insert into t1 values(2, 'aaa');
--error 1169
insert into t1 values(3, 'AAA');
# 1
select * from t1 order by p;
# 1
select * from t1 where a = 'aAa';
# 1
select * from t1 where a = 'aaa';
# 1
select * from t1 where a = 'AaA';
# 1
select * from t1 where a = 'AAA';
drop table t1;

# ordered index - binary

create table t1 (
p int primary key,
a char(3) character set latin1 collate latin1_bin not null,
index(a)
) engine=ndb;
# ok
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
insert into t1 values(3, 'AAA');
insert into t1 values(4, 'aAa');
insert into t1 values(5, 'aaa');
insert into t1 values(6, 'AAA');
# 6
select * from t1 order by p;
# plan
explain select * from t1 where a = 'zZz' order by p;
# 2
select * from t1 where a = 'aAa' order by p;
# 2
select * from t1 where a = 'aaa' order by p;
# 0
select * from t1 where a = 'AaA' order by p;
# 2
select * from t1 where a = 'AAA' order by p;
drop table t1;

# ordered index - case insensitive

create table t1 (
p int primary key,
a char(3) character set latin1 collate latin1_swedish_ci not null,
index(a)
) engine=ndb;
# ok
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
insert into t1 values(3, 'AAA');
insert into t1 values(4, 'aAa');
insert into t1 values(5, 'aaa');
insert into t1 values(6, 'AAA');
# 6
select * from t1 order by p;
# plan
explain select * from t1 where a = 'zZz' order by p;
# 6
select * from t1 where a = 'aAa' order by p;
# 6
select * from t1 where a = 'aaa' order by p;
# 6
select * from t1 where a = 'AaA' order by p;
# 6
select * from t1 where a = 'AAA' order by p;
drop table t1;
@@ -9,7 +9,7 @@ CREATE TABLE t1 (
ACCESSNODE varchar(16) NOT NULL,
POP varchar(48) NOT NULL,
ACCESSTYPE int unsigned NOT NULL,
CUSTOMER_ID varchar(20) NOT NULL,
CUSTOMER_ID varchar(20) collate latin1_bin NOT NULL,
PROVIDER varchar(16),
TEXPIRE int unsigned,
NUM_IP int unsigned,

@@ -46,9 +46,9 @@
*
* NdbDictionary::Column
* setName()
* setPrimaryKey()
* setType()
* setLength()
* setPrimaryKey()
* setNullable()
*
* NdbDictionary::Table
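The doc comment above is reordered because setType() resets all other column attributes to type-dependent defaults (see the NdbDictionary.hpp note later in this commit), so it must come before setPrimaryKey() and the other setters. A minimal sketch of the corrected call order, mirroring the example hunks that follow (table and column names are illustrative only):

NdbDictionary::Table myTable;
NdbDictionary::Column myColumn;

myColumn.setName("REG_NO");
myColumn.setType(NdbDictionary::Column::Unsigned);  // call first: resets the other attributes
myColumn.setLength(1);
myColumn.setPrimaryKey(true);                       // only effective after setType()
myColumn.setNullable(false);
myTable.addColumn(myColumn);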
@@ -234,9 +234,9 @@ int create_table(Ndb * myNdb)
* Column REG_NO
*/
myColumn.setName("REG_NO");
myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

@@ -244,9 +244,9 @@ int create_table(Ndb * myNdb)
* Column BRAND
*/
myColumn.setName("BRAND");
myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Char);
myColumn.setLength(20);
myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

@@ -254,9 +254,9 @@ int create_table(Ndb * myNdb)
* Column COLOR
*/
myColumn.setName("COLOR");
myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Char);
myColumn.setLength(20);
myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

@@ -454,6 +454,7 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData)

int main()
{
ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database

/*******************************************

@@ -493,5 +494,3 @@ int main()
std::cout << "Number of temporary errors: " << tempErrors << std::endl;
delete myNdb;
}

@@ -44,6 +44,7 @@

int main()
{
ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database
NdbDictionary::Table myTable;
NdbDictionary::Column myColumn;

@@ -78,16 +79,16 @@ int main()
myTable.setName("MYTABLENAME");

myColumn.setName("ATTR1");
myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

myColumn.setName("ATTR2");
myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

@@ -39,6 +39,7 @@ static void callback(int result, NdbConnection* NdbObject, void* aObject);

int main()
{
ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB_2" ); // Object representing the database

NdbConnection* myNdbConnection[2]; // For transactions

@@ -176,6 +176,7 @@ int executeInsertTransaction(int transactionId, Ndb* myNdb) {

int main()
{
ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database

/*******************************************

@@ -44,6 +44,7 @@

int main()
{
ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database
NdbDictionary::Table myTable;
NdbDictionary::Column myColumn;

@@ -79,16 +80,16 @@ int main()
myTable.setName("MYTABLENAME");

myColumn.setName("ATTR1");
myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

myColumn.setName("ATTR2");
myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

@@ -65,6 +65,7 @@ int myCreateEvent(Ndb* myNdb,

int main()
{
ndb_init();
Ndb* myNdb = myCreateNdb();
NdbDictionary::Dictionary *myDict;

@@ -47,9 +47,9 @@
*
* NdbDictionary::Column
* setName()
* setPrimaryKey()
* setType()
* setLength()
* setPrimaryKey()
* setNullable()
*
* NdbDictionary::Table

@@ -165,24 +165,24 @@ int create_table(Ndb * myNdb)
myTable.setName("GARAGE");

myColumn.setName("REG_NO");
myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

myColumn.setName("BRAND");
myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Char);
myColumn.setLength(20);
myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

myColumn.setName("COLOR");
myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Char);
myColumn.setLength(20);
myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);

@@ -761,6 +761,7 @@ int scan_print(Ndb * myNdb, int parallelism,

int main()
{
ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database

@@ -813,4 +814,3 @@ int main()

delete myNdb;
}

@@ -112,6 +112,7 @@ const char* ResultSetContainer::getAttrName(int i) const {return m_names[i];}

int main(int argc, const char** argv)
{
ndb_init();
Ndb* myNdb = new Ndb("ndbapi_example4"); // Object representing the database
NdbConnection* myNdbConnection; // For transactions
NdbOperation* myNdbOperation; // For operations

@@ -24,6 +24,32 @@
#include <kernel/LogLevel.hpp>
#include <signaldata/EventReport.hpp>

class EventLoggerBase {
public:
virtual ~EventLoggerBase();

/**
* LogLevel settings
*/
LogLevel m_logLevel;

/**
* This matrix defines which event should be printed when
*
* threshold - is in range [0-15]
* severity - DEBUG to ALERT (Type of log message)
*/
struct EventRepLogLevelMatrix {
EventReport::EventType eventType;
LogLevel::EventCategory eventCategory;
Uint32 threshold;
Logger::LoggerLevel severity;
};

static const EventRepLogLevelMatrix matrix[];
static const Uint32 matrixSize;
};

/**
* The EventLogger is primarily used for logging NDB events
* in the Management Server. It inherits all logging functionality of Logger.

@@ -58,7 +84,7 @@
* @see Logger
* @version #@ $Id: EventLogger.hpp,v 1.3 2003/09/01 10:15:52 innpeno Exp $
*/
class EventLogger : public Logger
class EventLogger : public EventLoggerBase, public Logger
{
public:
/**

@@ -70,7 +96,7 @@ public:
/**
* Destructor.
*/
~EventLogger();
virtual ~EventLogger();

/**
* Opens/creates the eventlog with the specified filename.

@@ -92,16 +118,6 @@ public:
*/
void close();

/**
* Logs the NDB event.
*
* @param nodeId the node id of event origin.
* @param eventType the type of event.
* @param theData the event data.
* @deprecated use log(int eventType, const Uint32* theData, NodeId nodeId)
*/
void log(NodeId nodeId, int eventType, const Uint32* theData);

/**
* Logs the NDB event.
*

@@ -109,32 +125,8 @@ public:
* @param theData the event data.
* @param nodeId the node id of event origin.
*/
void log(int eventType, const Uint32* theData, NodeId nodeId = 0);

/**
* Returns the current log levels.
* Enable, disable log levels to filter the events that are sent to the
* eventlog.
*
* @return the log level.
*/
LogLevel& getLoglevel();
virtual void log(int, const Uint32*, NodeId = 0,const class LogLevel * = 0);

/**
* Returns the log level that is used to filter an event. The event will not
* be logged unless its event category's log level is <= levelFilter.
*
* @return the log level filter that is used for all event categories.
*/
int getFilterLevel() const;
/**
* Sets log level filter. The event will be logged if
* the event category's log level is <= 'filterLevel'.
*
* @param level the log level to filter.
*/
void setFilterLevel(int filterLevel);

/**
* Returns the event text for the specified event report type.
*

@@ -143,72 +135,25 @@ public:
* @param nodeId a node id.
* @return the event report text.
*/
static const char* getText(int type,
static const char* getText(char * dst, size_t dst_len,
int type,
const Uint32* theData, NodeId nodeId = 0);

/**
* Find a category matching the string
*
* @param str string to match.
* @param cat the event category.
* @param exactMatch only do exact matching.
*
* @return TRUE if match is found, then cat is modified
* FALSE if match is not found
*/
static bool matchEventCategory(const char * str,
LogLevel::EventCategory * cat,
bool exactMatch = false);

/**
* Returns category name or NULL if not found.
* Returns the log level that is used to filter an event. The event will not
* be logged unless its event category's log level is <= levelFilter.
*
* @param cat the event category.
* @return category name.
* @return the log level filter that is used for all event categories.
*/
static const char * getEventCategoryName(LogLevel::EventCategory cat);
int getFilterLevel() const;

/**
* Specifies allowed event categories/log levels.
*/
struct EventCategoryName {
LogLevel::EventCategory category;
const char * name;
};

static const EventCategoryName eventCategoryNames[];
static const Uint32 noOfEventCategoryNames;

/**
* This matrix defines which event should be printed when
* Sets log level filter. The event will be logged if
* the event category's log level is <= 'filterLevel'.
*
* threshold - is in range [0-15]
* severity - DEBUG to ALERT (Type of log message)
*/
struct EventRepLogLevelMatrix {
EventReport::EventType eventType;
LogLevel::EventCategory eventCategory;
Uint32 threshold;
Logger::LoggerLevel severity;
};

static const EventRepLogLevelMatrix matrix[];

/**
* Default log levels for management nodes.
*
* threshold - is in range [0-15]
* @param level the log level to filter.
*/
struct EventLogMatrix {
LogLevel::EventCategory eventCategory;
Uint32 threshold;
};

static const EventLogMatrix defEventLogMatrix[];


static const Uint32 matrixSize;
static const Uint32 defEventLogMatrixSize;
void setFilterLevel(int filterLevel);

private:
/** Prohibit */

@@ -216,11 +161,10 @@ private:
EventLogger operator = (const EventLogger&);
bool operator == (const EventLogger&);

LogLevel m_logLevel;
Uint32 m_filterLevel;

STATIC_CONST(MAX_TEXT_LENGTH = 256);
static char m_text[MAX_TEXT_LENGTH];
char m_text[MAX_TEXT_LENGTH];
};
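A hypothetical caller of the reworked getText() above, which now fills a caller-supplied buffer instead of the removed static m_text (the event variables are assumed to be in scope):

char buf[256];  // same size as MAX_TEXT_LENGTH above
const char* msg = EventLogger::getText(buf, sizeof(buf), eventType, theData, nodeId);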
@@ -45,81 +45,30 @@ public:
* Copy operator
*/
LogLevel & operator= (const LogLevel &);

static const Uint32 MIN_LOGLEVEL_ID = CFG_LOGLEVEL_STARTUP;

enum EventCategory {
/**
* Events during all kind of startups
*/
llStartUp = CFG_LOGLEVEL_STARTUP - MIN_LOGLEVEL_ID,

/**
* Events during shutdown
*/
llShutdown = CFG_LOGLEVEL_SHUTDOWN - MIN_LOGLEVEL_ID,

/**
* Transaction statistics
* Job level
* TCP/IP speed
*/
llStatistic = CFG_LOGLEVEL_STATISTICS - MIN_LOGLEVEL_ID,

/**
* Checkpoints
*/
llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - MIN_LOGLEVEL_ID,

/**
* Events during node restart
*/
llNodeRestart = CFG_LOGLEVEL_NODERESTART - MIN_LOGLEVEL_ID,

/**
* Events related to connection / communication
*/
llConnection = CFG_LOGLEVEL_CONNECTION - MIN_LOGLEVEL_ID,

/**
* Assorted event w.r.t unexpected happenings
*/
llError = CFG_LOGLEVEL_ERROR - MIN_LOGLEVEL_ID,

/**
* Assorted event w.r.t warning
*/
llWarning = CFG_LOGLEVEL_WARNING - MIN_LOGLEVEL_ID,

/**
* Assorted event w.r.t information
*/
llInfo = CFG_LOGLEVEL_INFO - MIN_LOGLEVEL_ID,

/**
* Events related to global replication
*/
llGrep = CFG_LOGLEVEL_GREP - MIN_LOGLEVEL_ID
llStartUp = CFG_LOGLEVEL_STARTUP - CFG_MIN_LOGLEVEL,
llShutdown = CFG_LOGLEVEL_SHUTDOWN - CFG_MIN_LOGLEVEL,
llStatistic = CFG_LOGLEVEL_STATISTICS - CFG_MIN_LOGLEVEL,
llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - CFG_MIN_LOGLEVEL,
llNodeRestart = CFG_LOGLEVEL_NODERESTART - CFG_MIN_LOGLEVEL,
llConnection = CFG_LOGLEVEL_CONNECTION - CFG_MIN_LOGLEVEL,
llInfo = CFG_LOGLEVEL_INFO - CFG_MIN_LOGLEVEL,
llWarning = CFG_LOGLEVEL_WARNING - CFG_MIN_LOGLEVEL,
llError = CFG_LOGLEVEL_ERROR - CFG_MIN_LOGLEVEL,
llGrep = CFG_LOGLEVEL_GREP - CFG_MIN_LOGLEVEL,
llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL
,llBackup = CFG_LOGLEVEL_BACKUP - CFG_MIN_LOGLEVEL
};

struct LogLevelCategoryName {
const char* name;
};

/**
* Log/event level category names. Remember to update the names whenever
* a new category is added.
*/
static const LogLevelCategoryName LOGLEVEL_CATEGORY_NAME[];

/**
* No of categories
*/
#define _LOGLEVEL_CATEGORIES 10
#define _LOGLEVEL_CATEGORIES (CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1);
static const Uint32 LOGLEVEL_CATEGORIES = _LOGLEVEL_CATEGORIES;

void clear();

/**
* Note level is valid as 0-15
*/

@@ -130,26 +79,33 @@ public:
*/
Uint32 getLogLevel(EventCategory ec) const;

/**
* Set this= max(this, ll) per category
*/
LogLevel& set_max(const LogLevel& ll);

bool operator==(const LogLevel& l) const {
return memcmp(this, &l, sizeof(* this)) == 0;
}

LogLevel& operator=(const class EventSubscribeReq & req);

private:
/**
* The actual data
*/
Uint32 logLevelData[LOGLEVEL_CATEGORIES];

LogLevel(const LogLevel &);
Uint8 logLevelData[LOGLEVEL_CATEGORIES];
};

inline
LogLevel::LogLevel(){
clear();
clear();
}

inline
LogLevel &
LogLevel::operator= (const LogLevel & org){
for(Uint32 i = 0; i<LOGLEVEL_CATEGORIES; i++){
logLevelData[i] = org.logLevelData[i];
}
memcpy(logLevelData, org.logLevelData, sizeof(logLevelData));
return * this;
}

@@ -165,7 +121,7 @@ inline
void
LogLevel::setLogLevel(EventCategory ec, Uint32 level){
assert(ec >= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES);
logLevelData[ec] = level;
logLevelData[ec] = (Uint8)level;
}

inline

@@ -173,8 +129,30 @@ Uint32
LogLevel::getLogLevel(EventCategory ec) const{
assert(ec >= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES);

return logLevelData[ec];
return (Uint32)logLevelData[ec];
}

inline
LogLevel &
LogLevel::set_max(const LogLevel & org){
for(Uint32 i = 0; i<LOGLEVEL_CATEGORIES; i++){
if(logLevelData[i] < org.logLevelData[i])
logLevelData[i] = org.logLevelData[i];
}
return * this;
}

#include <signaldata/EventSubscribeReq.hpp>

inline
LogLevel&
LogLevel::operator=(const EventSubscribeReq& req)
{
clear();
for(size_t i = 0; i<req.noOfEntries; i++){
logLevelData[(req.theData[i] >> 16)] = req.theData[i] & 0xFFFF;
}
return * this;
}

#endif

@@ -89,7 +89,8 @@ public:
ArraySizeTooBig = 737,
RecordTooBig = 738,
InvalidPrimaryKeySize = 739,
NullablePrimaryKey = 740
NullablePrimaryKey = 740,
InvalidCharset = 743
};

private:

@@ -438,8 +438,8 @@ public:
case DictTabInfo::ExtText:
AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
// head + inline part [ attr precision ]
AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + AttributeExtPrecision;
// head + inline part [ attr precision lower half ]
AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF);
return true;
};
return false;

@@ -135,12 +135,17 @@ public:

//GREP
GrepSubscriptionInfo = 52,
GrepSubscriptionAlert = 53
};
GrepSubscriptionAlert = 53,

//BACKUP
BackupStarted = 54,
BackupFailedToStart = 55,
BackupCompleted = 56,
BackupAborted = 57
};

void setEventType(EventType type);
EventType getEventType() const;
private:
UintR eventType; // DATA 0
};

@@ -27,7 +27,7 @@
* RECIVER: SimBlockCMCtrBlck
*/

class EventSubscribeReq {
struct EventSubscribeReq {
/**
* Receiver(s)
*/

@@ -38,9 +38,8 @@ class EventSubscribeReq {
*/
friend class MgmtSrvr;

public:
STATIC_CONST( SignalLength = 22 );
private:
STATIC_CONST( SignalLength = 2 + LogLevel::LOGLEVEL_CATEGORIES );

/**
* Note: If you use the same blockRef as you have used earlier,
* you update your ongoing subscription

@@ -53,8 +52,15 @@ private:
*/
Uint32 noOfEntries;

Uint32 theCategories[10];
Uint32 theLevels[10];
Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES];

EventSubscribeReq& operator= (const LogLevel& ll){
noOfEntries = LogLevel::LOGLEVEL_CATEGORIES;
for(size_t i = 0; i<noOfEntries; i++){
theData[i] = (i << 16) | ll.getLogLevel((LogLevel::EventCategory)i);
}
return * this;
}
};

#endif
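The operator= above, like its counterparts on LogLevel and SetLogLevelOrd in this commit, packs each theData entry as (category << 16) | level. A standalone sketch of that convention (the local Uint32 typedef stands in for NDB's own):

typedef unsigned int Uint32;

Uint32 packEntry(Uint32 category, Uint32 level) {
  return (category << 16) | (level & 0xFFFF);   // category in the upper half, level in the lower
}

void unpackEntry(Uint32 entry, Uint32& category, Uint32& level) {
  category = entry >> 16;                       // matches LogLevel::operator=(const EventSubscribeReq&)
  level = entry & 0xFFFF;
}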
@@ -130,7 +130,7 @@ private:
Uint32 keyLength;
Uint32 nextLCP;
Uint32 noOfKeyAttr;
Uint32 noOfNewAttr;
Uint32 noOfNewAttr; // noOfCharsets in upper half
Uint32 checksumIndicator;
Uint32 noOfAttributeGroups;
Uint32 GCPIndicator;

@@ -18,6 +18,7 @@
#define SET_LOGLEVEL_ORD_HPP

#include <LogLevel.hpp>
#include "EventSubscribeReq.hpp"
#include "SignalData.hpp"

/**

@@ -39,11 +40,10 @@ class SetLogLevelOrd {
friend class NodeLogLevel;

private:
STATIC_CONST( SignalLength = 25 );

STATIC_CONST( SignalLength = 1 + LogLevel::LOGLEVEL_CATEGORIES );

Uint32 noOfEntries;
Uint32 theCategories[12];
Uint32 theLevels[12];
Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES];

void clear();

@@ -51,6 +51,22 @@ private:
* Note level is valid as 0-15
*/
void setLogLevel(LogLevel::EventCategory ec, int level = 7);

SetLogLevelOrd& operator= (const LogLevel& ll){
noOfEntries = LogLevel::LOGLEVEL_CATEGORIES;
for(size_t i = 0; i<noOfEntries; i++){
theData[i] = (i << 16) | ll.getLogLevel((LogLevel::EventCategory)i);
}
return * this;
}

SetLogLevelOrd& operator= (const EventSubscribeReq& ll){
noOfEntries = ll.noOfEntries;
for(size_t i = 0; i<noOfEntries; i++){
theData[i] = ll.theData[i];
}
return * this;
}
};

inline

@@ -62,9 +78,7 @@ SetLogLevelOrd::clear(){
inline
void
SetLogLevelOrd::setLogLevel(LogLevel::EventCategory ec, int level){
assert(noOfEntries < 12);
theCategories[noOfEntries] = ec;
theLevels[noOfEntries] = level;
theData[noOfEntries] = (ec << 16) | level;
noOfEntries++;
}

@@ -119,12 +119,13 @@ class TupAddAttrReq {
friend class Dblqh;
friend class Dbtux;
public:
STATIC_CONST( SignalLength = 4 );
STATIC_CONST( SignalLength = 5 );
private:
Uint32 tupConnectPtr;
Uint32 notused1;
Uint32 attrId;
Uint32 attrDescriptor;
Uint32 extTypeInfo;
};

class TupAddAttrConf {

@@ -141,6 +142,10 @@ class TupAddAttrRef {
friend class Dbtup;
public:
STATIC_CONST( SignalLength = 2 );
enum ErrorCode {
NoError = 0,
InvalidCharset = 743
};
private:
Uint32 userPtr;
Uint32 errorCode;

@@ -178,7 +183,8 @@ public:
STATIC_CONST( SignalLength = 2 );
enum ErrorCode {
NoError = 0,
InvalidAttributeType = 831,
InvalidAttributeType = 742,
InvalidCharset = 743,
InvalidNodeSize = 832
};
private:

@@ -55,24 +55,6 @@
extern "C" {
#endif

/**
* Format of statistical information from the NDB Cluster.
* STATISTIC_LINE is sent on the statistical port from the Management server,
* each line is timestamped with STATISTIC_DATE.
*/
#define STATISTIC_LINE "date=%s epochsecs=%d nodeid=%u trans=%u commit=%u " \
"read=%u insert=%u attrinfo=%u cops=%u abort=%u"
/**
* Format of statistical information from the NDB Cluster.
* STATISTIC_LINE is sent on the statistical port from the Management server,
* each line is timestamped with STATISTIC_DATE.
*/
#define STATISTIC_DATE "%d-%.2d-%.2d/%.2d:%.2d:%.2d"
/**
* Format of statistical information from the NDB Cluster.
*/
#define OP_STATISTIC_LINE "date=%s epochsecs=%d nodeid=%d operations=%u"

/**
* The NdbMgmHandle.
*/

@@ -272,19 +254,35 @@ extern "C" {
* Log categories
*/
enum ndb_mgm_event_category {
NDB_MGM_EVENT_CATEGORY_STARTUP, ///< Events during all kinds
///< of startups
NDB_MGM_EVENT_CATEGORY_SHUTDOWN, ///< Events during shutdown
NDB_MGM_EVENT_CATEGORY_STATISTIC, ///< Transaction statistics
///< (Job level, TCP/IP speed)
NDB_MGM_EVENT_CATEGORY_CHECKPOINT, ///< Checkpoints
NDB_MGM_EVENT_CATEGORY_NODE_RESTART, ///< Events during node restart
NDB_MGM_EVENT_CATEGORY_CONNECTION, ///< Events related to connection
///< and communication
NDB_MGM_EVENT_CATEGORY_ERROR ///< Assorted event w.r.t.
///< unexpected happenings
};
NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1, ///< Invalid
/**
* Events during all kinds of startups
*/
NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP,

/**
* Events during shutdown
*/
NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN,

/**
* Transaction statistics (Job level, TCP/IP speed)
*/
NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS,
NDB_MGM_EVENT_CATEGORY_CHECKPOINT = CFG_LOGLEVEL_CHECKPOINT,
NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART,
NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION,
NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG,
NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO,
NDB_MGM_EVENT_CATEGORY_WARNING = CFG_LOGLEVEL_WARNING,
NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR,
NDB_MGM_EVENT_CATEGORY_GREP = CFG_LOGLEVEL_GREP,
NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP,

NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL,
NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL
};

/***************************************************************************/
/**
* @name Functions: Error Handling

@@ -420,6 +418,9 @@ extern "C" {
*/
const char * ndb_mgm_get_node_status_string(enum ndb_mgm_node_status status);

ndb_mgm_event_category ndb_mgm_match_event_category(const char *);
const char * ndb_mgm_get_event_category_string(enum ndb_mgm_event_category);

/** @} *********************************************************************/
/**
* @name Functions: State of cluster

@@ -580,8 +581,7 @@ extern "C" {
*/
int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle,
int nodeId,
/*enum ndb_mgm_event_category category*/
char * category,
enum ndb_mgm_event_category category,
int level,
struct ndb_mgm_reply* reply);

@@ -597,8 +597,7 @@ extern "C" {
*/
int ndb_mgm_set_loglevel_node(NdbMgmHandle handle,
int nodeId,
/*enum ndb_mgm_event_category category*/
char * category,
enum ndb_mgm_event_category category,
int level,
struct ndb_mgm_reply* reply);

@@ -669,6 +668,15 @@ extern "C" {
int ndb_mgm_exit_single_user(NdbMgmHandle handle,
struct ndb_mgm_reply* reply);

/**
* Listen event
*
* @param filter pairs of { level, category } that will be
* pushed to fd, level=0 ends lists
* @return fd which events will be pushed to
*/
int ndb_mgm_listen_event(NdbMgmHandle handle, int filter[]);
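A hypothetical call showing the filter format documented above, i.e. { level, category } pairs terminated by a level of 0 (the handle, categories and levels are placeholders):

int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP,
                 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT,
                 0 };
int fd = ndb_mgm_listen_event(handle, filter);  // handle: a previously created NdbMgmHandle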
/**
* Get configuration
* @param handle NDB management handle.

@@ -1,7 +1,6 @@
#ifndef MGMAPI_CONFIG_PARAMTERS_H
#define MGMAPI_CONFIG_PARAMTERS_H

#define CFG_SYS_NAME 3
#define CFG_SYS_PRIMARY_MGM_NODE 1
#define CFG_SYS_CONFIG_GENERATION 2

@@ -64,16 +63,6 @@
#define CFG_DB_BACKUP_LOG_BUFFER_MEM 135
#define CFG_DB_BACKUP_WRITE_SIZE 136

#define CFG_LOGLEVEL_STARTUP 137
#define CFG_LOGLEVEL_SHUTDOWN 138
#define CFG_LOGLEVEL_STATISTICS 139
#define CFG_LOGLEVEL_CHECKPOINT 140
#define CFG_LOGLEVEL_NODERESTART 141
#define CFG_LOGLEVEL_CONNECTION 142
#define CFG_LOGLEVEL_INFO 143
#define CFG_LOGLEVEL_WARNING 144
#define CFG_LOGLEVEL_ERROR 145
#define CFG_LOGLEVEL_GREP 146
#define CFG_LOG_DESTINATION 147

#define CFG_DB_DISCLESS 148

@@ -95,6 +84,21 @@
#define CFG_NODE_ARBIT_RANK 200
#define CFG_NODE_ARBIT_DELAY 201

#define CFG_MIN_LOGLEVEL 250
#define CFG_LOGLEVEL_STARTUP 250
#define CFG_LOGLEVEL_SHUTDOWN 251
#define CFG_LOGLEVEL_STATISTICS 252
#define CFG_LOGLEVEL_CHECKPOINT 253
#define CFG_LOGLEVEL_NODERESTART 254
#define CFG_LOGLEVEL_CONNECTION 255
#define CFG_LOGLEVEL_INFO 256
#define CFG_LOGLEVEL_WARNING 257
#define CFG_LOGLEVEL_ERROR 258
#define CFG_LOGLEVEL_GREP 259
#define CFG_LOGLEVEL_DEBUG 260
#define CFG_LOGLEVEL_BACKUP 261
#define CFG_MAX_LOGLEVEL 261
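With CFG_MIN_LOGLEVEL = 250 and CFG_MAX_LOGLEVEL = 261 above, the _LOGLEVEL_CATEGORIES expression introduced in LogLevel.hpp works out to 261 - 250 + 1 = 12 categories, which is why the fixed-size theCategories/theLevels arrays elsewhere in this commit are replaced by theData[LogLevel::LOGLEVEL_CATEGORIES].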
#define CFG_MGM_PORT 300

#define CFG_CONNECTION_NODE_1 400

@@ -104,9 +108,9 @@
#define CFG_CONNECTION_NODE_1_SYSTEM 404
#define CFG_CONNECTION_NODE_2_SYSTEM 405
#define CFG_CONNECTION_SERVER_PORT 406
#define CFG_CONNECTION_HOSTNAME_1 407
#define CFG_CONNECTION_HOSTNAME_2 408

#define CFG_TCP_HOSTNAME_1 450
#define CFG_TCP_HOSTNAME_2 451
#define CFG_TCP_SERVER 452
#define CFG_TCP_SEND_BUFFER_SIZE 454
#define CFG_TCP_RECEIVE_BUFFER_SIZE 455

@@ -128,8 +132,6 @@
#define CFG_SCI_NODE2_ADAPTER0 555
#define CFG_SCI_NODE2_ADAPTER1 556

#define CFG_OSE_HOSTNAME_1 600
#define CFG_OSE_HOSTNAME_2 601
#define CFG_OSE_PRIO_A_SIZE 602
#define CFG_OSE_PRIO_B_SIZE 603
#define CFG_OSE_RECEIVE_ARRAY_SIZE 604

@@ -76,6 +76,9 @@ extern "C" {

#include <assert.h>

/* call in main() - does not return on error */
extern int ndb_init(void);

#ifndef HAVE_STRDUP
extern char * strdup(const char *s);
#endif

@@ -32,6 +32,8 @@
#include <ndb_types.h>

class Ndb;
struct charset_info_st;
typedef struct charset_info_st CHARSET_INFO;

/**
* @class NdbDictionary

@@ -257,6 +259,10 @@ public:
/**
* Set type of column
* @param type Type of column
*
* @note setType resets <em>all</em> column attributes
* to (type dependent) defaults and should be the first
* method to call. Default type is Unsigned.
*/
void setType(Type type);

@@ -301,28 +307,36 @@ public:
*/
int getLength() const;

/**
* For Char or Varchar or Text, set or get MySQL CHARSET_INFO. This
* specifies both character set and collation. See get_charset()
* etc in MySQL. (The cs is not "const" in MySQL).
*/
void setCharset(CHARSET_INFO* cs);
CHARSET_INFO* getCharset() const;
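A hypothetical use of the new setCharset() API above, roughly matching the latin1_bin column from the ndb_charset test earlier in this commit (get_charset_by_name() is the MySQL-side lookup hinted at in the comment; error handling omitted):

CHARSET_INFO* cs = get_charset_by_name("latin1_bin", MYF(0));  // MySQL charset lookup (assumed available)
NdbDictionary::Column col;
col.setName("a");
col.setType(NdbDictionary::Column::Char);  // setType() first: it resets the other attributes
col.setLength(3);
col.setCharset(cs);                        // new in this commit
col.setPrimaryKey(true);
col.setNullable(false);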
/**
* For blob, set or get "inline size" i.e. number of initial bytes
* to store in table's blob attribute. This part is normally in
* main memory and can be indexed and interpreted.
*/
void setInlineSize(int size) { setPrecision(size); }
int getInlineSize() const { return getPrecision(); }
void setInlineSize(int size);
int getInlineSize() const;

/**
* For blob, set or get "part size" i.e. number of bytes to store in
* each tuple of the "blob table". Can be set to zero to omit parts
* and to allow only inline bytes ("tinyblob").
*/
void setPartSize(int size) { setScale(size); }
int getPartSize() const { return getScale(); }
void setPartSize(int size);
int getPartSize() const;

/**
* For blob, set or get "stripe size" i.e. number of consecutive
* <em>parts</em> to store in each node group.
*/
void setStripeSize(int size) { setLength(size); }
int getStripeSize() const { return getLength(); }
void setStripeSize(int size);
int getStripeSize() const;

/**
* Get size of element

@@ -218,15 +218,18 @@ public:
void printState();
#endif

unsigned short m_service_port;

class Transporter_interface {
public:
unsigned short m_service_port;
const char *m_interface;
};
Vector<Transporter_interface> m_transporter_interface;
void add_transporter_interface(const char *interface, unsigned short port);
protected:

private:
void * callbackObj;

TransporterService *m_transporter_service;
char *m_interface_name;
struct NdbThread *m_start_clients_thread;
bool m_run_start_clients_thread;

@@ -40,11 +40,14 @@ public:
* Compare kernel attribute values. Returns -1, 0, +1 for less,
* equal, greater, respectively. Parameters are pointers to values,
* full attribute size in words, and size of available data in words.
* There is also pointer to type specific extra info. Char types
* receive CHARSET_INFO in it.
*
* If available size is less than full size, CmpUnknown may be
* returned. If a value cannot be parsed, it compares like NULL i.e.
* less than any valid value.
*/
typedef int Cmp(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size);
typedef int Cmp(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size);

enum CmpResult {
CmpLess = -1,

@@ -55,6 +58,7 @@ public:

/**
* Kernel data types. Must match m_typeList in NdbSqlUtil.cpp.
* Now also must match types in NdbDictionary.
*/
struct Type {
enum Enum {

@@ -90,6 +94,18 @@ public:
*/
static const Type& getType(Uint32 typeId);

/**
* Get type by id but replace char type by corresponding binary type.
*/
static const Type& getTypeBinary(Uint32 typeId);

/**
* Check character set.
*/
static bool usable_in_pk(Uint32 typeId, const void* cs);
static bool usable_in_hash_index(Uint32 typeId, const void* cs);
static bool usable_in_ordered_index(Uint32 typeId, const void* cs);

private:
/**
* List of all types. Must match Type::Enum.
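A hypothetical comparator written against the extended Cmp typedef above; the new leading info pointer carries the type-specific extra data (a CHARSET_INFO for char types), which this binary-only placeholder deliberately ignores (Uint32, NdbSqlUtil and CmpUnknown are assumed to come from the surrounding headers):

#include <string.h>

static int cmpBinary(const void* info, const Uint32* p1, const Uint32* p2,
                     Uint32 full, Uint32 size)
{
  (void)info;                                 // a charset-aware comparator would use this
  Uint32 words = (size < full ? size : full);
  int r = memcmp(p1, p2, words * 4);          // compare the available words byte-wise
  if (r != 0)
    return (r < 0) ? -1 : +1;
  return (size < full) ? NdbSqlUtil::CmpUnknown : 0;  // partial data: result still unknown
}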
@@ -76,7 +76,7 @@ public:
* then close the socket
* Returns true if succeding in binding
*/
bool tryBind(unsigned short port, const char * intface = 0) const;
static bool tryBind(unsigned short port, const char * intface = 0);

/**
* Setup socket

(One file diff suppressed because it is too large.)
|
@ -2,7 +2,7 @@ SUBDIRS = signaldata
|
|||
|
||||
noinst_LTLIBRARIES = libtrace.la
|
||||
|
||||
libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp LogLevel.cpp EventLogger.cpp GrepError.cpp
|
||||
libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp GrepError.cpp
|
||||
|
||||
include $(top_srcdir)/ndb/config/common.mk.am
|
||||
include $(top_srcdir)/ndb/config/type_kernel.mk.am
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <ndb_version.h>
|
||||
|
||||
#include <ConfigRetriever.hpp>
|
||||
#include <SocketServer.hpp>
|
||||
|
||||
#include "LocalConfig.hpp"
|
||||
#include <NdbSleep.h>
|
||||
|
@ -272,43 +273,15 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32
|
|||
NdbConfig_SetPath(datadir);
|
||||
}
|
||||
|
||||
char localhost[MAXHOSTNAMELEN];
|
||||
if(NdbHost_GetHostName(localhost) != 0){
|
||||
snprintf(buf, 255, "Unable to get own hostname");
|
||||
if (hostname && hostname[0] != 0 &&
|
||||
!SocketServer::tryBind(0,hostname)) {
|
||||
snprintf(buf, 255, "Config hostname(%s) don't match a local interface,"
|
||||
" tried to bind, error = %d - %s",
|
||||
hostname, errno, strerror(errno));
|
||||
setError(CR_ERROR, buf);
|
||||
return false;
|
||||
}
|
||||
|
||||
do {
|
||||
if(strlen(hostname) == 0)
|
||||
break;
|
||||
|
||||
if(strcasecmp(hostname, localhost) == 0)
|
||||
break;
|
||||
|
||||
if(strcasecmp(hostname, "localhost") == 0)
|
||||
break;
|
||||
|
||||
struct in_addr local, config;
|
||||
bool b1 = false, b2 = false, b3 = false;
|
||||
b1 = Ndb_getInAddr(&local, localhost) == 0;
|
||||
b2 = Ndb_getInAddr(&config, hostname) == 0;
|
||||
b3 = memcmp(&local, &config, sizeof(local)) == 0;
|
||||
|
||||
if(b1 && b2 && b3)
|
||||
break;
|
||||
|
||||
b1 = Ndb_getInAddr(&local, "localhost") == 0;
|
||||
b3 = memcmp(&local, &config, sizeof(local)) == 0;
|
||||
if(b1 && b2 && b3)
|
||||
break;
|
||||
|
||||
snprintf(buf, 255, "Local hostname(%s) and config hostname(%s) dont match",
|
||||
localhost, hostname);
|
||||
setError(CR_ERROR, buf);
|
||||
return false;
|
||||
} while(false);
|
||||
|
||||
unsigned int _type;
|
||||
if(ndb_mgm_get_int_parameter(it, CFG_TYPE_OF_SECTION, &_type)){
|
||||
snprintf(buf, 255, "Unable to get type of node(%d) from config",
|
||||
|
@ -344,7 +317,7 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32
|
|||
const char * name;
|
||||
struct in_addr addr;
|
||||
BaseString tmp;
|
||||
if(!iter.get(CFG_TCP_HOSTNAME_1, &name) && strlen(name)){
|
||||
if(!iter.get(CFG_CONNECTION_HOSTNAME_1, &name) && strlen(name)){
|
||||
if(Ndb_getInAddr(&addr, name) != 0){
|
||||
tmp.assfmt("Unable to lookup/illegal hostname %s, "
|
||||
"connection from node %d to node %d",
|
||||
|
@ -354,7 +327,7 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32
|
|||
}
|
||||
}
|
||||
|
||||
if(!iter.get(CFG_TCP_HOSTNAME_2, &name) && strlen(name)){
|
||||
if(!iter.get(CFG_CONNECTION_HOSTNAME_2, &name) && strlen(name)){
|
||||
if(Ndb_getInAddr(&addr, name) != 0){
|
||||
tmp.assfmt("Unable to lookup/illegal hostname %s, "
|
||||
"connection from node %d to node %d",
|
||||
|
|
|
@ -345,18 +345,27 @@ IPCConfig::configureTransporters(Uint32 nodeId,
|
|||
const class ndb_mgm_configuration & config,
|
||||
class TransporterRegistry & tr){
|
||||
|
||||
Uint32 noOfTransportersCreated= 0, server_port= 0;
|
||||
Uint32 noOfTransportersCreated= 0;
|
||||
ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION);
|
||||
|
||||
for(iter.first(); iter.valid(); iter.next()){
|
||||
|
||||
Uint32 nodeId1, nodeId2, remoteNodeId;
|
||||
const char * remoteHostName= 0, * localHostName= 0;
|
||||
if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue;
|
||||
if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue;
|
||||
|
||||
if(nodeId1 != nodeId && nodeId2 != nodeId) continue;
|
||||
remoteNodeId = (nodeId == nodeId1 ? nodeId2 : nodeId1);
|
||||
|
||||
{
|
||||
const char * host1= 0, * host2= 0;
|
||||
iter.get(CFG_CONNECTION_HOSTNAME_1, &host1);
|
||||
iter.get(CFG_CONNECTION_HOSTNAME_2, &host2);
|
||||
localHostName = (nodeId == nodeId1 ? host1 : host2);
|
||||
remoteHostName = (nodeId == nodeId1 ? host2 : host1);
|
||||
}
|
||||
|
||||
Uint32 sendSignalId = 1;
|
||||
Uint32 checksum = 1;
|
||||
if(iter.get(CFG_CONNECTION_SEND_SIGNAL_ID, &sendSignalId)) continue;
|
||||
|
@ -365,14 +374,10 @@ IPCConfig::configureTransporters(Uint32 nodeId,
|
|||
Uint32 type = ~0;
|
||||
if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue;
|
||||
|
||||
Uint32 tmp_server_port= 0;
|
||||
if(iter.get(CFG_CONNECTION_SERVER_PORT, &tmp_server_port)) break;
|
||||
Uint32 server_port= 0;
|
||||
if(iter.get(CFG_CONNECTION_SERVER_PORT, &server_port)) break;
|
||||
if (nodeId <= nodeId1 && nodeId <= nodeId2) {
|
||||
if (server_port && server_port != tmp_server_port) {
|
||||
ndbout << "internal error in config setup of server ports line= " << __LINE__ << endl;
|
||||
exit(-1);
|
||||
}
|
||||
server_port= tmp_server_port;
|
||||
tr.add_transporter_interface(localHostName, server_port);
|
||||
}
|
||||
|
||||
switch(type){
|
||||
|
@ -388,7 +393,7 @@ IPCConfig::configureTransporters(Uint32 nodeId,
|
|||
if(iter.get(CFG_SHM_KEY, &conf.shmKey)) break;
|
||||
if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shmSize)) break;
|
||||
|
||||
conf.port= tmp_server_port;
|
||||
conf.port= server_port;
|
||||
|
||||
if(!tr.createTransporter(&conf)){
|
||||
ndbout << "Failed to create SHM Transporter from: "
|
||||
|
@ -437,14 +442,10 @@ IPCConfig::configureTransporters(Uint32 nodeId,
|
|||
case CONNECTION_TYPE_TCP:{
|
||||
TCP_TransporterConfiguration conf;
|
||||
|
||||
const char * host1, * host2;
|
||||
if(iter.get(CFG_TCP_HOSTNAME_1, &host1)) break;
|
||||
if(iter.get(CFG_TCP_HOSTNAME_2, &host2)) break;
|
||||
|
||||
if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.sendBufferSize)) break;
|
||||
if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.maxReceiveSize)) break;
|
||||
|
||||
conf.port= tmp_server_port;
|
||||
conf.port= server_port;
|
||||
const char * proxy;
|
||||
if (!iter.get(CFG_TCP_PROXY, &proxy)) {
|
||||
if (strlen(proxy) > 0 && nodeId2 == nodeId) {
|
||||
|
@ -455,8 +456,8 @@ IPCConfig::configureTransporters(Uint32 nodeId,
|
|||
|
||||
conf.localNodeId = nodeId;
|
||||
conf.remoteNodeId = remoteNodeId;
|
||||
conf.localHostName = (nodeId == nodeId1 ? host1 : host2);
|
||||
conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1);
|
||||
conf.localHostName = localHostName;
|
||||
conf.remoteHostName = remoteHostName;
|
||||
conf.byteOrder = 0;
|
||||
conf.compression = 0;
|
||||
conf.checksum = checksum;
|
||||
|
@ -470,19 +471,15 @@ IPCConfig::configureTransporters(Uint32 nodeId,
|
|||
}
|
||||
case CONNECTION_TYPE_OSE:{
|
||||
OSE_TransporterConfiguration conf;
|
||||
|
||||
const char * host1, * host2;
|
||||
if(iter.get(CFG_OSE_HOSTNAME_1, &host1)) break;
|
||||
if(iter.get(CFG_OSE_HOSTNAME_2, &host2)) break;
|
||||
|
||||
|
||||
if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.prioASignalSize)) break;
|
||||
if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.prioBSignalSize)) break;
|
||||
if(iter.get(CFG_OSE_RECEIVE_ARRAY_SIZE, &conf.receiveBufferSize)) break;
|
||||
|
||||
conf.localNodeId = nodeId;
|
||||
conf.remoteNodeId = remoteNodeId;
|
||||
conf.localHostName = (nodeId == nodeId1 ? host1 : host2);
|
||||
conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1);
|
||||
conf.localHostName = localHostName;
|
||||
conf.remoteHostName = remoteHostName;
|
||||
conf.byteOrder = 0;
|
||||
conf.compression = 0;
|
||||
conf.checksum = checksum;
|
||||
|
@ -502,9 +499,6 @@ IPCConfig::configureTransporters(Uint32 nodeId,
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
tr.m_service_port= server_port;
|
||||
|
||||
return noOfTransportersCreated;
|
||||
}
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
|
||||
#include <ndb_global.h>
|
||||
#include <NdbMutex.h>
|
||||
#include <NdbTCP.h>
|
||||
|
||||
|
@ -27,13 +28,14 @@ static NdbMutex LOCK_gethostbyname = NDB_MUTEX_INITIALIZER;
|
|||
extern "C"
|
||||
int
|
||||
Ndb_getInAddr(struct in_addr * dst, const char *address) {
|
||||
DBUG_ENTER("Ndb_getInAddr");
|
||||
struct hostent * hostPtr;
|
||||
NdbMutex_Lock(&LOCK_gethostbyname);
|
||||
hostPtr = gethostbyname(address);
|
||||
if (hostPtr != NULL) {
|
||||
dst->s_addr = ((struct in_addr *) *hostPtr->h_addr_list)->s_addr;
|
||||
NdbMutex_Unlock(&LOCK_gethostbyname);
|
||||
return 0;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
NdbMutex_Unlock(&LOCK_gethostbyname);
|
||||
|
||||
|
@ -47,9 +49,11 @@ Ndb_getInAddr(struct in_addr * dst, const char *address) {
|
|||
#endif
|
||||
)
|
||||
{
|
||||
return 0;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
return -1;
|
||||
DBUG_PRINT("error",("inet_addr(%s) - %d - %s",
|
||||
address, errno, strerror(errno)));
|
||||
DBUG_RETURN(-1);
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
|
|
@ -98,9 +98,8 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd)
|
|||
|
||||
TransporterRegistry::TransporterRegistry(void * callback,
|
||||
unsigned _maxTransporters,
|
||||
unsigned sizeOfLongSignalMemory) {
|
||||
|
||||
m_transporter_service= 0;
|
||||
unsigned sizeOfLongSignalMemory)
|
||||
{
|
||||
nodeIdSpecified = false;
|
||||
maxTransporters = _maxTransporters;
|
||||
sendCounter = 1;
|
||||
|
@ -150,7 +149,6 @@ TransporterRegistry::~TransporterRegistry() {
|
|||
delete[] theTransporters;
|
||||
delete[] performStates;
|
||||
delete[] ioStates;
|
||||
|
||||
#ifdef NDB_OSE_TRANSPORTER
|
||||
if(theOSEReceiver != NULL){
|
||||
theOSEReceiver->destroyPhantom();
|
||||
|
@@ -1159,55 +1157,67 @@ TransporterRegistry::stop_clients()
return true;
}

void
TransporterRegistry::add_transporter_interface(const char *interface, unsigned short port)
{
DBUG_ENTER("TransporterRegistry::add_transporter_interface");
DBUG_PRINT("enter",("interface=%s, port= %d", interface, port));
if (interface && strlen(interface) == 0)
interface= 0;

for (unsigned i= 0; i < m_transporter_interface.size(); i++)
{
Transporter_interface &tmp= m_transporter_interface[i];
if (port != tmp.m_service_port)
continue;
if (interface != 0 && tmp.m_interface != 0 &&
strcmp(interface, tmp.m_interface) == 0)
{
DBUG_VOID_RETURN; // found match, no need to insert
}
if (interface == 0 && tmp.m_interface == 0)
{
DBUG_VOID_RETURN; // found match, no need to insert
}
}
Transporter_interface t;
t.m_service_port= port;
t.m_interface= interface;
m_transporter_interface.push_back(t);
DBUG_PRINT("exit",("interface and port added"));
DBUG_VOID_RETURN;
}

bool
TransporterRegistry::start_service(SocketServer& socket_server)
{
#if 0
for (int i= 0, n= 0; n < nTransporters; i++){
Transporter * t = theTransporters[i];
if (!t)
continue;
n++;
if (t->isServer) {
t->m_service = new TransporterService(new SocketAuthSimple("ndbd passwd"));
if(!socket_server.setup(t->m_service, t->m_r_port, 0))
{
ndbout_c("Unable to setup transporter service port: %d!\n"
"Please check if the port is already used,\n"
"(perhaps a mgmt server is already running)",
m_service_port);
delete t->m_service;
return false;
}
}
if (m_transporter_interface.size() > 0 && nodeIdSpecified != true)
{
ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
return false;
}
#endif

if (m_service_port != 0) {

m_transporter_service = new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd"));

if (nodeIdSpecified != true) {
ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
for (unsigned i= 0; i < m_transporter_interface.size(); i++)
{
Transporter_interface &t= m_transporter_interface[i];
if (t.m_service_port == 0)
{
continue;
}
TransporterService *transporter_service =
new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd"));
if(!socket_server.setup(transporter_service,
t.m_service_port, t.m_interface))
{
ndbout_c("Unable to setup transporter service port: %s:%d!\n"
"Please check if the port is already used,\n"
"(perhaps the node is already running)",
t.m_interface ? t.m_interface : "*", t.m_service_port);
delete transporter_service;
return false;
}

//m_interface_name = "ndbd";
m_interface_name = 0;

if(!socket_server.setup(m_transporter_service, m_service_port, m_interface_name))
{
ndbout_c("Unable to setup transporter service port: %d!\n"
"Please check if the port is already used,\n"
"(perhaps a mgmt server is already running)",
m_service_port);
delete m_transporter_service;
return false;
}
m_transporter_service->setTransporterRegistry(this);
} else
m_transporter_service= 0;

transporter_service->setTransporterRegistry(this);
}
return true;
}

@@ -1281,3 +1291,5 @@ NdbOut & operator <<(NdbOut & out, SignalHeader & sh){
out << "trace: " << (int)sh.theTrace << endl;
return out;
}

template class Vector<TransporterRegistry::Transporter_interface>;
@@ -9,7 +9,7 @@ libgeneral_la_SOURCES = \
NdbSqlUtil.cpp new.cpp \
uucode.c random.c getarg.c version.c \
strdup.c strlcat.c strlcpy.c \
ConfigValues.cpp
ConfigValues.cpp ndb_init.c

include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
@@ -176,10 +176,29 @@ NdbSqlUtil::getType(Uint32 typeId)
return m_typeList[Type::Undefined];
}

const NdbSqlUtil::Type&
NdbSqlUtil::getTypeBinary(Uint32 typeId)
{
switch (typeId) {
case Type::Char:
typeId = Type::Binary;
break;
case Type::Varchar:
typeId = Type::Varbinary;
break;
case Type::Text:
typeId = Type::Blob;
break;
default:
break;
}
return getType(typeId);
}

// compare

int
NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpTinyint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Int8 v; } u1, u2;

@@ -193,7 +212,7 @@ NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s
}

int
NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpTinyunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Uint8 v; } u1, u2;

@@ -207,7 +226,7 @@ NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uin
}

int
NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpSmallint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Int16 v; } u1, u2;

@@ -221,7 +240,7 @@ NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}

int
NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpSmallunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Uint16 v; } u1, u2;

@@ -235,7 +254,7 @@ NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Ui
}

int
NdbSqlUtil::cmpMediumint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpMediumint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { const Uint32* p; const unsigned char* v; } u1, u2;

@@ -251,7 +270,7 @@ NdbSqlUtil::cmpMediumint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}

int
NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpMediumunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { const Uint32* p; const unsigned char* v; } u1, u2;

@@ -267,7 +286,7 @@ NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, U
}

int
NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpInt(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Int32 v; } u1, u2;

@@ -281,7 +300,7 @@ NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
}

int
NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpUnsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Uint32 v; } u1, u2;

@@ -295,7 +314,7 @@ NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}

int
NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpBigint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
if (size >= 2) {

@@ -314,7 +333,7 @@ NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si
}

int
NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpBigunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
if (size >= 2) {

@@ -333,7 +352,7 @@ NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint
}

int
NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpFloat(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; float v; } u1, u2;

@@ -348,7 +367,7 @@ NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 siz
}

int
NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpDouble(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
if (size >= 2) {

@@ -368,7 +387,7 @@ NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si
}

int
NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpDecimal(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
// not used by MySQL or NDB
@@ -377,27 +396,34 @@ NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s
}

int
NdbSqlUtil::cmpChar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpChar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
// collation does not work on prefix for some charsets
assert(full == size && size > 0);
/*
* Char is blank-padded to length and null-padded to word size. There
* is no terminator so we compare the full values.
* Char is blank-padded to length and null-padded to word size.
*/
union { const Uint32* p; const char* v; } u1, u2;
union { const Uint32* p; const uchar* v; } u1, u2;
u1.p = p1;
u2.p = p2;
int k = memcmp(u1.v, u2.v, size << 2);
return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
// not const in MySQL
CHARSET_INFO* cs = (CHARSET_INFO*)(info);
// length in bytes including null padding to Uint32
uint l1 = (full << 2);
int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1);
return k < 0 ? -1 : k > 0 ? +1 : 0;
}

int
NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpVarchar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Varchar is not allowed to contain a null byte and the value is
* null-padded. Therefore comparison does not need to use the length.
*
* Not used before MySQL 5.0. Format is likely to change. Handle
* only binary collation for now.
*/
union { const Uint32* p; const char* v; } u1, u2;
u1.p = p1;

@@ -408,7 +434,7 @@ NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s
}

int
NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpBinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*

@@ -422,12 +448,14 @@ NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si
}

int
NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpVarbinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Binary data of variable length padded with nulls. The comparison
* does not need to use the length.
*
* Not used before MySQL 5.0. Format is likely to change.
*/
union { const Uint32* p; const unsigned char* v; } u1, u2;
u1.p = p1;

@@ -438,11 +466,13 @@ NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}

int
NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpDatetime(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Datetime is CC YY MM DD hh mm ss \0
*
* Not used via MySQL.
*/
union { const Uint32* p; const unsigned char* v; } u1, u2;
u1.p = p1;

@@ -459,11 +489,13 @@ NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}

int
NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpTimespec(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Timespec is CC YY MM DD hh mm ss \0 NN NN NN NN
*
* Not used via MySQL.
*/
union { const Uint32* p; const unsigned char* v; } u1, u2;
u1.p = p1;

@@ -490,12 +522,11 @@ NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}

int
NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpBlob(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Blob comparison is on the inline bytes. Except for larger header
* the format is like Varbinary.
* Blob comparison is on the inline bytes (null padded).
*/
const unsigned head = NDB_BLOB_HEAD_SIZE;
// skip blob head
@@ -510,25 +541,107 @@ NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size
}

int
NdbSqlUtil::cmpText(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
NdbSqlUtil::cmpText(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
// collation does not work on prefix for some charsets
assert(full == size && size > 0);
/*
* Text comparison is on the inline bytes. Except for larger header
* the format is like Varchar.
* Text comparison is on the inline bytes (blank padded). Currently
* not supported for multi-byte charsets.
*/
const unsigned head = NDB_BLOB_HEAD_SIZE;
// skip blob head
if (size >= head + 1) {
union { const Uint32* p; const char* v; } u1, u2;
union { const Uint32* p; const uchar* v; } u1, u2;
u1.p = p1 + head;
u2.p = p2 + head;
int k = memcmp(u1.v, u2.v, (size - head) << 2);
return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
// not const in MySQL
CHARSET_INFO* cs = (CHARSET_INFO*)(info);
// length in bytes including null padding to Uint32
uint l1 = (full << 2);
int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1);
return k < 0 ? -1 : k > 0 ? +1 : 0;
}
return CmpUnknown;
}

// check charset

bool
NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
{
const Type& type = getType(typeId);
switch (type.m_typeId) {
case Type::Undefined:
break;
case Type::Char:
{
const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
return
cs != 0 &&
cs->cset != 0 &&
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->strxfrm_multiply == 1; // current limitation
}
break;
case Type::Varchar:
return true; // Varchar not used via MySQL
case Type::Blob:
case Type::Text:
break;
default:
return true;
}
return false;
}

bool
NdbSqlUtil::usable_in_hash_index(Uint32 typeId, const void* info)
{
return usable_in_pk(typeId, info);
}

bool
NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
{
const Type& type = getType(typeId);
switch (type.m_typeId) {
case Type::Undefined:
break;
case Type::Char:
{
const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
return
cs != 0 &&
cs->cset != 0 &&
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->coll->strnncollsp != 0 &&
cs->strxfrm_multiply == 1; // current limitation
}
break;
case Type::Varchar:
return true; // Varchar not used via MySQL
case Type::Text:
{
const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
return
cs != 0 &&
cs->mbmaxlen == 1 && // extra limitation
cs->cset != 0 &&
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->coll->strnncollsp != 0 &&
cs->strxfrm_multiply == 1; // current limitation
}
break;
default:
return true;
}
return false;
}

#ifdef NDB_SQL_UTIL_TEST

#include <NdbTick.h>

@@ -556,6 +669,7 @@ const Testcase testcase[] = {
int
main(int argc, char** argv)
{
ndb_init(); // for charsets
unsigned count = argc > 1 ? atoi(argv[1]) : 1000000;
ndbout_c("count = %u", count);
assert(count != 0);
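Illustration (not part of the patch): cmpChar and cmpText above now delegate to the collation's strnncollsp handler, which compares values as space-padded strings. The sketch below is a minimal standalone C++ illustration of that padded-comparison semantics for a hypothetical single-byte, case-insensitive collation; it does not use the MySQL charset API, and all names in it are invented for the example.

// Standalone sketch of space-padded ("PAD SPACE") comparison semantics,
// roughly what cmpChar now obtains from the charset's strnncollsp for a
// simple single-byte, case-insensitive collation.  Illustration only.
#include <cctype>
#include <cstddef>

static int padded_compare(const unsigned char* a, size_t alen,
                          const unsigned char* b, size_t blen)
{
  size_t n = alen < blen ? alen : blen;
  for (size_t i = 0; i < n; i++) {
    int ca = std::toupper(a[i]);
    int cb = std::toupper(b[i]);
    if (ca != cb)
      return ca < cb ? -1 : +1;
  }
  // trailing blanks on the longer value do not make the values differ
  int sign = (alen > blen) ? +1 : -1;
  const unsigned char* rest = (alen > blen) ? a + n : b + n;
  size_t restlen = (alen > blen ? alen : blen) - n;
  for (size_t i = 0; i < restlen; i++) {
    int c = std::toupper(rest[i]);
    if (c != ' ')
      return (c < ' ') ? -sign : sign;
  }
  return 0;
}

Under these semantics 'aAa' and 'aaa  ' compare equal, which is why the ndb_charset test above reports duplicate-key errors for latin1_swedish_ci.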
@@ -16,6 +16,7 @@

#include <ndb_global.h>
#include <my_pthread.h>

#include <SocketServer.hpp>

@@ -46,7 +47,7 @@ SocketServer::~SocketServer() {
}

bool
SocketServer::tryBind(unsigned short port, const char * intface) const {
SocketServer::tryBind(unsigned short port, const char * intface) {
struct sockaddr_in servaddr;
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;

@@ -83,7 +84,8 @@ bool
SocketServer::setup(SocketServer::Service * service,
unsigned short port,
const char * intface){

DBUG_ENTER("SocketServer::setup");
DBUG_PRINT("enter",("interface=%s, port=%d", intface, port));
struct sockaddr_in servaddr;
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;

@@ -92,36 +94,44 @@ SocketServer::setup(SocketServer::Service * service,

if(intface != 0){
if(Ndb_getInAddr(&servaddr.sin_addr, intface))
return false;
DBUG_RETURN(false);
}

const NDB_SOCKET_TYPE sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock == NDB_INVALID_SOCKET) {
return false;
DBUG_PRINT("error",("socket() - %d - %s",
errno, strerror(errno)));
DBUG_RETURN(false);
}

const int on = 1;
if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(const char*)&on, sizeof(on)) == -1) {
DBUG_PRINT("error",("getsockopt() - %d - %s",
errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
return false;
DBUG_RETURN(false);
}

if (bind(sock, (struct sockaddr*) &servaddr, sizeof(servaddr)) == -1) {
DBUG_PRINT("error",("bind() - %d - %s",
errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
return false;
DBUG_RETURN(false);
}

if (listen(sock, m_maxSessions) == -1){
DBUG_PRINT("error",("listen() - %d - %s",
errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
return false;
DBUG_RETURN(false);
}

ServiceInstance i;
i.m_socket = sock;
i.m_service = service;
m_services.push_back(i);
return true;
DBUG_RETURN(true);
}

void

@@ -177,8 +187,9 @@ void*
socketServerThread_C(void* _ss){
SocketServer * ss = (SocketServer *)_ss;

my_thread_init();
ss->doRun();

my_thread_end();
NdbThread_Exit(0);
return 0;
}

@@ -287,8 +298,10 @@ void*
sessionThread_C(void* _sc){
SocketServer::Session * si = (SocketServer::Session *)_sc;

my_thread_init();
if(!transfer(si->m_socket)){
si->m_stopped = true;
my_thread_end();
NdbThread_Exit(0);
return 0;
}

@@ -301,6 +314,7 @@ sessionThread_C(void* _sc){
}

si->m_stopped = true;
my_thread_end();
NdbThread_Exit(0);
return 0;
}
@@ -14,17 +14,16 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#include <LogLevel.hpp>
#include <ndb_global.h>
#include <my_sys.h>

const LogLevel::LogLevelCategoryName LogLevel::LOGLEVEL_CATEGORY_NAME[] = {
{ "LogLevelStartup" },
{ "LogLevelShutdown" },
{ "LogLevelStatistic" },
{ "LogLevelCheckpoint" },
{ "LogLevelNodeRestart" },
{ "LogLevelConnection" },
{ "LogLevelError" },
{ "LogLevelWarning" },
{ "LogLevelInfo" },
{ "LogLevelGrep" }
};
int
ndb_init()
{
if (my_init()) {
const char* err = "my_init() failed - exit\n";
write(2, err, strlen(err));
exit(1);
}
return 0;
}
@@ -47,7 +47,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
fun, \
desc }
desc, 0 }

#define CPCD_ARG(name, type, opt, desc) \
{ name, \

@@ -58,7 +58,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
desc }
desc, 0 }

#define CPCD_ARG2(name, type, opt, min, max, desc) \
{ name, \

@@ -69,7 +69,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
min, max, \
0, \
desc }
desc, 0 }

#define CPCD_END() \
{ 0, \

@@ -80,7 +80,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
0 }
0, 0 }

#define CPCD_CMD_ALIAS(name, realName, fun) \
{ name, \

@@ -91,7 +91,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
0 }
0, 0 }

#define CPCD_ARG_ALIAS(name, realName, fun) \
{ name, \

@@ -102,7 +102,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
0 }
0, 0 }

const
ParserRow<CPCDAPISession> commands[] =
@@ -378,7 +378,7 @@ CPCD::getProcessList() {
}

void
CPCD::RequestStatus::err(enum RequestStatusCode status, char *msg) {
CPCD::RequestStatus::err(enum RequestStatusCode status, const char *msg) {
m_status = status;
snprintf(m_errorstring, sizeof(m_errorstring), "%s", msg);
}

@@ -91,7 +91,7 @@ public:
RequestStatus() { m_status = OK; m_errorstring[0] = '\0'; };

/** @brief Sets an errorcode and a printable message */
void err(enum RequestStatusCode, char *);
void err(enum RequestStatusCode, const char *);

/** @brief Returns the error message */
char *getErrMsg() { return m_errorstring; };

@@ -28,12 +28,12 @@

#include "common.hpp"

static char *work_dir = CPCD_DEFAULT_WORK_DIR;
static const char *work_dir = CPCD_DEFAULT_WORK_DIR;
static int port = CPCD_DEFAULT_TCP_PORT;
static int use_syslog = 0;
static char *logfile = NULL;
static char *config_file = CPCD_DEFAULT_CONFIG_FILE;
static char *user = 0;
static const char *logfile = NULL;
static const char *config_file = CPCD_DEFAULT_CONFIG_FILE;
static const char *user = 0;

static struct getargs args[] = {
{ "work-dir", 'w', arg_string, &work_dir,
@@ -40,6 +40,7 @@
#include <signaldata/BackupImpl.hpp>
#include <signaldata/BackupSignalData.hpp>
#include <signaldata/BackupContinueB.hpp>
#include <signaldata/EventReport.hpp>

#include <signaldata/UtilSequence.hpp>

@@ -944,6 +945,13 @@ Backup::sendBackupRef(BlockReference senderRef, Signal *signal,
ref->errorCode = errorCode;
ref->masterRef = numberToRef(BACKUP, getMasterNodeId());
sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB);

if(errorCode != BackupRef::IAmNotMaster){
signal->theData[0] = EventReport::BackupFailedToStart;
signal->theData[1] = senderRef;
signal->theData[2] = errorCode;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
}
}

void

@@ -1226,7 +1234,13 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
conf->nodes = ptr.p->nodes;
sendSignal(ptr.p->clientRef, GSN_BACKUP_CONF, signal,
BackupConf::SignalLength, JBB);

signal->theData[0] = EventReport::BackupStarted;
signal->theData[1] = ptr.p->clientRef;
signal->theData[2] = ptr.p->backupId;
ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3);
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB);

ptr.p->masterData.state.setState(DEFINED);
/**
* Prepare Trig

@@ -2069,6 +2083,18 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
rep->nodes = ptr.p->nodes;
sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal,
BackupCompleteRep::SignalLength, JBB);

signal->theData[0] = EventReport::BackupCompleted;
signal->theData[1] = ptr.p->clientRef;
signal->theData[2] = ptr.p->backupId;
signal->theData[3] = ptr.p->startGCP;
signal->theData[4] = ptr.p->stopGCP;
signal->theData[5] = ptr.p->noOfBytes;
signal->theData[6] = ptr.p->noOfRecords;
signal->theData[7] = ptr.p->noOfLogBytes;
signal->theData[8] = ptr.p->noOfLogRecords;
ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9);
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB);
}

/*****************************************************************************

@@ -2259,6 +2285,12 @@ Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr)
rep->reason = ptr.p->errorCode;
sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal,
BackupAbortRep::SignalLength, JBB);

signal->theData[0] = EventReport::BackupAborted;
signal->theData[1] = ptr.p->clientRef;
signal->theData[2] = ptr.p->backupId;
signal->theData[3] = ptr.p->errorCode;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
}//if

// ptr.p->masterData.state.setState(INITIAL);
@@ -48,6 +48,7 @@ static Uint32 logEntryNo;
int
main(int argc, const char * argv[]){

ndb_init();
if(argc <= 1){
printf("Usage: %s <filename>", argv[0]);
exit(1);

@@ -206,6 +206,7 @@ free_data_callback()
int
main(int argc, const char** argv)
{
ndb_init();
if (!readArguments(argc, argv))
{
return -1;

@@ -331,7 +332,7 @@ main(int argc, const char** argv)

for (i= 0; i < g_consumers.size(); i++)
g_consumers[i]->endOfTuples();

RestoreLogIterator logIter(metaData);
if (!logIter.readHeader())
{

@@ -357,7 +358,7 @@ main(int argc, const char** argv)
}
}
clearConsumers();
return 1;
return 0;
} // main

template class Vector<BackupConsumer*>;
@@ -97,7 +97,7 @@ Cmvmi::Cmvmi(const Configuration & conf) :
const ndb_mgm_configuration_iterator * db = theConfig.getOwnConfigIterator();
for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
Uint32 logLevel;
if(!ndb_mgm_get_int_parameter(db, LogLevel::MIN_LOGLEVEL_ID+j, &logLevel)){
if(!ndb_mgm_get_int_parameter(db, CFG_MIN_LOGLEVEL+j, &logLevel)){
clogLevel.setLogLevel((LogLevel::EventCategory)j,
logLevel);
}

@@ -169,9 +169,9 @@ void Cmvmi::execSET_LOGLEVELORD(Signal* signal)
jamEntry();

for(unsigned int i = 0; i<llOrd->noOfEntries; i++){
category = (LogLevel::EventCategory)llOrd->theCategories[i];
level = llOrd->theLevels[i];

category = (LogLevel::EventCategory)(llOrd->theData[i] >> 16);
level = llOrd->theData[i] & 0xFFFF;

clogLevel.setLogLevel(category, level);
}
}//execSET_LOGLEVELORD()

@@ -196,10 +196,10 @@ void Cmvmi::execEVENT_REP(Signal* signal)
Uint32 threshold = 16;
LogLevel::EventCategory eventCategory = (LogLevel::EventCategory)0;

for(unsigned int i = 0; i< EventLogger::matrixSize; i++){
if(EventLogger::matrix[i].eventType == eventType){
eventCategory = EventLogger::matrix[i].eventCategory;
threshold = EventLogger::matrix[i].threshold;
for(unsigned int i = 0; i< EventLoggerBase::matrixSize; i++){
if(EventLoggerBase::matrix[i].eventType == eventType){
eventCategory = EventLoggerBase::matrix[i].eventCategory;
threshold = EventLoggerBase::matrix[i].threshold;
break;
}
}

@@ -250,17 +250,7 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
sendSignal(subReq->blockRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB);
return;
}
/**
* If it's a new subscription, clear the loglevel
*
* Clear only if noOfEntries is 0, this is needed beacuse we set
* the default loglevels for the MGMT nodes during the inital connect phase.
* See reportConnected().
*/
if (subReq->noOfEntries == 0){
ptr.p->logLevel.clear();
}

ptr.p->logLevel.clear();
ptr.p->blockRef = subReq->blockRef;
}

@@ -276,10 +266,9 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
LogLevel::EventCategory category;
Uint32 level = 0;
for(Uint32 i = 0; i<subReq->noOfEntries; i++){
category = (LogLevel::EventCategory)subReq->theCategories[i];
level = subReq->theLevels[i];
ptr.p->logLevel.setLogLevel(category,
level);
category = (LogLevel::EventCategory)(subReq->theData[i] >> 16);
level = subReq->theData[i] & 0xFFFF;
ptr.p->logLevel.setLogLevel(category, level);
}
}

@@ -384,11 +373,6 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)

globalTransporterRegistry.setIOState(i, HaltIO);
globalTransporterRegistry.do_disconnect(i);

/**
* Cancel possible event subscription
*/
cancelSubscription(i);
}
}
if (failNo != 0) {

@@ -494,6 +478,8 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal)
globalTransporterRegistry.do_connect(hostId);
}

cancelSubscription(hostId);

signal->theData[0] = EventReport::Disconnected;
signal->theData[1] = hostId;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);

@@ -539,20 +525,6 @@ void Cmvmi::execCONNECT_REP(Signal *signal){
if(type == NodeInfo::MGM){
jam();
globalTransporterRegistry.setIOState(hostId, NoHalt);

EventSubscribeReq* dst = (EventSubscribeReq *)&signal->theData[0];

for (Uint32 i = 0; i < EventLogger::defEventLogMatrixSize; i++) {
dst->theCategories[i] = EventLogger::defEventLogMatrix[i].eventCategory;
dst->theLevels[i] = EventLogger::defEventLogMatrix[i].threshold;
}

dst->noOfEntries = EventLogger::defEventLogMatrixSize;
/* The BlockNumber is hardcoded as 1 in MgmtSrvr */
dst->blockRef = numberToRef(MIN_API_BLOCK_NO, hostId);

execEVENT_SUBSCRIBE_REQ(signal);

}

//------------------------------------------
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#include <ndb_global.h>
#include <my_sys.h>

#define DBDICT_C
#include "Dbdict.hpp"

@@ -4100,6 +4101,8 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {

req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
req->noOfNewAttr = 0;
// noOfCharsets passed to TUP in upper half
req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
req->checksumIndicator = 1;
req->noOfAttributeGroups = 1;
req->GCPIndicator = 0;

@@ -4161,6 +4164,8 @@ Dbdict::sendLQHADDATTRREQ(Signal* signal,
entry.attrId = attrPtr.p->attributeId;
entry.attrDescriptor = attrPtr.p->attributeDescriptor;
entry.extTypeInfo = attrPtr.p->extType;
// charset number passed to TUP, TUX in upper half
entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF);
if (tabPtr.p->isIndex()) {
Uint32 primaryAttrId;
if (attrPtr.p->nextAttrInTable != RNIL) {

@@ -4697,6 +4702,8 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
Uint32 keyLength = 0;
Uint32 attrCount = tablePtr.p->noOfAttributes;
Uint32 nullCount = 0;
Uint32 noOfCharsets = 0;
Uint16 charsets[128];
Uint32 recordLength = 0;
AttributeRecordPtr attrPtr;
c_attributeRecordHash.removeAll();

@@ -4751,6 +4758,31 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
attrPtr.p->extScale = attrDesc.AttributeExtScale;
attrPtr.p->extLength = attrDesc.AttributeExtLength;
// charset in upper half of precision
unsigned csNumber = (attrPtr.p->extPrecision >> 16);
if (csNumber != 0) {
CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
if (cs == NULL) {
parseP->errorCode = CreateTableRef::InvalidCharset;
parseP->errorLine = __LINE__;
return;
}
unsigned i = 0;
while (i < noOfCharsets) {
if (charsets[i] == csNumber)
break;
i++;
}
if (i == noOfCharsets) {
noOfCharsets++;
if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) {
parseP->errorCode = CreateTableRef::InvalidFormat;
parseP->errorLine = __LINE__;
return;
}
charsets[i] = csNumber;
}
}

/**
* Ignore incoming old-style type and recompute it.

@@ -4814,6 +4846,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,

tablePtr.p->noOfPrimkey = keyCount;
tablePtr.p->noOfNullAttr = nullCount;
tablePtr.p->noOfCharsets = noOfCharsets;
tablePtr.p->tupKeyLength = keyLength;

tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS,

@@ -6317,6 +6350,8 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::AttributeStoredInd, (Uint32)DictTabInfo::Stored);
// ext type overrides
w.add(DictTabInfo::AttributeExtType, aRec->extType);
w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision);
w.add(DictTabInfo::AttributeExtScale, aRec->extScale);
w.add(DictTabInfo::AttributeExtLength, aRec->extLength);
w.add(DictTabInfo::AttributeEnd, (Uint32)true);
}
@@ -455,7 +455,7 @@ public:
Uint16 totalAttrReceived;
Uint16 fragCopyCreation;
Uint16 noOfKeyAttr;
Uint16 noOfNewAttr;
Uint32 noOfNewAttr; // noOfCharsets in upper half
Uint16 noOfAttributeGroups;
Uint16 lh3DistrBits;
Uint16 tableType;

@@ -1444,6 +1444,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
tupreq->notused1 = 0;
tupreq->attrId = attrId;
tupreq->attrDescriptor = entry.attrDescriptor;
tupreq->extTypeInfo = entry.extTypeInfo;
sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ,
signal, TupAddAttrReq::SignalLength, JBB);
return;

@@ -7699,6 +7700,7 @@ void Dblqh::accScanConfScanLab(Signal* signal)
ndbrequire(sz == boundAiLength);
EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO,
signal, TuxBoundInfo::SignalLength + boundAiLength);
jamEntry();
if (req->errorCode != 0) {
jam();
/*
@@ -22,26 +22,59 @@ class AttributeOffset {

private:
static void setOffset(Uint32 & desc, Uint32 offset);
static void setCharsetPos(Uint32 & desc, Uint32 offset);
static void setNullFlagPos(Uint32 & desc, Uint32 offset);

static Uint32 getOffset(const Uint32 &);
static bool getCharsetFlag(const Uint32 &);
static Uint32 getCharsetPos(const Uint32 &);
static Uint32 getNullFlagPos(const Uint32 &);
static Uint32 getNullFlagOffset(const Uint32 &);
static Uint32 getNullFlagBitOffset(const Uint32 &);
static bool isNULL(const Uint32 &, const Uint32 &);
};

#define AO_ATTRIBUTE_OFFSET_MASK (0xffff)
#define AO_NULL_FLAG_POS_MASK (0x7ff)
#define AO_NULL_FLAG_POS_SHIFT (21)
#define AO_NULL_FLAG_WORD_MASK (31)
#define AO_NULL_FLAG_OFFSET_SHIFT (5)
/**
* Allow for 4096 attributes, all nullable, and for 128 different
* character sets.
*
* a = Attribute offset - 11 bits 0-10 ( addr word in 8 kb )
* c = Has charset flag 1 bits 11-11
* s = Charset pointer position - 7 bits 12-18 ( in table descriptor )
* f = Null flag offset in word - 5 bits 20-24 ( address 32 bits )
* w = Null word offset - 7 bits 25-32 ( f+w addr 4096 attrs )
*
* 1111111111222222222233
* 01234567890123456789012345678901
* aaaaaaaaaaacsssssss fffffwwwwwww
*/

#define AO_ATTRIBUTE_OFFSET_SHIFT 0
#define AO_ATTRIBUTE_OFFSET_MASK 0x7ff

#define AO_CHARSET_FLAG_SHIFT 11
#define AO_CHARSET_POS_SHIFT 12
#define AO_CHARSET_POS_MASK 127

#define AO_NULL_FLAG_POS_MASK 0xfff // f+w
#define AO_NULL_FLAG_POS_SHIFT 20

#define AO_NULL_FLAG_WORD_MASK 31 // f
#define AO_NULL_FLAG_OFFSET_SHIFT 5

inline
void
AttributeOffset::setOffset(Uint32 & desc, Uint32 offset){
ASSERT_MAX(offset, AO_ATTRIBUTE_OFFSET_MASK, "AttributeOffset::setOffset");
desc |= offset;
desc |= (offset << AO_ATTRIBUTE_OFFSET_SHIFT);
}

inline
void
AttributeOffset::setCharsetPos(Uint32 & desc, Uint32 offset) {
ASSERT_MAX(offset, AO_CHARSET_POS_MASK, "AttributeOffset::setCharsetPos");
desc |= (1 << AO_CHARSET_FLAG_SHIFT);
desc |= (offset << AO_CHARSET_POS_SHIFT);
}

inline

@@ -55,7 +88,21 @@ inline
Uint32
AttributeOffset::getOffset(const Uint32 & desc)
{
return desc & AO_ATTRIBUTE_OFFSET_MASK;
return (desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK;
}

inline
bool
AttributeOffset::getCharsetFlag(const Uint32 & desc)
{
return (desc >> AO_CHARSET_FLAG_SHIFT) & 1;
}

inline
Uint32
AttributeOffset::getCharsetPos(const Uint32 & desc)
{
return (desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK;
}

inline
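Illustration (not part of the patch): the new AttributeOffset layout above packs the word offset, a charset flag, the charset position and the null-flag position into one descriptor word. The sketch below only re-uses the shift and mask values shown in the diff to demonstrate a pack/unpack round trip with plain uint32_t; it is not the NDB header itself, and the sample offset and charset position are arbitrary.

// Standalone sketch of the descriptor-word packing scheme described above.
#include <cassert>
#include <cstdint>

static const uint32_t AO_ATTRIBUTE_OFFSET_SHIFT = 0;
static const uint32_t AO_ATTRIBUTE_OFFSET_MASK  = 0x7ff;  // 11 bits
static const uint32_t AO_CHARSET_FLAG_SHIFT     = 11;
static const uint32_t AO_CHARSET_POS_SHIFT      = 12;
static const uint32_t AO_CHARSET_POS_MASK       = 127;    // 7 bits

int main()
{
  uint32_t desc = 0;
  uint32_t offset = 1234;      // word offset inside the tuple (fits in 11 bits)
  uint32_t charsetPos = 5;     // index into the table's charsetArray

  // pack
  desc |= (offset << AO_ATTRIBUTE_OFFSET_SHIFT);
  desc |= (1u << AO_CHARSET_FLAG_SHIFT);
  desc |= (charsetPos << AO_CHARSET_POS_SHIFT);

  // unpack and verify the round trip
  assert(((desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK) == offset);
  assert(((desc >> AO_CHARSET_FLAG_SHIFT) & 1) == 1);
  assert(((desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK) == charsetPos);
  return 0;
}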
@@ -502,6 +502,7 @@ struct Fragoperrec {
Uint32 attributeCount;
Uint32 freeNullBit;
Uint32 noOfNewAttrCount;
Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;

@@ -785,6 +786,7 @@ struct Tablerec {

ReadFunction* readFunctionArray;
UpdateFunction* updateFunctionArray;
CHARSET_INFO** charsetArray;

Uint32 readKeyArray;
Uint32 tabDescriptor;

@@ -796,6 +798,7 @@ struct Tablerec {
Uint16 tupheadsize;
Uint16 noOfAttr;
Uint16 noOfKeyAttr;
Uint16 noOfCharsets;
Uint16 noOfNewAttr;
Uint16 noOfNullAttr;
Uint16 noOfAttributeGroups;

@@ -1001,17 +1004,20 @@ public:
void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node);

/*
* TUX reads primary table attributes for index keys. Input is
* attribute ids in AttributeHeader format. Output is pointers to
* attribute data within tuple or 0 for NULL value.
* TUX reads primary table attributes for index keys. Tuple is
* specified by location of original tuple and version number. Input
* is attribute ids in AttributeHeader format. Output is attribute
* data with headers. Uses readAttributes with xfrm option set.
* Returns number of words or negative (-terrorCode) on error.
*/
void tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData);
int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut);

/*
* TUX reads primary key without headers into an array of words. Used
* for md5 summing and when returning keyinfo.
* for md5 summing and when returning keyinfo. Returns number of
* words or negative (-terrorCode) on error.
*/
void tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData);
int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut);

/*
* TUX checks if tuple is visible to scan.

@@ -1365,10 +1371,11 @@ private:
//------------------------------------------------------------------
int readAttributes(Page* const pagePtr,
Uint32 TupHeadOffset,
Uint32* inBuffer,
const Uint32* inBuffer,
Uint32 inBufLen,
Uint32* outBuffer,
Uint32 TmaxRead);
Uint32 TmaxRead,
bool xfrmFlag);

//------------------------------------------------------------------
//------------------------------------------------------------------

@@ -1614,6 +1621,20 @@ private:
Uint32 attrDescriptor,
Uint32 attrDes2);

// *****************************************************************
// Read char routines optionally (tXfrmFlag) apply strxfrm
// *****************************************************************

bool readCharNotNULL(Uint32* outBuffer,
AttributeHeader* ahOut,
Uint32 attrDescriptor,
Uint32 attrDes2);

bool readCharNULLable(Uint32* outBuffer,
AttributeHeader* ahOut,
Uint32 attrDescriptor,
Uint32 attrDes2);

//------------------------------------------------------------------
//------------------------------------------------------------------
bool nullFlagCheck(Uint32 attrDes2);

@@ -1909,7 +1930,8 @@ private:
void updatePackedList(Signal* signal, Uint16 ahostIndex);

void setUpDescriptorReferences(Uint32 descriptorReference,
Tablerec* const regTabPtr);
Tablerec* const regTabPtr,
const Uint32* offset);
void setUpKeyArray(Tablerec* const regTabPtr);
bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);

@@ -2098,7 +2120,8 @@ private:
//-----------------------------------------------------------------------------

// Public methods
Uint32 allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups);
Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset);
Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset);
void freeTabDescr(Uint32 retRef, Uint32 retNo);
Uint32 getTabDescrWord(Uint32 index);
void setTabDescrWord(Uint32 index, Uint32 word);

@@ -2217,6 +2240,7 @@ private:
Uint32 tMaxRead;
Uint32 tOutBufIndex;
Uint32* tTupleHeader;
bool tXfrmFlag;

// updateAttributes module
Uint32 tInBufIndex;
@@ -903,7 +903,8 @@ int Dbtup::handleReadReq(Signal* signal,
&cinBuffer[0],
regOperPtr->attrinbufLen,
dst,
dstLen);
dstLen,
false);
if (TnoOfDataRead != (Uint32)-1) {
/* ------------------------------------------------------------------------- */
// We have read all data into coutBuffer. Now send it to the API.

@@ -1274,7 +1275,8 @@ int Dbtup::interpreterStartLab(Signal* signal,
&cinBuffer[5],
RinitReadLen,
&dst[0],
dstLen);
dstLen,
false);
if (TnoDataRW != (Uint32)-1) {
RattroutCounter = TnoDataRW;
RinstructionCounter += RinitReadLen;

@@ -1347,7 +1349,8 @@ int Dbtup::interpreterStartLab(Signal* signal,
&cinBuffer[RinstructionCounter],
RfinalRLen,
&dst[RattroutCounter],
(dstLen - RattroutCounter));
(dstLen - RattroutCounter),
false);
if (TnoDataRW != (Uint32)-1) {
RattroutCounter += TnoDataRW;
} else {

@@ -1487,7 +1490,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
&theAttrinfo,
(Uint32)1,
&TregMemBuffer[theRegister],
(Uint32)3);
(Uint32)3,
false);
if (TnoDataRW == 2) {
/* ------------------------------------------------------------- */
// Two words read means that we get the instruction plus one 32

@@ -1833,7 +1837,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
tmpArea, tmpAreaSz);
tmpArea, tmpAreaSz,
false);

if (TnoDataR == -1) {
jam();

@@ -1929,7 +1934,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
tmpArea, tmpAreaSz);
tmpArea, tmpAreaSz,
false);

if (TnoDataR == -1) {
jam();

@@ -1957,7 +1963,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
tmpArea, tmpAreaSz);
tmpArea, tmpAreaSz,
false);

if (TnoDataR == -1) {
jam();
@@ -1067,6 +1067,7 @@ Dbtup::initTab(Tablerec* const regTabPtr)
}//for
regTabPtr->readFunctionArray = NULL;
regTabPtr->updateFunctionArray = NULL;
regTabPtr->charsetArray = NULL;

regTabPtr->tabDescriptor = RNIL;
regTabPtr->attributeGroupDescriptor = RNIL;
@@ -112,10 +112,11 @@ Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& no
node = &pagePtr.p->pageWord[pageOffset] + attrDataOffset;
}

void
Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData)
int
Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut)
{
ljamEntry();
// use own variables instead of globals
FragrecordPtr fragPtr;
fragPtr.i = fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);

@@ -134,6 +135,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
while (true) {
ptrCheckGuard(opPtr, cnoOfOprec, operationrec);
if (opPtr.p->realPageIdC != RNIL) {
// update page and offset
pagePtr.i = opPtr.p->realPageIdC;
pageOffset = opPtr.p->pageOffsetC;
ptrCheckGuard(pagePtr, cnoOfPage, page);

@@ -147,33 +149,34 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
}
}
const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset];
for (Uint32 i = 0; i < numAttrs; i++) {
AttributeHeader ah(attrIds[i]);
const Uint32 attrId = ah.getAttributeId();
const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
const Uint32 desc1 = tableDescriptor[index].tabDescr;
const Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
if (AttributeDescriptor::getNullable(desc1)) {
Uint32 offset = AttributeOffset::getNullFlagOffset(desc2);
ndbrequire(offset < tablePtr.p->tupNullWords);
offset += tablePtr.p->tupNullIndex;
ndbrequire(offset < tablePtr.p->tupheadsize);
if (AttributeOffset::isNULL(tupleHeader[offset], desc2)) {
ljam();
attrData[i] = 0;
continue;
}
}
attrData[i] = tupleHeader + AttributeOffset::getOffset(desc2);
// read key attributes from found tuple version
// save globals
TablerecPtr tabptr_old = tabptr;
FragrecordPtr fragptr_old = fragptr;
OperationrecPtr operPtr_old = operPtr;
// new globals
tabptr = tablePtr;
fragptr = fragPtr;
operPtr.i = RNIL;
operPtr.p = NULL;
// do it
int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true);
// restore globals
tabptr = tabptr_old;
fragptr = fragptr_old;
operPtr = operPtr_old;
// done
if (ret == (Uint32)-1) {
ret = terrorCode ? (-(int)terrorCode) : -1;
}
return ret;
}

void
Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData)
int
Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut)
{
ljamEntry();
// use own variables instead of globals
FragrecordPtr fragPtr;
fragPtr.i = fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);

@@ -184,25 +187,45 @@ Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pk
pagePtr.i = pageId;
ptrCheckGuard(pagePtr, cnoOfPage, page);
const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
const Uint32* attrIds = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset];
Uint32 size = 0;
for (Uint32 i = 0; i < numAttrs; i++) {
AttributeHeader ah(attrIds[i]);
const Uint32 attrId = ah.getAttributeId();
const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
const Uint32 desc1 = tableDescriptor[index].tabDescr;
const Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
ndbrequire(! AttributeDescriptor::getNullable(desc1));
const Uint32 attrSize = AttributeDescriptor::getSizeInWords(desc1);
const Uint32* attrData = tupleHeader + AttributeOffset::getOffset(desc2);
for (Uint32 j = 0; j < attrSize; j++) {
pkData[size + j] = attrData[j];
const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
// read pk attributes from original tuple
// save globals
TablerecPtr tabptr_old = tabptr;
FragrecordPtr fragptr_old = fragptr;
OperationrecPtr operPtr_old = operPtr;
// new globals
tabptr = tablePtr;
fragptr = fragPtr;
operPtr.i = RNIL;
operPtr.p = NULL;
// do it
int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true);
// restore globals
tabptr = tabptr_old;
fragptr = fragptr_old;
operPtr = operPtr_old;
// done
if (ret != (Uint32)-1) {
// remove headers
Uint32 n = 0;
Uint32 i = 0;
while (n < numAttrs) {
const AttributeHeader ah(dataOut[i]);
Uint32 size = ah.getDataSize();
ndbrequire(size != 0);
for (Uint32 j = 0; j < size; j++) {
dataOut[i + j - n] = dataOut[i + j + 1];
}
n += 1;
i += 1 + size;
}
size += attrSize;
ndbrequire(i == ret);
ret -= numAttrs;
} else {
ret = terrorCode ? (-(int)terrorCode) : -1;
}
*pkSize = size;
return ret;
}

bool
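Illustration (not part of the patch): tuxReadPk above now reads key attributes with an AttributeHeader word in front of each value and then compacts the buffer in place. The sketch below mimics that header-stripping loop with a mock layout in which each header word simply holds the data size in words; the buffer contents and names are invented for the example.

// Standalone sketch of the in-place header stripping done in tuxReadPk.
// Mock layout: each attribute is <length word> followed by <length> data words.
#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
  // two attributes: sizes 2 and 1, each preceded by its header word
  std::vector<uint32_t> buf = { 2, 10, 11, 1, 20 };
  const uint32_t numAttrs = 2;

  uint32_t n = 0, i = 0;
  while (n < numAttrs) {
    uint32_t size = buf[i];            // in NDB this comes from AttributeHeader
    for (uint32_t j = 0; j < size; j++)
      buf[i + j - n] = buf[i + j + 1]; // shift data left over the headers seen so far
    n += 1;
    i += 1 + size;
  }
  buf.resize(i - numAttrs);            // words read minus one header per attribute
  assert((buf == std::vector<uint32_t>{10, 11, 20}));
  return 0;
}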
@@ -20,12 +20,14 @@
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#include <signaldata/TupFrag.hpp>
#include <signaldata/FsConf.hpp>
#include <signaldata/FsRemoveReq.hpp>
#include <signaldata/DropTab.hpp>
#include <signaldata/AlterTab.hpp>
#include <AttributeDescriptor.hpp>
#include "AttributeOffset.hpp"
#include <my_sys.h>

#define ljam() { jamLine(20000 + __LINE__); }
#define ljamEntry() { jamEntryLine(20000 + __LINE__); }

@@ -52,7 +54,10 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
/* Uint32 schemaVersion = signal->theData[8];*/
Uint32 noOfKeyAttr = signal->theData[9];

Uint32 noOfNewAttr = signal->theData[10];
Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF);
/* DICT sends number of character sets in upper half */
Uint32 noOfCharsets = (signal->theData[10] >> 16);

Uint32 checksumIndicator = signal->theData[11];
Uint32 noOfAttributeGroups = signal->theData[12];
Uint32 globalCheckpointIdIndicator = signal->theData[13];

@@ -75,6 +80,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
fragOperPtr.p->attributeCount = noOfAttributes;
fragOperPtr.p->freeNullBit = noOfNullAttr;
fragOperPtr.p->noOfNewAttrCount = noOfNewAttr;
fragOperPtr.p->charsetIndex = 0;

ndbrequire(reqinfo == ZADDFRAG);

@@ -156,6 +162,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regTabPtr.p->tupheadsize = regTabPtr.p->tupGCPIndex;

regTabPtr.p->noOfKeyAttr = noOfKeyAttr;
regTabPtr.p->noOfCharsets = noOfCharsets;
regTabPtr.p->noOfAttr = noOfAttributes;
regTabPtr.p->noOfNewAttr = noOfNewAttr;
regTabPtr.p->noOfNullAttr = noOfNullAttr;

@@ -163,13 +170,14 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)

regTabPtr.p->notNullAttributeMask.clear();

Uint32 tableDescriptorRef = allocTabDescr(noOfAttributes, noOfKeyAttr, noOfAttributeGroups);
Uint32 offset[10];
Uint32 tableDescriptorRef = allocTabDescr(regTabPtr.p, offset);
if (tableDescriptorRef == RNIL) {
ljam();
fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
return;
}//if
setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p);
setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset);
} else {
ljam();
fragOperPtr.p->definingFragment = false;

@@ -251,6 +259,9 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
Uint32 attrId = signal->theData[2];
Uint32 attrDescriptor = signal->theData[3];
// DICT sends extended type (ignored) and charset number
Uint32 extType = (signal->theData[4] & 0xFF);
Uint32 csNumber = (signal->theData[4] >> 16);

regTabPtr.i = fragOperPtr.p->tableidFrag;
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);

@@ -304,6 +315,29 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
} else {
ndbrequire(false);
}//if
if (csNumber != 0) {
CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
if (cs == NULL) {
ljam();
terrorCode = TupAddAttrRef::InvalidCharset;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
}
Uint32 i = 0;
while (i < fragOperPtr.p->charsetIndex) {
ljam();
if (regTabPtr.p->charsetArray[i] == cs)
break;
i++;
}
if (i == fragOperPtr.p->charsetIndex) {
ljam();
fragOperPtr.p->charsetIndex++;
}
ndbrequire(i < regTabPtr.p->noOfCharsets);
regTabPtr.p->charsetArray[i] = cs;
AttributeOffset::setCharsetPos(attrDes2, i);
}
setTabDescrWord(firstTabDesIndex + 1, attrDes2);

if (regTabPtr.p->tupheadsize > MAX_TUPLE_SIZE_IN_WORDS) {

@@ -340,20 +374,28 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
return;
}//Dbtup::execTUP_ADD_ATTRREQ()

void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference,
Tablerec* const regTabPtr)
{
Uint32 noOfAttributes = regTabPtr->noOfAttr;
descriptorReference += ZTD_SIZE;
ReadFunction * tmp = (ReadFunction*)&tableDescriptor[descriptorReference].tabDescr;
regTabPtr->readFunctionArray = tmp;
regTabPtr->updateFunctionArray = (UpdateFunction*)(tmp + noOfAttributes);
/*
* Descriptor has these parts:
*
* 0 readFunctionArray ( one for each attribute )
* 1 updateFunctionArray ( ditto )
* 2 charsetArray ( pointers to distinct CHARSET_INFO )
* 3 readKeyArray ( attribute ids of keys )
* 4 attributeGroupDescriptor ( currently size 1 but unused )
* 5 tabDescriptor ( attribute descriptors, each ZAD_SIZE )
*/

TableDescriptor * start = &tableDescriptor[descriptorReference];
TableDescriptor * end = (TableDescriptor*)(tmp + 2 * noOfAttributes);
regTabPtr->readKeyArray = descriptorReference + (end - start);
regTabPtr->attributeGroupDescriptor = regTabPtr->readKeyArray + regTabPtr->noOfKeyAttr;
regTabPtr->tabDescriptor = regTabPtr->attributeGroupDescriptor + regTabPtr->noOfAttributeGroups;
void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference,
Tablerec* const regTabPtr,
const Uint32* offset)
{
Uint32* desc = &tableDescriptor[descriptorReference].tabDescr;
regTabPtr->readFunctionArray = (ReadFunction*)(desc + offset[0]);
regTabPtr->updateFunctionArray = (UpdateFunction*)(desc + offset[1]);
regTabPtr->charsetArray = (CHARSET_INFO**)(desc + offset[2]);
regTabPtr->readKeyArray = descriptorReference + offset[3];
regTabPtr->attributeGroupDescriptor = descriptorReference + offset[4];
regTabPtr->tabDescriptor = descriptorReference + offset[5];
}//Dbtup::setUpDescriptorReferences()

Uint32

@@ -491,14 +533,18 @@ void Dbtup::releaseTabDescr(Tablerec* const regTabPtr)
Uint32 descriptor = regTabPtr->readKeyArray;
if (descriptor != RNIL) {
ljam();
Uint32 offset[10];
getTabDescrOffsets(regTabPtr, offset);

regTabPtr->tabDescriptor = RNIL;
regTabPtr->readKeyArray = RNIL;
regTabPtr->readFunctionArray = NULL;
regTabPtr->updateFunctionArray = NULL;
regTabPtr->charsetArray = NULL;
regTabPtr->attributeGroupDescriptor= RNIL;

Uint32 sizeFunctionArrays = 2 * (regTabPtr->noOfAttr * sizeOfReadFunction());
descriptor -= (sizeFunctionArrays + ZTD_SIZE);
// move to start of descriptor
descriptor -= offset[3];
Uint32 retNo = getTabDescrWord(descriptor + ZTD_DATASIZE);
ndbrequire(getTabDescrWord(descriptor + ZTD_HEADER) == ZTD_TYPE_NORMAL);
ndbrequire(retNo == getTabDescrWord((descriptor + retNo) - ZTD_TR_SIZE));
@@ -35,6 +35,7 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) {
Uint32 attrDescriptorStart = startDescriptor + (i << ZAD_LOG_SIZE);
Uint32 attrDescriptor = tableDescriptor[attrDescriptorStart].tabDescr;
Uint32 attrOffset = tableDescriptor[attrDescriptorStart + 1].tabDescr;
if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
(AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {

@@ -54,6 +55,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
} else {
ndbrequire(false);
}//if
// replace read function of char attribute
if (AttributeOffset::getCharsetFlag(attrOffset)) {
ljam();
regTabPtr->readFunctionArray[i] = &Dbtup::readCharNotNULL;
}
} else {
if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1) {
ljam();

@@ -72,6 +78,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
}//if
// replace read function of char attribute
if (AttributeOffset::getCharsetFlag(attrOffset)) {
ljam();
regTabPtr->readFunctionArray[i] = &Dbtup::readCharNULLable;
}
}//if
} else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) {
if (!AttributeDescriptor::getNullable(attrDescriptor)) {

@@ -146,10 +157,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
/* ---------------------------------------------------------------- */
int Dbtup::readAttributes(Page* const pagePtr,
Uint32 tupHeadOffset,
Uint32* inBuffer,
const Uint32* inBuffer,
Uint32 inBufLen,
Uint32* outBuffer,
Uint32 maxRead)
Uint32 maxRead,
bool xfrmFlag)
{
Tablerec* const regTabPtr = tabptr.p;
Uint32 numAttributes = regTabPtr->noOfAttr;

@@ -162,6 +174,7 @@ int Dbtup::readAttributes(Page* const pagePtr,
tCheckOffset = regTabPtr->tupheadsize;
tMaxRead = maxRead;
tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
tXfrmFlag = xfrmFlag;
ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
while (inBufIndex < inBufLen) {

@@ -542,6 +555,74 @@ Dbtup::readDynSmallVarSize(Uint32* outBuffer,
return false;
}//Dbtup::readDynSmallVarSize()

bool
Dbtup::readCharNotNULL(Uint32* outBuffer,
AttributeHeader* ahOut,
Uint32 attrDescriptor,
Uint32 attrDes2)
{
Uint32 indexBuf = tOutBufIndex;
Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
Uint32 newIndexBuf = indexBuf + attrNoOfWords;
Uint32 maxRead = tMaxRead;
ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset);
if (newIndexBuf <= maxRead) {
ljam();
ahOut->setDataSize(attrNoOfWords);
if (! tXfrmFlag) {
MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
&tTupleHeader[readOffset],
attrNoOfWords);
} else {
ljam();
Tablerec* regTabPtr = tabptr.p;
Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
ndbrequire(i < tabptr.p->noOfCharsets);
// not const in MySQL
CHARSET_INFO* cs = tabptr.p->charsetArray[i];
// XXX should strip Uint32 null padding
const unsigned nBytes = attrNoOfWords << 2;
unsigned n =
(*cs->coll->strnxfrm)(cs,
(uchar*)&outBuffer[indexBuf],
nBytes,
(const uchar*)&tTupleHeader[readOffset],
nBytes);
// pad with ascii spaces
while (n < nBytes)
((uchar*)&outBuffer[indexBuf])[n++] = 0x20;
}
tOutBufIndex = newIndexBuf;
return true;
} else {
ljam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}
}

bool
Dbtup::readCharNULLable(Uint32* outBuffer,
AttributeHeader* ahOut,
Uint32 attrDescriptor,
Uint32 attrDes2)
{
if (!nullFlagCheck(attrDes2)) {
ljam();
return readCharNotNULL(outBuffer,
ahOut,
attrDescriptor,
attrDes2);
} else {
ljam();
ahOut->setNULL();
return true;
}
}
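The strnxfrm branch above is what makes case-insensitive CHAR keys binary-comparable for the ordered index: once transformed, a plain memcmp gives the collation's ordering. Below is a minimal standalone C++ sketch of that idea, assuming the MySQL 4.1 charset headers and library are available and using the same five-argument strnxfrm call shape as readCharNotNULL; the collation name, buffer sizes and padding are illustrative only, and depending on the build my_init() or a charset data directory may be needed before get_charset_by_name() succeeds.

#include <my_global.h>
#include <my_sys.h>
#include <m_ctype.h>
#include <string.h>
#include <stdio.h>

int main()
{
  // illustrative case-insensitive collation; any charset with a
  // non-trivial strnxfrm would show the same effect
  CHARSET_INFO* cs = get_charset_by_name("latin1_swedish_ci", MYF(0));
  if (cs == NULL)
    return 1;

  const uchar a[4] = { 'a', 'A', 'a', ' ' };  // 'aAa' space-padded like CHAR(4)
  const uchar b[4] = { 'A', 'A', 'A', ' ' };  // 'AAA'
  uchar xa[4], xb[4];

  // same five-argument call shape as in readCharNotNULL above
  (*cs->coll->strnxfrm)(cs, xa, sizeof(xa), a, sizeof(a));
  (*cs->coll->strnxfrm)(cs, xb, sizeof(xb), b, sizeof(b));

  // raw bytes differ, transformed keys compare equal under this collation
  printf("raw memcmp=%d xfrm memcmp=%d\n",
         memcmp(a, b, sizeof(a)), memcmp(xa, xb, sizeof(xa)));
  return 0;
}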
/* ---------------------------------------------------------------------- */
/* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */
/* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */
@@ -31,12 +31,33 @@
/* memory attached to fragments (could be allocated per table */
/* instead. Performs its task by a buddy algorithm. */
/* **************************************************************** */
Uint32 Dbtup::allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups)

Uint32
Dbtup::getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset)
{
// belongs to configure.in
unsigned sizeOfPointer = sizeof(CHARSET_INFO*);
ndbrequire((sizeOfPointer & 0x3) == 0);
sizeOfPointer = (sizeOfPointer >> 2);
// do in layout order and return offsets (see DbtupMeta.cpp)
Uint32 allocSize = 0;
// magically aligned to 8 bytes
offset[0] = allocSize += ZTD_SIZE;
offset[1] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction();
offset[2] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction();
offset[3] = allocSize += regTabPtr->noOfCharsets * sizeOfPointer;
offset[4] = allocSize += regTabPtr->noOfKeyAttr;
offset[5] = allocSize += regTabPtr->noOfAttributeGroups;
allocSize += regTabPtr->noOfAttr * ZAD_SIZE;
allocSize += ZTD_TRAILER_SIZE;
// return number of words
return allocSize;
}

Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
{
Uint32 reference = RNIL;
Uint32 allocSize = (ZTD_SIZE + ZTD_TRAILER_SIZE) + (noOfAttributes * ZAD_SIZE);
allocSize += noOfAttributeGroups;
allocSize += ((2 * noOfAttributes * sizeOfReadFunction()) + noOfKeyAttr);
Uint32 allocSize = getTabDescrOffsets(regTabPtr, offset);
/* ---------------------------------------------------------------- */
/* ALWAYS ALLOCATE A MULTIPLE OF 16 BYTES */
/* ---------------------------------------------------------------- */
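To make the running-sum arithmetic in getTabDescrOffsets concrete, here is a small hedged sketch that repeats the same layout computation outside the kernel for a hypothetical table. The constants standing in for ZTD_SIZE, ZTD_TRAILER_SIZE, ZAD_SIZE and sizeOfReadFunction(), and the table counts, are assumptions for illustration, not the real values.

#include <cstdio>

// Stand-in constants; the real values come from Dbtup.hpp and the build.
static const unsigned ZTD_SIZE = 2;            // descriptor header words (assumed)
static const unsigned ZTD_TRAILER_SIZE = 2;    // descriptor trailer words (assumed)
static const unsigned ZAD_SIZE = 2;            // words per attribute descriptor (assumed)
static const unsigned READ_FUNCTION_WORDS = 2; // sizeOfReadFunction() stand-in

int main()
{
  // hypothetical table: 4 attributes, 1 key attribute, 1 distinct charset, 1 attribute group
  const unsigned noOfAttr = 4, noOfKeyAttr = 1, noOfCharsets = 1, noOfAttributeGroups = 1;
  const unsigned charsetPointerWords = sizeof(void*) / 4;  // must be a whole number of words

  unsigned offset[6];
  unsigned allocSize = 0;
  offset[0] = allocSize += ZTD_SIZE;                            // readFunctionArray starts here
  offset[1] = allocSize += noOfAttr * READ_FUNCTION_WORDS;      // updateFunctionArray
  offset[2] = allocSize += noOfAttr * READ_FUNCTION_WORDS;      // charsetArray
  offset[3] = allocSize += noOfCharsets * charsetPointerWords;  // readKeyArray
  offset[4] = allocSize += noOfKeyAttr;                         // attributeGroupDescriptor
  offset[5] = allocSize += noOfAttributeGroups;                 // tabDescriptor
  allocSize += noOfAttr * ZAD_SIZE + ZTD_TRAILER_SIZE;          // attribute descriptors + trailer

  for (unsigned i = 0; i < 6; i++)
    std::printf("offset[%u] = %u\n", i, offset[i]);
  std::printf("total words = %u\n", allocSize);
  return 0;
}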
|
||||
|
|
|
@ -751,7 +751,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
|
|||
&tableDescriptor[regTabPtr->readKeyArray].tabDescr,
|
||||
regTabPtr->noOfKeyAttr,
|
||||
keyBuffer,
|
||||
ZATTR_BUFFER_SIZE);
|
||||
ZATTR_BUFFER_SIZE,
|
||||
true);
|
||||
ndbrequire(noPrimKey != (Uint32)-1);
|
||||
|
||||
Uint32 numAttrsToRead;
|
||||
|
@ -792,7 +793,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
|
|||
&readBuffer[0],
|
||||
numAttrsToRead,
|
||||
mainBuffer,
|
||||
ZATTR_BUFFER_SIZE);
|
||||
ZATTR_BUFFER_SIZE,
|
||||
true);
|
||||
ndbrequire(noMainWords != (Uint32)-1);
|
||||
} else {
|
||||
ljam();
|
||||
|
@ -816,7 +818,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
|
|||
&readBuffer[0],
|
||||
numAttrsToRead,
|
||||
copyBuffer,
|
||||
ZATTR_BUFFER_SIZE);
|
||||
ZATTR_BUFFER_SIZE,
|
||||
true);
|
||||
|
||||
ndbrequire(noCopyWords != (Uint32)-1);
|
||||
if ((noMainWords == noCopyWords) &&
|
||||
|
|
|
@@ -162,11 +162,6 @@ private:
// AttributeHeader size is assumed to be 1 word
static const unsigned AttributeHeaderSize = 1;
/*
* Array of pointers to TUP table attributes. Always read-only.
*/
typedef const Uint32** TableData;
/*
* Logical tuple address, "local key". Identifies table tuples.
*/

@@ -330,11 +325,15 @@ private:
/*
* Attribute metadata. Size must be multiple of word size.
*
* Prefix comparison of char data must use strxfrm and binary
* comparison. The charset is currently unused.
*/
struct DescAttr {
Uint32 m_attrDesc; // standard AttributeDescriptor
Uint16 m_primaryAttrId;
Uint16 m_typeId;
unsigned m_typeId : 6;
unsigned m_charset : 10;
};
static const unsigned DescAttrSize = sizeof(DescAttr) >> 2;
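A tiny hedged sketch of the reworked DescAttr packing: the 16 bits previously occupied by Uint16 m_typeId are now shared by a 6-bit type id and a 10-bit MySQL charset number. The struct below is a portable stand-in (fixed-width types, simplified name), not the kernel's definition; its exact size is ABI-dependent, which is why the code keeps computing DescAttrSize from sizeof.

#include <cstdint>
#include <cstdio>

// Stand-in mirroring the reworked DescAttr bit layout.
struct DescAttrSketch {
  uint32_t m_attrDesc;        // standard AttributeDescriptor word
  uint16_t m_primaryAttrId;   // attribute id in the primary table
  uint16_t m_typeId  : 6;     // up to 64 type ids
  uint16_t m_charset : 10;    // up to 1024 charset/collation numbers, 0 = none
};

int main()
{
  DescAttrSketch d = {};
  d.m_typeId = 5;             // hypothetical type id
  d.m_charset = 8;            // e.g. latin1_swedish_ci in MySQL's numbering
  std::printf("typeId=%u charset=%u size_in_words=%zu\n",
              unsigned(d.m_typeId), unsigned(d.m_charset),
              sizeof(DescAttrSketch) / 4);
  return 0;
}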
|
||||
|
||||
|
@ -553,9 +552,9 @@ private:
|
|||
void execREAD_CONFIG_REQ(Signal* signal);
|
||||
// utils
|
||||
void setKeyAttrs(const Frag& frag);
|
||||
void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData);
|
||||
void readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData);
|
||||
void copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
|
||||
void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData);
|
||||
void readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize);
|
||||
void copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
|
||||
|
||||
/*
|
||||
* DbtuxMeta.cpp
|
||||
|
@ -622,17 +621,15 @@ private:
|
|||
/*
|
||||
* DbtuxSearch.cpp
|
||||
*/
|
||||
void searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
|
||||
void searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
|
||||
void searchToAdd(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
|
||||
void searchToRemove(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
|
||||
void searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
|
||||
|
||||
/*
|
||||
* DbtuxCmp.cpp
|
||||
*/
|
||||
int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
|
||||
int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey);
|
||||
int cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
|
||||
int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
|
||||
int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey);
|
||||
|
||||
/*
|
||||
* DbtuxDebug.cpp
|
||||
|
@ -679,17 +676,27 @@ private:
|
|||
Uint32 c_typeOfStart;
|
||||
|
||||
/*
|
||||
* Array of index key attribute ids in AttributeHeader format.
|
||||
* Includes fixed attribute sizes. This is global data set at
|
||||
* operation start and is not passed as a parameter.
|
||||
* Global data set at operation start. Unpacked from index metadata.
|
||||
* Not passed as parameter to methods. Invalid across timeslices.
|
||||
*
|
||||
* TODO inline all into index metadata
|
||||
*/
|
||||
|
||||
// index key attr ids with sizes in AttributeHeader format
|
||||
Data c_keyAttrs;
|
||||
|
||||
// buffer for search key data as pointers to TUP storage
|
||||
TableData c_searchKey;
|
||||
// pointers to index key comparison functions
|
||||
NdbSqlUtil::Cmp** c_sqlCmp;
|
||||
|
||||
// buffer for current entry key data as pointers to TUP storage
|
||||
TableData c_entryKey;
|
||||
/*
|
||||
* Other buffers used during the operation.
|
||||
*/
|
||||
|
||||
// buffer for search key data with headers
|
||||
Data c_searchKey;
|
||||
|
||||
// buffer for current entry key data with headers
|
||||
Data c_entryKey;
|
||||
|
||||
// buffer for scan bounds and keyinfo (primary key)
|
||||
Data c_dataBuffer;
|
||||
|
|
|
@ -18,21 +18,24 @@
|
|||
#include "Dbtux.hpp"
|
||||
|
||||
/*
|
||||
* Search key vs node prefix.
|
||||
* Search key vs node prefix or entry
|
||||
*
|
||||
* The comparison starts at given attribute position (in fact 0). The
|
||||
* position is updated by number of equal initial attributes found. The
|
||||
* prefix may be partial in which case CmpUnknown may be returned.
|
||||
* The comparison starts at given attribute position. The position is
|
||||
* updated by number of equal initial attributes found. The entry data
|
||||
* may be partial in which case CmpUnknown may be returned.
|
||||
*/
|
||||
int
|
||||
Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen)
|
||||
Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen)
|
||||
{
|
||||
const unsigned numAttrs = frag.m_numAttrs;
|
||||
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
|
||||
// number of words of attribute data left
|
||||
unsigned len2 = maxlen;
|
||||
// skip to right position in search key
|
||||
searchKey += start;
|
||||
// skip to right position in search key only
|
||||
for (unsigned i = 0; i < start; i++) {
|
||||
jam();
|
||||
searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
|
||||
}
|
||||
int ret = 0;
|
||||
while (start < numAttrs) {
|
||||
if (len2 <= AttributeHeaderSize) {
|
||||
|
@ -41,22 +44,21 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons
|
|||
break;
|
||||
}
|
||||
len2 -= AttributeHeaderSize;
|
||||
if (*searchKey != 0) {
|
||||
if (! searchKey.ah().isNULL()) {
|
||||
if (! entryData.ah().isNULL()) {
|
||||
jam();
|
||||
// current attribute
|
||||
const DescAttr& descAttr = descEnt.m_descAttr[start];
|
||||
const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
|
||||
ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined);
|
||||
// full data size
|
||||
const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
|
||||
ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize());
|
||||
const unsigned size2 = min(size1, len2);
|
||||
len2 -= size2;
|
||||
// compare
|
||||
const Uint32* const p1 = *searchKey;
|
||||
NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start];
|
||||
const Uint32* const p1 = &searchKey[AttributeHeaderSize];
|
||||
const Uint32* const p2 = &entryData[AttributeHeaderSize];
|
||||
ret = (*type.m_cmp)(p1, p2, size1, size2);
|
||||
ret = (*cmp)(0, p1, p2, size1, size2);
|
||||
if (ret != 0) {
|
||||
jam();
|
||||
break;
|
||||
|
@ -75,7 +77,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons
|
|||
break;
|
||||
}
|
||||
}
|
||||
searchKey += 1;
|
||||
searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
|
||||
entryData += AttributeHeaderSize + entryData.ah().getDataSize();
|
||||
start++;
|
||||
}
|
||||
|
@ -83,60 +85,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons
|
|||
}
|
||||
|
||||
/*
|
||||
* Search key vs tree entry.
|
||||
*
|
||||
* Start position is updated as in previous routine.
|
||||
*/
|
||||
int
|
||||
Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey)
|
||||
{
|
||||
const unsigned numAttrs = frag.m_numAttrs;
|
||||
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
|
||||
// skip to right position
|
||||
searchKey += start;
|
||||
entryKey += start;
|
||||
int ret = 0;
|
||||
while (start < numAttrs) {
|
||||
if (*searchKey != 0) {
|
||||
if (*entryKey != 0) {
|
||||
jam();
|
||||
// current attribute
|
||||
const DescAttr& descAttr = descEnt.m_descAttr[start];
|
||||
const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
|
||||
ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined);
|
||||
// full data size
|
||||
const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
|
||||
// compare
|
||||
const Uint32* const p1 = *searchKey;
|
||||
const Uint32* const p2 = *entryKey;
|
||||
ret = (*type.m_cmp)(p1, p2, size1, size1);
|
||||
if (ret != 0) {
|
||||
jam();
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
jam();
|
||||
// not NULL > NULL
|
||||
ret = +1;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
if (*entryKey != 0) {
|
||||
jam();
|
||||
// NULL < not NULL
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
searchKey += 1;
|
||||
entryKey += 1;
|
||||
start++;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan bound vs node prefix.
|
||||
* Scan bound vs node prefix or entry.
|
||||
*
|
||||
* Compare lower or upper bound and index attribute data. The attribute
|
||||
* data may be partial in which case CmpUnknown may be returned.
|
||||
|
@ -183,9 +132,8 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
|
|||
jam();
|
||||
// current attribute
|
||||
const unsigned index = boundInfo.ah().getAttributeId();
|
||||
ndbrequire(index < frag.m_numAttrs);
|
||||
const DescAttr& descAttr = descEnt.m_descAttr[index];
|
||||
const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
|
||||
ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined);
|
||||
ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId);
|
||||
// full data size
|
||||
const unsigned size1 = boundInfo.ah().getDataSize();
|
||||
|
@ -193,9 +141,10 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
|
|||
const unsigned size2 = min(size1, len2);
|
||||
len2 -= size2;
|
||||
// compare
|
||||
NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index];
|
||||
const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
|
||||
const Uint32* const p2 = &entryData[AttributeHeaderSize];
|
||||
int ret = (*type.m_cmp)(p1, p2, size1, size2);
|
||||
int ret = (*cmp)(0, p1, p2, size1, size2);
|
||||
if (ret != 0) {
|
||||
jam();
|
||||
return ret;
|
||||
|
@ -244,72 +193,3 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
|
|||
return +1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan bound vs tree entry.
|
||||
*/
|
||||
int
|
||||
Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey)
|
||||
{
|
||||
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
|
||||
// direction 0-lower 1-upper
|
||||
ndbrequire(dir <= 1);
|
||||
// initialize type to equality
|
||||
unsigned type = 4;
|
||||
while (boundCount != 0) {
|
||||
// get and skip bound type
|
||||
type = boundInfo[0];
|
||||
boundInfo += 1;
|
||||
if (! boundInfo.ah().isNULL()) {
|
||||
if (*entryKey != 0) {
|
||||
jam();
|
||||
// current attribute
|
||||
const unsigned index = boundInfo.ah().getAttributeId();
|
||||
const DescAttr& descAttr = descEnt.m_descAttr[index];
|
||||
const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
|
||||
ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined);
|
||||
// full data size
|
||||
const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
|
||||
// compare
|
||||
const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
|
||||
const Uint32* const p2 = *entryKey;
|
||||
int ret = (*type.m_cmp)(p1, p2, size1, size1);
|
||||
if (ret != 0) {
|
||||
jam();
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
jam();
|
||||
// not NULL > NULL
|
||||
return +1;
|
||||
}
|
||||
} else {
|
||||
jam();
|
||||
if (*entryKey != 0) {
|
||||
jam();
|
||||
// NULL < not NULL
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
boundInfo += AttributeHeaderSize + boundInfo.ah().getDataSize();
|
||||
entryKey += 1;
|
||||
boundCount -= 1;
|
||||
}
|
||||
if (dir == 0) {
|
||||
// lower bound
|
||||
jam();
|
||||
if (type == 1) {
|
||||
jam();
|
||||
return +1;
|
||||
}
|
||||
return -1;
|
||||
} else {
|
||||
// upper bound
|
||||
jam();
|
||||
if (type == 3) {
|
||||
jam();
|
||||
return -1;
|
||||
}
|
||||
return +1;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -207,14 +207,10 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
|
|||
}
|
||||
// check ordering within node
|
||||
for (unsigned j = 1; j < node.getOccup(); j++) {
|
||||
unsigned start = 0;
|
||||
const TreeEnt ent1 = node.getEnt(j - 1);
|
||||
const TreeEnt ent2 = node.getEnt(j);
|
||||
if (j == 1) {
|
||||
readKeyAttrs(frag, ent1, start, c_searchKey);
|
||||
} else {
|
||||
memcpy(c_searchKey, c_entryKey, frag.m_numAttrs << 2);
|
||||
}
|
||||
unsigned start = 0;
|
||||
readKeyAttrs(frag, ent1, start, c_searchKey);
|
||||
readKeyAttrs(frag, ent2, start, c_entryKey);
|
||||
int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey);
|
||||
if (ret == 0)
|
||||
|
|
|
@ -16,8 +16,6 @@
|
|||
|
||||
#define DBTUX_GEN_CPP
|
||||
#include "Dbtux.hpp"
|
||||
#include <signaldata/TuxContinueB.hpp>
|
||||
#include <signaldata/TuxContinueB.hpp>
|
||||
|
||||
Dbtux::Dbtux(const Configuration& conf) :
|
||||
SimulatedBlock(DBTUX, conf),
|
||||
|
@ -202,8 +200,9 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
|
|||
}
|
||||
// allocate buffers
|
||||
c_keyAttrs = (Uint32*)allocRecord("c_keyAttrs", sizeof(Uint32), MaxIndexAttributes);
|
||||
c_searchKey = (TableData)allocRecord("c_searchKey", sizeof(Uint32*), MaxIndexAttributes);
|
||||
c_entryKey = (TableData)allocRecord("c_entryKey", sizeof(Uint32*), MaxIndexAttributes);
|
||||
c_sqlCmp = (NdbSqlUtil::Cmp**)allocRecord("c_sqlCmp", sizeof(NdbSqlUtil::Cmp*), MaxIndexAttributes);
|
||||
c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize);
|
||||
c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize);
|
||||
c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxAttrDataSize + 1) >> 1);
|
||||
// ack
|
||||
ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
|
||||
|
@ -218,7 +217,8 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
|
|||
void
|
||||
Dbtux::setKeyAttrs(const Frag& frag)
|
||||
{
|
||||
Data keyAttrs = c_keyAttrs; // global
|
||||
Data keyAttrs = c_keyAttrs; // global
|
||||
NdbSqlUtil::Cmp** sqlCmp = c_sqlCmp; // global
|
||||
const unsigned numAttrs = frag.m_numAttrs;
|
||||
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
|
||||
for (unsigned i = 0; i < numAttrs; i++) {
|
||||
|
@ -227,75 +227,71 @@ Dbtux::setKeyAttrs(const Frag& frag)
|
|||
// set attr id and fixed size
|
||||
keyAttrs.ah() = AttributeHeader(descAttr.m_primaryAttrId, size);
|
||||
keyAttrs += 1;
|
||||
// set comparison method pointer
|
||||
const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
|
||||
ndbrequire(sqlType.m_cmp != 0);
|
||||
*(sqlCmp++) = sqlType.m_cmp;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData)
|
||||
Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData)
|
||||
{
|
||||
ConstData keyAttrs = c_keyAttrs; // global
|
||||
const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
|
||||
const TupLoc tupLoc = ent.m_tupLoc;
|
||||
const Uint32 tupVersion = ent.m_tupVersion;
|
||||
ndbrequire(start < frag.m_numAttrs);
|
||||
const unsigned numAttrs = frag.m_numAttrs - start;
|
||||
// start applies to both keys and output data
|
||||
const Uint32 numAttrs = frag.m_numAttrs - start;
|
||||
// skip to start position in keyAttrs only
|
||||
keyAttrs += start;
|
||||
keyData += start;
|
||||
c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, numAttrs, keyAttrs, keyData);
|
||||
int ret = c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, keyAttrs, numAttrs, keyData);
|
||||
jamEntry();
|
||||
// TODO handle error
|
||||
ndbrequire(ret > 0);
|
||||
}
|
||||
|
||||
void
|
||||
Dbtux::readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData)
|
||||
Dbtux::readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize)
|
||||
{
|
||||
const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
|
||||
const TupLoc tupLoc = ent.m_tupLoc;
|
||||
Uint32 size = 0;
|
||||
c_tup->tuxReadKeys(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, &size, pkData);
|
||||
ndbrequire(size != 0);
|
||||
pkSize = size;
|
||||
int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, pkData);
|
||||
jamEntry();
|
||||
// TODO handle error
|
||||
ndbrequire(ret > 0);
|
||||
pkSize = ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Input is pointers to table attributes. Output is array of attribute
|
||||
* data with headers. Copies whatever fits.
|
||||
* Copy attribute data with headers. Input is all index key data.
|
||||
* Copies whatever fits.
|
||||
*/
|
||||
void
|
||||
Dbtux::copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2)
|
||||
Dbtux::copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2)
|
||||
{
|
||||
ConstData keyAttrs = c_keyAttrs; // global
|
||||
const unsigned numAttrs = frag.m_numAttrs;
|
||||
unsigned n = frag.m_numAttrs;
|
||||
unsigned len2 = maxlen2;
|
||||
for (unsigned n = 0; n < numAttrs; n++) {
|
||||
while (n != 0) {
|
||||
jam();
|
||||
const unsigned attrId = keyAttrs.ah().getAttributeId();
|
||||
const unsigned dataSize = keyAttrs.ah().getDataSize();
|
||||
const Uint32* const p1 = *data1;
|
||||
if (p1 != 0) {
|
||||
const unsigned dataSize = data1.ah().getDataSize();
|
||||
// copy header
|
||||
if (len2 == 0)
|
||||
return;
|
||||
data2[0] = data1[0];
|
||||
data1 += 1;
|
||||
data2 += 1;
|
||||
len2 -= 1;
|
||||
// copy data
|
||||
for (unsigned i = 0; i < dataSize; i++) {
|
||||
if (len2 == 0)
|
||||
return;
|
||||
data2.ah() = AttributeHeader(attrId, dataSize);
|
||||
data2 += 1;
|
||||
len2 -= 1;
|
||||
unsigned n = dataSize;
|
||||
for (unsigned i = 0; i < dataSize; i++) {
|
||||
if (len2 == 0)
|
||||
return;
|
||||
*data2 = p1[i];
|
||||
data2 += 1;
|
||||
len2 -= 1;
|
||||
}
|
||||
} else {
|
||||
if (len2 == 0)
|
||||
return;
|
||||
data2.ah() = AttributeHeader(attrId, 0);
|
||||
data2.ah().setNULL();
|
||||
data2 += 1;
|
||||
data2[i] = data1[i];
|
||||
len2 -= 1;
|
||||
}
|
||||
keyAttrs += 1;
|
||||
data1 += 1;
|
||||
data1 += dataSize;
|
||||
data2 += dataSize;
|
||||
n -= 1;
|
||||
}
|
||||
#ifdef VM_TRACE
|
||||
memset(data2, DataFillByte, len2 << 2);
|
||||
|
|
|
@ -178,19 +178,31 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
|
|||
descAttr.m_attrDesc = req->attrDescriptor;
|
||||
descAttr.m_primaryAttrId = req->primaryAttrId;
|
||||
descAttr.m_typeId = req->extTypeInfo & 0xFF;
|
||||
descAttr.m_charset = (req->extTypeInfo >> 16);
|
||||
#ifdef VM_TRACE
|
||||
if (debugFlags & DebugMeta) {
|
||||
debugOut << "Add frag " << fragPtr.i << " attr " << attrId << " " << descAttr << endl;
|
||||
}
|
||||
#endif
|
||||
// check if type is valid and has a comparison method
|
||||
const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
|
||||
// check that type is valid and has a binary comparison method
|
||||
const NdbSqlUtil::Type& type = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
|
||||
if (type.m_typeId == NdbSqlUtil::Type::Undefined ||
|
||||
type.m_cmp == 0) {
|
||||
jam();
|
||||
errorCode = TuxAddAttrRef::InvalidAttributeType;
|
||||
break;
|
||||
}
|
||||
#ifdef dbtux_uses_charset
|
||||
if (descAttr.m_charset != 0) {
|
||||
CHARSET_INFO *cs = get_charset(descAttr.m_charset, MYF(0));
|
||||
// here use the non-binary type
|
||||
if (! NdbSqlUtil::usable_in_ordered_index(descAttr.m_typeId, cs)) {
|
||||
jam();
|
||||
errorCode = TuxAddAttrRef::InvalidCharset;
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd) {
|
||||
jam();
|
||||
// initialize tree header
|
||||
|
|
|
@ -112,6 +112,7 @@ Dbtux::execACC_SCANREQ(Signal* signal)
|
|||
void
|
||||
Dbtux::execTUX_BOUND_INFO(Signal* signal)
|
||||
{
|
||||
jamEntry();
|
||||
struct BoundInfo {
|
||||
unsigned offset;
|
||||
unsigned size;
|
||||
|
@ -389,7 +390,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
|
|||
jam();
|
||||
const TreeEnt ent = scan.m_scanPos.m_ent;
|
||||
// read tuple key
|
||||
readTablePk(frag, ent, pkSize, pkData);
|
||||
readTablePk(frag, ent, pkData, pkSize);
|
||||
// get read lock or exclusive lock
|
||||
AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
|
||||
lockReq->returnCode = RNIL;
|
||||
|
@ -480,7 +481,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
|
|||
jam();
|
||||
if (pkSize == 0) {
|
||||
jam();
|
||||
readTablePk(frag, ent, pkSize, pkData);
|
||||
readTablePk(frag, ent, pkData, pkSize);
|
||||
}
|
||||
}
|
||||
// conf signal
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
* TODO optimize for initial equal attrs in node min/max
|
||||
*/
|
||||
void
|
||||
Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
|
||||
Dbtux::searchToAdd(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
|
||||
{
|
||||
const TreeHead& tree = frag.m_tree;
|
||||
const unsigned numAttrs = frag.m_numAttrs;
|
||||
|
@ -144,7 +144,7 @@ Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt sear
|
|||
* to it.
|
||||
*/
|
||||
void
|
||||
Dbtux::searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
|
||||
Dbtux::searchToRemove(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
|
||||
{
|
||||
const TreeHead& tree = frag.m_tree;
|
||||
const unsigned numAttrs = frag.m_numAttrs;
|
||||
|
|
|
@@ -83,7 +83,7 @@ optim 13 mc02/a 39 ms 59 ms 50 pct
mc02/c 9 ms 12 ms 44 pct
mc02/d 246 ms 289 ms 17 pct

[ case d: what happened to PK read performance? ]
[ case d: bug in testOIBasic killed PK read performance ]

optim 14 mc02/a 41 ms 60 ms 44 pct
mc02/b 46 ms 81 ms 73 pct

@@ -91,5 +91,21 @@ optim 14 mc02/a 41 ms 60 ms 44 pct
mc02/d 242 ms 285 ms 17 pct

[ case b: do long keys suffer from many subroutine calls? ]
[ case d: bug in testOIBasic killed PK read performance ]

none mc02/a 35 ms 60 ms 71 pct
mc02/b 42 ms 75 ms 76 pct
mc02/c 5 ms 12 ms 106 pct
mc02/d 165 ms 238 ms 44 pct

[ johan re-installed mc02 as fedora gcc-3.3.2 ]
[ case c: table scan has improved... ]

charsets mc02/a 35 ms 60 ms 71 pct
mc02/b 42 ms 84 ms 97 pct
mc02/c 5 ms 12 ms 109 pct
mc02/d 190 ms 236 ms 23 pct

[ case b: TUX can no longer use pointers to TUP data ]

vim: set et:
|
||||
|
|
|
@ -56,6 +56,7 @@ const char programName[] = "NDB Kernel";
|
|||
|
||||
NDB_MAIN(ndb_kernel){
|
||||
|
||||
ndb_init();
|
||||
// Print to stdout/console
|
||||
g_eventLogger.createConsoleHandler();
|
||||
g_eventLogger.setCategory("NDB");
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#include <ndb_global.h>
|
||||
#include <my_sys.h>
|
||||
|
||||
#include "Configuration.hpp"
|
||||
#include <ErrorHandlingMacros.hpp>
|
||||
|
@ -105,7 +104,6 @@ Configuration::init(int argc, const char** argv){
|
|||
}
|
||||
// check for depricated flag '-i'
|
||||
|
||||
my_init();
|
||||
#ifndef DBUG_OFF
|
||||
if (debug_option)
|
||||
DBUG_PUSH(debug_option);
|
||||
|
@ -506,7 +504,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
|
|||
|
||||
for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
|
||||
Uint32 tmp;
|
||||
if(!ndb_mgm_get_int_parameter(&db, LogLevel::MIN_LOGLEVEL_ID+j, &tmp)){
|
||||
if(!ndb_mgm_get_int_parameter(&db, CFG_MIN_LOGLEVEL+j, &tmp)){
|
||||
m_logLevel->setLogLevel((LogLevel::EventCategory)j, tmp);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -107,6 +107,9 @@ public:
|
|||
/* Number of primary key attributes (should be computed) */
|
||||
Uint16 noOfPrimkey;
|
||||
|
||||
/* Number of distinct character sets (computed) */
|
||||
Uint16 noOfCharsets;
|
||||
|
||||
/* Length of primary key in words (should be computed) */
|
||||
/* For ordered index this is tree node size in words */
|
||||
Uint16 tupKeyLength;
|
||||
|
|
|
@ -954,13 +954,52 @@ struct ndb_mgm_event_categories
|
|||
{
|
||||
const char* name;
|
||||
enum ndb_mgm_event_category category;
|
||||
} categories[] = {
|
||||
{ "STARTUP", NDB_MGM_EVENT_CATEGORY_STARTUP },
|
||||
{ "SHUTDOWN", NDB_MGM_EVENT_CATEGORY_SHUTDOWN },
|
||||
{ "STATISTICS", NDB_MGM_EVENT_CATEGORY_STATISTIC },
|
||||
{ "NODERESTART", NDB_MGM_EVENT_CATEGORY_NODE_RESTART },
|
||||
{ "CONNECTION", NDB_MGM_EVENT_CATEGORY_CONNECTION },
|
||||
{ "CHECKPOINT", NDB_MGM_EVENT_CATEGORY_CHECKPOINT },
|
||||
{ "DEBUG", NDB_MGM_EVENT_CATEGORY_DEBUG },
|
||||
{ "INFO", NDB_MGM_EVENT_CATEGORY_INFO },
|
||||
{ "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR },
|
||||
{ "GREP", NDB_MGM_EVENT_CATEGORY_GREP },
|
||||
{ "BACKUP", NDB_MGM_EVENT_CATEGORY_BACKUP },
|
||||
{ 0, NDB_MGM_ILLEGAL_EVENT_CATEGORY }
|
||||
};
|
||||
|
||||
extern "C"
|
||||
ndb_mgm_event_category
|
||||
ndb_mgm_match_event_category(const char * status)
|
||||
{
|
||||
if(status == 0)
|
||||
return NDB_MGM_ILLEGAL_EVENT_CATEGORY;
|
||||
|
||||
for(int i = 0; categories[i].name !=0 ; i++)
|
||||
if(strcmp(status, categories[i].name) == 0)
|
||||
return categories[i].category;
|
||||
|
||||
return NDB_MGM_ILLEGAL_EVENT_CATEGORY;
|
||||
}
|
||||
|
||||
extern "C"
|
||||
const char *
|
||||
ndb_mgm_get_event_category_string(enum ndb_mgm_event_category status)
|
||||
{
|
||||
int i;
|
||||
for(i = 0; categories[i].name != 0; i++)
|
||||
if(categories[i].category == status)
|
||||
return categories[i].name;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern "C"
|
||||
int
|
||||
ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
|
||||
/*enum ndb_mgm_event_category*/
|
||||
char * category, int level,
|
||||
enum ndb_mgm_event_category cat,
|
||||
int level,
|
||||
struct ndb_mgm_reply* /*reply*/)
|
||||
{
|
||||
SET_ERROR(handle, NDB_MGM_NO_ERROR,
|
||||
|
@ -975,14 +1014,14 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
|
|||
|
||||
Properties args;
|
||||
args.put("node", nodeId);
|
||||
args.put("category", category);
|
||||
args.put("category", cat);
|
||||
args.put("level", level);
|
||||
|
||||
|
||||
const Properties *reply;
|
||||
reply = ndb_mgm_call(handle, clusterlog_reply,
|
||||
"set cluster loglevel", &args);
|
||||
CHECK_REPLY(reply, -1);
|
||||
|
||||
|
||||
BaseString result;
|
||||
reply->get("result", result);
|
||||
if(strcmp(result.c_str(), "Ok") != 0) {
|
||||
|
@ -997,8 +1036,8 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
|
|||
extern "C"
|
||||
int
|
||||
ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId,
|
||||
/*enum ndb_mgm_event_category category*/
|
||||
char * category, int level,
|
||||
enum ndb_mgm_event_category category,
|
||||
int level,
|
||||
struct ndb_mgm_reply* /*reply*/)
|
||||
{
|
||||
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_set_loglevel_node");
|
||||
|
@ -1030,6 +1069,48 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId,
|
|||
return 0;
|
||||
}
|
||||
|
||||
extern "C"
|
||||
int
|
||||
ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
|
||||
{
|
||||
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_listen_event");
|
||||
const ParserRow<ParserDummy> stat_reply[] = {
|
||||
MGM_CMD("listen event", NULL, ""),
|
||||
MGM_ARG("result", Int, Mandatory, "Error message"),
|
||||
MGM_ARG("msg", String, Optional, "Error message"),
|
||||
MGM_END()
|
||||
};
|
||||
CHECK_HANDLE(handle, -1);
|
||||
|
||||
SocketClient s(handle->hostname, handle->port);
|
||||
const NDB_SOCKET_TYPE sockfd = s.connect();
|
||||
if (sockfd < 0) {
|
||||
setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
|
||||
"Unable to connect to");
|
||||
return -1;
|
||||
}
|
||||
|
||||
Properties args;
|
||||
{
|
||||
BaseString tmp;
|
||||
for(int i = 0; filter[i] != 0; i += 2){
|
||||
tmp.appfmt("%d=%d ", filter[i+1], filter[i]);
|
||||
}
|
||||
args.put("filter", tmp.c_str());
|
||||
}
|
||||
|
||||
int tmp = handle->socket;
|
||||
handle->socket = sockfd;
|
||||
|
||||
const Properties *reply;
|
||||
reply = ndb_mgm_call(handle, stat_reply, "listen event", &args);
|
||||
|
||||
handle->socket = tmp;
|
||||
|
||||
CHECK_REPLY(reply, -1);
|
||||
return sockfd;
|
||||
}
|
||||
|
||||
extern "C"
|
||||
int
|
||||
ndb_mgm_get_stat_port(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/)
|
||||
|
|
|
@ -611,9 +611,9 @@ CommandInterpreter::executeHelp(char* parameters)
|
|||
<< endl;
|
||||
|
||||
ndbout << "<category> = ";
|
||||
for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++){
|
||||
ndbout << EventLogger::eventCategoryNames[i].name;
|
||||
if (i < EventLogger::noOfEventCategoryNames - 1) {
|
||||
for(int i = 0; i<CFG_MIN_LOGLEVEL; i++){
|
||||
ndbout << ndb_mgm_get_event_category_string((ndb_mgm_event_category)i);
|
||||
if (i < CFG_MIN_LOGLEVEL - 1) {
|
||||
ndbout << " | ";
|
||||
}
|
||||
}
|
||||
|
@ -673,8 +673,10 @@ CommandInterpreter::executeShutdown(char* parameters)
|
|||
if (mgm_id == 0)
|
||||
mgm_id= state->node_states[i].node_id;
|
||||
else {
|
||||
ndbout << "Unable to locate management server, shutdown manually with #STOP"
|
||||
ndbout << "Unable to locate management server, "
|
||||
<< "shutdown manually with <id> STOP"
|
||||
<< endl;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -721,11 +723,13 @@ const char *status_string(ndb_mgm_node_status status)
|
|||
|
||||
static void
|
||||
print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it,
|
||||
const char *proc_name, int no_proc, ndb_mgm_node_type type, int master_id)
|
||||
const char *proc_name, int no_proc, ndb_mgm_node_type type,
|
||||
int master_id)
|
||||
{
|
||||
int i;
|
||||
ndbout << "[" << proc_name
|
||||
<< "(" << ndb_mgm_get_node_type_string(type) << ")]\t" << no_proc << " node(s)" << endl;
|
||||
<< "(" << ndb_mgm_get_node_type_string(type) << ")]\t"
|
||||
<< no_proc << " node(s)" << endl;
|
||||
for(i=0; i < state->no_of_nodes; i++) {
|
||||
struct ndb_mgm_node_state *node_state= &(state->node_states[i]);
|
||||
if(node_state->node_type == type) {
|
||||
|
@ -733,7 +737,9 @@ print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it,
|
|||
ndbout << "id=" << node_id;
|
||||
if(node_state->version != 0) {
|
||||
const char *hostname= node_state->connect_address;
|
||||
if (hostname == 0 || strlen(hostname) == 0 || strcmp(hostname,"0.0.0.0") == 0)
|
||||
if (hostname == 0
|
||||
|| strlen(hostname) == 0
|
||||
|| strcmp(hostname,"0.0.0.0") == 0)
|
||||
ndbout << " ";
|
||||
else
|
||||
ndbout << "\t@" << hostname;
|
||||
|
@ -761,7 +767,8 @@ print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it,
|
|||
ndb_mgm_get_string_parameter(it, CFG_NODE_HOST, &config_hostname);
|
||||
if (config_hostname == 0 || config_hostname[0] == 0)
|
||||
config_hostname= "any host";
|
||||
ndbout << " (not connected, accepting connect from " << config_hostname << ")" << endl;
|
||||
ndbout << " (not connected, accepting connect from "
|
||||
<< config_hostname << ")" << endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1240,55 +1247,40 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters,
|
|||
{
|
||||
connect();
|
||||
(void) all;
|
||||
(void) parameters;
|
||||
|
||||
SetLogLevelOrd logLevel; logLevel.clear();
|
||||
LogLevel::EventCategory cat;
|
||||
int level;
|
||||
if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
|
||||
for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
|
||||
logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
|
||||
} else {
|
||||
|
||||
char * tmpString = strdup(parameters);
|
||||
char * tmpPtr = 0;
|
||||
char * item = strtok_r(tmpString, ", ", &tmpPtr);
|
||||
while(item != NULL){
|
||||
char categoryTxt[255];
|
||||
const int m = sscanf(item, "%[^=]=%d", categoryTxt, &level);
|
||||
if(m != 2){
|
||||
free(tmpString);
|
||||
ndbout << "Invalid loglevel specification category=level" << endl;
|
||||
return;
|
||||
}
|
||||
|
||||
if(!EventLogger::matchEventCategory(categoryTxt,
|
||||
&cat)){
|
||||
ndbout << "Invalid loglevel specification, unknown category: "
|
||||
<< categoryTxt << endl;
|
||||
free(tmpString);
|
||||
return ;
|
||||
}
|
||||
if(level < 0 || level > 15){
|
||||
ndbout << "Invalid loglevel specification row, level 0-15" << endl;
|
||||
free(tmpString);
|
||||
return ;
|
||||
}
|
||||
logLevel.setLogLevel(cat, level);
|
||||
|
||||
item = strtok_r(NULL, ", ", &tmpPtr);
|
||||
}
|
||||
free(tmpString);
|
||||
BaseString tmp(parameters);
|
||||
Vector<BaseString> spec;
|
||||
tmp.split(spec, "=");
|
||||
if(spec.size() != 2){
|
||||
ndbout << "Invalid loglevel specification: " << parameters << endl;
|
||||
return;
|
||||
}
|
||||
|
||||
spec[0].trim().ndb_toupper();
|
||||
int category = ndb_mgm_match_event_category(spec[0].c_str());
|
||||
if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
|
||||
category = atoi(spec[0].c_str());
|
||||
if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
|
||||
category > NDB_MGM_MAX_EVENT_CATEGORY){
|
||||
ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
int level = atoi(spec[1].c_str());
|
||||
if(level < 0 || level > 15){
|
||||
ndbout << "Invalid level: " << spec[1].c_str() << endl;
|
||||
return;
|
||||
}
|
||||
|
||||
struct ndb_mgm_reply reply;
|
||||
int result;
|
||||
result = ndb_mgm_set_loglevel_node(m_mgmsrv,
|
||||
processId, // fast fix - pekka
|
||||
(char*)EventLogger::getEventCategoryName(cat),
|
||||
processId,
|
||||
(ndb_mgm_event_category)category,
|
||||
level,
|
||||
&reply);
|
||||
|
||||
|
||||
if (result < 0) {
|
||||
ndbout_c("Executing LOGLEVEL on node %d failed.", processId);
|
||||
printError();
|
||||
|
@ -1296,7 +1288,7 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters,
|
|||
ndbout << "Executing LOGLEVEL on node " << processId << " OK!"
|
||||
<< endl;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
//*****************************************************************************
|
||||
|
@ -1626,54 +1618,41 @@ CommandInterpreter::executeEventReporting(int processId,
|
|||
bool all)
|
||||
{
|
||||
connect();
|
||||
SetLogLevelOrd logLevel; logLevel.clear();
|
||||
char categoryTxt[255];
|
||||
int level;
|
||||
LogLevel::EventCategory cat;
|
||||
if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
|
||||
for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
|
||||
logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
|
||||
} else {
|
||||
|
||||
char * tmpString = strdup(parameters);
|
||||
char * tmpPtr = 0;
|
||||
char * item = strtok_r(tmpString, ", ", &tmpPtr);
|
||||
while(item != NULL){
|
||||
const int m = sscanf(item, "%[^=]=%d", categoryTxt, &level);
|
||||
if(m != 2){
|
||||
free(tmpString);
|
||||
ndbout << "Invalid loglevel specification category=level" << endl;
|
||||
return;
|
||||
}
|
||||
|
||||
if(!EventLogger::matchEventCategory(categoryTxt,
|
||||
&cat)){
|
||||
ndbout << "Invalid loglevel specification, unknown category: "
|
||||
<< categoryTxt << endl;
|
||||
free(tmpString);
|
||||
return ;
|
||||
}
|
||||
if(level < 0 || level > 15){
|
||||
ndbout << "Invalid loglevel specification row, level 0-15" << endl;
|
||||
free(tmpString);
|
||||
return ;
|
||||
}
|
||||
logLevel.setLogLevel(cat, level);
|
||||
|
||||
item = strtok_r(NULL, ", ", &tmpPtr);
|
||||
}
|
||||
free(tmpString);
|
||||
BaseString tmp(parameters);
|
||||
Vector<BaseString> spec;
|
||||
tmp.split(spec, "=");
|
||||
if(spec.size() != 2){
|
||||
ndbout << "Invalid loglevel specification: " << parameters << endl;
|
||||
return;
|
||||
}
|
||||
|
||||
spec[0].trim().ndb_toupper();
|
||||
int category = ndb_mgm_match_event_category(spec[0].c_str());
|
||||
if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
|
||||
category = atoi(spec[0].c_str());
|
||||
if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
|
||||
category > NDB_MGM_MAX_EVENT_CATEGORY){
|
||||
ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
int level = atoi(spec[1].c_str());
|
||||
if(level < 0 || level > 15){
|
||||
ndbout << "Invalid level: " << spec[1].c_str() << endl;
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
struct ndb_mgm_reply reply;
|
||||
int result;
|
||||
|
||||
result =
|
||||
ndb_mgm_set_loglevel_clusterlog(m_mgmsrv,
|
||||
processId, // fast fix - pekka
|
||||
(char*)
|
||||
EventLogger::getEventCategoryName(cat),
|
||||
level,
|
||||
&reply);
|
||||
result = ndb_mgm_set_loglevel_clusterlog(m_mgmsrv,
|
||||
processId, // fast fix - pekka
|
||||
(ndb_mgm_event_category)category,
|
||||
level,
|
||||
&reply);
|
||||
|
||||
if (result != 0) {
|
||||
ndbout_c("Executing CLUSTERLOG on node %d failed", processId);
|
||||
|
@ -1693,13 +1672,45 @@ CommandInterpreter::executeStartBackup(char* /*parameters*/)
|
|||
connect();
|
||||
struct ndb_mgm_reply reply;
|
||||
unsigned int backupId;
|
||||
|
||||
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
|
||||
int fd = ndb_mgm_listen_event(m_mgmsrv, filter);
|
||||
int result = ndb_mgm_start_backup(m_mgmsrv, &backupId, &reply);
|
||||
if (result != 0) {
|
||||
ndbout << "Start of backup failed" << endl;
|
||||
printError();
|
||||
} else {
|
||||
ndbout << "Backup started. Backup id " << backupId << "." << endl;
|
||||
close(fd);
|
||||
return;
|
||||
}
|
||||
|
||||
char *tmp;
|
||||
char buf[1024];
|
||||
{
|
||||
SocketInputStream in(fd);
|
||||
int count = 0;
|
||||
do {
|
||||
tmp = in.gets(buf, 1024);
|
||||
if(tmp)
|
||||
{
|
||||
ndbout << tmp;
|
||||
int id;
|
||||
if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){
|
||||
count++;
|
||||
}
|
||||
}
|
||||
} while(count < 2);
|
||||
}
|
||||
|
||||
SocketInputStream in(fd, 10);
|
||||
do {
|
||||
tmp = in.gets(buf, 1024);
|
||||
if(tmp && tmp[0] != 0)
|
||||
{
|
||||
ndbout << tmp;
|
||||
}
|
||||
} while(tmp && tmp[0] != 0);
|
||||
|
||||
close(fd);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -44,6 +44,7 @@ handler(int sig){
|
|||
}
|
||||
|
||||
int main(int argc, const char** argv){
|
||||
ndb_init();
|
||||
int optind = 0;
|
||||
const char *_host = 0;
|
||||
int _port = 0;
|
||||
|
|
|
@ -52,7 +52,7 @@ static const char* helpTexts[] = {
|
|||
"{<id>|ALL} CLUSTERLOG {<category>=<level>}+ Set log level for cluster log",
|
||||
"QUIT Quit management server",
|
||||
};
|
||||
static const int noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*);
|
||||
static const unsigned noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*);
|
||||
|
||||
static const char* helpTextShow =
|
||||
"SHOW prints NDB Cluster information\n\n"
|
||||
|
@ -389,14 +389,14 @@ void CommandInterpreter::executeHelp(char* parameters) {
|
|||
<< endl;
|
||||
|
||||
ndbout << "<category> = ";
|
||||
for(i = 0; i<EventLogger::noOfEventCategoryNames; i++){
|
||||
ndbout << EventLogger::eventCategoryNames[i].name;
|
||||
if (i < EventLogger::noOfEventCategoryNames - 1) {
|
||||
for(i = 0; i<CFG_MIN_LOGLEVEL; i++){
|
||||
ndbout << ndb_mgm_get_event_category_string((ndb_mgm_event_category)i);
|
||||
if (i < CFG_MIN_LOGLEVEL - 1) {
|
||||
ndbout << " | ";
|
||||
}
|
||||
}
|
||||
ndbout << endl;
|
||||
|
||||
|
||||
ndbout << "<level> = " << "0 - 15"
|
||||
<< endl;
|
||||
|
||||
|
@ -831,12 +831,13 @@ void CommandInterpreter::executeStatus(int processId,
|
|||
//*****************************************************************************
|
||||
void CommandInterpreter::executeLogLevel(int processId,
|
||||
const char* parameters, bool all) {
|
||||
#if 0
|
||||
(void)all; // Don't want compiler warning
|
||||
SetLogLevelOrd logLevel; logLevel.clear();
|
||||
|
||||
if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
|
||||
for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
|
||||
logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
|
||||
for(Uint32 i = 0; i<EventLoggerBase::noOfEventCategoryNames; i++)
|
||||
logLevel.setLogLevel(EventLoggerBase::eventCategoryNames[i].category, 7);
|
||||
} else {
|
||||
|
||||
char * tmpString = strdup(parameters);
|
||||
|
@ -852,7 +853,7 @@ void CommandInterpreter::executeLogLevel(int processId,
|
|||
return;
|
||||
}
|
||||
LogLevel::EventCategory cat;
|
||||
if(!EventLogger::matchEventCategory(categoryTxt,
|
||||
if(!EventLoggerBase::matchEventCategory(categoryTxt,
|
||||
&cat)){
|
||||
ndbout << "Invalid loglevel specification, unknown category: "
|
||||
<< categoryTxt << endl;
|
||||
|
@ -875,6 +876,7 @@ void CommandInterpreter::executeLogLevel(int processId,
|
|||
if (result != 0) {
|
||||
ndbout << _mgmtSrvr.getErrorText(result) << endl;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
@ -1080,12 +1082,13 @@ void CommandInterpreter::executeTestOff(int processId,
|
|||
void CommandInterpreter::executeEventReporting(int processId,
|
||||
const char* parameters,
|
||||
bool all) {
|
||||
#if 0
|
||||
(void)all; // Don't want compiler warning
|
||||
SetLogLevelOrd logLevel; logLevel.clear();
|
||||
|
||||
if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
|
||||
for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
|
||||
logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
|
||||
for(Uint32 i = 0; i<EventLoggerBase::noOfEventCategoryNames; i++)
|
||||
logLevel.setLogLevel(EventLoggerBase::eventCategoryNames[i].category, 7);
|
||||
} else {
|
||||
|
||||
char * tmpString = strdup(parameters);
|
||||
|
@ -1101,7 +1104,7 @@ void CommandInterpreter::executeEventReporting(int processId,
|
|||
return;
|
||||
}
|
||||
LogLevel::EventCategory cat;
|
||||
if(!EventLogger::matchEventCategory(categoryTxt,
|
||||
if(!EventLoggerBase::matchEventCategory(categoryTxt,
|
||||
&cat)){
|
||||
ndbout << "Invalid loglevel specification, unknown category: "
|
||||
<< categoryTxt << endl;
|
||||
|
@ -1124,6 +1127,7 @@ void CommandInterpreter::executeEventReporting(int processId,
|
|||
if (result != 0) {
|
||||
ndbout << _mgmtSrvr.getErrorText(result) << endl;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -1529,7 +1529,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
|
|||
},
|
||||
|
||||
{
|
||||
CFG_TCP_HOSTNAME_1,
|
||||
CFG_CONNECTION_HOSTNAME_1,
|
||||
"HostName1",
|
||||
"TCP",
|
||||
"Name/IP of computer on one side of the connection",
|
||||
|
@ -1540,7 +1540,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
|
|||
0, 0 },
|
||||
|
||||
{
|
||||
CFG_TCP_HOSTNAME_2,
|
||||
CFG_CONNECTION_HOSTNAME_2,
|
||||
"HostName2",
|
||||
"TCP",
|
||||
"Name/IP of computer on one side of the connection",
|
||||
|
@ -1935,7 +1935,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
|
|||
},
|
||||
|
||||
{
|
||||
CFG_OSE_HOSTNAME_1,
|
||||
CFG_CONNECTION_HOSTNAME_1,
|
||||
"HostName1",
|
||||
"OSE",
|
||||
"Name of computer on one side of the connection",
|
||||
|
@ -1946,7 +1946,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
|
|||
0, 0 },
|
||||
|
||||
{
|
||||
CFG_OSE_HOSTNAME_2,
|
||||
CFG_CONNECTION_HOSTNAME_2,
|
||||
"HostName2",
|
||||
"OSE",
|
||||
"Name of computer on one side of the connection",
|
||||
|
@ -2902,26 +2902,38 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
|
|||
DBUG_ENTER("fixPortNumber");
|
||||
|
||||
Uint32 id1= 0, id2= 0;
|
||||
const char *hostName1;
|
||||
const char *hostName2;
|
||||
require(ctx.m_currentSection->get("NodeId1", &id1));
|
||||
require(ctx.m_currentSection->get("NodeId2", &id2));
|
||||
require(ctx.m_currentSection->get("HostName1", &hostName1));
|
||||
require(ctx.m_currentSection->get("HostName2", &hostName2));
|
||||
DBUG_PRINT("info",("NodeId1=%d HostName1=\"%s\"",id1,hostName1));
|
||||
DBUG_PRINT("info",("NodeId2=%d HostName2=\"%s\"",id2,hostName2));
|
||||
|
||||
if (id1 > id2) {
|
||||
Uint32 tmp= id1;
|
||||
const char *tmp_name= hostName1;
|
||||
hostName1= hostName2;
|
||||
id1= id2;
|
||||
hostName2= tmp_name;
|
||||
id2= tmp;
|
||||
}
|
||||
|
||||
const Properties * node;
|
||||
require(ctx.m_config->get("Node", id1, &node));
|
||||
BaseString hostname;
|
||||
require(node->get("HostName", hostname));
|
||||
BaseString hostname(hostName1);
|
||||
// require(node->get("HostName", hostname));
|
||||
|
||||
if (hostname.c_str()[0] == 0) {
|
||||
ctx.reportError("Hostname required on nodeid %d since it will act as server.", id1);
|
||||
ctx.reportError("Hostname required on nodeid %d since it will "
|
||||
"act as server.", id1);
|
||||
DBUG_RETURN(false);
|
||||
}
|
||||
|
||||
Uint32 port= 0;
|
||||
if (!node->get("ServerPort", &port) && !ctx.m_userProperties.get("ServerPort_", id1, &port)) {
|
||||
if (!node->get("ServerPort", &port) &&
|
||||
!ctx.m_userProperties.get("ServerPort_", id1, &port)) {
|
||||
Uint32 adder= 0;
|
||||
{
|
||||
BaseString server_port_adder(hostname);
|
||||
|
@ -2932,7 +2944,8 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
|
|||
|
||||
Uint32 base= 0;
|
||||
if (!ctx.m_userProperties.get("ServerPortBase", &base)){
|
||||
if(!(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) &&
|
||||
if(!(ctx.m_userDefaults &&
|
||||
ctx.m_userDefaults->get("PortNumber", &base)) &&
|
||||
!ctx.m_systemDefaults->get("PortNumber", &base)) {
|
||||
base= strtoll(NDB_BASE_PORT,0,0)+2;
|
||||
// ctx.reportError("Cannot retrieve base port number");
|
||||
|
@ -2945,12 +2958,15 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
|
|||
}
|
||||
|
||||
if(ctx.m_currentSection->contains("PortNumber")) {
|
||||
ndbout << "PortNumber should no longer be specificied per connection, please remove from config. Will be changed to " << port << endl;
|
||||
ndbout << "PortNumber should no longer be specificied "
|
||||
<< "per connection, please remove from config. "
|
||||
<< "Will be changed to " << port << endl;
|
||||
ctx.m_currentSection->put("PortNumber", port, true);
|
||||
} else
|
||||
ctx.m_currentSection->put("PortNumber", port);
|
||||
|
||||
DBUG_PRINT("info", ("connection %d-%d port %d host %s", id1, id2, port, hostname.c_str()));
|
||||
DBUG_PRINT("info", ("connection %d-%d port %d host %s",
|
||||
id1, id2, port, hostname.c_str()));
|
||||
|
||||
DBUG_RETURN(true);
|
||||
}
|
||||
|
|
|
@ -12,8 +12,6 @@ ndb_mgmd_SOURCES = \
|
|||
main.cpp \
|
||||
Services.cpp \
|
||||
convertStrToInt.cpp \
|
||||
NodeLogLevel.cpp \
|
||||
NodeLogLevelList.cpp \
|
||||
SignalQueue.cpp \
|
||||
MgmtSrvrConfig.cpp \
|
||||
ConfigInfo.cpp \
|
||||
|
|
File diff suppressed because it is too large
|
@ -28,8 +28,8 @@
|
|||
#include <signaldata/ManagementServer.hpp>
|
||||
#include "SignalQueue.hpp"
|
||||
#include <ndb_version.h>
|
||||
|
||||
#include "NodeLogLevelList.hpp"
|
||||
#include <EventLogger.hpp>
|
||||
#include <signaldata/EventSubscribeReq.hpp>
|
||||
|
||||
/**
|
||||
* @desc Block number for Management server.
|
||||
|
@ -43,6 +43,29 @@ class Config;
|
|||
class SetLogLevelOrd;
|
||||
class SocketServer;
|
||||
|
||||
class MgmStatService : public EventLoggerBase
|
||||
{
|
||||
friend class MgmtSrvr;
|
||||
public:
|
||||
struct StatListener : public EventLoggerBase {
|
||||
NDB_SOCKET_TYPE m_socket;
|
||||
};
|
||||
|
||||
private:
|
||||
class MgmtSrvr * m_mgmsrv;
|
||||
MutexVector<StatListener> m_clients;
|
||||
public:
|
||||
MgmStatService(class MgmtSrvr * m) : m_clients(5) {
|
||||
m_mgmsrv = m;
|
||||
}
|
||||
|
||||
void add_listener(const StatListener&);
|
||||
|
||||
void log(int eventType, const Uint32* theData, NodeId nodeId);
|
||||
|
||||
void stopSessions();
|
||||
};
|
||||
|
||||
/**
|
||||
* @class MgmtSrvr
|
||||
* @brief Main class for the management server.
|
||||
|
@ -63,11 +86,6 @@ class SocketServer;
|
|||
class MgmtSrvr {
|
||||
|
||||
public:
|
||||
class StatisticsListner {
|
||||
public:
|
||||
virtual void println_statistics(const BaseString &s) = 0;
|
||||
};
|
||||
|
||||
// some compilers need all of this
|
||||
class Allocated_resources;
|
||||
friend class Allocated_resources;
|
||||
|
@ -84,11 +102,6 @@ public:
|
|||
NodeBitmask m_reserved_nodes;
|
||||
};
|
||||
|
||||
/**
|
||||
* Set a reference to the socket server.
|
||||
*/
|
||||
void setStatisticsListner(StatisticsListner* listner);
|
||||
|
||||
/**
|
||||
* Start/initiate the event log.
|
||||
*/
|
||||
|
@ -150,15 +163,6 @@ public:
|
|||
STATIC_CONST( OPERATION_IN_PROGRESS = 6667 );
|
||||
|
||||
STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 );
|
||||
/**
|
||||
* This class holds all statistical variables fetched with
|
||||
* the getStatistics methods.
|
||||
*/
|
||||
class Statistics { // TODO, Real statistic data to be added
|
||||
public:
|
||||
int _test1;
|
||||
};
|
||||
|
||||
/**
|
||||
* This enum specifies the different signal logging modes possible to set
|
||||
* with the setSignalLoggingMode method.
|
||||
|
@ -206,7 +210,7 @@ public:
|
|||
typedef void (* EnterSingleCallback)(int nodeId, void * anyData,
|
||||
int errorCode);
|
||||
typedef void (* ExitSingleCallback)(int nodeId, void * anyData,
|
||||
int errorCode);
|
||||
int errorCode);
|
||||
|
||||
/**
|
||||
* Lock configuration
|
||||
|
@ -313,13 +317,6 @@ public:
|
|||
bool abort = false,
|
||||
int * stopCount = 0, StopCallback = 0, void * anyData = 0);
|
||||
|
||||
int setEventReportingLevel(int processId,
|
||||
const class SetLogLevelOrd & logLevel,
|
||||
bool isResend = false);
|
||||
|
||||
int startStatisticEventReporting(int level = 5);
|
||||
|
||||
|
||||
struct BackupEvent {
|
||||
enum Event {
|
||||
BackupStarted = 1,
|
||||
|
@ -356,8 +353,6 @@ public:
|
|||
/**
|
||||
* Backup functionality
|
||||
*/
|
||||
typedef void (* BackupCallback)(const BackupEvent& Event);
|
||||
BackupCallback setCallback(BackupCallback);
|
||||
int startBackup(Uint32& backupId, bool waitCompleted = false);
|
||||
int abortBackup(Uint32 backupId);
|
||||
int performBackup(Uint32* backupId);
|
||||
|
@ -377,22 +372,8 @@ public:
|
|||
// INVALID_LEVEL
|
||||
//**************************************************************************
|
||||
|
||||
/**
|
||||
* Sets the Node's log level, i.e., its local event reporting.
|
||||
*
|
||||
* @param processId the DB node id.
|
||||
* @param logLevel the log level.
|
||||
* @param isResend Flag to indicate for resending log levels
|
||||
* during node restart
|
||||
|
||||
* @return 0 if successful or NO_CONTACT_WITH_PROCESS,
|
||||
* SEND_OR_RECEIVE_FAILED,
|
||||
* COULD_NOT_ALLOCATE_MEMORY
|
||||
*/
|
||||
int setNodeLogLevel(int processId,
|
||||
const class SetLogLevelOrd & logLevel,
|
||||
bool isResend = false);
|
||||
|
||||
int setEventReportingLevelImpl(int processId, const EventSubscribeReq& ll);
|
||||
int setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll);
|
||||
|
||||
/**
|
||||
* Insert an error in a DB process.
|
||||
|
@ -508,11 +489,6 @@ public:
|
|||
*/
|
||||
NodeId getPrimaryNode() const;
|
||||
|
||||
/**
|
||||
* Returns the statistics port number.
|
||||
* @return statistic port number.
|
||||
*/
|
||||
int getStatPort() const;
|
||||
/**
|
||||
* Returns the port number.
|
||||
* @return port number.
|
||||
|
@ -526,10 +502,7 @@ public:
|
|||
private:
|
||||
//**************************************************************************
|
||||
|
||||
int setEventReportingLevelImpl(int processId,
|
||||
const class SetLogLevelOrd & logLevel,
|
||||
bool isResend = false);
|
||||
|
||||
int setEventReportingLevel(int processId, LogLevel::EventCategory, Uint32);
|
||||
|
||||
/**
|
||||
* Check if it is possible to send a signal to a (DB) process
|
||||
|
@ -563,10 +536,6 @@ private:
|
|||
Allocated_resources m_allocated_resources;
|
||||
struct in_addr m_connect_address[MAX_NODES];
|
||||
|
||||
int _setVarReqResult; // The result of the SET_VAR_REQ response
|
||||
Statistics _statistics; // handleSTATISTICS_CONF store the result here,
|
||||
// and getStatistics reads it.
|
||||
|
||||
//**************************************************************************
|
||||
// Specific signal handling methods
|
||||
//**************************************************************************
|
||||
|
@ -598,14 +567,6 @@ private:
|
|||
// Returns: -
|
||||
//**************************************************************************
|
||||
|
||||
int handleSTATISTICS_CONF(NdbApiSignal* signal);
|
||||
//**************************************************************************
|
||||
// Description: Handle reception of signal STATISTICS_CONF
|
||||
// Parameters:
|
||||
// signal: The received signal
|
||||
// Returns: TODO, to be defined
|
||||
//**************************************************************************
|
||||
|
||||
void handle_MGM_LOCK_CONFIG_REQ(NdbApiSignal *signal);
|
||||
void handle_MGM_UNLOCK_CONFIG_REQ(NdbApiSignal *signal);
|
||||
|
||||
|
@ -631,7 +592,6 @@ private:
|
|||
*/
|
||||
enum WaitSignalType {
|
||||
NO_WAIT, // We don't expect to receive any signal
|
||||
WAIT_STATISTICS, // Accept STATISTICS_CONF
|
||||
WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF
|
||||
WAIT_SUBSCRIBE_CONF, // Accept event subscription confirmation
|
||||
WAIT_STOP,
|
||||
|
@ -733,14 +693,6 @@ private:
|
|||
|
||||
class SignalQueue m_signalRecvQueue;
|
||||
|
||||
enum ndb_mgm_node_type nodeTypes[MAX_NODES];
|
||||
|
||||
int theConfCount; // The number of expected conf signals
|
||||
|
||||
StatisticsListner * m_statisticsListner; // Used for sending statistics info
|
||||
bool _isStatPortActive;
|
||||
bool _isClusterLogStatActive;
|
||||
|
||||
struct StopRecord {
|
||||
StopRecord(){ inUse = false; callback = 0; singleUserMode = false;}
|
||||
bool inUse;
|
||||
|
@ -765,10 +717,16 @@ private:
|
|||
|
||||
void handleStopReply(NodeId nodeId, Uint32 errCode);
|
||||
int translateStopRef(Uint32 errCode);
|
||||
|
||||
|
||||
bool _isStopThread;
|
||||
int _logLevelThreadSleep;
|
||||
int _startedNodeId;
|
||||
MutexVector<NodeId> m_started_nodes;
|
||||
MutexVector<EventSubscribeReq> m_log_level_requests;
|
||||
LogLevel m_nodeLogLevel[MAX_NODES];
|
||||
enum ndb_mgm_node_type nodeTypes[MAX_NODES];
|
||||
friend class MgmApiSession;
|
||||
friend class MgmStatService;
|
||||
MgmStatService m_statisticsListner;
|
||||
|
||||
/**
|
||||
* Handles the thread which upon a 'Node is started' event will
|
||||
|
@ -782,15 +740,12 @@ private:
|
|||
static void *signalRecvThread_C(void *);
|
||||
void signalRecvThreadRun();
|
||||
|
||||
NodeLogLevelList* _nodeLogLevelList;
|
||||
NodeLogLevelList* _clusterLogLevelList;
|
||||
|
||||
void backupCallback(BackupEvent &);
|
||||
BackupCallback m_backupCallback;
|
||||
BackupEvent m_lastBackupEvent;
|
||||
|
||||
Config *_props;
|
||||
|
||||
int send(class NdbApiSignal* signal, Uint32 node, Uint32 node_type);
|
||||
public:
|
||||
/**
|
||||
* This method does not exist
|
||||
|
|
|
@ -1,70 +0,0 @@
|
|||
/* Copyright (C) 2003 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#include "NodeLogLevel.hpp"
|
||||
// TODO_RONM: Clearly getCategory and getLevel is not correctly coded. Must be taken care of.
|
||||
|
||||
NodeLogLevel::NodeLogLevel(int nodeId, const SetLogLevelOrd& ll)
|
||||
{
|
||||
m_nodeId = nodeId;
|
||||
m_logLevel = ll;
|
||||
}
|
||||
|
||||
NodeLogLevel::~NodeLogLevel()
|
||||
{
|
||||
}
|
||||
|
||||
int
|
||||
NodeLogLevel::getNodeId() const
|
||||
{
|
||||
return m_nodeId;
|
||||
}
|
||||
|
||||
Uint32
|
||||
NodeLogLevel::getCategory() const
|
||||
{
|
||||
for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
|
||||
{
|
||||
return m_logLevel.theCategories[i];
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
NodeLogLevel::getLevel() const
|
||||
{
|
||||
for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
|
||||
{
|
||||
return m_logLevel.theLevels[i];
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
NodeLogLevel::setLevel(int level)
|
||||
{
|
||||
for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
|
||||
{
|
||||
m_logLevel.theLevels[i] = level;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
SetLogLevelOrd
|
||||
NodeLogLevel::getLogLevelOrd() const
|
||||
{
|
||||
return m_logLevel;
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
/* Copyright (C) 2003 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#ifndef NODELOGLEVEL_H
|
||||
#define NODELOGLEVEL_H
|
||||
|
||||
#include <ndb_global.h>
|
||||
|
||||
#include <signaldata/SetLogLevelOrd.hpp>
|
||||
|
||||
/**
|
||||
* Holds a DB node's log level settings for both local and event log levels.
|
||||
* It only holds one log level setting even though SetLogLevelOrd can handle
|
||||
* multiple log levels at once, it is not used in that way in the managment
|
||||
* server.
|
||||
*
|
||||
* @version #@ $Id: NodeLogLevel.hpp,v 1.2 2003/07/05 17:40:22 elathal Exp $
|
||||
*/
|
||||
class NodeLogLevel
|
||||
{
|
||||
public:
|
||||
NodeLogLevel(int nodeId, const SetLogLevelOrd& ll);
|
||||
~NodeLogLevel();
|
||||
|
||||
int getNodeId() const;
|
||||
Uint32 getCategory() const;
|
||||
int getLevel() const;
|
||||
void setLevel(int level);
|
||||
SetLogLevelOrd getLogLevelOrd() const;
|
||||
|
||||
private:
|
||||
NodeLogLevel();
|
||||
NodeLogLevel(const NodeLogLevel&);
|
||||
bool operator == (const NodeLogLevel&);
|
||||
NodeLogLevel operator = (const NodeLogLevel&);
|
||||
|
||||
int m_nodeId;
|
||||
SetLogLevelOrd m_logLevel;
|
||||
};
|
||||
|
||||
#endif
|
|
@ -1,182 +0,0 @@
|
|||
/* Copyright (C) 2003 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#include <ndb_global.h>
|
||||
|
||||
#include "NodeLogLevelList.hpp"
|
||||
#include "NodeLogLevel.hpp"
|
||||
|
||||
//
|
||||
// PUBLIC
|
||||
//
|
||||
|
||||
NodeLogLevelList::NodeLogLevelList() :
|
||||
m_size(0),
|
||||
m_pHeadNode(NULL),
|
||||
m_pTailNode(NULL),
|
||||
m_pCurrNode(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
NodeLogLevelList::~NodeLogLevelList()
|
||||
{
|
||||
removeAll();
|
||||
}
|
||||
|
||||
void
|
||||
NodeLogLevelList::add(NodeLogLevel* pNewNode)
|
||||
{
|
||||
NodeLogLevelNode* pNode = new NodeLogLevelNode();
|
||||
|
||||
if (m_pHeadNode == NULL)
|
||||
{
|
||||
m_pHeadNode = pNode;
|
||||
pNode->pPrev = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
m_pTailNode->pNext = pNode;
|
||||
pNode->pPrev = m_pTailNode;
|
||||
}
|
||||
m_pTailNode = pNode;
|
||||
pNode->pNext = NULL;
|
||||
pNode->pHandler = pNewNode;
|
||||
|
||||
m_size++;
|
||||
}
|
||||
|
||||
bool
|
||||
NodeLogLevelList::remove(NodeLogLevel* pRemoveNode)
|
||||
{
|
||||
NodeLogLevelNode* pNode = m_pHeadNode;
|
||||
bool removed = false;
|
||||
do
|
||||
{
|
||||
if (pNode->pHandler == pRemoveNode)
|
||||
{
|
||||
removeNode(pNode);
|
||||
removed = true;
|
||||
break;
|
||||
}
|
||||
} while ( (pNode = next(pNode)) != NULL);
|
||||
|
||||
return removed;
|
||||
}
|
||||
|
||||
void
|
||||
NodeLogLevelList::removeAll()
|
||||
{
|
||||
while (m_pHeadNode != NULL)
|
||||
{
|
||||
removeNode(m_pHeadNode);
|
||||
}
|
||||
}
|
||||
|
||||
NodeLogLevel*
|
||||
NodeLogLevelList::next()
|
||||
{
|
||||
NodeLogLevel* pHandler = NULL;
|
||||
if (m_pCurrNode == NULL)
|
||||
{
|
||||
m_pCurrNode = m_pHeadNode;
|
||||
if (m_pCurrNode != NULL)
|
||||
{
|
||||
pHandler = m_pCurrNode->pHandler;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
m_pCurrNode = next(m_pCurrNode); // Next node
|
||||
if (m_pCurrNode != NULL)
|
||||
{
|
||||
pHandler = m_pCurrNode->pHandler;
|
||||
}
|
||||
}
|
||||
|
||||
return pHandler;
|
||||
}
|
||||
|
||||
int
|
||||
NodeLogLevelList::size() const
|
||||
{
|
||||
return m_size;
|
||||
}
|
||||
|
||||
//
|
||||
// PRIVATE
|
||||
//
|
||||
|
||||
NodeLogLevelList::NodeLogLevelNode*
|
||||
NodeLogLevelList::next(NodeLogLevelNode* pNode)
|
||||
{
|
||||
NodeLogLevelNode* pCurr = pNode;
|
||||
if (pNode->pNext != NULL)
|
||||
{
|
||||
pCurr = pNode->pNext;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Tail
|
||||
pCurr = NULL;
|
||||
}
|
||||
return pCurr;
|
||||
}
|
||||
|
||||
NodeLogLevelList::NodeLogLevelNode*
|
||||
NodeLogLevelList::prev(NodeLogLevelNode* pNode)
|
||||
{
|
||||
NodeLogLevelNode* pCurr = pNode;
|
||||
if (pNode->pPrev != NULL) // head
|
||||
{
|
||||
pCurr = pNode->pPrev;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Head
|
||||
pCurr = NULL;
|
||||
}
|
||||
|
||||
return pCurr;
|
||||
}
|
||||
|
||||
void
|
||||
NodeLogLevelList::removeNode(NodeLogLevelNode* pNode)
|
||||
{
|
||||
if (pNode->pPrev == NULL) // If head
|
||||
{
|
||||
m_pHeadNode = pNode->pNext;
|
||||
}
|
||||
else
|
||||
{
|
||||
pNode->pPrev->pNext = pNode->pNext;
|
||||
}
|
||||
|
||||
if (pNode->pNext == NULL) // if tail
|
||||
{
|
||||
m_pTailNode = pNode->pPrev;
|
||||
}
|
||||
else
|
||||
{
|
||||
pNode->pNext->pPrev = pNode->pPrev;
|
||||
}
|
||||
|
||||
pNode->pNext = NULL;
|
||||
pNode->pPrev = NULL;
|
||||
delete pNode->pHandler; // Delete log handler
|
||||
delete pNode;
|
||||
|
||||
m_size--;
|
||||
}
|
|
@ -1,93 +0,0 @@
|
|||
/* Copyright (C) 2003 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#ifndef NODELOGLEVELLIST_H
|
||||
#define NODELOGLEVELLIST_H
|
||||
|
||||
class NodeLogLevel;
|
||||
|
||||
/**
|
||||
* Provides a simple linked list of NodeLogLevel.
|
||||
*
|
||||
* @see NodeLogLevel
|
||||
* @version #@ $Id: NodeLogLevelList.hpp,v 1.1 2002/08/09 12:53:50 eyualex Exp $
|
||||
*/
|
||||
class NodeLogLevelList
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Default Constructor.
|
||||
*/
|
||||
NodeLogLevelList();
|
||||
|
||||
/**
|
||||
* Destructor.
|
||||
*/
|
||||
~NodeLogLevelList();
|
||||
|
||||
/**
|
||||
* Adds a new node.
|
||||
*
|
||||
* @param pNewHandler a new NodeLogLevel.
|
||||
*/
|
||||
void add(NodeLogLevel* pNewNode);
|
||||
|
||||
/**
|
||||
* Removes a NodeLogLevel from the list and call its destructor.
|
||||
*
|
||||
* @param pRemoveHandler the NodeLogLevel to remove
|
||||
*/
|
||||
bool remove(NodeLogLevel* pRemoveNode);
|
||||
|
||||
/**
|
||||
* Removes all items.
|
||||
*/
|
||||
void removeAll();
|
||||
|
||||
/**
|
||||
* Returns the next node in the list.
|
||||
* returns a node or NULL.
|
||||
*/
|
||||
NodeLogLevel* next();
|
||||
|
||||
/**
|
||||
* Returns the size of the list.
|
||||
*/
|
||||
int size() const;
|
||||
private:
|
||||
/** List node */
|
||||
struct NodeLogLevelNode
|
||||
{
|
||||
NodeLogLevelNode* pPrev;
|
||||
NodeLogLevelNode* pNext;
|
||||
NodeLogLevel* pHandler;
|
||||
};
|
||||
|
||||
NodeLogLevelNode* next(NodeLogLevelNode* pNode);
|
||||
NodeLogLevelNode* prev(NodeLogLevelNode* pNode);
|
||||
|
||||
void removeNode(NodeLogLevelNode* pNode);
|
||||
|
||||
int m_size;
|
||||
|
||||
NodeLogLevelNode* m_pHeadNode;
|
||||
NodeLogLevelNode* m_pTailNode;
|
||||
NodeLogLevelNode* m_pCurrNode;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
|
|
@ -23,6 +23,7 @@
|
|||
#include <mgmapi.h>
|
||||
#include <EventLogger.hpp>
|
||||
#include <signaldata/SetLogLevelOrd.hpp>
|
||||
#include <LogLevel.hpp>
|
||||
#include <BaseString.hpp>
|
||||
#include <Base64.hpp>
|
||||
|
||||
|
@ -133,7 +134,7 @@ ParserRow<MgmApiSession> commands[] = {
|
|||
MGM_ARG("public key", String, Mandatory, "Public key"),
|
||||
|
||||
MGM_CMD("get version", &MgmApiSession::getVersion, ""),
|
||||
|
||||
|
||||
MGM_CMD("get status", &MgmApiSession::getStatus, ""),
|
||||
|
||||
MGM_CMD("get info clusterlog", &MgmApiSession::getInfoClusterLog, ""),
|
||||
|
@ -236,7 +237,11 @@ ParserRow<MgmApiSession> commands[] = {
|
|||
MGM_ARG("node", String, Mandatory, "Node"),
|
||||
MGM_ARG("parameter", String, Mandatory, "Parameter"),
|
||||
MGM_ARG("value", String, Mandatory, "Value"),
|
||||
|
||||
|
||||
MGM_CMD("listen event", &MgmApiSession::listen_event, ""),
|
||||
MGM_ARG("node", Int, Optional, "Node"),
|
||||
MGM_ARG("filter", String, Mandatory, "Event category"),
|
||||
|
||||
MGM_END()
|
||||
};
|
||||
|
||||
|
@ -289,7 +294,8 @@ MgmApiSession::runSession() {
|
|||
break;
|
||||
}
|
||||
}
|
||||
NDB_CLOSE_SOCKET(m_socket);
|
||||
if(m_socket >= 0)
|
||||
NDB_CLOSE_SOCKET(m_socket);
|
||||
}
|
||||
|
||||
#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
|
||||
|
@ -418,7 +424,8 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
|
|||
&addr, &addrlen, error_string)){
|
||||
const char *alias;
|
||||
const char *str;
|
||||
alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)nodetype, &str);
|
||||
alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)
|
||||
nodetype, &str);
|
||||
m_output->println(cmd);
|
||||
m_output->println("result: %s", error_string.c_str());
|
||||
m_output->println("");
|
||||
|
@ -554,7 +561,7 @@ MgmApiSession::getStatPort(Parser_t::Context &,
|
|||
const class Properties &) {
|
||||
|
||||
m_output->println("get statport reply");
|
||||
m_output->println("tcpport: %d", m_mgmsrv.getStatPort());
|
||||
m_output->println("tcpport: %d", 0);
|
||||
m_output->println("");
|
||||
}
|
||||
|
||||
|
@ -756,13 +763,12 @@ MgmApiSession::bye(Parser<MgmApiSession>::Context &,
|
|||
void
|
||||
MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
|
||||
Properties const &args) {
|
||||
Uint32 node, level;
|
||||
BaseString categoryName, errorString;
|
||||
Uint32 node, level, category;
|
||||
BaseString errorString;
|
||||
SetLogLevelOrd logLevel;
|
||||
int result;
|
||||
logLevel.clear();
|
||||
args.get("node", &node);
|
||||
args.get("category", categoryName);
|
||||
args.get("category", &category);
|
||||
args.get("level", &level);
|
||||
|
||||
/* XXX should use constants for this value */
|
||||
|
@ -771,25 +777,17 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
|
|||
goto error;
|
||||
}
|
||||
|
||||
categoryName.ndb_toupper();
|
||||
|
||||
LogLevel::EventCategory category;
|
||||
if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) {
|
||||
errorString.assign("Unknown category");
|
||||
goto error;
|
||||
}
|
||||
|
||||
logLevel.setLogLevel(category, level);
|
||||
result = m_mgmsrv.setEventReportingLevel(node, logLevel);
|
||||
|
||||
EventSubscribeReq req;
|
||||
req.blockRef = 0;
|
||||
req.noOfEntries = 1;
|
||||
req.theData[0] = (category << 16) | level;
|
||||
m_mgmsrv.m_log_level_requests.push_back(req);
|
||||
|
||||
m_output->println("set cluster loglevel reply");
|
||||
if(result != 0)
|
||||
m_output->println("result: %s", m_mgmsrv.getErrorText(result));
|
||||
else
|
||||
m_output->println("result: Ok");
|
||||
m_output->println("result: Ok");
|
||||
m_output->println("");
|
||||
return;
|
||||
error:
|
||||
error:
|
||||
m_output->println("set cluster loglevel reply");
|
||||
m_output->println("result: %s", errorString.c_str());
|
||||
m_output->println("");
|
||||
|
@ -798,13 +796,13 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
|
|||
void
|
||||
MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
|
||||
Properties const &args) {
|
||||
Uint32 node = 0, level = 0;
|
||||
BaseString categoryName, errorString;
|
||||
Uint32 node = 0, level = 0, category;
|
||||
BaseString errorString;
|
||||
SetLogLevelOrd logLevel;
|
||||
int result;
|
||||
logLevel.clear();
|
||||
args.get("node", &node);
|
||||
args.get("category", categoryName);
|
||||
args.get("category", &category);
|
||||
args.get("level", &level);
|
||||
|
||||
/* XXX should use constants for this value */
|
||||
|
@ -813,23 +811,14 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
|
|||
goto error;
|
||||
}
|
||||
|
||||
categoryName.ndb_toupper();
|
||||
|
||||
LogLevel::EventCategory category;
|
||||
if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) {
|
||||
errorString.assign("Unknown category");
|
||||
goto error;
|
||||
}
|
||||
|
||||
logLevel.setLogLevel(category, level);
|
||||
|
||||
result = m_mgmsrv.setNodeLogLevel(node, logLevel);
|
||||
|
||||
EventSubscribeReq req;
|
||||
req.blockRef = node;
|
||||
req.noOfEntries = 1;
|
||||
req.theData[0] = (category << 16) | level;
|
||||
m_mgmsrv.m_log_level_requests.push_back(req);
|
||||
|
||||
m_output->println("set loglevel reply");
|
||||
if(result != 0)
|
||||
m_output->println("result: %s", m_mgmsrv.getErrorText(result));
|
||||
else
|
||||
m_output->println("result: Ok");
|
||||
m_output->println("result: Ok");
|
||||
m_output->println("");
|
||||
return;
|
||||
error:
|
||||
|
@ -1248,33 +1237,91 @@ MgmApiSession::configChange(Parser_t::Context &,
|
|||
m_output->println("");
|
||||
}
|
||||
|
||||
void
|
||||
MgmStatService::println_statistics(const BaseString &line){
|
||||
MutexVector<NDB_SOCKET_TYPE> copy(m_sockets.size());
|
||||
m_sockets.lock();
|
||||
int i;
|
||||
for(i = m_sockets.size() - 1; i >= 0; i--){
|
||||
if(println_socket(m_sockets[i], MAX_WRITE_TIMEOUT, line.c_str()) == -1){
|
||||
copy.push_back(m_sockets[i]);
|
||||
m_sockets.erase(i, false);
|
||||
static NdbOut&
|
||||
operator<<(NdbOut& out, const LogLevel & ll)
|
||||
{
|
||||
out << "[LogLevel: ";
|
||||
for(size_t i = 0; i<LogLevel::LOGLEVEL_CATEGORIES; i++)
|
||||
out << ll.getLogLevel((LogLevel::EventCategory)i) << " ";
|
||||
out << "]";
|
||||
}
|
||||
|
||||
void
|
||||
MgmStatService::log(int eventType, const Uint32* theData, NodeId nodeId){
|
||||
|
||||
Uint32 threshold = 0;
|
||||
LogLevel::EventCategory cat;
|
||||
|
||||
for(unsigned i = 0; i<EventLogger::matrixSize; i++){
|
||||
if(EventLogger::matrix[i].eventType == eventType){
|
||||
cat = EventLogger::matrix[i].eventCategory;
|
||||
threshold = EventLogger::matrix[i].threshold;
|
||||
break;
|
||||
}
|
||||
}
|
||||
m_sockets.unlock();
|
||||
|
||||
for(i = copy.size() - 1; i >= 0; i--){
|
||||
NDB_CLOSE_SOCKET(copy[i]);
|
||||
copy.erase(i);
|
||||
|
||||
char m_text[256];
|
||||
EventLogger::getText(m_text, sizeof(m_text), eventType, theData, nodeId);
|
||||
|
||||
Vector<NDB_SOCKET_TYPE> copy;
|
||||
m_clients.lock();
|
||||
int i;
|
||||
for(i = m_clients.size() - 1; i >= 0; i--){
|
||||
if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat)){
|
||||
if(m_clients[i].m_socket >= 0 &&
|
||||
println_socket(m_clients[i].m_socket,
|
||||
MAX_WRITE_TIMEOUT, m_text) == -1){
|
||||
copy.push_back(m_clients[i].m_socket);
|
||||
m_clients.erase(i, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
if(m_sockets.size() == 0 || false){
|
||||
m_mgmsrv->startStatisticEventReporting(0);
|
||||
m_clients.unlock();
|
||||
|
||||
for(i = 0; (unsigned)i<copy.size(); i++){
|
||||
NDB_CLOSE_SOCKET(copy[i]);
|
||||
}
|
||||
|
||||
if(copy.size()){
|
||||
LogLevel tmp; tmp.clear();
|
||||
m_clients.lock();
|
||||
for(i = 0; i < m_clients.size(); i++){
|
||||
tmp.set_max(m_clients[i].m_logLevel);
|
||||
}
|
||||
m_clients.unlock();
|
||||
|
||||
if(!(tmp == m_logLevel)){
|
||||
m_logLevel = tmp;
|
||||
EventSubscribeReq req;
|
||||
req = tmp;
|
||||
req.blockRef = 0;
|
||||
m_mgmsrv->m_log_level_requests.push_back(req);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
MgmStatService::add_listener(const StatListener& client){
|
||||
m_clients.push_back(client);
|
||||
LogLevel tmp = m_logLevel;
|
||||
tmp.set_max(client.m_logLevel);
|
||||
|
||||
if(!(tmp == m_logLevel)){
|
||||
m_logLevel = tmp;
|
||||
EventSubscribeReq req;
|
||||
req = tmp;
|
||||
req.blockRef = 0;
|
||||
m_mgmsrv->m_log_level_requests.push_back(req);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
MgmStatService::stopSessions(){
|
||||
for(int i = m_sockets.size() - 1; i >= 0; i--){
|
||||
NDB_CLOSE_SOCKET(m_sockets[i]);
|
||||
m_sockets.erase(i);
|
||||
for(int i = m_clients.size() - 1; i >= 0; i--){
|
||||
if(m_clients[i].m_socket >= 0){
|
||||
NDB_CLOSE_SOCKET(m_clients[i].m_socket);
|
||||
m_clients.erase(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1298,6 +1345,75 @@ MgmApiSession::setParameter(Parser_t::Context &,
|
|||
m_output->println("");
|
||||
}
|
||||
|
||||
void
|
||||
MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx,
|
||||
Properties const & args) {
|
||||
|
||||
BaseString node, param, value;
|
||||
args.get("node", node);
|
||||
args.get("filter", param);
|
||||
|
||||
int result = 0;
|
||||
BaseString msg;
|
||||
|
||||
MgmStatService::StatListener le;
|
||||
le.m_socket = m_socket;
|
||||
|
||||
Vector<BaseString> list;
|
||||
param.trim();
|
||||
param.split(list, " ,");
|
||||
for(size_t i = 0; i<list.size(); i++){
|
||||
Vector<BaseString> spec;
|
||||
list[i].trim();
|
||||
list[i].split(spec, "=:");
|
||||
if(spec.size() != 2){
|
||||
msg.appfmt("Invalid filter specification: >%s< >%s< %d",
|
||||
param.c_str(), list[i].c_str(), spec.size());
|
||||
result = -1;
|
||||
goto done;
|
||||
}
|
||||
|
||||
spec[0].trim().ndb_toupper();
|
||||
int category = ndb_mgm_match_event_category(spec[0].c_str());
|
||||
if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
|
||||
category = atoi(spec[0].c_str());
|
||||
if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
|
||||
category > NDB_MGM_MAX_EVENT_CATEGORY){
|
||||
msg.appfmt("Unknown category: >%s<", spec[0].c_str());
|
||||
result = -1;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
int level = atoi(spec[1].c_str());
|
||||
if(level < 0 || level > 15){
|
||||
msg.appfmt("Invalid level: >%s<", spec[1].c_str());
|
||||
result = -1;
|
||||
goto done;
|
||||
}
|
||||
category -= CFG_MIN_LOGLEVEL;
|
||||
le.m_logLevel.setLogLevel((LogLevel::EventCategory)category, level);
|
||||
}
|
||||
|
||||
if(list.size() == 0){
|
||||
msg.appfmt("Empty filter specification");
|
||||
result = -1;
|
||||
goto done;
|
||||
}
|
||||
|
||||
m_mgmsrv.m_statisticsListner.add_listener(le);
|
||||
|
||||
m_stop = true;
|
||||
m_socket = -1;
|
||||
|
||||
done:
|
||||
m_output->println("listen event");
|
||||
m_output->println("result: %d", result);
|
||||
if(result != 0)
|
||||
m_output->println("msg: %s", msg.c_str());
|
||||
m_output->println("");
|
||||
}
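
For reference, listen_event() above splits the mandatory "filter" argument on " ," into category=level (or category:level) pairs, resolves each category with ndb_mgm_match_event_category (falling back to a raw category number) and accepts levels 0-15 only. A hypothetical request in the management text protocol could therefore look like the following; the category names are assumptions for illustration and are not taken from this diff:

    listen event
    node: 1
    filter: STARTUP=7 CHECKPOINT=3
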
|
||||
|
||||
template class MutexVector<int>;
|
||||
template class Vector<ParserRow<MgmApiSession> const*>;
|
||||
template class Vector<unsigned short>;
|
||||
|
|
|
@ -83,7 +83,8 @@ public:
|
|||
void configChange(Parser_t::Context &ctx, const class Properties &args);
|
||||
|
||||
void setParameter(Parser_t::Context &ctx, const class Properties &args);
|
||||
|
||||
void listen_event(Parser_t::Context &ctx, const class Properties &args);
|
||||
|
||||
void repCommand(Parser_t::Context &ctx, const class Properties &args);
|
||||
};
|
||||
|
||||
|
@ -103,28 +104,4 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
class MgmStatService : public SocketServer::Service,
|
||||
public MgmtSrvr::StatisticsListner
|
||||
{
|
||||
class MgmtSrvr * m_mgmsrv;
|
||||
MutexVector<NDB_SOCKET_TYPE> m_sockets;
|
||||
public:
|
||||
MgmStatService() : m_sockets(5) {
|
||||
m_mgmsrv = 0;
|
||||
}
|
||||
|
||||
void setMgm(class MgmtSrvr * mgmsrv){
|
||||
m_mgmsrv = mgmsrv;
|
||||
}
|
||||
|
||||
SocketServer::Session * newSession(NDB_SOCKET_TYPE socket){
|
||||
m_sockets.push_back(socket);
|
||||
m_mgmsrv->startStatisticEventReporting(5);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void stopSessions();
|
||||
|
||||
void println_statistics(const BaseString &line);
|
||||
};
|
||||
#endif
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#include <ndb_global.h>
|
||||
#include <my_sys.h>
|
||||
|
||||
#include "MgmtSrvr.hpp"
|
||||
#include "EventLogger.hpp"
|
||||
|
@ -70,7 +69,6 @@ struct MgmGlobals {
|
|||
bool use_specific_ip;
|
||||
char * interface_name;
|
||||
int port;
|
||||
int port_stats;
|
||||
|
||||
/** The configuration of the cluster */
|
||||
Config * cluster_config;
|
||||
|
@ -82,6 +80,7 @@ struct MgmGlobals {
|
|||
SocketServer * socketServer;
|
||||
};
|
||||
|
||||
int g_no_nodeid_checks= 0;
|
||||
static MgmGlobals glob;
|
||||
|
||||
|
||||
|
@ -118,7 +117,9 @@ struct getargs args[] = {
|
|||
"Specify configuration file connect string (will default use Ndb.cfg if available)",
|
||||
"filename" },
|
||||
{ "interactive", 0, arg_flag, &glob.interactive,
|
||||
"Run interactive. Not supported but provided for testing purposes", "" },
|
||||
"Run interactive. Not supported but provided for testing purposes", "" },
|
||||
{ "no-nodeid-checks", 0, arg_flag, &g_no_nodeid_checks,
|
||||
"Do not provide any node id checks", "" },
|
||||
{ "nodaemon", 0, arg_flag, &glob.non_interactive,
|
||||
"Don't run as daemon, but don't read from stdin", "non-interactive" }
|
||||
};
|
||||
|
@ -129,6 +130,7 @@ int num_args = sizeof(args) / sizeof(args[0]);
|
|||
* MAIN
|
||||
*/
|
||||
NDB_MAIN(mgmsrv){
|
||||
ndb_init();
|
||||
/**
|
||||
* OSE specific. Enable shared ownership of file system resources.
|
||||
* This is needed in order to use the cluster log since the events
|
||||
|
@ -151,7 +153,6 @@ NDB_MAIN(mgmsrv){
|
|||
glob.daemon= 0;
|
||||
}
|
||||
|
||||
my_init();
|
||||
#ifndef DBUG_OFF
|
||||
if (debug_option)
|
||||
DBUG_PUSH(debug_option);
|
||||
|
@ -169,8 +170,6 @@ NDB_MAIN(mgmsrv){
|
|||
|
||||
MgmApiService * mapi = new MgmApiService();
|
||||
|
||||
MgmStatService * mstat = new MgmStatService();
|
||||
|
||||
/****************************
|
||||
* Read configuration files *
|
||||
****************************/
|
||||
|
@ -230,13 +229,6 @@ NDB_MAIN(mgmsrv){
|
|||
goto error_end;
|
||||
}
|
||||
|
||||
if(!glob.socketServer->setup(mstat, glob.port_stats, glob.interface_name)){
|
||||
ndbout_c("Unable to setup statistic port: %d!\nPlease check if the port"
|
||||
" is already used.", glob.port_stats);
|
||||
delete mstat;
|
||||
goto error_end;
|
||||
}
|
||||
|
||||
if(!glob.mgmObject->check_start()){
|
||||
ndbout_c("Unable to check start management server.");
|
||||
ndbout_c("Probably caused by illegal initial configuration file.");
|
||||
|
@ -267,10 +259,7 @@ NDB_MAIN(mgmsrv){
|
|||
}
|
||||
|
||||
//glob.mgmObject->saveConfig();
|
||||
|
||||
mstat->setMgm(glob.mgmObject);
|
||||
mapi->setMgm(glob.mgmObject);
|
||||
glob.mgmObject->setStatisticsListner(mstat);
|
||||
|
||||
char msg[256];
|
||||
snprintf(msg, sizeof(msg),
|
||||
|
@ -278,8 +267,8 @@ NDB_MAIN(mgmsrv){
|
|||
ndbout_c(msg);
|
||||
g_EventLogger.info(msg);
|
||||
|
||||
snprintf(msg, 256, "Id: %d, Command port: %d, Statistics port: %d",
|
||||
glob.localNodeId, glob.port, glob.port_stats);
|
||||
snprintf(msg, 256, "Id: %d, Command port: %d",
|
||||
glob.localNodeId, glob.port);
|
||||
ndbout_c(msg);
|
||||
g_EventLogger.info(msg);
|
||||
|
||||
|
@ -309,7 +298,6 @@ NDB_MAIN(mgmsrv){
|
|||
MgmGlobals::MgmGlobals(){
|
||||
// Default values
|
||||
port = 0;
|
||||
port_stats = 0;
|
||||
config_filename = NULL;
|
||||
local_config_filename = NULL;
|
||||
interface_name = 0;
|
||||
|
@ -336,17 +324,12 @@ MgmGlobals::~MgmGlobals(){
|
|||
* @fn readLocalConfig
|
||||
* @param glob : Global variables
|
||||
* @return true if success, false otherwise.
|
||||
*
|
||||
* How to get LOCAL CONFIGURATION FILE:
|
||||
* 1. Use local config file name (-l)
|
||||
* 2. Use environment NDB_HOME + Ndb.cfg
|
||||
* If NDB_HOME is not set this results in reading from local dir
|
||||
*/
|
||||
static bool
|
||||
readLocalConfig(){
|
||||
// Read local config file
|
||||
LocalConfig lc;
|
||||
if(!lc.init(glob.local_config_filename)){
|
||||
if(!lc.init(0,glob.local_config_filename)){
|
||||
lc.printError();
|
||||
return false;
|
||||
}
|
||||
|
@ -360,10 +343,6 @@ readLocalConfig(){
|
|||
* @fn readGlobalConfig
|
||||
* @param glob : Global variables
|
||||
* @return true if success, false otherwise.
|
||||
*
|
||||
* How to get the GLOBAL CONFIGURATION:
|
||||
* 1. Use config file name (this is a text file)(-c)
|
||||
* 2. Use name from line 2 of local config file, ex: file:///c/ndb/Ndb_cfg.bin
|
||||
*/
|
||||
static bool
|
||||
readGlobalConfig() {
|
||||
|
|
|
@ -32,6 +32,7 @@ void usage(const char * prg){
|
|||
NDB_COMMAND(mkconfig,
|
||||
"mkconfig", "mkconfig",
|
||||
"Make a binary configuration from a config file", 16384){
|
||||
ndb_init();
|
||||
if(argc < 3){
|
||||
usage(argv[0]);
|
||||
return 0;
|
||||
|
|
|
@ -65,7 +65,7 @@ NdbDictionary::Column::getName() const {
|
|||
|
||||
void
|
||||
NdbDictionary::Column::setType(Type t){
|
||||
m_impl.m_type = t;
|
||||
m_impl.init(t);
|
||||
}
|
||||
|
||||
NdbDictionary::Column::Type
|
||||
|
@ -103,6 +103,54 @@ NdbDictionary::Column::getLength() const{
|
|||
return m_impl.m_length;
|
||||
}
|
||||
|
||||
void
|
||||
NdbDictionary::Column::setInlineSize(int size)
|
||||
{
|
||||
m_impl.m_precision = size;
|
||||
}
|
||||
|
||||
void
|
||||
NdbDictionary::Column::setCharset(CHARSET_INFO* cs)
|
||||
{
|
||||
m_impl.m_cs = cs;
|
||||
}
|
||||
|
||||
CHARSET_INFO*
|
||||
NdbDictionary::Column::getCharset() const
|
||||
{
|
||||
return m_impl.m_cs;
|
||||
}
|
||||
|
||||
int
|
||||
NdbDictionary::Column::getInlineSize() const
|
||||
{
|
||||
return m_impl.m_precision;
|
||||
}
|
||||
|
||||
void
|
||||
NdbDictionary::Column::setPartSize(int size)
|
||||
{
|
||||
m_impl.m_scale = size;
|
||||
}
|
||||
|
||||
int
|
||||
NdbDictionary::Column::getPartSize() const
|
||||
{
|
||||
return m_impl.m_scale;
|
||||
}
|
||||
|
||||
void
|
||||
NdbDictionary::Column::setStripeSize(int size)
|
||||
{
|
||||
m_impl.m_length = size;
|
||||
}
|
||||
|
||||
int
|
||||
NdbDictionary::Column::getStripeSize() const
|
||||
{
|
||||
return m_impl.m_length;
|
||||
}
|
||||
|
||||
int
|
||||
NdbDictionary::Column::getSize() const{
|
||||
return m_impl.m_attrSize;
|
||||
|
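
The accessors added above expose the blob/text layout fields that NdbColumnImpl keeps in m_precision (inline size), m_scale (part size) and m_length (stripe size), plus the new character-set field m_cs. A hedged usage sketch, not taken from this diff, using only the setters introduced here (the header names are assumptions):

    #include <NdbDictionary.hpp>   // assumed public NDB API header
    #include <m_ctype.h>           // assumed MySQL header for CHARSET_INFO

    // Sketch: configure a Text column with the same layout values the new
    // NdbColumnImpl::init() uses as defaults (256 inline bytes, 8000-byte
    // parts, 4 stripes) and an explicit latin1 binary collation. Note that
    // setType() must come first, since it now re-initializes the defaults.
    static void configure_text_column(NdbDictionary::Column& col)
    {
      col.setType(NdbDictionary::Column::Text);
      col.setInlineSize(256);                 // stored in m_precision
      col.setPartSize(8000);                  // stored in m_scale
      col.setStripeSize(4);                   // stored in m_length
      col.setCharset(&my_charset_latin1_bin); // stored in m_cs
    }
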
@ -821,6 +869,8 @@ NdbDictionary::Dictionary::getNdbError() const {
|
|||
NdbOut&
|
||||
operator<<(NdbOut& out, const NdbDictionary::Column& col)
|
||||
{
|
||||
const CHARSET_INFO *cs = col.getCharset();
|
||||
const char *csname = cs ? cs->name : "?";
|
||||
out << col.getName() << " ";
|
||||
switch (col.getType()) {
|
||||
case NdbDictionary::Column::Tinyint:
|
||||
|
@ -863,10 +913,10 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
|
|||
out << "Decimal(" << col.getScale() << "," << col.getPrecision() << ")";
|
||||
break;
|
||||
case NdbDictionary::Column::Char:
|
||||
out << "Char(" << col.getLength() << ")";
|
||||
out << "Char(" << col.getLength() << ";" << csname << ")";
|
||||
break;
|
||||
case NdbDictionary::Column::Varchar:
|
||||
out << "Varchar(" << col.getLength() << ")";
|
||||
out << "Varchar(" << col.getLength() << ";" << csname << ")";
|
||||
break;
|
||||
case NdbDictionary::Column::Binary:
|
||||
out << "Binary(" << col.getLength() << ")";
|
||||
|
@ -886,7 +936,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
|
|||
break;
|
||||
case NdbDictionary::Column::Text:
|
||||
out << "Text(" << col.getInlineSize() << "," << col.getPartSize()
|
||||
<< ";" << col.getStripeSize() << ")";
|
||||
<< ";" << col.getStripeSize() << ";" << csname << ")";
|
||||
break;
|
||||
case NdbDictionary::Column::Undefined:
|
||||
out << "Undefined";
|
||||
|
|
|
@ -36,6 +36,7 @@
|
|||
#include "NdbEventOperationImpl.hpp"
|
||||
#include "NdbBlob.hpp"
|
||||
#include <AttributeHeader.hpp>
|
||||
#include <my_sys.h>
|
||||
|
||||
#define DEBUG_PRINT 0
|
||||
#define INCOMPATIBLE_VERSION -2
|
||||
|
@ -64,6 +65,7 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
|
|||
m_name = col.m_name;
|
||||
m_type = col.m_type;
|
||||
m_precision = col.m_precision;
|
||||
m_cs = col.m_cs;
|
||||
m_scale = col.m_scale;
|
||||
m_length = col.m_length;
|
||||
m_pk = col.m_pk;
|
||||
|
@ -87,10 +89,66 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
|
|||
}
|
||||
|
||||
void
|
||||
NdbColumnImpl::init()
|
||||
NdbColumnImpl::init(Type t)
|
||||
{
|
||||
// do not use default_charset_info as it may not be initialized yet
|
||||
// use binary collation until NDB tests can handle charsets
|
||||
CHARSET_INFO* default_cs = &my_charset_latin1_bin;
|
||||
m_attrId = -1;
|
||||
m_type = NdbDictionary::Column::Unsigned;
|
||||
m_type = t;
|
||||
switch (m_type) {
|
||||
case Tinyint:
|
||||
case Tinyunsigned:
|
||||
case Smallint:
|
||||
case Smallunsigned:
|
||||
case Mediumint:
|
||||
case Mediumunsigned:
|
||||
case Int:
|
||||
case Unsigned:
|
||||
case Bigint:
|
||||
case Bigunsigned:
|
||||
case Float:
|
||||
case Double:
|
||||
m_precision = 0;
|
||||
m_scale = 0;
|
||||
m_length = 1;
|
||||
m_cs = NULL;
|
||||
break;
|
||||
case Decimal:
|
||||
m_precision = 10;
|
||||
m_scale = 0;
|
||||
m_length = 1;
|
||||
m_cs = NULL;
|
||||
break;
|
||||
case Char:
|
||||
case Varchar:
|
||||
m_precision = 0;
|
||||
m_scale = 0;
|
||||
m_length = 1;
|
||||
m_cs = default_cs;
|
||||
break;
|
||||
case Binary:
|
||||
case Varbinary:
|
||||
case Datetime:
|
||||
case Timespec:
|
||||
m_precision = 0;
|
||||
m_scale = 0;
|
||||
m_length = 1;
|
||||
m_cs = NULL;
|
||||
break;
|
||||
case Blob:
|
||||
m_precision = 256;
|
||||
m_scale = 8000;
|
||||
m_length = 4;
|
||||
m_cs = NULL;
|
||||
break;
|
||||
case Text:
|
||||
m_precision = 256;
|
||||
m_scale = 8000;
|
||||
m_length = 4;
|
||||
m_cs = default_cs;
|
||||
break;
|
||||
}
|
||||
m_pk = false;
|
||||
m_nullable = false;
|
||||
m_tupleKey = false;
|
||||
|
@ -98,12 +156,10 @@ NdbColumnImpl::init()
|
|||
m_distributionKey = false;
|
||||
m_distributionGroup = false;
|
||||
m_distributionGroupBits = 8;
|
||||
m_length = 1;
|
||||
m_scale = 5;
|
||||
m_precision = 5;
|
||||
m_keyInfoPos = 0;
|
||||
m_attrSize = 4,
|
||||
m_arraySize = 1,
|
||||
// next 2 are set at run time
|
||||
m_attrSize = 0;
|
||||
m_arraySize = 0;
|
||||
m_autoIncrement = false;
|
||||
m_autoIncrementInitialValue = 1;
|
||||
m_blobTable = NULL;
|
||||
|
@ -146,52 +202,12 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const
|
|||
return false;
|
||||
}
|
||||
}
|
||||
if(m_length != col.m_length){
|
||||
if (m_precision != col.m_precision ||
|
||||
m_scale != col.m_scale ||
|
||||
m_length != col.m_length ||
|
||||
m_cs != col.m_cs) {
|
||||
return false;
|
||||
}
|
||||
|
||||
switch(m_type){
|
||||
case NdbDictionary::Column::Undefined:
|
||||
break;
|
||||
case NdbDictionary::Column::Tinyint:
|
||||
case NdbDictionary::Column::Tinyunsigned:
|
||||
case NdbDictionary::Column::Smallint:
|
||||
case NdbDictionary::Column::Smallunsigned:
|
||||
case NdbDictionary::Column::Mediumint:
|
||||
case NdbDictionary::Column::Mediumunsigned:
|
||||
case NdbDictionary::Column::Int:
|
||||
case NdbDictionary::Column::Unsigned:
|
||||
case NdbDictionary::Column::Float:
|
||||
break;
|
||||
case NdbDictionary::Column::Decimal:
|
||||
if(m_scale != col.m_scale ||
|
||||
m_precision != col.m_precision){
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case NdbDictionary::Column::Char:
|
||||
case NdbDictionary::Column::Varchar:
|
||||
case NdbDictionary::Column::Binary:
|
||||
case NdbDictionary::Column::Varbinary:
|
||||
if(m_length != col.m_length){
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case NdbDictionary::Column::Bigint:
|
||||
case NdbDictionary::Column::Bigunsigned:
|
||||
case NdbDictionary::Column::Double:
|
||||
case NdbDictionary::Column::Datetime:
|
||||
case NdbDictionary::Column::Timespec:
|
||||
break;
|
||||
case NdbDictionary::Column::Blob:
|
||||
case NdbDictionary::Column::Text:
|
||||
if (m_precision != col.m_precision ||
|
||||
m_scale != col.m_scale ||
|
||||
m_length != col.m_length) {
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (m_autoIncrement != col.m_autoIncrement){
|
||||
return false;
|
||||
}
|
||||
|
@ -209,14 +225,18 @@ NdbColumnImpl::create_psuedo(const char * name){
|
|||
if(!strcmp(name, "NDB$FRAGMENT")){
|
||||
col->setType(NdbDictionary::Column::Unsigned);
|
||||
col->m_impl.m_attrId = AttributeHeader::FRAGMENT;
|
||||
col->m_impl.m_attrSize = 4;
|
||||
col->m_impl.m_arraySize = 1;
|
||||
} else if(!strcmp(name, "NDB$ROW_COUNT")){
|
||||
col->setType(NdbDictionary::Column::Bigunsigned);
|
||||
col->m_impl.m_attrId = AttributeHeader::ROW_COUNT;
|
||||
col->m_impl.m_attrSize = 8;
|
||||
col->m_impl.m_arraySize = 1;
|
||||
} else if(!strcmp(name, "NDB$COMMIT_COUNT")){
|
||||
col->setType(NdbDictionary::Column::Bigunsigned);
|
||||
col->m_impl.m_attrId = AttributeHeader::COMMIT_COUNT;
|
||||
col->m_impl.m_attrSize = 8;
|
||||
col->m_impl.m_arraySize = 1;
|
||||
} else {
|
||||
abort();
|
||||
}
|
||||
|
@ -1127,6 +1147,7 @@ indexTypeMapping[] = {
|
|||
{ -1, -1 }
|
||||
};
|
||||
|
||||
// TODO: remove, api-kernel type codes must match now
|
||||
static const
|
||||
ApiKernelMapping
|
||||
columnTypeMapping[] = {
|
||||
|
@ -1233,9 +1254,23 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
|
|||
return 703;
|
||||
}
|
||||
col->m_extType = attrDesc.AttributeExtType;
|
||||
col->m_precision = attrDesc.AttributeExtPrecision;
|
||||
col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF);
|
||||
col->m_scale = attrDesc.AttributeExtScale;
|
||||
col->m_length = attrDesc.AttributeExtLength;
|
||||
// charset in upper half of precision
|
||||
unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16);
|
||||
// charset is defined exactly for char types
|
||||
if (col->getCharType() != (cs_number != 0)) {
|
||||
delete impl;
|
||||
return 703;
|
||||
}
|
||||
if (col->getCharType()) {
|
||||
col->m_cs = get_charset(cs_number, MYF(0));
|
||||
if (col->m_cs == NULL) {
|
||||
delete impl;
|
||||
return 743;
|
||||
}
|
||||
}
|
||||
|
||||
// translate to old kernel types and sizes
|
||||
if (! attrDesc.translateExtType()) {
|
||||
|
@ -1486,9 +1521,23 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
|
|||
getKernelConstant(col->m_type,
|
||||
columnTypeMapping,
|
||||
DictTabInfo::ExtUndefined);
|
||||
tmpAttr.AttributeExtPrecision = col->m_precision;
|
||||
tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF);
|
||||
tmpAttr.AttributeExtScale = col->m_scale;
|
||||
tmpAttr.AttributeExtLength = col->m_length;
|
||||
// charset is defined exactly for char types
|
||||
if (col->getCharType() != (col->m_cs != NULL)) {
|
||||
m_error.code = 703;
|
||||
return -1;
|
||||
}
|
||||
// primary key type check
|
||||
if (col->m_pk && ! NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) {
|
||||
m_error.code = 743;
|
||||
return -1;
|
||||
}
|
||||
// charset in upper half of precision
|
||||
if (col->getCharType()) {
|
||||
tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16);
|
||||
}
|
||||
|
||||
// DICT will ignore and recompute this
|
||||
(void)tmpAttr.translateExtType();
|
||||
|
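
Both hunks above rely on the same packing convention: the real precision stays in the low 16 bits of AttributeExtPrecision while the character set number occupies the high 16 bits (0 meaning "no charset", i.e. a non-character type). A minimal standalone sketch of that convention, with hypothetical helper names that are not part of the NDB API:

    // Hypothetical helpers restating the encoding used in parseTableInfo()
    // and createOrAlterTable() above.
    static unsigned packExtPrecision(unsigned precision, unsigned csNumber)
    {
      return (precision & 0xFFFF) | (csNumber << 16);
    }

    static unsigned unpackPrecision(unsigned extPrecision)
    {
      return extPrecision & 0xFFFF;   // kept in col->m_precision
    }

    static unsigned unpackCharsetNumber(unsigned extPrecision)
    {
      return extPrecision >> 16;      // fed to get_charset(); 0 = no charset
    }
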
@ -1950,6 +1999,14 @@ NdbDictInterface::createIndex(Ndb & ndb,
|
|||
m_error.code = 4245;
|
||||
return -1;
|
||||
}
|
||||
// index key type check
|
||||
if (it == DictTabInfo::UniqueHashIndex &&
|
||||
! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs) ||
|
||||
it == DictTabInfo::OrderedIndex &&
|
||||
! NdbSqlUtil::usable_in_ordered_index(col->m_type, col->m_cs)) {
|
||||
m_error.code = 743;
|
||||
return -1;
|
||||
}
|
||||
attributeList.id[i] = col->m_attrId;
|
||||
}
|
||||
if (it == DictTabInfo::UniqueHashIndex) {
|
||||
|
|
|
@ -52,7 +52,7 @@ public:
|
|||
NdbColumnImpl(NdbDictionary::Column &); // This is not a copy constructor
|
||||
~NdbColumnImpl();
|
||||
NdbColumnImpl& operator=(const NdbColumnImpl&);
|
||||
void init();
|
||||
void init(Type t = Unsigned);
|
||||
|
||||
int m_attrId;
|
||||
BaseString m_name;
|
||||
|
@ -60,6 +60,7 @@ public:
|
|||
int m_precision;
|
||||
int m_scale;
|
||||
int m_length;
|
||||
CHARSET_INFO * m_cs; // not const in MySQL
|
||||
|
||||
bool m_pk;
|
||||
bool m_tupleKey;
|
||||
|
@ -82,6 +83,7 @@ public:
|
|||
Uint32 m_keyInfoPos;
|
||||
Uint32 m_extType; // used by restore (kernel type in version v2x)
|
||||
bool getInterpretableType() const ;
|
||||
bool getCharType() const;
|
||||
bool getBlobType() const;
|
||||
|
||||
/**
|
||||
|
@ -446,6 +448,14 @@ NdbColumnImpl::getInterpretableType() const {
|
|||
m_type == NdbDictionary::Column::Bigunsigned);
|
||||
}
|
||||
|
||||
inline
|
||||
bool
|
||||
NdbColumnImpl::getCharType() const {
|
||||
return (m_type == NdbDictionary::Column::Char ||
|
||||
m_type == NdbDictionary::Column::Varchar ||
|
||||
m_type == NdbDictionary::Column::Text);
|
||||
}
|
||||
|
||||
inline
|
||||
bool
|
||||
NdbColumnImpl::getBlobType() const {
|
||||
|
|
|
@ -164,6 +164,7 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
|
|||
Uint32 tData;
|
||||
Uint32 tKeyInfoPosition;
|
||||
const char* aValue = aValuePassed;
|
||||
Uint32 xfrmData[1024];
|
||||
Uint32 tempData[1024];
|
||||
|
||||
if ((theStatus == OperationDefined) &&
|
||||
|
@ -224,6 +225,21 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
|
|||
m_theIndexDefined[i][2] = true;
|
||||
|
||||
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
|
||||
const char* aValueToWrite = aValue;
|
||||
|
||||
CHARSET_INFO* cs = tAttrInfo->m_cs;
|
||||
if (cs != 0) {
|
||||
// current limitation: strxfrm does not increase length
|
||||
assert(cs->strxfrm_multiply == 1);
|
||||
unsigned n =
|
||||
(*cs->coll->strnxfrm)(cs,
|
||||
(uchar*)xfrmData, sizeof(xfrmData),
|
||||
(const uchar*)aValue, sizeInBytes);
|
||||
while (n < sizeInBytes)
|
||||
((uchar*)xfrmData)[n++] = 0x20;
|
||||
aValue = (char*)xfrmData;
|
||||
}
|
||||
|
||||
Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
|
||||
Uint32 totalSizeInWords = (sizeInBytes + 3)/4;// Inc. bits in last word
|
||||
Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word
|
||||
|
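
The hunk above transforms character-type key values with the collation's strnxfrm and pads the result with 0x20 before the key is hashed and sent, so values that compare equal under the collation yield identical key bytes. A hedged, self-contained restatement of that step using the same MySQL charset API as the diff (header names are assumptions):

    #include <my_global.h>   // assumed: basic types such as uchar
    #include <m_ctype.h>     // assumed: CHARSET_INFO and collation handlers

    // Normalize 'src' (len bytes) into 'dst' (dstLen >= len), padding with
    // spaces, as the equal_impl() change above does with xfrmData.
    static void normalize_char_key(CHARSET_INFO* cs,
                                   const char* src, unsigned len,
                                   char* dst, unsigned dstLen)
    {
      unsigned n = (*cs->coll->strnxfrm)(cs, (uchar*)dst, dstLen,
                                         (const uchar*)src, len);
      while (n < len)
        ((uchar*)dst)[n++] = 0x20;   // pad to the fixed column size
    }
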
@ -314,13 +330,20 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
|
|||
if ((tOpType == InsertRequest) ||
|
||||
(tOpType == WriteRequest)) {
|
||||
if (!tAttrInfo->m_indexOnly){
|
||||
// invalid data can crash kernel
|
||||
if (cs != NULL &&
|
||||
(*cs->cset->well_formed_len)(cs,
|
||||
aValueToWrite,
|
||||
aValueToWrite + sizeInBytes,
|
||||
sizeInBytes) != sizeInBytes)
|
||||
goto equal_error4;
|
||||
Uint32 ahValue;
|
||||
Uint32 sz = totalSizeInWords;
|
||||
AttributeHeader::init(&ahValue, tAttrId, sz);
|
||||
insertATTRINFO( ahValue );
|
||||
insertATTRINFOloop((Uint32*)aValue, sizeInWords);
|
||||
insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords);
|
||||
if (bitsInLastWord != 0) {
|
||||
tData = *(Uint32*)(aValue + (sizeInWords << 2));
|
||||
tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2));
|
||||
tData = convertEndian(tData);
|
||||
tData = tData & ((1 << bitsInLastWord) - 1);
|
||||
tData = convertEndian(tData);
|
||||
|
@ -411,7 +434,10 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
|
|||
|
||||
equal_error3:
|
||||
setErrorCodeAbort(4209);
|
||||
|
||||
return -1;
|
||||
|
||||
equal_error4:
|
||||
setErrorCodeAbort(744);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -492,6 +492,17 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
|
|||
|
||||
// Insert Attribute Id into ATTRINFO part.
|
||||
const Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
|
||||
|
||||
CHARSET_INFO* cs = tAttrInfo->m_cs;
|
||||
// invalid data can crash kernel
|
||||
if (cs != NULL &&
|
||||
(*cs->cset->well_formed_len)(cs,
|
||||
aValue,
|
||||
aValue + sizeInBytes,
|
||||
sizeInBytes) != sizeInBytes) {
|
||||
setErrorCodeAbort(744);
|
||||
return -1;
|
||||
}
|
||||
#if 0
|
||||
tAttrSize = tAttrInfo->theAttrSize;
|
||||
tArraySize = tAttrInfo->theArraySize;
|
||||
|
|
|
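
The setValue() hunk above refuses character data that is not well formed for the column's character set and reports the new error 744, since invalid data can crash the kernel. A hedged sketch of the same condition as a reusable predicate (header name is an assumption):

    #include <m_ctype.h>   // assumed: CHARSET_INFO and charset handlers

    // True when the whole buffer is a valid byte sequence in cs; this is the
    // condition the diff tests before raising error 744. Columns without a
    // charset (cs == NULL) are not checked, matching the code above.
    static bool is_well_formed(CHARSET_INFO* cs, const char* buf, unsigned len)
    {
      if (cs == NULL)
        return true;
      return (*cs->cset->well_formed_len)(cs, buf, buf + len, len) == len;
    }
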
@ -60,6 +60,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
|
|||
Uint32 tData;
|
||||
Uint32 tKeyInfoPosition;
|
||||
const char* aValue = aValuePassed;
|
||||
Uint32 xfrmData[1024];
|
||||
Uint32 tempData[1024];
|
||||
|
||||
if ((theStatus == OperationDefined) &&
|
||||
|
@ -117,6 +118,21 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
|
|||
theTupleKeyDefined[i][2] = true;
|
||||
|
||||
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
|
||||
const char* aValueToWrite = aValue;
|
||||
|
||||
CHARSET_INFO* cs = tAttrInfo->m_cs;
|
||||
if (cs != 0) {
|
||||
// current limitation: strxfrm does not increase length
|
||||
assert(cs->strxfrm_multiply == 1);
|
||||
unsigned n =
|
||||
(*cs->coll->strnxfrm)(cs,
|
||||
(uchar*)xfrmData, sizeof(xfrmData),
|
||||
(const uchar*)aValue, sizeInBytes);
|
||||
while (n < sizeInBytes)
|
||||
((uchar*)xfrmData)[n++] = 0x20;
|
||||
aValue = (char*)xfrmData;
|
||||
}
|
||||
|
||||
Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
|
||||
Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Inc. bits in last word
|
||||
Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word
|
||||
|
@ -206,13 +222,20 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
|
|||
if ((tOpType == InsertRequest) ||
|
||||
(tOpType == WriteRequest)) {
|
||||
if (!tAttrInfo->m_indexOnly){
|
||||
// invalid data can crash kernel
|
||||
if (cs != NULL &&
|
||||
(*cs->cset->well_formed_len)(cs,
|
||||
aValueToWrite,
|
||||
aValueToWrite + sizeInBytes,
|
||||
sizeInBytes) != sizeInBytes)
|
||||
goto equal_error4;
|
||||
Uint32 ahValue;
|
||||
const Uint32 sz = totalSizeInWords;
|
||||
AttributeHeader::init(&ahValue, tAttrId, sz);
|
||||
insertATTRINFO( ahValue );
|
||||
insertATTRINFOloop((Uint32*)aValue, sizeInWords);
|
||||
insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords);
|
||||
if (bitsInLastWord != 0) {
|
||||
tData = *(Uint32*)(aValue + (sizeInWords << 2));
|
||||
tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2));
|
||||
tData = convertEndian(tData);
|
||||
tData = tData & ((1 << bitsInLastWord) - 1);
|
||||
tData = convertEndian(tData);
|
||||
|
@ -311,6 +334,10 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
|
|||
equal_error3:
|
||||
setErrorCodeAbort(4209);
|
||||
return -1;
|
||||
|
||||
equal_error4:
|
||||
setErrorCodeAbort(744);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
|
|
|
@ -1096,30 +1096,43 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
|
|||
theStatus == SetBound &&
|
||||
(0 <= type && type <= 4) &&
|
||||
len <= 8000) {
|
||||
// bound type
|
||||
|
||||
// insert bound type
|
||||
insertATTRINFO(type);
|
||||
// attribute header
|
||||
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
|
||||
// normalize char bound
|
||||
CHARSET_INFO* cs = tAttrInfo->m_cs;
|
||||
Uint32 xfrmData[2000];
|
||||
if (cs != NULL && aValue != NULL) {
|
||||
// current limitation: strxfrm does not increase length
|
||||
assert(cs->strxfrm_multiply == 1);
|
||||
unsigned n =
|
||||
(*cs->coll->strnxfrm)(cs,
|
||||
(uchar*)xfrmData, sizeof(xfrmData),
|
||||
(const uchar*)aValue, sizeInBytes);
|
||||
while (n < sizeInBytes)
|
||||
((uchar*)xfrmData)[n++] = 0x20;
|
||||
aValue = (char*)xfrmData;
|
||||
}
|
||||
if (len != sizeInBytes && (len != 0)) {
|
||||
setErrorCodeAbort(4209);
|
||||
return -1;
|
||||
}
|
||||
// insert attribute header
|
||||
len = aValue != NULL ? sizeInBytes : 0;
|
||||
Uint32 tIndexAttrId = tAttrInfo->m_attrId;
|
||||
Uint32 sizeInWords = (len + 3) / 4;
|
||||
AttributeHeader ah(tIndexAttrId, sizeInWords);
|
||||
insertATTRINFO(ah.m_value);
|
||||
if (len != 0) {
|
||||
// attribute data
|
||||
// insert attribute data
|
||||
if ((UintPtr(aValue) & 0x3) == 0 && (len & 0x3) == 0)
|
||||
insertATTRINFOloop((const Uint32*)aValue, sizeInWords);
|
||||
else {
|
||||
Uint32 temp[2000];
|
||||
memcpy(temp, aValue, len);
|
||||
Uint32 tempData[2000];
|
||||
memcpy(tempData, aValue, len);
|
||||
while ((len & 0x3) != 0)
|
||||
((char*)temp)[len++] = 0;
|
||||
insertATTRINFOloop(temp, sizeInWords);
|
||||
((char*)tempData)[len++] = 0;
|
||||
insertATTRINFOloop(tempData, sizeInWords);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1206,11 +1219,11 @@ NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols,
|
|||
if((r1_null ^ (unsigned)r2->isNULL())){
|
||||
return (r1_null ? -1 : 1);
|
||||
}
|
||||
Uint32 type = NdbColumnImpl::getImpl(* r1->m_column).m_extType;
|
||||
const NdbColumnImpl & col = NdbColumnImpl::getImpl(* r1->m_column);
|
||||
Uint32 size = (r1->theAttrSize * r1->theArraySize + 3) / 4;
|
||||
if(!r1_null){
|
||||
const NdbSqlUtil::Type& t = NdbSqlUtil::getType(type);
|
||||
int r = (*t.m_cmp)(d1, d2, size, size);
|
||||
const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(col.m_extType);
|
||||
int r = (*sqlType.m_cmp)(col.m_cs, d1, d2, size, size);
|
||||
if(r){
|
||||
assert(r != NdbSqlUtil::CmpUnknown);
|
||||
return r;
|
||||
|
|
|
@@ -16,7 +16,6 @@

#include <ndb_global.h>
#include <my_sys.h>

#include "NdbApiSignal.hpp"
#include "NdbImpl.hpp"

@@ -62,7 +61,6 @@ Ndb::Ndb( const char* aDataBase , const char* aSchema) {
  abort(); // old and new Ndb constructor used mixed
  theNoOfNdbObjects++;
  if (global_ndb_cluster_connection == 0) {
    my_init();
    global_ndb_cluster_connection= new Ndb_cluster_connection(ndbConnectString);
    global_ndb_cluster_connection->connect();
  }

@@ -280,6 +280,9 @@ ErrorBundle ErrorCodes[] = {
  { 739, SE, "Unsupported primary key length" },
  { 740, SE, "Nullable primary key not supported" },
  { 741, SE, "Unsupported alter table" },
  { 742, SE, "Unsupported attribute type in index" },
  { 743, SE, "Unsupported character set in table or index" },
  { 744, SE, "Character string is invalid for given character set" },
  { 241, SE, "Invalid schema object version" },
  { 283, SE, "Table is being dropped" },
  { 284, SE, "Table not defined in transaction coordinator" },

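A small, hypothetical sketch of how client code might surface the two charset-related error codes added above (743 and 744) after a failed operation. NdbError::code and NdbError::message are existing fields; the helper name and the exact include paths are assumptions.

    // Sketch: report the new charset-related NDB error codes.
    #include <NdbError.hpp>  // assumed path within the ndb/include tree
    #include <NdbOut.hpp>

    static void report_charset_error(const NdbError& err)
    {
      if (err.code == 743)
        ndbout << "unsupported character set: " << err.message << endl;
      else if (err.code == 744)
        ndbout << "string not valid in the column character set: " << err.message << endl;
      else
        ndbout << "error " << err.code << ": " << err.message << endl;
    }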
@@ -33,10 +33,10 @@ public:
  {
    assert(_name != 0);

    setType(_type);
    setLength(_length);
    setNullable(_nullable);
    setPrimaryKey(_pk);
    setLength(_length);
    setType(_type);
  }
};

@@ -434,6 +434,7 @@ extern "C" void* NdbThreadFuncRead(void* pArg)

NDB_COMMAND(acid, "acid", "acid", "acid", 65535)
{
  ndb_init();
  long nSeconds = 60;
  int rc = NDBT_OK;

@@ -610,6 +610,7 @@ extern "C" void* ThreadFunc(void*)

int main(int argc, char* argv[])
{
  ndb_init();
  Uint32 nSeconds = 1;
  Uint32 nThread = 1;

@@ -156,7 +156,14 @@ int Bank::performTransactionImpl1(int fromAccountId,

  int check;

  // Ok, all clear to do the transaction
  Uint64 transId;
  if (getNextTransactionId(transId) != NDBT_OK){
    return NDBT_FAILED;
  }

  NdbConnection* pTrans = m_ndb.startTransaction();

  if( pTrans == NULL ) {
    const NdbError err = m_ndb.getNdbError();
    if (err.status == NdbError::TemporaryError){

@@ -167,6 +174,13 @@ int Bank::performTransactionImpl1(int fromAccountId,
    return NDBT_FAILED;
  }

  Uint64 currTime;
  if (prepareGetCurrTimeOp(pTrans, currTime) != NDBT_OK){
    ERR(pTrans->getNdbError());
    m_ndb.closeTransaction(pTrans);
    return NDBT_FAILED;
  }

  /**
   * Check balance on from account
   */

@@ -205,29 +219,6 @@ int Bank::performTransactionImpl1(int fromAccountId,
    return NDBT_FAILED;
  }

  check = pTrans->execute(NoCommit);
  if( check == -1 ) {
    const NdbError err = pTrans->getNdbError();
    m_ndb.closeTransaction(pTrans);
    if (err.status == NdbError::TemporaryError){
      ERR(err);
      return NDBT_TEMPORARY;
    }
    ERR(err);
    return NDBT_FAILED;
  }

  Uint32 balanceFrom = balanceFromRec->u_32_value();
  // ndbout << "balanceFrom: " << balanceFrom << endl;

  if (((Int64)balanceFrom - amount) < 0){
    m_ndb.closeTransaction(pTrans);
    //ndbout << "Not enough funds" << endl;
    return NOT_ENOUGH_FUNDS;
  }

  Uint32 fromAccountType = fromAccountTypeRec->u_32_value();

  /**
   * Read balance on to account
   */

@@ -278,21 +269,22 @@ int Bank::performTransactionImpl1(int fromAccountId,
    return NDBT_FAILED;
  }

  Uint32 balanceFrom = balanceFromRec->u_32_value();
  // ndbout << "balanceFrom: " << balanceFrom << endl;

  if (((Int64)balanceFrom - amount) < 0){
    m_ndb.closeTransaction(pTrans);
    //ndbout << "Not enough funds" << endl;
    return NOT_ENOUGH_FUNDS;
  }

  Uint32 fromAccountType = fromAccountTypeRec->u_32_value();

  Uint32 balanceTo = balanceToRec->u_32_value();
  // ndbout << "balanceTo: " << balanceTo << endl;
  Uint32 toAccountType = toAccountTypeRec->u_32_value();

  // Ok, all clear to do the transaction
  Uint64 transId;
  if (getNextTransactionId(transId) != NDBT_OK){
    return NDBT_FAILED;
  }

  Uint64 currTime;
  if (getCurrTime(currTime) != NDBT_OK){
    return NDBT_FAILED;
  }

  /**
   * Update balance on from account
   */

@@ -1988,35 +1980,13 @@ int Bank::readSystemValue(SystemValueId sysValId, Uint64 & value){
    ERR(m_ndb.getNdbError());
    return NDBT_FAILED;
  }

  NdbOperation* pOp = pTrans->getNdbOperation("SYSTEM_VALUES");
  if (pOp == NULL) {

  if (prepareReadSystemValueOp(pTrans, sysValId, value) != NDBT_OK) {
    ERR(pTrans->getNdbError());
    m_ndb.closeTransaction(pTrans);
    return NDBT_FAILED;
  }

  check = pOp->readTuple();
  if( check == -1 ) {
    ERR(pTrans->getNdbError());
    m_ndb.closeTransaction(pTrans);
    return NDBT_FAILED;
  }

  check = pOp->equal("SYSTEM_VALUES_ID", sysValId);
  if( check == -1 ) {
    ERR(pTrans->getNdbError());
    m_ndb.closeTransaction(pTrans);
    return NDBT_FAILED;
  }

  NdbRecAttr* valueRec = pOp->getValue("VALUE");
  if( valueRec ==NULL ) {
    ERR(pTrans->getNdbError());
    m_ndb.closeTransaction(pTrans);
    return NDBT_FAILED;
  }

  check = pTrans->execute(Commit);
  if( check == -1 ) {
    ERR(pTrans->getNdbError());

@@ -2024,13 +1994,38 @@ int Bank::readSystemValue(SystemValueId sysValId, Uint64 & value){
    return NDBT_FAILED;
  }

  value = valueRec->u_64_value();

  m_ndb.closeTransaction(pTrans);
  return NDBT_OK;

}

int Bank::prepareReadSystemValueOp(NdbConnection* pTrans, SystemValueId sysValId, Uint64 & value){

  int check;

  NdbOperation* pOp = pTrans->getNdbOperation("SYSTEM_VALUES");
  if (pOp == NULL) {
    return NDBT_FAILED;
  }

  check = pOp->readTuple();
  if( check == -1 ) {
    return NDBT_FAILED;
  }

  check = pOp->equal("SYSTEM_VALUES_ID", sysValId);
  if( check == -1 ) {
    return NDBT_FAILED;
  }

  NdbRecAttr* valueRec = pOp->getValue("VALUE", (char *)&value);
  if( valueRec == NULL ) {
    return NDBT_FAILED;
  }

  return NDBT_OK;
}

int Bank::writeSystemValue(SystemValueId sysValId, Uint64 value){

  int check;

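To illustrate why the read was split into a prepare* helper above: several such prepared reads can now share one NdbConnection and one round trip. A rough sketch of the pattern, reusing only calls shown in this hunk; the surrounding function and the further operations hinted at in the comment are hypothetical.

    // Sketch: batch a system-value read with other reads in one transaction.
    NdbConnection* pTrans = m_ndb.startTransaction();
    if (pTrans == NULL)
      return NDBT_FAILED;

    Uint64 currTime;
    if (prepareGetCurrTimeOp(pTrans, currTime) != NDBT_OK) {
      m_ndb.closeTransaction(pTrans);
      return NDBT_FAILED;
    }
    // ... define further read operations on pTrans here ...

    if (pTrans->execute(NoCommit) == -1) {   // all prepared reads complete together
      ERR(pTrans->getNdbError());
      m_ndb.closeTransaction(pTrans);
      return NDBT_FAILED;
    }
    // currTime (and any other prepared results) are now valid
    m_ndb.closeTransaction(pTrans);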
@@ -2307,6 +2302,10 @@ int Bank::getCurrTime(Uint64 &time){
  return readSystemValue(CurrentTime, time);
}

int Bank::prepareGetCurrTimeOp(NdbConnection *pTrans, Uint64 &time){
  return prepareReadSystemValueOp(pTrans, CurrentTime, time);
}


int Bank::performSumAccounts(int maxSleepBetweenSums, int yield){
  if (init() != NDBT_OK)

@@ -29,7 +29,7 @@ public:

  Bank();

  int createAndLoadBank(bool overWrite);
  int createAndLoadBank(bool overWrite, int num_accounts=10);
  int dropBank();

  int performTransactions(int maxSleepBetweenTrans = 20, int yield=0);

@@ -118,6 +118,9 @@ private:
  int incCurrTime(Uint64 &value);
  int getCurrTime(Uint64 &time);

  int prepareReadSystemValueOp(NdbConnection*, SystemValueId sysValId, Uint64 &time);
  int prepareGetCurrTimeOp(NdbConnection*, Uint64 &time);

  int createTables();
  int createTable(const char* tabName);

@@ -53,7 +53,7 @@ int Bank::getNumAccountTypes(){
  return accountTypesSize;
}

int Bank::createAndLoadBank(bool ovrWrt){
int Bank::createAndLoadBank(bool ovrWrt, int num_accounts){

  m_ndb.init();
  if (m_ndb.waitUntilReady() != 0)

@@ -78,7 +78,7 @@ int Bank::createAndLoadBank(bool ovrWrt){
  if (loadAccountType() != NDBT_OK)
    return NDBT_FAILED;

  if (loadAccount(10) != NDBT_OK)
  if (loadAccount(num_accounts) != NDBT_OK)
    return NDBT_FAILED;

  if (loadSystemValues() != NDBT_OK)

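A brief, illustrative use of the widened signature above; the default of 10 accounts comes from Bank.hpp earlier in this diff, and the count of 1000 here is arbitrary.

    // Sketch: create a larger bank for a test run.
    Bank bank;
    if (bank.createAndLoadBank(true /* overwrite */, 1000) != NDBT_OK)
      return NDBT_FAILED;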
Some files were not shown because too many files have changed in this diff.