Merge mleich@bk-internal.mysql.com:/home/bk/mysql-4.1
into three.local.lan:/home/matthias/Arbeit/mysql-4.1/src

Author: unknown
Date: 2004-11-29 12:11:36 +01:00
Commit: 7b7f0e2fcf
18 changed files with 240 additions and 526 deletions


@ -1931,9 +1931,11 @@ AC_CHECK_FUNCS(alarm bcmp bfill bmove bzero chsize cuserid fchmod fcntl \
#
#
case "$target" in
*-*-aix4*)
*-*-aix4* | *-*-sco*)
# (grr) aix 4.3 has a stub for clock_gettime, (returning ENOSYS)
# and using AC_TRY_RUN is hard when cross-compiling
# We also disable for SCO for the time being, the headers for the
# thread library we use conflicts with other headers.
;;
*) AC_CHECK_FUNCS(clock_gettime)
;;
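
The added *-*-sco* case skips AC_CHECK_FUNCS(clock_gettime), so HAVE_CLOCK_GETTIME stays undefined on those targets and any timing code has to fall back to a coarser source. A minimal, hypothetical sketch of such a fallback (not code from this tree):

#include <sys/time.h>
#ifdef HAVE_CLOCK_GETTIME
#include <time.h>
#endif

/* Hypothetical helper: microseconds since the epoch, using clock_gettime()
   only when the configure check above defined HAVE_CLOCK_GETTIME, and
   gettimeofday() on the targets (aix4, sco) that the case statement skips. */
static unsigned long long now_usec(void)
{
#ifdef HAVE_CLOCK_GETTIME
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (unsigned long long) ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
#else
  struct timeval tv;
  gettimeofday(&tv, 0);
  return (unsigned long long) tv.tv_sec * 1000000ULL + tv.tv_usec;
#endif
}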


@ -1677,11 +1677,13 @@ loop:
srv_printf_innodb_monitor(stderr);
}
mutex_enter(&srv_monitor_file_mutex);
rewind(srv_monitor_file);
srv_printf_innodb_monitor(srv_monitor_file);
os_file_set_eof(srv_monitor_file);
mutex_exit(&srv_monitor_file_mutex);
if (srv_innodb_status) {
mutex_enter(&srv_monitor_file_mutex);
rewind(srv_monitor_file);
srv_printf_innodb_monitor(srv_monitor_file);
os_file_set_eof(srv_monitor_file);
mutex_exit(&srv_monitor_file_mutex);
}
if (srv_print_innodb_tablespace_monitor
&& difftime(current_time, last_table_monitor_time) > 60) {


@ -69,6 +69,6 @@ Error 1259 ZLIB: Input data corrupted
Error 1256 Uncompressed data size too large; the maximum size is 1048576 (probably, length of uncompressed data was corrupted)
drop table t1;
set @@max_allowed_packet=1048576*100;
select compress(repeat('aaaaaaaaaa', 10000000)) is null;
compress(repeat('aaaaaaaaaa', 10000000)) is null
select compress(repeat('aaaaaaaaaa', IF(XXX, 10, 10000000))) is null;
compress(repeat('aaaaaaaaaa', IF(XXX, 10, 10000000))) is null
0


@ -1,9 +1,9 @@
slave stop;
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
slave start;
start slave;
stop slave;
create table t1(n int);
start slave;


@ -2353,6 +2353,27 @@ select * from t2,t3 where t2.s = t3.s;
s s
two two
drop table t1, t2, t3;
create table t1 (a integer, b integer, index(a), index(b));
create table t2 (c integer, d integer, index(c), index(d));
insert into t1 values (1,2), (2,2), (3,2), (4,2);
insert into t2 values (1,3), (2,3), (3,4), (4,4);
explain select * from t1 left join t2 on a=c where d in (4);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref c,d d 5 const 2 Using where
1 SIMPLE t1 ALL a NULL NULL NULL 3 Using where
select * from t1 left join t2 on a=c where d in (4);
a b c d
3 2 3 4
4 2 4 4
explain select * from t1 left join t2 on a=c where d = 4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref c,d d 5 const 2 Using where
1 SIMPLE t1 ALL a NULL NULL NULL 3 Using where
select * from t1 left join t2 on a=c where d = 4;
a b c d
3 2 3 4
4 2 4 4
drop table t1, t2;
CREATE TABLE t1 (
i int(11) NOT NULL default '0',
c char(10) NOT NULL default '',
@ -2365,7 +2386,4 @@ INSERT INTO t1 VALUES (3,'c');
EXPLAIN SELECT i FROM t1 WHERE i=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 Using index
EXPLAIN SELECT i FROM t1 WHERE i=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 Using index
DROP TABLE t1;


@ -38,7 +38,10 @@ drop table t1;
#
# Bug #5497: a problem with large strings
# note that when LOW_MEMORY is set the "test" below is meaningless
#
set @@max_allowed_packet=1048576*100;
select compress(repeat('aaaaaaaaaa', 10000000)) is null;
--replace_result "''" XXX "'1'" XXX
eval select compress(repeat('aaaaaaaaaa', IF('$LOW_MEMORY', 10, 10000000))) is null;


@ -1898,6 +1898,20 @@ select * from t1,t2 where t1.s = t2.s;
select * from t2,t3 where t2.s = t3.s;
drop table t1, t2, t3;
#
# Bug #3759
# Both queries should produce identical plans and results.
#
create table t1 (a integer, b integer, index(a), index(b));
create table t2 (c integer, d integer, index(c), index(d));
insert into t1 values (1,2), (2,2), (3,2), (4,2);
insert into t2 values (1,3), (2,3), (3,4), (4,4);
explain select * from t1 left join t2 on a=c where d in (4);
select * from t1 left join t2 on a=c where d in (4);
explain select * from t1 left join t2 on a=c where d = 4;
select * from t1 left join t2 on a=c where d = 4;
drop table t1, t2;
#
# Covering index is mentioned in EXPLAIN output for const tables (bug #5333)
#
@ -1915,6 +1929,4 @@ INSERT INTO t1 VALUES (3,'c');
EXPLAIN SELECT i FROM t1 WHERE i=1;
EXPLAIN SELECT i FROM t1 WHERE i=1;
DROP TABLE t1;


@ -128,14 +128,23 @@ protected:
NdbReceiver** m_receivers; // All receivers
Uint32* m_prepared_receivers; // These are to be sent
/**
* owned by API/user thread
*/
Uint32 m_current_api_receiver;
Uint32 m_api_receivers_count;
NdbReceiver** m_api_receivers; // These are currently used by api
/**
* owned by receiver thread
*/
Uint32 m_conf_receivers_count; // NOTE needs mutex to access
NdbReceiver** m_conf_receivers; // receive thread puts them here
/**
* owned by receiver thread
*/
Uint32 m_sent_receivers_count; // NOTE needs mutex to access
NdbReceiver** m_sent_receivers; // receive thread puts them here
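
The comments added here split the receiver arrays by owning thread: m_api_receivers belongs to the API/user thread, while m_conf_receivers and m_sent_receivers are filled by the receive thread and need a mutex to access. A simplified, self-contained sketch of that hand-off, using invented names and std::mutex in place of the NDB primitives:

#include <mutex>
#include <vector>

// Hypothetical sketch (not the NDB sources): the receive thread parks
// completed receivers in a "conf" list under a lock, and the API thread
// later drains that list into its own list, which only it touches.
struct Receiver {};                        // stand-in for NdbReceiver

struct ScanReceiverLists {
  std::mutex lock;                         // guards conf_receivers only
  std::vector<Receiver*> conf_receivers;   // written by receive thread
  std::vector<Receiver*> api_receivers;    // owned by API/user thread

  // called from the receive thread when a batch is complete
  void receiver_delivered(Receiver* r) {
    std::lock_guard<std::mutex> g(lock);
    conf_receivers.push_back(r);
  }

  // called from the API/user thread before consuming rows
  void take_delivered() {
    std::lock_guard<std::mutex> g(lock);
    api_receivers.insert(api_receivers.end(),
                         conf_receivers.begin(), conf_receivers.end());
    conf_receivers.clear();
  }
};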


@ -56,7 +56,7 @@ NdbConnection::receiveSCAN_TABREF(NdbApiSignal* aSignal){
const ScanTabRef * ref = CAST_CONSTPTR(ScanTabRef, aSignal->getDataPtr());
if(checkState_TransId(&ref->transId1)){
theScanningOp->theError.code = ref->errorCode;
theScanningOp->setErrorCode(ref->errorCode);
theScanningOp->execCLOSE_SCAN_REP();
if(!ref->closeNeeded){
return 0;


@ -277,9 +277,9 @@ NdbScanOperation::fix_receivers(Uint32 parallel){
void
NdbScanOperation::receiver_delivered(NdbReceiver* tRec){
if(theError.code == 0){
if(DEBUG_NEXT_RESULT)
ndbout_c("receiver_delivered");
if(DEBUG_NEXT_RESULT)
ndbout_c("receiver_delivered");
Uint32 idx = tRec->m_list_index;
Uint32 last = m_sent_receivers_count - 1;
if(idx != last){
@ -494,6 +494,9 @@ int NdbScanOperation::nextResult(bool fetchAllowed, bool forceSend)
Uint32 nodeId = theNdbCon->theDBnode;
TransporterFacade* tp = TransporterFacade::instance();
Guard guard(tp->theMutexPtr);
if(theError.code)
return -1;
Uint32 seq = theNdbCon->theNodeSequence;
if(seq == tp->getNodeSequence(nodeId) && send_next_scan(idx, false,
forceSend) == 0){
@ -699,10 +702,8 @@ void NdbScanOperation::closeScan(bool forceSend)
void
NdbScanOperation::execCLOSE_SCAN_REP(){
m_api_receivers_count = 0;
m_conf_receivers_count = 0;
m_sent_receivers_count = 0;
m_current_api_receiver = m_ordered ? theParallelism : 0;
}
void NdbScanOperation::release()
@ -1348,6 +1349,8 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
if(DEBUG_NEXT_RESULT) ndbout_c("performing fetch...");
TransporterFacade* tp = TransporterFacade::instance();
Guard guard(tp->theMutexPtr);
if(theError.code)
return -1;
Uint32 seq = theNdbCon->theNodeSequence;
Uint32 nodeId = theNdbCon->theDBnode;
if(seq == tp->getNodeSequence(nodeId) &&
@ -1362,6 +1365,13 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
continue;
}
if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
setErrorCode(4028);
return -1;
}
if(theError.code){
setErrorCode(theError.code);
if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
return -1;
}
@ -1371,11 +1381,9 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
memcpy(arr, m_conf_receivers, u_last * sizeof(char*));
if(DEBUG_NEXT_RESULT) ndbout_c("sent: %d recv: %d", tmp, u_last);
if(theError.code){
setErrorCode(theError.code);
if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
return -1;
}
} else {
setErrorCode(4028);
return -1;
}
} else {
if(DEBUG_NEXT_RESULT) ndbout_c("return 2");
@ -1515,6 +1523,13 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend){
}
}
if(theError.code)
{
m_api_receivers_count = 0;
m_current_api_receiver = m_ordered ? theParallelism : 0;
}
/**
* move all conf'ed into api
* so that send_next_scan can check if they needs to be closed
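
Several hunks above add the same guard: before requesting more rows, check theError.code while holding the transporter mutex, and call setErrorCode(4028) when the node sequence no longer matches (the data node went away). A compact, hypothetical sketch of that pattern with invented types:

#include <mutex>

// Hypothetical sketch (types and names invented): look for an already
// recorded error under the transporter mutex before sending the next
// fetch, and treat a changed node sequence number as a node failure.
struct Transporter { std::mutex lock; unsigned node_sequence; };
struct Scan {
  int error_code = 0;
  unsigned node_sequence = 0;
  int send_next_batch() { return 0; }   // placeholder for the real send
};

int fetch_more_rows(Scan& scan, Transporter& tp) {
  std::lock_guard<std::mutex> g(tp.lock);
  if (scan.error_code != 0)             // receive thread already failed the scan
    return -1;
  if (tp.node_sequence != scan.node_sequence) {
    scan.error_code = 4028;             // node failure, as in the hunks above
    return -1;
  }
  return scan.send_next_batch();
}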


@ -241,11 +241,12 @@ ErrorBundle ErrorCodes[] = {
{ 877, AE, "877" },
{ 878, AE, "878" },
{ 879, AE, "879" },
{ 880, AE, "Tried to read too much - too many getValue calls" },
{ 884, AE, "Stack overflow in interpreter" },
{ 885, AE, "Stack underflow in interpreter" },
{ 886, AE, "More than 65535 instructions executed in interpreter" },
{ 897, AE, "Update attempt of primary key via ndbcluster internal api (if this occurs via the MySQL server it is a bug, please report)" },
{ 4256, AE, "Must call Ndb::init() before this function" },
{ 880, AE, "Tried to read too much - too many getValue calls" },
{ 4257, AE, "Tried to read too much - too many getValue calls" },
/**


@ -90,11 +90,59 @@ int runLoadAllTables(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
char orderedPkIdxName[255];
int createOrderedPkIndex(NDBT_Context* ctx, NDBT_Step* step){
const NdbDictionary::Table* pTab = ctx->getTab();
Ndb* pNdb = GETNDB(step);
// Create index
BaseString::snprintf(orderedPkIdxName, sizeof(orderedPkIdxName),
"IDC_O_PK_%s", pTab->getName());
NdbDictionary::Index pIdx(orderedPkIdxName);
pIdx.setTable(pTab->getName());
pIdx.setType(NdbDictionary::Index::OrderedIndex);
pIdx.setLogging(false);
for (int c = 0; c< pTab->getNoOfColumns(); c++){
const NdbDictionary::Column * col = pTab->getColumn(c);
if(col->getPrimaryKey()){
pIdx.addIndexColumn(col->getName());
}
}
if (pNdb->getDictionary()->createIndex(pIdx) != 0){
ndbout << "FAILED! to create index" << endl;
const NdbError err = pNdb->getDictionary()->getNdbError();
ERR(err);
return NDBT_FAILED;
}
return NDBT_OK;
}
int createOrderedPkIndex_Drop(NDBT_Context* ctx, NDBT_Step* step){
const NdbDictionary::Table* pTab = ctx->getTab();
Ndb* pNdb = GETNDB(step);
// Drop index
if (pNdb->getDictionary()->dropIndex(orderedPkIdxName,
pTab->getName()) != 0){
ndbout << "FAILED! to drop index" << endl;
ERR(pNdb->getDictionary()->getNdbError());
return NDBT_FAILED;
}
return NDBT_OK;
}
int runScanReadRandomTable(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int parallelism = ctx->getProperty("Parallelism", 240);
int abort = ctx->getProperty("AbortProb");
int abort = ctx->getProperty("AbortProb", 5);
int i = 0;
while (i<loops) {
@ -218,7 +266,7 @@ int runScanRead(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int parallelism = ctx->getProperty("Parallelism", 240);
int abort = ctx->getProperty("AbortProb");
int abort = ctx->getProperty("AbortProb", 5);
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
@ -232,11 +280,58 @@ int runScanRead(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
int runRandScanRead(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int parallelism = ctx->getProperty("Parallelism", 240);
int abort = ctx->getProperty("AbortProb", 5);
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
while (i<loops && !ctx->isTestStopped()) {
g_info << i << ": ";
NdbOperation::LockMode lm = (NdbOperation::LockMode)(rand() % 3);
if (hugoTrans.scanReadRecords(GETNDB(step),
records, abort, parallelism,
lm) != 0){
return NDBT_FAILED;
}
i++;
}
return NDBT_OK;
}
int runScanReadIndex(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int parallelism = ctx->getProperty("Parallelism", 240);
int abort = ctx->getProperty("AbortProb", 5);
const NdbDictionary::Index * pIdx =
GETNDB(step)->getDictionary()->getIndex(orderedPkIdxName,
ctx->getTab()->getName());
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
while (pIdx && i<loops && !ctx->isTestStopped()) {
g_info << i << ": ";
bool sort = (rand() % 100) > 50 ? true : false;
NdbOperation::LockMode lm = (NdbOperation::LockMode)(rand() % 3);
if (hugoTrans.scanReadRecords(GETNDB(step), pIdx,
records, abort, parallelism,
lm,
sort) != 0){
return NDBT_FAILED;
}
i++;
}
return NDBT_OK;
}
int runScanReadCommitted(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int parallelism = ctx->getProperty("Parallelism", 240);
int abort = ctx->getProperty("AbortProb");
int abort = ctx->getProperty("AbortProb", 5);
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
@ -425,7 +520,7 @@ int runScanUpdate(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int parallelism = ctx->getProperty("Parallelism", 1);
int abort = ctx->getProperty("AbortProb");
int abort = ctx->getProperty("AbortProb", 5);
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
while (i<loops) {
@ -465,7 +560,7 @@ int runScanUpdate2(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int parallelism = ctx->getProperty("Parallelism", 240);
int abort = ctx->getProperty("AbortProb");
int abort = ctx->getProperty("AbortProb", 5);
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
while (i<loops) {
@ -1080,7 +1175,30 @@ TESTCASE("ScanRead488",
"When this limit is exceeded the scan will be aborted with errorcode "\
"488."){
INITIALIZER(runLoadTable);
STEPS(runScanRead, 70);
STEPS(runRandScanRead, 70);
FINALIZER(runClearTable);
}
TESTCASE("ScanRead488O",
"Verify scan requirement: It's only possible to have 11 concurrent "\
"scans per fragment running in Ndb kernel at the same time. "\
"When this limit is exceeded the scan will be aborted with errorcode "\
"488."){
INITIALIZER(createOrderedPkIndex);
INITIALIZER(runLoadTable);
STEPS(runScanReadIndex, 70);
FINALIZER(createOrderedPkIndex_Drop);
FINALIZER(runClearTable);
}
TESTCASE("ScanRead488_Mixed",
"Verify scan requirement: It's only possible to have 11 concurrent "\
"scans per fragment running in Ndb kernel at the same time. "\
"When this limit is exceeded the scan will be aborted with errorcode "\
"488."){
INITIALIZER(createOrderedPkIndex);
INITIALIZER(runLoadTable);
STEPS(runRandScanRead, 50);
STEPS(runScanReadIndex, 50);
FINALIZER(createOrderedPkIndex_Drop);
FINALIZER(runClearTable);
}
TESTCASE("ScanRead488Timeout",


@ -222,6 +222,7 @@ run_scan(){
int sum_time= 0;
int sample_rows = 0;
int tot_rows = 0;
NDB_TICKS sample_start = NdbTick_CurrentMillisecond();
Uint32 tot = g_paramters[P_ROWS].value;
@ -296,7 +297,7 @@ run_scan(){
break;
}
}
if(g_paramters[P_RESET].value == 1)
if(g_paramters[P_RESET].value == 2)
goto execute;
}
assert(pOp);
@ -330,6 +331,9 @@ run_scan(){
}
assert(check == 0);
if(g_paramters[P_RESET].value == 1)
g_paramters[P_RESET].value = 2;
for(int i = 0; i<g_table->getNoOfColumns(); i++){
pOp->getValue(i);
}
@ -364,6 +368,7 @@ execute:
int time_passed= (int)(stop - start1);
sample_rows += rows;
sum_time+= time_passed;
tot_rows+= rows;
if(sample_rows >= tot)
{
@ -375,8 +380,8 @@ execute:
sample_start = stop;
}
}
g_err.println("Avg time: %d ms = %u rows/sec", sum_time/iter,
(1000*tot*iter)/sum_time);
g_err.println("Avg time: %d ms = %u rows/sec", sum_time/tot_rows,
(1000*tot_rows)/sum_time);
return 0;
}
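
The final hunk derives the summary from tot_rows, the rows actually returned, instead of assuming tot rows for every iteration. A tiny, self-contained illustration of the corrected arithmetic, with made-up sample numbers:

#include <cstdio>

int main()
{
  // made-up samples: {rows returned, milliseconds} per scan iteration
  const long samples[][2] = { {1000, 480}, {720, 500}, {1000, 470} };
  long tot_rows = 0, sum_time = 0;
  for (const auto& s : samples) {
    tot_rows += s[0];
    sum_time += s[1];
  }
  // rows/sec = 1000 * rows / milliseconds, using the rows actually returned
  std::printf("%ld rows in %ld ms = %ld rows/sec\n",
              tot_rows, sum_time, (1000 * tot_rows) / sum_time);
  return 0;
}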


@ -222,6 +222,14 @@ max-time: 500
cmd: testScan
args: -n ScanRead488 -l 10 T6
max-time: 500
cmd: testScan
args: -n ScanRead488O -l 10 T6
max-time: 1000
cmd: testScan
args: -n ScanRead488_Mixed -l 10 T6
max-time: 500
cmd: testScan
args: -n ScanRead488Timeout -l 10 T6
@ -478,493 +486,13 @@ args: -n UpdateWithoutValues T6
#cmd: testInterpreter
#args: T1
#
max-time: 1500
max-time: 150000
cmd: testOperations
args: -n ReadRead
args:
max-time: 1500
cmd: testOperations
args: -n ReadReadEx
max-time: 1500
cmd: testOperations
args: -n ReadInsert
max-time: 1500
cmd: testOperations
args: -n ReadUpdate
max-time: 1500
cmd: testOperations
args: -n ReadDelete
max-time: 1500
cmd: testOperations
args: -n FReadRead
max-time: 1500
cmd: testOperations
args: -n FReadReadEx
max-time: 1500
cmd: testOperations
args: -n FReadInsert
max-time: 1500
cmd: testOperations
args: -n FReadUpdate
max-time: 1500
cmd: testOperations
args: -n FReadDelete
max-time: 1500
cmd: testOperations
args: -n ReadExRead
max-time: 1500
cmd: testOperations
args: -n ReadExReadEx
max-time: 1500
cmd: testOperations
args: -n ReadExInsert
max-time: 1500
cmd: testOperations
args: -n ReadExUpdate
max-time: 1500
cmd: testOperations
args: -n ReadExDelete
max-time: 1500
cmd: testOperations
args: -n InsertRead
max-time: 1500
cmd: testOperations
args: -n InsertReadEx
max-time: 1500
cmd: testOperations
args: -n InsertInsert
max-time: 1500
cmd: testOperations
args: -n InsertUpdate
max-time: 1500
cmd: testOperations
args: -n InsertDelete
max-time: 1500
cmd: testOperations
args: -n UpdateRead
max-time: 1500
cmd: testOperations
args: -n UpdateReadEx
max-time: 1500
cmd: testOperations
args: -n UpdateInsert
max-time: 1500
cmd: testOperations
args: -n UpdateUpdate
max-time: 1500
cmd: testOperations
args: -n UpdateDelete
max-time: 1500
cmd: testOperations
args: -n DeleteRead
max-time: 1500
cmd: testOperations
args: -n DeleteReadEx
max-time: 1500
cmd: testOperations
args: -n DeleteInsert
max-time: 1500
cmd: testOperations
args: -n DeleteUpdate
max-time: 1500
cmd: testOperations
args: -n DeleteDelete
max-time: 1500
cmd: testOperations
args: -n ReadSimpleRead
max-time: 1500
cmd: testOperations
args: -n ReadDirtyRead
max-time: 1500
cmd: testOperations
args: -n FReadSimpleRead
max-time: 1500
cmd: testOperations
args: -n FReadDirtyRead
max-time: 1500
cmd: testOperations
args: -n ReadExSimpleRead
max-time: 1500
cmd: testOperations
args: -n ReadExDirtyRead
max-time: 1500
cmd: testOperations
args: -n InsertSimpleRead
max-time: 1500
cmd: testOperations
args: -n InsertDirtyRead
max-time: 1500
cmd: testOperations
args: -n UpdateSimpleRead
max-time: 1500
cmd: testOperations
args: -n UpdateDirtyRead
max-time: 1500
cmd: testOperations
args: -n DeleteSimpleRead
max-time: 1500
cmd: testOperations
args: -n DeleteDirtyRead
max-time: 1500
cmd: testTransactions
args: -n ReadRead
max-time: 1500
cmd: testTransactions
args: -n ReadReadEx
max-time: 1500
cmd: testTransactions
args: -n ReadInsert
max-time: 1500
cmd: testTransactions
args: -n ReadUpdate
max-time: 1500
cmd: testTransactions
args: -n ReadDelete
max-time: 1500
cmd: testTransactions
args: -n ReadExRead
max-time: 1500
cmd: testTransactions
args: -n ReadExReadEx
max-time: 1500
cmd: testTransactions
args: -n ReadExInsert
max-time: 1500
cmd: testTransactions
args: -n ReadExUpdate
max-time: 1500
cmd: testTransactions
args: -n ReadExDelete
max-time: 1500
cmd: testTransactions
args: -n InsertRead
max-time: 1500
cmd: testTransactions
args: -n InsertReadEx
max-time: 1500
cmd: testTransactions
args: -n InsertInsert
max-time: 1500
cmd: testTransactions
args: -n InsertUpdate
max-time: 1500
cmd: testTransactions
args: -n InsertDelete
max-time: 1500
cmd: testTransactions
args: -n UpdateRead
max-time: 1500
cmd: testTransactions
args: -n UpdateReadEx
max-time: 1500
cmd: testTransactions
args: -n UpdateInsert
max-time: 1500
cmd: testTransactions
args: -n UpdateUpdate
max-time: 1500
cmd: testTransactions
args: -n UpdateDelete
max-time: 1500
cmd: testTransactions
args: -n DeleteRead
max-time: 1500
cmd: testTransactions
args: -n DeleteReadEx
max-time: 1500
cmd: testTransactions
args: -n DeleteInsert
max-time: 1500
cmd: testTransactions
args: -n DeleteUpdate
max-time: 1500
cmd: testTransactions
args: -n DeleteDelete
max-time: 1500
cmd: testTransactions
args: -n ReadSimpleRead
max-time: 1500
cmd: testTransactions
args: -n ReadDirtyRead
max-time: 1500
cmd: testTransactions
args: -n ReadExSimpleRead
max-time: 1500
cmd: testTransactions
args: -n ReadExDirtyRead
max-time: 1500
cmd: testTransactions
args: -n InsertSimpleRead
max-time: 1500
cmd: testTransactions
args: -n InsertDirtyRead
max-time: 1500
cmd: testTransactions
args: -n UpdateSimpleRead
max-time: 1500
cmd: testTransactions
args: -n UpdateDirtyRead
max-time: 1500
cmd: testTransactions
args: -n DeleteSimpleRead
max-time: 1500
cmd: testTransactions
args: -n DeleteDirtyRead
max-time: 1500
cmd: testTransactions
args: -n ReadScan
max-time: 1500
cmd: testTransactions
args: -n ReadScanHl
max-time: 1500
cmd: testTransactions
args: -n ReadScanEx
max-time: 1500
cmd: testTransactions
args: -n ScanRead
max-time: 1500
cmd: testTransactions
args: -n ScanReadEx
max-time: 1500
cmd: testTransactions
args: -n ScanSimpleRead
max-time: 1500
cmd: testTransactions
args: -n ScanDirtyRead
max-time: 1500
cmd: testTransactions
args: -n ScanInsert
max-time: 1500
cmd: testTransactions
args: -n ScanUpdate
max-time: 1500
cmd: testTransactions
args: -n ScanDelete
max-time: 1500
cmd: testTransactions
args: -n ScanScan
max-time: 1500
cmd: testTransactions
args: -n ScanScanHl
max-time: 1500
cmd: testTransactions
args: -n ScanScanEx
max-time: 1500
cmd: testTransactions
args: -n ScanHlRead
max-time: 1500
cmd: testTransactions
args: -n ScanHlReadEx
max-time: 1500
cmd: testTransactions
args: -n ScanHlSimpleRead
max-time: 1500
cmd: testTransactions
args: -n ScanHlDirtyRead
max-time: 1500
cmd: testTransactions
args: -n ScanHlInsert
max-time: 1500
cmd: testTransactions
args: -n ScanHlUpdate
max-time: 1500
cmd: testTransactions
args: -n ScanHlDelete
max-time: 1500
cmd: testTransactions
args: -n ScanHlScan
max-time: 1500
cmd: testTransactions
args: -n ScanHlScanHl
max-time: 1500
cmd: testTransactions
args: -n ScanHlScanEx
max-time: 1500
cmd: testTransactions
args: -n ScanExRead
max-time: 1500
cmd: testTransactions
args: -n ScanExReadEx
max-time: 1500
cmd: testTransactions
args: -n ScanExSimpleRead
max-time: 1500
cmd: testTransactions
args: -n ScanExDirtyRead
max-time: 1500
cmd: testTransactions
args: -n ScanExInsert
max-time: 1500
cmd: testTransactions
args: -n ScanExUpdate
max-time: 1500
cmd: testTransactions
args: -n ScanExDelete
max-time: 1500
cmd: testTransactions
args: -n ScanExScan
max-time: 1500
cmd: testTransactions
args: -n ScanExScanHl
max-time: 1500
cmd: testTransactions
args: -n ScanExScanEx
max-time: 1500
cmd: testTransactions
args: -n ReadExScan
max-time: 1500
cmd: testTransactions
args: -n ReadExScanHl
max-time: 1500
cmd: testTransactions
args: -n ReadExScanEx
max-time: 1500
cmd: testTransactions
args: -n InsertScan
max-time: 1500
cmd: testTransactions
args: -n InsertScanHl
max-time: 1500
cmd: testTransactions
args: -n InsertScanEx
max-time: 1500
cmd: testTransactions
args: -n UpdateScan
max-time: 1500
cmd: testTransactions
args: -n UpdateScanHl
max-time: 1500
cmd: testTransactions
args: -n UpdateScanEx
max-time: 1500
cmd: testTransactions
args: -n DeleteScan
max-time: 1500
cmd: testTransactions
args: -n DeleteScanHl
max-time: 1500
max-time: 150000
cmd: testTransactions
args: -n DeleteScanEx
args:
max-time: 1500
cmd: testRestartGci


@ -115,7 +115,7 @@ then
password=$old_style_password
fi
cmd="$bindir/mysql -f --user=$user --host=$host"
cmd="$bindir/mysql --no-defaults --force --user=$user --host=$host"
if test ! -z "$password" ; then
cmd="$cmd --password=$password"
fi


@ -735,7 +735,7 @@ void Item_func_interval::fix_length_and_dec()
maybe_null= 0;
max_length= 2;
used_tables_cache|= row->used_tables();
not_null_tables_cache&= row->not_null_tables();
not_null_tables_cache= row->not_null_tables();
with_sum_func= with_sum_func || row->with_sum_func;
const_item_cache&= row->const_item();
}


@ -1820,7 +1820,8 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname,
position is at the beginning of the file, and will read the
"signature" and then fast-forward to the last position read.
*/
if (thread_mask & SLAVE_SQL) {
if (thread_mask & SLAVE_SQL)
{
my_b_seek(mi->rli.cur_log, (my_off_t) 0);
}
DBUG_RETURN(0);