Merge trift2.:/MySQL/M50/mysql-5.0

into  trift2.:/MySQL/M50/push-5.0
unknown 2007-02-21 16:21:12 +01:00
commit e128a436ff
52 changed files with 702 additions and 670 deletions

View file

@ -0,0 +1,43 @@
[ndbd default]
NoOfReplicas= 1
MaxNoOfConcurrentTransactions= 64
MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations
DataMemory= CHOOSE_DataMemory
IndexMemory= CHOOSE_IndexMemory
Diskless= CHOOSE_Diskless
TimeBetweenWatchDogCheck= 30000
DataDir= CHOOSE_FILESYSTEM
MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
TimeBetweenGlobalCheckpoints= 500
NoOfFragmentLogFiles= 3
#
# Increase deadlock-timeout to cater for slow test-machines
# (possibly running several tests in parallel)
#
#TransactionDeadlockDetectionTimeout= 7500
[ndbd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address
[ndb_mgmd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address
DataDir= CHOOSE_FILESYSTEM #
PortNumber= CHOOSE_PORT_MGM
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]

View file

@ -1,5 +1,6 @@
[ndbd default]
NoOfReplicas= 2
MaxNoOfConcurrentTransactions= 64
MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations
DataMemory= CHOOSE_DataMemory
IndexMemory= CHOOSE_IndexMemory
@ -7,6 +8,15 @@ Diskless= CHOOSE_Diskless
TimeBetweenWatchDogCheck= 30000
DataDir= CHOOSE_FILESYSTEM
MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
TimeBetweenGlobalCheckpoints= 500
NoOfFragmentLogFiles= 3
#
# Increase deadlock-timeout to cater for slow test-machines
# (possibly running several tests in parallel)
#
#TransactionDeadlockDetectionTimeout= 7500
[ndbd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address

View file

@ -0,0 +1,52 @@
[ndbd default]
NoOfReplicas= 2
MaxNoOfConcurrentTransactions= 64
MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations
DataMemory= CHOOSE_DataMemory
IndexMemory= CHOOSE_IndexMemory
Diskless= CHOOSE_Diskless
TimeBetweenWatchDogCheck= 30000
DataDir= CHOOSE_FILESYSTEM
MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
TimeBetweenGlobalCheckpoints= 500
NoOfFragmentLogFiles= 3
#
# Increase deadlock-timeout to cater for slow test-machines
# (possibly running several tests in parallel)
#
#TransactionDeadlockDetectionTimeout= 7500
[ndbd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address
[ndbd]
HostName= CHOOSE_HOSTNAME_2 # hostname is a valid network address
[ndbd]
HostName= CHOOSE_HOSTNAME_3 # hostname is a valid network address
[ndbd]
HostName= CHOOSE_HOSTNAME_4 # hostname is a valid network address
[ndb_mgmd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address
DataDir= CHOOSE_FILESYSTEM #
PortNumber= CHOOSE_PORT_MGM
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]

View file

@ -63,12 +63,17 @@ stop_ndb=
initial_ndb=
status_ndb=
ndb_diskless=0
ndbd_nodes=2
relative_config_data_dir=
opt_core=
ndb_no_ord=512
ndb_no_attr=2048
ndb_con_op=105000
ndb_dmem=80M
ndb_imem=24M
VERBOSE=100
NDB_MGM_EXTRA_OPTS=
NDB_MGMD_EXTRA_OPTS=
NDBD_EXTRA_OPTS=
@ -89,6 +94,9 @@ while test $# -gt 0; do
--debug*)
flags_ndb="$flags_ndb $1"
;;
--ndbd-nodes=*)
ndbd_nodes=`echo "$1" | sed -e "s;--ndbd-nodes=;;"`
;;
--status)
status_ndb=1
;;
@ -104,6 +112,9 @@ while test $# -gt 0; do
--data-dir=*)
fsdir=`echo "$1" | sed -e "s;--data-dir=;;"`
;;
--relative-config-data-dir)
relative_config_data_dir=1
;;
--port=*)
port=`echo "$1" | sed -e "s;--port=;;"`
;;
@ -122,6 +133,12 @@ while test $# -gt 0; do
--character-sets-dir=*)
CHARSETSDIR=`echo "$1" | sed -e "s;--character-sets-dir=;;"`
;;
--core)
opt_core="--core"
;;
--verbose=*)
VERBOSE=`echo "$1" | sed -e "s;--verbose=;;"`
;;
-- ) shift; break ;;
--* ) $ECHO "Unrecognized option: $1"; exit 1 ;;
* ) break ;;
@ -130,9 +147,10 @@ while test $# -gt 0; do
done
fs_ndb="$fsdir/ndbcluster-$port"
config_ini=ndb/ndb_config_${ndbd_nodes}_node.ini
NDB_HOME=
if [ ! -x "$fsdir" ]; then
if [ ! -d "$fsdir" ]; then
echo "$fsdir missing"
exit 1
fi
@ -148,11 +166,15 @@ if [ ! -x "$exec_waiter" ]; then
echo "$exec_waiter missing"
exit 1
fi
if [ ! -f "$config_ini" ]; then
echo "$config_ini missing, unsupported number of nodes"
exit 1
fi
exec_mgmtclient="$exec_mgmtclient --no-defaults $NDB_MGM_EXTRA_OPTS"
exec_mgmtsrvr="$exec_mgmtsrvr --no-defaults $NDB_MGMD_EXTRA_OPTS"
exec_ndb="$exec_ndb --no-defaults $NDBD_EXTRA_OPTS --character-sets-dir=$CHARSETSDIR"
exec_waiter="$exec_waiter --no-defaults"
exec_mgmtclient="$exec_mgmtclient --no-defaults $opt_core $NDB_MGM_EXTRA_OPTS"
exec_mgmtsrvr="$exec_mgmtsrvr --no-defaults $opt_core $NDB_MGMD_EXTRA_OPTS"
exec_ndb="$exec_ndb --no-defaults $opt_core $NDBD_EXTRA_OPTS --character-sets-dir=$CHARSETSDIR"
exec_waiter="$exec_waiter --no-defaults $opt_core"
ndb_host="localhost"
ndb_mgmd_port=$port
@ -196,18 +218,24 @@ fi
# Start management server as daemon
# Edit file system path and ports in config file
if [ $relative_config_data_dir ] ; then
config_fs_ndb="."
else
config_fs_ndb=$fs_ndb
fi
if [ $initial_ndb ] ; then
rm -f $fs_ndb/ndb_* 2>&1 | cat > /dev/null
rm -rf $fs_ndb/ndb_* 2>&1 | cat > /dev/null
sed \
-e s,"CHOOSE_MaxNoOfAttributes","$ndb_no_attr",g \
-e s,"CHOOSE_MaxNoOfOrderedIndexes","$ndb_no_ord",g \
-e s,"CHOOSE_MaxNoOfConcurrentOperations","$ndb_con_op",g \
-e s,"CHOOSE_DataMemory","$ndb_dmem",g \
-e s,"CHOOSE_IndexMemory","$ndb_imem",g \
-e s,"CHOOSE_Diskless","$ndb_diskless",g \
-e s,"CHOOSE_HOSTNAME_".*,"$ndb_host",g \
-e s,"CHOOSE_FILESYSTEM","$fs_ndb",g \
-e s,"CHOOSE_FILESYSTEM","$config_fs_ndb",g \
-e s,"CHOOSE_PORT_MGM","$ndb_mgmd_port",g \
< ndb/ndb_config_2_node.ini \
< "$config_ini" \
> "$fs_ndb/config.ini"
fi
@ -218,7 +246,7 @@ if ( cd "$fs_ndb" ; $exec_mgmtsrvr -f config.ini ) ; then :; else
echo "Unable to start $exec_mgmtsrvr from `pwd`"
exit 1
fi
if sleep_until_file_created $fs_ndb/ndb_3.pid 120
if sleep_until_file_created $fs_ndb/ndb_`expr $ndbd_nodes + 1`.pid 120
then :; else
exit 1
fi
@ -226,38 +254,43 @@ cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile"
# Start database node
echo "Starting ndbd"
( cd "$fs_ndb" ; $exec_ndb $flags_ndb & )
if sleep_until_file_created $fs_ndb/ndb_1.pid 120
then :; else
stop_default_ndbcluster
exit 1
fi
cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile"
# Start database node
echo "Starting ndbd"
( cd "$fs_ndb" ; $exec_ndb $flags_ndb & )
if sleep_until_file_created $fs_ndb/ndb_2.pid 120
then :; else
stop_default_ndbcluster
exit 1
fi
cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile"
id=1
while [ $id -le $ndbd_nodes ]
do
if [ `expr $VERBOSE \> 1` = 1 ] ; then
echo "Starting ndbd $id($ndbd_nodes)"
fi
( cd "$fs_ndb" ; $exec_ndb $flags_ndb & )
if sleep_until_file_created $fs_ndb/ndb_${id}.pid 120
then :; else
stop_default_ndbcluster
exit 1
fi
cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile"
id=`expr $id + 1`
done
# test if Ndb Cluster starts properly
echo "Waiting for NDB data nodes to start..."
if ( $exec_waiter ) | grep "NDBT_ProgramExit: 0 - OK" > /dev/null 2>&1; then :; else
echo "Ndbcluster startup failed"
if [ `expr $VERBOSE \> 1` = 1 ] ; then
echo "Waiting for NDB data nodes to start..."
fi
if ( $exec_waiter ) | grep "NDBT_ProgramExit: 0 - OK" > /dev/null 2>&1 ; then :; else
if [ `expr $VERBOSE \> 0` = 1 ] ; then
echo "Ndbcluster startup failed"
fi
stop_default_ndbcluster
exit 1
fi
if [ `expr $VERBOSE \> 1` = 1 ] ; then
echo "Ok"
fi
cat `find "$fs_ndb" -name 'ndb_*.pid'` > $fs_ndb/$pidfile
status_ndbcluster
if [ `expr $VERBOSE \> 2` = 1 ] ; then
status_ndbcluster
fi
}
status_ndbcluster() {

View file

@ -540,13 +540,13 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_ABORT_ALL_REF 446
#define GSN_ABORT_ALL_CONF 447
#define GSN_STATISTICS_REQ 448
/* 448 unused - formerly GSN_STATISTICS_REQ */
#define GSN_STOP_ORD 449
#define GSN_TAMPER_ORD 450
#define GSN_SET_VAR_REQ 451
#define GSN_SET_VAR_CONF 452
#define GSN_SET_VAR_REF 453
#define GSN_STATISTICS_CONF 454
/* 451 unused - formerly GSN_SET_VAR_REQ */
/* 452 unused - formerly GSN_SET_VAR_CONF */
/* 453 unused - formerly GSN_SET_VAR_REF */
/* 454 unused - formerly GSN_STATISTICS_CONF */
#define GSN_START_ORD 455
/* 457 unused */

View file

@ -107,6 +107,10 @@ public:
CmvmiDumpLongSignalMemory = 2601,
CmvmiSetRestartOnErrorInsert = 2602,
CmvmiTestLongSigWithDelay = 2603,
CmvmiDumpSubscriptions = 2604, /* note: dumped to the respective out file
to be able to debug if events
for some reason do not end up
in the cluster log */
// 7000 DIH
// 7001 DIH
// 7002 DIH

View file

@ -16,6 +16,8 @@
#ifndef MGMAPI_H
#define MGMAPI_H
#define NDB_MGM_MAX_LOGLEVEL 15
/**
* @mainpage MySQL Cluster Management API
*

View file

@ -42,10 +42,10 @@ public:
class SocketOutputStream : public OutputStream {
NDB_SOCKET_TYPE m_socket;
unsigned m_timeout;
unsigned m_timeout_ms;
public:
SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned writeTimeout = 1000);
SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms = 1000);
int print(const char * fmt, ...);
int println(const char * fmt, ...);
};

View file

@ -16,6 +16,7 @@
#include <ndb_global.h>
#include "EventLogger.hpp"
#include <TransporterCallback.hpp>
#include <NdbConfig.h>
#include <kernel/BlockNumbers.h>
@ -528,10 +529,100 @@ void getTextUndoLogBlocked(QQQQ) {
theData[2]);
}
void getTextTransporterError(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Transporter to node %d reported error 0x%x",
theData[1],
theData[2]);
struct myTransporterError{
Uint32 errorNum;
char errorString[256];
};
int i = 0;
int length = 0;
static const struct myTransporterError TransporterErrorString[]=
{
//TE_NO_ERROR = 0
{TE_NO_ERROR,"No error"},
//TE_ERROR_CLOSING_SOCKET = 0x1
{TE_ERROR_CLOSING_SOCKET,"Error found during closing of socket"},
//TE_ERROR_IN_SELECT_BEFORE_ACCEPT = 0x2
{TE_ERROR_IN_SELECT_BEFORE_ACCEPT,"Error found before accept. The transporter will retry"},
//TE_INVALID_MESSAGE_LENGTH = 0x3 | TE_DO_DISCONNECT
{TE_INVALID_MESSAGE_LENGTH,"Error found in message (invalid message length)"},
//TE_INVALID_CHECKSUM = 0x4 | TE_DO_DISCONNECT
{TE_INVALID_CHECKSUM,"Error found in message (checksum)"},
//TE_COULD_NOT_CREATE_SOCKET = 0x5
{TE_COULD_NOT_CREATE_SOCKET,"Error found while creating socket(can't create socket)"},
//TE_COULD_NOT_BIND_SOCKET = 0x6
{TE_COULD_NOT_BIND_SOCKET,"Error found while binding server socket"},
//TE_LISTEN_FAILED = 0x7
{TE_LISTEN_FAILED,"Error found while listening to server socket"},
//TE_ACCEPT_RETURN_ERROR = 0x8
{TE_ACCEPT_RETURN_ERROR,"Error found during accept(accept return error)"},
//TE_SHM_DISCONNECT = 0xb | TE_DO_DISCONNECT
{TE_SHM_DISCONNECT,"The remote node has disconnected"},
//TE_SHM_IPC_STAT = 0xc | TE_DO_DISCONNECT
{TE_SHM_IPC_STAT,"Unable to check shm segment"},
//TE_SHM_UNABLE_TO_CREATE_SEGMENT = 0xd
{TE_SHM_UNABLE_TO_CREATE_SEGMENT,"Unable to create shm segment"},
//TE_SHM_UNABLE_TO_ATTACH_SEGMENT = 0xe
{TE_SHM_UNABLE_TO_ATTACH_SEGMENT,"Unable to attach shm segment"},
//TE_SHM_UNABLE_TO_REMOVE_SEGMENT = 0xf
{TE_SHM_UNABLE_TO_REMOVE_SEGMENT,"Unable to remove shm segment"},
//TE_TOO_SMALL_SIGID = 0x10
{TE_TOO_SMALL_SIGID,"Sig ID too small"},
//TE_TOO_LARGE_SIGID = 0x11
{TE_TOO_LARGE_SIGID,"Sig ID too large"},
//TE_WAIT_STACK_FULL = 0x12 | TE_DO_DISCONNECT
{TE_WAIT_STACK_FULL,"Wait stack was full"},
//TE_RECEIVE_BUFFER_FULL = 0x13 | TE_DO_DISCONNECT
{TE_RECEIVE_BUFFER_FULL,"Receive buffer was full"},
//TE_SIGNAL_LOST_SEND_BUFFER_FULL = 0x14 | TE_DO_DISCONNECT
{TE_SIGNAL_LOST_SEND_BUFFER_FULL,"Send buffer was full,and trying to force send fails"},
//TE_SIGNAL_LOST = 0x15
{TE_SIGNAL_LOST,"Send failed for unknown reason(signal lost)"},
//TE_SEND_BUFFER_FULL = 0x16
{TE_SEND_BUFFER_FULL,"The send buffer was full, but sleeping for a while solved"},
//TE_SCI_LINK_ERROR = 0x0017
{TE_SCI_LINK_ERROR,"There is no link from this node to the switch"},
//TE_SCI_UNABLE_TO_START_SEQUENCE = 0x18 | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_START_SEQUENCE,"Could not start a sequence, because system resources are exumed or no sequence has been created"},
//TE_SCI_UNABLE_TO_REMOVE_SEQUENCE = 0x19 | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_REMOVE_SEQUENCE,"Could not remove a sequence"},
//TE_SCI_UNABLE_TO_CREATE_SEQUENCE = 0x1a | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_CREATE_SEQUENCE,"Could not create a sequence, because system resources are exempted. Must reboot"},
//TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR = 0x1b | TE_DO_DISCONNECT
{TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR,"Tried to send data on redundant link but failed"},
//TE_SCI_CANNOT_INIT_LOCALSEGMENT = 0x1c | TE_DO_DISCONNECT
{TE_SCI_CANNOT_INIT_LOCALSEGMENT,"Cannot initialize local segment"},
//TE_SCI_CANNOT_MAP_REMOTESEGMENT = 0x1d | TE_DO_DISCONNECT
{TE_SCI_CANNOT_MAP_REMOTESEGMENT,"Cannot map remote segment"},
//TE_SCI_UNABLE_TO_UNMAP_SEGMENT = 0x1e | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_UNMAP_SEGMENT,"Cannot free the resources used by this segment (step 1)"},
//TE_SCI_UNABLE_TO_REMOVE_SEGMENT = 0x1f | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_REMOVE_SEGMENT,"Cannot free the resources used by this segment (step 2)"},
//TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT = 0x20 | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT,"Cannot disconnect from a remote segment"},
//TE_SHM_IPC_PERMANENT = 0x21
{TE_SHM_IPC_PERMANENT,"Shm ipc Permanent error"},
//TE_SCI_UNABLE_TO_CLOSE_CHANNEL = 0x22
{TE_SCI_UNABLE_TO_CLOSE_CHANNEL,"Unable to close the sci channel and the resources allocated"}
};
length = sizeof(TransporterErrorString)/sizeof(struct myTransporterError);
for(i=0; i<length; i++)
{
if(theData[2] == TransporterErrorString[i].errorNum)
{
BaseString::snprintf(m_text, m_text_len,
"Transporter to node %d reported error 0x%x: %s",
theData[1],
theData[2],
TransporterErrorString[i].errorString);
break;
}
}
if(i == length)
BaseString::snprintf(m_text, m_text_len,
"Transporter to node %d reported error 0x%x: unknown error",
theData[1],
theData[2]);
}
void getTextTransporterWarning(QQQQ) {
getTextTransporterError(m_text, m_text_len, theData);

View file

@ -380,15 +380,10 @@ const GsnName SignalNames [] = {
,{ GSN_TUP_WRITELOG_REQ, "TUP_WRITELOG_REQ" }
,{ GSN_LQH_WRITELOG_REQ, "LQH_WRITELOG_REQ" }
,{ GSN_STATISTICS_REQ, "STATISTICS_REQ" }
,{ GSN_START_ORD, "START_ORD" }
,{ GSN_STOP_ORD, "STOP_ORD" }
,{ GSN_TAMPER_ORD, "TAMPER_ORD" }
,{ GSN_SET_VAR_REQ, "SET_VAR_REQ" }
,{ GSN_SET_VAR_CONF, "SET_VAR_CONF" }
,{ GSN_SET_VAR_REF, "SET_VAR_REF" }
,{ GSN_STATISTICS_CONF, "STATISTICS_CONF" }
,{ GSN_EVENT_SUBSCRIBE_REQ, "EVENT_SUBSCRIBE_REQ" }
,{ GSN_EVENT_SUBSCRIBE_CONF, "EVENT_SUBSCRIBE_CONF" }
,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" }

View file

@ -42,16 +42,16 @@ FileOutputStream::println(const char * fmt, ...){
}
SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket,
unsigned timeout){
unsigned write_timeout_ms){
m_socket = socket;
m_timeout = timeout;
m_timeout_ms = write_timeout_ms;
}
int
SocketOutputStream::print(const char * fmt, ...){
va_list ap;
va_start(ap, fmt);
const int ret = vprint_socket(m_socket, m_timeout, fmt, ap);
const int ret = vprint_socket(m_socket, m_timeout_ms, fmt, ap);
va_end(ap);
return ret;
}
@ -59,7 +59,7 @@ int
SocketOutputStream::println(const char * fmt, ...){
va_list ap;
va_start(ap, fmt);
const int ret = vprintln_socket(m_socket, m_timeout, fmt, ap);
const int ret = vprintln_socket(m_socket, m_timeout_ms, fmt, ap);
va_end(ap);
return ret;
}

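The rename from m_timeout to m_timeout_ms above only makes the unit explicit; behaviour is unchanged. A minimal usage sketch, assuming the internal OutputStream.hpp utility header (which provides SocketOutputStream and the NDB_SOCKET_TYPE it wraps) and an already-connected socket descriptor:

#include <OutputStream.hpp>   // internal NDB util header (assumed include path)

// Hedged sketch: write one line to the peer with an explicit 2000 ms write timeout.
void send_greeting(NDB_SOCKET_TYPE fd)
{
  SocketOutputStream out(fd, 2000);   // write_timeout_ms = 2000
  out.println("get version");         // hypothetical protocol line
}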
View file

@ -5,7 +5,7 @@ Next DBACC 3002
Next DBTUP 4014
Next DBLQH 5043
Next DBDICT 6007
Next DBDIH 7178
Next DBDIH 7181
Next DBTC 8039
Next CMVMI 9000
Next BACKUP 10022
@ -71,6 +71,8 @@ Delay GCP_SAVEREQ by 10 secs
7177: Delay copying of sysfileData in execCOPY_GCIREQ
7180: Crash master during master-take-over in execMASTER_LCPCONF
ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
-----------------------------------------------------------------

View file

@ -77,11 +77,7 @@ Cmvmi::Cmvmi(const Configuration & conf) :
addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ);
addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD);
addRecSignal(GSN_STATISTICS_REQ, &Cmvmi::execSTATISTICS_REQ);
addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD);
addRecSignal(GSN_SET_VAR_REQ, &Cmvmi::execSET_VAR_REQ);
addRecSignal(GSN_SET_VAR_CONF, &Cmvmi::execSET_VAR_CONF);
addRecSignal(GSN_SET_VAR_REF, &Cmvmi::execSET_VAR_REF);
addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD);
addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD);
addRecSignal(GSN_EVENT_SUBSCRIBE_REQ,
@ -703,24 +699,6 @@ Cmvmi::execTEST_ORD(Signal * signal){
#endif
}
void Cmvmi::execSTATISTICS_REQ(Signal* signal)
{
// TODO Note ! This is only a test implementation...
static int stat1 = 0;
jamEntry();
//ndbout << "data 1: " << signal->theData[1];
int x = signal->theData[0];
stat1++;
signal->theData[0] = stat1;
sendSignal(x, GSN_STATISTICS_CONF, signal, 7, JBB);
}//execSTATISTICS_REQ()
void Cmvmi::execSTOP_ORD(Signal* signal)
{
jamEntry();
@ -839,7 +817,7 @@ void Cmvmi::execTAMPER_ORD(Signal* signal)
// to be able to indicate if we really introduced an error.
#ifdef ERROR_INSERT
TamperOrd* const tamperOrd = (TamperOrd*)&signal->theData[0];
signal->theData[2] = 0;
signal->theData[1] = tamperOrd->errorNo;
signal->theData[0] = 5;
sendSignal(DBDIH_REF, GSN_DIHNDBTAMPER, signal, 3,JBB);
@ -847,160 +825,6 @@ void Cmvmi::execTAMPER_ORD(Signal* signal)
}//execTAMPER_ORD()
void Cmvmi::execSET_VAR_REQ(Signal* signal)
{
#if 0
SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
ConfigParamId var = setVarReq->variable();
jamEntry();
switch (var) {
// NDBCNTR_REF
// DBTC
case TransactionDeadlockDetectionTimeout:
case TransactionInactiveTime:
case NoOfConcurrentProcessesHandleTakeover:
sendSignal(DBTC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
break;
// DBDIH
case TimeBetweenLocalCheckpoints:
case TimeBetweenGlobalCheckpoints:
sendSignal(DBDIH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
break;
// DBLQH
case NoOfConcurrentCheckpointsDuringRestart:
case NoOfConcurrentCheckpointsAfterRestart:
sendSignal(DBLQH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
break;
// DBACC
case NoOfDiskPagesToDiskDuringRestartACC:
case NoOfDiskPagesToDiskAfterRestartACC:
sendSignal(DBACC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
break;
// DBTUP
case NoOfDiskPagesToDiskDuringRestartTUP:
case NoOfDiskPagesToDiskAfterRestartTUP:
sendSignal(DBTUP_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
break;
// DBDICT
// NDBCNTR
case TimeToWaitAlive:
// QMGR
case HeartbeatIntervalDbDb: // TODO possibly also to Ndbcnt
case HeartbeatIntervalDbApi:
case ArbitTimeout:
sendSignal(QMGR_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
break;
// NDBFS
// CMVMI
case MaxNoOfSavedMessages:
case LockPagesInMainMemory:
case TimeBetweenWatchDogCheck:
case StopOnError:
handleSET_VAR_REQ(signal);
break;
// Not possible to update (this could of course be handled by each block
// instead but I haven't investigated where they belong)
case Id:
case ExecuteOnComputer:
case ShmKey:
case MaxNoOfConcurrentOperations:
case MaxNoOfConcurrentTransactions:
case MemorySpaceIndexes:
case MemorySpaceTuples:
case MemoryDiskPages:
case NoOfFreeDiskClusters:
case NoOfDiskClusters:
case NoOfFragmentLogFiles:
case NoOfDiskClustersPerDiskFile:
case NoOfDiskFiles:
case MaxNoOfSavedEvents:
default:
int mgmtSrvr = setVarReq->mgmtSrvrBlockRef();
sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
} // switch
#endif
}//execSET_VAR_REQ()
void Cmvmi::execSET_VAR_CONF(Signal* signal)
{
int mgmtSrvr = signal->theData[0];
sendSignal(mgmtSrvr, GSN_SET_VAR_CONF, signal, 0, JBB);
}//execSET_VAR_CONF()
void Cmvmi::execSET_VAR_REF(Signal* signal)
{
int mgmtSrvr = signal->theData[0];
sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
}//execSET_VAR_REF()
void Cmvmi::handleSET_VAR_REQ(Signal* signal) {
#if 0
SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
ConfigParamId var = setVarReq->variable();
int val = setVarReq->value();
switch (var) {
case MaxNoOfSavedMessages:
theConfig.maxNoOfErrorLogs(val);
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case LockPagesInMainMemory:
int result;
if (val == 0) {
result = NdbMem_MemUnlockAll();
}
else {
result = NdbMem_MemLockAll();
}
if (result == 0) {
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
}
else {
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
}
break;
case TimeBetweenWatchDogCheck:
theConfig.timeBetweenWatchDogCheck(val);
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case StopOnError:
theConfig.stopOnError(val);
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
default:
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
return;
} // switch
#endif
}
#ifdef VM_TRACE
class RefSignalTest {
public:
@ -1105,6 +929,24 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
}
}
if (arg == DumpStateOrd::CmvmiDumpSubscriptions)
{
SubscriberPtr ptr;
subscribers.first(ptr);
g_eventLogger.info("List subscriptions:");
while(ptr.i != RNIL)
{
g_eventLogger.info("Subscription: %u, nodeId: %u, ref: 0x%x",
ptr.i, refToNode(ptr.p->blockRef), ptr.p->blockRef);
for(Uint32 i = 0; i < LogLevel::LOGLEVEL_CATEGORIES; i++)
{
Uint32 level = ptr.p->logLevel.getLogLevel((LogLevel::EventCategory)i);
g_eventLogger.info("Category %u Level %u", i, level);
}
subscribers.next(ptr);
}
}
if (arg == DumpStateOrd::CmvmiDumpLongSignalMemory){
infoEvent("Cmvmi: g_sectionSegmentPool size: %d free: %d",
g_sectionSegmentPool.getSize(),

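The CmvmiDumpSubscriptions handler added above lists every event subscriber and its per-category log levels in the data node's out file. A minimal sketch of how the new dump code 2604 could be triggered from outside the kernel, assuming the MGM API's ndb_mgm_dump_state() entry point (declared in mgmapi_debug.h in this tree) and a management server reachable through a hypothetical localhost:1186 connect string:

#include <mgmapi.h>
#include <mgmapi_debug.h>   // assumed location of ndb_mgm_dump_state() in this tree

int main()
{
  NdbMgmHandle h = ndb_mgm_create_handle();
  ndb_mgm_set_connectstring(h, "localhost:1186");   // hypothetical connect string
  if (ndb_mgm_connect(h, 0, 0, 0) == 0)
  {
    struct ndb_mgm_reply reply;
    int args[1] = { 2604 };                         // CmvmiDumpSubscriptions
    ndb_mgm_dump_state(h, 1, args, 1, &reply);      // node id 1 is only an example
  }
  ndb_mgm_destroy_handle(&h);
  return 0;
}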
View file

@ -55,20 +55,14 @@ private:
void execSIZEALT_ACK(Signal* signal);
void execTEST_ORD(Signal* signal);
void execSTATISTICS_REQ(Signal* signal);
void execSTOP_ORD(Signal* signal);
void execSTART_ORD(Signal* signal);
void execTAMPER_ORD(Signal* signal);
void execSET_VAR_REQ(Signal* signal);
void execSET_VAR_CONF(Signal* signal);
void execSET_VAR_REF(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
void execEVENT_SUBSCRIBE_REQ(Signal *);
void cancelSubscription(NodeId nodeId);
void handleSET_VAR_REQ(Signal* signal);
void execTESTSIG(Signal* signal);
void execNODE_START_REP(Signal* signal);

View file

@ -911,7 +911,6 @@ private:
void execDROP_TAB_REQ(Signal* signal);
void execFSREMOVECONF(Signal* signal);
void execREAD_CONFIG_REQ(Signal* signal);
void execSET_VAR_REQ(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
// Statement blocks

View file

@ -178,7 +178,6 @@ Dbacc::Dbacc(const class Configuration & conf):
addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ);
addRecSignal(GSN_FSREMOVECONF, &Dbacc::execFSREMOVECONF);
addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true);
addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ);
initData();

View file

@ -11648,33 +11648,6 @@ Dbacc::execDUMP_STATE_ORD(Signal* signal)
#endif
}//Dbacc::execDUMP_STATE_ORD()
void Dbacc::execSET_VAR_REQ(Signal* signal)
{
#if 0
SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
ConfigParamId var = setVarReq->variable();
int val = setVarReq->value();
switch (var) {
case NoOfDiskPagesToDiskAfterRestartACC:
clblPagesPerTick = val;
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case NoOfDiskPagesToDiskDuringRestartACC:
// Valid only during start so value not set.
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
default:
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
} // switch
#endif
}//execSET_VAR_REQ()
void
Dbacc::execREAD_PSUEDO_REQ(Signal* signal){
jamEntry();

View file

@ -693,7 +693,6 @@ private:
void execFSREADREF(Signal *);
void execFSWRITECONF(Signal *);
void execFSWRITEREF(Signal *);
void execSET_VAR_REQ(Signal *);
void execCHECKNODEGROUPSREQ(Signal *);
void execSTART_INFOREQ(Signal*);
void execSTART_INFOREF(Signal*);
@ -1367,6 +1366,7 @@ private:
Uint32 csystemnodes;
Uint32 currentgcp;
Uint32 c_newest_restorable_gci;
Uint32 c_set_initial_start_flag;
enum GcpMasterTakeOverState {
GMTOS_IDLE = 0,

View file

@ -74,6 +74,7 @@ void Dbdih::initData()
c_blockCommit = false;
c_blockCommitNo = 1;
cntrlblockref = RNIL;
c_set_initial_start_flag = FALSE;
}//Dbdih::initData()
void Dbdih::initRecords()
@ -216,7 +217,6 @@ Dbdih::Dbdih(const class Configuration & config):
addRecSignal(GSN_FSREADREF, &Dbdih::execFSREADREF, true);
addRecSignal(GSN_FSWRITECONF, &Dbdih::execFSWRITECONF);
addRecSignal(GSN_FSWRITEREF, &Dbdih::execFSWRITEREF, true);
addRecSignal(GSN_SET_VAR_REQ, &Dbdih::execSET_VAR_REQ);
addRecSignal(GSN_START_INFOREQ,
&Dbdih::execSTART_INFOREQ);

View file

@ -666,6 +666,12 @@ done:
{
jam();
memcpy(sysfileData, cdata, sizeof(sysfileData));
if (c_set_initial_start_flag)
{
jam();
Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits);
}
}
c_copyGCISlave.m_copyReason = reason;
@ -1259,6 +1265,11 @@ void Dbdih::execNDB_STTOR(Signal* signal)
// The permission is given by the master node in the alive set.
/*-----------------------------------------------------------------------*/
createMutexes(signal, 0);
if (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)
{
jam();
c_set_initial_start_flag = TRUE; // In sysfile...
}
break;
case ZNDB_SPH3:
@ -1786,8 +1797,8 @@ void Dbdih::execSTART_PERMREQ(Signal* signal)
return;
}//if
if (getNodeStatus(nodeId) != NodeRecord::DEAD){
ndbout << "nodeStatus in START_PERMREQ = "
<< (Uint32) getNodeStatus(nodeId) << endl;
g_eventLogger.error("nodeStatus in START_PERMREQ = %u",
(Uint32) getNodeStatus(nodeId));
ndbrequire(false);
}//if
@ -4029,9 +4040,9 @@ void Dbdih::checkCopyTab(NodeRecordPtr failedNodePtr)
jam();
break;
default:
ndbout_c("outstanding gsn: %s(%d)",
getSignalName(c_nodeStartMaster.m_outstandingGsn),
c_nodeStartMaster.m_outstandingGsn);
g_eventLogger.error("outstanding gsn: %s(%d)",
getSignalName(c_nodeStartMaster.m_outstandingGsn),
c_nodeStartMaster.m_outstandingGsn);
ndbrequire(false);
}
@ -4472,9 +4483,10 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
break;
default:
ndbout << "activeStatus = " << (Uint32) failedNodePtr.p->activeStatus;
ndbout << " at failure after NODE_FAILREP of node = ";
ndbout << failedNodePtr.i << endl;
g_eventLogger.error("activeStatus = %u "
"at failure after NODE_FAILREP of node = %u",
(Uint32) failedNodePtr.p->activeStatus,
failedNodePtr.i);
ndbrequire(false);
break;
}//switch
@ -4611,6 +4623,8 @@ void
Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
jam();
Uint32 oldNode = c_lcpMasterTakeOverState.failedNodeId;
c_lcpMasterTakeOverState.minTableId = ~0;
c_lcpMasterTakeOverState.minFragId = ~0;
c_lcpMasterTakeOverState.failedNodeId = nodeId;
@ -4629,7 +4643,20 @@ Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
/**
* Node failure during master take over...
*/
ndbout_c("Nodefail during master take over");
g_eventLogger.info("Nodefail during master take over (old: %d)", oldNode);
}
NodeRecordPtr nodePtr;
nodePtr.i = oldNode;
if (oldNode > 0 && oldNode < MAX_NDB_NODES)
{
jam();
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
if (nodePtr.p->m_nodefailSteps.get(NF_LCP_TAKE_OVER))
{
jam();
checkLocalNodefailComplete(signal, oldNode, NF_LCP_TAKE_OVER);
}
}
setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER);
@ -4869,7 +4896,8 @@ void Dbdih::execMASTER_GCPCONF(Signal* signal)
if (latestLcpId > SYSFILE->latestLCP_ID) {
jam();
#if 0
ndbout_c("Dbdih: Setting SYSFILE->latestLCP_ID to %d", latestLcpId);
g_eventLogger.info("Dbdih: Setting SYSFILE->latestLCP_ID to %d",
latestLcpId);
SYSFILE->latestLCP_ID = latestLcpId;
#endif
SYSFILE->keepGCI = oldestKeepGci;
@ -5528,7 +5556,7 @@ Dbdih::checkLocalNodefailComplete(Signal* signal, Uint32 failedNodeId,
if (ERROR_INSERTED(7030))
{
ndbout_c("Reenable GCP_PREPARE");
g_eventLogger.info("Reenable GCP_PREPARE");
CLEAR_ERROR_INSERT_VALUE;
}
@ -5645,6 +5673,14 @@ void Dbdih::execMASTER_LCPREQ(Signal* signal)
jamEntry();
const BlockReference newMasterBlockref = req->masterRef;
if (newMasterBlockref != cmasterdihref)
{
jam();
ndbout_c("resending GSN_MASTER_LCPREQ");
sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal,
signal->getLength(), 50);
return;
}
Uint32 failedNodeId = req->failedNodeId;
/**
@ -5701,7 +5737,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
#if 0
if(c_copyGCISlave.m_copyReason == CopyGCIReq::LOCAL_CHECKPOINT){
ndbout_c("Dbdih: Also resetting c_copyGCISlave");
g_eventLogger.info("Dbdih: Also resetting c_copyGCISlave");
c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
c_copyGCISlave.m_expectedNextWord = 0;
}
@ -5790,7 +5826,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
if(c_lcpState.lcpStatus == LCP_TAB_SAVED){
#ifdef VM_TRACE
ndbout_c("Sending extra GSN_LCP_COMPLETE_REP to new master");
g_eventLogger.info("Sending extra GSN_LCP_COMPLETE_REP to new master");
#endif
sendLCP_COMPLETE_REP(signal);
}
@ -5945,8 +5981,10 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
nodePtr.p->lcpStateAtTakeOver = lcpState;
CRASH_INSERTION(7180);
#ifdef VM_TRACE
ndbout_c("MASTER_LCPCONF");
g_eventLogger.info("MASTER_LCPCONF");
printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0);
#endif
@ -6023,7 +6061,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
// protocol.
/* --------------------------------------------------------------------- */
#ifdef VM_TRACE
ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
#endif
checkLcpStart(signal, __LINE__);
break;
@ -6034,7 +6072,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
// protocol by calculating the keep gci and storing the new lcp id.
/* --------------------------------------------------------------------- */
#ifdef VM_TRACE
ndbout_c("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
g_eventLogger.info("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
#endif
if (c_lcpState.lcpStatus == LCP_STATUS_ACTIVE) {
jam();
@ -6045,7 +6083,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
/*---------------------------------------------------------------------*/
Uint32 lcpId = SYSFILE->latestLCP_ID;
#ifdef VM_TRACE
ndbout_c("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
g_eventLogger.info("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
#endif
SYSFILE->latestLCP_ID--;
}//if
@ -6062,10 +6100,10 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
* complete before finalising the LCP process.
* ------------------------------------------------------------------ */
#ifdef VM_TRACE
ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
"startLcpRoundLoopLab(table=%u, fragment=%u)",
c_lcpMasterTakeOverState.minTableId,
c_lcpMasterTakeOverState.minFragId);
g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
"startLcpRoundLoopLab(table=%u, fragment=%u)",
c_lcpMasterTakeOverState.minTableId,
c_lcpMasterTakeOverState.minFragId);
#endif
c_lcpState.keepGci = SYSFILE->keepGCI;
@ -7376,8 +7414,8 @@ void Dbdih::checkGcpStopLab(Signal* signal)
if (cgcpSameCounter == 1200) {
jam();
#ifdef VM_TRACE
ndbout << "System crash due to GCP Stop in state = ";
ndbout << (Uint32) cgcpStatus << endl;
g_eventLogger.error("System crash due to GCP Stop in state = %u",
(Uint32) cgcpStatus);
#endif
crashSystemAtGcpStop(signal);
return;
@ -7390,8 +7428,8 @@ void Dbdih::checkGcpStopLab(Signal* signal)
if (cgcpSameCounter == 1200) {
jam();
#ifdef VM_TRACE
ndbout << "System crash due to GCP Stop in state = ";
ndbout << (Uint32) cgcpStatus << endl;
g_eventLogger.error("System crash due to GCP Stop in state = %u",
(Uint32) cgcpStatus);
#endif
crashSystemAtGcpStop(signal);
return;
@ -7582,7 +7620,7 @@ void Dbdih::GCP_SAVEhandling(Signal* signal, Uint32 nodeId)
getNodeState().startLevel == NodeState::SL_STARTED){
jam();
#if 0
ndbout_c("Dbdih: Clearing initial start ongoing");
g_eventLogger.info("Dbdih: Clearing initial start ongoing");
#endif
Sysfile::clearInitialStartOngoing(SYSFILE->systemRestartBits);
}
@ -7601,7 +7639,7 @@ void Dbdih::execGCP_PREPARE(Signal* signal)
if (ERROR_INSERTED(7030))
{
cgckptflag = true;
ndbout_c("Delayed GCP_PREPARE 5s");
g_eventLogger.info("Delayed GCP_PREPARE 5s");
sendSignalWithDelay(reference(), GSN_GCP_PREPARE, signal, 5000,
signal->getLength());
return;
@ -7621,7 +7659,7 @@ void Dbdih::execGCP_PREPARE(Signal* signal)
if (ERROR_INSERTED(7031))
{
ndbout_c("Crashing delayed in GCP_PREPARE 3s");
g_eventLogger.info("Crashing delayed in GCP_PREPARE 3s");
signal->theData[0] = 9999;
sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 3000, 1);
return;
@ -8136,7 +8174,7 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
* This is LCP master takeover
*/
#ifdef VM_TRACE
ndbout_c("initLcpLab aborted due to LCP master takeover - 1");
g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 1");
#endif
c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
sendMASTER_LCPCONF(signal);
@ -8149,7 +8187,7 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
* Master take over but has not yet received MASTER_LCPREQ
*/
#ifdef VM_TRACE
ndbout_c("initLcpLab aborted due to LCP master takeover - 2");
g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 2");
#endif
return;
}
@ -9380,9 +9418,10 @@ void Dbdih::checkTcCounterLab(Signal* signal)
{
CRASH_INSERTION(7009);
if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) {
ndbout << "lcpStatus = " << (Uint32) c_lcpState.lcpStatus;
ndbout << "lcpStatusUpdatedPlace = " <<
c_lcpState.lcpStatusUpdatedPlace << endl;
g_eventLogger.error("lcpStatus = %u"
"lcpStatusUpdatedPlace = %d",
(Uint32) c_lcpState.lcpStatus,
c_lcpState.lcpStatusUpdatedPlace);
ndbrequire(false);
return;
}//if
@ -9935,9 +9974,8 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)
if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
jam();
ndbout_c("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
tableId,
fragId);
g_eventLogger.info("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
tableId, fragId);
} else {
jam();
/**
@ -10065,7 +10103,7 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
};
#ifdef VM_TRACE
ndbout_c("Fragment Replica(node=%d) not found", nodeId);
g_eventLogger.info("Fragment Replica(node=%d) not found", nodeId);
replicaPtr.i = fragPtrP->oldStoredReplicas;
while(replicaPtr.i != RNIL){
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
@ -10078,9 +10116,9 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
}//if
};
if(replicaPtr.i != RNIL){
ndbout_c("...But was found in oldStoredReplicas");
g_eventLogger.info("...But was found in oldStoredReplicas");
} else {
ndbout_c("...And wasn't found in oldStoredReplicas");
g_eventLogger.info("...And wasn't found in oldStoredReplicas");
}
#endif
ndbrequire(false);
@ -10114,8 +10152,8 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
ndbrequire(replicaPtr.p->lcpOngoingFlag == true);
if(lcpNo != replicaPtr.p->nextLcp){
ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d",
lcpNo, replicaPtr.p->nextLcp);
g_eventLogger.error("lcpNo = %d replicaPtr.p->nextLcp = %d",
lcpNo, replicaPtr.p->nextLcp);
ndbrequire(false);
}
ndbrequire(lcpNo == replicaPtr.p->nextLcp);
@ -10150,7 +10188,7 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
// Not all fragments in table have been checkpointed.
/* ----------------------------------------------------------------- */
if(0)
ndbout_c("reportLcpCompletion: fragment %d not ready", fid);
g_eventLogger.info("reportLcpCompletion: fragment %d not ready", fid);
return false;
}//if
}//for
@ -10257,6 +10295,17 @@ Dbdih::sendLCP_COMPLETE_REP(Signal* signal){
sendSignal(c_lcpState.m_masterLcpDihRef, GSN_LCP_COMPLETE_REP, signal,
LcpCompleteRep::SignalLength, JBB);
/**
* Say that an initial node restart does not need to be redone
* once the node has been part of the first LCP
*/
if (c_set_initial_start_flag &&
c_lcpState.m_participatingLQH.get(getOwnNodeId()))
{
jam();
c_set_initial_start_flag = FALSE;
}
}
/*-------------------------------------------------------------------------- */
@ -10267,7 +10316,7 @@ void Dbdih::execLCP_COMPLETE_REP(Signal* signal)
jamEntry();
#if 0
ndbout_c("LCP_COMPLETE_REP");
g_eventLogger.info("LCP_COMPLETE_REP");
printLCP_COMPLETE_REP(stdout,
signal->getDataPtr(),
signal->length(), number());
@ -10353,7 +10402,7 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal)
if(c_lcpMasterTakeOverState.state != LMTOS_IDLE){
jam();
#ifdef VM_TRACE
ndbout_c("Exiting from allNodesLcpCompletedLab");
g_eventLogger.info("Exiting from allNodesLcpCompletedLab");
#endif
return;
}
@ -10582,14 +10631,14 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal)
infoEvent("Detected GCP stop...sending kill to %s",
c_GCP_SAVEREQ_Counter.getText());
ndbout_c("Detected GCP stop...sending kill to %s",
c_GCP_SAVEREQ_Counter.getText());
g_eventLogger.error("Detected GCP stop...sending kill to %s",
c_GCP_SAVEREQ_Counter.getText());
return;
}
case GCP_SAVE_LQH_FINISHED:
ndbout_c("m_copyReason: %d m_waiting: %d",
c_copyGCIMaster.m_copyReason,
c_copyGCIMaster.m_waiting);
g_eventLogger.error("m_copyReason: %d m_waiting: %d",
c_copyGCIMaster.m_copyReason,
c_copyGCIMaster.m_waiting);
break;
case GCP_READY: // shut up lint
case GCP_PREPARE_SENT:
@ -10597,11 +10646,11 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal)
break;
}
ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
c_copyGCISlave.m_senderData,
c_copyGCISlave.m_senderRef,
c_copyGCISlave.m_copyReason,
c_copyGCISlave.m_expectedNextWord);
g_eventLogger.error("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
c_copyGCISlave.m_senderData,
c_copyGCISlave.m_senderRef,
c_copyGCISlave.m_copyReason,
c_copyGCISlave.m_expectedNextWord);
FileRecordPtr file0Ptr;
file0Ptr.i = crestartInfoFile[0];
@ -12804,9 +12853,9 @@ void Dbdih::setLcpActiveStatusEnd()
nodePtr.i = getOwnNodeId();
ptrAss(nodePtr, nodeRecord);
ndbrequire(nodePtr.p->activeStatus == Sysfile::NS_Active);
ndbout_c("NR: setLcpActiveStatusEnd - m_participatingLQH");
g_eventLogger.info("NR: setLcpActiveStatusEnd - m_participatingLQH");
} else {
ndbout_c("NR: setLcpActiveStatusEnd - !m_participatingLQH");
g_eventLogger.info("NR: setLcpActiveStatusEnd - !m_participatingLQH");
}
}
@ -13637,8 +13686,8 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}
if(arg == DumpStateOrd::EnableUndoDelayDataWrite){
ndbout << "Dbdih:: delay write of datapages for table = "
<< dumpState->args[1]<< endl;
g_eventLogger.info("Dbdih:: delay write of datapages for table = %s",
dumpState->args[1]);
// Send this dump to ACC and TUP
EXECUTE_DIRECT(DBACC, GSN_DUMP_STATE_ORD, signal, 2);
EXECUTE_DIRECT(DBTUP, GSN_DUMP_STATE_ORD, signal, 2);
@ -13655,13 +13704,13 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}//if
if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) {
// Set time between LCP to min value
ndbout << "Set time between LCP to min value" << endl;
g_eventLogger.info("Set time between LCP to min value");
c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
return;
}
if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) {
// Set time between LCP to max value
ndbout << "Set time between LCP to max value" << endl;
g_eventLogger.info("Set time between LCP to max value");
c_lcpState.clcpDelay = 31; // TimeBetweenLocalCheckpoints.max
return;
}
@ -13697,7 +13746,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
{
cgcpDelay = signal->theData[1];
}
ndbout_c("Setting time between gcp : %d", cgcpDelay);
g_eventLogger.info("Setting time between gcp : %d", cgcpDelay);
}
if (arg == 7021 && signal->getLength() == 2)
@ -13820,7 +13869,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
while(index < count){
if(nodePtr.p->queuedChkpt[index].tableId == tabPtr.i){
jam();
// ndbout_c("Unqueuing %d", index);
// g_eventLogger.info("Unqueuing %d", index);
count--;
for(Uint32 i = index; i<count; i++){
@ -13860,7 +13909,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
if(checkLcpAllTablesDoneInLqh()){
jam();
ndbout_c("This is the last table");
g_eventLogger.info("This is the last table");
/**
* Then check if saving of tab info is done for all tables
@ -13869,7 +13918,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
checkLcpCompletedLab(signal);
if(a != c_lcpState.lcpStatus){
ndbout_c("And all tables are written to already written disk");
g_eventLogger.info("And all tables are written to already written disk");
}
}
break;
@ -14026,30 +14075,6 @@ Dbdih::execNDB_TAMPER(Signal* signal)
return;
}//Dbdih::execNDB_TAMPER()
void Dbdih::execSET_VAR_REQ(Signal* signal) {
#if 0
SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
ConfigParamId var = setVarReq->variable();
int val = setVarReq->value();
switch (var) {
case TimeBetweenLocalCheckpoints:
c_lcpState.clcpDelay = val;
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case TimeBetweenGlobalCheckpoints:
cgcpDelay = val;
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
default:
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
} // switch
#endif
}
void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){
BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0];

View file

@ -2201,7 +2201,6 @@ private:
void execFSREADCONF(Signal* signal);
void execFSREADREF(Signal* signal);
void execSCAN_HBREP(Signal* signal);
void execSET_VAR_REQ(Signal* signal);
void execTIME_SIGNAL(Signal* signal);
void execFSSYNCCONF(Signal* signal);

View file

@ -314,7 +314,6 @@ Dblqh::Dblqh(const class Configuration & conf):
addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF);
addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF, true);
addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF);
addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ);
addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL);
addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF);
addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD);

View file

@ -11672,7 +11672,8 @@ void Dblqh::execGCP_SAVEREQ(Signal* signal)
return;
}
if(getNodeState().getNodeRestartInProgress()){
if(getNodeState().getNodeRestartInProgress() && cstartRecReq == ZFALSE)
{
GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
saveRef->dihPtr = dihPtr;
saveRef->nodeId = getOwnNodeId();
@ -18902,30 +18903,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
}//Dblqh::execDUMP_STATE_ORD()
void Dblqh::execSET_VAR_REQ(Signal* signal)
{
#if 0
SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
ConfigParamId var = setVarReq->variable();
switch (var) {
case NoOfConcurrentCheckpointsAfterRestart:
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case NoOfConcurrentCheckpointsDuringRestart:
// Valid only during start so value not set.
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
default:
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
} // switch
#endif
}//execSET_VAR_REQ()
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* ---------------------- TRIGGER HANDLING ------------------------ */

View file

@ -1323,7 +1323,6 @@ private:
void execTIME_SIGNAL(Signal* signal);
void execAPI_FAILREQ(Signal* signal);
void execSCAN_HBREP(Signal* signal);
void execSET_VAR_REQ(Signal* signal);
void execABORT_ALL_REQ(Signal* signal);

View file

@ -256,7 +256,6 @@ Dbtc::Dbtc(const class Configuration & conf):
addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ);
addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL);
addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ);
addRecSignal(GSN_SET_VAR_REQ, &Dbtc::execSET_VAR_REQ);
addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK);
addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ);

View file

@ -10999,36 +10999,6 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
}
}//Dbtc::execDUMP_STATE_ORD()
void Dbtc::execSET_VAR_REQ(Signal* signal)
{
#if 0
SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
ConfigParamId var = setVarReq->variable();
int val = setVarReq->value();
switch (var) {
case TransactionInactiveTime:
jam();
set_appl_timeout_value(val);
break;
case TransactionDeadlockDetectionTimeout:
set_timeout_value(val);
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case NoOfConcurrentProcessesHandleTakeover:
set_no_parallel_takeover(val);
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
default:
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
} // switch
#endif
}
void Dbtc::execABORT_ALL_REQ(Signal* signal)
{
jamEntry();

View file

@ -1116,7 +1116,6 @@ private:
void execFSREADCONF(Signal* signal);
void execNDB_STTOR(Signal* signal);
void execREAD_CONFIG_REQ(Signal* signal);
void execSET_VAR_REQ(Signal* signal);
void execDROP_TAB_REQ(Signal* signal);
void execALTER_TAB_REQ(Signal* signal);
void execFSREMOVECONF(Signal* signal);

View file

@ -103,7 +103,6 @@ Dbtup::Dbtup(const class Configuration & conf)
addRecSignal(GSN_FSREADCONF, &Dbtup::execFSREADCONF);
addRecSignal(GSN_NDB_STTOR, &Dbtup::execNDB_STTOR);
addRecSignal(GSN_READ_CONFIG_REQ, &Dbtup::execREAD_CONFIG_REQ, true);
addRecSignal(GSN_SET_VAR_REQ, &Dbtup::execSET_VAR_REQ);
// Trigger Signals
addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtup::execCREATE_TRIG_REQ);
@ -1315,32 +1314,5 @@ void Dbtup::seizePendingFileOpenInfoRecord(PendingFileOpenInfoPtr& pfoiPtr)
pfoiPtr.p->pfoNextRec = RNIL;
}//Dbtup::seizePendingFileOpenInfoRecord()
void Dbtup::execSET_VAR_REQ(Signal* signal)
{
#if 0
SetVarReq* const setVarReq = (SetVarReq*)signal->getDataPtrSend();
ConfigParamId var = setVarReq->variable();
int val = setVarReq->value();
switch (var) {
case NoOfDiskPagesToDiskAfterRestartTUP:
clblPagesPerTick = val;
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case NoOfDiskPagesToDiskDuringRestartTUP:
// Valid only during start so value not set.
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
default:
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
} // switch
#endif
}//execSET_VAR_REQ()

View file

@ -190,7 +190,6 @@ private:
void execNDB_STARTCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
void execNDB_STARTREF(Signal* signal);
void execSET_VAR_REQ(Signal* signal);
void execSTOP_PERM_REF(Signal* signal);
void execSTOP_PERM_CONF(Signal* signal);

View file

@ -80,7 +80,6 @@ Ndbcntr::Ndbcntr(const class Configuration & conf):
addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF);
addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ);
addRecSignal(GSN_NDB_STARTREF, &Ndbcntr::execNDB_STARTREF);
addRecSignal(GSN_SET_VAR_REQ, &Ndbcntr::execSET_VAR_REQ);
addRecSignal(GSN_STOP_PERM_REF, &Ndbcntr::execSTOP_PERM_REF);
addRecSignal(GSN_STOP_PERM_CONF, &Ndbcntr::execSTOP_PERM_CONF);

View file

@ -75,8 +75,8 @@ static BlockInfo ALL_BLOCKS[] = {
{ DBDICT_REF, 1 , 6000, 6003 },
{ NDBFS_REF, 0 , 2000, 2999 },
{ NDBCNTR_REF, 0 , 1000, 1999 },
{ CMVMI_REF, 1 , 9000, 9999 }, // before QMGR
{ QMGR_REF, 1 , 1, 999 },
{ CMVMI_REF, 1 , 9000, 9999 },
{ TRIX_REF, 1 , 0, 0 },
{ BACKUP_REF, 1 , 10000, 10999 },
{ DBUTIL_REF, 1 , 11000, 11999 },
@ -2026,23 +2026,6 @@ Ndbcntr::execDUMP_STATE_ORD(Signal* signal)
}//Ndbcntr::execDUMP_STATE_ORD()
void Ndbcntr::execSET_VAR_REQ(Signal* signal) {
#if 0
SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
ConfigParamId var = setVarReq->variable();
switch (var) {
case TimeToWaitAlive:
// Valid only during start so value not set.
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
default:
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
}// switch
#endif
}//Ndbcntr::execSET_VAR_REQ()
void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{
NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0];

View file

@ -242,7 +242,6 @@ private:
void execAPI_REGREQ(Signal* signal);
void execAPI_FAILCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
void execSET_VAR_REQ(Signal* signal);
void execREAD_NODESREF(Signal* signal);
void execREAD_NODESCONF(Signal* signal);

View file

@ -82,7 +82,6 @@ Qmgr::Qmgr(const class Configuration & conf)
addRecSignal(GSN_DISCONNECT_REP, &Qmgr::execDISCONNECT_REP);
addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF);
addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ);
addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ);
addRecSignal(GSN_API_BROADCAST_REP, &Qmgr::execAPI_BROADCAST_REP);
// Arbitration signals

View file

@ -4774,34 +4774,6 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal)
#endif
}//Qmgr::execDUMP_STATE_ORD()
void Qmgr::execSET_VAR_REQ(Signal* signal)
{
#if 0
SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
ConfigParamId var = setVarReq->variable();
UintR val = setVarReq->value();
switch (var) {
case HeartbeatIntervalDbDb:
setHbDelay(val/10);
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case HeartbeatIntervalDbApi:
setHbApiDelay(val/10);
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
case ArbitTimeout:
setArbitTimeout(val);
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
break;
default:
sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
}// switch
#endif
}//execSET_VAR_REQ()
void
Qmgr::execAPI_BROADCAST_REP(Signal* signal)

View file

@ -22,7 +22,10 @@
#include <NdbOut.hpp>
#include <NdbSleep.h>
#include <ErrorHandlingMacros.hpp>
#include <EventLogger.hpp>
extern EventLogger g_eventLogger;
extern "C"
void*
runWatchDog(void* w){
@ -125,7 +128,7 @@ WatchDog::run(){
last_stuck_action = "Unknown place";
break;
}//switch
ndbout << "Ndb kernel is stuck in: " << last_stuck_action << endl;
g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
if(alerts == 3){
shutdownSystem(last_stuck_action);
}

View file

@ -2187,43 +2187,6 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype,
return nodeid;
}
/*****************************************************************************
* Global Replication
******************************************************************************/
extern "C"
int
ndb_mgm_rep_command(NdbMgmHandle handle, unsigned int request,
unsigned int* replication_id,
struct ndb_mgm_reply* /*reply*/)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_rep_command");
const ParserRow<ParserDummy> replication_reply[] = {
MGM_CMD("global replication reply", NULL, ""),
MGM_ARG("result", String, Mandatory, "Error message"),
MGM_ARG("id", Int, Optional, "Id of global replication"),
MGM_END()
};
CHECK_HANDLE(handle, -1);
CHECK_CONNECTED(handle, -1);
Properties args;
args.put("request", request);
const Properties *reply;
reply = ndb_mgm_call(handle, replication_reply, "rep", &args);
CHECK_REPLY(reply, -1);
const char * result;
reply->get("result", &result);
reply->get("id", replication_id);
if(strcmp(result,"Ok")!=0) {
delete reply;
return -1;
}
delete reply;
return 0;
}
extern "C"
int
ndb_mgm_set_int_parameter(NdbMgmHandle handle,

View file

@ -799,6 +799,7 @@ InitConfigFileParser::parse_mycnf()
/**
* Add ndbd, ndb_mgmd, api/mysqld
*/
Uint32 idx = options.size();
{
struct my_option opt;
bzero(&opt, sizeof(opt));
@ -808,7 +809,6 @@ InitConfigFileParser::parse_mycnf()
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
ndbd = &options.back();
opt.name = "ndb_mgmd";
opt.id = 256;
@ -816,7 +816,6 @@ InitConfigFileParser::parse_mycnf()
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
ndb_mgmd = &options.back();
opt.name = "mysqld";
opt.id = 256;
@ -824,20 +823,22 @@ InitConfigFileParser::parse_mycnf()
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
mysqld = &options.back();
opt.name = "api";
opt.name = "ndbapi";
opt.id = 256;
opt.value = (gptr*)malloc(sizeof(char*));
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
api = &options.back();
bzero(&opt, sizeof(opt));
options.push_back(opt);
}
ndbd = &options[idx];
ndb_mgmd = &options[idx+1];
mysqld = &options[idx+2];
api = &options[idx+3];
}
Context ctx(m_info, m_errstream);
const char *groups[]= { "cluster_config", 0 };

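The parse_mycnf() change above stops caching &options.back() between push_back() calls and instead records the starting index, taking the four addresses only after the vector has stopped growing; a push_back() that reallocates would otherwise leave the earlier pointers dangling. A minimal sketch of that hazard, using std::vector as a stand-in for the parser's own Vector class:

#include <vector>
#include <cstdio>

int main()
{
  std::vector<int> opts;
  opts.push_back(1);
  int *cached = &opts.back();   // pointer into the vector's current storage
  for (int i = 0; i < 1000; i++)
    opts.push_back(i);          // growth may reallocate; 'cached' can now dangle
  (void)cached;                 // dereferencing 'cached' here would be undefined behaviour
  // Remembering an index instead stays valid across reallocation:
  std::printf("%d\n", opts[0]);
  return 0;
}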
View file

@ -704,7 +704,7 @@ int MgmtSrvr::okToSendTo(NodeId nodeId, bool unCond)
return WRONG_PROCESS_TYPE;
// Check if we have contact with it
if(unCond){
if(theFacade->theClusterMgr->getNodeInfo(nodeId).connected)
if(theFacade->theClusterMgr->getNodeInfo(nodeId).m_api_reg_conf)
return 0;
}
else if (theFacade->get_node_alive(nodeId) == true)
@ -1562,32 +1562,85 @@ MgmtSrvr::status(int nodeId,
}
int
MgmtSrvr::setEventReportingLevelImpl(int nodeId,
MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
const EventSubscribeReq& ll)
{
SignalSender ss(theFacade);
ss.lock();
SimpleSignal ssig;
EventSubscribeReq * dst =
CAST_PTR(EventSubscribeReq, ssig.getDataPtrSend());
ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
EventSubscribeReq::SignalLength);
*dst = ll;
NodeBitmask nodes;
NdbNodeBitmask nodes;
int retries = 30;
nodes.clear();
Uint32 max = (nodeId == 0) ? (nodeId = 1, MAX_NDB_NODES) : nodeId;
for(; (Uint32) nodeId <= max; nodeId++)
while (1)
{
if (nodeTypes[nodeId] != NODE_TYPE_DB)
continue;
if (okToSendTo(nodeId, true))
continue;
if (ss.sendSignal(nodeId, &ssig) == SEND_OK)
Uint32 nodeId, max;
ss.lock();
SimpleSignal ssig;
EventSubscribeReq * dst =
CAST_PTR(EventSubscribeReq, ssig.getDataPtrSend());
ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
EventSubscribeReq::SignalLength);
*dst = ll;
if (nodeId_arg == 0)
{
nodes.set(nodeId);
// all nodes
nodeId = 1;
max = MAX_NDB_NODES;
}
else
{
// only one node
max = nodeId = nodeId_arg;
}
// first make sure nodes are sendable
for(; nodeId <= max; nodeId++)
{
if (nodeTypes[nodeId] != NODE_TYPE_DB)
continue;
if (okToSendTo(nodeId, true))
{
if (theFacade->theClusterMgr->getNodeInfo(nodeId).connected == false)
{
// node not connected we can safely skip this one
continue;
}
// api_reg_conf not received yet, need to retry
break;
}
}
if (nodeId <= max)
{
if (--retries)
{
ss.unlock();
NdbSleep_MilliSleep(100);
continue;
}
return SEND_OR_RECEIVE_FAILED;
}
if (nodeId_arg == 0)
{
// all nodes
nodeId = 1;
max = MAX_NDB_NODES;
}
else
{
// only one node
max = nodeId = nodeId_arg;
}
// now send to all sendable nodes
// note, lock is held, so states have not changed
for(; (Uint32) nodeId <= max; nodeId++)
{
if (nodeTypes[nodeId] != NODE_TYPE_DB)
continue;
if (theFacade->theClusterMgr->getNodeInfo(nodeId).connected == false)
continue; // node is not connected, skip
if (ss.sendSignal(nodeId, &ssig) == SEND_OK)
nodes.set(nodeId);
}
break;
}
if (nodes.isclear())
@ -1598,6 +1651,7 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId,
int error = 0;
while (!nodes.isclear())
{
Uint32 nodeId;
SimpleSignal *signal = ss.waitFor();
int gsn = signal->readSignalNumber();
nodeId = refToNode(signal->header.theSendersBlockRef);

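setEventReportingLevelImpl() above now retries (up to 30 times, sleeping 100 ms with the facade lock released) until every connected data node has also delivered API_REGCONF, and only then sends EVENT_SUBSCRIBE_REQ while still holding the lock so the verified state cannot change. A condensed, illustrative sketch of that check-then-act-under-lock pattern with stand-in names (not the MgmtSrvr API):

#include <mutex>
#include <thread>
#include <chrono>

// Verify that every target is ready while holding the lock, and only then act,
// so the verified state cannot change underneath us.
template <class ReadyPred, class Action>
bool send_when_all_ready(std::mutex& lock, ReadyPred all_ready, Action act,
                         int retries = 30)
{
  while (retries-- > 0)
  {
    std::unique_lock<std::mutex> guard(lock);
    if (all_ready())   // e.g. node connected and API_REGCONF already received
    {
      act();           // e.g. send EVENT_SUBSCRIBE_REQ to every data node
      return true;
    }
    guard.unlock();    // release the lock before sleeping
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
  return false;        // corresponds to SEND_OR_RECEIVE_FAILED above
}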
View file

@ -597,7 +597,6 @@ private:
*/
enum WaitSignalType {
NO_WAIT, // We don't expect to receive any signal
WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF
WAIT_SUBSCRIBE_CONF // Accept event subscription confirmation
};

View file

@ -332,19 +332,6 @@ MgmApiSession::runSession()
switch(ctx.m_status) {
case Parser_t::UnknownCommand:
#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
/* Backwards compatibility for old NDBs that still use
* the old "GET CONFIG" command.
*/
size_t i;
for(i=0; i<strlen(ctx.m_currentToken); i++)
ctx.m_currentToken[i] = toupper(ctx.m_currentToken[i]);
if(strncmp("GET CONFIG ",
ctx.m_currentToken,
strlen("GET CONFIG ")) == 0)
getConfig_old(ctx);
#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
break;
default:
break;
@ -359,32 +346,6 @@ MgmApiSession::runSession()
DBUG_VOID_RETURN;
}
#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
void
MgmApiSession::getConfig_old(Parser_t::Context &ctx) {
Properties args;
Uint32 version, node;
if(sscanf(ctx.m_currentToken, "GET CONFIG %d %d",
(int *)&version, (int *)&node) != 2) {
m_output->println("Expected 2 arguments for GET CONFIG");
return;
}
/* Put arguments in properties object so we can call the real function */
args.put("version", version);
args.put("node", node);
getConfig_common(ctx, args, true);
}
#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
void
MgmApiSession::getConfig(Parser_t::Context &ctx,
const class Properties &args) {
getConfig_common(ctx, args);
}
static Properties *
backward(const char * base, const Properties* reply){
Properties * ret = new Properties();
@ -560,9 +521,9 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
}
void
MgmApiSession::getConfig_common(Parser_t::Context &,
const class Properties &args,
bool compat) {
MgmApiSession::getConfig(Parser_t::Context &,
const class Properties &args)
{
Uint32 version, node = 0;
args.get("version", &version);
@ -576,47 +537,6 @@ MgmApiSession::getConfig_common(Parser_t::Context &,
return;
}
if(version > 0 && version < makeVersion(3, 5, 0) && compat){
Properties *reply = backward("", conf->m_oldConfig);
reply->put("Version", version);
reply->put("LocalNodeId", node);
backward("", reply);
//reply->print();
const Uint32 size = reply->getPackedSize();
Uint32 *buffer = new Uint32[size/4+1];
reply->pack(buffer);
delete reply;
const int uurows = (size + 44)/45;
char * uubuf = new char[uurows * 62+5];
const int uusz = uuencode_mem(uubuf, (char *)buffer, size);
delete[] buffer;
m_output->println("GET CONFIG %d %d %d %d %d",
0, version, node, size, uusz);
m_output->println("begin 664 Ndb_cfg.bin");
/* XXX Need to write directly to the socket, because the uubuf is not
* NUL-terminated. This could/should probably be done in a nicer way.
*/
write_socket(m_socket, MAX_WRITE_TIMEOUT, uubuf, uusz);
delete[] uubuf;
m_output->println("end");
m_output->println("");
return;
}
if(compat){
m_output->println("GET CONFIG %d %d %d %d %d",1, version, 0, 0, 0);
return;
}
if(node != 0){
bool compatible;
switch (m_mgmsrv.getNodeType(node)) {
@ -844,8 +764,7 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
DBUG_PRINT("enter",("node=%d, category=%d, level=%d", node, cat, level));
/* XXX should use constants for this value */
if(level > 15) {
if(level > NDB_MGM_MAX_LOGLEVEL) {
m_output->println(reply);
m_output->println("result: Invalid loglevel %d", level);
m_output->println("");
@ -889,8 +808,7 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
args.get("category", &cat);
args.get("level", &level);
/* XXX should use constants for this value */
if(level > 15) {
if(level > NDB_MGM_MAX_LOGLEVEL) {
m_output->println("set loglevel reply");
m_output->println("result: Invalid loglevel", errorString.c_str());
m_output->println("");
@ -1590,7 +1508,7 @@ MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx,
}
int level = atoi(spec[1].c_str());
if(level < 0 || level > 15){
if(level < 0 || level > NDB_MGM_MAX_LOGLEVEL){
msg.appfmt("Invalid level: >%s<", spec[1].c_str());
result = -1;
goto done;

View file

@ -24,9 +24,6 @@
#include "MgmtSrvr.hpp"
/** Undefine this to remove backwards compatibility for "GET CONFIG". */
#define MGM_GET_CONFIG_BACKWARDS_COMPAT
class MgmApiSession : public SocketServer::Session
{
static void stop_session_if_timed_out(SocketServer::Session *_s, void *data);
@ -42,9 +39,6 @@ private:
char m_err_str[1024];
int m_stopSelf; // -1 is restart, 0 do nothing, 1 stop
void getConfig_common(Parser_t::Context &ctx,
const class Properties &args,
bool compat = false);
const char *get_error_text(int err_no)
{ return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); }
@ -55,9 +49,6 @@ public:
void getStatPort(Parser_t::Context &ctx, const class Properties &args);
void getConfig(Parser_t::Context &ctx, const class Properties &args);
#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
void getConfig_old(Parser_t::Context &ctx);
#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
void get_nodeid(Parser_t::Context &ctx, const class Properties &args);
void getVersion(Parser_t::Context &ctx, const class Properties &args);

View file

@ -327,7 +327,7 @@ ClusterMgr::showState(NodeId nodeId){
ClusterMgr::Node::Node()
: m_state(NodeState::SL_NOTHING) {
compatible = nfCompleteRep = true;
connected = defined = m_alive = false;
connected = defined = m_alive = m_api_reg_conf = false;
m_state.m_connected_nodes.clear();
}
@ -401,6 +401,8 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
node.m_info.m_version);
}
node.m_api_reg_conf = true;
node.m_state = apiRegConf->nodeState;
if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
@ -519,6 +521,7 @@ ClusterMgr::reportDisconnected(NodeId nodeId){
noOfConnectedNodes--;
theNodes[nodeId].connected = false;
theNodes[nodeId].m_api_reg_conf = false;
theNodes[nodeId].m_state.m_connected_nodes.clear();
reportNodeFailed(nodeId, true);

View file

@ -65,6 +65,7 @@ public:
bool compatible; // Version is compatible
bool nfCompleteRep; // NF Complete Rep has arrived
bool m_alive; // Node is alive
bool m_api_reg_conf;// API_REGCONF has arrived
NodeInfo m_info;
NodeState m_state;
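
Taken together, the ClusterMgr hunks above add a per-node m_api_reg_conf flag that is set when API_REGCONF arrives and cleared again on disconnect, and the assert added to SignalSender::sendSignal below refuses to send anything other than API_REGREQ before that flag is set. A minimal standalone model of that lifecycle, with a hypothetical PeerModel type instead of the real ClusterMgr/SignalSender classes:

// Standalone model of the api_reg_conf gating (hypothetical PeerModel, not
// the real classes): the flag follows connect/API_REGCONF/disconnect, and
// the send path asserts on it exactly like the new SignalSender check.
#include <cassert>
#include <string>

struct PeerModel {
  bool connected    = false;
  bool api_reg_conf = false;   // API_REGCONF has arrived

  void on_connect()     { connected = true; }
  void on_api_regconf() { api_reg_conf = true; }
  void on_disconnect()  { connected = false; api_reg_conf = false; }

  void send(const std::string& signal_name) const {
    // Only the registration request may be sent before the peer has
    // confirmed registration.
    assert(api_reg_conf || signal_name == "API_REGREQ");
    (void)signal_name;       // a real sender would hand off to the transporter
  }
};

// Usage: peer.on_connect(); peer.send("API_REGREQ"); peer.on_api_regconf();
// peer.send("DUMP_STATE_ORD");  // now allowed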

View file

@ -46,7 +46,7 @@ private:
} * m_map;
NdbMutex * m_mutex;
void expand(Uint32 newSize);
int expand(Uint32 newSize);
};
inline
@ -73,9 +73,8 @@ NdbObjectIdMap::map(void * object){
// lock();
if(m_firstFree == InvalidId){
expand(m_expandSize);
}
if(m_firstFree == InvalidId && expand(m_expandSize))
return InvalidId;
Uint32 ff = m_firstFree;
m_firstFree = m_map[ff].m_next;
@ -130,7 +129,7 @@ NdbObjectIdMap::getObject(Uint32 id){
return 0;
}
inline void
inline int
NdbObjectIdMap::expand(Uint32 incSize){
NdbMutex_Lock(m_mutex);
Uint32 newSize = m_size + incSize;
@ -149,9 +148,11 @@ NdbObjectIdMap::expand(Uint32 incSize){
}
else
{
ndbout_c("NdbObjectIdMap::expand unable to expand!!");
NdbMutex_Unlock(m_mutex);
return -1;
}
NdbMutex_Unlock(m_mutex);
return 0;
}
#endif
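
The change above gives NdbObjectIdMap::expand() an int return value so an allocation failure can be reported, and map() now hands back InvalidId instead of indexing into a table that never grew. A simplified standalone model of that contract (hypothetical IdMapModel, not the real class, and without the mutex):

// Simplified model: expand() returns 0 on success, -1 on failure; map()
// propagates the failure as InvalidId instead of dereferencing a free list
// that was never extended.
#include <cstdint>
#include <new>
#include <vector>

class IdMapModel {
public:
  static constexpr uint32_t InvalidId = 0xFFFFFFFFu;

  uint32_t map(void* object) {
    if (m_firstFree == InvalidId && expand(m_expandSize) != 0)
      return InvalidId;                       // propagate out-of-memory
    uint32_t id = m_firstFree;
    m_firstFree = m_slots[id].next;
    m_slots[id].object = object;
    return id;
  }

private:
  struct Slot { uint32_t next; void* object; };

  int expand(uint32_t inc) {
    size_t old_size = m_slots.size();
    try {
      m_slots.resize(old_size + inc);
    } catch (const std::bad_alloc&) {
      return -1;                              // report instead of aborting
    }
    for (size_t i = old_size; i < m_slots.size(); i++)
      m_slots[i].next = (i + 1 < m_slots.size()) ? uint32_t(i + 1) : InvalidId;
    m_firstFree = uint32_t(old_size);
    return 0;
  }

  std::vector<Slot> m_slots;
  uint32_t m_firstFree  = InvalidId;
  uint32_t m_expandSize = 256;
};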

View file

@ -140,6 +140,8 @@ SignalSender::getNoOfConnectedNodes() const {
SendStatus
SignalSender::sendSignal(Uint16 nodeId, const SimpleSignal * s){
assert(getNodeInfo(nodeId).m_api_reg_conf == true ||
s->readSignalNumber() == GSN_API_REGREQ);
return theFacade->theTransporterRegistry->prepareSend(&s->header,
1, // JBB
&s->theData[0],

View file

@ -32,7 +32,7 @@ public:
Uint32 theData[25];
LinearSectionPtr ptr[3];
int readSignalNumber() {return header.theVerId_signalNumber; }
int readSignalNumber() const {return header.theVerId_signalNumber; }
Uint32 *getDataPtrSend() { return theData; }
const Uint32 *getDataPtr() const { return theData; }

View file

@ -61,6 +61,8 @@ public:
int dumpStateAllNodes(int * _args, int _num_args);
int getMasterNodeId();
int getNextMasterNodeId(int nodeId);
int getNodeGroup(int nodeId);
int getRandomNodeSameNodeGroup(int nodeId, int randomNumber);
int getRandomNodeOtherNodeGroup(int nodeId, int randomNumber);
int getRandomNotMasterNodeId(int randomNumber);

View file

@ -8,6 +8,15 @@
static const char* _dbname = "TEST_DB";
static int g_loops = 7;
NDB_STD_OPTS_VARS;
static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
static void usage()
{
ndb_std_print_version();
@ -36,9 +45,10 @@ main(int argc, char** argv){
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
argc--;
argv++;
if ((ho_error=handle_options(&argc, &argv, my_long_options,
ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
Ndb_cluster_connection con(opt_connect_str);
if(con.connect(12, 5, 1))
{

View file

@ -1044,6 +1044,85 @@ int runBug25554(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
int
runBug26457(NDBT_Context* ctx, NDBT_Step* step)
{
NdbRestarter res;
if (res.getNumDbNodes() < 4)
return NDBT_OK;
int loops = ctx->getNumLoops();
while (loops --)
{
retry:
int master = res.getMasterNodeId();
int next = res.getNextMasterNodeId(master);
ndbout_c("master: %d next: %d", master, next);
if (res.getNodeGroup(master) == res.getNodeGroup(next))
{
res.restartOneDbNode(next, false, false, true);
if (res.waitClusterStarted())
return NDBT_FAILED;
goto retry;
}
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 2 };
if (res.dumpStateOneNode(next, val2, 2))
return NDBT_FAILED;
if (res.insertErrorInNode(next, 7180))
return NDBT_FAILED;
res.restartOneDbNode(master, false, false, true);
if (res.waitClusterStarted())
return NDBT_FAILED;
}
return NDBT_OK;
}
int
runBug26481(NDBT_Context* ctx, NDBT_Step* step)
{
int result = NDBT_OK;
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
NdbRestarter res;
int node = res.getRandomNotMasterNodeId(rand());
ndbout_c("node: %d", node);
if (res.restartOneDbNode(node, true, true, true))
return NDBT_FAILED;
if (res.waitNodesNoStart(&node, 1))
return NDBT_FAILED;
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
if (res.dumpStateOneNode(node, val2, 2))
return NDBT_FAILED;
if (res.insertErrorInNode(node, 7018))
return NDBT_FAILED;
if (res.startNodes(&node, 1))
return NDBT_FAILED;
res.waitNodesStartPhase(&node, 1, 3);
if (res.waitNodesNoStart(&node, 1))
return NDBT_FAILED;
res.startNodes(&node, 1);
if (res.waitClusterStarted())
return NDBT_FAILED;
return NDBT_OK;
}
NDBT_TESTSUITE(testNodeRestart);
TESTCASE("NoLoad",
@ -1366,6 +1445,12 @@ TESTCASE("Bug25364", ""){
TESTCASE("Bug25554", ""){
INITIALIZER(runBug25554);
}
TESTCASE("Bug26457", ""){
INITIALIZER(runBug26457);
}
TESTCASE("Bug26481", ""){
INITIALIZER(runBug26481);
}
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){

View file

@ -477,6 +477,14 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug25554 T1
max-time: 1000
cmd: testNodeRestart
args: -n Bug26457 T1
max-time: 1000
cmd: testNodeRestart
args: -n Bug26481 T1
# OLD FLEX
max-time: 500
cmd: flexBench

View file

@ -127,6 +127,68 @@ NdbRestarter::getMasterNodeId(){
return node;
}
int
NdbRestarter::getNodeGroup(int nodeId){
if (!isConnected())
return -1;
if (getStatus() != 0)
return -1;
for(size_t i = 0; i < ndbNodes.size(); i++)
{
if(ndbNodes[i].node_id == nodeId)
{
return ndbNodes[i].node_group;
}
}
return -1;
}
int
NdbRestarter::getNextMasterNodeId(int nodeId){
if (!isConnected())
return -1;
if (getStatus() != 0)
return -1;
size_t i;
for(i = 0; i < ndbNodes.size(); i++)
{
if(ndbNodes[i].node_id == nodeId)
{
break;
}
}
assert(i < ndbNodes.size());
if (i == ndbNodes.size())
return -1;
int dynid = ndbNodes[i].dynamic_id;
int minid = dynid;
for (i = 0; i<ndbNodes.size(); i++)
if (ndbNodes[i].dynamic_id > minid)
minid = ndbNodes[i].dynamic_id;
for (i = 0; i<ndbNodes.size(); i++)
if (ndbNodes[i].dynamic_id > dynid &&
ndbNodes[i].dynamic_id < minid)
{
minid = ndbNodes[i].dynamic_id;
}
if (minid != ~0)
{
for (i = 0; i<ndbNodes.size(); i++)
if (ndbNodes[i].dynamic_id == minid)
return ndbNodes[i].node_id;
}
return getMasterNodeId();
}
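
getNextMasterNodeId() above picks the node whose dynamic_id is the smallest one larger than the given node's dynamic_id, falling back to getMasterNodeId() when nothing larger is found. A simplified standalone sketch of that selection (hypothetical NodeRec type; edge cases such as asking for the node already last in the dynamic order are handled slightly differently by the real helper):

// Standalone re-derivation of the "next master" selection: the candidate
// with the smallest dynamic_id strictly greater than the given node's.
#include <vector>

struct NodeRec { int node_id; int dynamic_id; };

int next_master(const std::vector<NodeRec>& nodes, int node_id, int master_id)
{
  int dynid = -1;
  for (const NodeRec& n : nodes)
    if (n.node_id == node_id)
      dynid = n.dynamic_id;
  if (dynid == -1)
    return -1;                       // unknown node

  const NodeRec* best = nullptr;
  for (const NodeRec& n : nodes)
    if (n.dynamic_id > dynid &&
        (best == nullptr || n.dynamic_id < best->dynamic_id))
      best = &n;

  return best ? best->node_id : master_id;
}

// Example with this sketch: dynamic order 5 -> 9 -> 12 for nodes 1, 2, 3.
// next_master({{1,5},{2,9},{3,12}}, 1, 1) == 2; asking for node 3 falls
// back to the master, node 1.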
int
NdbRestarter::getRandomNotMasterNodeId(int rand){
int master = getMasterNodeId();

View file

@ -3243,7 +3243,7 @@ int ha_ndbcluster::info(uint flag)
if (flag & HA_STATUS_AUTO)
{
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
if (m_table)
if (m_table && table->found_next_number_field)
{
Ndb *ndb= get_ndb();