Merge mysql.com:/home/jonas/src/mysql-4.1-fix
into mysql.com:/home/jonas/src/mysql-5.0
commit c7f740921d
16 changed files with 187 additions and 35 deletions
@@ -1,4 +1,4 @@
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
 flush status;
 create table t1(
 id int not null primary key,
@@ -363,3 +363,8 @@ a int NOT NULL PRIMARY KEY,
 b int
 ) engine=ndb;
 insert t9 values(1, 2), (2,3), (3, 4), (4, 5);
+create table t10 (
+a int not null primary key,
+b blob
+) engine=ndb;
+insert into t10 values (1, 'kalle');
@@ -8,3 +8,6 @@ show status like 'handler_discover%';
 Variable_name Value
 Handler_discover 1
 drop table t9;
+select * from t10;
+ERROR HY000: Got error 4263 'Invalid blob attributes or invalid blob parts table' from ndbcluster
+drop table t10;
@@ -1,4 +1,4 @@
-drop table if exists t1;
+drop table if exists t1, test1, test2;
 CREATE TABLE t1 (
 a int unsigned NOT NULL PRIMARY KEY,
 b int unsigned not null,
@@ -275,3 +275,38 @@ a b c
 1 1 1
 4 4 NULL
 drop table t1;
+CREATE TABLE test1 (
+SubscrID int(11) NOT NULL auto_increment,
+UsrID int(11) NOT NULL default '0',
+PRIMARY KEY (SubscrID),
+KEY idx_usrid (UsrID)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO test1 VALUES (2,224),(3,224),(1,224);
+CREATE TABLE test2 (
+SbclID int(11) NOT NULL auto_increment,
+SbcrID int(11) NOT NULL default '0',
+PRIMARY KEY (SbclID),
+KEY idx_sbcrid (SbcrID)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO test2 VALUES (3,2),(1,1),(2,1),(4,2);
+select * from test1 order by 1;
+SubscrID UsrID
+1 224
+2 224
+3 224
+select * from test2 order by 1;
+SbclID SbcrID
+1 1
+2 1
+3 2
+4 2
+SELECT s.SubscrID,l.SbclID FROM test1 s left JOIN test2 l ON
+l.SbcrID=s.SubscrID WHERE s.UsrID=224 order by 1, 2;
+SubscrID SbclID
+1 1
+1 2
+2 3
+2 4
+3 NULL
+drop table test1;
+drop table test2;
@@ -1,7 +1,7 @@
 -- source include/have_ndb.inc
 
 --disable_warnings
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
 --enable_warnings
 
 ################################################
@@ -472,5 +472,11 @@ system rm var/master-data/test/t9.frm ;
 # MySQL Server will have been restarted because it has a
 # ndb_autodiscover2-master.opt file.
 
+create table t10 (
+a int not null primary key,
+b blob
+) engine=ndb;
+
+insert into t10 values (1, 'kalle');
 
 --exec $NDB_TOOLS_DIR/ndb_drop_table -d test `$NDB_TOOLS_DIR/ndb_show_tables | grep BLOB` > /dev/null 2>&1 || true
@@ -13,4 +13,7 @@ show status like 'handler_discover%';
 
 drop table t9;
 
+--error 1296
+select * from t10;
+drop table t10;
 
@@ -1,7 +1,7 @@
 -- source include/have_ndb.inc
 
 --disable_warnings
-drop table if exists t1;
+drop table if exists t1, test1, test2;
 --enable_warnings
 
 #
@@ -146,3 +146,29 @@ select * from t1 use index (bc) where b IS NULL and c = 2 order by a;
 select * from t1 use index (bc) where b < 4 order by a;
 select * from t1 use index (bc) where b IS NOT NULL order by a;
 drop table t1;
+
+#
+# Bug #6435
+CREATE TABLE test1 (
+SubscrID int(11) NOT NULL auto_increment,
+UsrID int(11) NOT NULL default '0',
+PRIMARY KEY (SubscrID),
+KEY idx_usrid (UsrID)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+
+INSERT INTO test1 VALUES (2,224),(3,224),(1,224);
+
+CREATE TABLE test2 (
+SbclID int(11) NOT NULL auto_increment,
+SbcrID int(11) NOT NULL default '0',
+PRIMARY KEY (SbclID),
+KEY idx_sbcrid (SbcrID)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+
+INSERT INTO test2 VALUES (3,2),(1,1),(2,1),(4,2);
+select * from test1 order by 1;
+select * from test2 order by 1;
+SELECT s.SubscrID,l.SbclID FROM test1 s left JOIN test2 l ON
+l.SbcrID=s.SubscrID WHERE s.UsrID=224 order by 1, 2;
+drop table test1;
+drop table test2;
@@ -21,11 +21,11 @@
 #ifndef NDB_TYPES_H
 #define NDB_TYPES_H
 
-typedef char Int8;
+typedef signed char Int8;
 typedef unsigned char Uint8;
-typedef short Int16;
+typedef signed short Int16;
 typedef unsigned short Uint16;
-typedef int Int32;
+typedef signed int Int32;
 typedef unsigned int Uint32;
 
 typedef unsigned int UintR;
@@ -45,10 +45,10 @@ typedef uintptr_t UintPtr;
 
 #if defined(WIN32) || defined(NDB_WIN32)
 typedef unsigned __int64 Uint64;
-typedef __int64 Int64;
+typedef signed __int64 Int64;
 #else
 typedef unsigned long long Uint64;
-typedef long long Int64;
+typedef signed long long Int64;
 #endif
 
 #endif
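A note on the ndb_types.h hunks above: bare short and int are already signed, but plain char's signedness is implementation-defined, so typedef char Int8 can silently become an unsigned 8-bit type on platforms whose ABI makes char unsigned (several ARM and PowerPC ABIs do). Spelling every typedef with an explicit signed pins the intent, and it is what lets the (Int8)-1 casts be dropped from the NdbOperation comparisons later in this diff. A minimal standalone sketch of the pitfall (illustrative typedef names, not the NDB headers):

#include <iostream>

// Illustrative stand-ins for the two typedef styles; not the NDB headers.
typedef char        MaybeSignedInt8;   // signedness depends on the platform ABI
typedef signed char AlwaysSignedInt8;  // guaranteed signed

int main() {
  MaybeSignedInt8  a = -1;   // stores 255 where char is unsigned
  AlwaysSignedInt8 b = -1;   // always -1

  // Where char is unsigned the first test is false: a promotes to int 255.
  // This is exactly the kind of "x != -1" check the API code relies on.
  std::cout << (a == -1) << ' ' << (b == -1) << '\n';
  return 0;
}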
@@ -369,7 +369,7 @@ public:
    */
+  bool getDistributionKey() const;
   /** @} *******************************************************************/
-
 
 #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
   void setTupleKey(bool);
   bool getTupleKey() const;
@@ -486,6 +486,18 @@ public:
    */
   const Column* getColumn(const char * name) const;
 
+  /**
+   * Get column definition via index in table.
+   * @return null if none existing name
+   */
+  Column* getColumn(const int attributeId);
+
+  /**
+   * Get column definition via name.
+   * @return null if none existing name
+   */
+  Column* getColumn(const char * name);
+
   /**
    * Get column definition via index in table.
    * @return null if none existing name
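The point of the added non-const getColumn overloads is that callers can now modify the returned column object; the blob part-size clamp added to ha_ndbcluster::create further down in this diff does exactly that via setPartSize. A generic sketch of the const/non-const accessor-pair pattern (toy classes, not the real NDB dictionary):

#include <string>
#include <vector>

struct Column { std::string name; int partSize; };

class Table {
  std::vector<Column> cols_;
public:
  void addColumn(const Column& c) { cols_.push_back(c); }

  // Read-only lookup, callable on a const Table; the column cannot be changed.
  const Column* getColumn(int i) const {
    return (i >= 0 && i < (int)cols_.size()) ? &cols_[i] : 0;
  }
  // Mutable lookup: lets callers adjust a column in place before table creation.
  Column* getColumn(int i) {
    return (i >= 0 && i < (int)cols_.size()) ? &cols_[i] : 0;
  }
};

int main() {
  Table t;
  t.addColumn(Column{"b", 2000});
  if (Column* c = t.getColumn(0))   // non-const overload is selected here
    c->partSize = 1024;             // impossible through the const overload
  return 0;
}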
@@ -254,9 +254,9 @@ SignalDataPrintFunctions[] = {
   ,{ 0, 0 }
 };
 
-template class Bitmask<1>;
-template class Bitmask<2>;
-template class Bitmask<4>;
+template struct BitmaskPOD<1>;
+template struct BitmaskPOD<2>;
+template struct BitmaskPOD<4>;
 template class Bitmask<1>;
 template class Bitmask<2>;
 template class Bitmask<4>;
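For readers unfamiliar with lines like template class Bitmask<1>;: these are explicit template instantiations, forcing the compiler to emit the template's member functions for those arguments in this one translation unit instead of leaving it to every user of the header. A self-contained sketch of the mechanism (a toy bitmask, not the NDB one):

#include <cstdint>

// Toy fixed-size bitmask template, loosely in the spirit of BitmaskPOD<size>.
template <unsigned size>
struct BitmaskPOD {
  uint32_t data[size];
  void set(unsigned i)       { data[i >> 5] |= (1u << (i & 31)); }
  bool get(unsigned i) const { return (data[i >> 5] >> (i & 31)) & 1u; }
};

// Explicit instantiation definitions: the member functions of these
// specializations are compiled here, once, for the whole program.
template struct BitmaskPOD<1>;
template struct BitmaskPOD<2>;
template struct BitmaskPOD<4>;

int main() {
  BitmaskPOD<2> m = {};   // zero-initialize the words
  m.set(40);
  return m.get(40) ? 0 : 1;
}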
@@ -55,18 +55,21 @@ NdbDaemon_Make(const char* lockfile, const char* logfile, unsigned flags)
              "%s: lseek failed: %s", lockfile, strerror(errno));
     return -1;
   }
 #ifdef F_TLOCK
   /* Test for lock before becoming daemon */
-  if (lockf(lockfd, F_TEST, 0) == -1) {
-    if (errno == EACCES || errno == EAGAIN) { /* results may vary */
+  if (lockf(lockfd, F_TLOCK, 0) == -1)
+  {
+    if (errno == EACCES || errno == EAGAIN) { /* results may vary */
       snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize,
-          "%s: already locked by pid=%ld", lockfile, NdbDaemon_DaemonPid);
+               "%s: already locked by pid=%ld", lockfile, NdbDaemon_DaemonPid);
       return -1;
     }
     NdbDaemon_ErrorCode = errno;
     snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize,
-        "%s: lock test failed: %s", lockfile, strerror(errno));
+             "%s: lock test failed: %s", lockfile, strerror(errno));
     return -1;
   }
 #endif
   /* Test open log file before becoming daemon */
   if (logfile != NULL) {
     logfd = open(logfile, O_CREAT|O_WRONLY|O_APPEND, 0644);
@@ -77,6 +80,15 @@ NdbDaemon_Make(const char* lockfile, const char* logfile, unsigned flags)
       return -1;
     }
   }
+#ifdef F_TLOCK
+  if (lockf(lockfd, F_ULOCK, 0) == -1)
+  {
+    snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize,
+             "%s: fail to unlock", lockfile);
+    return -1;
+  }
+#endif
+
   /* Fork */
   n = fork();
   if (n == -1) {
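Reading the two NdbDaemon hunks together: the pre-daemonize check now actually acquires the lock with F_TLOCK instead of merely probing it with F_TEST (removing the window between the test and the later lock), and a matching F_ULOCK is added just before fork(), presumably so the lock can be re-taken by the process that survives daemonization (that re-lock is outside this hunk). A stripped-down sketch of the try-lock / unlock / fork sequence on POSIX, with a hypothetical lock-file path and none of the NDB error plumbing:

#include <cerrno>
#include <cstdio>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

int main() {
  const char* lockfile = "/tmp/mydaemon.pid";   // hypothetical path

  int lockfd = open(lockfile, O_CREAT | O_RDWR, 0644);
  if (lockfd == -1) { perror("open"); return 1; }

  // Try to take the whole-file lock without blocking; EACCES/EAGAIN means
  // another instance already holds it.
  if (lockf(lockfd, F_TLOCK, 0) == -1) {
    if (errno == EACCES || errno == EAGAIN)
      fprintf(stderr, "%s: already locked by another process\n", lockfile);
    else
      perror("lockf(F_TLOCK)");
    return 1;
  }

  // Give the lock back before forking so the long-lived process can take it.
  if (lockf(lockfd, F_ULOCK, 0) == -1) { perror("lockf(F_ULOCK)"); return 1; }

  pid_t n = fork();
  if (n == -1) { perror("fork"); return 1; }
  if (n > 0)
    return 0;   // parent exits
  // child: would continue with setsid(), re-locking, writing its pid, ...
  return 0;
}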
@@ -40,7 +40,6 @@ basestring_snprintf(char *str, size_t size, const char *format, ...)
  * Let's hope vsnprintf works anyways
  */
 #define BASESTRING_VSNPRINTF_FUNC(a,b,c,d) vsnprintf(a,b,c,d)
-extern int my_vsnprintf(char *str, size_t size, const char *format, va_list ap);
 #endif
 #ifdef SNPRINTF_RETURN_TRUNC
 static char basestring_vsnprintf_buf[16*1024];
@@ -343,6 +343,18 @@ NdbDictionary::Table::getColumn(const int attrId) const {
   return m_impl.getColumn(attrId);
 }
 
+NdbDictionary::Column*
+NdbDictionary::Table::getColumn(const char * name)
+{
+  return m_impl.getColumn(name);
+}
+
+NdbDictionary::Column*
+NdbDictionary::Table::getColumn(const int attrId)
+{
+  return m_impl.getColumn(attrId);
+}
+
 void
 NdbDictionary::Table::setLogging(bool val){
   m_impl.m_logging = val;
@@ -956,6 +968,10 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
     out << " NOT NULL";
   else
     out << " NULL";
+
+  if (col.getDistributionKey())
+    out << " DISTRIBUTION KEY";
+
   return out;
 }
 
@@ -637,11 +637,9 @@ NdbDictionaryImpl::get_local_table_info(const char * internalTableName,
       return 0;
     }
   }
-  if (do_add_blob_tables &&
-      info->m_table_impl->m_noOfBlobs &&
-      addBlobTables(*(info->m_table_impl))) {
-    return 0;
-  }
+  if (do_add_blob_tables && info->m_table_impl->m_noOfBlobs)
+    addBlobTables(*(info->m_table_impl));
+
   return info; // autoincrement already initialized
 }
 
@@ -192,7 +192,7 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
   OperationType tOperationType = theOperationType;
   Uint32 tTupKeyLen = theTupKeyLen;
   Uint8 abortOption =
-    m_abortOption != (Int8)-1 ? m_abortOption : theNdbCon->m_abortOption;
+    m_abortOption != -1 ? m_abortOption : theNdbCon->m_abortOption;
 
   tcKeyReq->setDirtyFlag(tReqInfo, tDirtyIndicator);
   tcKeyReq->setOperationType(tReqInfo, tOperationType);
@@ -543,7 +543,7 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
   }//if
 
   AbortOption ao = (AbortOption)
-    (m_abortOption != (Int8)-1 ? m_abortOption : theNdbCon->m_abortOption);
+    (m_abortOption != -1 ? m_abortOption : theNdbCon->m_abortOption);
   theReceiver.m_received_result_length = ~0;
 
   theStatus = Finished;
@@ -850,6 +850,14 @@ NdbScanOperation::doSendScan(int aProcessorId)
     tSignal = tSignal->next();
   }
   theStatus = WaitResponse;
+
+  m_sent_receivers_count = theParallelism;
+  if(m_ordered)
+  {
+    m_current_api_receiver = theParallelism;
+    m_api_receivers_count = theParallelism;
+  }
+
   return tSignalCount;
 }//NdbOperation::doSendScan()
 
@@ -1507,13 +1515,8 @@ NdbScanOperation::reset_receivers(Uint32 parallell, Uint32 ordered){
 
   m_api_receivers_count = 0;
   m_current_api_receiver = 0;
-  m_sent_receivers_count = parallell;
+  m_sent_receivers_count = 0;
   m_conf_receivers_count = 0;
-
-  if(ordered){
-    m_current_api_receiver = parallell;
-    m_api_receivers_count = parallell;
-  }
 }
 
 int
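The two scan hunks above shift the receiver bookkeeping from reset time to send time: reset_receivers() now leaves m_sent_receivers_count at zero, and doSendScan() sets it (plus the ordered-scan counters) only once the requests have actually gone out. A toy sketch of that "count in-flight work when it is sent, not when it is planned" shape, not the real NDB scan machinery:

#include <iostream>

// Toy model of the counter shuffle: nothing is "in flight" until send() runs.
class Scan {
  unsigned parallelism_ = 0;
  unsigned sent_receivers_ = 0;   // requests actually on the wire
  unsigned api_receivers_ = 0;    // receivers handed to the API (ordered scans)
  bool ordered_ = false;
public:
  void reset(unsigned parallelism, bool ordered) {
    parallelism_ = parallelism;
    ordered_ = ordered;
    sent_receivers_ = 0;          // was: pre-set to the parallelism before sending
    api_receivers_ = 0;
  }
  void send() {
    // Only now do the counters reflect real outstanding work.
    sent_receivers_ = parallelism_;
    if (ordered_)
      api_receivers_ = parallelism_;
  }
  unsigned outstanding() const { return sent_receivers_; }
};

int main() {
  Scan s;
  s.reset(4, /*ordered=*/true);
  std::cout << s.outstanding() << '\n';  // 0: nothing sent yet
  s.send();
  std::cout << s.outstanding() << '\n';  // 4
  return 0;
}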
@@ -1290,7 +1290,6 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
     Field *field= key_part->field;
     uint part_len= key_part->length;
     uint part_store_len= key_part->store_length;
-    bool part_nullable= (bool) key_part->null_bit;
     // Info about each key part
     struct part_st {
       bool part_last;
@@ -1312,9 +1311,9 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
       p.part_last= (tot_len + part_store_len >= key_tot_len[j]);
       p.key= keys[j];
       p.part_ptr= &p.key->key[tot_len];
-      p.part_null= (field->maybe_null() && *p.part_ptr);
+      p.part_null= key_part->null_bit && *p.part_ptr;
       p.bound_ptr= (const char *)
-      p.part_null ? 0 : part_nullable ? p.part_ptr + 1 : p.part_ptr;
+      p.part_null ? 0 : key_part->null_bit ? p.part_ptr + 1 : p.part_ptr;
 
       if (j == 0)
       {
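The set_bounds change above derives NULL handling from the key part itself (key_part->null_bit) rather than from the field, and the layout visible in the code is: a nullable key part is stored as one null-indicator byte followed by the value, so the bound pointer is either null (match NULL) or points one byte past the indicator. A toy decoding of that layout, assuming the one-byte indicator convention shown in the hunk rather than MySQL's actual KEY_PART_INFO handling:

#include <cstdint>
#include <cstring>
#include <iostream>

// Toy view of one nullable key part in a key buffer: [null byte][value bytes].
struct KeyPartRef {
  bool        is_null;   // derived from the indicator byte
  const void* data;      // null when the part is NULL, else points at the value
};

static KeyPartRef read_key_part(const unsigned char* part_ptr, bool part_nullable) {
  if (part_nullable && *part_ptr)            // indicator byte set means SQL NULL
    return { true, nullptr };
  // Skip the indicator byte only when the part has one at all.
  return { false, part_nullable ? part_ptr + 1 : part_ptr };
}

int main() {
  unsigned char buf[1 + 4] = { 0 };          // [null byte][int32 value]
  int32_t v = 42;
  std::memcpy(buf + 1, &v, sizeof v);

  KeyPartRef r = read_key_part(buf, /*part_nullable=*/true);
  std::cout << r.is_null << '\n';            // 0: value present, data = buf + 1

  buf[0] = 1;                                // mark the part as NULL
  r = read_key_part(buf, true);
  std::cout << r.is_null << '\n';            // 1: the bound pointer would be null
  return 0;
}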
@@ -3322,7 +3321,7 @@ int ha_ndbcluster::create(const char *name,
 {
   NDBTAB tab;
   NDBCOL col;
-  uint pack_length, length, i;
+  uint pack_length, length, i, pk_length= 0;
   const void *data, *pack_data;
   const char **key_names= form->keynames.type_names;
   char name2[FN_HEADLEN];
@@ -3369,6 +3368,8 @@ int ha_ndbcluster::create(const char *name,
     if ((my_errno= create_ndb_column(col, field, info)))
       DBUG_RETURN(my_errno);
     tab.addColumn(col);
+    if(col.getPrimaryKey())
+      pk_length += (field->pack_length() + 3) / 4;
   }
 
   // No primary key, create shadow key as 64 bit, auto increment
@@ -3382,6 +3383,39 @@ int ha_ndbcluster::create(const char *name,
     col.setPrimaryKey(TRUE);
     col.setAutoIncrement(TRUE);
     tab.addColumn(col);
+    pk_length += 2;
   }
+
+  // Make sure that blob tables don't have to big part size
+  for (i= 0; i < form->fields; i++)
+  {
+    /**
+     * The extra +7 concists
+     * 2 - words from pk in blob table
+     * 5 - from extra words added by tup/dict??
+     */
+    switch (form->field[i]->real_type()) {
+    case MYSQL_TYPE_BLOB:
+    case MYSQL_TYPE_MEDIUM_BLOB:
+    case MYSQL_TYPE_LONG_BLOB:
+    {
+      NdbDictionary::Column * col = tab.getColumn(i);
+      int size = pk_length + (col->getPartSize()+3)/4 + 7;
+      if(size > NDB_MAX_TUPLE_SIZE_IN_WORDS &&
+         (pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS)
+      {
+        size = NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7;
+        col->setPartSize(4*size);
+      }
+      /**
+       * If size > NDB_MAX and pk_length+7 >= NDB_MAX
+       * then the table can't be created anyway, so skip
+       * changing part size, and have error later
+       */
+    }
+    default:
+      break;
+    }
+  }
 
   if ((my_errno= check_ndb_connection()))
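To spell out the arithmetic in the last hunk: NDB rows are budgeted in 32-bit words, a blob column contributes its in-row part size rounded up to words plus about 7 words of primary-key and bookkeeping overhead, and the part size is shrunk when that total would exceed the maximum tuple size. A standalone sketch of the clamp; the 2000-word limit is an assumed stand-in for NDB_MAX_TUPLE_SIZE_IN_WORDS, not a value taken from this diff:

#include <iostream>

// Illustrative constant only; the real limit is NDB_MAX_TUPLE_SIZE_IN_WORDS.
const int kMaxTupleSizeInWords = 2000;

// Mirrors the clamp in the hunk above: row budget in 32-bit words is
// pk_length + ceil(part_size_bytes / 4) + 7 words of blob-table overhead.
int clamped_part_size(int pk_length, int part_size_bytes) {
  int size = pk_length + (part_size_bytes + 3) / 4 + 7;
  if (size > kMaxTupleSizeInWords && (pk_length + 7) < kMaxTupleSizeInWords) {
    size = kMaxTupleSizeInWords - pk_length - 7;   // words left for the blob part
    return 4 * size;                               // back to bytes
  }
  return part_size_bytes;                          // fits, or cannot fit at all
}

int main() {
  // With a 1-word primary key, an 8000-byte part overflows a 2000-word row and
  // gets shrunk; a 2000-byte part is left alone.
  std::cout << clamped_part_size(1, 8000) << '\n';   // 7968 under these assumptions
  std::cout << clamped_part_size(1, 2000) << '\n';   // 2000
  return 0;
}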