Merge bk-internal:/home/bk/mysql-5.1-new

into  neptunus.(none):/home/msvensson/mysql/bug15020/my51-bug15020
This commit is contained in:
msvensson@neptunus.(none) 2006-01-26 11:14:21 +01:00
commit bacd40f3fb
50 changed files with 1735 additions and 452 deletions

View file

@ -74,5 +74,6 @@ hours:
[jonas:]checkout:get
[tomas:]checkout:get
[guilhem:]checkout:get
[pekka:]checkout:get
checkout:edit
eoln:unix

View file

@ -1184,7 +1184,7 @@ parse_delimiter(const char *script, statement **stmt, char delm)
tmp= tmp->next)
{
count++;
tmp->string= my_strdup_with_length(ptr, (size_t)(retstr - ptr), MYF(MY_FAE));
tmp->string= my_strndup(ptr, (size_t)(retstr - ptr), MYF(MY_FAE));
tmp->length= (size_t)(retstr - ptr);
DBUG_PRINT("info", (" Creating : %.*s\n", (uint)tmp->length, tmp->string));
ptr+= retstr - ptr + 1;
@ -1195,7 +1195,7 @@ parse_delimiter(const char *script, statement **stmt, char delm)
if (ptr != script+length)
{
tmp->string= my_strdup_with_length(ptr, (size_t)((script + length) - ptr),
tmp->string= my_strndup(ptr, (size_t)((script + length) - ptr),
MYF(MY_FAE));
tmp->length= (size_t)((script + length) - ptr);
DBUG_PRINT("info", (" Creating : %.*s\n", (uint)tmp->length, tmp->string));

View file

@ -638,7 +638,7 @@ static char *get_word(char **str)
DBUG_ENTER("get_word");
*str= find_end_of_word(start);
DBUG_RETURN(my_strdup_with_length(start, (uint) (*str - start),
DBUG_RETURN(my_strndup(start, (uint) (*str - start),
MYF(MY_WME | MY_FAE)));
}
@ -672,7 +672,7 @@ static struct message *parse_message_string(struct message *new_message,
while (*str != ' ' && *str != '\t' && *str)
str++;
if (!(new_message->lang_short_name=
my_strdup_with_length(start, (uint) (str - start),
my_strndup(start, (uint) (str - start),
MYF(MY_WME | MY_FAE))))
DBUG_RETURN(0); /* Fatal error */
DBUG_PRINT("info", ("msg_slang: %s", new_message->lang_short_name));
@ -692,7 +692,7 @@ static struct message *parse_message_string(struct message *new_message,
start= str + 1;
str= parse_text_line(start);
if (!(new_message->text= my_strdup_with_length(start, (uint) (str - start),
if (!(new_message->text= my_strndup(start, (uint) (str - start),
MYF(MY_WME | MY_FAE))))
DBUG_RETURN(0); /* Fatal error */
DBUG_PRINT("info", ("msg_text: %s", new_message->text));

View file

@ -136,7 +136,7 @@ extern int NEAR my_errno; /* Last error in mysys */
#define my_free(PTR,FLAG) _myfree((PTR), __FILE__, __LINE__,FLAG)
#define my_memdup(A,B,C) _my_memdup((A),(B), __FILE__,__LINE__,C)
#define my_strdup(A,C) _my_strdup((A), __FILE__,__LINE__,C)
#define my_strdup_with_length(A,B,C) _my_strdup_with_length((A),(B),__FILE__,__LINE__,C)
#define my_strndup(A,B,C) _my_strndup((A),(B),__FILE__,__LINE__,C)
#define TRASH(A,B) bfill(A, B, 0x8F)
#define QUICK_SAFEMALLOC sf_malloc_quick=1
#define NORMAL_SAFEMALLOC sf_malloc_quick=0
@ -158,7 +158,7 @@ extern gptr my_realloc(gptr oldpoint,uint Size,myf MyFlags);
extern void my_no_flags_free(gptr ptr);
extern gptr my_memdup(const byte *from,uint length,myf MyFlags);
extern char *my_strdup(const char *from,myf MyFlags);
extern char *my_strdup_with_length(const byte *from, uint length,
extern char *my_strndup(const byte *from, uint length,
myf MyFlags);
/* we do use FG (as a no-op) in below so that a typo on FG is caught */
#define my_free(PTR,FG) ((void)FG,my_no_flags_free(PTR))
@ -597,7 +597,7 @@ extern gptr _my_memdup(const byte *from,uint length,
const char *sFile, uint uLine,myf MyFlag);
extern my_string _my_strdup(const char *from, const char *sFile, uint uLine,
myf MyFlag);
extern char *_my_strdup_with_length(const byte *from, uint length,
extern char *_my_strndup(const byte *from, uint length,
const char *sFile, uint uLine,
myf MyFlag);

View file

@ -112,6 +112,8 @@ ALTER TABLE t1 DROP PARTITION x1;
ALTER TABLE t1 DROP PARTITION x0;
ERROR HY000: Cannot remove all partitions, use DROP TABLE instead
DROP TABLE t1;
INSERT INTO t1 VALUES (15);
DROP TABLE t1;
CREATE TABLE t1 ( id INT NOT NULL,
fname VARCHAR(50) NOT NULL,
lname VARCHAR(50) NOT NULL,

View file

@ -29,3 +29,7 @@ ndb_autodiscover : Needs to be fixed w.r.t binlog
ndb_autodiscover2 : Needs to be fixed w.r.t binlog
system_mysql_db : Needs fixing
system_mysql_db_fix : Needs fixing
#ndb_alter_table_row : sometimes wrong error 1015!=1046
ndb_gis : garbled msgs from corrupt THD* + partitioning problem
# vim: set filetype=conf:

View file

@ -168,6 +168,23 @@ ALTER TABLE t1 DROP PARTITION x0;
DROP TABLE t1;
#
# BUG: 14354 Partitions: data directory clause fails
#
--exec rm -rf $MYSQL_TEST_DIR/bug14354
--exec mkdir $MYSQL_TEST_DIR/bug14354
disable_query_log;
eval CREATE TABLE t1 (id int) PARTITION BY RANGE(id) (
PARTITION p1 VALUES LESS THAN (20) ENGINE=myiasm
DATA DIRECTORY="$MYSQL_TEST_DIR/bug14354"
INDEX DIRECTORY="$MYSQL_TEST_DIR/bug14354");
enable_query_log;
INSERT INTO t1 VALUES (15);
--exec test -f $MYSQL_TEST_DIR/bug14354/t1#P#p1.MYD
--exec test -f $MYSQL_TEST_DIR/bug14354/t1#P#p1.MYI
DROP TABLE t1;
--exec rm -rf $MYSQL_TEST_DIR/bug14354
#
# Bug# 16534 - Trying to add multiple partitions crashes server
#

View file

@ -53,7 +53,7 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist)
end=strcend(pathlist, DELIM);
strmake(buff, pathlist, (uint) (end-pathlist));
length= cleanup_dirname(buff, buff);
if (!(copy= my_strdup_with_length(buff, length, MYF(MY_WME))) ||
if (!(copy= my_strndup(buff, length, MYF(MY_WME))) ||
insert_dynamic(&t_arr, (gptr) &copy))
DBUG_RETURN(TRUE);
pathlist=end+1;

View file

@ -83,7 +83,7 @@ char *my_strdup(const char *from, myf my_flags)
}
char *my_strdup_with_length(const byte *from, uint length, myf my_flags)
char *my_strndup(const byte *from, uint length, myf my_flags)
{
gptr ptr;
if ((ptr=my_malloc(length+1,my_flags)) != 0)

View file

@ -525,7 +525,7 @@ char *_my_strdup(const char *from, const char *filename, uint lineno,
} /* _my_strdup */
char *_my_strdup_with_length(const byte *from, uint length,
char *_my_strndup(const byte *from, uint length,
const char *filename, uint lineno,
myf MyFlags)
{

View file

@ -647,7 +647,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
DBUG_PRINT("info", ("Length: %d", table->s->connect_string.length));
DBUG_PRINT("info", ("String: '%.*s'", table->s->connect_string.length,
table->s->connect_string.str));
share->scheme= my_strdup_with_length((const byte*)table->s->
share->scheme= my_strndup((const byte*)table->s->
connect_string.str,
table->s->connect_string.length,
MYF(0));

View file

@ -35,6 +35,11 @@
#include "ha_ndbcluster_binlog.h"
#ifdef ndb_dynamite
#undef assert
#define assert(x) do { if(x) break; ::printf("%s %d: assert failed: %s\n", __FILE__, __LINE__, #x); ::fflush(stdout); ::signal(SIGABRT,SIG_DFL); ::abort(); ::kill(::getpid(),6); ::kill(::getpid(),9); } while (0)
#endif
// options from mysqld.cc
extern my_bool opt_ndb_optimized_node_selection;
extern const char *opt_ndbcluster_connectstring;
@ -791,10 +796,20 @@ int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg)
if (ndb_blob->blobsNextBlob() != NULL)
DBUG_RETURN(0);
ha_ndbcluster *ha= (ha_ndbcluster *)arg;
DBUG_RETURN(ha->get_ndb_blobs_value(ndb_blob));
int ret= get_ndb_blobs_value(ha->table, ha->m_value,
ha->m_blobs_buffer, ha->m_blobs_buffer_size,
0);
DBUG_RETURN(ret);
}
int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
/*
This routine is shared by injector. There is no common blobs buffer
so the buffer and length are passed by reference. Injector also
passes a record pointer diff.
*/
int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
byte*& buffer, uint& buffer_size,
my_ptrdiff_t ptrdiff)
{
DBUG_ENTER("get_ndb_blobs_value");
@ -803,44 +818,51 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
for (int loop= 0; loop <= 1; loop++)
{
uint32 offset= 0;
for (uint i= 0; i < table_share->fields; i++)
for (uint i= 0; i < table->s->fields; i++)
{
Field *field= table->field[i];
NdbValue value= m_value[i];
NdbValue value= value_array[i];
if (value.ptr != NULL && (field->flags & BLOB_FLAG))
{
Field_blob *field_blob= (Field_blob *)field;
NdbBlob *ndb_blob= value.blob;
Uint64 blob_len= 0;
if (ndb_blob->getLength(blob_len) != 0)
DBUG_RETURN(-1);
// Align to Uint64
uint32 blob_size= blob_len;
if (blob_size % 8 != 0)
blob_size+= 8 - blob_size % 8;
if (loop == 1)
{
char *buf= m_blobs_buffer + offset;
uint32 len= 0xffffffff; // Max uint32
DBUG_PRINT("value", ("read blob ptr=%lx len=%u",
buf, (uint) blob_len));
if (ndb_blob->readData(buf, len) != 0)
int isNull;
ndb_blob->getDefined(isNull);
if (isNull == 0) { // XXX -1 should be allowed only for events
Uint64 blob_len= 0;
if (ndb_blob->getLength(blob_len) != 0)
DBUG_RETURN(-1);
DBUG_ASSERT(len == blob_len);
field_blob->set_ptr(len, buf);
// Align to Uint64
uint32 blob_size= blob_len;
if (blob_size % 8 != 0)
blob_size+= 8 - blob_size % 8;
if (loop == 1)
{
char *buf= buffer + offset;
uint32 len= 0xffffffff; // Max uint32
DBUG_PRINT("info", ("read blob ptr=%p len=%u",
buf, (uint) blob_len));
if (ndb_blob->readData(buf, len) != 0)
DBUG_RETURN(-1);
DBUG_ASSERT(len == blob_len);
// Ugly hack assumes only ptr needs to be changed
field_blob->ptr += ptrdiff;
field_blob->set_ptr(len, buf);
field_blob->ptr -= ptrdiff;
}
offset+= blob_size;
}
offset+= blob_size;
}
}
if (loop == 0 && offset > m_blobs_buffer_size)
if (loop == 0 && offset > buffer_size)
{
my_free(m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
m_blobs_buffer_size= 0;
DBUG_PRINT("value", ("allocate blobs buffer size %u", offset));
m_blobs_buffer= my_malloc(offset, MYF(MY_WME));
if (m_blobs_buffer == NULL)
my_free(buffer, MYF(MY_ALLOW_ZERO_PTR));
buffer_size= 0;
DBUG_PRINT("info", ("allocate blobs buffer size %u", offset));
buffer= my_malloc(offset, MYF(MY_WME));
if (buffer == NULL)
DBUG_RETURN(-1);
m_blobs_buffer_size= offset;
buffer_size= offset;
}
}
DBUG_RETURN(0);
@ -2713,14 +2735,22 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
else
{
NdbBlob *ndb_blob= (*value).blob;
bool isNull= TRUE;
#ifndef DBUG_OFF
int ret=
#endif
ndb_blob->getNull(isNull);
DBUG_ASSERT(ret == 0);
if (isNull)
field->set_null(row_offset);
int isNull;
ndb_blob->getDefined(isNull);
if (isNull != 0)
{
uint col_no = ndb_blob->getColumn()->getColumnNo();
if (isNull == 1)
{
DBUG_PRINT("info",("[%u] NULL", col_no))
field->set_null(row_offset);
}
else
{
DBUG_PRINT("info",("[%u] UNDEFINED", col_no));
bitmap_clear_bit(defined, col_no);
}
}
}
}
}
@ -4713,6 +4743,7 @@ int ha_ndbcluster::alter_table_name(const char *to)
NDBDICT *dict= ndb->getDictionary();
const NDBTAB *orig_tab= (const NDBTAB *) m_table;
DBUG_ENTER("alter_table_name");
DBUG_PRINT("info", ("from: %s to: %s", orig_tab->getName(), to));
NdbDictionary::Table new_tab= *orig_tab;
new_tab.setName(to);

View file

@ -25,6 +25,9 @@
#pragma interface /* gcc class implementation */
#endif
/* Blob tables and events are internal to NDB and must never be accessed */
#define IS_NDB_BLOB_PREFIX(A) is_prefix(A, "NDB$BLOB")
#include <NdbApi.hpp>
#include <ndbapi_limits.h>
@ -78,6 +81,10 @@ typedef struct ndb_index_data {
typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
byte*& buffer, uint& buffer_size,
my_ptrdiff_t ptrdiff);
typedef enum {
NSS_INITIAL= 0,
NSS_DROPPED,
@ -114,6 +121,7 @@ typedef struct st_ndbcluster_share {
#ifdef HAVE_NDB_BINLOG
/* NDB_SHARE.flags */
#define NSF_HIDDEN_PK 1 /* table has hidden primary key */
#define NSF_BLOB_FLAG 2 /* table has blob attributes */
#define NSF_NO_BINLOG 4 /* table should not be binlogged */
#endif

View file

@ -23,6 +23,11 @@
#include "slave.h"
#include "ha_ndbcluster_binlog.h"
#ifdef ndb_dynamite
#undef assert
#define assert(x) do { if(x) break; ::printf("%s %d: assert failed: %s\n", __FILE__, __LINE__, #x); ::fflush(stdout); ::signal(SIGABRT,SIG_DFL); ::abort(); ::kill(::getpid(),6); ::kill(::getpid(),9); } while (0)
#endif
/*
defines for cluster replication table names
*/
@ -237,6 +242,8 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
DBUG_ASSERT(_table != 0);
if (_table->s->primary_key == MAX_KEY)
share->flags|= NSF_HIDDEN_PK;
if (_table->s->blob_fields != 0)
share->flags|= NSF_BLOB_FLAG;
return;
}
while (1)
@ -316,6 +323,8 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
}
if (table->s->primary_key == MAX_KEY)
share->flags|= NSF_HIDDEN_PK;
if (table->s->blob_fields != 0)
share->flags|= NSF_BLOB_FLAG;
break;
}
}
@ -1622,6 +1631,7 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
NDB_SHARE *share)
{
DBUG_ENTER("ndbcluster_create_binlog_setup");
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(table_name));
pthread_mutex_lock(&ndbcluster_mutex);
@ -1713,6 +1723,10 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
const char *event_name, NDB_SHARE *share)
{
DBUG_ENTER("ndbcluster_create_event");
DBUG_PRINT("info", ("table=%s version=%d event=%s share=%s",
ndbtab->getName(), ndbtab->getObjectVersion(),
event_name, share ? share->key : "(nil)"));
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(ndbtab->getName()));
if (!share)
{
DBUG_PRINT("info", ("share == NULL"));
@ -1730,7 +1744,14 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
my_event.addTableEvent(NDBEVENT::TE_ALL);
if (share->flags & NSF_HIDDEN_PK)
{
/* No primary key, susbscribe for all attributes */
if (share->flags & NSF_BLOB_FLAG)
{
sql_print_error("NDB Binlog: logging of table %s "
"with no PK and blob attributes is not supported",
share->key);
DBUG_RETURN(-1);
}
/* No primary key, subscribe for all attributes */
my_event.setReport(NDBEVENT::ER_ALL);
DBUG_PRINT("info", ("subscription all"));
}
@ -1749,6 +1770,8 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
DBUG_PRINT("info", ("subscription all and subscribe"));
}
}
if (share->flags & NSF_BLOB_FLAG)
my_event.mergeEvents(true);
/* add all columns to the event */
int n_cols= ndbtab->getNoOfColumns();
@ -1837,6 +1860,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
*/
DBUG_ENTER("ndbcluster_create_event_ops");
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(ndbtab->getName()));
DBUG_ASSERT(share != 0);
@ -1857,22 +1881,6 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
}
TABLE *table= share->table;
if (table)
{
/*
Logging of blob tables is not yet implemented, it would require:
1. setup of events also on the blob attribute tables
2. collect the pieces of the blob into one from an epoch to
provide a full blob to binlog
*/
if (table->s->blob_fields)
{
sql_print_error("NDB Binlog: logging of blob table %s "
"is not supported", share->key);
share->flags|= NSF_NO_BINLOG;
DBUG_RETURN(0);
}
}
int do_schema_share= 0, do_apply_status_share= 0;
int retries= 100;
@ -1910,37 +1918,64 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
DBUG_RETURN(-1);
}
if (share->flags & NSF_BLOB_FLAG)
op->mergeEvents(true); // currently not inherited from event
if (share->flags & NSF_BLOB_FLAG)
{
/*
* Given servers S1 S2, following results in out-of-date
* event->m_tableImpl and column->m_blobTable.
*
* S1: create table t1(a int primary key);
* S2: drop table t1;
* S1: create table t2(a int primary key, b blob);
* S1: alter table t2 add x int;
* S1: alter table t2 drop x;
*
* TODO fix at right place before we get here
*/
ndb->getDictionary()->fix_blob_events(ndbtab, event_name);
}
int n_columns= ndbtab->getNoOfColumns();
int n_fields= table ? table->s->fields : 0;
int n_fields= table ? table->s->fields : 0; // XXX ???
for (int j= 0; j < n_columns; j++)
{
const char *col_name= ndbtab->getColumn(j)->getName();
NdbRecAttr *attr0, *attr1;
NdbValue attr0, attr1;
if (j < n_fields)
{
Field *f= share->table->field[j];
if (is_ndb_compatible_type(f))
{
DBUG_PRINT("info", ("%s compatible", col_name));
attr0= op->getValue(col_name, f->ptr);
attr1= op->getPreValue(col_name, (f->ptr-share->table->record[0]) +
attr0.rec= op->getValue(col_name, f->ptr);
attr1.rec= op->getPreValue(col_name,
(f->ptr - share->table->record[0]) +
share->table->record[1]);
}
else if (! (f->flags & BLOB_FLAG))
{
DBUG_PRINT("info", ("%s non compatible", col_name));
attr0.rec= op->getValue(col_name);
attr1.rec= op->getPreValue(col_name);
}
else
{
DBUG_PRINT("info", ("%s non compatible", col_name));
attr0= op->getValue(col_name);
attr1= op->getPreValue(col_name);
DBUG_PRINT("info", ("%s blob", col_name));
attr0.blob= op->getBlobHandle(col_name);
attr1.blob= op->getPreBlobHandle(col_name);
}
}
else
{
DBUG_PRINT("info", ("%s hidden key", col_name));
attr0= op->getValue(col_name);
attr1= op->getPreValue(col_name);
attr0.rec= op->getValue(col_name);
attr1.rec= op->getPreValue(col_name);
}
share->ndb_value[0][j].rec= attr0;
share->ndb_value[1][j].rec= attr1;
share->ndb_value[0][j].ptr= attr0.ptr;
share->ndb_value[1][j].ptr= attr1.ptr;
}
op->setCustomData((void *) share); // set before execute
share->op= op; // assign op in NDB_SHARE
@ -2229,12 +2264,27 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
(saves moving data about many times)
*/
/*
for now malloc/free blobs buffer each time
TODO if possible share single permanent buffer with handlers
*/
byte* blobs_buffer[2] = { 0, 0 };
uint blobs_buffer_size[2] = { 0, 0 };
switch(pOp->getEventType())
{
case NDBEVENT::TE_INSERT:
row.n_inserts++;
DBUG_PRINT("info", ("INSERT INTO %s", share->key));
{
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= 0;
int ret= get_ndb_blobs_value(table, share->ndb_value[0],
blobs_buffer[0], blobs_buffer_size[0],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
trans.write_row(::server_id, injector::transaction::table(table, true),
&b, n_fields, table->record[0]);
@ -2261,6 +2311,14 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
key
*/
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= table->record[n] - table->record[0];
int ret= get_ndb_blobs_value(table, share->ndb_value[n],
blobs_buffer[n], blobs_buffer_size[n],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
print_records(table, table->record[n]);
trans.delete_row(::server_id, injector::transaction::table(table, true),
@ -2271,13 +2329,21 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
row.n_updates++;
DBUG_PRINT("info", ("UPDATE %s", share->key));
{
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= 0;
int ret= get_ndb_blobs_value(table, share->ndb_value[0],
blobs_buffer[0], blobs_buffer_size[0],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[0],
&b, table->record[0]);
print_records(table, table->record[0]);
if (table->s->primary_key != MAX_KEY)
{
/*
since table has a primary key, we can to a write
since table has a primary key, we can do a write
using only after values
*/
trans.write_row(::server_id, injector::transaction::table(table, true),
@ -2289,6 +2355,14 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
mysql server cannot handle the ndb hidden key and
therefore needs the before image as well
*/
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= table->record[1] - table->record[0];
int ret= get_ndb_blobs_value(table, share->ndb_value[1],
blobs_buffer[1], blobs_buffer_size[1],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
print_records(table, table->record[1]);
trans.update_row(::server_id,
@ -2305,6 +2379,12 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
break;
}
if (share->flags & NSF_BLOB_FLAG)
{
my_free(blobs_buffer[0], MYF(MY_ALLOW_ZERO_PTR));
my_free(blobs_buffer[1], MYF(MY_ALLOW_ZERO_PTR));
}
return 0;
}
@ -2544,6 +2624,9 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Binlog_index_row row;
while (pOp != NULL)
{
// sometimes get TE_ALTER with invalid table
DBUG_ASSERT(pOp->getEventType() == NdbDictionary::Event::TE_ALTER ||
! IS_NDB_BLOB_PREFIX(pOp->getTable()->getName()));
ndb->
setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
@ -2684,6 +2767,7 @@ err:
DBUG_PRINT("info",("removing all event operations"));
while ((op= ndb->getEventOperation()))
{
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getTable()->getName()));
DBUG_PRINT("info",("removing event operation on %s",
op->getEvent()->getName()));
NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();

View file

@ -1696,7 +1696,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
error= (*file)->delete_table((const char*) from_buff);
else
{
set_up_table_before_create(table_arg, create_info, i);
set_up_table_before_create(table_arg, from_buff, create_info, i);
error= (*file)->create(from_buff, table_arg, create_info);
}
name_buffer_ptr= strend(name_buffer_ptr) + 1;
@ -1770,8 +1770,9 @@ partition_element *ha_partition::find_partition_element(uint part_id)
*/
void ha_partition::set_up_table_before_create(TABLE *table,
HA_CREATE_INFO *info,
uint part_id)
const char *partition_name_with_path,
HA_CREATE_INFO *info,
uint part_id)
{
partition_element *part_elem= find_partition_element(part_id);
@ -1779,6 +1780,15 @@ void ha_partition::set_up_table_before_create(TABLE *table,
return; // Fatal error
table->s->max_rows= part_elem->part_max_rows;
table->s->min_rows= part_elem->part_min_rows;
char *partition_name= strrchr(partition_name_with_path, FN_LIBCHAR);
if (part_elem->index_file_name)
append_file_to_dir(current_thd,
(const char**)&part_elem->index_file_name,
partition_name+1);
if (part_elem->data_file_name)
append_file_to_dir(current_thd,
(const char**)&part_elem->data_file_name,
partition_name+1);
info->index_file_name= part_elem->index_file_name;
info->data_file_name= part_elem->data_file_name;
}

View file

@ -220,8 +220,10 @@ private:
bool new_handlers_from_part_info();
bool create_handlers();
void clear_handler_file();
void set_up_table_before_create(TABLE * table_arg, HA_CREATE_INFO * info,
uint part_id);
void set_up_table_before_create(TABLE *table_arg,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
uint part_id);
partition_element *find_partition_element(uint part_id);
public:

View file

@ -737,17 +737,23 @@ typedef uint32 (*partition_iter_func)(st_partition_iter* part_iter);
typedef struct st_partition_iter
{
partition_iter_func get_next;
struct st_part_num_range
{
uint32 start;
uint32 end;
};
union {
struct {
uint32 start_part_num;
uint32 end_part_num;
};
struct {
longlong start_val;
longlong end_val;
};
bool null_returned;
struct st_field_value_range
{
longlong start;
longlong end;
};
union
{
struct st_part_num_range part_nums;
struct st_field_value_range field_vals;
};
partition_info *part_info;
} PARTITION_ITERATOR;
@ -1004,8 +1010,8 @@ uint32 get_next_partition_id_range(struct st_partition_iter* part_iter);
inline void init_single_partition_iterator(uint32 part_id,
PARTITION_ITERATOR *part_iter)
{
part_iter->start_part_num= part_id;
part_iter->end_part_num= part_id+1;
part_iter->part_nums.start= part_id;
part_iter->part_nums.end= part_id+1;
part_iter->get_next= get_next_partition_id_range;
}
@ -1013,8 +1019,8 @@ inline
void init_all_partitions_iterator(partition_info *part_info,
PARTITION_ITERATOR *part_iter)
{
part_iter->start_part_num= 0;
part_iter->end_part_num= part_info->no_parts;
part_iter->part_nums.start= 0;
part_iter->part_nums.end= part_info->no_parts;
part_iter->get_next= get_next_partition_id_range;
}

View file

@ -3138,7 +3138,7 @@ Rotate_log_event::Rotate_log_event(const char* new_log_ident_arg,
llstr(pos_arg, buff), flags));
#endif
if (flags & DUP_NAME)
new_log_ident= my_strdup_with_length((const byte*) new_log_ident_arg,
new_log_ident= my_strndup((const byte*) new_log_ident_arg,
ident_len, MYF(MY_WME));
DBUG_VOID_RETURN;
}
@ -3162,7 +3162,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len,
(header_size+post_header_len));
ident_offset = post_header_len;
set_if_smaller(ident_len,FN_REFLEN-1);
new_log_ident= my_strdup_with_length((byte*) buf + ident_offset,
new_log_ident= my_strndup((byte*) buf + ident_offset,
(uint) ident_len,
MYF(MY_WME));
DBUG_VOID_RETURN;

View file

@ -649,6 +649,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char* packet, uint packet_length);
void log_slow_statement(THD *thd);
bool check_dup(const char *db, const char *name, TABLE_LIST *tables);
bool append_file_to_dir(THD *thd, const char **filename_ptr,
const char *table_name);
bool table_cache_init(void);
void table_cache_free(void);

View file

@ -1021,7 +1021,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex,
uint new_length= (var ? var->value->str_value.length() : 0);
if (!old_value)
old_value= (char*) "";
if (!(res= my_strdup_with_length((byte*)old_value, new_length, MYF(0))))
if (!(res= my_strndup((byte*)old_value, new_length, MYF(0))))
return 1;
/*
Replace the old value in such a way that the any thread using

View file

@ -926,7 +926,7 @@ public:
uint name_length_arg, gptr data_arg)
:name_length(name_length_arg), data(data_arg)
{
name= my_strdup_with_length((byte*) name_arg, name_length, MYF(MY_WME));
name= my_strndup((byte*) name_arg, name_length, MYF(MY_WME));
links->push_back(this);
}
inline bool cmp(const char *name_cmp, uint length)

View file

@ -68,8 +68,6 @@ static bool check_db_used(THD *thd,TABLE_LIST *tables);
static bool check_multi_update_lock(THD *thd);
static void remove_escape(char *name);
static void refresh_status(THD *thd);
static bool append_file_to_dir(THD *thd, const char **filename_ptr,
const char *table_name);
const char *any_db="*any*"; // Special symbol for check_access
@ -6729,8 +6727,8 @@ static void refresh_status(THD *thd)
/* If pointer is not a null pointer, append filename to it */
static bool append_file_to_dir(THD *thd, const char **filename_ptr,
const char *table_name)
bool append_file_to_dir(THD *thd, const char **filename_ptr,
const char *table_name)
{
char buff[FN_REFLEN],*ptr, *end;
if (!*filename_ptr)

View file

@ -2243,6 +2243,7 @@ static int add_int(File fptr, longlong number)
}
static int add_keyword_string(File fptr, const char *keyword,
bool should_use_quotes,
const char *keystr)
{
int err= add_string(fptr, keyword);
@ -2250,7 +2251,11 @@ static int add_keyword_string(File fptr, const char *keyword,
err+= add_space(fptr);
err+= add_equal(fptr);
err+= add_space(fptr);
if (should_use_quotes)
err+= add_string(fptr, "'");
err+= add_string(fptr, keystr);
if (should_use_quotes)
err+= add_string(fptr, "'");
return err + add_space(fptr);
}
@ -2278,7 +2283,8 @@ static int add_partition_options(File fptr, partition_element *p_elem)
int err= 0;
if (p_elem->tablespace_name)
err+= add_keyword_string(fptr,"TABLESPACE",p_elem->tablespace_name);
err+= add_keyword_string(fptr,"TABLESPACE", FALSE,
p_elem->tablespace_name);
if (p_elem->nodegroup_id != UNDEF_NODEGROUP)
err+= add_keyword_int(fptr,"NODEGROUP",(longlong)p_elem->nodegroup_id);
if (p_elem->part_max_rows)
@ -2286,11 +2292,13 @@ static int add_partition_options(File fptr, partition_element *p_elem)
if (p_elem->part_min_rows)
err+= add_keyword_int(fptr,"MIN_ROWS",(longlong)p_elem->part_min_rows);
if (p_elem->data_file_name)
err+= add_keyword_string(fptr,"DATA DIRECTORY",p_elem->data_file_name);
err+= add_keyword_string(fptr, "DATA DIRECTORY", TRUE,
p_elem->data_file_name);
if (p_elem->index_file_name)
err+= add_keyword_string(fptr,"INDEX DIRECTORY",p_elem->index_file_name);
err+= add_keyword_string(fptr, "INDEX DIRECTORY", TRUE,
p_elem->index_file_name);
if (p_elem->part_comment)
err+= add_keyword_string(fptr, "COMMENT",p_elem->part_comment);
err+= add_keyword_string(fptr, "COMMENT", FALSE, p_elem->part_comment);
return err + add_engine(fptr,p_elem->engine_type);
}
@ -5751,7 +5759,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
/* Find minimum */
if (flags & NO_MIN_RANGE)
part_iter->start_part_num= 0;
part_iter->part_nums.start= 0;
else
{
/*
@ -5763,21 +5771,21 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
store_key_image_to_rec(field, min_value, field_len);
bool include_endp= part_info->range_analysis_include_bounds ||
!test(flags & NEAR_MIN);
part_iter->start_part_num= get_endpoint(part_info, 1, include_endp);
if (part_iter->start_part_num == max_endpoint_val)
part_iter->part_nums.start= get_endpoint(part_info, 1, include_endp);
if (part_iter->part_nums.start == max_endpoint_val)
return 0; /* No partitions */
}
/* Find maximum, do the same as above but for right interval bound */
if (flags & NO_MAX_RANGE)
part_iter->end_part_num= max_endpoint_val;
part_iter->part_nums.end= max_endpoint_val;
else
{
store_key_image_to_rec(field, max_value, field_len);
bool include_endp= part_info->range_analysis_include_bounds ||
!test(flags & NEAR_MAX);
part_iter->end_part_num= get_endpoint(part_info, 0, include_endp);
if (part_iter->start_part_num == part_iter->end_part_num)
part_iter->part_nums.end= get_endpoint(part_info, 0, include_endp);
if (part_iter->part_nums.start== part_iter->part_nums.end)
return 0; /* No partitions */
}
return 1; /* Ok, iterator initialized */
@ -5907,8 +5915,8 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
if (n_values > total_parts || n_values > MAX_RANGE_TO_WALK)
return -1;
part_iter->start_val= a;
part_iter->end_val= b;
part_iter->field_vals.start= a;
part_iter->field_vals.end= b;
part_iter->part_info= part_info;
part_iter->get_next= get_next_func;
return 1;
@ -5933,10 +5941,10 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
{
if (part_iter->start_part_num == part_iter->end_part_num)
if (part_iter->part_nums.start== part_iter->part_nums.end)
return NOT_A_PARTITION_ID;
else
return part_iter->start_part_num++;
return part_iter->part_nums.start++;
}
@ -5959,11 +5967,11 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
{
if (part_iter->start_part_num == part_iter->end_part_num)
if (part_iter->part_nums.start == part_iter->part_nums.end)
return NOT_A_PARTITION_ID;
else
return part_iter->part_info->list_array[part_iter->
start_part_num++].partition_id;
part_nums.start++].partition_id;
}
@ -5988,10 +5996,10 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
{
uint32 part_id;
Field *field= part_iter->part_info->part_field_array[0];
while (part_iter->start_val != part_iter->end_val)
while (part_iter->field_vals.start != part_iter->field_vals.end)
{
field->store(part_iter->start_val, FALSE);
part_iter->start_val++;
field->store(part_iter->field_vals.start, FALSE);
part_iter->field_vals.start++;
longlong dummy;
if (!part_iter->part_info->get_partition_id(part_iter->part_info,
&part_id, &dummy))
@ -6007,10 +6015,10 @@ static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
{
uint32 part_id;
Field *field= part_iter->part_info->subpart_field_array[0];
if (part_iter->start_val == part_iter->end_val)
if (part_iter->field_vals.start == part_iter->field_vals.end)
return NOT_A_PARTITION_ID;
field->store(part_iter->start_val, FALSE);
part_iter->start_val++;
field->store(part_iter->field_vals.start, FALSE);
part_iter->field_vals.start++;
return part_iter->part_info->get_subpartition_id(part_iter->part_info);
}
#endif

View file

@ -83,7 +83,7 @@ int ft_init_stopwords()
end=start+len;
while (ft_simple_get_word(default_charset_info, &start, end, &w, TRUE))
{
if (ft_add_stopword(my_strdup_with_length(w.pos, w.len, MYF(0))))
if (ft_add_stopword(my_strndup(w.pos, w.len, MYF(0))))
goto err1;
}
error=0;

View file

@ -32,7 +32,7 @@ struct AllocExtentReq {
enum ErrorCode {
UnmappedExtentPageIsNotImplemented = 1,
NoExtentAvailable = 2
NoExtentAvailable = 1601
};
union

View file

@ -49,9 +49,9 @@ public:
GET_SET_SENDERREF
GET_SET_SENDERDATA
void setPrepareId(Uint32 pId) { prepareId = pId; }; // !! unsets release flag
Uint32 getPrepareId() { return prepareId & 0xFF; };
Uint32 getPrepareId() const { return prepareId & 0xFF; };
void setReleaseFlag() { prepareId |= 0x100; };
bool getReleaseFlag() { return (prepareId & 0x100) != 0; };
bool getReleaseFlag() const { return (prepareId & 0x100) != 0; };
private:
Uint32 senderData; // MUST be no 1!
Uint32 senderRef;
@ -117,7 +117,6 @@ public:
IllegalKeyNumber = 1,
IllegalAttrNumber = 2,
TCError = 3,
IllegalPrepareId = 4,
AllocationError = 5,
MissingDataSection = 6,
MissingData = 7

View file

@ -28,6 +28,7 @@ class NdbOperation;
class NdbRecAttr;
class NdbTableImpl;
class NdbColumnImpl;
class NdbEventOperationImpl;
/**
* @class NdbBlob
@ -71,6 +72,10 @@ class NdbColumnImpl;
* writes. It avoids execute penalty if nothing is pending. It is not
* needed after execute (obviously) or after next scan result.
*
* NdbBlob also supports reading post or pre blob data from events. The
* handle can be read after next event on main table has been retrieved.
* The data is available immediately. See NdbEventOperation.
*
* NdbBlob methods return -1 on error and 0 on success, and use output
* parameters when necessary.
*
@ -145,6 +150,12 @@ public:
* then the callback is invoked.
*/
int setActiveHook(ActiveHook* activeHook, void* arg);
/**
* Check if blob value is defined (NULL or not). Used as first call
* on event based blob. The argument is set to -1 for not defined.
* Unlike getNull() this does not cause error on the handle.
*/
int getDefined(int& isNull);
/**
* Check if blob is null.
*/
@ -191,6 +202,11 @@ public:
* Get blob parts table name. Useful only to test programs.
*/
static int getBlobTableName(char* btname, Ndb* anNdb, const char* tableName, const char* columnName);
/**
* Get blob event name. The blob event is created if the main event
* monitors the blob column. The name includes main event name.
*/
static int getBlobEventName(char* bename, Ndb* anNdb, const char* eventName, const char* columnName);
/**
* Return error object. The error may be blob specific (below) or may
* be copied from a failed implicit operation.
@ -217,17 +233,29 @@ private:
friend class NdbScanOperation;
friend class NdbDictionaryImpl;
friend class NdbResultSet; // atNextResult
friend class NdbEventBuffer;
friend class NdbEventOperationImpl;
#endif
// state
State theState;
void setState(State newState);
// quick and dirty support for events (consider subclassing)
int theEventBlobVersion; // -1=normal blob 0=post event 1=pre event
// define blob table
static void getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnImpl* c);
static void getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c);
static void getBlobEventName(char* bename, const NdbEventImpl* e, const NdbColumnImpl* c);
static void getBlobEvent(NdbEventImpl& be, const NdbEventImpl* e, const NdbColumnImpl* c);
// ndb api stuff
Ndb* theNdb;
NdbTransaction* theNdbCon;
NdbOperation* theNdbOp;
NdbEventOperationImpl* theEventOp;
NdbEventOperationImpl* theBlobEventOp;
NdbRecAttr* theBlobEventPkRecAttr;
NdbRecAttr* theBlobEventDistRecAttr;
NdbRecAttr* theBlobEventPartRecAttr;
NdbRecAttr* theBlobEventDataRecAttr;
const NdbTableImpl* theTable;
const NdbTableImpl* theAccessTable;
const NdbTableImpl* theBlobTable;
@ -263,6 +291,8 @@ private:
Buf theHeadInlineBuf;
Buf theHeadInlineCopyBuf; // for writeTuple
Buf thePartBuf;
Buf theBlobEventDataBuf;
Uint32 thePartNumber; // for event
Head* theHead;
char* theInlineData;
NdbRecAttr* theHeadInlineRecAttr;
@ -306,6 +336,8 @@ private:
int readDataPrivate(char* buf, Uint32& bytes);
int writeDataPrivate(const char* buf, Uint32 bytes);
int readParts(char* buf, Uint32 part, Uint32 count);
int readTableParts(char* buf, Uint32 part, Uint32 count);
int readEventParts(char* buf, Uint32 part, Uint32 count);
int insertParts(const char* buf, Uint32 part, Uint32 count);
int updateParts(const char* buf, Uint32 part, Uint32 count);
int deleteParts(Uint32 part, Uint32 count);
@ -317,19 +349,23 @@ private:
int invokeActiveHook();
// blob handle maintenance
int atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn);
int atPrepare(NdbEventOperationImpl* anOp, NdbEventOperationImpl* aBlobOp, const NdbColumnImpl* aColumn, int version);
int prepareColumn();
int preExecute(NdbTransaction::ExecType anExecType, bool& batch);
int postExecute(NdbTransaction::ExecType anExecType);
int preCommit();
int atNextResult();
int atNextEvent();
// errors
void setErrorCode(int anErrorCode, bool invalidFlag = true);
void setErrorCode(NdbOperation* anOp, bool invalidFlag = true);
void setErrorCode(NdbTransaction* aCon, bool invalidFlag = true);
void setErrorCode(NdbEventOperationImpl* anOp, bool invalidFlag = true);
#ifdef VM_TRACE
int getOperationType() const;
friend class NdbOut& operator<<(NdbOut&, const NdbBlob&);
#endif
// list stuff
void next(NdbBlob* obj) { theNext= obj;}
NdbBlob* next() { return theNext;}
friend struct Ndb_free_list_t<NdbBlob>;

View file

@ -883,6 +883,7 @@ public:
private:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbDictionaryImpl;
friend class NdbTableImpl;
#endif
class NdbTableImpl & m_impl;
@ -1124,7 +1125,7 @@ public:
_TE_NODE_FAILURE=10,
_TE_SUBSCRIBE=11,
_TE_UNSUBSCRIBE=12,
_TE_NUL=13 // internal (INS o DEL within same GCI)
_TE_NUL=13 // internal (e.g. INS o DEL within same GCI)
};
#endif
/**
@ -1261,6 +1262,24 @@ public:
*/
int getNoOfEventColumns() const;
/**
* The merge events flag is false by default. Setting it true
* implies that events are merged in following ways:
*
* - for given NdbEventOperation associated with this event,
* events on same PK within same GCI are merged into single event
*
* - a blob table event is created for each blob attribute
* and blob events are handled as part of main table events
*
* - blob post/pre data from the blob part events can be read
* via NdbBlob methods as a single value
*
* NOTE: Currently this flag is not inherited by NdbEventOperation
* and must be set on NdbEventOperation explicitly.
*/
void mergeEvents(bool flag);
/**
* Get object status
*/
@ -1746,6 +1765,7 @@ public:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
const Table * getTable(const char * name, void **data) const;
void set_local_table_data_size(unsigned sz);
void fix_blob_events(const Table* table, const char* ev_name);
#endif
};
};

View file

@ -150,6 +150,14 @@ public:
*/
NdbRecAttr *getPreValue(const char *anAttrName, char *aValue = 0);
/**
* These methods replace getValue/getPreValue for blobs. Each
* method creates a blob handle NdbBlob. The handle supports only
* read operations. See NdbBlob.
*/
NdbBlob* getBlobHandle(const char *anAttrName);
NdbBlob* getPreBlobHandle(const char *anAttrName);
int isOverrun() const;
/**

View file

@ -4,7 +4,7 @@ OBJS = ndbapi_event.o
CXX = g++ -g
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
DEBUG =
DEBUG =# -DVM_TRACE
LFLAGS = -Wall
TOP_SRCDIR = ../../../..
INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include
@ -16,8 +16,8 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
$(TARGET).o: $(SRCS)
$(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi -I$(TOP_SRCDIR)/include $(SRCS)
$(TARGET).o: $(SRCS) Makefile
$(CXX) $(CFLAGS) $(DEBUG) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi -I$(TOP_SRCDIR)/include $(SRCS)
clean:
rm -f *.o $(TARGET)

View file

@ -54,26 +54,32 @@
#include <stdio.h>
#include <iostream>
#include <unistd.h>
#ifdef VM_TRACE
#include <my_global.h>
#endif
#ifndef assert
#include <assert.h>
#endif
/**
*
* Assume that there is a table t0 which is being updated by
* Assume that there is a table which is being updated by
* another process (e.g. flexBench -l 0 -stdtables).
* We want to monitor what happens with columns c0,c1,c2,c3.
* We want to monitor what happens with column values.
*
* or together with the mysql client;
* Or using the mysql client:
*
* shell> mysql -u root
* mysql> create database TEST_DB;
* mysql> use TEST_DB;
* mysql> create table t0 (c0 int, c1 int, c2 char(4), c3 char(4),
* mysql> create table t0
* (c0 int, c1 int, c2 char(4), c3 char(4), c4 text,
* primary key(c0, c2)) engine ndb charset latin1;
*
* In another window start ndbapi_event, wait until properly started
*
insert into t0 values (1, 2, 'a', 'b');
insert into t0 values (3, 4, 'c', 'd');
insert into t0 values (1, 2, 'a', 'b', null);
insert into t0 values (3, 4, 'c', 'd', null);
update t0 set c3 = 'e' where c0 = 1 and c2 = 'a'; -- use pk
update t0 set c3 = 'f'; -- use scan
update t0 set c3 = 'F'; -- use scan update to 'same'
@ -81,7 +87,18 @@
update t0 set c2 = 'G' where c0 = 1; -- update pk part to 'same'
update t0 set c0 = 5, c2 = 'H' where c0 = 3; -- update full PK
delete from t0;
*
insert ...; update ...; -- see events w/ same pk merged (if -m option)
delete ...; insert ...; -- there are 5 combinations ID IU DI UD UU
update ...; update ...;
-- text requires -m flag
set @a = repeat('a',256); -- inline size
set @b = repeat('b',2000); -- part size
set @c = repeat('c',2000*30); -- 30 parts
-- update the text field using combinations of @a, @b, @c ...
* you should see the data popping up in the example window
*
*/
@ -95,12 +112,18 @@ int myCreateEvent(Ndb* myNdb,
const char *eventName,
const char *eventTableName,
const char **eventColumnName,
const int noEventColumnName);
const int noEventColumnName,
bool merge_events);
int main(int argc, char** argv)
{
ndb_init();
bool merge_events = argc > 1 && strcmp(argv[1], "-m") == 0;
bool merge_events = argc > 1 && strchr(argv[1], 'm') != 0;
#ifdef VM_TRACE
bool dbug = argc > 1 && strchr(argv[1], 'd') != 0;
if (dbug) DBUG_PUSH("d:t:");
if (dbug) putenv("API_SIGNAL_LOG=-");
#endif
Ndb_cluster_connection *cluster_connection=
new Ndb_cluster_connection(); // Object representing the cluster
@ -134,12 +157,13 @@ int main(int argc, char** argv)
const char *eventName= "CHNG_IN_t0";
const char *eventTableName= "t0";
const int noEventColumnName= 4;
const int noEventColumnName= 5;
const char *eventColumnName[noEventColumnName]=
{"c0",
"c1",
"c2",
"c3"
"c3",
"c4"
};
// Create events
@ -147,9 +171,14 @@ int main(int argc, char** argv)
eventName,
eventTableName,
eventColumnName,
noEventColumnName);
noEventColumnName,
merge_events);
int j= 0;
// Normal values and blobs are unfortunately handled differently..
typedef union { NdbRecAttr* ra; NdbBlob* bh; } RA_BH;
int i, j, k, l;
j = 0;
while (j < 99) {
// Start "transaction" for handling events
@ -160,12 +189,17 @@ int main(int argc, char** argv)
op->mergeEvents(merge_events);
printf("get values\n");
NdbRecAttr* recAttr[noEventColumnName];
NdbRecAttr* recAttrPre[noEventColumnName];
RA_BH recAttr[noEventColumnName];
RA_BH recAttrPre[noEventColumnName];
// primary keys should always be a part of the result
for (int i = 0; i < noEventColumnName; i++) {
recAttr[i] = op->getValue(eventColumnName[i]);
recAttrPre[i] = op->getPreValue(eventColumnName[i]);
for (i = 0; i < noEventColumnName; i++) {
if (i < 4) {
recAttr[i].ra = op->getValue(eventColumnName[i]);
recAttrPre[i].ra = op->getPreValue(eventColumnName[i]);
} else if (merge_events) {
recAttr[i].bh = op->getBlobHandle(eventColumnName[i]);
recAttrPre[i].bh = op->getPreBlobHandle(eventColumnName[i]);
}
}
// set up the callbacks
@ -174,13 +208,16 @@ int main(int argc, char** argv)
if (op->execute())
APIERROR(op->getNdbError());
int i= 0;
while(i < 40) {
NdbEventOperation* the_op = op;
i= 0;
while (i < 40) {
// printf("now waiting for event...\n");
int r= myNdb->pollEvents(1000); // wait for event or 1000 ms
int r = myNdb->pollEvents(1000); // wait for event or 1000 ms
if (r > 0) {
// printf("got data! %d\n", r);
while ((op= myNdb->nextEvent())) {
assert(the_op == op);
i++;
switch (op->getEventType()) {
case NdbDictionary::Event::TE_INSERT:
@ -195,40 +232,66 @@ int main(int argc, char** argv)
default:
abort(); // should not happen
}
printf(" gci=%d\n", op->getGCI());
printf("post: ");
for (int i = 0; i < noEventColumnName; i++) {
if (recAttr[i]->isNULL() >= 0) { // we have a value
if (recAttr[i]->isNULL() == 0) { // we have a non-null value
if (i < 2)
printf("%-5u", recAttr[i]->u_32_value());
else
printf("%-5.4s", recAttr[i]->aRef());
} else // we have a null value
printf("%-5s", "NULL");
} else
printf("%-5s", "-");
printf(" gci=%d\n", (int)op->getGCI());
for (k = 0; k <= 1; k++) {
printf(k == 0 ? "post: " : "pre : ");
for (l = 0; l < noEventColumnName; l++) {
if (l < 4) {
NdbRecAttr* ra = k == 0 ? recAttr[l].ra : recAttrPre[l].ra;
if (ra->isNULL() >= 0) { // we have a value
if (ra->isNULL() == 0) { // we have a non-null value
if (l < 2)
printf("%-5u", ra->u_32_value());
else
printf("%-5.4s", ra->aRef());
} else
printf("%-5s", "NULL");
} else
printf("%-5s", "-"); // no value
} else if (merge_events) {
int isNull;
NdbBlob* bh = k == 0 ? recAttr[l].bh : recAttrPre[l].bh;
bh->getDefined(isNull);
if (isNull >= 0) { // we have a value
if (! isNull) { // we have a non-null value
Uint64 length = 0;
bh->getLength(length);
// read into buffer
unsigned char* buf = new unsigned char [length];
memset(buf, 'X', length);
Uint32 n = length;
bh->readData(buf, n); // n is in/out
assert(n == length);
// pretty-print
bool first = true;
Uint32 i = 0;
while (i < n) {
unsigned char c = buf[i++];
Uint32 m = 1;
while (i < n && buf[i] == c)
i++, m++;
if (! first)
printf("+");
printf("%u%c", m, c);
first = false;
}
printf("[%u]", n);
delete [] buf;
} else
printf("%-5s", "NULL");
} else
printf("%-5s", "-"); // no value
}
}
printf("\n");
}
printf("\npre : ");
for (int i = 0; i < noEventColumnName; i++) {
if (recAttrPre[i]->isNULL() >= 0) { // we have a value
if (recAttrPre[i]->isNULL() == 0) { // we have a non-null value
if (i < 2)
printf("%-5u", recAttrPre[i]->u_32_value());
else
printf("%-5.4s", recAttrPre[i]->aRef());
} else // we have a null value
printf("%-5s", "NULL");
} else
printf("%-5s", "-");
}
printf("\n");
}
} else
;//printf("timed out\n");
}
// don't want to listen to events anymore
if (myNdb->dropEventOperation(op)) APIERROR(myNdb->getNdbError());
if (myNdb->dropEventOperation(the_op)) APIERROR(myNdb->getNdbError());
the_op = 0;
j++;
}
@ -250,7 +313,8 @@ int myCreateEvent(Ndb* myNdb,
const char *eventName,
const char *eventTableName,
const char **eventColumnNames,
const int noEventColumnNames)
const int noEventColumnNames,
bool merge_events)
{
NdbDictionary::Dictionary *myDict= myNdb->getDictionary();
if (!myDict) APIERROR(myNdb->getNdbError());
@ -265,6 +329,7 @@ int myCreateEvent(Ndb* myNdb,
// myEvent.addTableEvent(NdbDictionary::Event::TE_DELETE);
myEvent.addEventColumns(noEventColumnNames, eventColumnNames);
myEvent.mergeEvents(merge_events);
// Add event to database
if (myDict->createEvent(myEvent) == 0)

View file

@ -20,10 +20,12 @@ bool
printUTIL_EXECUTE_REQ(FILE* out, const Uint32 * data, Uint32 len, Uint16 rec)
{
const UtilExecuteReq* const sig = (UtilExecuteReq*)data;
fprintf(out, " senderRef: H'%.8x, senderData: H'%.8x prepareId: %d\n",
fprintf(out, " senderRef: H'%.8x, senderData: H'%.8x prepareId: %d "
" releaseFlag: %d\n",
sig->senderRef,
sig->senderData,
sig->prepareId);
sig->getPrepareId(),
sig->getReleaseFlag());
return true;
}
@ -48,8 +50,6 @@ printUTIL_EXECUTE_REF(FILE* out, const Uint32 * data, Uint32 len, Uint16 rec)
"IllegalAttrNumber" :
sig->errorCode == UtilExecuteRef::TCError ?
"TCError" :
sig->errorCode == UtilExecuteRef::IllegalPrepareId ?
"IllegalPrepareId" :
sig->errorCode == UtilExecuteRef::AllocationError ?
"AllocationError" :
"Unknown");

View file

@ -1601,6 +1601,8 @@ private:
* Reply from nodeId
*/
void startInfoReply(Signal *, Uint32 nodeId);
void dump_replica_info();
};
#if (DIH_CDATA_SIZE < _SYSFILE_SIZE32)

View file

@ -8925,6 +8925,80 @@ void Dbdih::packFragIntoPagesLab(Signal* signal, RWFragment* wf)
/*****************************************************************************/
/* ********** START FRAGMENT MODULE *************/
/*****************************************************************************/
void
Dbdih::dump_replica_info()
{
TabRecordPtr tabPtr;
FragmentstorePtr fragPtr;
for(tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++)
{
ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
continue;
for(Uint32 fid = 0; fid<tabPtr.p->totalfragments; fid++)
{
getFragstore(tabPtr.p, fid, fragPtr);
ndbout_c("tab: %d frag: %d gci: %d\n -- storedReplicas:",
tabPtr.i, fid, SYSFILE->newestRestorableGCI);
Uint32 i;
ReplicaRecordPtr replicaPtr;
replicaPtr.i = fragPtr.p->storedReplicas;
for(; replicaPtr.i != RNIL; replicaPtr.i = replicaPtr.p->nextReplica)
{
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
ndbout_c(" node: %d initialGci: %d nextLcp: %d noCrashedReplicas: %d",
replicaPtr.p->procNode,
replicaPtr.p->initialGci,
replicaPtr.p->nextLcp,
replicaPtr.p->noCrashedReplicas);
for(i = 0; i<MAX_LCP_STORED; i++)
{
ndbout_c(" i: %d %s : lcpId: %d maxGci Completed: %d Started: %d",
i,
(replicaPtr.p->lcpStatus[i] == ZVALID ?"VALID":"INVALID"),
replicaPtr.p->lcpId[i],
replicaPtr.p->maxGciCompleted[i],
replicaPtr.p->maxGciStarted[i]);
}
for (i = 0; i < 8; i++)
{
ndbout_c(" crashed replica: %d replicaLastGci: %d createGci: %d",
i,
replicaPtr.p->replicaLastGci[i],
replicaPtr.p->createGci[i]);
}
}
ndbout_c(" -- oldStoredReplicas");
replicaPtr.i = fragPtr.p->oldStoredReplicas;
for(; replicaPtr.i != RNIL; replicaPtr.i = replicaPtr.p->nextReplica)
{
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
for(i = 0; i<MAX_LCP_STORED; i++)
{
ndbout_c(" i: %d %s : lcpId: %d maxGci Completed: %d Started: %d",
i,
(replicaPtr.p->lcpStatus[i] == ZVALID ?"VALID":"INVALID"),
replicaPtr.p->lcpId[i],
replicaPtr.p->maxGciCompleted[i],
replicaPtr.p->maxGciStarted[i]);
}
for (i = 0; i < 8; i++)
{
ndbout_c(" crashed replica: %d replicaLastGci: %d createGci: %d",
i,
replicaPtr.p->replicaLastGci[i],
replicaPtr.p->createGci[i]);
}
}
}
}
}
void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId)
{
Uint32 TloopCount = 0;
@ -8986,6 +9060,7 @@ void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId)
/* SEARCH FOR STORED REPLICAS THAT CAN BE USED TO RESTART THE SYSTEM. */
/* ----------------------------------------------------------------------- */
searchStoredReplicas(fragPtr);
if (cnoOfCreateReplicas == 0) {
/* --------------------------------------------------------------------- */
/* THERE WERE NO STORED REPLICAS AVAILABLE THAT CAN SERVE AS REPLICA TO*/
@ -8998,6 +9073,10 @@ void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId)
char buf[64];
BaseString::snprintf(buf, sizeof(buf), "table: %d fragment: %d gci: %d",
tableId, fragId, SYSFILE->newestRestorableGCI);
ndbout_c(buf);
dump_replica_info();
progError(__LINE__, NDBD_EXIT_NO_RESTORABLE_REPLICA, buf);
ndbrequire(false);
return;

View file

@ -565,7 +565,6 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
*
*/
STATIC_CONST( SZ = EXTENT_SEARCH_MATRIX_SIZE );
Uint32 m_extent_search_matrix[SZ]; // 4x4
DLList<Extent_info>::Head m_free_extents[SZ];
Uint32 m_total_extent_free_space_thresholds[EXTENT_SEARCH_MATRIX_ROWS];
Uint32 m_page_free_bits_map[EXTENT_SEARCH_MATRIX_COLS];
@ -593,6 +592,8 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
SLList<Extent_info, Extent_list_t>::Head m_extent_list;
};
void dump_disk_alloc(Disk_alloc_info&);
struct Fragrecord {
Uint32 nextStartRange;
Uint32 currentPageRange;

View file

@ -17,6 +17,112 @@
#define DBTUP_C
#include "Dbtup.hpp"
static
NdbOut&
operator<<(NdbOut& out, const Ptr<Dbtup::Page> & ptr)
{
out << "[ Page: ptr.i: " << ptr.i
<< " [ m_file_no: " << ptr.p->m_file_no
<< " m_page_no: " << ptr.p->m_page_no << "]"
<< " list_index: " << ptr.p->list_index
<< " free_space: " << ptr.p->free_space
<< " uncommitted_used_space: " << ptr.p->uncommitted_used_space
<< " ]";
return out;
}
static
NdbOut&
operator<<(NdbOut& out, const Ptr<Dbtup::Page_request> & ptr)
{
out << "[ Page_request: ptr.i: " << ptr.i
<< " " << ptr.p->m_key
<< " m_estimated_free_space: " << ptr.p->m_estimated_free_space
<< " m_list_index: " << ptr.p->m_list_index
<< " m_frag_ptr_i: " << ptr.p->m_frag_ptr_i
<< " m_extent_info_ptr: " << ptr.p->m_extent_info_ptr
<< " m_ref_count: " << ptr.p->m_ref_count
<< " m_uncommitted_used_space: " << ptr.p->m_uncommitted_used_space
<< " ]";
return out;
}
static
NdbOut&
operator<<(NdbOut& out, const Ptr<Dbtup::Extent_info> & ptr)
{
out << "[ Extent_info: ptr.i " << ptr.i
<< " " << ptr.p->m_key
<< " m_first_page_no: " << ptr.p->m_first_page_no
<< " m_free_space: " << ptr.p->m_free_space
<< " m_free_matrix_pos: " << ptr.p->m_free_matrix_pos
<< " m_free_page_count: [";
for(Uint32 i = 0; i<Dbtup::EXTENT_SEARCH_MATRIX_COLS; i++)
out << " " << ptr.p->m_free_page_count[i];
out << " ] ]";
return out;
}
void
Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc)
{
ndbout_c("dirty pages");
for(Uint32 i = 0; i<MAX_FREE_LIST; i++)
{
printf(" %d : ", i);
Ptr<Page> ptr;
ArrayPool<Page> *pool= (ArrayPool<Page>*)&m_global_page_pool;
LocalDLList<Page> list(*pool, alloc.m_dirty_pages[i]);
for(list.first(ptr); !ptr.isNull(); list.next(ptr))
{
ndbout << ptr << " ";
}
ndbout_c("");
}
ndbout_c("page requests");
for(Uint32 i = 0; i<MAX_FREE_LIST; i++)
{
printf(" %d : ", i);
Ptr<Page_request> ptr;
LocalDLList<Page_request> list(c_page_request_pool,
alloc.m_page_requests[i]);
for(list.first(ptr); !ptr.isNull(); list.next(ptr))
{
ndbout << ptr << " ";
}
ndbout_c("");
}
ndbout_c("Extent matrix");
for(Uint32 i = 0; i<alloc.SZ; i++)
{
printf(" %d : ", i);
Ptr<Extent_info> ptr;
LocalDLList<Extent_info> list(c_extent_pool, alloc.m_free_extents[i]);
for(list.first(ptr); !ptr.isNull(); list.next(ptr))
{
ndbout << ptr << " ";
}
ndbout_c("");
}
if (alloc.m_curr_extent_info_ptr_i != RNIL)
{
Ptr<Extent_info> ptr;
c_extent_pool.getPtr(ptr, alloc.m_curr_extent_info_ptr_i);
ndbout << "current extent: " << ptr << endl;
}
}
#if defined VM_TRACE || true
#define ddassert(x) do { if(unlikely(!(x))) { dump_disk_alloc(alloc); ndbrequire(false); } } while(0)
#else
#define ddassert(x)
#endif
Dbtup::Disk_alloc_info::Disk_alloc_info(const Tablerec* tabPtrP,
Uint32 extent_size)
{
@ -60,20 +166,20 @@ Dbtup::Disk_alloc_info::find_extent(Uint32 sz) const
* Find the biggest available (with most free space)
* Return position in matrix
*/
Uint32 col = calc_page_free_bits(sz);
Uint32 mask= EXTENT_SEARCH_MATRIX_COLS - 1;
for(Uint32 i= 0; i<EXTENT_SEARCH_MATRIX_SIZE; i++)
{
// Check that it can cater for request
if (m_extent_search_matrix[i] < sz)
{
i = (i + mask) & ~mask;
continue;
}
if (!m_free_extents[i].isEmpty())
{
return i;
}
if ((i & mask) >= col)
{
i = (i & ~mask) + mask;
}
}
return RNIL;
@ -92,13 +198,7 @@ Dbtup::Disk_alloc_info::calc_extent_pos(const Extent_info* extP) const
* if zero (or very small free space) put
* absolutly last
*/
{
printf("free space %d free_page_thresholds ", free);
for(Uint32 i = 0; i<EXTENT_SEARCH_MATRIX_ROWS; i++)
printf("%d ", m_total_extent_free_space_thresholds[i]);
ndbout_c("");
{
const Uint32 *arr= m_total_extent_free_space_thresholds;
for(; free < * arr++; row++)
assert(row < EXTENT_SEARCH_MATRIX_ROWS);
@ -123,11 +223,6 @@ Dbtup::Disk_alloc_info::calc_extent_pos(const Extent_info* extP) const
*/
Uint32 pos= (row * (mask + 1)) + (col & mask);
printf("free space %d free_page_count ", free);
for(Uint32 i = 0; i<EXTENT_SEARCH_MATRIX_COLS; i++)
printf("%d ", extP->m_free_page_count[i]);
ndbout_c(" -> row: %d col: %d -> pos= %d", row, col, pos);
assert(pos < EXTENT_SEARCH_MATRIX_SIZE);
return pos;
}
@ -237,7 +332,9 @@ Dbtup::disk_page_prealloc(Signal* signal,
* and since it couldn't accomadate the request
* we put it on the free list
*/
alloc.m_curr_extent_info_ptr_i = RNIL;
Uint32 pos= alloc.calc_extent_pos(ext.p);
ext.p->m_free_matrix_pos = pos;
LocalDLList<Extent_info> list(c_extent_pool, alloc.m_free_extents[pos]);
list.add(ext);
}
@ -270,11 +367,9 @@ Dbtup::disk_page_prealloc(Signal* signal,
if ((err= tsman.alloc_extent(&ext.p->m_key)) < 0)
{
//XXX
c_extent_pool.release(ext);
c_page_request_pool.release(req);
ndbout_c("no free extent");
return -err;
return err;
}
int pages= err;
@ -292,7 +387,14 @@ Dbtup::disk_page_prealloc(Signal* signal,
alloc.m_curr_extent_info_ptr_i= ext.i;
ext.p->m_free_matrix_pos= RNIL;
pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits);
ndbassert(pageBits >= 0);
#ifdef VM_TRACE
ddassert(pageBits >= 0);
#else
if (unlikely(pageBits < 0))
{
return -AllocExtentReq::NoExtentAvailable;
}
#endif
}
/**
@ -307,18 +409,18 @@ Dbtup::disk_page_prealloc(Signal* signal,
*/
Uint32 size= alloc.calc_page_free_space((Uint32)pageBits);
ndbassert(size >= sz);
ddassert(size >= sz);
Uint32 new_size = size - sz; // Subtract alloc rec
req.p->m_estimated_free_space= new_size; // Store on page request
Uint32 newPageBits= alloc.calc_page_free_bits(new_size);
if (newPageBits != (Uint32)pageBits)
{
ndbassert(ext.p->m_free_page_count[pageBits] > 0);
ddassert(ext.p->m_free_page_count[pageBits] > 0);
ext.p->m_free_page_count[pageBits]--;
ext.p->m_free_page_count[newPageBits]++;
}
ndbassert(ext.p->m_free_space >= sz);
ddassert(ext.p->m_free_space >= sz);
ext.p->m_free_space -= sz;
// And put page request in correct free list
@ -367,13 +469,13 @@ Dbtup::disk_page_prealloc_dirty_page(Disk_alloc_info & alloc,
Ptr<Page> pagePtr,
Uint32 old_idx, Uint32 sz)
{
ndbassert(pagePtr.p->list_index == old_idx);
ddassert(pagePtr.p->list_index == old_idx);
Uint32 free= pagePtr.p->free_space;
Uint32 used= pagePtr.p->uncommitted_used_space + sz;
Uint32 ext= pagePtr.p->m_extent_info_ptr;
ndbassert(free >= used);
ddassert(free >= used);
Ptr<Extent_info> extentPtr;
c_extent_pool.getPtr(extentPtr, ext);
@ -387,14 +489,14 @@ Dbtup::disk_page_prealloc_dirty_page(Disk_alloc_info & alloc,
old_list.remove(pagePtr);
new_list.add(pagePtr);
ndbassert(extentPtr.p->m_free_page_count[old_idx]);
ddassert(extentPtr.p->m_free_page_count[old_idx]);
extentPtr.p->m_free_page_count[old_idx]--;
extentPtr.p->m_free_page_count[new_idx]++;
pagePtr.p->list_index= new_idx;
}
pagePtr.p->uncommitted_used_space = used;
ndbassert(extentPtr.p->m_free_space >= sz);
ddassert(extentPtr.p->m_free_space >= sz);
extentPtr.p->m_free_space -= sz;
Uint32 old_pos= extentPtr.p->m_free_matrix_pos;
if (old_pos != RNIL) // Current extent
@ -419,7 +521,7 @@ Dbtup::disk_page_prealloc_transit_page(Disk_alloc_info& alloc,
Ptr<Page_request> req,
Uint32 old_idx, Uint32 sz)
{
ndbassert(req.p->m_list_index == old_idx);
ddassert(req.p->m_list_index == old_idx);
Uint32 free= req.p->m_estimated_free_space;
Uint32 used= req.p->m_uncommitted_used_space + sz;
@ -428,7 +530,7 @@ Dbtup::disk_page_prealloc_transit_page(Disk_alloc_info& alloc,
Ptr<Extent_info> extentPtr;
c_extent_pool.getPtr(extentPtr, ext);
ndbassert(free >= sz);
ddassert(free >= sz);
Uint32 new_idx= alloc.calc_page_free_bits(free - sz);
if (old_idx != new_idx)
@ -439,7 +541,7 @@ Dbtup::disk_page_prealloc_transit_page(Disk_alloc_info& alloc,
old_list.remove(req);
new_list.add(req);
ndbassert(extentPtr.p->m_free_page_count[old_idx]);
ddassert(extentPtr.p->m_free_page_count[old_idx]);
extentPtr.p->m_free_page_count[old_idx]--;
extentPtr.p->m_free_page_count[new_idx]++;
req.p->m_list_index= new_idx;
@ -447,7 +549,7 @@ Dbtup::disk_page_prealloc_transit_page(Disk_alloc_info& alloc,
req.p->m_uncommitted_used_space = used;
req.p->m_estimated_free_space = free - sz;
ndbassert(extentPtr.p->m_free_space >= sz);
ddassert(extentPtr.p->m_free_space >= sz);
extentPtr.p->m_free_space -= sz;
Uint32 old_pos= extentPtr.p->m_free_matrix_pos;
if (old_pos != RNIL) // Current extent
@ -553,11 +655,11 @@ Dbtup::disk_page_prealloc_callback_common(Signal* signal,
* 3) register callback in pgman (unmap callback)
* 4) inform pgman about current users
*/
ndbassert((page->list_index & 0x8000) == 0x8000);
ndbassert(page->m_extent_info_ptr == req.p->m_extent_info_ptr);
ndbassert(page->m_page_no == req.p->m_key.m_page_no);
ndbassert(page->m_file_no == req.p->m_key.m_file_no);
Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info;
ddassert((page->list_index & 0x8000) == 0x8000);
ddassert(page->m_extent_info_ptr == req.p->m_extent_info_ptr);
ddassert(page->m_page_no == req.p->m_key.m_page_no);
ddassert(page->m_file_no == req.p->m_key.m_file_no);
Uint32 old_idx = req.p->m_list_index;
Uint32 free= req.p->m_estimated_free_space;
@ -566,9 +668,9 @@ Dbtup::disk_page_prealloc_callback_common(Signal* signal,
Uint32 real_free = page->free_space;
Uint32 real_used = used + page->uncommitted_used_space;
ndbassert(real_free >= free);
ndbassert(real_free >= real_used);
ndbassert(alloc.calc_page_free_bits(free) == old_idx);
ddassert(real_free >= free);
ddassert(real_free >= real_used);
ddassert(alloc.calc_page_free_bits(free) == old_idx);
Uint32 new_idx= alloc.calc_page_free_bits(real_free - real_used);
/**
@ -589,7 +691,7 @@ Dbtup::disk_page_prealloc_callback_common(Signal* signal,
if (old_idx != new_idx)
{
ndbassert(extentPtr.p->m_free_page_count[old_idx]);
ddassert(extentPtr.p->m_free_page_count[old_idx]);
extentPtr.p->m_free_page_count[old_idx]--;
extentPtr.p->m_free_page_count[new_idx]++;
}
@ -723,13 +825,14 @@ Dbtup::disk_page_alloc(Signal* signal,
Local_key* key, PagePtr pagePtr, Uint32 gci)
{
Uint32 logfile_group_id= fragPtrP->m_logfile_group_id;
Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
Uint64 lsn;
Uint32 old_free = pagePtr.p->free_space;
Uint32 old_bits= fragPtrP->m_disk_alloc_info.calc_page_free_bits(old_free);
Uint32 old_bits= alloc.calc_page_free_bits(old_free);
if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0)
{
ndbassert(pagePtr.p->uncommitted_used_space > 0);
ddassert(pagePtr.p->uncommitted_used_space > 0);
pagePtr.p->uncommitted_used_space--;
key->m_page_idx= ((Fix_page*)pagePtr.p)->alloc_record();
lsn= disk_page_undo_alloc(pagePtr.p, key, 1, gci, logfile_group_id);
@ -737,7 +840,7 @@ Dbtup::disk_page_alloc(Signal* signal,
else
{
Uint32 sz= key->m_page_idx;
ndbassert(pagePtr.p->uncommitted_used_space >= sz);
ddassert(pagePtr.p->uncommitted_used_space >= sz);
pagePtr.p->uncommitted_used_space -= sz;
key->m_page_idx= ((Var_page*)pagePtr.p)->
alloc_record(sz, (Var_page*)ctemp_page, 0);
@ -746,7 +849,7 @@ Dbtup::disk_page_alloc(Signal* signal,
}
Uint32 new_free = pagePtr.p->free_space;
Uint32 new_bits= fragPtrP->m_disk_alloc_info.calc_page_free_bits(new_free);
Uint32 new_bits= alloc.calc_page_free_bits(new_free);
if (old_bits != new_bits)
{
@ -808,20 +911,20 @@ Dbtup::disk_page_free(Signal *signal,
Uint32 ext = pagePtr.p->m_extent_info_ptr;
Uint32 used = pagePtr.p->uncommitted_used_space;
ndbassert(old_free >= used);
ndbassert(new_free >= used);
ndbassert(new_free >= old_free);
ddassert(old_free >= used);
ddassert(new_free >= used);
ddassert(new_free >= old_free);
page_idx = pagePtr.p->list_index;
Uint32 old_idx = page_idx & 0x7FFF;
Uint32 new_idx = alloc.calc_page_free_bits(new_free - used);
ndbassert(alloc.calc_page_free_bits(old_free - used) == old_idx);
ddassert(alloc.calc_page_free_bits(old_free - used) == old_idx);
Ptr<Extent_info> extentPtr;
c_extent_pool.getPtr(extentPtr, ext);
if (old_idx != new_idx)
{
ndbassert(extentPtr.p->m_free_page_count[old_idx]);
ddassert(extentPtr.p->m_free_page_count[old_idx]);
extentPtr.p->m_free_page_count[old_idx]--;
extentPtr.p->m_free_page_count[new_idx]++;
@ -917,16 +1020,16 @@ Dbtup::disk_page_abort_prealloc_callback_1(Signal* signal,
Uint32 ext = pagePtr.p->m_extent_info_ptr;
Uint32 old_idx = page_idx & 0x7FFF;
ndbassert(free >= used);
ndbassert(used >= sz);
ndbassert(alloc.calc_page_free_bits(free - used) == old_idx);
ddassert(free >= used);
ddassert(used >= sz);
ddassert(alloc.calc_page_free_bits(free - used) == old_idx);
Uint32 new_idx = alloc.calc_page_free_bits(free - used + sz);
Ptr<Extent_info> extentPtr;
c_extent_pool.getPtr(extentPtr, ext);
if (old_idx != new_idx)
{
ndbassert(extentPtr.p->m_free_page_count[old_idx]);
ddassert(extentPtr.p->m_free_page_count[old_idx]);
extentPtr.p->m_free_page_count[old_idx]--;
extentPtr.p->m_free_page_count[new_idx]++;

View file

@ -1439,6 +1439,7 @@ int Dbtup::handleInsertReq(Signal* signal,
int ret= disk_page_prealloc(signal, fragPtr, &tmp, size);
if (unlikely(ret < 0))
{
terrorCode = -ret;
goto disk_prealloc_error;
}

View file

@ -52,7 +52,6 @@
DbUtil::DbUtil(const Configuration & conf) :
SimulatedBlock(DBUTIL, conf),
c_runningPrepares(c_preparePool),
c_runningPreparedOperations(c_preparedOperationPool),
c_seizingTransactions(c_transactionPool),
c_runningTransactions(c_transactionPool),
c_lockQueues(c_lockQueuePool)
@ -566,12 +565,13 @@ DbUtil::execDUMP_STATE_ORD(Signal* signal){
}
ndbout << "PreparedOperation Id: " << signal->theData[2] << endl;
PreparedOperationPtr prepOpPtr;
c_runningPreparedOperations.getPtr(prepOpPtr, signal->theData[2]);
c_preparedOperationPool.getPtr(prepOpPtr, signal->theData[2]);
prepOpPtr.p->print();
return;
}
// ** Print all records **
#if 0 // not implemented
PreparedOperationPtr prepOpPtr;
if (!c_runningPreparedOperations.first(prepOpPtr)) {
ndbout << "No PreparedOperations exist" << endl;
@ -583,6 +583,7 @@ DbUtil::execDUMP_STATE_ORD(Signal* signal){
ndbout << "]";
c_runningPreparedOperations.next(prepOpPtr);
}
#endif
return;
case 3:
@ -988,7 +989,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
* Seize and store PreparedOperation struct
*******************************************/
PreparedOperationPtr prepOpPtr;
if(!c_runningPreparedOperations.seize(prepOpPtr)) {
if(!c_preparedOperationPool.seize(prepOpPtr)) {
jam();
releaseSections(signal);
sendUtilPrepareRef(signal, UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR,
@ -1738,17 +1739,7 @@ DbUtil::execUTIL_EXECUTE_REQ(Signal* signal)
* Get PreparedOperation struct
*******************************/
PreparedOperationPtr prepOpPtr;
c_runningPreparedOperations.first(prepOpPtr);
while (!prepOpPtr.isNull() && prepOpPtr.i != prepareId)
c_runningPreparedOperations.next(prepOpPtr);
if (prepOpPtr.i != prepareId) {
jam();
releaseSections(signal);
sendUtilExecuteRef(signal, UtilExecuteRef::IllegalPrepareId,
0, clientRef, clientData);
return;
}
c_preparedOperationPool.getPtr(prepOpPtr, prepareId);
prepOpPtr.p->releaseFlag = releaseFlag;

View file

@ -389,7 +389,6 @@ public:
DataBuffer<1>::DataBufferPool c_attrMappingPool;
DataBuffer<11>::DataBufferPool c_dataBufPool;
DLList<Prepare> c_runningPrepares;
DLList<PreparedOperation> c_runningPreparedOperations;
DLList<Transaction> c_seizingTransactions; // Being seized at TC
DLList<Transaction> c_runningTransactions; // Seized and now exec.

View file

@ -40,6 +40,12 @@
#define dbg(x)
#endif
#if 1
#define DBG_LCP(x)
#else
#define DBG_LCP(x) ndbout << x
#endif
Pgman::Pgman(const Configuration & conf) :
SimulatedBlock(PGMAN, conf),
m_file_map(m_data_buffer_pool),
@ -1083,6 +1089,7 @@ Pgman::execLCP_FRAG_ORD(Signal* signal)
LcpFragOrd* ord = (LcpFragOrd*)signal->getDataPtr();
ndbrequire(ord->lcpId >= m_last_lcp_complete + 1 || m_last_lcp_complete == 0);
m_last_lcp = ord->lcpId;
DBG_LCP("execLCP_FRAG_ORD" << endl);
ndbrequire(!m_lcp_outstanding);
ndbrequire(m_lcp_copy_page_free);
@ -1104,6 +1111,8 @@ Pgman::execEND_LCP_REQ(Signal* signal)
EndLcpReq* req = (EndLcpReq*)signal->getDataPtr();
m_end_lcp_req = *req;
DBG_LCP("execEND_LCP_REQ" << endl);
#ifdef VM_TRACE
debugOut
<< "PGMAN: execEND_LCP_REQ"
@ -1117,6 +1126,7 @@ Pgman::execEND_LCP_REQ(Signal* signal)
ndbrequire(! m_lcp_loop_on);
signal->theData[0] = m_end_lcp_req.senderData;
sendSignal(m_end_lcp_req.senderRef, GSN_END_LCP_CONF, signal, 1, JBB);
DBG_LCP("GSN_END_LCP_CONF" << endl);
}
m_last_lcp_complete = m_last_lcp;
@ -1149,6 +1159,8 @@ Pgman::process_lcp(Signal* signal)
Ptr<Page_entry>& ptr = iter.curr;
Uint16 state = ptr.p->m_state;
DBG_LCP("PROCESS LCP: " << ptr);
if (ptr.p->m_last_lcp < m_last_lcp &&
(state & Page_entry::DIRTY))
{
@ -1159,6 +1171,7 @@ Pgman::process_lcp(Signal* signal)
}
if (state & Page_entry::BUSY)
{
DBG_LCP(" BUSY" << endl);
break; // wait for it
}
if (state & Page_entry::LOCKED)
@ -1169,6 +1182,7 @@ Pgman::process_lcp(Signal* signal)
*/
if (!m_lcp_copy_page_free)
{
DBG_LCP(" !m_lcp_copy_page_free" << endl);
break;
}
m_lcp_copy_page_free = false;
@ -1183,10 +1197,12 @@ Pgman::process_lcp(Signal* signal)
}
else if (state & Page_entry::PAGEOUT)
{
DBG_LCP(" PAGEOUT -> state |= LCP" << endl);
set_page_state(ptr, state | Page_entry::LCP);
}
else
{
DBG_LCP(" pageout()" << endl);
ptr.p->m_state |= Page_entry::LCP;
pageout(signal, ptr);
}
@ -1205,11 +1221,15 @@ Pgman::process_lcp(Signal* signal)
{
signal->theData[0] = m_end_lcp_req.senderData;
sendSignal(m_end_lcp_req.senderRef, GSN_END_LCP_CONF, signal, 1, JBB);
DBG_LCP("GSN_END_LCP_CONF" << endl);
}
DBG_LCP(" -- RETURN FALSE" << endl);
m_last_lcp_complete = m_last_lcp;
m_lcp_curr_bucket = ~(Uint32)0;
return false;
}
DBG_LCP(" -- RETURN TRUE" << endl);
return true;
}

View file

@ -684,20 +684,15 @@ Tsman::open_file(Signal* signal,
req->file_size_lo = lo;
Uint64 pages = (Uint64(hi) << 32 | lo) / File_formats::NDB_PAGE_SIZE;
// Extent size in #pages
Uint32 extent_size = ts_ptr.p->m_extent_size;
Uint32 extent_size = ts_ptr.p->m_extent_size; // Extent size in #pages
Uint64 extents = (pages + extent_size - 1) / extent_size;
extents = extents ? extents : 1;
Uint64 data_pages = extents * extent_size;
Uint32 eh_words = File_formats::Datafile::extent_header_words(extent_size);
ndbrequire(eh_words < File_formats::Datafile::EXTENT_PAGE_WORDS);
Uint32 extents_per_page = File_formats::Datafile::EXTENT_PAGE_WORDS/eh_words;
Uint64 tmp = Uint64(extents_per_page) * Uint64(extent_size);
Uint64 extent_pages = pages / (1+tmp);
extent_pages = extent_pages ? extent_pages : 1;
Uint64 data_pages = pages - extent_pages -1;
Uint64 extents = data_pages / extent_size;
data_pages = extents * extent_size;
Uint64 extent_pages = (extents + extents_per_page - 1) / extents_per_page;
ptr.p->m_create.m_extent_pages = extent_pages;
ptr.p->m_create.m_data_pages = data_pages;

View file

@ -23,6 +23,7 @@
#include <NdbBlob.hpp>
#include "NdbBlobImpl.hpp"
#include <NdbScanOperation.hpp>
#include <NdbEventOperationImpl.hpp>
/*
* Reading index table directly (as a table) is faster but there are
@ -147,6 +148,61 @@ NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnIm
DBUG_VOID_RETURN;
}
int
NdbBlob::getBlobEventName(char* bename, Ndb* anNdb, const char* eventName, const char* columnName)
{
NdbEventImpl* e = anNdb->theDictionary->m_impl.getEvent(eventName);
if (e == NULL)
return -1;
NdbColumnImpl* c = e->m_tableImpl->getColumn(columnName);
if (c == NULL)
return -1;
getBlobEventName(bename, e, c);
return 0;
}
void
NdbBlob::getBlobEventName(char* bename, const NdbEventImpl* e, const NdbColumnImpl* c)
{
// XXX events should have object id
snprintf(bename, MAX_TAB_NAME_SIZE, "NDB$BLOBEVENT_%s_%d", e->m_name.c_str(), (int)c->m_column_no);
}
void
NdbBlob::getBlobEvent(NdbEventImpl& be, const NdbEventImpl* e, const NdbColumnImpl* c)
{
DBUG_ENTER("NdbBlob::getBlobEvent");
// blob table
assert(c->m_blobTable != NULL);
const NdbTableImpl& bt = *c->m_blobTable;
// blob event name
char bename[NdbBlobImpl::BlobTableNameSize];
getBlobEventName(bename, e, c);
be.setName(bename);
be.setTable(bt);
// simple assigments
be.mi_type = e->mi_type;
be.m_dur = e->m_dur;
be.m_mergeEvents = e->m_mergeEvents;
// report unchanged data
// not really needed now since UPD is DEL o INS and we subscribe to all
be.setReport(NdbDictionary::Event::ER_ALL);
// columns PK - DIST - PART - DATA
{ const NdbColumnImpl* bc = bt.getColumn((Uint32)0);
be.addColumn(*bc);
}
{ const NdbColumnImpl* bc = bt.getColumn((Uint32)1);
be.addColumn(*bc);
}
{ const NdbColumnImpl* bc = bt.getColumn((Uint32)2);
be.addColumn(*bc);
}
{ const NdbColumnImpl* bc = bt.getColumn((Uint32)3);
be.addColumn(*bc);
}
DBUG_VOID_RETURN;
}
// initialization
NdbBlob::NdbBlob(Ndb*)
@ -158,9 +214,16 @@ void
NdbBlob::init()
{
theState = Idle;
theEventBlobVersion = -1;
theNdb = NULL;
theNdbCon = NULL;
theNdbOp = NULL;
theEventOp = NULL;
theBlobEventOp = NULL;
theBlobEventPkRecAttr = NULL;
theBlobEventDistRecAttr = NULL;
theBlobEventPartRecAttr = NULL;
theBlobEventDataRecAttr = NULL;
theTable = NULL;
theAccessTable = NULL;
theBlobTable = NULL;
@ -439,7 +502,7 @@ NdbBlob::getHeadFromRecAttr()
DBUG_ENTER("NdbBlob::getHeadFromRecAttr");
assert(theHeadInlineRecAttr != NULL);
theNullFlag = theHeadInlineRecAttr->isNULL();
assert(theNullFlag != -1);
assert(theEventBlobVersion >= 0 || theNullFlag != -1);
theLength = ! theNullFlag ? theHead->length : 0;
DBUG_VOID_RETURN;
}
@ -543,6 +606,18 @@ NdbBlob::setActiveHook(ActiveHook activeHook, void* arg)
// misc operations
int
NdbBlob::getDefined(int& isNull)
{
DBUG_ENTER("NdbBlob::getDefined");
if (theState == Prepared && theSetFlag) {
isNull = (theSetBuf == NULL);
DBUG_RETURN(0);
}
isNull = theNullFlag;
DBUG_RETURN(0);
}
int
NdbBlob::getNull(bool& isNull)
{
@ -887,6 +962,18 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
{
DBUG_ENTER("NdbBlob::readParts");
DBUG_PRINT("info", ("part=%u count=%u", part, count));
int ret;
if (theEventBlobVersion == -1)
ret = readTableParts(buf, part, count);
else
ret = readEventParts(buf, part, count);
DBUG_RETURN(ret);
}
int
NdbBlob::readTableParts(char* buf, Uint32 part, Uint32 count)
{
DBUG_ENTER("NdbBlob::readTableParts");
Uint32 n = 0;
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
@ -906,6 +993,18 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
DBUG_RETURN(0);
}
int
NdbBlob::readEventParts(char* buf, Uint32 part, Uint32 count)
{
DBUG_ENTER("NdbBlob::readEventParts");
int ret = theEventOp->readBlobParts(buf, this, part, count);
if (ret != 0) {
setErrorCode(theEventOp);
DBUG_RETURN(-1);
}
DBUG_RETURN(0);
}
int
NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count)
{
@ -1094,48 +1193,12 @@ NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl
theTable = anOp->m_currentTable;
theAccessTable = anOp->m_accessTable;
theColumn = aColumn;
NdbDictionary::Column::Type partType = NdbDictionary::Column::Undefined;
switch (theColumn->getType()) {
case NdbDictionary::Column::Blob:
partType = NdbDictionary::Column::Binary;
theFillChar = 0x0;
break;
case NdbDictionary::Column::Text:
partType = NdbDictionary::Column::Char;
theFillChar = 0x20;
break;
default:
setErrorCode(NdbBlobImpl::ErrUsage);
// prepare blob column and table
if (prepareColumn() == -1)
DBUG_RETURN(-1);
}
// sizes
theInlineSize = theColumn->getInlineSize();
thePartSize = theColumn->getPartSize();
theStripeSize = theColumn->getStripeSize();
// sanity check
assert((NDB_BLOB_HEAD_SIZE << 2) == sizeof(Head));
assert(theColumn->m_attrSize * theColumn->m_arraySize == sizeof(Head) + theInlineSize);
if (thePartSize > 0) {
const NdbDictionary::Table* bt = NULL;
const NdbDictionary::Column* bc = NULL;
if (theStripeSize == 0 ||
(bt = theColumn->getBlobTable()) == NULL ||
(bc = bt->getColumn("DATA")) == NULL ||
bc->getType() != partType ||
bc->getLength() != (int)thePartSize) {
setErrorCode(NdbBlobImpl::ErrTable);
DBUG_RETURN(-1);
}
theBlobTable = &NdbTableImpl::getImpl(*bt);
}
// buffers
theKeyBuf.alloc(theTable->m_keyLenInWords << 2);
// extra buffers
theAccessKeyBuf.alloc(theAccessTable->m_keyLenInWords << 2);
theHeadInlineBuf.alloc(sizeof(Head) + theInlineSize);
theHeadInlineCopyBuf.alloc(sizeof(Head) + theInlineSize);
thePartBuf.alloc(thePartSize);
theHead = (Head*)theHeadInlineBuf.data;
theInlineData = theHeadInlineBuf.data + sizeof(Head);
// handle different operation types
bool supportedOp = false;
if (isKeyOp()) {
@ -1189,6 +1252,103 @@ NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl
DBUG_RETURN(0);
}
int
NdbBlob::atPrepare(NdbEventOperationImpl* anOp, NdbEventOperationImpl* aBlobOp, const NdbColumnImpl* aColumn, int version)
{
DBUG_ENTER("NdbBlob::atPrepare [event]");
DBUG_PRINT("info", ("this=%p op=%p", this, anOp));
assert(theState == Idle);
assert(version == 0 || version == 1);
theEventBlobVersion = version;
// ndb api stuff
theNdb = anOp->m_ndb;
theEventOp = anOp;
theBlobEventOp = aBlobOp;
theTable = anOp->m_eventImpl->m_tableImpl;
theColumn = aColumn;
// prepare blob column and table
if (prepareColumn() == -1)
DBUG_RETURN(-1);
// tinyblob sanity
assert((theBlobEventOp == NULL) == (theBlobTable == NULL));
// extra buffers
theBlobEventDataBuf.alloc(thePartSize);
// prepare receive of head+inline
theHeadInlineRecAttr = theEventOp->getValue(aColumn, theHeadInlineBuf.data, version);
if (theHeadInlineRecAttr == NULL) {
setErrorCode(theEventOp);
DBUG_RETURN(-1);
}
// prepare receive of blob part
if (theBlobEventOp != NULL) {
if ((theBlobEventPkRecAttr =
theBlobEventOp->getValue(theBlobTable->getColumn((Uint32)0),
theKeyBuf.data, version)) == NULL ||
(theBlobEventDistRecAttr =
theBlobEventOp->getValue(theBlobTable->getColumn((Uint32)1),
(char*)0, version)) == NULL ||
(theBlobEventPartRecAttr =
theBlobEventOp->getValue(theBlobTable->getColumn((Uint32)2),
(char*)&thePartNumber, version)) == NULL ||
(theBlobEventDataRecAttr =
theBlobEventOp->getValue(theBlobTable->getColumn((Uint32)3),
theBlobEventDataBuf.data, version)) == NULL) {
setErrorCode(theBlobEventOp);
DBUG_RETURN(-1);
}
}
setState(Prepared);
DBUG_RETURN(0);
}
int
NdbBlob::prepareColumn()
{
DBUG_ENTER("prepareColumn");
NdbDictionary::Column::Type partType = NdbDictionary::Column::Undefined;
switch (theColumn->getType()) {
case NdbDictionary::Column::Blob:
partType = NdbDictionary::Column::Binary;
theFillChar = 0x0;
break;
case NdbDictionary::Column::Text:
partType = NdbDictionary::Column::Char;
theFillChar = 0x20;
break;
default:
setErrorCode(NdbBlobImpl::ErrUsage);
DBUG_RETURN(-1);
}
// sizes
theInlineSize = theColumn->getInlineSize();
thePartSize = theColumn->getPartSize();
theStripeSize = theColumn->getStripeSize();
// sanity check
assert((NDB_BLOB_HEAD_SIZE << 2) == sizeof(Head));
assert(theColumn->m_attrSize * theColumn->m_arraySize == sizeof(Head) + theInlineSize);
if (thePartSize > 0) {
const NdbTableImpl* bt = NULL;
const NdbColumnImpl* bc = NULL;
if (theStripeSize == 0 ||
(bt = theColumn->m_blobTable) == NULL ||
(bc = bt->getColumn("DATA")) == NULL ||
bc->getType() != partType ||
bc->getLength() != (int)thePartSize) {
setErrorCode(NdbBlobImpl::ErrTable);
DBUG_RETURN(-1);
}
// blob table
theBlobTable = &NdbTableImpl::getImpl(*bt);
}
// these buffers are always used
theKeyBuf.alloc(theTable->m_keyLenInWords << 2);
theHeadInlineBuf.alloc(sizeof(Head) + theInlineSize);
theHead = (Head*)theHeadInlineBuf.data;
theInlineData = theHeadInlineBuf.data + sizeof(Head);
thePartBuf.alloc(thePartSize);
DBUG_RETURN(0);
}
/*
* Before execute of prepared operation. May add new operations before
* this one. May ask that this operation and all before it (a "batch")
@ -1537,6 +1697,26 @@ NdbBlob::atNextResult()
DBUG_RETURN(0);
}
/*
* After next event on main table.
*/
int
NdbBlob::atNextEvent()
{
DBUG_ENTER("NdbBlob::atNextEvent");
DBUG_PRINT("info", ("this=%p op=%p blob op=%p version=%d", this, theEventOp, theBlobEventOp, theEventBlobVersion));
if (theState == Invalid)
DBUG_RETURN(-1);
assert(theEventBlobVersion >= 0);
getHeadFromRecAttr();
if (theNullFlag == -1) // value not defined
DBUG_RETURN(0);
if (setPos(0) == -1)
DBUG_RETURN(-1);
setState(Active);
DBUG_RETURN(0);
}
// misc
const NdbDictionary::Column*
@ -1589,6 +1769,17 @@ NdbBlob::setErrorCode(NdbTransaction* aCon, bool invalidFlag)
setErrorCode(code, invalidFlag);
}
void
NdbBlob::setErrorCode(NdbEventOperationImpl* anOp, bool invalidFlag)
{
int code = 0;
if ((code = anOp->m_error.code) != 0)
;
else
code = NdbBlobImpl::ErrUnknown;
setErrorCode(code, invalidFlag);
}
// info about all blobs in this operation
NdbBlob*

View file

@ -901,6 +901,11 @@ int NdbDictionary::Event::getNoOfEventColumns() const
return m_impl.getNoOfEventColumns();
}
void NdbDictionary::Event::mergeEvents(bool flag)
{
m_impl.m_mergeEvents = flag;
}
NdbDictionary::Object::Status
NdbDictionary::Event::getObjectStatus() const
{
@ -1473,6 +1478,12 @@ NdbDictionary::Dictionary::getNdbError() const {
return m_impl.getNdbError();
}
void
NdbDictionary::Dictionary::fix_blob_events(const Table* table, const char* ev_name)
{
m_impl.fix_blob_events(table, ev_name);
}
// printers
NdbOut&

View file

@ -1072,6 +1072,7 @@ void NdbEventImpl::init()
m_tableId= RNIL;
mi_type= 0;
m_dur= NdbDictionary::Event::ED_UNDEFINED;
m_mergeEvents = false;
m_tableImpl= NULL;
m_rep= NdbDictionary::Event::ER_UPDATED;
}
@ -2036,7 +2037,7 @@ int
NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
{
unsigned n= t.m_noOfBlobs;
DBUG_ENTER("NdbDictioanryImpl::addBlobTables");
DBUG_ENTER("NdbDictionaryImpl::addBlobTables");
// optimized for blob column being the last one
// and not looking for more than one if not neccessary
for (unsigned i = t.m_columns.size(); i > 0 && n > 0;) {
@ -3151,7 +3152,37 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
#endif
// NdbDictInterface m_receiver;
DBUG_RETURN(m_receiver.createEvent(m_ndb, evnt, 0 /* getFlag unset */));
if (m_receiver.createEvent(m_ndb, evnt, 0 /* getFlag unset */) != 0)
DBUG_RETURN(-1);
// Create blob events
if (evnt.m_mergeEvents && createBlobEvents(evnt) != 0) {
int save_code = m_error.code;
(void)dropEvent(evnt.m_name.c_str());
m_error.code = save_code;
DBUG_RETURN(-1);
}
DBUG_RETURN(0);
}
int
NdbDictionaryImpl::createBlobEvents(NdbEventImpl& evnt)
{
DBUG_ENTER("NdbDictionaryImpl::createBlobEvents");
NdbTableImpl& t = *evnt.m_tableImpl;
Uint32 n = t.m_noOfBlobs;
Uint32 i;
for (i = 0; i < evnt.m_columns.size() && n > 0; i++) {
NdbColumnImpl & c = *evnt.m_columns[i];
if (! c.getBlobType() || c.getPartSize() == 0)
continue;
n--;
NdbEventImpl blob_evnt;
NdbBlob::getBlobEvent(blob_evnt, &evnt, &c);
if (createEvent(blob_evnt) != 0)
DBUG_RETURN(-1);
}
DBUG_RETURN(0);
}
int
@ -3367,12 +3398,14 @@ NdbDictionaryImpl::getEvent(const char * eventName)
if (ev->m_tableId == info->m_table_impl->m_id &&
ev->m_tableVersion == info->m_table_impl->m_version)
break;
DBUG_PRINT("error",("%s: retry=%d: "
"table version mismatch, event: [%u,%u] table: [%u,%u]",
ev->getTableName(), retry,
ev->m_tableId, ev->m_tableVersion,
info->m_table_impl->m_id, info->m_table_impl->m_version));
if (retry)
{
m_error.code= 241;
DBUG_PRINT("error",("%s: table version mismatch, event: [%u,%u] table: [%u,%u]",
ev->getTableName(), ev->m_tableId, ev->m_tableVersion,
info->m_table_impl->m_id, info->m_table_impl->m_version));
delete ev;
DBUG_RETURN(NULL);
}
@ -3400,6 +3433,7 @@ NdbDictionaryImpl::getEvent(const char * eventName)
if ( attributeList_sz > table.getNoOfColumns() )
{
m_error.code = 241;
DBUG_PRINT("error",("Invalid version, too many columns"));
delete ev;
DBUG_RETURN(NULL);
@ -3409,6 +3443,7 @@ NdbDictionaryImpl::getEvent(const char * eventName)
for(unsigned id= 0; ev->m_columns.size() < attributeList_sz; id++) {
if ( id >= table.getNoOfColumns())
{
m_error.code = 241;
DBUG_PRINT("error",("Invalid version, column %d out of range", id));
delete ev;
DBUG_RETURN(NULL);
@ -3566,13 +3601,64 @@ NdbDictInterface::execSUB_START_REF(NdbApiSignal * signal,
int
NdbDictionaryImpl::dropEvent(const char * eventName)
{
NdbEventImpl *ev= new NdbEventImpl();
ev->setName(eventName);
int ret= m_receiver.dropEvent(*ev);
delete ev;
DBUG_ENTER("NdbDictionaryImpl::dropEvent");
DBUG_PRINT("info", ("name=%s", eventName));
// printf("__________________RET %u\n", ret);
return ret;
NdbEventImpl *evnt = getEvent(eventName); // allocated
if (evnt == NULL) {
if (m_error.code != 723 && // no such table
m_error.code != 241) // invalid table
DBUG_RETURN(-1);
DBUG_PRINT("info", ("no table err=%d, drop by name alone", m_error.code));
evnt = new NdbEventImpl();
evnt->setName(eventName);
}
int ret = dropEvent(*evnt);
delete evnt;
DBUG_RETURN(ret);
}
int
NdbDictionaryImpl::dropEvent(const NdbEventImpl& evnt)
{
if (dropBlobEvents(evnt) != 0)
return -1;
if (m_receiver.dropEvent(evnt) != 0)
return -1;
return 0;
}
int
NdbDictionaryImpl::dropBlobEvents(const NdbEventImpl& evnt)
{
DBUG_ENTER("NdbDictionaryImpl::dropBlobEvents");
if (evnt.m_tableImpl != 0) {
const NdbTableImpl& t = *evnt.m_tableImpl;
Uint32 n = t.m_noOfBlobs;
Uint32 i;
for (i = 0; i < evnt.m_columns.size() && n > 0; i++) {
const NdbColumnImpl& c = *evnt.m_columns[i];
if (! c.getBlobType() || c.getPartSize() == 0)
continue;
n--;
char bename[MAX_TAB_NAME_SIZE];
NdbBlob::getBlobEventName(bename, &evnt, &c);
(void)dropEvent(bename);
}
} else {
// loop over MAX_ATTRIBUTES_IN_TABLE ...
Uint32 i;
for (i = 0; i < MAX_ATTRIBUTES_IN_TABLE; i++) {
char bename[MAX_TAB_NAME_SIZE];
// XXX should get name from NdbBlob
sprintf(bename, "NDB$BLOBEVENT_%s_%u", evnt.getName(), i);
NdbEventImpl* bevnt = new NdbEventImpl();
bevnt->setName(bename);
(void)m_receiver.dropEvent(*bevnt);
delete bevnt;
}
}
DBUG_RETURN(0);
}
int
@ -4557,6 +4643,30 @@ NdbDictInterface::parseFileInfo(NdbFileImpl &dst,
return 0;
}
// XXX temp
void
NdbDictionaryImpl::fix_blob_events(const NdbDictionary::Table* table, const char* ev_name)
{
const NdbTableImpl& t = table->m_impl;
const NdbEventImpl* ev = getEvent(ev_name);
assert(ev != NULL && ev->m_tableImpl == &t);
Uint32 i;
for (i = 0; i < t.m_columns.size(); i++) {
assert(t.m_columns[i] != NULL);
const NdbColumnImpl& c = *t.m_columns[i];
if (! c.getBlobType() || c.getPartSize() == 0)
continue;
char bename[200];
NdbBlob::getBlobEventName(bename, ev, &c);
// following fixes dict cache blob table
NdbEventImpl* bev = getEvent(bename);
if (c.m_blobTable != bev->m_tableImpl) {
// XXX const violation
((NdbColumnImpl*)&c)->m_blobTable = bev->m_tableImpl;
}
}
}
template class Vector<int>;
template class Vector<Uint16>;
template class Vector<Uint32>;

View file

@ -277,7 +277,6 @@ public:
NdbDictionary::Event::EventDurability getDurability() const;
void setReport(NdbDictionary::Event::EventReport r);
NdbDictionary::Event::EventReport getReport() const;
void addEventColumn(const NdbColumnImpl &c);
int getNoOfEventColumns() const;
void print() {
@ -295,6 +294,7 @@ public:
Uint32 mi_type;
NdbDictionary::Event::EventDurability m_dur;
NdbDictionary::Event::EventReport m_rep;
bool m_mergeEvents;
NdbTableImpl *m_tableImpl;
BaseString m_tableName;
@ -547,7 +547,10 @@ public:
NdbTableImpl * table);
int createEvent(NdbEventImpl &);
int createBlobEvents(NdbEventImpl &);
int dropEvent(const char * eventName);
int dropEvent(const NdbEventImpl &);
int dropBlobEvents(const NdbEventImpl &);
int executeSubscribeEvent(NdbEventOperationImpl &);
int stopSubscribeEvent(NdbEventOperationImpl &);
@ -589,6 +592,9 @@ public:
NdbDictInterface m_receiver;
Ndb & m_ndb;
// XXX temp
void fix_blob_events(const NdbDictionary::Table* table, const char* ev_name);
private:
NdbIndexImpl * getIndexImpl(const char * name,
const BaseString& internalName);

View file

@ -55,6 +55,18 @@ NdbEventOperation::getPreValue(const char *colName, char *aValue)
return m_impl.getValue(colName, aValue, 1);
}
NdbBlob *
NdbEventOperation::getBlobHandle(const char *colName)
{
return m_impl.getBlobHandle(colName, 0);
}
NdbBlob *
NdbEventOperation::getPreBlobHandle(const char *colName)
{
return m_impl.getBlobHandle(colName, 1);
}
int
NdbEventOperation::execute()
{

View file

@ -38,6 +38,7 @@
#include "DictCache.hpp"
#include <portlib/NdbMem.h>
#include <NdbRecAttr.hpp>
#include <NdbBlob.hpp>
#include <NdbEventOperation.hpp>
#include "NdbEventOperationImpl.hpp"
@ -48,6 +49,20 @@ static Gci_container g_empty_gci_container;
static const Uint32 ACTIVE_GCI_DIRECTORY_SIZE = 4;
static const Uint32 ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1;
#ifdef VM_TRACE
static void
print_std(const SubTableData * sdata, LinearSectionPtr ptr[3])
{
printf("addr=%p gci=%d op=%d\n", (void*)sdata, sdata->gci, sdata->operation);
for (int i = 0; i <= 2; i++) {
printf("sec=%d addr=%p sz=%d\n", i, (void*)ptr[i].p, ptr[i].sz);
for (int j = 0; j < ptr[i].sz; j++)
printf("%08x ", ptr[i].p[j]);
printf("\n");
}
}
#endif
/*
* Class NdbEventOperationImpl
*
@ -60,7 +75,7 @@ static const Uint32 ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1;
#define DBUG_RETURN_EVENT(A) DBUG_RETURN(A)
#define DBUG_VOID_RETURN_EVENT DBUG_VOID_RETURN
#define DBUG_PRINT_EVENT(A,B) DBUG_PRINT(A,B)
#define DBUG_DUMP_EVENT(A,B,C) DBUG_SUMP(A,B,C)
#define DBUG_DUMP_EVENT(A,B,C) DBUG_DUMP(A,B,C)
#else
#define DBUG_ENTER_EVENT(A)
#define DBUG_RETURN_EVENT(A) return(A)
@ -92,6 +107,11 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
theCurrentDataAttrs[0] = NULL;
theFirstDataAttrs[1] = NULL;
theCurrentDataAttrs[1] = NULL;
theBlobList = NULL;
theBlobOpList = NULL;
theMainOp = NULL;
m_data_item= NULL;
m_eventImpl = NULL;
@ -117,7 +137,11 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
m_state= EO_CREATED;
m_mergeEvents = false;
#ifdef ndb_event_stores_merge_events_flag
m_mergeEvents = m_eventImpl->m_mergeEvents;
#else
m_mergeEvents = false;
#endif
m_has_error= 0;
@ -254,10 +278,191 @@ NdbEventOperationImpl::getValue(const NdbColumnImpl *tAttrInfo, char *aValue, in
DBUG_RETURN(tAttr);
}
NdbBlob*
NdbEventOperationImpl::getBlobHandle(const char *colName, int n)
{
DBUG_ENTER("NdbEventOperationImpl::getBlobHandle (colName)");
assert(m_mergeEvents);
if (m_state != EO_CREATED) {
ndbout_c("NdbEventOperationImpl::getBlobHandle may only be called between "
"instantiation and execute()");
DBUG_RETURN(NULL);
}
NdbColumnImpl *tAttrInfo = m_eventImpl->m_tableImpl->getColumn(colName);
if (tAttrInfo == NULL) {
ndbout_c("NdbEventOperationImpl::getBlobHandle attribute %s not found",colName);
DBUG_RETURN(NULL);
}
NdbBlob* bh = getBlobHandle(tAttrInfo, n);
DBUG_RETURN(bh);
}
NdbBlob*
NdbEventOperationImpl::getBlobHandle(const NdbColumnImpl *tAttrInfo, int n)
{
DBUG_ENTER("NdbEventOperationImpl::getBlobHandle");
DBUG_PRINT("info", ("attr=%s post/pre=%d", tAttrInfo->m_name.c_str(), n));
// as in NdbOperation, create only one instance
NdbBlob* tBlob = theBlobList;
NdbBlob* tLastBlob = NULL;
while (tBlob != NULL) {
if (tBlob->theColumn == tAttrInfo && tBlob->theEventBlobVersion == n)
DBUG_RETURN(tBlob);
tLastBlob = tBlob;
tBlob = tBlob->theNext;
}
NdbEventOperationImpl* tBlobOp = NULL;
const bool is_tinyblob = (tAttrInfo->getPartSize() == 0);
assert(is_tinyblob == (tAttrInfo->m_blobTable == NULL));
if (! is_tinyblob) {
// blob event name
char bename[MAX_TAB_NAME_SIZE];
NdbBlob::getBlobEventName(bename, m_eventImpl, tAttrInfo);
// find blob event op if any (it serves both post and pre handles)
tBlobOp = theBlobOpList;
NdbEventOperationImpl* tLastBlopOp = NULL;
while (tBlobOp != NULL) {
if (strcmp(tBlobOp->m_eventImpl->m_name.c_str(), bename) == 0) {
assert(tBlobOp->m_eventImpl->m_tableImpl == tAttrInfo->m_blobTable);
break;
}
tLastBlopOp = tBlobOp;
tBlobOp = tBlobOp->m_next;
}
DBUG_PRINT("info", ("%s op %s", tBlobOp ? " reuse" : " create", bename));
// create blob event op if not found
if (tBlobOp == NULL) {
// to hide blob op it is linked under main op, not under m_ndb
NdbEventOperation* tmp =
m_ndb->theEventBuffer->createEventOperation(bename, m_error);
if (tmp == NULL)
DBUG_RETURN(NULL);
tBlobOp = &tmp->m_impl;
// pointer to main table op
tBlobOp->theMainOp = this;
tBlobOp->m_mergeEvents = m_mergeEvents;
// add to list end
if (tLastBlopOp == NULL)
theBlobOpList = tBlobOp;
else
tLastBlopOp->m_next = tBlobOp;
tBlobOp->m_next = NULL;
}
}
tBlob = m_ndb->getNdbBlob();
if (tBlob == NULL)
DBUG_RETURN(NULL);
// calls getValue on inline and blob part
if (tBlob->atPrepare(this, tBlobOp, tAttrInfo, n) == -1) {
m_ndb->releaseNdbBlob(tBlob);
DBUG_RETURN(NULL);
}
// add to list end
if (tLastBlob == NULL)
theBlobList = tBlob;
else
tLastBlob->theNext = tBlob;
tBlob->theNext = NULL;
DBUG_RETURN(tBlob);
}
int
NdbEventOperationImpl::readBlobParts(char* buf, NdbBlob* blob,
Uint32 part, Uint32 count)
{
DBUG_ENTER_EVENT("NdbEventOperationImpl::readBlobParts");
DBUG_PRINT_EVENT("info", ("part=%u count=%u post/pre=%d",
part, count, blob->theEventBlobVersion));
NdbEventOperationImpl* blob_op = blob->theBlobEventOp;
EventBufData* main_data = m_data_item;
DBUG_PRINT_EVENT("info", ("main_data=%p", main_data));
assert(main_data != NULL);
// search for blob parts list head
EventBufData* head;
assert(m_data_item != NULL);
head = m_data_item->m_next_blob;
while (head != NULL)
{
if (head->m_event_op == blob_op)
{
DBUG_PRINT_EVENT("info", ("found blob parts head %p", head));
break;
}
head = head->m_next_blob;
}
Uint32 nparts = 0;
EventBufData* data = head;
// XXX optimize using part no ordering
while (data != NULL)
{
/*
* Hack part no directly out of buffer since it is not returned
* in pre data (PK buglet). For part data use receive_event().
* This means extra copy.
*/
blob_op->m_data_item = data;
int r = blob_op->receive_event();
assert(r > 0);
Uint32 no = data->get_blob_part_no();
Uint32 sz = blob->thePartSize;
const char* src = blob->theBlobEventDataBuf.data;
DBUG_PRINT_EVENT("info", ("part_data=%p part no=%u part sz=%u", data, no, sz));
if (part <= no && no < part + count)
{
DBUG_PRINT_EVENT("info", ("part within read range"));
memcpy(buf + (no - part) * sz, src, sz);
nparts++;
}
else
{
DBUG_PRINT_EVENT("info", ("part outside read range"));
}
data = data->m_next;
}
assert(nparts == count);
DBUG_RETURN_EVENT(0);
}
int
NdbEventOperationImpl::execute()
{
DBUG_ENTER("NdbEventOperationImpl::execute");
m_ndb->theEventBuffer->add_drop_lock();
int r = execute_nolock();
m_ndb->theEventBuffer->add_drop_unlock();
DBUG_RETURN(r);
}
int
NdbEventOperationImpl::execute_nolock()
{
DBUG_ENTER("NdbEventOperationImpl::execute_nolock");
DBUG_PRINT("info", ("this=%p type=%s", this, !theMainOp ? "main" : "blob"));
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
if (!myDict) {
m_error.code= m_ndb->getNdbError().code;
@ -266,18 +471,26 @@ NdbEventOperationImpl::execute()
if (theFirstPkAttrs[0] == NULL &&
theFirstDataAttrs[0] == NULL) { // defaults to get all
}
m_ndb->theEventBuffer->add_drop_lock();
m_magic_number= NDB_EVENT_OP_MAGIC_NUMBER;
m_state= EO_EXECUTING;
mi_type= m_eventImpl->mi_type;
m_ndb->theEventBuffer->add_op();
int r= NdbDictionaryImpl::getImpl(*myDict).executeSubscribeEvent(*this);
if (r == 0) {
m_ndb->theEventBuffer->add_drop_unlock();
DBUG_RETURN(0);
if (theMainOp == NULL) {
DBUG_PRINT("info", ("execute blob ops"));
NdbEventOperationImpl* blob_op = theBlobOpList;
while (blob_op != NULL) {
r = blob_op->execute_nolock();
if (r != 0)
break;
blob_op = blob_op->m_next;
}
}
if (r == 0)
DBUG_RETURN(0);
}
//Error
m_state= EO_ERROR;
@ -285,7 +498,6 @@ NdbEventOperationImpl::execute()
m_magic_number= 0;
m_error.code= myDict->getNdbError().code;
m_ndb->theEventBuffer->remove_op();
m_ndb->theEventBuffer->add_drop_unlock();
DBUG_RETURN(r);
}
@ -709,21 +921,6 @@ NdbEventBuffer::pollEvents(int aMillisecondNumber, Uint64 *latestGCI)
return ret;
}
#ifdef VM_TRACE
static void
print_std(const char* tag, const SubTableData * sdata, LinearSectionPtr ptr[3])
{
printf("%s\n", tag);
printf("addr=%p gci=%d op=%d\n", (void*)sdata, sdata->gci, sdata->operation);
for (int i = 0; i <= 2; i++) {
printf("sec=%d addr=%p sz=%d\n", i, (void*)ptr[i].p, ptr[i].sz);
for (int j = 0; j < ptr[i].sz; j++)
printf("%08x ", ptr[i].p[j]);
printf("\n");
}
}
#endif
NdbEventOperation *
NdbEventBuffer::nextEvent()
{
@ -751,6 +948,10 @@ NdbEventBuffer::nextEvent()
while ((data= m_available_data.m_head))
{
NdbEventOperationImpl *op= data->m_event_op;
DBUG_PRINT_EVENT("info", ("available data=%p op=%p", data, op));
// blob table ops must not be seen at this level
assert(op->theMainOp == NULL);
// set NdbEventOperation data
op->m_data_item= data;
@ -767,7 +968,10 @@ NdbEventBuffer::nextEvent()
// NUL event is not returned
if (data->sdata->operation == NdbDictionary::Event::_TE_NUL)
{
DBUG_PRINT_EVENT("info", ("skip _TE_NUL"));
continue;
}
int r= op->receive_event();
if (r > 0)
@ -777,6 +981,12 @@ NdbEventBuffer::nextEvent()
#ifdef VM_TRACE
m_latest_command= m_latest_command_save;
#endif
NdbBlob* tBlob = op->theBlobList;
while (tBlob != NULL)
{
(void)tBlob->atNextEvent();
tBlob = tBlob->theNext;
}
DBUG_RETURN_EVENT(op->m_facade);
}
// the next event belonged to an event op that is no
@ -1161,7 +1371,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
DBUG_ENTER_EVENT("NdbEventBuffer::insertDataL");
Uint64 gci= sdata->gci;
if ( likely((Uint32)op->mi_type & 1 << (Uint32)sdata->operation) )
if ( likely((Uint32)op->mi_type & (1 << (Uint32)sdata->operation)) )
{
Gci_container* bucket= find_bucket(&m_active_gci, gci);
@ -1179,9 +1389,17 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
DBUG_RETURN_EVENT(0);
}
bool use_hash =
op->m_mergeEvents &&
const bool is_blob_event = (op->theMainOp != NULL);
const bool is_data_event =
sdata->operation < NdbDictionary::Event::_TE_FIRST_NON_DATA_EVENT;
const bool use_hash = op->m_mergeEvents && is_data_event;
if (! is_data_event && is_blob_event)
{
// currently subscribed to but not used
DBUG_PRINT_EVENT("info", ("ignore non-data event on blob table"));
DBUG_RETURN_EVENT(0);
}
// find position in bucket hash table
EventBufData* data = 0;
@ -1201,16 +1419,43 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
op->m_has_error = 2;
DBUG_RETURN_EVENT(-1);
}
if (unlikely(copy_data(sdata, ptr, data)))
{
op->m_has_error = 3;
DBUG_RETURN_EVENT(-1);
}
// add it to list and hash table
bucket->m_data.append(data);
data->m_event_op = op;
if (! is_blob_event || ! is_data_event)
{
bucket->m_data.append(data);
}
else
{
// find or create main event for this blob event
EventBufData_hash::Pos main_hpos;
int ret = get_main_data(bucket, main_hpos, data);
if (ret == -1)
{
op->m_has_error = 4;
DBUG_RETURN_EVENT(-1);
}
EventBufData* main_data = main_hpos.data;
if (ret != 0) // main event was created
{
main_data->m_event_op = op->theMainOp;
bucket->m_data.append(main_data);
if (use_hash)
{
main_data->m_pkhash = main_hpos.pkhash;
bucket->m_data_hash.append(main_hpos, main_data);
}
}
// link blob event under main event
add_blob_data(main_data, data);
}
if (use_hash)
{
data->m_pkhash = hpos.pkhash;
bucket->m_data_hash.append(hpos, data);
}
#ifdef VM_TRACE
@ -1226,18 +1471,12 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
DBUG_RETURN_EVENT(-1);
}
}
data->m_event_op = op;
if (use_hash)
{
data->m_pkhash = hpos.pkhash;
}
DBUG_RETURN_EVENT(0);
}
#ifdef VM_TRACE
if ((Uint32)op->m_eventImpl->mi_type & 1 << (Uint32)sdata->operation)
if ((Uint32)op->m_eventImpl->mi_type & (1 << (Uint32)sdata->operation))
{
// XXX never reached
DBUG_PRINT_EVENT("info",("Data arrived before ready eventId", op->m_eventId));
DBUG_RETURN_EVENT(0);
}
@ -1300,6 +1539,8 @@ NdbEventBuffer::alloc_data()
int
NdbEventBuffer::alloc_mem(EventBufData* data, LinearSectionPtr ptr[3])
{
DBUG_ENTER("NdbEventBuffer::alloc_mem");
DBUG_PRINT("info", ("ptr sz %u + %u + %u", ptr[0].sz, ptr[1].sz, ptr[2].sz));
const Uint32 min_alloc_size = 128;
Uint32 sz4 = (sizeof(SubTableData) + 3) >> 2;
@ -1317,7 +1558,7 @@ NdbEventBuffer::alloc_mem(EventBufData* data, LinearSectionPtr ptr[3])
data->memory = (Uint32*)NdbMem_Allocate(alloc_size);
if (data->memory == 0)
return -1;
DBUG_RETURN(-1);
data->sz = alloc_size;
m_total_alloc += data->sz;
}
@ -1332,7 +1573,7 @@ NdbEventBuffer::alloc_mem(EventBufData* data, LinearSectionPtr ptr[3])
memptr += ptr[i].sz;
}
return 0;
DBUG_RETURN(0);
}
int
@ -1404,13 +1645,10 @@ copy_attr(AttributeHeader ah,
{
Uint32 k;
for (k = 0; k < n; k++)
p1[j1++] = p2[j2++];
}
else
{
j1 += n;
j2 += n;
p1[j1 + k] = p2[j2 + k];
}
j1 += n;
j2 += n;
}
int
@ -1443,8 +1681,8 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata,
data->sz = 0;
// compose ptr1 o ptr2 = ptr
LinearSectionPtr (&ptr1) [3] = olddata.ptr;
LinearSectionPtr (&ptr) [3] = data->ptr;
LinearSectionPtr (&ptr1)[3] = olddata.ptr;
LinearSectionPtr (&ptr)[3] = data->ptr;
// loop twice where first loop only sets sizes
int loop;
@ -1458,7 +1696,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata,
data->sdata->operation = tp->t3;
}
ptr[0].sz = ptr[1].sz = ptr[3].sz = 0;
ptr[0].sz = ptr[1].sz = ptr[2].sz = 0;
// copy pk from new version
{
@ -1572,6 +1810,113 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata,
DBUG_RETURN_EVENT(0);
}
/*
* Given blob part event, find main table event on inline part. It
* should exist (force in TUP) but may arrive later. If so, create
* NUL event on main table. The real event replaces it later.
*/
// write attribute headers for concatened PK
static void
split_concatenated_pk(const NdbTableImpl* t, Uint32* ah_buffer,
const Uint32* pk_buffer, Uint32 pk_sz)
{
Uint32 sz = 0; // words parsed so far
Uint32 n; // pk attr count
Uint32 i;
for (i = n = 0; i < t->m_columns.size() && n < t->m_noOfKeys; i++)
{
const NdbColumnImpl* c = t->getColumn(i);
assert(c != NULL);
if (! c->m_pk)
continue;
assert(sz < pk_sz);
Uint32 bytesize = c->m_attrSize * c->m_arraySize;
Uint32 lb, len;
bool ok = NdbSqlUtil::get_var_length(c->m_type, &pk_buffer[sz], bytesize,
lb, len);
assert(ok);
AttributeHeader ah(i, lb + len);
ah_buffer[n++] = ah.m_value;
sz += ah.getDataSize();
}
assert(n == t->m_noOfKeys && sz == pk_sz);
}
/*
 * Given a blob part event, locate the corresponding main-table event in
 * this GCI bucket.  The hash search key is the main table's PK, rebuilt
 * from the blob event's concatenated-PK attribute.  If the main event
 * has not arrived yet, a NUL place-holder event is allocated; the real
 * main event replaces/merges into it when it arrives.
 *
 * Returns 0 if an existing main event was found (hpos.data set),
 * 1 if a place-holder was created (caller links it into the bucket),
 * -1 on allocation or copy failure.
 */
int
NdbEventBuffer::get_main_data(Gci_container* bucket,
                              EventBufData_hash::Pos& hpos,
                              EventBufData* blob_data)
{
  DBUG_ENTER_EVENT("NdbEventBuffer::get_main_data");
  // blob ops always point back to their main op (asserted below)
  NdbEventOperationImpl* main_op = blob_data->m_event_op->theMainOp;
  assert(main_op != NULL);
  const NdbTableImpl* mainTable = main_op->m_eventImpl->m_tableImpl;
  // create LinearSectionPtr for main table key:
  // section 0 = attribute headers, section 1 = key data, section 2 unused.
  // NOTE(review): assumes the blob event's first attribute (ptr[0].p[0])
  // is the concatenated main-table PK -- confirm against SUMA layout.
  LinearSectionPtr ptr[3];
  Uint32 ah_buffer[NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY];
  ptr[0].sz = mainTable->m_noOfKeys;
  ptr[0].p = ah_buffer;
  ptr[1].sz = AttributeHeader(blob_data->ptr[0].p[0]).getDataSize();
  ptr[1].p = blob_data->ptr[1].p;
  ptr[2].sz = 0;
  ptr[2].p = 0;
  // expand the concatenated PK into per-column attribute headers
  split_concatenated_pk(mainTable, ptr[0].p, ptr[1].p, ptr[1].sz);
  DBUG_DUMP_EVENT("ah", (char*)ptr[0].p, ptr[0].sz << 2);
  DBUG_DUMP_EVENT("pk", (char*)ptr[1].p, ptr[1].sz << 2);
  // search for main event buffer
  bucket->m_data_hash.search(hpos, main_op, ptr);
  if (hpos.data != NULL)
    DBUG_RETURN_EVENT(0);
  // not found, create a place-holder: same header as the blob event but
  // retargeted at the main table and marked NUL (not a real operation)
  EventBufData* main_data = alloc_data();
  if (main_data == NULL)
    DBUG_RETURN_EVENT(-1);
  SubTableData sdata = *blob_data->sdata;
  sdata.tableId = main_op->m_eventImpl->m_tableImpl->m_id;
  sdata.operation = NdbDictionary::Event::_TE_NUL;
  if (copy_data(&sdata, ptr, main_data) != 0)
    DBUG_RETURN_EVENT(-1);
  hpos.data = main_data;
  DBUG_RETURN_EVENT(1);
}
/*
 * Attach a blob part event under its main-table event.  Each distinct
 * blob event operation gets one "head" entry on main_data->m_next_blob;
 * further parts belonging to the same operation are chained off the
 * head via m_next (no part-number ordering is maintained here).
 */
void
NdbEventBuffer::add_blob_data(EventBufData* main_data,
                              EventBufData* blob_data)
{
  DBUG_ENTER_EVENT("NdbEventBuffer::add_blob_data");
  DBUG_PRINT_EVENT("info", ("main_data=%p blob_data=%p", main_data, blob_data));
  // look for an existing blob list head belonging to the same event op
  EventBufData* list_head = main_data->m_next_blob;
  while (list_head != NULL &&
         list_head->m_event_op != blob_data->m_event_op)
    list_head = list_head->m_next_blob;
  if (list_head != NULL)
  {
    // op already has a head: insert this part right after it
    blob_data->m_next = list_head->m_next;
    list_head->m_next = blob_data;
  }
  else
  {
    // first part for this blob op: push a new head onto the blob list
    blob_data->m_next_blob = main_data->m_next_blob;
    main_data->m_next_blob = blob_data;
  }
  DBUG_VOID_RETURN_EVENT;
}
NdbEventOperationImpl *
NdbEventBuffer::move_data()
@ -1613,6 +1958,31 @@ NdbEventBuffer::free_list(EventBufData_list &list)
#endif
m_free_data_sz+= list.m_sz;
// free blobs XXX unacceptable performance, fix later
{
EventBufData* data = list.m_head;
while (1) {
while (data->m_next_blob != NULL) {
EventBufData* blob_head = data->m_next_blob;
data->m_next_blob = blob_head->m_next_blob;
blob_head->m_next_blob = NULL;
while (blob_head != NULL) {
EventBufData* blob_part = blob_head;
blob_head = blob_head->m_next;
blob_part->m_next = m_free_data;
m_free_data = blob_part;
#ifdef VM_TRACE
m_free_data_count++;
#endif
m_free_data_sz += blob_part->sz;
}
}
if (data == list.m_tail)
break;
data = data->m_next;
}
}
// list returned to m_free_data
new (&list) EventBufData_list;
}
@ -1648,6 +2018,17 @@ NdbEventBuffer::dropEventOperation(NdbEventOperation* tOp)
if (m_dropped_ev_op)
m_dropped_ev_op->m_prev= op;
m_dropped_ev_op= op;
// stop blob event ops
if (op->theMainOp == NULL)
{
NdbEventOperationImpl* tBlobOp = op->theBlobOpList;
while (tBlobOp != NULL)
{
tBlobOp->stop();
tBlobOp = tBlobOp->m_next;
}
}
// ToDo, take care of these to be deleted at the
// appropriate time, after we are sure that there
@ -1717,6 +2098,10 @@ send_report:
Uint32
EventBufData_hash::getpkhash(NdbEventOperationImpl* op, LinearSectionPtr ptr[3])
{
DBUG_ENTER_EVENT("EventBufData_hash::getpkhash");
DBUG_DUMP_EVENT("ah", (char*)ptr[0].p, ptr[0].sz << 2);
DBUG_DUMP_EVENT("pk", (char*)ptr[1].p, ptr[1].sz << 2);
const NdbTableImpl* tab = op->m_eventImpl->m_tableImpl;
// in all cases ptr[0] = pk ah.. ptr[1] = pk ad..
@ -1747,13 +2132,19 @@ EventBufData_hash::getpkhash(NdbEventOperationImpl* op, LinearSectionPtr ptr[3])
(*cs->coll->hash_sort)(cs, dptr + lb, len, &nr1, &nr2);
dptr += ((bytesize + 3) / 4) * 4;
}
return nr1;
DBUG_PRINT_EVENT("info", ("hash result=%08x", nr1));
DBUG_RETURN_EVENT(nr1);
}
// this is seldom invoked
bool
EventBufData_hash::getpkequal(NdbEventOperationImpl* op, LinearSectionPtr ptr1[3], LinearSectionPtr ptr2[3])
{
DBUG_ENTER_EVENT("EventBufData_hash::getpkequal");
DBUG_DUMP_EVENT("ah1", (char*)ptr1[0].p, ptr1[0].sz << 2);
DBUG_DUMP_EVENT("pk1", (char*)ptr1[1].p, ptr1[1].sz << 2);
DBUG_DUMP_EVENT("ah2", (char*)ptr2[0].p, ptr2[0].sz << 2);
DBUG_DUMP_EVENT("pk2", (char*)ptr2[1].p, ptr2[1].sz << 2);
const NdbTableImpl* tab = op->m_eventImpl->m_tableImpl;
Uint32 nkey = tab->m_noOfKeys;
@ -1763,6 +2154,8 @@ EventBufData_hash::getpkequal(NdbEventOperationImpl* op, LinearSectionPtr ptr1[3
const uchar* dptr1 = (uchar*)ptr1[1].p;
const uchar* dptr2 = (uchar*)ptr2[1].p;
bool equal = true;
while (nkey-- != 0)
{
AttributeHeader ah1(*hptr1++);
@ -1787,16 +2180,22 @@ EventBufData_hash::getpkequal(NdbEventOperationImpl* op, LinearSectionPtr ptr1[3
CHARSET_INFO* cs = col->m_cs ? col->m_cs : &my_charset_bin;
int res = (cs->coll->strnncollsp)(cs, dptr1 + lb1, len1, dptr2 + lb2, len2, false);
if (res != 0)
return false;
{
equal = false;
break;
}
dptr1 += ((bytesize1 + 3) / 4) * 4;
dptr2 += ((bytesize2 + 3) / 4) * 4;
}
return true;
DBUG_PRINT_EVENT("info", ("equal=%s", equal ? "true" : "false"));
DBUG_RETURN_EVENT(equal);
}
void
EventBufData_hash::search(Pos& hpos, NdbEventOperationImpl* op, LinearSectionPtr ptr[3])
{
DBUG_ENTER_EVENT("EventBufData_hash::search");
Uint32 pkhash = getpkhash(op, ptr);
Uint32 index = (op->m_oid ^ pkhash) % GCI_EVENT_HASH_SIZE;
EventBufData* data = m_hash[index];
@ -1811,6 +2210,8 @@ EventBufData_hash::search(Pos& hpos, NdbEventOperationImpl* op, LinearSectionPtr
hpos.index = index;
hpos.data = data;
hpos.pkhash = pkhash;
DBUG_PRINT_EVENT("info", ("search result=%p", data));
DBUG_VOID_RETURN_EVENT;
}
template class Vector<Gci_container>;

View file

@ -21,6 +21,7 @@
#include <signaldata/SumaImpl.hpp>
#include <transporter/TransporterDefinitions.hpp>
#include <NdbRecAttr.hpp>
#include <AttributeHeader.hpp>
#define NDB_EVENT_OP_MAGIC_NUMBER 0xA9F301B4
@ -35,9 +36,28 @@ struct EventBufData
LinearSectionPtr ptr[3];
unsigned sz;
NdbEventOperationImpl *m_event_op;
EventBufData *m_next; // Next wrt to global order
/*
* Blobs are stored in blob list (m_next_blob) where each entry
* is list of parts (m_next) in part number order.
*
* TODO order by part no and link for fast read and free_list
*/
EventBufData *m_next; // Next wrt to global order or Next blob part
EventBufData *m_next_blob; // First part in next blob
EventBufData *m_next_hash; // Next in per-GCI hash
Uint32 m_pkhash; // PK hash (without op) for fast compare
  // Get blob part number from blob data: the key-data word (section 1)
  // that follows the data of the first two attributes in section 0.
  // NOTE(review): presumably those first two attributes are the
  // main-table PK and the dist key, with the part number third --
  // confirm against the blob table schema.
  Uint32 get_blob_part_no() {
    assert(ptr[0].sz > 2);
    // skip the data of attributes 0 and 1 to reach the part-number word
    Uint32 pos = AttributeHeader(ptr[0].p[0]).getDataSize() +
                 AttributeHeader(ptr[0].p[1]).getDataSize();
    Uint32 no = ptr[1].p[pos];
    return no;
  }
};
class EventBufData_list
@ -70,7 +90,6 @@ EventBufData_list::~EventBufData_list()
{
}
inline
int EventBufData_list::is_empty()
{
@ -173,9 +192,13 @@ public:
NdbEventOperation::State getState();
int execute();
int execute_nolock();
int stop();
NdbRecAttr *getValue(const char *colName, char *aValue, int n);
NdbRecAttr *getValue(const NdbColumnImpl *, char *aValue, int n);
NdbBlob *getBlobHandle(const char *colName, int n);
NdbBlob *getBlobHandle(const NdbColumnImpl *, int n);
int readBlobParts(char* buf, NdbBlob* blob, Uint32 part, Uint32 count);
int receive_event();
Uint64 getGCI();
Uint64 getLatestGCI();
@ -199,6 +222,10 @@ public:
NdbRecAttr *theFirstDataAttrs[2];
NdbRecAttr *theCurrentDataAttrs[2];
NdbBlob* theBlobList;
NdbEventOperationImpl* theBlobOpList; // in main op, list of blob ops
NdbEventOperationImpl* theMainOp; // in blob op, the main op
NdbEventOperation::State m_state; /* note connection to mi_type */
Uint32 mi_type; /* should be == 0 if m_state != EO_EXECUTING
* else same as in EventImpl
@ -275,6 +302,11 @@ public:
int merge_data(const SubTableData * const sdata,
LinearSectionPtr ptr[3],
EventBufData* data);
int get_main_data(Gci_container* bucket,
EventBufData_hash::Pos& hpos,
EventBufData* blob_data);
void add_blob_data(EventBufData* main_data,
EventBufData* blob_data);
void free_list(EventBufData_list &list);

View file

@ -80,6 +80,7 @@ static const char* empty_string = "";
* 1300 - BACKUP
* 1400 - SUMA
* 1500 - LGMAN
* 1600 - TSMAN
* 4000 - API
* 4100 - ""
* 4200 - ""
@ -197,7 +198,8 @@ ErrorBundle ErrorCodes[] = {
{ 903, HA_ERR_INDEX_FILE_FULL, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
{ 904, HA_ERR_INDEX_FILE_FULL, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
{ 905, DMEC, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
{ 1601, HA_ERR_RECORD_FILE_FULL, IS, "Out extents, tablespace full" },
/**
* TimeoutExpired
*/

View file

@ -21,14 +21,7 @@
#include <my_sys.h>
#include <ndb_version.h>
#if NDB_VERSION_D < MAKE_VERSION(5, 1, 0)
#define version50
#else
#undef version50
#endif
// until rbr in 5.1
#undef version51rbr
// version >= 5.1 required
#if !defined(min) || !defined(max)
#define min(x, y) ((x) < (y) ? (x) : (y))
@ -57,11 +50,11 @@
* There are other -no-* options, each added to isolate a specific bug.
*
* There are 5 ways (ignoring NUL operand) to compose 2 ops:
* 5.0 bugs 5.1 bugs
*
* INS o DEL = NUL
* INS o UPD = INS type=INS
* DEL o INS = UPD type=INS type=INS
* UPD o DEL = DEL no event
* INS o UPD = INS
* DEL o INS = UPD
* UPD o DEL = DEL
* UPD o UPD = UPD
*/
@ -73,17 +66,19 @@ struct Opts {
uint maxpk;
my_bool no_blobs;
my_bool no_implicit_nulls;
my_bool no_missing_update;
my_bool no_multiops;
my_bool no_nulls;
my_bool one_blob;
const char* opstring;
uint seed;
my_bool separate_events;
uint tweak; // whatever's useful
my_bool use_table;
};
static Opts g_opts;
static const uint g_maxpk = 100;
static const uint g_maxpk = 1000;
static const uint g_maxopstringpart = 100;
static const char* g_opstringpart[g_maxopstringpart];
static uint g_opstringparts = 0;
@ -208,7 +203,9 @@ struct Col {
uint length;
uint size;
bool isblob() const {
return type == NdbDictionary::Column::Text;
return
type == NdbDictionary::Column::Text ||
type == NdbDictionary::Column::Blob;
}
};
@ -218,19 +215,21 @@ static Col g_col[] = {
{ 2, "seq", NdbDictionary::Column::Unsigned, false, false, 1, 4 },
{ 3, "cc1", NdbDictionary::Column::Char, false, true, g_charlen, g_charlen },
{ 4, "tx1", NdbDictionary::Column::Text, false, true, 0, 0 },
{ 5, "tx2", NdbDictionary::Column::Text, false, true, 0, 0 }
{ 5, "tx2", NdbDictionary::Column::Text, false, true, 0, 0 },
{ 6, "bl1", NdbDictionary::Column::Blob, false, true, 0, 0 } // tinyblob
};
static const uint g_maxcol = sizeof(g_col)/sizeof(g_col[0]);
static const uint g_blobcols = 3;
static uint
ncol()
{
uint n = g_maxcol;
if (g_opts.no_blobs)
n -= 2;
n -= g_blobcols;
else if (g_opts.one_blob)
n -= 1;
n -= (g_blobcols - 1);
return n;
}
@ -283,6 +282,11 @@ createtable()
col.setStripeSize(g_blobstripesize);
col.setCharset(cs);
break;
case NdbDictionary::Column::Blob:
col.setInlineSize(g_blobinlinesize);
col.setPartSize(0);
col.setStripeSize(0);
break;
default:
assert(false);
break;
@ -337,6 +341,7 @@ struct Data {
char cc1[g_charlen + 1];
Txt tx1;
Txt tx2;
Txt bl1;
Ptr ptr[g_maxcol];
int ind[g_maxcol]; // -1 = no data, 1 = NULL, 0 = not NULL
uint noop; // bit: omit in NdbOperation (implicit NULL INS or no UPD)
@ -347,14 +352,15 @@ struct Data {
memset(pk2, 0, sizeof(pk2));
seq = 0;
memset(cc1, 0, sizeof(cc1));
tx1.val = tx2.val = 0;
tx1.len = tx2.len = 0;
tx1.val = tx2.val = bl1.val = 0;
tx1.len = tx2.len = bl1.len = 0;
ptr[0].u32 = &pk1;
ptr[1].ch = pk2;
ptr[2].u32 = &seq;
ptr[3].ch = cc1;
ptr[4].txt = &tx1;
ptr[5].txt = &tx2;
ptr[6].txt = &bl1;
for (i = 0; i < g_maxcol; i++)
ind[i] = -1;
noop = 0;
@ -363,6 +369,7 @@ struct Data {
void free() {
delete [] tx1.val;
delete [] tx2.val;
delete [] bl1.val;
init();
}
};
@ -384,6 +391,7 @@ cmpcol(const Col& c, const Data& d1, const Data& d2)
return 1;
break;
case NdbDictionary::Column::Text:
case NdbDictionary::Column::Blob:
{
const Data::Txt& t1 = *d1.ptr[i].txt;
const Data::Txt& t2 = *d2.ptr[i].txt;
@ -434,6 +442,7 @@ operator<<(NdbOut& out, const Data& d)
}
break;
case NdbDictionary::Column::Text:
case NdbDictionary::Column::Blob:
{
Data::Txt& t = *d.ptr[i].txt;
bool first = true;
@ -712,6 +721,20 @@ checkop(const Op* op, Uint32& pk1)
if (! c.nullable) {
chkrc(ind0 <= 0 && ind1 <= 0);
}
if (c.isblob()) {
// blob values must be from allowed chars
int j;
for (j = 0; j < 2; j++) {
const Data& d = op->data[j];
if (d.ind[i] == 0) {
const Data::Txt& t = *d.ptr[i].txt;
int k;
for (k = 0; k < t.len; k++) {
chkrc(strchr(g_charval, t.val[k]) != 0);
}
}
}
}
}
return 0;
}
@ -849,9 +872,8 @@ createevent()
const Col& c = g_col[i];
evt.addEventColumn(c.name);
}
#ifdef version51rbr
evt.setReport(NdbDictionary::Event::ER_UPDATED);
evt.mergeEvents(! g_opts.separate_events);
#endif
if (g_dic->getEvent(evt.getName()) != 0)
chkdb(g_dic->dropEvent(evt.getName()) == 0);
chkdb(g_dic->createEvent(evt) == 0);
@ -875,14 +897,8 @@ static int
createeventop()
{
ll1("createeventop");
#ifdef version50
uint bsz = 10 * g_opts.maxops;
chkdb((g_evt_op = g_ndb->createEventOperation(g_evt->getName(), bsz)) != 0);
#else
chkdb((g_evt_op = g_ndb->createEventOperation(g_evt->getName())) != 0);
// available in gci merge changeset
g_evt_op->mergeEvents(! g_opts.separate_events); // not yet inherited
#endif
uint i;
for (i = 0; i < ncol(); i++) {
const Col& c = g_col[i];
@ -891,10 +907,8 @@ createeventop()
chkdb((g_ev_ra[0][i] = g_evt_op->getValue(c.name, (char*)d[0].ptr[i].v)) != 0);
chkdb((g_ev_ra[1][i] = g_evt_op->getPreValue(c.name, (char*)d[1].ptr[i].v)) != 0);
} else {
#ifdef version51rbr
chkdb((g_ev_bh[0][i] = g_evt_op->getBlobHandle(c.name)) != 0);
chkdb((g_ev_bh[1][i] = g_evt_op->getPreBlobHandle(c.name)) != 0);
#endif
}
}
return 0;
@ -909,10 +923,10 @@ dropeventop()
return 0;
}
// wait for event to be installed and for GCIs to pass
static int
waitgci() // wait for event to be installed and for at least 1 GCI to pass
waitgci(uint ngci)
{
const uint ngci = 3;
ll1("waitgci " << ngci);
Uint32 gci[2];
uint i = 0;
@ -976,7 +990,6 @@ scantab()
if (! c.isblob()) {
ind = ra[i]->isNULL();
} else {
#ifdef version51rbr
int ret;
ret = bh[i]->getDefined(ind);
assert(ret == 0);
@ -992,8 +1005,10 @@ scantab()
Uint32 len = t.len;
ret = bh[i]->readData(t.val, len);
assert(ret == 0 && len == t.len);
// to see the data, have to execute...
chkdb(g_con->execute(NoCommit) == 0);
assert(memchr(t.val, 'X', t.len) == 0);
}
#endif
}
assert(ind >= 0);
d0.ind[i] = ind;
@ -1042,7 +1057,7 @@ makedata(const Col& c, Data& d, Uint32 pk1, Op::Type t)
} else if (t == Op::INS && ! g_opts.no_implicit_nulls && c.nullable && urandom(10, 100)) {
d.noop |= (1 << i);
d.ind[i] = 1; // implicit NULL value is known
} else if (t == Op::UPD && urandom(10, 100)) {
} else if (t == Op::UPD && ! g_opts.no_missing_update && urandom(10, 100)) {
d.noop |= (1 << i);
d.ind[i] = -1; // fixed up in caller
} else if (! g_opts.no_nulls && c.nullable && urandom(10, 100)) {
@ -1060,6 +1075,8 @@ makedata(const Col& c, Data& d, Uint32 pk1, Op::Type t)
{
char* p = d.ptr[i].ch;
uint u = urandom(g_charlen);
if (u == 0)
u = urandom(g_charlen); // 2x bias for non-empty
uint j;
for (j = 0; j < g_charlen; j++) {
uint v = urandom(strlen(g_charval));
@ -1068,12 +1085,23 @@ makedata(const Col& c, Data& d, Uint32 pk1, Op::Type t)
}
break;
case NdbDictionary::Column::Text:
case NdbDictionary::Column::Blob:
{
const bool tinyblob = (c.type == NdbDictionary::Column::Blob);
Data::Txt& t = *d.ptr[i].txt;
uint u = urandom(g_maxblobsize);
delete [] t.val;
t.val = 0;
if (g_opts.tweak & 1) {
uint u = g_blobinlinesize + (tinyblob ? 0 : g_blobpartsize);
uint v = (g_opts.tweak & 2) ? 0 : urandom(strlen(g_charval));
t.val = new char [u];
t.len = u;
memset(t.val, g_charval[v], u);
break;
}
uint u = urandom(tinyblob ? g_blobinlinesize : g_maxblobsize);
u = urandom(u); // 4x bias for smaller blobs
u = urandom(u);
delete [] t.val;
t.val = new char [u];
t.len = u;
uint j = 0;
@ -1134,9 +1162,15 @@ makeops()
{
ll1("makeops");
Uint32 pk1 = 0;
while (g_usedops < g_opts.maxops && pk1 < g_opts.maxpk) {
if (g_opts.opstring == 0)
while (1) {
if (g_opts.opstring == 0) {
if (g_usedops >= g_opts.maxops) // use up ops
break;
pk1 = urandom(g_opts.maxpk);
} else {
if (pk1 >= g_opts.maxpk) // use up pks
break;
}
ll2("makeops: pk1=" << pk1);
// total op on the pk so far
// optype either NUL=initial/deleted or INS=created
@ -1465,7 +1499,7 @@ matchevent(Op* ev)
}
if (tmpok) {
ok = gci_op->match = true;
ll2("===: match");
ll2("match");
}
}
pos++;
@ -1555,7 +1589,6 @@ geteventdata()
NdbRecAttr* ra = g_ev_ra[j][i];
ind = ra->isNULL();
} else {
#ifdef version51rbr
NdbBlob* bh = g_ev_bh[j][i];
ret = bh->getDefined(ind);
assert(ret == 0);
@ -1572,7 +1605,6 @@ geteventdata()
ret = bh->readData(t.val, len);
assert(ret == 0 && len == t.len);
}
#endif
}
d[j].ind[i] = ind;
}
@ -1585,38 +1617,22 @@ runevents()
ll1("runevents");
uint mspoll = 1000;
uint npoll = 6; // strangely long delay
ll1("poll " << npoll);
while (npoll != 0) {
npoll--;
int ret;
ll1("poll");
ret = g_ndb->pollEvents(mspoll);
if (ret <= 0)
continue;
while (1) {
g_rec_ev->init(Op::EV);
#ifdef version50
int overrun = g_opts.maxops;
chkdb((ret = g_evt_op->next(&overrun)) >= 0);
chkrc(overrun == 0);
if (ret == 0)
break;
#else
NdbEventOperation* tmp_op = g_ndb->nextEvent();
if (tmp_op == 0)
break;
reqrc(g_evt_op == tmp_op);
#endif
chkrc(seteventtype(g_rec_ev, g_evt_op->getEventType()) == 0);
geteventdata();
g_rec_ev->gci = g_evt_op->getGCI();
#ifdef version50
// fix to match 5.1
if (g_rec_ev->type == Op::UPD) {
Uint32 pk1 = g_rec_ev->data[0].pk1;
makedata(getcol("pk1"), g_rec_ev->data[1], pk1, Op::UPD);
makedata(getcol("pk2"), g_rec_ev->data[1], pk1, Op::UPD);
}
#endif
// get indicators and blob value
ll2("runevents: EVT: " << *g_rec_ev);
// check basic sanity
@ -1667,7 +1683,7 @@ runtest()
chkrc(createtable() == 0);
chkrc(createevent() == 0);
for (g_loop = 0; g_opts.loop == 0 || g_loop < g_opts.loop; g_loop++) {
ll0("loop " << g_loop);
ll0("=== loop " << g_loop << " ===");
setseed(g_loop);
resetmem();
chkrc(scantab() == 0); // alternative: save tot_op for loop > 0
@ -1675,7 +1691,7 @@ runtest()
g_rec_ev = getop(Op::EV);
chkrc(createeventop() == 0);
chkdb(g_evt_op->execute() == 0);
chkrc(waitgci() == 0);
chkrc(waitgci(3) == 0);
chkrc(runops() == 0);
if (! g_opts.separate_events)
chkrc(mergeops() == 0);
@ -1685,6 +1701,8 @@ runtest()
chkrc(matchevents() == 0);
chkrc(matchops() == 0);
chkrc(dropeventop() == 0);
// time erases everything..
chkrc(waitgci(1) == 0);
}
chkrc(dropevent() == 0);
chkrc(droptable() == 0);
@ -1703,41 +1721,48 @@ my_long_options[] =
{ "loglevel", 1002, "Logging level in this program (default 0)",
(gptr*)&g_opts.loglevel, (gptr*)&g_opts.loglevel, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "loop", 1003, "Number of test loops (default 2, 0=forever)",
{ "loop", 1003, "Number of test loops (default 3, 0=forever)",
(gptr*)&g_opts.loop, (gptr*)&g_opts.loop, 0,
GET_INT, REQUIRED_ARG, 2, 0, 0, 0, 0, 0 },
GET_INT, REQUIRED_ARG, 3, 0, 0, 0, 0, 0 },
{ "maxops", 1004, "Approx number of PK operations (default 1000)",
(gptr*)&g_opts.maxops, (gptr*)&g_opts.maxops, 0,
GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 },
{ "maxpk", 1005, "Number of different PK values (default 10)",
(gptr*)&g_opts.maxpk, (gptr*)&g_opts.maxpk, 0,
GET_UINT, REQUIRED_ARG, 10, 1, g_maxpk, 0, 0, 0 },
GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 },
{ "no-blobs", 1006, "Omit blob attributes (5.0: true)",
(gptr*)&g_opts.no_blobs, (gptr*)&g_opts.no_blobs, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-implicit-nulls", 1007, "Insert must include NULL values explicitly",
{ "no-implicit-nulls", 1007, "Insert must include all attrs"
" i.e. no implicit NULLs",
(gptr*)&g_opts.no_implicit_nulls, (gptr*)&g_opts.no_implicit_nulls, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-multiops", 1008, "Allow only 1 operation per commit",
{ "no-missing-update", 1008, "Update must include all non-PK attrs",
(gptr*)&g_opts.no_missing_update, (gptr*)&g_opts.no_missing_update, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-multiops", 1009, "Allow only 1 operation per commit",
(gptr*)&g_opts.no_multiops, (gptr*)&g_opts.no_multiops, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-nulls", 1009, "Create no NULL values",
{ "no-nulls", 1010, "Create no NULL values",
(gptr*)&g_opts.no_nulls, (gptr*)&g_opts.no_nulls, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "one-blob", 1010, "Only one blob attribute (defautt 2)",
{ "one-blob", 1011, "Only one blob attribute (default 2)",
(gptr*)&g_opts.one_blob, (gptr*)&g_opts.one_blob, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "opstring", 1011, "Operations to run e.g. idiucdc (c is commit) or"
{ "opstring", 1012, "Operations to run e.g. idiucdc (c is commit) or"
" iuuc:uudc (the : separates loops)",
(gptr*)&g_opts.opstring, (gptr*)&g_opts.opstring, 0,
GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "seed", 1012, "Random seed (0=loop number, default -1=random)",
{ "seed", 1013, "Random seed (0=loop number, default -1=random)",
(gptr*)&g_opts.seed, (gptr*)&g_opts.seed, 0,
GET_INT, REQUIRED_ARG, -1, 0, 0, 0, 0, 0 },
{ "separate-events", 1013, "Do not combine events per GCI (5.0: true)",
{ "separate-events", 1014, "Do not combine events per GCI (5.0: true)",
(gptr*)&g_opts.separate_events, (gptr*)&g_opts.separate_events, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "use-table", 1014, "Use existing table 'tem1'",
{ "tweak", 1015, "Whatever the source says",
(gptr*)&g_opts.tweak, (gptr*)&g_opts.tweak, 0,
GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "use-table", 1016, "Use existing table 'tem1'",
(gptr*)&g_opts.use_table, (gptr*)&g_opts.use_table, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0,
@ -1754,9 +1779,10 @@ usage()
static int
checkopts()
{
#ifdef version50
g_opts.separate_events = true;
#endif
if (g_opts.maxpk > g_maxpk) {
ll0("setting maxpk to " << g_maxpk);
g_opts.maxpk = g_maxpk;
}
if (g_opts.separate_events) {
g_opts.no_blobs = true;
}