WL #2747: Fix backup and restore to work for user-defined partitioned tables in NDB


include/my_sys.h:
  Move packfrm and unpackfrm to mysys
mysql-test/r/ndb_restore.result:
  New test cases
mysql-test/t/ndb_restore.test:
  New test cases
mysys/my_compress.c:
  Moved packfrm and unpackfrm to mysys
sql/ha_ndbcluster.cc:
  Store the value of the partition function in a hidden field for
  user-defined partitioning in NDB, to handle restore and later
  on-line reorganisation of partitions.
  To save space, the stored values are limited to 32 bits
sql/ha_partition.cc:
  Use new get_partition_id interface
sql/handler.h:
  Use new get_partition_id interface
sql/mysql_priv.h:
  packfrm/unpackfrm declarations moved to mysys
sql/mysqld.cc:
  Minor
sql/opt_range.cc:
  New get_partition_id interface
sql/sql_partition.cc:
  New get_partition_id interface
  Fix error checks for engine specifications in ALTER TABLE
  Moved packfrm and unpackfrm to mysys
sql/sql_table.cc:
  Fixed debug printouts
storage/ndb/include/kernel/ndb_limits.h:
  New constant
storage/ndb/include/kernel/signaldata/DictTabInfo.hpp:
  New table description item
storage/ndb/include/ndb_version.h.in:
  New version specific constant
storage/ndb/include/ndbapi/NdbDictionary.hpp:
  New item in table descriptions
storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp:
  New item in table descriptions
storage/ndb/src/kernel/blocks/backup/Backup.cpp:
  Write fragment id in backup's log entry
storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp:
  Write fragment id in backup's log entry
storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp:
  New item in table description
storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp:
  New item in table description
storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp:
  Moved constant
storage/ndb/src/ndbapi/NdbDictionary.cpp:
  New item in table description
storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp:
  New item in table description
storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp:
  New item in table description
storage/ndb/tools/Makefile.am:
  Compress library needed for ndb_restore
storage/ndb/tools/restore/Restore.cpp:
  Handle fragment id and also handle backups from older versions
storage/ndb/tools/restore/Restore.hpp:
  Use fragment id
storage/ndb/tools/restore/consumer.hpp:
  Use fragment id
storage/ndb/tools/restore/consumer_printer.cpp:
  Use fragment id
storage/ndb/tools/restore/consumer_printer.hpp:
  Use fragment id
storage/ndb/tools/restore/consumer_restore.cpp:
  Code to map node groups when the new cluster has a different set of
  node groups from the original cluster
  A very simple search-and-replace parser for the partition syntax in
  the frm file
  Set the partition id properly using the fragment id and the hidden
  field in tables
storage/ndb/tools/restore/consumer_restore.hpp:
  Changed function signatures and added a new one for mapping node groups
storage/ndb/tools/restore/consumer_restorem.cpp:
  Use fragment id
storage/ndb/tools/restore/restore_main.cpp:
  New parameter to set the node group map, and a parser for this
  parameter (see the example below)
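
  For illustration, a node group map entry as exercised by the new test
  cases below is a parenthesised (source,destination) pair; the exact
  syntax is inferred from the tests rather than from documentation:

    ndb_restore --no-defaults -b 2 -n 1 -m -r \
      --ndb-nodegroup_map '(0,0)' --print --print_meta BACKUP/BACKUP-2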
Author: unknown
Date: 2006-01-17 09:25:12 +01:00
Commit: 6f83ed91f3
36 changed files with 1616 additions and 307 deletions

include/my_sys.h

@ -806,6 +806,9 @@ extern void print_defaults(const char *conf_file, const char **groups);
extern my_bool my_compress(byte *, ulong *, ulong *);
extern my_bool my_uncompress(byte *, ulong *, ulong *);
extern byte *my_compress_alloc(const byte *packet, ulong *len, ulong *complen);
extern int packfrm(const void *, uint, const void **, uint *);
extern int unpackfrm(const void **, uint *, const void *);
extern ha_checksum my_checksum(ha_checksum crc, const byte *mem, uint count);
extern uint my_bit_log2(ulong value);
extern uint my_count_bits(ulonglong v);

mysql-test/r/ndb_restore.result

@ -225,6 +225,223 @@ from (select * from t9 union
select * from t9_c) a;
count(*)
3
ALTER TABLE t1_c
PARTITION BY RANGE (`capgoaledatta`)
(PARTITION p0 VALUES LESS THAN MAXVALUE);
ALTER TABLE t2_c
PARTITION BY LIST(`capgotod`)
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6));
ALTER TABLE t3_c
PARTITION BY HASH (`CapGoaledatta`);
ALTER TABLE t5_c
PARTITION BY HASH (`capfa`)
PARTITIONS 4;
ALTER TABLE t6_c
PARTITION BY LINEAR HASH (`relatta`)
PARTITIONS 4;
ALTER TABLE t7_c
PARTITION BY LINEAR KEY (`dardtestard`);
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
select count(*) from t1;
count(*)
5
select count(*) from t1_c;
count(*)
5
select count(*)
from (select * from t1 union
select * from t1_c) a;
count(*)
5
select count(*) from t2;
count(*)
6
select count(*) from t2_c;
count(*)
6
select count(*)
from (select * from t2 union
select * from t2_c) a;
count(*)
6
select count(*) from t3;
count(*)
4
select count(*) from t3_c;
count(*)
4
select count(*)
from (select * from t3 union
select * from t3_c) a;
count(*)
4
select count(*) from t4;
count(*)
22
select count(*) from t4_c;
count(*)
22
select count(*)
from (select * from t4 union
select * from t4_c) a;
count(*)
22
select count(*) from t5;
count(*)
3
select count(*) from t5_c;
count(*)
3
select count(*)
from (select * from t5 union
select * from t5_c) a;
count(*)
3
select count(*) from t6;
count(*)
8
select count(*) from t6_c;
count(*)
8
select count(*)
from (select * from t6 union
select * from t6_c) a;
count(*)
8
select count(*) from t7;
count(*)
5
select count(*) from t7_c;
count(*)
5
select count(*)
from (select * from t7 union
select * from t7_c) a;
count(*)
5
select count(*) from t8;
count(*)
3
select count(*) from t8_c;
count(*)
3
select count(*)
from (select * from t8 union
select * from t8_c) a;
count(*)
3
select count(*) from t9;
count(*)
3
select count(*) from t9_c;
count(*)
3
select count(*)
from (select * from t9 union
select * from t9_c) a;
count(*)
3
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
select count(*) from t1;
count(*)
5
select count(*) from t1_c;
count(*)
5
select count(*)
from (select * from t1 union
select * from t1_c) a;
count(*)
5
select count(*) from t2;
count(*)
6
select count(*) from t2_c;
count(*)
6
select count(*)
from (select * from t2 union
select * from t2_c) a;
count(*)
6
select count(*) from t3;
count(*)
4
select count(*) from t3_c;
count(*)
4
select count(*)
from (select * from t3 union
select * from t3_c) a;
count(*)
4
select count(*) from t4;
count(*)
22
select count(*) from t4_c;
count(*)
22
select count(*)
from (select * from t4 union
select * from t4_c) a;
count(*)
22
select count(*) from t5;
count(*)
3
select count(*) from t5_c;
count(*)
3
select count(*)
from (select * from t5 union
select * from t5_c) a;
count(*)
3
select count(*) from t6;
count(*)
8
select count(*) from t6_c;
count(*)
8
select count(*)
from (select * from t6 union
select * from t6_c) a;
count(*)
8
select count(*) from t7;
count(*)
5
select count(*) from t7_c;
count(*)
5
select count(*)
from (select * from t7 union
select * from t7_c) a;
count(*)
5
select count(*) from t8;
count(*)
3
select count(*) from t8_c;
count(*)
3
select count(*)
from (select * from t8 union
select * from t8_c) a;
count(*)
3
select count(*) from t9;
count(*)
3
select count(*) from t9_c;
count(*)
3
select count(*)
from (select * from t9 union
select * from t9_c) a;
count(*)
3
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
520093696,1
520093696,2

mysql-test/t/ndb_restore.test

@ -205,6 +205,152 @@ select count(*)
from (select * from t9 union
select * from t9_c) a;
#
# Try Partitioned tables as well
#
ALTER TABLE t1_c
PARTITION BY RANGE (`capgoaledatta`)
(PARTITION p0 VALUES LESS THAN MAXVALUE);
ALTER TABLE t2_c
PARTITION BY LIST(`capgotod`)
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6));
ALTER TABLE t3_c
PARTITION BY HASH (`CapGoaledatta`);
ALTER TABLE t5_c
PARTITION BY HASH (`capfa`)
PARTITIONS 4;
ALTER TABLE t6_c
PARTITION BY LINEAR HASH (`relatta`)
PARTITIONS 4;
ALTER TABLE t7_c
PARTITION BY LINEAR KEY (`dardtestard`);
--exec $NDB_MGM --no-defaults -e "start backup" >> $NDB_TOOLS_OUTPUT
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
select count(*) from t1;
select count(*) from t1_c;
select count(*)
from (select * from t1 union
select * from t1_c) a;
select count(*) from t2;
select count(*) from t2_c;
select count(*)
from (select * from t2 union
select * from t2_c) a;
select count(*) from t3;
select count(*) from t3_c;
select count(*)
from (select * from t3 union
select * from t3_c) a;
select count(*) from t4;
select count(*) from t4_c;
select count(*)
from (select * from t4 union
select * from t4_c) a;
select count(*) from t5;
select count(*) from t5_c;
select count(*)
from (select * from t5 union
select * from t5_c) a;
select count(*) from t6;
select count(*) from t6_c;
select count(*)
from (select * from t6 union
select * from t6_c) a;
select count(*) from t7;
select count(*) from t7_c;
select count(*)
from (select * from t7 union
select * from t7_c) a;
select count(*) from t8;
select count(*) from t8_c;
select count(*)
from (select * from t8 union
select * from t8_c) a;
select count(*) from t9;
select count(*) from t9_c;
select count(*)
from (select * from t9 union
select * from t9_c) a;
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
select count(*) from t1;
select count(*) from t1_c;
select count(*)
from (select * from t1 union
select * from t1_c) a;
select count(*) from t2;
select count(*) from t2_c;
select count(*)
from (select * from t2 union
select * from t2_c) a;
select count(*) from t3;
select count(*) from t3_c;
select count(*)
from (select * from t3 union
select * from t3_c) a;
select count(*) from t4;
select count(*) from t4_c;
select count(*)
from (select * from t4 union
select * from t4_c) a;
select count(*) from t5;
select count(*) from t5_c;
select count(*)
from (select * from t5 union
select * from t5_c) a;
select count(*) from t6;
select count(*) from t6_c;
select count(*)
from (select * from t6 union
select * from t6_c) a;
select count(*) from t7;
select count(*) from t7_c;
select count(*)
from (select * from t7 union
select * from t7_c) a;
select count(*) from t8;
select count(*) from t8_c;
select count(*)
from (select * from t8 union
select * from t8_c) a;
select count(*) from t9;
select count(*) from t9_c;
select count(*)
from (select * from t9 union
select * from t9_c) a;
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
--error 134
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,1)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
#
# Cleanup
#

mysys/my_compress.c

@ -95,4 +95,132 @@ my_bool my_uncompress (byte *packet, ulong *len, ulong *complen)
}
DBUG_RETURN(0);
}
/*
Internal representation of the frm blob
*/
struct frm_blob_header
{
uint ver; /* Version of header */
uint orglen; /* Original length of compressed data */
uint complen; /* Compressed length of data, 0=uncompressed */
};
struct frm_blob_struct
{
struct frm_blob_header head;
char data[1];
};
/*
packfrm is a method used to compress the frm file for storage in a
handler. This method was developed for the NDB handler and has been
moved here to serve other uses as well.
SYNOPSIS
packfrm()
data Data reference to frm file data
len Length of frm file data
out:pack_data Reference to the pointer to the packed frm data
out:pack_len Length of packed frm file data
RETURN VALUES
0 Success
>0 Failure
*/
int packfrm(const void *data, uint len,
const void **pack_data, uint *pack_len)
{
int error;
ulong org_len, comp_len;
uint blob_len;
struct frm_blob_struct *blob;
DBUG_ENTER("packfrm");
DBUG_PRINT("enter", ("data: %x, len: %d", data, len));
error= 1;
org_len= len;
if (my_compress((byte*)data, &org_len, &comp_len))
goto err;
DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len));
DBUG_DUMP("compressed", (char*)data, org_len);
error= 2;
blob_len= sizeof(struct frm_blob_header)+org_len;
if (!(blob= (struct frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME))))
goto err;
// Store compressed blob in machine independent format
int4store((char*)(&blob->head.ver), 1);
int4store((char*)(&blob->head.orglen), comp_len);
int4store((char*)(&blob->head.complen), org_len);
// Copy frm data into blob, already in machine independent format
memcpy(blob->data, data, org_len);
*pack_data= blob;
*pack_len= blob_len;
error= 0;
DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len));
err:
DBUG_RETURN(error);
}
/*
unpackfrm is a method used to decompress the frm file received from a
handler. This method was developed for the NDB handler and has been
moved here to serve other uses, such as other clustered storage engines.
SYNOPSIS
unpackfrm()
pack_data Data reference to packed frm file data
out:unpack_data Reference to the pointer to the unpacked frm data
out:unpack_len Length of unpacked frm file data
RETURN VALUES
0 Success
>0 Failure
*/
int unpackfrm(const void **unpack_data, uint *unpack_len,
const void *pack_data)
{
const struct frm_blob_struct *blob= (struct frm_blob_struct*)pack_data;
byte *data;
ulong complen, orglen, ver;
DBUG_ENTER("unpackfrm");
DBUG_PRINT("enter", ("pack_data: %x", pack_data));
complen= uint4korr((char*)&blob->head.complen);
orglen= uint4korr((char*)&blob->head.orglen);
ver= uint4korr((char*)&blob->head.ver);
DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d",
ver,complen,orglen));
DBUG_DUMP("blob->data", (char*) blob->data, complen);
if (ver != 1)
DBUG_RETURN(1);
if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
DBUG_RETURN(2);
memcpy(data, blob->data, complen);
if (my_uncompress(data, &complen, &orglen))
{
my_free((char*)data, MYF(0));
DBUG_RETURN(3);
}
*unpack_data= data;
*unpack_len= complen;
DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len));
DBUG_RETURN(0);
}
#endif /* HAVE_COMPRESS */
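
A minimal usage sketch of the moved pair, assuming the caller owns an frm
image in memory (frm_data and frm_len are hypothetical names):

  const void *packed;
  uint packed_len;
  if (!packfrm(frm_data, frm_len, &packed, &packed_len))
  {
    const void *unpacked;
    uint unpacked_len;
    if (!unpackfrm(&unpacked, &unpacked_len, packed))
    {
      /* unpacked now holds unpacked_len == frm_len bytes equal to frm_data */
      my_free((char*)unpacked, MYF(0));
    }
    my_free((char*)packed, MYF(0));
  }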

sql/ha_ndbcluster.cc

@ -1688,7 +1688,9 @@ int ha_ndbcluster::peek_row(const byte *record)
{
uint32 part_id;
int error;
if ((error= m_part_info->get_partition_id(m_part_info, &part_id)))
longlong func_value;
if ((error= m_part_info->get_partition_id(m_part_info, &part_id,
&func_value)))
{
DBUG_RETURN(error);
}
@ -2146,10 +2148,10 @@ int ha_ndbcluster::write_row(byte *record)
NdbOperation *op;
int res;
THD *thd= current_thd;
longlong func_value= 0;
DBUG_ENTER("ha_ndbcluster::write_row");
m_write_op= TRUE;
DBUG_ENTER("write_row");
if (!m_use_write && m_ignore_dup_key && table_share->primary_key != MAX_KEY)
{
int peek_res= peek_row(record);
@ -2179,7 +2181,8 @@ int ha_ndbcluster::write_row(byte *record)
{
uint32 part_id;
int error;
if ((error= m_part_info->get_partition_id(m_part_info, &part_id)))
if ((error= m_part_info->get_partition_id(m_part_info, &part_id,
&func_value)))
{
DBUG_RETURN(error);
}
@ -2235,6 +2238,22 @@ int ha_ndbcluster::write_row(byte *record)
}
}
if (m_use_partition_function)
{
/*
We need to store the partition function's value in NDB, since the
NDB kernel doesn't have easy access to the function needed to
calculate that value.
*/
if (func_value >= INT_MAX32)
func_value= INT_MAX32;
uint32 part_func_value= (uint32)func_value;
uint no_fields= table_share->fields;
if (table_share->primary_key == MAX_KEY)
no_fields++;
op->setValue(no_fields, part_func_value);
}
m_rows_changed++;
/*
@ -2346,6 +2365,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
uint i;
uint32 old_part_id= 0, new_part_id= 0;
int error;
longlong func_value;
DBUG_ENTER("update_row");
m_write_op= TRUE;
@ -2358,7 +2378,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (m_use_partition_function &&
(error= get_parts_for_update(old_data, new_data, table->record[0],
m_part_info, &old_part_id, &new_part_id)))
m_part_info, &old_part_id, &new_part_id,
&func_value)))
{
DBUG_RETURN(error);
}
@ -2474,6 +2495,16 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
ERR_RETURN(op->getNdbError());
}
if (m_use_partition_function)
{
if (func_value >= INT_MAX32)
func_value= INT_MAX32;
uint32 part_func_value= (uint32)func_value;
uint no_fields= table_share->fields;
if (table_share->primary_key == MAX_KEY)
no_fields++;
op->setValue(no_fields, part_func_value);
}
// Execute update operation
if (!cursor && execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
@ -8871,11 +8902,16 @@ int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
for (i= 0; i < part_info->no_parts; i++)
{
longlong range_val= part_info->range_int_array[i];
if (range_val < INT_MIN32 || range_val > INT_MAX32)
if (range_val < INT_MIN32 || range_val >= INT_MAX32)
{
my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
error= 1;
goto error;
if ((i != part_info->no_parts - 1) ||
(range_val != LONGLONG_MAX))
{
my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
error= 1;
goto error;
}
range_val= INT_MAX32;
}
range_data[i]= (int32)range_val;
}
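A worked example of the relaxed check above (assumed semantics: only a
final MAXVALUE partition may reach NDB's 32-bit range bound):

  /*
    PARTITION BY RANGE (...)
      (PARTITION p0 VALUES LESS THAN (10),
       PARTITION p1 VALUES LESS THAN MAXVALUE)
    range_int_array = { 10, LONGLONG_MAX }
    -> range_data   = { 10, INT_MAX32 }   // MAXVALUE clamped to 32 bits
    A non-final partition with a bound >= INT_MAX32 still fails with
    ER_LIMITED_PART_RANGE.
  */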
@ -8966,18 +9002,37 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
col->setPartitionKey(TRUE);
}
}
else if (part_info->part_type == RANGE_PARTITION)
else
{
if ((error= set_range_data((void*)tab, part_info)))
/*
Create a shadow field for those tables that have user defined
partitioning. This field stores the value of the partition
function such that NDB can handle reorganisations of the data
even when the MySQL Server isn't available to assist with
calculation of the partition function value.
*/
NDBCOL col;
DBUG_PRINT("info", ("Generating partition func value field"));
col.setName("$PART_FUNC_VALUE");
col.setType(NdbDictionary::Column::Int);
col.setLength(1);
col.setNullable(FALSE);
col.setPrimaryKey(FALSE);
col.setAutoIncrement(FALSE);
tab->addColumn(col);
if (part_info->part_type == RANGE_PARTITION)
{
DBUG_RETURN(error);
if ((error= set_range_data((void*)tab, part_info)))
{
DBUG_RETURN(error);
}
}
}
else if (part_info->part_type == LIST_PARTITION)
{
if ((error= set_list_data((void*)tab, part_info)))
else if (part_info->part_type == LIST_PARTITION)
{
DBUG_RETURN(error);
if ((error= set_list_data((void*)tab, part_info)))
{
DBUG_RETURN(error);
}
}
}
tab->setFragmentType(ftype);
@ -9012,6 +9067,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
first= FALSE;
} while (++i < part_info->no_parts);
tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions);
tab->setLinearFlag(part_info->linear_hash_ind);
tab->setMaxRows(table->s->max_rows);
tab->setTablespaceNames(ts_names, fd_index*sizeof(char*));
tab->setFragmentCount(fd_index);
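The write_row/update_row hunks above locate the shadow column the same
way; a small sketch of that rule (function name hypothetical):

  /*
    User columns occupy indexes 0..fields-1; a hidden primary key, when
    present (table_share->primary_key == MAX_KEY), takes the next slot,
    and $PART_FUNC_VALUE is stored right after it.
  */
  static uint part_func_value_index(const TABLE_SHARE *share)
  {
    uint no_fields= share->fields;
    if (share->primary_key == MAX_KEY)
      no_fields++;                    /* skip hidden primary key */
    return no_fields;                 /* index passed to op->setValue() */
  }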

sql/ha_partition.cc

@ -1543,6 +1543,7 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
{
uint reorg_part= 0;
int result= 0;
longlong func_value;
DBUG_ENTER("ha_partition::copy_partitions");
while (reorg_part < m_reorged_parts)
@ -1568,7 +1569,8 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
break;
}
/* Found record to insert into new handler */
if (m_part_info->get_partition_id(m_part_info, &new_part))
if (m_part_info->get_partition_id(m_part_info, &new_part,
&func_value))
{
/*
This record is in the original table but will not be in the new
@ -2593,6 +2595,7 @@ int ha_partition::write_row(byte * buf)
{
uint32 part_id;
int error;
longlong func_value;
#ifdef NOT_NEEDED
byte *rec0= m_rec0;
#endif
@ -2602,12 +2605,14 @@ int ha_partition::write_row(byte * buf)
#ifdef NOT_NEEDED
if (likely(buf == rec0))
#endif
error= m_part_info->get_partition_id(m_part_info, &part_id);
error= m_part_info->get_partition_id(m_part_info, &part_id,
&func_value);
#ifdef NOT_NEEDED
else
{
set_field_ptr(m_part_field_array, buf, rec0);
error= m_part_info->get_partition_id(m_part_info, &part_id);
error= m_part_info->get_partition_id(m_part_info, &part_id,
&func_value);
set_field_ptr(m_part_field_array, rec0, buf);
}
#endif
@ -2654,10 +2659,12 @@ int ha_partition::update_row(const byte *old_data, byte *new_data)
{
uint32 new_part_id, old_part_id;
int error;
longlong func_value;
DBUG_ENTER("ha_partition::update_row");
if ((error= get_parts_for_update(old_data, new_data, table->record[0],
m_part_info, &old_part_id, &new_part_id)))
m_part_info, &old_part_id, &new_part_id,
&func_value)))
{
DBUG_RETURN(error);
}

sql/handler.h

@ -695,7 +695,8 @@ typedef struct {
class partition_info;
typedef int (*get_part_id_func)(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
class partition_info :public Sql_alloc {
@ -957,7 +958,8 @@ bool set_up_defaults_for_partitioning(partition_info *part_info,
handler *get_ha_partition(partition_info *part_info);
int get_parts_for_update(const byte *old_data, byte *new_data,
const byte *rec0, partition_info *part_info,
uint32 *old_part_id, uint32 *new_part_id);
uint32 *old_part_id, uint32 *new_part_id,
longlong *func_value);
int get_part_for_delete(const byte *buf, const byte *rec0,
partition_info *part_info, uint32 *part_id);
bool check_partition_info(partition_info *part_info,handlerton **eng_type,

sql/mysql_priv.h

@ -1086,10 +1086,6 @@ typedef struct st_lock_param_type
} ALTER_PARTITION_PARAM_TYPE;
void mem_alloc_error(size_t size);
int packfrm(const void *data, uint len,
const void **pack_data, uint *pack_len);
int unpackfrm(const void **unpack_data, uint *unpack_len,
const void *pack_data);
#define WFRM_INITIAL_WRITE 1
#define WFRM_CREATE_HANDLER_FILES 2
#define WFRM_PACK_FRM 4

sql/mysqld.cc

@ -408,6 +408,7 @@ extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
extern long berkeley_lock_scan_time;
extern TYPELIB berkeley_lock_typelib;
#endif
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
const char *opt_ndbcluster_connectstring= 0;
const char *opt_ndb_connectstring= 0;

sql/opt_range.cc

@ -2698,8 +2698,10 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
DBUG_EXECUTE("info", dbug_print_onepoint_range(ppar->arg_stack,
ppar->part_fields););
uint32 part_id;
longlong func_value;
/* then find in which partition the {const1, ...,constN} tuple goes */
if (ppar->get_top_partition_id_func(ppar->part_info, &part_id))
if (ppar->get_top_partition_id_func(ppar->part_info, &part_id,
&func_value))
{
res= 0; /* No satisfying partitions */
goto pop_and_go_right;

sql/sql_partition.cc

@ -63,33 +63,47 @@ static const char *comma_str= ",";
static char buff[22];
int get_partition_id_list(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_range(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_key_nosub(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_linear_hash_nosub(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_linear_key_nosub(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_range_sub_hash(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_range_sub_key(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_range_sub_linear_hash(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_range_sub_linear_key(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_list_sub_hash(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_list_sub_key(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_list_sub_linear_hash(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
int get_partition_id_list_sub_linear_key(partition_info *part_info,
uint32 *part_id);
uint32 *part_id,
longlong *func_value);
uint32 get_partition_id_hash_sub(partition_info *part_info);
uint32 get_partition_id_key_sub(partition_info *part_info);
uint32 get_partition_id_linear_hash_sub(partition_info *part_info);
@ -327,15 +341,18 @@ bool check_reorganise_list(partition_info *new_part_info,
int get_parts_for_update(const byte *old_data, byte *new_data,
const byte *rec0, partition_info *part_info,
uint32 *old_part_id, uint32 *new_part_id)
uint32 *old_part_id, uint32 *new_part_id,
longlong *new_func_value)
{
Field **part_field_array= part_info->full_part_field_array;
int error;
longlong old_func_value;
DBUG_ENTER("get_parts_for_update");
DBUG_ASSERT(new_data == rec0);
set_field_ptr(part_field_array, old_data, rec0);
error= part_info->get_partition_id(part_info, old_part_id);
error= part_info->get_partition_id(part_info, old_part_id,
&old_func_value);
set_field_ptr(part_field_array, rec0, old_data);
if (unlikely(error)) // Should never happen
{
@ -346,7 +363,9 @@ int get_parts_for_update(const byte *old_data, byte *new_data,
if (new_data == rec0)
#endif
{
if (unlikely(error= part_info->get_partition_id(part_info,new_part_id)))
if (unlikely(error= part_info->get_partition_id(part_info,
new_part_id,
new_func_value)))
{
DBUG_RETURN(error);
}
@ -360,7 +379,8 @@ int get_parts_for_update(const byte *old_data, byte *new_data,
condition is false in one test situation before pushing the code.
*/
set_field_ptr(part_field_array, new_data, rec0);
error= part_info->get_partition_id(part_info, new_part_id);
error= part_info->get_partition_id(part_info, new_part_id,
new_func_value);
set_field_ptr(part_field_array, rec0, new_data);
if (unlikely(error))
{
@ -397,11 +417,13 @@ int get_part_for_delete(const byte *buf, const byte *rec0,
partition_info *part_info, uint32 *part_id)
{
int error;
longlong func_value;
DBUG_ENTER("get_part_for_delete");
if (likely(buf == rec0))
{
if (unlikely((error= part_info->get_partition_id(part_info, part_id))))
if (unlikely((error= part_info->get_partition_id(part_info, part_id,
&func_value))))
{
DBUG_RETURN(error);
}
@ -411,7 +433,7 @@ int get_part_for_delete(const byte *buf, const byte *rec0,
{
Field **part_field_array= part_info->full_part_field_array;
set_field_ptr(part_field_array, buf, rec0);
error= part_info->get_partition_id(part_info, part_id);
error= part_info->get_partition_id(part_info, part_id, &func_value);
set_field_ptr(part_field_array, rec0, buf);
if (unlikely(error))
{
@ -1892,7 +1914,7 @@ static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
if (part_id >= no_parts)
{
uint new_mask= ((mask + 1) >> 1) - 1;
part_id= hash_value & new_mask;
part_id= (uint32)(hash_value & new_mask);
}
return part_id;
}
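A worked example of the fold above (illustrative values):

  /*
    no_parts = 3, linear_hash_mask = 3 (covers buckets 0..3):
      hash = 6: 6 & 3 = 2  -> 2 < 3, partition 2
      hash = 7: 7 & 3 = 3  -> 3 >= no_parts, fold once:
                new_mask = ((3 + 1) >> 1) - 1 = 1
                7 & 1 = 1  -> partition 1
  */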
@ -2658,6 +2680,7 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
get_part_id_hash()
no_parts Number of hash partitions
part_expr Item tree of hash function
out:func_value Value of hash function
RETURN VALUE
Calculated partition id
@ -2665,10 +2688,12 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
inline
static uint32 get_part_id_hash(uint no_parts,
Item *part_expr)
Item *part_expr,
longlong *func_value)
{
DBUG_ENTER("get_part_id_hash");
longlong int_hash_id= part_expr->val_int() % no_parts;
*func_value= part_expr->val_int();
longlong int_hash_id= *func_value % no_parts;
DBUG_RETURN(int_hash_id < 0 ? -int_hash_id : int_hash_id);
}
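And for the hash variant, the sign correction in numbers (illustrative;
% truncates toward zero here):

  /*
    no_parts = 4, part_expr->val_int() = -7:
      *func_value = -7
      int_hash_id = -7 % 4 = -3
      returned id = -(-3)  =  3
  */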
@ -2682,6 +2707,7 @@ static uint32 get_part_id_hash(uint no_parts,
desired information is given
no_parts Number of hash partitions
part_expr Item tree of hash function
out:func_value Value of hash function
RETURN VALUE
Calculated partition id
@ -2690,11 +2716,13 @@ static uint32 get_part_id_hash(uint no_parts,
inline
static uint32 get_part_id_linear_hash(partition_info *part_info,
uint no_parts,
Item *part_expr)
Item *part_expr,
longlong *func_value)
{
DBUG_ENTER("get_part_id_linear_hash");
DBUG_RETURN(get_part_id_from_linear_hash(part_expr->val_int(),
*func_value= part_expr->val_int();
DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
part_info->linear_hash_mask,
no_parts));
}
@ -2714,11 +2742,12 @@ static uint32 get_part_id_linear_hash(partition_info *part_info,
inline
static uint32 get_part_id_key(Field **field_array,
uint no_parts)
uint no_parts,
longlong *func_value)
{
DBUG_ENTER("get_part_id_key");
DBUG_RETURN(calculate_key_value(field_array) % no_parts);
*func_value= calculate_key_value(field_array);
DBUG_RETURN(*func_value % no_parts);
}
@ -2739,11 +2768,13 @@ static uint32 get_part_id_key(Field **field_array,
inline
static uint32 get_part_id_linear_key(partition_info *part_info,
Field **field_array,
uint no_parts)
uint no_parts,
longlong *func_value)
{
DBUG_ENTER("get_partition_id_linear_key");
DBUG_RETURN(get_part_id_from_linear_hash(calculate_key_value(field_array),
*func_value= calculate_key_value(field_array);
DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
part_info->linear_hash_mask,
no_parts));
}
@ -2820,7 +2851,8 @@ static uint32 get_part_id_linear_key(partition_info *part_info,
int get_partition_id_list(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
LIST_PART_ENTRY *list_array= part_info->list_array;
int list_index;
@ -2830,6 +2862,7 @@ int get_partition_id_list(partition_info *part_info,
longlong part_func_value= part_info->part_expr->val_int();
DBUG_ENTER("get_partition_id_list");
*func_value= part_func_value;
while (max_list_index >= min_list_index)
{
list_index= (max_list_index + min_list_index) >> 1;
@ -2928,7 +2961,8 @@ notfound:
int get_partition_id_range(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
longlong *range_array= part_info->range_int_array;
uint max_partition= part_info->no_parts - 1;
@ -2951,6 +2985,7 @@ int get_partition_id_range(partition_info *part_info,
if (loc_part_id != max_partition)
loc_part_id++;
*part_id= (uint32)loc_part_id;
*func_value= part_func_value;
if (loc_part_id == max_partition)
if (range_array[loc_part_id] != LONGLONG_MAX)
if (part_func_value >= range_array[loc_part_id])
@ -3042,192 +3077,229 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
*part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr);
*part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr,
func_value);
return 0;
}
int get_partition_id_linear_hash_nosub(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
*part_id= get_part_id_linear_hash(part_info, part_info->no_parts,
part_info->part_expr);
part_info->part_expr, func_value);
return 0;
}
int get_partition_id_key_nosub(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
*part_id= get_part_id_key(part_info->part_field_array, part_info->no_parts);
*part_id= get_part_id_key(part_info->part_field_array,
part_info->no_parts, func_value);
return 0;
}
int get_partition_id_linear_key_nosub(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
*part_id= get_part_id_linear_key(part_info,
part_info->part_field_array,
part_info->no_parts);
part_info->no_parts, func_value);
return 0;
}
int get_partition_id_range_sub_hash(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_hash");
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id))))
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
func_value))))
{
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
&local_func_value);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
int get_partition_id_range_sub_linear_hash(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_linear_hash");
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id))))
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
func_value))))
{
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
part_info->subpart_expr);
part_info->subpart_expr,
&local_func_value);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
int get_partition_id_range_sub_key(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_key");
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id))))
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
func_value))))
{
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
sub_part_id= get_part_id_key(part_info->subpart_field_array,
no_subparts, &local_func_value);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
int get_partition_id_range_sub_linear_key(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_linear_key");
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id))))
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
func_value))))
{
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_linear_key(part_info,
part_info->subpart_field_array,
no_subparts);
no_subparts, &local_func_value);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
int get_partition_id_list_sub_hash(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_list_sub_hash");
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id))))
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
func_value))))
{
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
&local_func_value);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
int get_partition_id_list_sub_linear_hash(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_list_sub_linear_hash");
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id))))
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
func_value))))
{
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
part_info->subpart_expr,
&local_func_value);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
int get_partition_id_list_sub_key(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_key");
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id))))
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
func_value))))
{
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
sub_part_id= get_part_id_key(part_info->subpart_field_array,
no_subparts, &local_func_value);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
int get_partition_id_list_sub_linear_key(partition_info *part_info,
uint32 *part_id)
uint32 *part_id,
longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_list_sub_linear_key");
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id))))
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
func_value))))
{
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_linear_key(part_info,
part_info->subpart_field_array,
no_subparts);
no_subparts, &local_func_value);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
@ -3259,29 +3331,34 @@ int get_partition_id_list_sub_linear_key(partition_info *part_info,
uint32 get_partition_id_hash_sub(partition_info *part_info)
{
return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr);
longlong func_value;
return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr,
&func_value);
}
uint32 get_partition_id_linear_hash_sub(partition_info *part_info)
{
longlong func_value;
return get_part_id_linear_hash(part_info, part_info->no_subparts,
part_info->subpart_expr);
part_info->subpart_expr, &func_value);
}
uint32 get_partition_id_key_sub(partition_info *part_info)
{
longlong func_value;
return get_part_id_key(part_info->subpart_field_array,
part_info->no_subparts);
part_info->no_subparts, &func_value);
}
uint32 get_partition_id_linear_key_sub(partition_info *part_info)
{
longlong func_value;
return get_part_id_linear_key(part_info,
part_info->subpart_field_array,
part_info->no_subparts);
part_info->no_subparts, &func_value);
}
@ -3428,16 +3505,19 @@ bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info,
bool result;
byte *rec0= table->record[0];
partition_info *part_info= table->part_info;
longlong func_value;
DBUG_ENTER("get_part_id_from_key");
key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
if (likely(rec0 == buf))
result= part_info->get_part_partition_id(part_info, part_id);
result= part_info->get_part_partition_id(part_info, part_id,
&func_value);
else
{
Field **part_field_array= part_info->part_field_array;
set_field_ptr(part_field_array, buf, rec0);
result= part_info->get_part_partition_id(part_info, part_id);
result= part_info->get_part_partition_id(part_info, part_id,
&func_value);
set_field_ptr(part_field_array, rec0, buf);
}
DBUG_RETURN(result);
@ -3472,16 +3552,19 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
bool result;
partition_info *part_info= table->part_info;
byte *rec0= table->record[0];
longlong func_value;
DBUG_ENTER("get_full_part_id_from_key");
key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
if (likely(rec0 == buf))
result= part_info->get_partition_id(part_info, &part_spec->start_part);
result= part_info->get_partition_id(part_info, &part_spec->start_part,
&func_value);
else
{
Field **part_field_array= part_info->full_part_field_array;
set_field_ptr(part_field_array, buf, rec0);
result= part_info->get_partition_id(part_info, &part_spec->start_part);
result= part_info->get_partition_id(part_info, &part_spec->start_part,
&func_value);
set_field_ptr(part_field_array, rec0, buf);
}
part_spec->end_part= part_spec->start_part;
@ -3926,6 +4009,47 @@ static int fast_end_partition(THD *thd, ulonglong copied,
}
/*
Check engine mix that it is correct
SYNOPSIS
check_engine_condition()
p_elem Partition element
default_engine Have user specified engine on table level
inout::engine_type Current engine used
inout::first Is it first partition
RETURN VALUE
TRUE Failed check
FALSE Ok
DESCRIPTION
(specified partition handler) specified table handler
(NDB, NDB) NDB OK
(MYISAM, MYISAM) - OK
(MYISAM, -) - NOT OK
(MYISAM, -) MYISAM OK
(- , MYISAM) - NOT OK
(- , -) MYISAM OK
(-,-) - OK
(NDB, MYISAM) * NOT OK
*/
static bool check_engine_condition(partition_element *p_elem,
bool default_engine,
handlerton **engine_type,
bool *first)
{
if (*first && default_engine)
*engine_type= p_elem->engine_type;
*first= FALSE;
if ((!default_engine &&
(p_elem->engine_type != *engine_type &&
!p_elem->engine_type)) ||
(default_engine &&
p_elem->engine_type != *engine_type))
return TRUE;
else
return FALSE;
}
/*
We need to check if engine used by all partitions can handle
partitioning natively.
@ -3954,8 +4078,10 @@ static bool check_native_partitioned(HA_CREATE_INFO *create_info,bool *ret_val,
bool first= TRUE;
bool default_engine;
handlerton *engine_type= create_info->db_type;
handlerton *old_engine_type= engine_type;
uint i= 0;
handler *file;
uint no_parts= part_info->partitions.elements;
DBUG_ENTER("check_native_partitioned");
default_engine= (create_info->used_fields | HA_CREATE_USED_ENGINE) ?
@ -3963,27 +4089,48 @@ static bool check_native_partitioned(HA_CREATE_INFO *create_info,bool *ret_val,
DBUG_PRINT("info", ("engine_type = %u, default = %u",
ha_legacy_type(engine_type),
default_engine));
do
if (no_parts)
{
partition_element *part_elem= part_it++;
if (first && default_engine && part_elem->engine_type)
engine_type= part_elem->engine_type;
first= FALSE;
if (part_elem->engine_type != engine_type)
do
{
/*
Mixed engines not yet supported but when supported it will need
the partition handler
*/
*ret_val= FALSE;
DBUG_RETURN(FALSE);
}
} while (++i < part_info->no_parts);
partition_element *part_elem= part_it++;
if (is_sub_partitioned(part_info) &&
part_elem->subpartitions.elements)
{
uint no_subparts= part_elem->subpartitions.elements;
uint j= 0;
List_iterator<partition_element> sub_it(part_elem->subpartitions);
do
{
partition_element *sub_elem= sub_it++;
if (check_engine_condition(sub_elem, default_engine,
&engine_type, &first))
goto error;
} while (++j < no_subparts);
/*
In case of subpartitioning and defaults we allow that only
subparts have specified engines, as long as the parts haven't
specified the wrong engine it's ok.
*/
if (check_engine_condition(part_elem, FALSE,
&engine_type, &first))
goto error;
}
else if (check_engine_condition(part_elem, default_engine,
&engine_type, &first))
goto error;
} while (++i < no_parts);
}
/*
All engines are of the same type. Check if this engine supports
native partitioning.
*/
if (!engine_type)
engine_type= old_engine_type;
DBUG_PRINT("info", ("engine_type = %s",
ha_resolve_storage_engine_name(engine_type)));
if (engine_type->partition_flags &&
(engine_type->partition_flags() & HA_CAN_PARTITION))
{
@ -3992,6 +4139,13 @@ static bool check_native_partitioned(HA_CREATE_INFO *create_info,bool *ret_val,
*ret_val= TRUE;
}
DBUG_RETURN(FALSE);
error:
/*
Mixed engines not yet supported but when supported it will need
the partition handler
*/
*ret_val= FALSE;
DBUG_RETURN(TRUE);
}
@ -4794,7 +4948,7 @@ the generated partition syntax in a correct manner.
}
else
{
bool is_native_partitioned;
bool is_native_partitioned= FALSE;
partition_info *part_info= thd->lex->part_info;
part_info->default_engine_type= create_info->db_type;
if (check_native_partitioned(create_info, &is_native_partitioned,
@ -4804,11 +4958,7 @@ the generated partition syntax in a correct manner.
}
if (!is_native_partitioned)
{
if (create_info->db_type == (handlerton*)&default_hton)
{
thd->lex->part_info->default_engine_type=
ha_checktype(thd, DB_TYPE_DEFAULT, FALSE, FALSE);
}
DBUG_ASSERT(create_info->db_type != &default_hton);
create_info->db_type= &partition_hton;
}
}
@ -5248,132 +5398,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
}
#endif
/*
Internal representation of the frm blob
*/
struct frm_blob_struct
{
struct frm_blob_header
{
uint ver; /* Version of header */
uint orglen; /* Original length of compressed data */
uint complen; /* Compressed length of data, 0=uncompressed */
} head;
char data[1];
};
/*
packfrm is a method used to compress the frm file for storage in a
handler. This method was developed for the NDB handler and has been moved
here to serve also other uses.
SYNOPSIS
packfrm()
data Data reference to frm file data
len Length of frm file data
out:pack_data Reference to the pointer to the packed frm data
out:pack_len Length of packed frm file data
RETURN VALUES
0 Success
>0 Failure
*/
int packfrm(const void *data, uint len,
const void **pack_data, uint *pack_len)
{
int error;
ulong org_len, comp_len;
uint blob_len;
frm_blob_struct *blob;
DBUG_ENTER("packfrm");
DBUG_PRINT("enter", ("data: %x, len: %d", data, len));
error= 1;
org_len= len;
if (my_compress((byte*)data, &org_len, &comp_len))
goto err;
DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len));
DBUG_DUMP("compressed", (char*)data, org_len);
error= 2;
blob_len= sizeof(frm_blob_struct::frm_blob_header)+org_len;
if (!(blob= (frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME))))
goto err;
// Store compressed blob in machine independent format
int4store((char*)(&blob->head.ver), 1);
int4store((char*)(&blob->head.orglen), comp_len);
int4store((char*)(&blob->head.complen), org_len);
// Copy frm data into blob, already in machine independent format
memcpy(blob->data, data, org_len);
*pack_data= blob;
*pack_len= blob_len;
error= 0;
DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len));
err:
DBUG_RETURN(error);
}
/*
unpackfrm is a method used to decompress the frm file received from a
handler. This method was developed for the NDB handler and has been moved
here to serve also other uses for other clustered storage engines.
SYNOPSIS
unpackfrm()
pack_data Data reference to packed frm file data
out:unpack_data Reference to the pointer to the unpacked frm data
out:unpack_len Length of unpacked frm file data
RETURN VALUES
0 Success
>0 Failure
*/
int unpackfrm(const void **unpack_data, uint *unpack_len,
const void *pack_data)
{
const frm_blob_struct *blob= (frm_blob_struct*)pack_data;
byte *data;
ulong complen, orglen, ver;
DBUG_ENTER("unpackfrm");
DBUG_PRINT("enter", ("pack_data: %x", pack_data));
complen= uint4korr((char*)&blob->head.complen);
orglen= uint4korr((char*)&blob->head.orglen);
ver= uint4korr((char*)&blob->head.ver);
DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d",
ver,complen,orglen));
DBUG_DUMP("blob->data", (char*) blob->data, complen);
if (ver != 1)
DBUG_RETURN(1);
if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
DBUG_RETURN(2);
memcpy(data, blob->data, complen);
if (my_uncompress(data, &complen, &orglen))
{
my_free((char*)data, MYF(0));
DBUG_RETURN(3);
}
*unpack_data= data;
*unpack_len= complen;
DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len));
DBUG_RETURN(0);
}
/*
Prepare for calling val_int on partition function by setting fields to

sql/sql_table.cc

@ -3934,7 +3934,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (create_info->row_type == ROW_TYPE_NOT_USED)
create_info->row_type= table->s->row_type;
DBUG_PRINT("info", ("old type: %d new type: %d", old_db_type, new_db_type));
DBUG_PRINT("info", ("old type: %s new type: %s",
ha_resolve_storage_engine_name(old_db_type),
ha_resolve_storage_engine_name(new_db_type)));
if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED) ||
ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED))
{

storage/ndb/include/kernel/ndb_limits.h

@ -27,6 +27,7 @@
*/
#define MAX_NDB_NODES 49
#define MAX_NODES 64
#define UNDEF_NODEGROUP 0xFFFF
/**
* MAX_API_NODES = MAX_NODES - No of NDB Nodes in use

storage/ndb/include/kernel/signaldata/DictTabInfo.hpp

@ -131,6 +131,7 @@ public:
MaxRowsLow = 139,
MaxRowsHigh = 140,
DefaultNoPartFlag = 141,
LinearHashFlag = 142,
RowGCIFlag = 150,
RowChecksumFlag = 151,
@ -310,6 +311,7 @@ public:
Uint32 MaxRowsLow;
Uint32 MaxRowsHigh;
Uint32 DefaultNoPartFlag;
Uint32 LinearHashFlag;
/*
TODO RONM:
We need to replace FRM, Fragment Data, Tablespace Data and in

storage/ndb/include/ndb_version.h.in

@ -63,6 +63,6 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
#define NDBD_ROWID_VERSION (MAKE_VERSION(5,1,6))
#define NDBD_INCL_NODECONF_VERSION_4 MAKE_VERSION(4,1,17)
#define NDBD_INCL_NODECONF_VERSION_5 MAKE_VERSION(5,0,18)
#define NDBD_FRAGID_VERSION (MAKE_VERSION(5,1,6))
#endif

storage/ndb/include/ndbapi/NdbDictionary.hpp

@ -733,6 +733,12 @@ public:
*/
void setLogging(bool);
/**
* Set/Get Linear Hash Flag
*/
void setLinearFlag(Uint32 flag);
bool getLinearFlag() const;
/**
* Set fragment count
*/
@ -799,13 +805,13 @@ public:
* number of partitions).
*/
void setMaxRows(Uint64 maxRows);
Uint64 getMaxRows();
Uint64 getMaxRows() const;
/**
* Set/Get indicator if default number of partitions is used in table.
*/
void setDefaultNoPartitionsFlag(Uint32 indicator);
Uint32 getDefaultNoPartitionsFlag();
Uint32 getDefaultNoPartitionsFlag() const;
/**
* Get object id
@ -830,7 +836,7 @@ public:
*/
void setTablespaceNames(const void* data, Uint32 len);
const void *getTablespaceNames();
Uint32 getTablespaceNamesLen();
Uint32 getTablespaceNamesLen() const;
/**
* Set tablespace information per fragment

storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp

@ -58,6 +58,7 @@ DictTabInfo::TableMapping[] = {
DTIMAP(Table, MaxRowsLow, MaxRowsLow),
DTIMAP(Table, MaxRowsHigh, MaxRowsHigh),
DTIMAP(Table, DefaultNoPartFlag, DefaultNoPartFlag),
DTIMAP(Table, LinearHashFlag, LinearHashFlag),
DTIMAP(Table, TablespaceVersion, TablespaceVersion),
DTIMAP(Table, RowGCIFlag, RowGCIFlag),
DTIMAP(Table, RowChecksumFlag, RowChecksumFlag),
@ -149,6 +150,7 @@ DictTabInfo::Table::init(){
MaxRowsLow = 0;
MaxRowsHigh = 0;
DefaultNoPartFlag = 1;
LinearHashFlag = 1;
RowGCIFlag = ~0;
RowChecksumFlag = ~0;

storage/ndb/src/kernel/blocks/backup/Backup.cpp

@ -4082,6 +4082,7 @@ Backup::execFIRE_TRIG_ORD(Signal* signal)
const Uint32 gci = trg->getGCI();
const Uint32 trI = trg->getTriggerId();
const Uint32 fragId = trg->fragId;
TriggerPtr trigPtr;
c_triggerPool.getPtr(trigPtr, trI);
@ -4095,6 +4096,7 @@ Backup::execFIRE_TRIG_ORD(Signal* signal)
ndbrequire(trigPtr.p->logEntry != 0);
Uint32 len = trigPtr.p->logEntry->Length;
trigPtr.p->logEntry->FragId = htonl(fragId);
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, trigPtr.p->backupPtr);
@ -4104,7 +4106,7 @@ Backup::execFIRE_TRIG_ORD(Signal* signal)
trigPtr.p->logEntry->TriggerEvent = htonl(trigPtr.p->event | 0x10000);
trigPtr.p->logEntry->Data[len] = htonl(gci);
len ++;
len++;
ptr.p->currGCP = gci;
}//if
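For reference, the resulting log entry layout (matching the
BackupFormat.hpp change below), with all header words in network byte
order:

  /*
    Uint32 Length;        // entry length in words
    Uint32 TableId;
    Uint32 TriggerEvent;  // | 0x10000 when a GCI word follows the data
    Uint32 FragId;        // new in 5.1.6, written via htonl()
    Uint32 Data[...];     // attribute data, optionally followed by GCI
  */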

storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp

@ -143,6 +143,7 @@ struct BackupFormat {
Uint32 TableId;
// If TriggerEvent & 0x10000 == true then GCI is right after data
Uint32 TriggerEvent;
Uint32 FragId;
Uint32 Data[1]; // Len = Length - 2
};
};

storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp

@ -440,6 +440,7 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow);
w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh);
w.add(DictTabInfo::DefaultNoPartFlag, tablePtr.p->defaultNoPartFlag);
w.add(DictTabInfo::LinearHashFlag, tablePtr.p->linearHashFlag);
w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
if(signal)
@ -1832,6 +1833,7 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->maxRowsLow = 0;
tablePtr.p->maxRowsHigh = 0;
tablePtr.p->defaultNoPartFlag = true;
tablePtr.p->linearHashFlag = true;
tablePtr.p->m_bits = 0;
tablePtr.p->tableType = DictTabInfo::UserTable;
tablePtr.p->primaryTableId = RNIL;
@ -5901,6 +5903,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow;
tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh;
tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag;
tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag;
{
Rope frm(c_rope_pool, tablePtr.p->frmData);

storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp

@ -275,6 +275,11 @@ public:
*/
bool defaultNoPartFlag;
/*
Flag to indicate using linear hash function
*/
bool linearHashFlag;
/*
* Used when shrinking to decide when to merge buckets. Hysteresis
* is thus possible. Should be smaller but not much smaller than

storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp

@ -6398,7 +6398,6 @@ void Dbdih::execDIRELEASEREQ(Signal* signal)
***************************************
*/
#define UNDEF_NODEGROUP 65535
static inline void inc_node_or_group(Uint32 &node, Uint32 max_node)
{
Uint32 next = node + 1;

storage/ndb/src/ndbapi/NdbDictionary.cpp

@ -420,7 +420,7 @@ NdbDictionary::Table::setMaxRows(Uint64 maxRows)
}
Uint64
NdbDictionary::Table::getMaxRows()
NdbDictionary::Table::getMaxRows() const
{
return m_impl.m_max_rows;
}
@ -432,7 +432,7 @@ NdbDictionary::Table::setDefaultNoPartitionsFlag(Uint32 flag)
}
Uint32
NdbDictionary::Table::getDefaultNoPartitionsFlag()
NdbDictionary::Table::getDefaultNoPartitionsFlag() const
{
return m_impl.m_default_no_part_flag;
}
@ -472,11 +472,23 @@ NdbDictionary::Table::getTablespaceNames()
}
Uint32
NdbDictionary::Table::getTablespaceNamesLen()
NdbDictionary::Table::getTablespaceNamesLen() const
{
return m_impl.getTablespaceNamesLen();
}
void
NdbDictionary::Table::setLinearFlag(Uint32 flag)
{
m_impl.m_linear_flag = flag;
}
bool
NdbDictionary::Table::getLinearFlag() const
{
return m_impl.m_linear_flag;
}
void
NdbDictionary::Table::setFragmentCount(Uint32 count)
{

storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp

@ -392,6 +392,7 @@ NdbTableImpl::init(){
m_fragmentType= NdbDictionary::Object::FragAllSmall;
m_hashValueMask= 0;
m_hashpointerValue= 0;
m_linear_flag= true;
m_primaryTable.clear();
m_max_rows = 0;
m_default_no_part_flag = 1;
@ -486,6 +487,13 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const
}
}
if(m_linear_flag != obj.m_linear_flag)
{
DBUG_PRINT("info",("m_linear_flag %d != %d",m_linear_flag,
obj.m_linear_flag));
DBUG_RETURN(false);
}
if(m_max_rows != obj.m_max_rows)
{
DBUG_PRINT("info",("m_max_rows %d != %d",(int32)m_max_rows,
@ -633,6 +641,7 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_fragments = org.m_fragments;
m_linear_flag = org.m_linear_flag;
m_max_rows = org.m_max_rows;
m_default_no_part_flag = org.m_default_no_part_flag;
m_logging = org.m_logging;
@ -1833,6 +1842,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
max_rows += tableDesc->MaxRowsLow;
impl->m_max_rows = max_rows;
impl->m_default_no_part_flag = tableDesc->DefaultNoPartFlag;
impl->m_linear_flag = tableDesc->LinearHashFlag;
impl->m_logging = tableDesc->TableLoggedFlag;
impl->m_row_gci = tableDesc->RowGCIFlag;
impl->m_row_checksum = tableDesc->RowChecksumFlag;
@ -2266,6 +2276,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
tmpTab->MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
tmpTab->MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag;
tmpTab->LinearHashFlag = impl.m_linear_flag;
if (impl.m_ts_name.length())
{

storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp

@ -177,6 +177,7 @@ public:
Uint64 m_max_rows;
Uint32 m_default_no_part_flag;
bool m_linear_flag;
bool m_logging;
bool m_row_gci;
bool m_row_checksum;

storage/ndb/tools/Makefile.am

@ -55,7 +55,7 @@ ndb_drop_index_LDFLAGS = @ndb_bin_am_ldflags@
ndb_show_tables_LDFLAGS = @ndb_bin_am_ldflags@
ndb_select_all_LDFLAGS = @ndb_bin_am_ldflags@
ndb_select_count_LDFLAGS = @ndb_bin_am_ldflags@
ndb_restore_LDFLAGS = @ndb_bin_am_ldflags@
ndb_restore_LDFLAGS = @ndb_bin_am_ldflags@ @ZLIB_LIBS@
ndb_config_LDFLAGS = @ndb_bin_am_ldflags@
# Don't update the files from bitkeeper

storage/ndb/tools/restore/Restore.cpp

@ -16,6 +16,7 @@
#include "Restore.hpp"
#include <NdbTCP.h>
#include <NdbMem.h>
#include <OutputStream.hpp>
#include <Bitmask.hpp>
@ -23,6 +24,7 @@
#include <trigger_definitions.h>
#include <SimpleProperties.hpp>
#include <signaldata/DictTabInfo.hpp>
#include <ndb_limits.h>
Uint16 Twiddle16(Uint16 in); // Byte shift 16-bit data
Uint32 Twiddle32(Uint32 in); // Byte shift 32-bit data
@ -321,6 +323,7 @@ TableS::~TableS()
delete allAttributesDesc[i];
}
// Parse dictTabInfo buffer and push back to vector storage
bool
RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
@ -336,8 +339,6 @@ RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
return false;
debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
tableImpl->m_fd.clear();
tableImpl->m_fragmentType = NdbDictionary::Object::FragAllSmall;
TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl);
if(table == NULL) {
return false;
@ -738,7 +739,7 @@ BackupFile::validateFooter(){
return true;
}
bool RestoreDataIterator::readFragmentHeader(int & ret)
bool RestoreDataIterator::readFragmentHeader(int & ret, Uint32 *fragmentId)
{
BackupFormat::DataFile::FragmentHeader Header;
@ -780,7 +781,7 @@ bool RestoreDataIterator::readFragmentHeader(int & ret)
m_count = 0;
ret = 0;
*fragmentId = Header.FragmentNo;
return true;
} // RestoreDataIterator::getNextFragment
@ -901,7 +902,7 @@ RestoreLogIterator::RestoreLogIterator(const RestoreMetaData & md)
}
const LogEntry *
RestoreLogIterator::getNextLogEntry(int & res) {
RestoreLogIterator::getNextLogEntry(int & res, bool *alloc_flag) {
// Read record length
typedef BackupFormat::LogFile::LogEntry LogE;
@ -925,7 +926,30 @@ RestoreLogIterator::getNextLogEntry(int & res) {
res= 0;
return 0;
}
if (m_metaData.getFileHeader().NdbVersion < NDBD_FRAGID_VERSION)
{
/*
    FragId was introduced in LogEntry in version 5.1.6.
    We set FragId to 0 for older versions (these versions do
    not support restore of user-defined partitioned tables).
  */
int i;
LogE *tmpLogE = (LogE*)NdbMem_Allocate(data_len + 4);
if (!tmpLogE)
{
res = -2;
return 0;
}
tmpLogE->Length = logE->Length;
tmpLogE->TableId = logE->TableId;
tmpLogE->TriggerEvent = logE->TriggerEvent;
tmpLogE->FragId = 0;
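    /*
      The pre-5.1.6 record has no FragId word, so when the buffer is read
      through the new LogE layout the old Data[i] appears at Data[i-1];
      the copy below shifts the payload into its new position before
      logE is redirected to the converted copy.
    */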
    for (i = 0; i < len - 3; i++)
      tmpLogE->Data[i] = logE->Data[i-1];
    logE = tmpLogE;
    *alloc_flag= true;
}
logE->TableId= ntohl(logE->TableId);
logE->TriggerEvent= ntohl(logE->TriggerEvent);
@ -960,6 +984,7 @@ RestoreLogIterator::getNextLogEntry(int & res) {
AttributeHeader * ah = (AttributeHeader *)&logE->Data[0];
AttributeHeader *end = (AttributeHeader *)&logE->Data[len - 2];
AttributeS * attr;
m_logEntry.m_frag_id = ntohl(logE->FragId);
while(ah < end){
attr= m_logEntry.add_attr();
if(attr == NULL) {

View file

@ -225,6 +225,8 @@ public:
TableS& operator=(TableS& org) ;
}; // TableS;
class RestoreLogIterator;
class BackupFile {
protected:
FILE * m_file;
@ -320,7 +322,7 @@ public:
~RestoreDataIterator() {};
// Read data file fragment header
bool readFragmentHeader(int & res);
bool readFragmentHeader(int & res, Uint32 *fragmentId);
bool validateFragmentFooter();
const TupleS *getNextTuple(int & res);
@ -333,6 +335,7 @@ public:
LE_DELETE,
LE_UPDATE
};
Uint32 m_frag_id;
EntryType m_type;
TableS * m_table;
Vector<AttributeS*> m_values;
@ -378,7 +381,7 @@ public:
RestoreLogIterator(const RestoreMetaData &);
virtual ~RestoreLogIterator() {};
const LogEntry * getNextLogEntry(int & res);
const LogEntry * getNextLogEntry(int & res, bool *alloc_flag);
};
NdbOut& operator<<(NdbOut& ndbout, const TableS&);

View file

@ -18,10 +18,11 @@
#define CONSUMER_HPP
#include "Restore.hpp"
#include "ndb_nodegroup_map.h"
#include "../../../../sql/ha_ndbcluster_tables.h"
extern const char *Ndb_apply_table;
class BackupConsumer {
public:
virtual ~BackupConsumer() { }
@ -29,13 +30,15 @@ public:
virtual bool object(Uint32 tableType, const void*) { return true;}
virtual bool table(const TableS &){return true;}
virtual bool endOfTables() { return true; }
virtual void tuple(const TupleS &){}
virtual void tuple(const TupleS &, Uint32 fragId){}
virtual void tuple_free(){}
virtual void endOfTuples(){}
virtual void logEntry(const LogEntry &){}
virtual void endOfLogEntrys(){}
virtual bool finalize_table(const TableS &){return true;}
virtual bool update_apply_status(const RestoreMetaData &metaData){return true;}
NODE_GROUP_MAP *m_nodegroup_map;
uint m_nodegroup_map_len;
};
#endif

View file

@ -28,7 +28,7 @@ BackupPrinter::table(const TableS & tab)
}
void
BackupPrinter::tuple(const TupleS & tup)
BackupPrinter::tuple(const TupleS & tup, Uint32 fragId)
{
m_dataCount++;
if (m_print || m_print_data)

View file

@ -23,8 +23,12 @@ class BackupPrinter : public BackupConsumer
{
NdbOut & m_ndbout;
public:
BackupPrinter(NdbOut & out = ndbout) : m_ndbout(out)
BackupPrinter(NODE_GROUP_MAP *ng_map,
uint ng_map_len,
NdbOut & out = ndbout) : m_ndbout(out)
{
m_nodegroup_map = ng_map;
m_nodegroup_map_len= ng_map_len;
m_print = false;
m_print_log = false;
m_print_data = false;
@ -37,7 +41,7 @@ public:
#ifdef USE_MYSQL
virtual bool table(const TableS &, MYSQL* mysqlp);
#endif
virtual void tuple(const TupleS &);
virtual void tuple(const TupleS &, Uint32 fragId);
virtual void logEntry(const LogEntry &);
virtual void endOfTuples() {};
virtual void endOfLogEntrys();

View file

@ -16,6 +16,7 @@
#include <NDBT_ReturnCodes.h>
#include "consumer_restore.hpp"
#include <my_sys.h>
#include <NdbSleep.h>
extern my_bool opt_core;
@ -25,6 +26,8 @@ extern FilteredNdbOut info;
extern FilteredNdbOut debug;
static void callback(int, NdbTransaction*, void*);
static Uint32 get_part_id(const NdbDictionary::Table *table,
Uint32 hash_value);
extern const char * g_connect_string;
bool
@ -152,6 +155,289 @@ BackupRestore::finalize_table(const TableS & table){
return ret;
}
static bool default_nodegroups(NdbDictionary::Table *table)
{
Uint16 *node_groups = (Uint16*)table->getFragmentData();
Uint32 no_parts = table->getFragmentDataLen() >> 1;
Uint32 i;
if (node_groups[0] != 0)
return false;
for (i = 1; i < no_parts; i++)
{
if (node_groups[i] != UNDEF_NODEGROUP)
return false;
}
return true;
}
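For intuition, a hedged sketch of what counts as "default" placement (the fragment-data arrays are illustrative, not taken from a real table):

/* default_nodegroups():
     { 0, UNDEF_NODEGROUP, UNDEF_NODEGROUP } -> true   (placement left to the cluster)
     { 0, 1, 0 }                             -> false  (explicit node groups)          */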
static Uint32 get_no_fragments(Uint64 max_rows, Uint32 no_nodes)
{
Uint32 i = 0;
Uint32 acc_row_size = 27;
Uint32 acc_fragment_size = 512*1024*1024;
Uint32 no_parts= (max_rows*acc_row_size)/acc_fragment_size + 1;
Uint32 reported_parts = no_nodes;
while (reported_parts < no_parts && ++i < 4 &&
(reported_parts + no_parts) < MAX_NDB_PARTITIONS)
reported_parts+= no_nodes;
if (reported_parts < no_parts)
{
err << "Table will be restored but will not be able to handle the maximum";
err << " amount of rows as requested" << endl;
}
return reported_parts;
}
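To make the sizing concrete, a minimal standalone sketch (inputs assumed for illustration; the MAX_NDB_PARTITIONS guard is elided for brevity):

/* Mirrors get_no_fragments(); compile separately to try the arithmetic. */
#include <stdio.h>

static unsigned sketch_no_fragments(unsigned long long max_rows,
                                    unsigned no_nodes)
{
  unsigned i = 0;
  unsigned acc_row_size = 27;                         /* bytes per row in ACC */
  unsigned long long acc_fragment_size = 512ULL * 1024 * 1024;
  unsigned no_parts =
    (unsigned)((max_rows * acc_row_size) / acc_fragment_size + 1);
  unsigned reported_parts = no_nodes;
  while (reported_parts < no_parts && ++i < 4)
    reported_parts += no_nodes;                       /* grow in node-count steps */
  return reported_parts;
}

int main(void)
{
  /* 100M rows need 6 fragments; on a 4-node cluster this rounds up to 8 */
  printf("%u\n", sketch_no_fragments(100000000ULL, 4));
  return 0;
}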
static void set_default_nodegroups(NdbDictionary::Table *table)
{
Uint32 no_parts = table->getFragmentCount();
Uint16 node_group[MAX_NDB_PARTITIONS];
Uint32 i;
node_group[0] = 0;
for (i = 1; i < no_parts; i++)
{
node_group[i] = UNDEF_NODEGROUP;
}
table->setFragmentData((const void*)node_group, 2 * no_parts);
}
Uint32 BackupRestore::map_ng(Uint32 ng)
{
NODE_GROUP_MAP *ng_map = m_nodegroup_map;
if (ng == UNDEF_NODEGROUP ||
ng_map[ng].map_array[0] == UNDEF_NODEGROUP)
{
ndbout << "No mapping done" << endl;
return ng;
}
else
{
Uint32 new_ng;
Uint32 curr_inx = ng_map[ng].curr_index;
Uint32 new_curr_inx = curr_inx + 1;
assert(ng < MAX_NDB_PARTITIONS);
assert(curr_inx < MAX_MAPS_PER_NODE_GROUP);
assert(new_curr_inx < MAX_MAPS_PER_NODE_GROUP);
ndbout << "curr_inx = " << curr_inx << endl;
if (new_curr_inx >= MAX_MAPS_PER_NODE_GROUP)
new_curr_inx = 0;
else if (ng_map[ng].map_array[new_curr_inx] == UNDEF_NODEGROUP)
new_curr_inx = 0;
new_ng = ng_map[ng].map_array[curr_inx];
ndbout << "new_ng = " << new_ng << endl;
ng_map[ng].curr_index = new_curr_inx;
return new_ng;
}
}
bool BackupRestore::map_nodegroups(Uint16 *ng_array, Uint32 no_parts)
{
Uint32 i;
bool mapped = FALSE;
DBUG_ENTER("map_nodegroups");
assert(no_parts < MAX_NDB_PARTITIONS);
for (i = 0; i < no_parts; i++)
{
Uint32 ng;
ndbout << "map_nodegroups loop " << i << ", " << ng_array[i] << endl;
ng = map_ng((Uint32)ng_array[i]);
if (ng != ng_array[i])
mapped = TRUE;
ng_array[i] = ng;
}
DBUG_RETURN(mapped);
}
static void copy_byte(const char **data, char **new_data, uint *len)
{
**new_data = **data;
(*data)++;
(*new_data)++;
(*len)++;
}
bool BackupRestore::search_replace(char *search_str, char **new_data,
const char **data, const char *end_data,
uint *new_data_len)
{
uint search_str_len = strlen(search_str);
uint inx = 0;
bool in_delimiters = FALSE;
bool escape_char = FALSE;
char start_delimiter = 0;
DBUG_ENTER("search_replace");
ndbout << "search_replace" << endl;
do
{
char c = **data;
copy_byte(data, new_data, new_data_len);
if (escape_char)
{
escape_char = FALSE;
}
else if (in_delimiters)
{
if (c == start_delimiter)
in_delimiters = FALSE;
}
else if (c == '\'' || c == '\"')
{
in_delimiters = TRUE;
start_delimiter = c;
}
else if (c == '\\')
{
escape_char = TRUE;
}
else if (c == search_str[inx])
{
inx++;
if (inx == search_str_len)
{
bool found = FALSE;
uint number = 0;
while (*data != end_data)
{
if (isdigit(**data))
{
found = TRUE;
            number = (10 * number) + (**data - '0');
if (number > MAX_NDB_NODES)
break;
}
else if (found)
{
/*
After long and tedious preparations we have actually found
a node group identifier to convert. We'll use the mapping
table created for node groups and then insert the new number
instead of the old number.
*/
uint temp = map_ng(number);
int no_digits = 0;
char digits[10];
            if (temp == 0)
              digits[no_digits++] = '0';
            while (temp != 0)
            {
              digits[no_digits] = (temp % 10) + '0';
              no_digits++;
              temp/=10;
            }
            for (no_digits--; no_digits >= 0; no_digits--)
            {
              **new_data = digits[no_digits];
              (*new_data)++;
              *new_data_len+=1;
            }
DBUG_RETURN(FALSE);
}
else
break;
(*data)++;
}
DBUG_RETURN(TRUE);
}
}
else
inx = 0;
} while (*data < end_data);
DBUG_RETURN(FALSE);
}
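As an illustration of the replace step (hypothetical mapping 0 -> 2, 1 -> 3; the string below is assumed partition syntax, not taken from a real frm):

/* input :  ... ENGINE = NDB NODEGROUP = 0, ... NODEGROUP = 1 ...
   output:  ... ENGINE = NDB NODEGROUP = 2, ... NODEGROUP = 3 ...
   Text inside '...' / "..." delimiters and backslash-escaped characters
   is copied verbatim, so quoted values are never rewritten.            */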
bool BackupRestore::map_in_frm(char *new_data, const char *data,
uint data_len, uint *new_data_len)
{
const char *end_data= data + data_len;
const char *end_part_data;
const char *part_data;
char *extra_ptr;
uint start_key_definition_len = uint2korr(data + 6);
uint key_definition_len = uint4korr(data + 47);
uint part_info_len;
DBUG_ENTER("map_in_frm");
if (data_len < 4096) goto error;
extra_ptr = (char*)data + start_key_definition_len + key_definition_len;
if ((int)data_len < ((extra_ptr - data) + 2)) goto error;
extra_ptr = extra_ptr + 2 + uint2korr(extra_ptr);
if ((int)data_len < ((extra_ptr - data) + 2)) goto error;
extra_ptr = extra_ptr + 2 + uint2korr(extra_ptr);
if ((int)data_len < ((extra_ptr - data) + 4)) goto error;
part_info_len = uint4korr(extra_ptr);
part_data = extra_ptr + 4;
if ((int)data_len < ((part_data + part_info_len) - data)) goto error;
do
{
copy_byte(&data, &new_data, new_data_len);
} while (data < part_data);
end_part_data = part_data + part_info_len;
do
{
if (search_replace((char*)" NODEGROUP = ", &new_data, &data,
end_part_data, new_data_len))
goto error;
} while (data != end_part_data);
do
{
copy_byte(&data, &new_data, new_data_len);
} while (data < end_data);
DBUG_RETURN(FALSE);
error:
DBUG_RETURN(TRUE);
}
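The walk above relies on the frm layout; a hedged summary (offsets as used in the code; the section names are my reading of the format, not stated in the patch):

/* data + 6  : 2-byte length up to the key definition section
   data + 47 : 4-byte key definition length
   next      : two 2-byte length-prefixed sections (presumably the
               connect string and the engine name)
   next      : 4-byte partition-info length, then the partition SQL text
               in which " NODEGROUP = <n>" identifiers get rewritten     */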
bool BackupRestore::translate_frm(NdbDictionary::Table *table)
{
const void *pack_data, *data, *new_pack_data;
char *new_data;
uint data_len, pack_len, new_data_len, new_pack_len;
uint no_parts, extra_growth;
DBUG_ENTER("translate_frm");
pack_data = table->getFrmData();
no_parts = table->getFragmentCount();
/*
Add max 4 characters per partition to handle worst case
of mapping from single digit to 5-digit number.
Fairly future-proof, ok up to 99999 node groups.
*/
extra_growth = no_parts * 4;
if (unpackfrm(&data, &data_len, pack_data))
{
DBUG_RETURN(TRUE);
}
  if (!(new_data = (char*)my_malloc(data_len + extra_growth, MYF(0))))
{
DBUG_RETURN(TRUE);
}
if (map_in_frm(new_data, (const char*)data, data_len, &new_data_len))
{
my_free(new_data, MYF(0));
DBUG_RETURN(TRUE);
}
if (packfrm((const void*)new_data, new_data_len,
&new_pack_data, &new_pack_len))
{
my_free(new_data, MYF(0));
DBUG_RETURN(TRUE);
}
table->setFrm(new_pack_data, new_pack_len);
DBUG_RETURN(FALSE);
}
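In short, the pipeline is unpackfrm() -> map_in_frm() -> packfrm() -> setFrm(); a condensed sketch of the caller's side (names from this patch, error paths elided):

/* Uint16 *ng_array = (Uint16*)copy.getFragmentData();
   if (map_nodegroups(ng_array, copy.getFragmentCount())) // anything remapped?
     translate_frm(&copy);   // rewrite NODEGROUP ids inside the frm as well
   dict->createTable(copy);  // see BackupRestore::table() below            */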
#include <signaldata/DictTabInfo.hpp>
bool
@ -190,7 +476,7 @@ BackupRestore::object(Uint32 type, const void * ptr)
NdbDictionary::Tablespace* currptr = new NdbDictionary::Tablespace(curr);
NdbDictionary::Tablespace * null = 0;
m_tablespaces.set(currptr, id, null);
debug << "Retreived tablspace: " << currptr->getName()
debug << "Retreived tablespace: " << currptr->getName()
<< " oldid: " << id << " newid: " << currptr->getObjectId()
<< " " << (void*)currptr << endl;
return true;
@ -349,6 +635,7 @@ BackupRestore::table(const TableS & table){
const char * name = table.getTableName();
ndbout << "Starting to handle table " << name << endl;
/**
* Ignore blob tables
*/
@ -372,7 +659,8 @@ BackupRestore::table(const TableS & table){
m_ndb->setSchemaName(split[1].c_str());
NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
if(m_restore_meta){
if(m_restore_meta)
{
NdbDictionary::Table copy(*table.m_dictTable);
copy.setName(split[2].c_str());
@ -385,10 +673,63 @@ BackupRestore::table(const TableS & table){
copy.setTablespace(* ts);
}
if (copy.getDefaultNoPartitionsFlag())
{
ndbout << "Default number of partitions" << endl;
/*
Table was defined with default number of partitions. We can restore
it with whatever is the default in this cluster.
We use the max_rows parameter in calculating the default number.
*/
Uint32 no_nodes = m_cluster_connection->no_db_nodes();
copy.setFragmentCount(get_no_fragments(copy.getMaxRows(),
no_nodes));
set_default_nodegroups(&copy);
}
else
{
ndbout << "Not default number of partitions" << endl;
/*
Table was defined with specific number of partitions. It should be
restored with the same number of partitions. It will either be
restored in the same node groups as when backup was taken or by
using a node group map supplied to the ndb_restore program.
*/
Uint16 *ng_array = (Uint16*)copy.getFragmentData();
Uint16 no_parts = copy.getFragmentCount();
ndbout << "Map node groups, no_parts = " << no_parts << endl;
ndbout << "ng_array = " << hex << (Uint32)ng_array << endl;
if (map_nodegroups(ng_array, no_parts))
{
ndbout << "Node groups were mapped" << endl;
if (translate_frm(&copy))
{
err << "Create table " << table.getTableName() << " failed: ";
err << "Translate frm error" << endl;
return false;
}
}
ndbout << "Set fragment Data " << endl;
copy.setFragmentData((const void *)ng_array, no_parts << 1);
}
if (dict->createTable(copy) == -1)
{
err << "Create table " << table.getTableName() << " failed: "
<< dict->getNdbError() << endl;
<< dict->getNdbError() << endl;
if (dict->getNdbError().code == 771)
{
/*
          The user on the cluster where the backup was created specified
          explicit node groups for partitions. Some of those node groups
          do not exist on this cluster. Warn the user and point out the
          available option.
*/
err << "The node groups defined in the table didn't exist in this";
err << " cluster." << endl << "There is an option to use the";
err << " the parameter ndb-nodegroup-map to define a mapping from";
err << endl << "the old nodegroups to new nodegroups" << endl;
}
return false;
}
info << "Successfully restored table " << table.getTableName()<< endl ;
@ -503,7 +844,7 @@ BackupRestore::endOfTables(){
return true;
}
void BackupRestore::tuple(const TupleS & tup)
void BackupRestore::tuple(const TupleS & tup, Uint32 fragmentId)
{
if (!m_restore)
return;
@ -523,6 +864,7 @@ void BackupRestore::tuple(const TupleS & tup)
m_free_callback = cb->next;
cb->retries = 0;
cb->fragId = fragmentId;
cb->tup = tup; // must do copy!
tuple_a(cb);
@ -530,6 +872,7 @@ void BackupRestore::tuple(const TupleS & tup)
void BackupRestore::tuple_a(restore_callback_t *cb)
{
Uint32 partition_id = cb->fragId;
while (cb->retries < 10)
{
/**
@ -543,6 +886,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
m_ndb->sendPollNdb(3000, 1);
continue;
}
err << "Cannot start transaction" << endl;
exitHandler();
} // if
@ -555,6 +899,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
{
if (errorHandler(cb))
continue;
err << "Cannot get operation: " << cb->connection->getNdbError() << endl;
exitHandler();
} // if
@ -562,9 +907,37 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
{
if (errorHandler(cb))
continue;
err << "Error defining op: " << cb->connection->getNdbError() << endl;
exitHandler();
} // if
if (table->getFragmentType() == NdbDictionary::Object::UserDefined)
{
if (table->getDefaultNoPartitionsFlag())
{
/*
          This can only happen for HASH partitioning with a
          user-defined hash function where the user hasn't
          specified the number of partitions, so we have to
          calculate it. We use the hash value stored in the
          record to calculate the partition to use.
*/
int i = tup.getNoOfAttributes() - 1;
const AttributeData *attr_data = tup.getData(i);
Uint32 hash_value = *attr_data->u_int32_value;
op->setPartitionId(get_part_id(table, hash_value));
}
else
{
/*
Either RANGE or LIST (with or without subparts)
OR HASH partitioning with user defined hash
function but with fixed set of partitions.
*/
op->setPartitionId(partition_id);
}
}
int ret = 0;
for (int j = 0; j < 2; j++)
{
@ -607,6 +980,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
{
if (errorHandler(cb))
continue;
err << "Error defining op: " << cb->connection->getNdbError() << endl;
exitHandler();
}
@ -679,30 +1053,28 @@ bool BackupRestore::errorHandler(restore_callback_t *cb)
switch(error.status)
{
case NdbError::Success:
err << "Success error: " << error << endl;
return false;
// ERROR!
break;
case NdbError::TemporaryError:
err << "Temporary error: " << error << endl;
NdbSleep_MilliSleep(sleepTime);
return true;
// RETRY
break;
case NdbError::UnknownResult:
err << error << endl;
err << "Unknown: " << error << endl;
return false;
// ERROR!
break;
default:
case NdbError::PermanentError:
//ERROR
err << error << endl;
err << "Permanent: " << error << endl;
return false;
break;
}
err << "No error status" << endl;
return false;
}
@ -736,6 +1108,35 @@ BackupRestore::endOfTuples()
tuple_free();
}
static bool use_part_id(const NdbDictionary::Table *table)
{
if (table->getDefaultNoPartitionsFlag() &&
(table->getFragmentType() == NdbDictionary::Object::UserDefined))
return false;
else
return true;
}
static Uint32 get_part_id(const NdbDictionary::Table *table,
Uint32 hash_value)
{
Uint32 no_frags = table->getFragmentCount();
if (table->getLinearFlag())
{
Uint32 part_id;
Uint32 mask = 1;
while (no_frags > mask) mask <<= 1;
mask--;
part_id = hash_value & mask;
if (part_id >= no_frags)
part_id = hash_value & (mask >> 1);
return part_id;
}
else
return (hash_value % no_frags);
}
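The linear-hash branch deserves a worked example; a minimal standalone sketch (hash values chosen purely for illustration):

/* Mirrors the linear branch of get_part_id(); compile separately to verify. */
#include <assert.h>

static unsigned linear_part_id(unsigned hash_value, unsigned no_frags)
{
  unsigned mask = 1;
  while (no_frags > mask)            /* smallest power of two >= no_frags */
    mask <<= 1;
  mask--;
  unsigned part_id = hash_value & mask;
  if (part_id >= no_frags)           /* fold back into the valid range */
    part_id = hash_value & (mask >> 1);
  return part_id;
}

int main(void)
{
  /* 6 fragments -> mask = 7: 13 & 7 = 5 is valid; 14 & 7 = 6 folds to 14 & 3 = 2 */
  assert(linear_part_id(13, 6) == 5);
  assert(linear_part_id(14, 6) == 2);
  return 0;
}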
void
BackupRestore::logEntry(const LogEntry & tup)
{
@ -782,6 +1183,18 @@ BackupRestore::logEntry(const LogEntry & tup)
exitHandler();
} // if
if (table->getFragmentType() == NdbDictionary::Object::UserDefined)
{
if (table->getDefaultNoPartitionsFlag())
{
const AttributeS * attr = tup[tup.size()-1];
Uint32 hash_value = *(Uint32*)attr->Data.string_value;
op->setPartitionId(get_part_id(table, hash_value));
}
else
op->setPartitionId(tup.m_frag_id);
}
Bitmask<4096> keys;
for (Uint32 i= 0; i < tup.size(); i++)
{

View file

@ -19,12 +19,15 @@
#include "consumer.hpp"
bool map_nodegroups(Uint16 *ng_array, Uint32 no_parts);
struct restore_callback_t {
class BackupRestore *restore;
class TupleS tup;
class NdbTransaction *connection;
int retries;
int error_code;
Uint32 fragId;
restore_callback_t *next;
};
@ -32,10 +35,14 @@ struct restore_callback_t {
class BackupRestore : public BackupConsumer
{
public:
BackupRestore(Uint32 parallelism=1)
BackupRestore(NODE_GROUP_MAP *ng_map,
uint ng_map_len,
Uint32 parallelism=1)
{
m_ndb = 0;
m_cluster_connection = 0;
m_nodegroup_map = ng_map;
m_nodegroup_map_len = ng_map_len;
m_logCount = m_dataCount = 0;
m_restore = false;
m_restore_meta = false;
@ -54,7 +61,7 @@ public:
virtual bool object(Uint32 type, const void* ptr);
virtual bool table(const TableS &);
virtual bool endOfTables();
virtual void tuple(const TupleS &);
virtual void tuple(const TupleS &, Uint32 fragId);
virtual void tuple_free();
virtual void tuple_a(restore_callback_t *cb);
virtual void cback(int result, restore_callback_t *cb);
@ -66,6 +73,15 @@ public:
virtual bool finalize_table(const TableS &);
virtual bool update_apply_status(const RestoreMetaData &metaData);
void connectToMysql();
bool map_in_frm(char *new_data, const char *data,
uint data_len, uint *new_data_len);
bool search_replace(char *search_str, char **new_data,
const char **data, const char *end_data,
uint *new_data_len);
bool map_nodegroups(Uint16 *ng_array, Uint32 no_parts);
Uint32 map_ng(Uint32 ng);
bool translate_frm(NdbDictionary::Table *table);
Ndb * m_ndb;
Ndb_cluster_connection * m_cluster_connection;
bool m_restore;

View file

@ -211,7 +211,7 @@ BackupRestore::table(const TableS & table){
return true;
}
void BackupRestore::tuple(const TupleS & tup)
void BackupRestore::tuple(const TupleS & tup, Uint32 fragId)
{
if (!m_restore)
{
@ -225,6 +225,7 @@ void BackupRestore::tuple(const TupleS & tup)
{
m_free_callback = cb->next;
cb->retries = 0;
cb->fragId = fragId;
cb->tup = &tup;
tuple_a(cb);
}

View file

@ -0,0 +1,35 @@
/* Copyright (C) 2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/**
* @file ndb_nodegroup_map.h
*
* Declarations of data types for node group map
*/
#ifndef NDB_NODEGROUP_MAP_H
#define NDB_NODEGROUP_MAP_H
#define MAX_MAPS_PER_NODE_GROUP 4
#define MAX_NODE_GROUP_MAPS 128
typedef struct node_group_map
{
uint no_maps;
uint curr_index;
uint16 map_array[MAX_MAPS_PER_NODE_GROUP];
} NODE_GROUP_MAP;
#endif
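A hedged illustration of how the structure fills in (option string assumed; the parsing itself lives in restore_main.cpp below):

/* --ndb-nodegroup-map '(0,1)(0,2)' yields for source node group 0:
     no_maps   = 2
     map_array = { 1, 2, UNDEF_NODEGROUP, UNDEF_NODEGROUP }
   map_ng(0) then hands out 1, 2, 1, 2, ... round-robin via curr_index. */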

View file

@ -19,6 +19,7 @@
#include <Vector.hpp>
#include <ndb_limits.h>
#include <NdbTCP.h>
#include <NdbMem.h>
#include <NdbOut.hpp>
#include <NDBT_ReturnCodes.h>
@ -37,6 +38,11 @@ static Vector<class BackupConsumer *> g_consumers;
static const char* ga_backupPath = "." DIR_SEPARATOR;
static const char *opt_nodegroup_map_str= 0;
static unsigned opt_nodegroup_map_len= 0;
static NODE_GROUP_MAP opt_nodegroup_map[MAX_NODE_GROUP_MAPS];
#define OPT_NDB_NODEGROUP_MAP 'z'
NDB_STD_OPTS_VARS;
/**
@ -107,9 +113,125 @@ static struct my_option my_long_options[] =
"Experimental. Do not ignore system table during restore.",
(gptr*) &ga_dont_ignore_systab_0, (gptr*) &ga_dont_ignore_systab_0, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "ndb-nodegroup-map", OPT_NDB_NODEGROUP_MAP,
"Nodegroup map for ndbcluster. Syntax: list of (source_ng, dest_ng)",
(gptr*) &opt_nodegroup_map_str,
(gptr*) &opt_nodegroup_map_str,
0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
static char* analyse_one_map(char *map_str, uint16 *source, uint16 *dest)
{
char *end_ptr;
int number;
DBUG_ENTER("analyse_one_map");
/*
Search for pattern ( source_ng , dest_ng )
*/
while (isspace(*map_str)) map_str++;
if (*map_str != '(')
{
DBUG_RETURN(NULL);
}
map_str++;
while (isspace(*map_str)) map_str++;
number= strtol(map_str, &end_ptr, 10);
  if (end_ptr == map_str || number < 0 || number >= MAX_NODE_GROUP_MAPS)
{
DBUG_RETURN(NULL);
}
*source= (uint16)number;
map_str= end_ptr;
while (isspace(*map_str)) map_str++;
if (*map_str != ',')
{
DBUG_RETURN(NULL);
}
map_str++;
number= strtol(map_str, &end_ptr, 10);
  if (end_ptr == map_str || number < 0 || number >= UNDEF_NODEGROUP)
{
DBUG_RETURN(NULL);
}
*dest= (uint16)number;
  map_str= end_ptr;
  while (isspace(*map_str)) map_str++;
  if (*map_str != ')')
{
DBUG_RETURN(NULL);
}
map_str++;
while (isspace(*map_str)) map_str++;
DBUG_RETURN(map_str);
}
static bool insert_ng_map(NODE_GROUP_MAP *ng_map,
uint16 source_ng, uint16 dest_ng)
{
uint index= source_ng;
uint ng_index= ng_map[index].no_maps;
  printf("New node group map for source %u index %u\n",index,ng_index);
  if (ng_index >= MAX_MAPS_PER_NODE_GROUP)
    return true;
  opt_nodegroup_map_len++;
ng_map[index].no_maps++;
ng_map[index].map_array[ng_index]= dest_ng;
return false;
}
static void init_nodegroup_map()
{
uint i,j;
NODE_GROUP_MAP *ng_map = &opt_nodegroup_map[0];
for (i = 0; i < MAX_NODE_GROUP_MAPS; i++)
{
ng_map[i].no_maps= 0;
for (j= 0; j < MAX_MAPS_PER_NODE_GROUP; j++)
ng_map[i].map_array[j]= UNDEF_NODEGROUP;
}
}
static bool analyse_nodegroup_map(const char *ng_map_str,
NODE_GROUP_MAP *ng_map)
{
uint16 source_ng, dest_ng;
char *local_str= (char*)ng_map_str;
DBUG_ENTER("analyse_nodegroup_map");
do
{
if (!local_str)
{
DBUG_RETURN(TRUE);
}
local_str= analyse_one_map(local_str, &source_ng, &dest_ng);
if (!local_str)
{
DBUG_RETURN(TRUE);
}
if (insert_ng_map(ng_map, source_ng, dest_ng))
{
DBUG_RETURN(TRUE);
}
if (!(*local_str))
break;
} while (TRUE);
DBUG_RETURN(FALSE);
}
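A usage sketch of the option end to end (node ids, backup id, and path are hypothetical):

/* ndb_restore -n 1 -b 1 -m --ndb-nodegroup-map '(0,2)(1,3)' BACKUP-1
   analyse_nodegroup_map() consumes the string pair by pair:
     "(0,2)" -> insert_ng_map(ng_map, 0, 2)
     "(1,3)" -> insert_ng_map(ng_map, 1, 3)
   and returns FALSE (success) once the terminating NUL is reached.     */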
static void short_usage_sub(void)
{
printf("Usage: %s [OPTIONS] [<path to backup files>]\n", my_progname);
@ -136,6 +258,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
printf("Error in --nodeid,-n setting, see --help\n");
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
ndbout << "Nodeid = " << ga_nodeId << endl;
break;
case 'b':
if (ga_backupId == 0)
@ -143,6 +266,20 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
printf("Error in --backupid,-b setting, see --help\n");
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
ndbout << "Backup Id = " << ga_backupId << endl;
break;
case OPT_NDB_NODEGROUP_MAP:
/*
This option is used to set a map from nodegroup in original cluster
to nodegroup in new cluster.
*/
opt_nodegroup_map_len= 0;
ndbout << "Analyse node group map" << endl;
if (analyse_nodegroup_map(opt_nodegroup_map_str,
&opt_nodegroup_map[0]))
{
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
break;
}
return 0;
@ -150,18 +287,55 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
bool
readArguments(int *pargc, char*** pargv)
{
Uint32 i;
ndbout << "Load defaults" << endl;
const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 };
init_nodegroup_map();
load_defaults("my",load_default_groups,pargc,pargv);
ndbout << "handle_options" << endl;
if (handle_options(pargc, pargv, my_long_options, get_one_option))
{
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
for (i = 0; i < MAX_NODE_GROUP_MAPS; i++)
opt_nodegroup_map[i].curr_index = 0;
BackupPrinter* printer = new BackupPrinter();
#if 0
/*
    Test code written to verify nodegroup mapping
*/
printf("Handled options successfully\n");
Uint16 map_ng[16];
Uint32 j;
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4 ; i++)
map_ng[i] = i;
map_nodegroups(&map_ng[0], (Uint32)4);
for (i = 0; i < 4 ; i++)
printf("NG %u mapped to %u \n", i, map_ng[i]);
}
for (j = 0; j < 4; j++)
{
for (i = 0; i < 8 ; i++)
map_ng[i] = i >> 1;
map_nodegroups(&map_ng[0], (Uint32)8);
for (i = 0; i < 8 ; i++)
printf("NG %u mapped to %u \n", i >> 1, map_ng[i]);
}
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
#endif
BackupPrinter* printer = new BackupPrinter(opt_nodegroup_map,
opt_nodegroup_map_len);
if (printer == NULL)
return false;
BackupRestore* restore = new BackupRestore(ga_nParallelism);
BackupRestore* restore = new BackupRestore(opt_nodegroup_map,
opt_nodegroup_map_len,
ga_nParallelism);
if (restore == NULL)
{
delete printer;
@ -225,7 +399,7 @@ readArguments(int *pargc, char*** pargv)
{
ga_backupPath = *pargv[0];
}
ndbout << "backup path = " << ga_backupPath << endl;
return true;
}
@ -271,6 +445,7 @@ main(int argc, char** argv)
{
NDB_INIT(argv[0]);
ndbout << "Start readArguments" << endl;
if (!readArguments(&argc, &argv))
{
exitHandler(NDBT_FAILED);
@ -281,6 +456,7 @@ main(int argc, char** argv)
/**
* we must always load meta data, even if we will only print it to stdout
*/
ndbout << "Start restoring meta data" << endl;
RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId);
if (!metaData.readHeader())
{
@ -298,6 +474,7 @@ main(int argc, char** argv)
/**
* check wheater we can restore the backup (right version).
*/
ndbout << "Load content" << endl;
int res = metaData.loadContent();
if (res == 0)
@ -305,20 +482,20 @@ main(int argc, char** argv)
ndbout_c("Restore: Failed to load content");
exitHandler(NDBT_FAILED);
}
ndbout << "Get no of Tables" << endl;
if (metaData.getNoOfTables() == 0)
{
ndbout_c("Restore: The backup contains no tables ");
exitHandler(NDBT_FAILED);
}
ndbout << "Validate Footer" << endl;
if (!metaData.validateFooter())
{
ndbout_c("Restore: Failed to validate footer.");
exitHandler(NDBT_FAILED);
}
ndbout << "Init Backup objects" << endl;
Uint32 i;
for(i= 0; i < g_consumers.size(); i++)
{
@ -329,7 +506,7 @@ main(int argc, char** argv)
}
}
ndbout << "Restore objects (tablespaces, ..)" << endl;
for(i = 0; i<metaData.getNoOfObjects(); i++)
{
for(Uint32 j= 0; j < g_consumers.size(); j++)
@ -342,7 +519,7 @@ main(int argc, char** argv)
exitHandler(NDBT_FAILED);
}
}
ndbout << "Restoring tables" << endl;
for(i = 0; i<metaData.getNoOfTables(); i++)
{
if (checkSysTable(metaData[i]->getTableName()))
@ -357,14 +534,14 @@ main(int argc, char** argv)
}
}
}
ndbout << "Close tables" << endl;
for(i= 0; i < g_consumers.size(); i++)
if (!g_consumers[i]->endOfTables())
{
ndbout_c("Restore: Failed while closing tables");
exitHandler(NDBT_FAILED);
}
ndbout << "Iterate over data" << endl;
if (ga_restore || ga_print)
{
if(_restore_data || _print_data)
@ -378,15 +555,15 @@ main(int argc, char** argv)
exitHandler(NDBT_FAILED);
}
while (dataIter.readFragmentHeader(res= 0))
Uint32 fragmentId;
while (dataIter.readFragmentHeader(res= 0, &fragmentId))
{
const TupleS* tuple;
while ((tuple = dataIter.getNextTuple(res= 1)) != 0)
{
if (checkSysTable(tuple->getTable()->getTableName()))
for(Uint32 i= 0; i < g_consumers.size(); i++)
g_consumers[i]->tuple(* tuple);
g_consumers[i]->tuple(* tuple, fragmentId);
} // while (tuple != NULL);
if (res < 0)
@ -426,11 +603,14 @@ main(int argc, char** argv)
}
const LogEntry * logEntry = 0;
while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
bool alloc_flag = false;
while ((logEntry = logIter.getNextLogEntry(res= 0, &alloc_flag)) != 0)
{
if (checkSysTable(logEntry->m_table->getTableName()))
for(Uint32 i= 0; i < g_consumers.size(); i++)
g_consumers[i]->logEntry(* logEntry);
      if (alloc_flag)
      {
        NdbMem_Free((void*)logEntry);
        alloc_flag = false;
      }
}
if (res < 0)
{