Rename hash functions to avoid conflict with mysql
git-svn-id: file:///svn/tokudb@38 c7de825b-a66e-492c-adef-691d508d4ae1
commit e6e8a2aebc
parent 0243ed5122
7 changed files with 56 additions and 56 deletions
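The change itself is mechanical: every exported symbol of the hashtable module gains a toku_ prefix (hash_insert becomes toku_hash_insert, hashtable_free becomes toku_hashtable_free, and so on), and every declaration and call site is edited to match. The commit title suggests the motive: mysqld historically exported unprefixed hash functions of its own, so linking this code into the same binary risks duplicate-symbol errors, and a unique prefix gives the library its own namespace. For illustration only, a hypothetical compatibility shim like the one below could have mapped the old spellings onto the new symbols during a transition; the commit does not do this, it renames each use directly.

/* hashtable-compat.h -- hypothetical shim, for illustration; not part of this commit. */
#ifndef HASHTABLE_COMPAT_H
#define HASHTABLE_COMPAT_H
#define hashtable_create      toku_hashtable_create
#define hash_find             toku_hash_find
#define hash_insert           toku_hash_insert
#define hash_delete           toku_hash_delete
#define hashtable_free        toku_hashtable_free
#define hashtable_clear       toku_hashtable_clear
#define hashtable_n_entries   toku_hashtable_n_entries
#define hashtable_random_pick toku_hashtable_random_pick
#define hashtable_iterate     toku_hashtable_iterate
#endif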
@@ -25,14 +25,14 @@ ybt.o: ybt.h brttypes.h
 ybt-test: ybt-test.o ybt.o memory.o
 cachetable.o: cachetable.h
 brt-test: ybt.o brt.o hashtable.o pma.o memory.o brt-serialize.o cachetable.o header-io.o ybt.o key.o
-brt-test.o brt.o: brt.h cachetable.h brttypes.h
+brt-test.o brt.o: brt.h hashtable.h pma.h brttypes.h
 brt-serialize-test.o: pma.h yerror.h brt.h memory.h hashtable.h brttypes.h brt-internal.h
-brt.o: brt.h mdict.h pma.h brttypes.h memory.h brt-internal.h cachetable.h
+brt.o: brt.h mdict.h pma.h brttypes.h memory.h brt-internal.h cachetable.h hashtable.h
 mdict.o: pma.h
 hashtable.o: hashtable.h brttypes.h memory.h key.h yerror.h ../include/ydb-constants.h
 memory.o: memory.h
 hashtest: hashtable.o memory.o
-brt-serialize.o: brt.h cachetable.h memory.h mdict.h pma.h brttypes.h brt-internal.h
+brt-serialize.o: brt.h cachetable.h memory.h mdict.h pma.h brttypes.h brt-internal.h hashtable.h
 header-io.o: brttypes.h brt-internal.h memory.h
 mdict-test: hashtable.o pma.o memory.o

@@ -23,11 +23,11 @@ void test_serialize(void) {
 sn.u.n.totalchildkeylens = 6;
 sn.u.n.children[0] = sn.nodesize*30;
 sn.u.n.children[1] = sn.nodesize*35;
-r = hashtable_create(&sn.u.n.htables[0]); assert(r==0);
-r = hashtable_create(&sn.u.n.htables[1]); assert(r==0);
-r = hash_insert(sn.u.n.htables[0], "a", 2, "aval", 5); assert(r==0);
-r = hash_insert(sn.u.n.htables[0], "b", 2, "bval", 5); assert(r==0);
-r = hash_insert(sn.u.n.htables[1], "x", 2, "xval", 5); assert(r==0);
+r = toku_hashtable_create(&sn.u.n.htables[0]); assert(r==0);
+r = toku_hashtable_create(&sn.u.n.htables[1]); assert(r==0);
+r = toku_hash_insert(sn.u.n.htables[0], "a", 2, "aval", 5); assert(r==0);
+r = toku_hash_insert(sn.u.n.htables[0], "b", 2, "bval", 5); assert(r==0);
+r = toku_hash_insert(sn.u.n.htables[1], "x", 2, "xval", 5); assert(r==0);
 sn.u.n.n_bytes_in_hashtables = 3*(KEY_VALUE_OVERHEAD+2+5);

 serialize_brtnode_to(fd, sn.nodesize*20, sn.nodesize, &sn);
@@ -43,17 +43,17 @@ void test_serialize(void) {
 assert(dn->u.n.children[1]==sn.nodesize*35);
 {
 bytevec data; ITEMLEN datalen;
-int r = hash_find(dn->u.n.htables[0], "a", 2, &data, &datalen);
+int r = toku_hash_find(dn->u.n.htables[0], "a", 2, &data, &datalen);
 assert(r==0);
 assert(strcmp(data,"aval")==0);
 assert(datalen==5);

-r=hash_find(dn->u.n.htables[0], "b", 2, &data, &datalen);
+r=toku_hash_find(dn->u.n.htables[0], "b", 2, &data, &datalen);
 assert(r==0);
 assert(strcmp(data,"bval")==0);
 assert(datalen==5);

-r=hash_find(dn->u.n.htables[1], "x", 2, &data, &datalen);
+r=toku_hash_find(dn->u.n.htables[1], "x", 2, &data, &datalen);
 assert(r==0);
 assert(strcmp(data,"xval")==0);
 assert(datalen==5);
@@ -161,7 +161,7 @@ void serialize_brtnode_to(int fd, diskoff off, diskoff size, BRTNODE node) {
 int n_hash_tables = node->u.n.n_children;
 for (i=0; i< n_hash_tables; i++) {
 //printf("%s:%d p%d=%p n_entries=%d\n", __FILE__, __LINE__, i, node->mdicts[i], mdict_n_entries(node->mdicts[i]));
-wbuf_int(&w, hashtable_n_entries(node->u.n.htables[i]));
+wbuf_int(&w, toku_hashtable_n_entries(node->u.n.htables[i]));
 HASHTABLE_ITERATE(node->u.n.htables[i], key, keylen, data, datalen,
 (wbuf_bytes(&w, key, keylen),
 wbuf_bytes(&w, data, datalen)));
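The hunk above writes each child's buffer as an entry count followed by its key/value pairs, each emitted with wbuf_bytes. The wbuf writer itself is not part of this diff, so the exact byte encoding is an assumption; the sketch below shows the shape of such a count-then-length-prefixed-pairs layout, with hypothetical stand-ins for wbuf_int and wbuf_bytes.

/* layout-sketch.c -- hypothetical encoder; assumes a 4-byte big-endian
 * count, then each key and value as a 4-byte length followed by its bytes. */
#include <stdint.h>
#include <stdio.h>

static void put_u32 (FILE *f, uint32_t v) {
    unsigned char b[4] = { v >> 24, v >> 16, v >> 8, v };
    fwrite(b, 1, 4, f);
}

static void put_bytes (FILE *f, const void *p, uint32_t len) {
    put_u32(f, len);                 /* length prefix */
    fwrite(p, 1, len, f);            /* payload */
}

int main (void) {
    FILE *f = fopen("buffer.bin", "wb");
    if (f == NULL) return 1;
    put_u32(f, 2);                                  /* n_entries for one child */
    put_bytes(f, "a", 2); put_bytes(f, "aval", 5);  /* key, then data */
    put_bytes(f, "b", 2); put_bytes(f, "bval", 5);
    fclose(f);
    return 0;
}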
@@ -257,11 +257,11 @@ int deserialize_brtnode_from (int fd, diskoff off, BRTNODE *brtnode, int nodesiz
 }
 result->u.n.n_bytes_in_hashtables = 0;
 for (i=0; i<result->u.n.n_children; i++) {
-int r=hashtable_create(&result->u.n.htables[i]);
+int r=toku_hashtable_create(&result->u.n.htables[i]);
 if (r!=0) {
 int j;
 if (0) { died_12: j=result->u.n.n_bytes_in_hashtables; }
-for (j=0; j<i; j++) hashtable_free(&result->u.n.htables[j]);
+for (j=0; j<i; j++) toku_hashtable_free(&result->u.n.htables[j]);
 goto died1;
 }
 }
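The if (0) { died_12: ... } line above is a goto-based cleanup idiom: the label sits inside an if (0) block, so the normal path never runs that cleanup, while failure sites further down the function can jump back into it and fall through the remaining unwind steps. A distilled sketch of the pattern, with hypothetical names:

/* cleanup-sketch.c -- the if(0)-label unwind idiom in isolation. */
#include <assert.h>
#include <stdlib.h>

static int build_three (void **a, void **b, void **c) {
    if ((*a = malloc(16)) == NULL) goto died0;
    if ((*b = malloc(16)) == NULL) goto died1;
    if ((*c = malloc(16)) == NULL) goto died2;
    return 0;
    if (0) { died2: free(*b); }   /* reachable only by goto; falls through */
 died1:
    free(*a);
 died0:
    return -1;
}

int main (void) {
    void *a, *b, *c;
    assert(build_three(&a, &b, &c) == 0);
    free(a); free(b); free(c);
    return 0;
}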
@@ -279,7 +279,7 @@ int deserialize_brtnode_from (int fd, diskoff off, BRTNODE *brtnode, int nodesiz
 rbuf_bytes(&rc, &val, &vallen);
 //printf("Found %s,%s\n", key, val);
 {
-int r=hash_insert(result->u.n.htables[cnum], key, keylen, val, vallen); /* Copies the data into the hash table. */
+int r=toku_hash_insert(result->u.n.htables[cnum], key, keylen, val, vallen); /* Copies the data into the hash table. */
 if (r!=0) { goto died_12; }
 }
 diff = keylen + vallen + KEY_VALUE_OVERHEAD;
newbrt/brt.c (38 changed lines)
@@ -47,7 +47,7 @@ void brtnode_free (BRTNODE node) {
 }
 for (i=0; i<node->u.n.n_children; i++) {
 if (node->u.n.htables[i]) {
-hashtable_free(&node->u.n.htables[i]);
+toku_hashtable_free(&node->u.n.htables[i]);
 }
 }
 } else {
@@ -248,7 +248,7 @@ void delete_node (BRT t, BRTNODE node) {
 } else {
 for (i=0; i<node->u.n.n_children; i++) {
 if (node->u.n.htables[i]) {
-hashtable_free(&node->u.n.htables[i]);
+toku_hashtable_free(&node->u.n.htables[i]);
 }
 node->u.n.n_bytes_in_hashtable[0]=0;
 }
@@ -271,7 +271,7 @@ static void insert_to_buffer_in_leaf (BRTNODE node, DBT *k, DBT *v, DB *db) {

 static int insert_to_hash_in_nonleaf (BRTNODE node, int childnum, DBT *k, DBT *v) {
 unsigned int n_bytes_added = KEY_VALUE_OVERHEAD + k->size + v->size;
-int r = hash_insert(node->u.n.htables[childnum], k->data, k->size, v->data, v->size);
+int r = toku_hash_insert(node->u.n.htables[childnum], k->data, k->size, v->data, v->size);
 if (r!=0) return r;
 node->u.n.n_bytes_in_hashtable[childnum] += n_bytes_added;
 node->u.n.n_bytes_in_hashtables += n_bytes_added;
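insert_to_hash_in_nonleaf keeps two counters in step: the per-child n_bytes_in_hashtable[childnum] and the node-wide n_bytes_in_hashtables, each charged KEY_VALUE_OVERHEAD plus the key and value sizes for every buffered pair (the sum check in push_some_kvpairs_down below relies on this invariant). A minimal sketch of the accounting, with an assumed struct shape and overhead value standing in for the real definitions:

/* counts-sketch.c -- illustrates the two-counter invariant only. */
#include <assert.h>

#define KEY_VALUE_OVERHEAD 8   /* assumed value; the real constant is defined elsewhere */

struct node_counts {
    int per_child[4];          /* stands in for node->u.n.n_bytes_in_hashtable[] */
    int total;                 /* stands in for node->u.n.n_bytes_in_hashtables */
};

static void charge (struct node_counts *n, int childnum, int keylen, int vallen) {
    int added = KEY_VALUE_OVERHEAD + keylen + vallen;
    n->per_child[childnum] += added;   /* per-child buffer size */
    n->total += added;                 /* node-wide aggregate */
}

int main (void) {
    struct node_counts n = {{0}, 0};
    charge(&n, 0, 2, 5);   /* e.g. key "a" (2 bytes with NUL) and "aval" (5 bytes) */
    charge(&n, 1, 2, 5);
    assert(n.total == n.per_child[0] + n.per_child[1]);
    return 0;
}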
@@ -534,7 +534,7 @@ static int push_a_kvpair_down (BRT t, BRTNODE node, BRTNODE child, int childnum,

 //if (debug) printf("%s:%d %*sinserted down child_did_split=%d\n", __FILE__, __LINE__, debug, "", child_did_split);
 {
-int r = hash_delete(node->u.n.htables[childnum], k->data, k->size); // Must delete after doing the insert, to avoid operating on freed' key
+int r = toku_hash_delete(node->u.n.htables[childnum], k->data, k->size); // Must delete after doing the insert, to avoid operating on freed' key
 //printf("%s:%d deleted status=%d\n", __FILE__, __LINE__, r);
 if (r!=0) return r;
 }
@@ -586,8 +586,8 @@ static int handle_split_of_child (BRT t, BRTNODE node, int childnum,
 }
 node->u.n.children[childnum] = childa->thisnodename;
 node->u.n.children[childnum+1] = childb->thisnodename;
-hashtable_create(&node->u.n.htables[childnum]);
-hashtable_create(&node->u.n.htables[childnum+1]);
+toku_hashtable_create(&node->u.n.htables[childnum]);
+toku_hashtable_create(&node->u.n.htables[childnum+1]);
 node->u.n.n_bytes_in_hashtable[childnum] = 0;
 node->u.n.n_bytes_in_hashtable[childnum+1] = 0;
 // Slide the keys over
@@ -620,7 +620,7 @@ static int handle_split_of_child (BRT t, BRTNODE node, int childnum,
 }
 if (r!=0) return r;
 }));
-hashtable_free(&old_h);
+toku_hashtable_free(&old_h);

 r=cachetable_unpin(t->cf, childa->thisnodename, 1);
 assert(r==0);
@@ -681,8 +681,8 @@ static int push_some_kvpairs_down (BRT t, BRTNODE node, int childnum,
 bytevec key,val;
 ITEMLEN keylen, vallen;
 //printf("%s:%d Try random_pick, weight=%d \n", __FILE__, __LINE__, node->u.n.n_bytes_in_hashtable[childnum]);
-assert(hashtable_n_entries(node->u.n.htables[childnum])>0);
-while(0==hashtable_random_pick(node->u.n.htables[childnum], &key, &keylen, &val, &vallen)) {
+assert(toku_hashtable_n_entries(node->u.n.htables[childnum])>0);
+while(0==toku_hashtable_random_pick(node->u.n.htables[childnum], &key, &keylen, &val, &vallen)) {
 int child_did_split=0; BRTNODE childa, childb;
 DBT hk,hv;
 DBT childsplitk;
@@ -704,7 +704,7 @@ static int push_some_kvpairs_down (BRT t, BRTNODE node, int childnum,
 printf("%s:%d sum=%d\n", __FILE__, __LINE__, sum);
 assert(sum==node->u.n.n_bytes_in_hashtable[childnum]);
 }
-if (node->u.n.n_bytes_in_hashtable[childnum]>0) assert(hashtable_n_entries(node->u.n.htables[childnum])>0);
+if (node->u.n.n_bytes_in_hashtable[childnum]>0) assert(toku_hashtable_n_entries(node->u.n.htables[childnum])>0);
 //printf("%s:%d %d=push_a_kvpair_down=(); child_did_split=%d (weight=%d)\n", __FILE__, __LINE__, r, child_did_split, node->u.n.n_bytes_in_hashtable[childnum]);
 if (r!=0) return r;
 if (child_did_split) {
@@ -820,7 +820,7 @@ static int brt_nonleaf_insert (BRT t, BRTNODE node, DBT *k, DBT *v,
 bytevec olddata;
 ITEMLEN olddatalen;
 unsigned int childnum = brtnode_which_child(node, k, t, db);
-int found = !hash_find(node->u.n.htables[childnum], k->data, k->size, &olddata, &olddatalen);
+int found = !toku_hash_find(node->u.n.htables[childnum], k->data, k->size, &olddata, &olddatalen);

 if (0) { // It is faster to do this, except on yobiduck where things grind to a halt.
 void *child_v;
@@ -830,7 +830,7 @@ static int brt_nonleaf_insert (BRT t, BRTNODE node, DBT *k, DBT *v,
 BRTNODE child = child_v;
 if (found) {
 int diff = k->size + olddatalen + KEY_VALUE_OVERHEAD;
-int r = hash_delete(node->u.n.htables[childnum], k->data, k->size);
+int r = toku_hash_delete(node->u.n.htables[childnum], k->data, k->size);
 assert(r==0);
 node->u.n.n_bytes_in_hashtables -= diff;
 node->u.n.n_bytes_in_hashtable[childnum] -= diff;
@@ -860,7 +860,7 @@ static int brt_nonleaf_insert (BRT t, BRTNODE node, DBT *k, DBT *v,
 if (debug) printf("%s:%d %*sDoing hash_insert\n", __FILE__, __LINE__, debug, "");
 verify_counts(node);
 if (found) {
-int r = hash_delete(node->u.n.htables[childnum], k->data, k->size);
+int r = toku_hash_delete(node->u.n.htables[childnum], k->data, k->size);
 int diff = k->size + olddatalen + KEY_VALUE_OVERHEAD;
 assert(r==0);
 node->u.n.n_bytes_in_hashtables -= diff;
@@ -869,7 +869,7 @@ static int brt_nonleaf_insert (BRT t, BRTNODE node, DBT *k, DBT *v,
 }
 {
 int diff = k->size + v->size + KEY_VALUE_OVERHEAD;
-int r=hash_insert(node->u.n.htables[childnum], k->data, k->size, v->data, v->size);
+int r=toku_hash_insert(node->u.n.htables[childnum], k->data, k->size, v->data, v->size);
 assert(r==0);
 node->u.n.n_bytes_in_hashtables += diff;
 node->u.n.n_bytes_in_hashtable[childnum] += diff;
@@ -1142,8 +1142,8 @@ int brt_insert (BRT brt, DBT *k, DBT *v, DB* db) {
 newroot->u.n.totalchildkeylens=splitk.size;
 newroot->u.n.children[0]=nodea->thisnodename;
 newroot->u.n.children[1]=nodeb->thisnodename;
-r=hashtable_create(&newroot->u.n.htables[0]); if (r!=0) return r;
-r=hashtable_create(&newroot->u.n.htables[1]); if (r!=0) return r;
+r=toku_hashtable_create(&newroot->u.n.htables[0]); if (r!=0) return r;
+r=toku_hashtable_create(&newroot->u.n.htables[1]); if (r!=0) return r;
 verify_counts(newroot);
 r=cachetable_unpin(brt->cf, nodea->thisnodename, 1); if (r!=0) return r;
 r=cachetable_unpin(brt->cf, nodeb->thisnodename, 1); if (r!=0) return r;
@@ -1191,7 +1191,7 @@ int brt_lookup_node (BRT brt, diskoff off, DBT *k, DBT *v, DB *db) {
 {
 bytevec hanswer;
 ITEMLEN hanswerlen;
-if (hash_find (node->u.n.htables[childnum], k->data, k->size, &hanswer, &hanswerlen)==0) {
+if (toku_hash_find (node->u.n.htables[childnum], k->data, k->size, &hanswer, &hanswerlen)==0) {
 //printf("Found %d bytes\n", *vallen);
 ybt_set_value(v, hanswer, hanswerlen, &brt->sval);
 //printf("%s:%d Returning %p\n", __FILE__, __LINE__, v->data);
@@ -1255,7 +1255,7 @@ int dump_brtnode (BRT brt, diskoff off, int depth, bytevec lorange, ITEMLEN lole
 {
 int i;
 for (i=0; i< node->u.n.n_children-1; i++) {
-printf("%*schild %d buffered (%d entries):\n", depth+1, "", i, hashtable_n_entries(node->u.n.htables[i]));
+printf("%*schild %d buffered (%d entries):\n", depth+1, "", i, toku_hashtable_n_entries(node->u.n.htables[i]));
 HASHTABLE_ITERATE(node->u.n.htables[i], key, keylen, data, datalen,
 ({
 printf("%*s %s %s\n", depth+2, "", (char*)key, (char*)data);
@@ -1376,7 +1376,7 @@ int verify_brtnode (BRT brt, diskoff off, bytevec lorange, ITEMLEN lolen, byteve
 result=1;
 }
 }
-hashtable_iterate(node->u.n.htables[i], verify_pair, 0);
+toku_hashtable_iterate(node->u.n.htables[i], verify_pair, 0);
 }
 }
 for (i=0; i<node->u.n.n_children; i++) {

@@ -10,7 +10,7 @@
 #include "key.h"
 #include "yerror.h"

-int hashtable_create (HASHTABLE *h) {
+int toku_hashtable_create (HASHTABLE *h) {
 HASHTABLE MALLOC(tab);
 int i;
 if (tab==0) return -1;
@@ -51,7 +51,7 @@ static void hash_find_internal (HASHTABLE tab, const char *key, ITEMLEN keylen,
 *hashelt = 0;
 }

-int hash_find (HASHTABLE tab, bytevec key, ITEMLEN keylen, bytevec *data, ITEMLEN *datalen) {
+int toku_hash_find (HASHTABLE tab, bytevec key, ITEMLEN keylen, bytevec *data, ITEMLEN *datalen) {
 HASHELT he, *prev_ptr;
 hash_find_internal(tab, key, keylen, &he, &prev_ptr);
 if (he==0) {
@@ -64,7 +64,7 @@ int hash_find (HASHTABLE tab, bytevec key, ITEMLEN keylen, bytevec *data, ITEMLE
 }


-int hash_insert (HASHTABLE tab, const char *key, ITEMLEN keylen, const char *val, ITEMLEN vallen)
+int toku_hash_insert (HASHTABLE tab, const char *key, ITEMLEN keylen, const char *val, ITEMLEN vallen)
 {
 unsigned int h = hash_key (key,keylen)%tab->arraysize;
 {
@@ -108,7 +108,7 @@ int hash_insert (HASHTABLE tab, const char *key, ITEMLEN keylen, const char *val
 }
 }

-int hash_delete (HASHTABLE tab, const char *key, ITEMLEN keylen) {
+int toku_hash_delete (HASHTABLE tab, const char *key, ITEMLEN keylen) {
 HASHELT he, *prev_ptr;
 //printf("%s:%d deleting %s (bucket %d)\n", __FILE__, __LINE__, key, hash_key(key,keylen)%tab->arraysize);
 hash_find_internal(tab, key, keylen, &he, &prev_ptr);
@@ -127,7 +127,7 @@ int hash_delete (HASHTABLE tab, const char *key, ITEMLEN keylen) {
 }


-int hashtable_random_pick(HASHTABLE h, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen) {
+int toku_hashtable_random_pick(HASHTABLE h, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen) {
 int i;
 for (i=0; i<h->arraysize; i++) {
 HASHELT he=h->array[i];
@@ -167,7 +167,7 @@ int hashtable_find_last(HASHTABLE h, bytevec *key, ITEMLEN *keylen, bytevec *dat
 }
 #endif

-void hashtable_iterate (HASHTABLE tab, void(*f)(bytevec key, ITEMLEN keylen, bytevec data, ITEMLEN datalen, void*args), void* args) {
+void toku_hashtable_iterate (HASHTABLE tab, void(*f)(bytevec key, ITEMLEN keylen, bytevec data, ITEMLEN datalen, void*args), void* args) {
 /*
 int i;
 for (i=0; i<tab->arraysize; i++) {
@@ -180,7 +180,7 @@ void hashtable_iterate (HASHTABLE tab, void(*f)(bytevec key, ITEMLEN keylen, byt
 HASHTABLE_ITERATE(tab, key, keylen, val, vallen, f(key,keylen,val,vallen,args));
 }

-int hashtable_n_entries(HASHTABLE tab) {
+int toku_hashtable_n_entries(HASHTABLE tab) {
 return tab->n_keys;
 }

@@ -196,9 +196,9 @@ static void hasheltlist_free (HASHELT elt) {
 }

 /* Frees the table, but doesn't do anything to the contents of the table. The keys are still alloc'd. The internal storage of the hashtable is freed. */
-void hashtable_free(HASHTABLE *tab) {
+void toku_hashtable_free(HASHTABLE *tab) {
 //printf("%s:%d free hashtable %p\n", __FILE__, __LINE__, tab);
-hashtable_clear(*tab);
+toku_hashtable_clear(*tab);
 //printf("%s:%d free %p\n", __FILE__, __LINE__, tab);
 toku_free((*tab)->array);
 toku_free(*tab);
@@ -206,7 +206,7 @@ void hashtable_free(HASHTABLE *tab) {
 }


-void hashtable_clear(HASHTABLE tab) {
+void toku_hashtable_clear(HASHTABLE tab) {
 int i;
 for (i=0; i<tab->arraysize; i++) {
 hasheltlist_free(tab->array[i]);

@@ -8,23 +8,23 @@

 typedef struct hashtable *HASHTABLE;

-int hashtable_create (HASHTABLE*);
+int toku_hashtable_create (HASHTABLE*);

 /* Return 0 if the key is found in the hashtable, -1 otherwise. */
 /* Warning: The data returned points to the internals of the hashtable. It is set to "const" to try to prevent you from messing it up. */
-int hash_find (HASHTABLE tab, bytevec key, ITEMLEN keylen, bytevec*data, ITEMLEN *datalen);
+int toku_hash_find (HASHTABLE tab, bytevec key, ITEMLEN keylen, bytevec*data, ITEMLEN *datalen);

 /* Replace the key if it was already there. */
-int hash_insert (HASHTABLE tab, const char *key, ITEMLEN keylen, const char *data, ITEMLEN datalen);
+int toku_hash_insert (HASHTABLE tab, const char *key, ITEMLEN keylen, const char *data, ITEMLEN datalen);

 /* It is OK to delete something that isn't there. */
-int hash_delete (HASHTABLE tab, const char *key, ITEMLEN keylen);
-void hashtable_free(HASHTABLE *tab);
-int hashtable_n_entries(HASHTABLE);
+int toku_hash_delete (HASHTABLE tab, const char *key, ITEMLEN keylen);
+void toku_hashtable_free(HASHTABLE *tab);
+int toku_hashtable_n_entries(HASHTABLE);

-void hashtable_clear(HASHTABLE);
+void toku_hashtable_clear(HASHTABLE);

-int hashtable_random_pick(HASHTABLE h, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen);
+int toku_hashtable_random_pick(HASHTABLE h, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen);
 //int hashtable_find_last(HASHTABLE h, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen);

 typedef struct hashelt *HASHELT;
@@ -41,7 +41,7 @@ struct hashtable {
 };

 /* You cannot add or delete elements from the hashtable while iterating. */
-void hashtable_iterate (HASHTABLE tab, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,ITEMLEN datalen,void*), void*);
+void toku_hashtable_iterate (HASHTABLE tab, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,ITEMLEN datalen,void*), void*);
 // If you don't want to use something, do something like use "key __attribute__((__unused__))" for keyvar.
 #define HASHTABLE_ITERATE(table,keyvar,keylenvar,datavar,datalenvar,body) ({ \
 int hi_counter; \
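Taken together, the declarations above are the module's public surface after the rename. Below is a minimal usage sketch against the new names, assuming hashtable.h and brttypes.h (for bytevec and ITEMLEN) from this tree are on the include path; key and value lengths include the terminating NUL, matching the tests in this commit.

/* usage-sketch.c -- hedged example of the renamed API. */
#include <assert.h>
#include <string.h>
#include "brttypes.h"
#include "hashtable.h"

int main (void) {
    HASHTABLE h;
    bytevec data; ITEMLEN datalen;
    int r;

    r = toku_hashtable_create(&h);               assert(r==0);
    r = toku_hash_insert(h, "a", 2, "aval", 5);  assert(r==0);
    assert(toku_hashtable_n_entries(h)==1);

    r = toku_hash_find(h, "a", 2, &data, &datalen);   /* 0 if found, -1 otherwise */
    assert(r==0 && datalen==5 && strcmp(data, "aval")==0);

    r = toku_hash_delete(h, "a", 2);             assert(r==0);
    assert(toku_hashtable_n_entries(h)==0);

    toku_hashtable_free(&h);   /* frees the table's internal storage */
    return 0;
}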
@@ -68,8 +68,8 @@ void test0 (void) {
 char*saw =malloc(sizeof(*saw)*n_ops);
 int data_n = 0;
 assert(data!=0);
-r = hashtable_create(&htable); assert(r==0);
-assert(hashtable_n_entries(htable)==0);
+r = toku_hashtable_create(&htable); assert(r==0);
+assert(toku_hashtable_n_entries(htable)==0);
 #if 0
 {
 bytevec kv=(void*)0xdeadbeef;
@@ -97,7 +97,7 @@ void test0 (void) {
 }
 snprintf(kv, 99, "k%d", ra);
 snprintf(dv, 99, "d%d", ra);
-hash_insert(htable, kv, strlen(kv)+1, dv, strlen(dv)+1);
+toku_hash_insert(htable, kv, strlen(kv)+1, dv, strlen(dv)+1);
 data[data_n++]=ra;
 }
 } else {