/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file include/hash0hash.h
The simple hash table utility
Created 5/20/1997 Heikki Tuuri
*******************************************************/
#ifndef hash0hash_h
#define hash0hash_h
#include "mem0mem.h"
#include "sync0rw.h"
struct hash_table_t;
struct hash_cell_t;
typedef void* hash_node_t;
/* Fix Bug #13859: symbol collision between imap/mysql */
#define hash_create hash0_create
/* Different types of hash_table, based on the synchronization
method used for it. */
enum hash_table_sync_t {
HASH_TABLE_SYNC_NONE = 0, /*!< Don't use any internal
synchronization objects for
this hash_table. */
HASH_TABLE_SYNC_MUTEX, /*!< Use mutexes to control
access to this hash_table. */
HASH_TABLE_SYNC_RW_LOCK /*!< Use rw_locks to control
access to this hash_table. */
};
/*************************************************************//**
Creates a hash table with >= n array cells. The actual number
of cells is chosen to be a prime number slightly bigger than n.
@return own: created table */
hash_table_t*
hash_create(
/*========*/
ulint n); /*!< in: number of array cells */
/*************************************************************//**
Creates a sync object array to protect a hash table.
::sync_obj can be mutexes or rw_locks depending on the type of
hash table. */
void
hash_create_sync_obj(
/*=================*/
hash_table_t* table, /*!< in: hash table */
hash_table_sync_t type, /*!< in: HASH_TABLE_SYNC_MUTEX
or HASH_TABLE_SYNC_RW_LOCK */
latch_id_t id, /*!< in: mutex/rw_lock ID */
ulint n_sync_obj);/*!< in: number of sync objects,
must be a power of 2 */
/*************************************************************//**
Frees a hash table. */
void
hash_table_free(
/*============*/
hash_table_t* table); /*!< in, own: hash table */
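/* Example (illustrative sketch, not part of the original interface): create a
table sized for roughly 1024 entries, attach protecting mutexes, and free it.
The variable id stands for a latch_id_t registered with the latching
subsystem; the number of sync objects must be a power of 2.

	hash_table_t*	table = hash_create(1024);
	hash_create_sync_obj(table, HASH_TABLE_SYNC_MUTEX, id, 8);
	...
	hash_table_free(table);
*/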
/**************************************************************//**
Calculates the hash value from a folded value.
@return hashed value */
UNIV_INLINE
ulint
hash_calc_hash(
/*===========*/
ulint fold, /*!< in: folded value */
hash_table_t* table); /*!< in: hash table */
/********************************************************************//**
Assert that the mutex for the table is held */
#define HASH_ASSERT_OWN(TABLE, FOLD) \
ut_ad((TABLE)->type != HASH_TABLE_SYNC_MUTEX \
|| (mutex_own(hash_get_mutex((TABLE), FOLD))));
/*******************************************************************//**
Inserts a struct to a hash table. */
#define HASH_INSERT(TYPE, NAME, TABLE, FOLD, DATA)\
do {\
hash_cell_t* cell3333;\
TYPE* struct3333;\
\
HASH_ASSERT_OWN(TABLE, FOLD)\
\
(DATA)->NAME = NULL;\
\
cell3333 = hash_get_nth_cell(TABLE, hash_calc_hash(FOLD, TABLE));\
\
if (cell3333->node == NULL) {\
cell3333->node = DATA;\
} else {\
struct3333 = (TYPE*) cell3333->node;\
\
while (struct3333->NAME != NULL) {\
\
struct3333 = (TYPE*) struct3333->NAME;\
}\
\
struct3333->NAME = DATA;\
}\
} while (0)
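/* Example (illustrative sketch, hypothetical names): a minimal node type and
an insert call. NAME must be the name of the next-pointer field inside TYPE;
the struct foo_node_t and its field "hash" below are made up for illustration,
and table/node are assumed to exist in the caller.

	struct foo_node_t {
		ulint		fold;	// folded key value
		foo_node_t*	hash;	// hash chain pointer, passed as NAME
	};

	// For a HASH_TABLE_SYNC_MUTEX table the mutex protecting node->fold
	// must be held (see HASH_ASSERT_OWN()).
	HASH_INSERT(foo_node_t, hash, table, node->fold, node);
*/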
/*******************************************************************//**
Inserts a struct to the head of hash table. */
#define HASH_PREPEND(TYPE, NAME, TABLE, FOLD, DATA) \
do { \
hash_cell_t* cell3333; \
TYPE* struct3333; \
\
HASH_ASSERT_OWN(TABLE, FOLD) \
\
(DATA)->NAME = NULL; \
\
cell3333 = hash_get_nth_cell(TABLE, hash_calc_hash(FOLD, TABLE));\
\
if (cell3333->node == NULL) { \
cell3333->node = DATA; \
DATA->NAME = NULL; \
} else { \
struct3333 = (TYPE*) cell3333->node; \
\
DATA->NAME = struct3333; \
\
cell3333->node = DATA; \
} \
} while (0)
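/* Example (illustrative sketch, hypothetical names as above): same calling
convention as HASH_INSERT(), but the node becomes the first element of its
chain instead of the last.

	HASH_PREPEND(foo_node_t, hash, table, node->fold, node);
*/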
#ifdef UNIV_HASH_DEBUG
# define HASH_ASSERT_VALID(DATA) ut_a((void*) (DATA) != (void*) -1)
# define HASH_INVALIDATE(DATA, NAME) *(void**) (&DATA->NAME) = (void*) -1
#else
# define HASH_ASSERT_VALID(DATA) do {} while (0)
# define HASH_INVALIDATE(DATA, NAME) do {} while (0)
#endif
/*******************************************************************//**
Deletes a struct from a hash table. */
#define HASH_DELETE(TYPE, NAME, TABLE, FOLD, DATA)\
do {\
hash_cell_t* cell3333;\
TYPE* struct3333;\
\
HASH_ASSERT_OWN(TABLE, FOLD)\
\
cell3333 = hash_get_nth_cell(TABLE, hash_calc_hash(FOLD, TABLE));\
\
if (cell3333->node == DATA) {\
HASH_ASSERT_VALID(DATA->NAME);\
cell3333->node = DATA->NAME;\
} else {\
struct3333 = (TYPE*) cell3333->node;\
\
while (struct3333->NAME != DATA) {\
\
struct3333 = (TYPE*) struct3333->NAME;\
ut_a(struct3333);\
}\
\
struct3333->NAME = DATA->NAME;\
}\
HASH_INVALIDATE(DATA, NAME);\
} while (0)
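/* Example (illustrative sketch, hypothetical names as above): the FOLD value
passed must be the same one used when the node was inserted; otherwise the
wrong chain is searched and the ut_a() check above fails.

	HASH_DELETE(foo_node_t, hash, table, node->fold, node);
*/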
#define HASH_REPLACE(TYPE, NAME, TABLE, FOLD, DATA_OLD, DATA_NEW) \
do { \
(DATA_NEW)->NAME = (DATA_OLD)->NAME; \
\
hash_cell_t& cell3333 \
= TABLE->array[hash_calc_hash(FOLD, TABLE)]; \
TYPE** struct3333 = (TYPE**)&cell3333.node; \
while (*struct3333 != DATA_OLD) { \
struct3333 = &((*struct3333)->NAME); \
} \
*struct3333 = DATA_NEW; \
} while (0)
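/* Example (illustrative sketch, hypothetical names as above): swap an old node
for a new one carrying the same fold value in a single chain traversal,
instead of a HASH_DELETE() followed by a HASH_INSERT(), which would walk the
chain twice.

	new_node->fold = old_node->fold;
	HASH_REPLACE(foo_node_t, hash, table, old_node->fold,
		     old_node, new_node);
*/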
/*******************************************************************//**
Gets the first struct in a hash chain, NULL if none. */
#define HASH_GET_FIRST(TABLE, HASH_VAL)\
(hash_get_nth_cell(TABLE, HASH_VAL)->node)
/*******************************************************************//**
Gets the next struct in a hash chain, NULL if none. */
#define HASH_GET_NEXT(NAME, DATA) ((DATA)->NAME)
/********************************************************************//**
Looks for a struct in a hash table. */
#define HASH_SEARCH(NAME, TABLE, FOLD, TYPE, DATA, ASSERTION, TEST)\
{\
\
HASH_ASSERT_OWN(TABLE, FOLD)\
\
(DATA) = (TYPE) HASH_GET_FIRST(TABLE, hash_calc_hash(FOLD, TABLE));\
HASH_ASSERT_VALID(DATA);\
\
while ((DATA) != NULL) {\
ASSERTION;\
if (TEST) {\
break;\
} else {\
HASH_ASSERT_VALID(HASH_GET_NEXT(NAME, DATA));\
(DATA) = (TYPE) HASH_GET_NEXT(NAME, DATA);\
}\
}\
}
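/* Example (illustrative sketch, hypothetical names as above; table and fold
are assumed to be in scope): look up a node by fold value. ASSERTION is
evaluated for every node visited (ut_ad(node) is a trivial stand-in); TEST
stops the walk at the first match. A real caller usually also compares the
full key, because distinct keys can share a fold value.

	foo_node_t*	node;
	HASH_SEARCH(hash, table, fold, foo_node_t*, node,
		    ut_ad(node), node->fold == fold);
	// node == NULL here if nothing in the chain matched
*/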
/********************************************************************//**
Looks for an item in all hash buckets. */
#define HASH_SEARCH_ALL(NAME, TABLE, TYPE, DATA, ASSERTION, TEST) \
do { \
ulint i3333; \
\
for (i3333 = (TABLE)->n_cells; i3333--; ) { \
(DATA) = (TYPE) HASH_GET_FIRST(TABLE, i3333); \
\
while ((DATA) != NULL) { \
HASH_ASSERT_VALID(DATA); \
ASSERTION; \
\
if (TEST) { \
break; \
} \
\
(DATA) = (TYPE) HASH_GET_NEXT(NAME, DATA); \
} \
\
if ((DATA) != NULL) { \
break; \
} \
} \
} while (0)
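/* Example (illustrative sketch, hypothetical names): scan every bucket and
stop at the first node whose (made-up) flag "marked" is set; DATA is left
NULL if no bucket contains a match.

	foo_node_t*	node;
	HASH_SEARCH_ALL(hash, table, foo_node_t*, node,
			ut_ad(node), node->marked);
*/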
/************************************************************//**
Gets the nth cell in a hash table.
@return pointer to cell */
UNIV_INLINE
hash_cell_t*
hash_get_nth_cell(
/*==============*/
hash_table_t* table, /*!< in: hash table */
ulint n); /*!< in: cell index */
/*************************************************************//**
Clears a hash table so that all the cells become empty. */
UNIV_INLINE
void
hash_table_clear(
/*=============*/
hash_table_t* table); /*!< in/out: hash table */
/*************************************************************//**
Returns the number of cells in a hash table.
@return number of cells */
UNIV_INLINE
ulint
hash_get_n_cells(
/*=============*/
hash_table_t* table); /*!< in: table */
/*******************************************************************//**
Deletes a struct which is stored in the heap of the hash table, and compacts
the heap. The fold value must be stored in the struct NODE in a field named
'fold'. */
#define HASH_DELETE_AND_COMPACT(TYPE, NAME, TABLE, NODE)\
do {\
TYPE* node111;\
TYPE* top_node111;\
hash_cell_t* cell111;\
ulint fold111;\
\
fold111 = (NODE)->fold;\
\
HASH_DELETE(TYPE, NAME, TABLE, fold111, NODE);\
\
top_node111 = (TYPE*) mem_heap_get_top(\
hash_get_heap(TABLE, fold111),\
sizeof(TYPE));\
\
/* If the node to remove is not the top node in the heap, compact the\
heap of nodes by moving the top node in the place of NODE. */\
\
if (NODE != top_node111) {\
\
/* Copy the top node in place of NODE */\
\
*(NODE) = *top_node111;\
\
cell111 = hash_get_nth_cell(TABLE,\
hash_calc_hash(top_node111->fold, TABLE));\
\
/* Look for the pointer to the top node, to update it */\
\
if (cell111->node == top_node111) {\
/* The top node is the first in the chain */\
\
cell111->node = NODE;\
} else {\
/* We have to look for the predecessor of the top\
node */\
node111 = static_cast<TYPE*>(cell111->node);\
\
while (top_node111 != HASH_GET_NEXT(NAME, node111)) {\
\
node111 = static_cast<TYPE*>(\
HASH_GET_NEXT(NAME, node111));\
}\
\
/* Now we have the predecessor node */\
\
node111->NAME = NODE;\
}\
}\
\
/* Free the space occupied by the top node */\
\
mem_heap_free_top(hash_get_heap(TABLE, fold111), sizeof(TYPE));\
} while (0)
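/* Example (illustrative sketch, hypothetical names as above): only for tables
whose nodes are allocated from the table's own heaps (see the heaps/heap
members of hash_table_t below); TYPE must contain a field literally named
"fold", as foo_node_t above does.

	HASH_DELETE_AND_COMPACT(foo_node_t, hash, table, node);
*/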
/****************************************************************//**
Move all hash table entries from OLD_TABLE to NEW_TABLE. */
#define HASH_MIGRATE(OLD_TABLE, NEW_TABLE, NODE_TYPE, PTR_NAME, FOLD_FUNC) \
do {\
ulint i2222;\
ulint cell_count2222;\
\
cell_count2222 = hash_get_n_cells(OLD_TABLE);\
\
for (i2222 = 0; i2222 < cell_count2222; i2222++) {\
NODE_TYPE* node2222 = static_cast<NODE_TYPE*>(\
HASH_GET_FIRST((OLD_TABLE), i2222));\
\
while (node2222) {\
NODE_TYPE* next2222 = static_cast<NODE_TYPE*>(\
node2222->PTR_NAME);\
ulint fold2222 = FOLD_FUNC(node2222);\
\
HASH_INSERT(NODE_TYPE, PTR_NAME, (NEW_TABLE),\
fold2222, node2222);\
\
node2222 = next2222;\
}\
}\
} while (0)
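/* Example (illustrative sketch, hypothetical names as above): rehash every
node of old_table into new_table, recomputing each node's cell with a
caller-supplied fold function.

	ulint foo_node_fold(const foo_node_t* n) { return n->fold; }
	...
	HASH_MIGRATE(old_table, new_table, foo_node_t, hash, foo_node_fold);
*/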
/************************************************************//**
Gets the sync object index for a fold value in a hash table.
@return index */
UNIV_INLINE
ulint
hash_get_sync_obj_index(
/*====================*/
hash_table_t* table, /*!< in: hash table */
ulint fold); /*!< in: fold */
/************************************************************//**
Gets the nth heap in a hash table.
@return mem heap */
UNIV_INLINE
mem_heap_t*
hash_get_nth_heap(
/*==============*/
hash_table_t* table, /*!< in: hash table */
ulint i); /*!< in: index of the heap */
/************************************************************//**
Gets the heap for a fold value in a hash table.
@return mem heap */
UNIV_INLINE
mem_heap_t*
hash_get_heap(
/*==========*/
hash_table_t* table, /*!< in: hash table */
ulint fold); /*!< in: fold */
/************************************************************//**
Gets the nth mutex in a hash table.
@return mutex */
UNIV_INLINE
ib_mutex_t*
hash_get_nth_mutex(
/*===============*/
hash_table_t* table, /*!< in: hash table */
ulint i); /*!< in: index of the mutex */
/************************************************************//**
Gets the nth rw_lock in a hash table.
@return rw_lock */
UNIV_INLINE
rw_lock_t*
hash_get_nth_lock(
/*==============*/
hash_table_t* table, /*!< in: hash table */
ulint i); /*!< in: index of the rw_lock */
/************************************************************//**
Gets the mutex for a fold value in a hash table.
@return mutex */
UNIV_INLINE
ib_mutex_t*
hash_get_mutex(
/*===========*/
hash_table_t* table, /*!< in: hash table */
ulint fold); /*!< in: fold */
/************************************************************//**
Gets the rw_lock for a fold value in a hash table.
@return rw_lock */
UNIV_INLINE
rw_lock_t*
hash_get_lock(
/*==========*/
hash_table_t* table, /*!< in: hash table */
ulint fold); /*!< in: fold */
/** If the currently held rw_lock is not the one protecting the given fold
value in the hash table, release it and S-latch the correct rw_lock,
repeating until the held lock matches the fold value.
@param[in] hash_lock latched rw_lock to be confirmed
@param[in] table hash table
@param[in] fold fold value
@return latched rw_lock */
UNIV_INLINE
rw_lock_t*
hash_lock_s_confirm(
rw_lock_t* hash_lock,
hash_table_t* table,
ulint fold);
/** If the currently held rw_lock is not the one protecting the given fold
value in the hash table, release it and X-latch the correct rw_lock,
repeating until the held lock matches the fold value.
@param[in] hash_lock latched rw_lock to be confirmed
@param[in] table hash table
@param[in] fold fold value
@return latched rw_lock */
UNIV_INLINE
rw_lock_t*
hash_lock_x_confirm(
rw_lock_t* hash_lock,
hash_table_t* table,
ulint fold);
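/* Example of the intended pattern (a sketch based on the comments above, not
taken from the original source): latch the rw_lock that currently maps to the
fold value, then re-confirm it, since the mapping may have changed while
waiting for the latch.

	rw_lock_t*	latch = hash_get_lock(table, fold);
	rw_lock_s_lock(latch);
	latch = hash_lock_s_confirm(latch, table, fold);
	...
	rw_lock_s_unlock(latch);
*/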
/************************************************************//**
Reserves all the locks of a hash table, in an ascending order. */
void
hash_lock_x_all(
/*============*/
hash_table_t* table); /*!< in: hash table */
/************************************************************//**
Releases all the locks of a hash table, in an ascending order. */
void
hash_unlock_x_all(
/*==============*/
hash_table_t* table); /*!< in: hash table */
/************************************************************//**
Releases all locks of a hash table except the one passed in. */
void
hash_unlock_x_all_but(
/*==================*/
hash_table_t* table, /*!< in: hash table */
rw_lock_t* keep_lock); /*!< in: lock to keep */
struct hash_cell_t{
void* node; /*!< hash chain node, NULL if none */
};
/* The hash table structure */
struct hash_table_t {
enum hash_table_sync_t type; /*!< type of hash_table. */
#ifdef BTR_CUR_HASH_ADAPT
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ibool adaptive;/* TRUE if this is the hash
table of the adaptive hash
index */
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */
ulint n_cells;/* number of cells in the hash table */
hash_cell_t* array; /*!< pointer to cell array */
ulint n_sync_obj;/* if sync_obj is non-NULL, the
number of mutexes or rw_locks,
depending on the type; must be
a power of 2 */
union {
ib_mutex_t* mutexes;/* NULL, or an array of mutexes
used to protect segments of the
hash table */
rw_lock_t* rw_locks;/* NULL, or an array of rw_locks
used to protect segments of the
hash table */
} sync_obj;
mem_heap_t** heaps; /*!< if this is non-NULL, hash
chain nodes for external chaining
can be allocated from these memory
heaps; there are then n_sync_obj
of these heaps */
mem_heap_t* heap;
#ifdef UNIV_DEBUG
ulint magic_n;
# define HASH_TABLE_MAGIC_N 76561114
#endif /* UNIV_DEBUG */
};
#include "hash0hash.ic"
#endif