/************************************************************************
The hash table with external chains

(c) 1994-1997 Innobase Oy

Created 8/22/1994 Heikki Tuuri
*************************************************************************/

#include "ha0ha.h"
#ifdef UNIV_NONINL
#include "ha0ha.ic"
#endif

#include "buf0buf.h"

/*****************************************************************
Creates a hash table with >= n array cells. The actual number of cells is
chosen to be a prime number slightly bigger than n. */

hash_table_t*
ha_create(
/*======*/
				/* out, own: created table */
	ibool	in_btr_search,	/* in: TRUE if the hash table is used in
				the btr_search module */
	ulint	n,		/* in: number of array cells */
	ulint	n_mutexes,	/* in: number of mutexes to protect the
				hash table: must be a power of 2, or 0 */
	ulint	mutex_level)	/* in: level of the mutexes in the latching
				order: this is used in the debug version */
{
	hash_table_t*	table;
	ulint		i;

	table = hash_create(n);

	if (in_btr_search) {
		table->adaptive = TRUE;
	} else {
		table->adaptive = FALSE;
	}

	if (n_mutexes == 0) {
		if (in_btr_search) {
			table->heap = mem_heap_create_in_btr_search(4096);
		} else {
			table->heap = mem_heap_create_in_buffer(4096);
		}

		return(table);
	}

	hash_create_mutexes(table, n_mutexes, mutex_level);

	table->heaps = mem_alloc(n_mutexes * sizeof(void*));

	for (i = 0; i < n_mutexes; i++) {
		if (in_btr_search) {
			table->heaps[i] = mem_heap_create_in_btr_search(4096);
		} else {
			table->heaps[i] = mem_heap_create_in_buffer(4096);
		}
	}

	return(table);
}

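/* Usage sketch (illustrative addition, not part of the original source):
the adaptive hash index is expected to create its table with
in_btr_search == TRUE and n_mutexes == 0, so that a single
MEM_HEAP_BTR_SEARCH heap backs the chain nodes. The cell count and
latching level below are hypothetical placeholders. */
#if 0	/* example only, never compiled */
static hash_table_t*
example_create_ahi_table(void)
{
	return(ha_create(TRUE, 100003, 0, 0));
}
#endif
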
/*****************************************************************
Removes an adaptive hash index node from the doubly linked list of hash nodes
for the buffer block. */
UNIV_INLINE
void
ha_remove_buf_block_node(
/*=====================*/
	buf_block_t*	block,	/* in: buffer block */
	ha_node_t*	node)	/* in: an adaptive hash index node */
{
	if (node == block->hash_nodes) {
		block->hash_nodes = node->next_for_block;
	}

	if (node->prev_for_block != NULL) {
		(node->prev_for_block)->next_for_block = node->next_for_block;
	}

	if (node->next_for_block != NULL) {
		(node->next_for_block)->prev_for_block = node->prev_for_block;
	}
}

/*****************************************************************
Adds an adaptive hash index node to the start of the doubly linked list of
hash nodes for the buffer block. */
UNIV_INLINE
void
ha_add_buf_block_node(
/*==================*/
	buf_block_t*	block,	/* in: buffer block */
	ha_node_t*	node)	/* in: an adaptive hash index node */
{
	node->next_for_block = block->hash_nodes;
	node->prev_for_block = NULL;

	block->hash_nodes = node;

	if (node->next_for_block != NULL) {
		(node->next_for_block)->prev_for_block = node;
	}
}

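/* Illustrative note (added, not part of the original source): the two
helpers above maintain a NULL-terminated doubly linked list rooted at
block->hash_nodes and chained through node->prev_for_block and
node->next_for_block. Every addition to the list is paired with an
increment of block->n_pointers and every removal with a decrement, so a
walk of the list, as in the hypothetical debug helper below, should
always visit exactly block->n_pointers nodes. */
#if 0	/* example only, never compiled */
static void
example_check_block_hash_list(
	buf_block_t*	block)	/* in: buffer block */
{
	ha_node_t*	node;
	ulint		count	= 0;

	for (node = block->hash_nodes; node != NULL;
					node = node->next_for_block) {
		count++;
	}

	ut_a(count == block->n_pointers);
}
#endif
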
/*****************************************************************
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted. This function is only used in the adaptive hash index. */

ibool
ha_insert_for_fold(
/*===============*/
				/* out: TRUE if succeed, FALSE if no more
				memory could be allocated */
	hash_table_t*	table,	/* in: hash table */
	ulint		fold,	/* in: folded value of data; if a node with
				the same fold value already exists, it is
				updated to point to the new data, and no new
				node is created! */
	void*		data)	/* in: data, must not be NULL */
{
	hash_cell_t*	cell;
	ha_node_t*	node;
	buf_block_t*	block;
	ha_node_t*	prev_node;
	buf_block_t*	prev_block;
	ulint		hash;

	ut_ad(table && data);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */

	block = buf_block_align(data);

	hash = hash_calc_hash(fold, table);

	cell = hash_get_nth_cell(table, hash);

	prev_node = cell->node;

	while (prev_node != NULL) {
		if (prev_node->fold == fold) {
			if (table->adaptive) {
				prev_block = buf_block_align(prev_node->data);
				ut_a(prev_block->n_pointers > 0);
				prev_block->n_pointers--;

				block->n_pointers++;

				if (prev_block != block) {
					ha_remove_buf_block_node(prev_block,
								prev_node);
					ha_add_buf_block_node(block,
								prev_node);
				}
			}

			prev_node->data = data;

			return(TRUE);
		}

		prev_node = prev_node->next;
	}

	/* We have to allocate a new chain node */

	node = mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t));

	if (node == NULL) {
		/* It was a btr search type memory heap and at the moment
		no more memory could be allocated: return */

		ut_ad(hash_get_heap(table, fold)->type & MEM_HEAP_BTR_SEARCH);

		return(FALSE);
	}

	ha_node_set_data(node, data);

	if (table->adaptive) {
		block->n_pointers++;

		ha_add_buf_block_node(block, node);
	}

	node->fold = fold;

	node->next = NULL;

	prev_node = cell->node;

	if (prev_node == NULL) {

		cell->node = node;

		return(TRUE);
	}

	while (prev_node->next != NULL) {

		prev_node = prev_node->next;
	}

	prev_node->next = node;

	return(TRUE);
}

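/* Note on the return value (added, illustrative): ha_insert_for_fold()
only returns FALSE when the btr search memory heap cannot grow any
further, so adaptive hash index callers are expected to treat a FALSE
return as "stop adding entries for now" rather than as a hard error. A
hypothetical caller loop might look like the sketch below; the names
folds, recs and n_recs are placeholders. */
#if 0	/* example only, never compiled */
static void
example_build_entries(
	hash_table_t*	table,	/* in: adaptive hash index table */
	ulint*		folds,	/* in: fold values of the records */
	void**		recs,	/* in: pointers to the records */
	ulint		n_recs)	/* in: number of records */
{
	ulint	i;

	for (i = 0; i < n_recs; i++) {
		if (!ha_insert_for_fold(table, folds[i], recs[i])) {
			/* out of btr search memory: give up quietly */

			return;
		}
	}
}
#endif
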
/***************************************************************
Deletes a hash node. */

void
ha_delete_hash_node(
/*================*/
	hash_table_t*	table,		/* in: hash table */
	ha_node_t*	del_node)	/* in: node to be deleted */
{
	buf_block_t*	block;

	if (table->adaptive) {
		block = buf_block_align(del_node->data);

		ut_a(block->n_pointers > 0);

		block->n_pointers--;
		ha_remove_buf_block_node(block, del_node);
	}

	HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
}

/*****************************************************************
Deletes an entry from a hash table. */

void
ha_delete(
/*======*/
	hash_table_t*	table,	/* in: hash table */
	ulint		fold,	/* in: folded value of data */
	void*		data)	/* in: data, must not be NULL and must exist
				in the hash table */
{
	ha_node_t*	node;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
	node = ha_search_with_data(table, fold, data);

	ut_a(node);

	ha_delete_hash_node(table, node);
}

/*************************************************************
Looks for an element when we know the pointer to the data, and updates
the pointer to the data, if found. */

void
ha_search_and_update_if_found(
/*==========================*/
	hash_table_t*	table,	/* in: hash table */
	ulint		fold,	/* in: folded value of the searched data */
	void*		data,	/* in: pointer to the data */
	void*		new_data)/* in: new pointer to the data */
{
	buf_block_t*	old_block;
	buf_block_t*	block;
	ha_node_t*	node;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */

	node = ha_search_with_data(table, fold, data);

	if (node) {
		if (table->adaptive) {
			old_block = buf_block_align(node->data);
			ut_a(old_block->n_pointers > 0);

			old_block->n_pointers--;
			ha_remove_buf_block_node(old_block, node);

			block = buf_block_align(new_data);
			block->n_pointers++;
			ha_add_buf_block_node(block, node);
		}

		node->data = new_data;
	}
}

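/* Illustrative note (added): when a record is moved, for example during
a page reorganization, the caller can repoint the existing hash node with
this function instead of deleting and re-inserting it; if the new copy of
the record lives on a different buffer block, the node also migrates from
the old block's hash node list to the new one, keeping both n_pointers
counts in step. A minimal sketch, assuming old_rec and new_rec are the
old and new copies of the same record:

	ha_search_and_update_if_found(table, fold, old_rec, new_rec);
*/
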
/*********************************************************************
Removes from the hash table all nodes whose data pointer points to the
given buffer page. The nodes are found through the list of hash nodes
attached to the buffer block, so the records on the page need not be
parsed. */

void
ha_remove_all_nodes_to_page(
/*========================*/
	hash_table_t*	table,	/* in: hash table */
	page_t*		page)	/* in: buffer page */
{
	buf_block_t*	block;
	ha_node_t*	node;

	block = buf_block_align(page);

	node = block->hash_nodes;

	while (node) {
		/* Remove the hash node */

		ha_delete_hash_node(table, node);

		node = block->hash_nodes;
	}

	ut_a(block->n_pointers == 0);
	ut_a(block->hash_nodes == NULL);
}

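/* Illustrative note (added): this routine is what the per-block node list
exists for. Because every node pointing into the page is reachable from
block->hash_nodes, the page's hash entries can be dropped without parsing
the records on the page, for example when the page is evicted from the
buffer pool. A minimal sketch, assuming the adaptive hash index table is
reachable as btr_search_sys->hash_index and the proper latches are held:

	ha_remove_all_nodes_to_page(btr_search_sys->hash_index, page);
*/
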
/*****************************************************************
Validates a hash table. */

ibool
ha_validate(
/*========*/
				/* out: TRUE if ok */
	hash_table_t*	table)	/* in: hash table */
{
	hash_cell_t*	cell;
	ha_node_t*	node;
	ibool		ok	= TRUE;
	ulint		i;

	for (i = 0; i < hash_get_n_cells(table); i++) {

		cell = hash_get_nth_cell(table, i);

		node = cell->node;

		while (node) {
			if (hash_calc_hash(node->fold, table) != i) {
				ut_print_timestamp(stderr);
				fprintf(stderr,
"InnoDB: Error: hash table node fold value %lu does not\n"
"InnoDB: match the cell number %lu.\n",
					(ulong) node->fold, (ulong) i);

				ok = FALSE;
			}

			node = node->next;
		}
	}

	return(ok);
}

/*****************************************************************
Prints info of a hash table. */

void
ha_print_info(
/*==========*/
	FILE*		file,	/* in: file where to print */
	hash_table_t*	table)	/* in: hash table */
{
	hash_cell_t*	cell;
	ulint		cells	= 0;
	ulint		n_bufs;
	ulint		i;

	for (i = 0; i < hash_get_n_cells(table); i++) {

		cell = hash_get_nth_cell(table, i);

		if (cell->node) {

			cells++;
		}
	}

	fprintf(file,
		"Hash table size %lu, used cells %lu",
		(ulong) hash_get_n_cells(table), (ulong) cells);

	if (table->heaps == NULL && table->heap != NULL) {

		/* This calculation is intended for the adaptive hash
		index: how many buffer frames have we reserved for it? */

		n_bufs = UT_LIST_GET_LEN(table->heap->base) - 1;

		if (table->heap->free_block) {
			n_bufs++;
		}

		fprintf(file, ", node heap has %lu buffer(s)\n",
			(ulong) n_bufs);
	}
}