Mirror of https://github.com/MariaDB/server.git (synced 2025-01-16 03:52:35 +01:00)

commit 161f79aefa (30 changed files with 1506 additions and 152 deletions)

Merge janus.mylan:/usr/home/serg/Abk/mysql-5.1
into  janus.mylan:/usr/home/serg/Abk/mysql-maria

Auto merged: include/my_global.h, include/my_sys.h, sql/ha_myisam.cc,
sql/item_func.cc, sql/mysql_priv.h, sql/mysqld.cc, sql/set_var.cc,
sql/sql_parse.cc, sql/sql_select.cc, storage/myisam/mi_create.c,
storage/myisam/mi_test2.c, storage/myisam/myisamchk.c
Merged: include/Makefile.am, unittest/mysys/my_atomic-t.c
include/Makefile.am

@@ -23,7 +23,7 @@ pkginclude_HEADERS = my_dbug.h m_string.h my_sys.h my_list.h my_xml.h \
 			my_getopt.h sslopt-longopts.h my_dir.h typelib.h \
 			sslopt-vars.h sslopt-case.h sql_common.h keycache.h \
 			mysql_time.h mysql/plugin.h $(BUILT_SOURCES)
-noinst_HEADERS =	config-win.h config-netware.h \
+noinst_HEADERS =	config-win.h config-netware.h lf.h \
 			heap.h maria.h myisamchk.h my_bitmap.h my_uctype.h \
 			myisam.h myisampack.h myisammrg.h ft_global.h\
 			mysys_err.h my_base.h help_start.h help_end.h \
@@ -14,19 +14,20 @@
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
 
-#if defined(__i386__) || defined(_M_IX86)
+#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__)
 
-#ifdef MY_ATOMIC_MODE_DUMMY
-# define LOCK ""
-#else
-# define LOCK "lock"
-#endif
+# ifdef MY_ATOMIC_MODE_DUMMY
+# define LOCK ""
+# else
+# define LOCK "lock"
+# endif
 
-#ifdef __GNUC__
-#include "x86-gcc.h"
-#elif defined(_MSC_VER)
-#include "x86-msvc.h"
-#endif
+# ifdef __GNUC__
+# include "x86-gcc.h"
+# elif defined(_MSC_VER)
+# error Broken!
+# include "x86-msvc.h"
+# endif
 #endif
 
 #ifdef make_atomic_cas_body
@@ -14,7 +14,7 @@
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
 
-typedef struct {pthread_rwlock_t rw;} my_atomic_rwlock_t;
+typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t;
 
 #ifdef MY_ATOMIC_MODE_DUMMY
 /*
@@ -32,13 +32,18 @@ typedef struct {pthread_rwlock_t rw;} my_atomic_rwlock_t;
 #define my_atomic_rwlock_wrunlock(name)
 #define MY_ATOMIC_MODE "dummy (non-atomic)"
 #else
-#define my_atomic_rwlock_destroy(name)   pthread_rwlock_destroy(& (name)->rw)
-#define my_atomic_rwlock_init(name)      pthread_rwlock_init(& (name)->rw, 0)
-#define my_atomic_rwlock_rdlock(name)    pthread_rwlock_rdlock(& (name)->rw)
-#define my_atomic_rwlock_wrlock(name)    pthread_rwlock_wrlock(& (name)->rw)
-#define my_atomic_rwlock_rdunlock(name)  pthread_rwlock_unlock(& (name)->rw)
-#define my_atomic_rwlock_wrunlock(name)  pthread_rwlock_unlock(& (name)->rw)
-#define MY_ATOMIC_MODE "rwlocks"
+/*
+  we're using read-write lock macros but map them to mutex locks, and they're
+  faster. Still, having semantically rich API we can change the
+  underlying implementation, if necessary.
+*/
+#define my_atomic_rwlock_destroy(name)   pthread_mutex_destroy(& (name)->rw)
+#define my_atomic_rwlock_init(name)      pthread_mutex_init(& (name)->rw, 0)
+#define my_atomic_rwlock_rdlock(name)    pthread_mutex_lock(& (name)->rw)
+#define my_atomic_rwlock_wrlock(name)    pthread_mutex_lock(& (name)->rw)
+#define my_atomic_rwlock_rdunlock(name)  pthread_mutex_unlock(& (name)->rw)
+#define my_atomic_rwlock_wrunlock(name)  pthread_mutex_unlock(& (name)->rw)
+#define MY_ATOMIC_MODE "mutex"
 #endif
 
 #define make_atomic_add_body(S) int ## S sav; sav= *a; *a+= v; v=sav;
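To illustrate how the macros above are meant to be used: callers bracket a batch of my_atomic_* operations with the rwlock calls, which expand to nothing in the dummy branch and to mutex operations otherwise. A minimal sketch of the calling pattern (the counter and its lock are hypothetical; the macro and function names are the ones from this patch):

#include <my_global.h>
#include <my_atomic.h>

static my_atomic_rwlock_t counter_lock;   /* hypothetical lock protecting the batch */
static int32 volatile counter;            /* hypothetical shared counter */

void counter_init(void)
{
  my_atomic_rwlock_init(&counter_lock);
}

int32 counter_bump(void)
{
  int32 old;
  my_atomic_rwlock_wrlock(&counter_lock);   /* pthread_mutex_lock() in "mutex" mode */
  old= my_atomic_add32(&counter, 1);        /* returns the previous value           */
  my_atomic_rwlock_wrunlock(&counter_lock);
  return old;
}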
@@ -20,10 +20,18 @@
   architectures support double-word (128-bit) cas.
 */
 
-#ifdef MY_ATOMIC_NO_XADD
-#define MY_ATOMIC_MODE "gcc-x86" LOCK "-no-xadd"
+#ifdef __x86_64__
+# ifdef MY_ATOMIC_NO_XADD
+# define MY_ATOMIC_MODE "gcc-amd64" LOCK "-no-xadd"
+# else
+# define MY_ATOMIC_MODE "gcc-amd64" LOCK
+# endif
 #else
-#define MY_ATOMIC_MODE "gcc-x86" LOCK
+# ifdef MY_ATOMIC_NO_XADD
+# define MY_ATOMIC_MODE "gcc-x86" LOCK "-no-xadd"
+# else
+# define MY_ATOMIC_MODE "gcc-x86" LOCK
+# endif
 #endif
 
 /* fix -ansi errors while maintaining readability */
@@ -54,6 +62,9 @@
 asm volatile (LOCK "; cmpxchg %2, %0" \
               : "+m" (*a), "+a" (ret): "r" (ret))
 #define make_atomic_store_body(S) \
-  asm volatile ("; xchg %0, %1;" : "+m" (*a) : "r" (v))
+  asm volatile ("; xchg %0, %1;" : "+m" (*a), "+r" (v))
 #endif
 
+/* TODO test on intel whether the below helps. on AMD it makes no difference */
+//#define LF_BACKOFF ({asm volatile ("rep; nop"); 1; })
+
include/lf.h (new file, 238 lines)
@ -0,0 +1,238 @@
|
|||
|
||||
/*
|
||||
TODO
|
||||
1. copyright
|
||||
6. reduce the number of memory barriers
|
||||
*/
|
||||
|
||||
#ifndef _lf_h
|
||||
#define _lf_h
|
||||
|
||||
#include <my_atomic.h>
|
||||
|
||||
/*
|
||||
Generic helpers
|
||||
*/
|
||||
|
||||
#define lock_wrap(f,t,proto_args, args, lock) \
|
||||
t _ ## f proto_args; \
|
||||
static inline t f proto_args \
|
||||
{ \
|
||||
t ret; \
|
||||
my_atomic_rwlock_wrlock(lock); \
|
||||
ret= _ ## f args; \
|
||||
my_atomic_rwlock_wrunlock(lock); \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
#define lock_wrap_void(f,proto_args, args, lock) \
|
||||
void _ ## f proto_args; \
|
||||
static inline void f proto_args \
|
||||
{ \
|
||||
my_atomic_rwlock_wrlock(lock); \
|
||||
_ ## f args; \
|
||||
my_atomic_rwlock_wrunlock(lock); \
|
||||
}
|
||||
|
||||
#define nolock_wrap(f,t,proto_args, args) \
|
||||
t _ ## f proto_args; \
|
||||
static inline t f proto_args \
|
||||
{ \
|
||||
return _ ## f args; \
|
||||
}
|
||||
|
||||
#define nolock_wrap_void(f,proto_args, args) \
|
||||
void _ ## f proto_args; \
|
||||
static inline void f proto_args \
|
||||
{ \
|
||||
_ ## f args; \
|
||||
}
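As an illustration of what these wrappers produce, here is a hand expansion of one of the lock_wrap() instantiations used further down in this header (explanatory sketch, not part of the patch):

/* lock_wrap(lf_dynarray_lvalue, void *,
             (LF_DYNARRAY *array, uint idx),
             (array, idx),
             &array->lock)
   expands, modulo whitespace, to: */

void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx);
static inline void *lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
{
  void *ret;
  my_atomic_rwlock_wrlock(&array->lock);   /* serializes against rwlock-mode atomics */
  ret= _lf_dynarray_lvalue(array, idx);    /* the real, lock-free implementation     */
  my_atomic_rwlock_wrunlock(&array->lock);
  return ret;
}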
|
||||
|
||||
/*
|
||||
dynamic array
|
||||
|
||||
4 levels of 256 elements each mean 4311810304 elements in an array - it
|
||||
should be enough for a while
|
||||
*/
|
||||
#define LF_DYNARRAY_LEVEL_LENGTH 256
|
||||
#define LF_DYNARRAY_LEVELS 4
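The element count quoted in the comment above is simply the sum of the four level capacities:

256 + 256^2 + 256^3 + 256^4 = 256 + 65536 + 16777216 + 4294967296 = 4311810304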
|
||||
|
||||
typedef struct {
|
||||
void * volatile level[LF_DYNARRAY_LEVELS];
|
||||
uint size_of_element;
|
||||
my_atomic_rwlock_t lock;
|
||||
} LF_DYNARRAY;
|
||||
|
||||
typedef int (*lf_dynarray_func)(void *, void *);
|
||||
|
||||
void lf_dynarray_init(LF_DYNARRAY *array, uint element_size);
|
||||
void lf_dynarray_end(LF_DYNARRAY *array);
|
||||
|
||||
nolock_wrap(lf_dynarray_nr, int,
|
||||
(LF_DYNARRAY *array, void *el),
|
||||
(array,el));
|
||||
|
||||
nolock_wrap(lf_dynarray_value, void *,
|
||||
(LF_DYNARRAY *array, uint idx),
|
||||
(array,idx));
|
||||
|
||||
lock_wrap(lf_dynarray_lvalue, void *,
|
||||
(LF_DYNARRAY *array, uint idx),
|
||||
(array,idx),
|
||||
&array->lock);
|
||||
nolock_wrap(lf_dynarray_iterate, int,
|
||||
(LF_DYNARRAY *array, lf_dynarray_func func, void *arg),
|
||||
(array,func,arg));
|
||||
|
||||
/*
|
||||
pin manager for memory allocator
|
||||
*/
|
||||
|
||||
#define LF_PINBOX_PINS 3
|
||||
#define LF_PURGATORY_SIZE 11
|
||||
|
||||
typedef void lf_pinbox_free_func(void *, void *);
|
||||
|
||||
typedef struct {
|
||||
LF_DYNARRAY pinstack;
|
||||
lf_pinbox_free_func *free_func;
|
||||
void * free_func_arg;
|
||||
uint32 volatile pinstack_top_ver; /* this is a versioned pointer */
|
||||
uint32 volatile pins_in_stack; /* number of elements in array */
|
||||
} LF_PINBOX;
|
||||
|
||||
/* we want sizeof(LF_PINS) to be close to 128 to avoid false sharing */
|
||||
typedef struct {
|
||||
void * volatile pin[LF_PINBOX_PINS];
|
||||
void * purgatory[LF_PURGATORY_SIZE];
|
||||
LF_PINBOX *pinbox;
|
||||
uint32 purgatory_count;
|
||||
uint32 volatile link;
|
||||
char pad[128-sizeof(uint32)*2
|
||||
-sizeof(void *)*(LF_PINBOX_PINS+LF_PURGATORY_SIZE+1)];
|
||||
} LF_PINS;
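A quick size check of the padding above (lf_pinbox_init() in mysys/lf_alloc-pin.c asserts sizeof(LF_PINS) == 128):

/* 64-bit build: pin[3]=24 + purgatory[11]=88 + pinbox=8 + 2*uint32=8 -> 128 bytes,
                 so pad[] comes out zero-length;
   32-bit build: 12 + 44 + 4 + 8 = 68 bytes, and pad[60] brings the total to 128. */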
|
||||
|
||||
#define lf_lock_by_pins(PINS) \
|
||||
my_atomic_rwlock_wrlock(&(PINS)->pinbox->pinstack.lock)
|
||||
#define lf_unlock_by_pins(PINS) \
|
||||
my_atomic_rwlock_wrunlock(&(PINS)->pinbox->pinstack.lock)
|
||||
|
||||
/*
|
||||
compile-time assert, to require "no less than N" pins
|
||||
it's enough if it'll fail on at least one compiler, so
|
||||
we'll enable it on GCC only, which supports zero-length arrays.
|
||||
*/
|
||||
#if defined(__GNUC__) && defined(MY_LF_EXTRA_DEBUG)
|
||||
#define LF_REQUIRE_PINS(N) \
|
||||
static const char require_pins[LF_PINBOX_PINS-N]; \
|
||||
static const int LF_NUM_PINS_IN_THIS_FILE=N;
|
||||
#define _lf_pin(PINS, PIN, ADDR) \
|
||||
( \
|
||||
my_atomic_storeptr(&(PINS)->pin[PIN], (ADDR)), \
|
||||
assert(PIN < LF_NUM_PINS_IN_THIS_FILE) \
|
||||
)
|
||||
#else
|
||||
#define LF_REQUIRE_PINS(N)
|
||||
#define _lf_pin(PINS, PIN, ADDR) my_atomic_storeptr(&(PINS)->pin[PIN], (ADDR))
|
||||
#endif
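To make the compile-time check concrete: with LF_PINBOX_PINS == 3, asking for more pins than exist gives the guard array a negative size, which gcc rejects. A sketch (the example translation unit is hypothetical; the two real users in this patch are LF_REQUIRE_PINS(1) in lf_alloc-pin.c and LF_REQUIRE_PINS(3) in lf_hash.c):

/* hypothetical file, compiled with gcc and -DMY_LF_EXTRA_DEBUG */
#include <lf.h>

LF_REQUIRE_PINS(3);    /* static const char require_pins[3-3]; zero length, accepted */
/* LF_REQUIRE_PINS(4); would expand to require_pins[3-4], i.e. a negative array size,
                       so compilation stops right here                               */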
|
||||
|
||||
#define _lf_unpin(PINS, PIN) _lf_pin(PINS, PIN, NULL)
|
||||
#define lf_pin(PINS, PIN, ADDR) \
|
||||
do { \
|
||||
lf_lock_pins(PINS); \
|
||||
_lf_pin(PINS, PIN, ADDR); \
|
||||
lf_unlock_pins(PINS); \
|
||||
} while (0)
|
||||
#define lf_unpin(PINS, PIN) lf_pin(PINS, PIN, NULL)
|
||||
|
||||
void lf_pinbox_init(LF_PINBOX *pinbox, lf_pinbox_free_func *free_func,
|
||||
void * free_func_arg);
|
||||
void lf_pinbox_end(LF_PINBOX *pinbox);
|
||||
|
||||
lock_wrap(lf_pinbox_get_pins, LF_PINS *,
|
||||
(LF_PINBOX *pinbox),
|
||||
(pinbox),
|
||||
&pinbox->pinstack.lock);
|
||||
lock_wrap_void(lf_pinbox_put_pins,
|
||||
(LF_PINS *pins),
|
||||
(pins),
|
||||
&pins->pinbox->pinstack.lock);
|
||||
#if 0
|
||||
lock_wrap_void(lf_pinbox_real_free,
|
||||
(LF_PINS *pins),
|
||||
(pins),
|
||||
&pins->pinbox->pinstack.lock);
|
||||
#endif
|
||||
lock_wrap_void(lf_pinbox_free,
|
||||
(LF_PINS *pins, void *addr),
|
||||
(pins,addr),
|
||||
&pins->pinbox->pinstack.lock);
|
||||
|
||||
/*
|
||||
memory allocator
|
||||
*/
|
||||
|
||||
typedef struct st_lf_allocator {
|
||||
LF_PINBOX pinbox;
|
||||
void * volatile top;
|
||||
uint element_size;
|
||||
uint32 volatile mallocs;
|
||||
} LF_ALLOCATOR;
|
||||
|
||||
void lf_alloc_init(LF_ALLOCATOR *allocator, uint size);
|
||||
void lf_alloc_end(LF_ALLOCATOR *allocator);
|
||||
uint lf_alloc_in_pool(LF_ALLOCATOR *allocator);
|
||||
#define _lf_alloc_free(PINS, PTR) _lf_pinbox_free((PINS), (PTR))
|
||||
#define lf_alloc_free(PINS, PTR) lf_pinbox_free((PINS), (PTR))
|
||||
#define _lf_alloc_get_pins(ALLOC) _lf_pinbox_get_pins(&(ALLOC)->pinbox)
|
||||
#define lf_alloc_get_pins(ALLOC) lf_pinbox_get_pins(&(ALLOC)->pinbox)
|
||||
#define _lf_alloc_put_pins(PINS) _lf_pinbox_put_pins(PINS)
|
||||
#define lf_alloc_put_pins(PINS) lf_pinbox_put_pins(PINS)
|
||||
#define lf_alloc_real_free(ALLOC,ADDR) my_free((gptr)(ADDR), MYF(0))
|
||||
|
||||
lock_wrap(lf_alloc_new, void *,
|
||||
(LF_PINS *pins),
|
||||
(pins),
|
||||
&pins->pinbox->pinstack.lock);
|
||||
|
||||
/*
|
||||
extendible hash
|
||||
*/
|
||||
#include <hash.h>
|
||||
|
||||
#define LF_HASH_UNIQUE 1
|
||||
|
||||
typedef struct {
|
||||
LF_DYNARRAY array; /* hash itself */
|
||||
LF_ALLOCATOR alloc; /* allocator for elements */
|
||||
hash_get_key get_key; /* see HASH */
|
||||
CHARSET_INFO *charset; /* see HASH */
|
||||
uint key_offset, key_length; /* see HASH */
|
||||
uint element_size, flags; /* LF_HASH_UNIQUE, etc */
|
||||
int32 volatile size; /* size of array */
|
||||
int32 volatile count; /* number of elements in the hash */
|
||||
} LF_HASH;
|
||||
|
||||
void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,
|
||||
uint key_offset, uint key_length, hash_get_key get_key,
|
||||
CHARSET_INFO *charset);
|
||||
void lf_hash_end(LF_HASH *hash);
|
||||
int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data);
|
||||
int lf_hash_search(LF_HASH *hash, LF_PINS *pins, const uchar *key, uint keylen);
|
||||
int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const uchar *key, uint keylen);
|
||||
#define _lf_hash_get_pins(HASH) _lf_alloc_get_pins(&(HASH)->alloc)
|
||||
#define lf_hash_get_pins(HASH) lf_alloc_get_pins(&(HASH)->alloc)
|
||||
#define _lf_hash_put_pins(PINS) _lf_pinbox_put_pins(PINS)
|
||||
#define lf_hash_put_pins(PINS) lf_pinbox_put_pins(PINS)
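A rough usage sketch of the hash API above (the element type, its key layout, and the use of my_charset_bin are illustrative assumptions; the lf_hash_* calls and pin macros are the ones declared in this header):

#include <my_global.h>
#include <my_sys.h>
#include <m_ctype.h>
#include <lf.h>

typedef struct { uint32 id; uint32 value; } my_elem;   /* hypothetical element */

static LF_HASH table;                                  /* hypothetical hash object */

void example(void)
{
  LF_PINS *pins;
  my_elem elem= { 42, 1 };

  /* key is the leading uint32 'id' member: offset 0, length 4, no get_key hook */
  lf_hash_init(&table, sizeof(my_elem), LF_HASH_UNIQUE,
               0, sizeof(uint32), NULL, &my_charset_bin);

  pins= lf_hash_get_pins(&table);                      /* once per thread          */
  lf_hash_insert(&table, pins, &elem);                 /* the element is copied in */
  if (lf_hash_search(&table, pins, (uchar *)&elem.id, sizeof(uint32)))
    lf_hash_delete(&table, pins, (uchar *)&elem.id, sizeof(uint32));
  lf_hash_put_pins(pins);

  lf_hash_end(&table);
}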
|
||||
|
||||
/*
|
||||
cleanup
|
||||
*/
|
||||
|
||||
#undef lock_wrap_void
|
||||
#undef lock_wrap
|
||||
#undef nolock_wrap_void
|
||||
#undef nolock_wrap
|
||||
|
||||
#endif
|
||||
|
|
@ -36,7 +36,7 @@
|
|||
#ifdef HAVE_INLINE
|
||||
|
||||
#define make_atomic_add(S) \
|
||||
static inline int ## S my_atomic_add ## S( \
|
||||
STATIC_INLINE int ## S my_atomic_add ## S( \
|
||||
int ## S volatile *a, int ## S v) \
|
||||
{ \
|
||||
make_atomic_add_body(S); \
|
||||
|
@ -44,7 +44,7 @@ static inline int ## S my_atomic_add ## S( \
|
|||
}
|
||||
|
||||
#define make_atomic_swap(S) \
|
||||
static inline int ## S my_atomic_swap ## S( \
|
||||
STATIC_INLINE int ## S my_atomic_swap ## S( \
|
||||
int ## S volatile *a, int ## S v) \
|
||||
{ \
|
||||
make_atomic_swap_body(S); \
|
||||
|
@ -52,7 +52,7 @@ static inline int ## S my_atomic_swap ## S( \
|
|||
}
|
||||
|
||||
#define make_atomic_cas(S) \
|
||||
static inline int my_atomic_cas ## S(int ## S volatile *a, \
|
||||
STATIC_INLINE int my_atomic_cas ## S(int ## S volatile *a, \
|
||||
int ## S *cmp, int ## S set) \
|
||||
{ \
|
||||
int8 ret; \
|
||||
|
@ -61,7 +61,7 @@ static inline int my_atomic_cas ## S(int ## S volatile *a, \
|
|||
}
|
||||
|
||||
#define make_atomic_load(S) \
|
||||
static inline int ## S my_atomic_load ## S(int ## S volatile *a) \
|
||||
STATIC_INLINE int ## S my_atomic_load ## S(int ## S volatile *a) \
|
||||
{ \
|
||||
int ## S ret; \
|
||||
make_atomic_load_body(S); \
|
||||
|
@ -69,7 +69,7 @@ static inline int ## S my_atomic_load ## S(int ## S volatile *a) \
|
|||
}
|
||||
|
||||
#define make_atomic_store(S) \
|
||||
static inline void my_atomic_store ## S( \
|
||||
STATIC_INLINE void my_atomic_store ## S( \
|
||||
int ## S volatile *a, int ## S v) \
|
||||
{ \
|
||||
make_atomic_store_body(S); \
|
||||
|
@ -135,6 +135,10 @@ make_atomic_swap(ptr)
|
|||
#undef _atomic_h_cleanup_
|
||||
#endif
|
||||
|
||||
#ifndef LF_BACKOFF
|
||||
#define LF_BACKOFF (1)
|
||||
#endif
|
||||
|
||||
#if SIZEOF_CHARP == SIZEOF_INT
|
||||
typedef int intptr;
|
||||
#elif SIZEOF_CHARP == SIZEOF_LONG
|
||||
|
|
include/my_bit.h (new file, 107 lines)
@ -0,0 +1,107 @@
|
|||
/*
|
||||
Some useful bit functions
|
||||
*/
|
||||
|
||||
#ifdef HAVE_INLINE
|
||||
|
||||
extern const char _my_bits_nbits[256];
|
||||
extern const uchar _my_bits_reverse_table[256];
|
||||
|
||||
/*
|
||||
Find smallest X in 2^X >= value
|
||||
This can be used to divide a number with value by doing a shift instead
|
||||
*/
|
||||
|
||||
STATIC_INLINE uint my_bit_log2(ulong value)
|
||||
{
|
||||
uint bit;
|
||||
for (bit=0 ; value > 1 ; value>>=1, bit++) ;
|
||||
return bit;
|
||||
}
|
||||
|
||||
STATIC_INLINE uint my_count_bits(ulonglong v)
|
||||
{
|
||||
#if SIZEOF_LONG_LONG > 4
|
||||
/* The following code is a bit faster on 16 bit machines than if we would
|
||||
only shift v */
|
||||
ulong v2=(ulong) (v >> 32);
|
||||
return (uint) (uchar) (_my_bits_nbits[(uchar) v] +
|
||||
_my_bits_nbits[(uchar) (v >> 8)] +
|
||||
_my_bits_nbits[(uchar) (v >> 16)] +
|
||||
_my_bits_nbits[(uchar) (v >> 24)] +
|
||||
_my_bits_nbits[(uchar) (v2)] +
|
||||
_my_bits_nbits[(uchar) (v2 >> 8)] +
|
||||
_my_bits_nbits[(uchar) (v2 >> 16)] +
|
||||
_my_bits_nbits[(uchar) (v2 >> 24)]);
|
||||
#else
|
||||
return (uint) (uchar) (_my_bits_nbits[(uchar) v] +
|
||||
_my_bits_nbits[(uchar) (v >> 8)] +
|
||||
_my_bits_nbits[(uchar) (v >> 16)] +
|
||||
_my_bits_nbits[(uchar) (v >> 24)]);
|
||||
#endif
|
||||
}
|
||||
|
||||
STATIC_INLINE uint my_count_bits_ushort(ushort v)
|
||||
{
|
||||
return _my_bits_nbits[v];
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Next highest power of two
|
||||
|
||||
SYNOPSIS
|
||||
my_round_up_to_next_power()
|
||||
v Value to check
|
||||
|
||||
RETURN
|
||||
Next or equal power of 2
|
||||
Note: 0 will return 0
|
||||
|
||||
NOTES
|
||||
Algorithm by Sean Anderson, according to:
|
||||
http://graphics.stanford.edu/~seander/bithacks.html
|
||||
(Original code public domain)
|
||||
|
||||
Comments show how this works with 01100000000000000000000000001011
|
||||
*/
|
||||
|
||||
STATIC_INLINE uint32 my_round_up_to_next_power(uint32 v)
|
||||
{
|
||||
v--; /* 01100000000000000000000000001010 */
|
||||
v|= v >> 1; /* 01110000000000000000000000001111 */
|
||||
v|= v >> 2; /* 01111100000000000000000000001111 */
|
||||
v|= v >> 4; /* 01111111110000000000000000001111 */
|
||||
v|= v >> 8; /* 01111111111111111100000000001111 */
|
||||
v|= v >> 16; /* 01111111111111111111111111111111 */
|
||||
return v+1; /* 10000000000000000000000000000000 */
|
||||
}
|
||||
|
||||
STATIC_INLINE uint32 my_clear_highest_bit(uint32 v)
|
||||
{
|
||||
uint32 w=v >> 1;
|
||||
w|= w >> 1;
|
||||
w|= w >> 2;
|
||||
w|= w >> 4;
|
||||
w|= w >> 8;
|
||||
w|= w >> 16;
|
||||
return v & w;
|
||||
}
|
||||
|
||||
STATIC_INLINE uint32 my_reverse_bits(uint key)
|
||||
{
|
||||
return
|
||||
(_my_bits_reverse_table[ key & 255] << 24) |
|
||||
(_my_bits_reverse_table[(key>> 8) & 255] << 16) |
|
||||
(_my_bits_reverse_table[(key>>16) & 255] << 8) |
|
||||
_my_bits_reverse_table[(key>>24) ];
|
||||
}
|
||||
|
||||
#else
|
||||
extern uint my_bit_log2(ulong value);
|
||||
extern uint32 my_round_up_to_next_power(uint32 v);
|
||||
uint32 my_clear_highest_bit(uint32 v);
|
||||
uint32 my_reverse_bits(uint key);
|
||||
extern uint my_count_bits(ulonglong v);
|
||||
extern uint my_count_bits_ushort(ushort v);
|
||||
#endif
|
|
@ -192,6 +192,8 @@
|
|||
#endif
|
||||
#undef inline_test_2
|
||||
#undef inline_test_1
|
||||
/* helper macro for "instantiating" inline functions */
|
||||
#define STATIC_INLINE static inline
|
||||
|
||||
/*
|
||||
The following macros are used to control inlining a bit more than
|
||||
|
|
|
@ -216,6 +216,7 @@ extern int (*error_handler_hook)(uint my_err, const char *str,myf MyFlags);
|
|||
extern int (*fatal_error_handler_hook)(uint my_err, const char *str,
|
||||
myf MyFlags);
|
||||
extern uint my_file_limit;
|
||||
extern ulong my_thread_stack_size;
|
||||
|
||||
#ifdef HAVE_LARGE_PAGES
|
||||
extern my_bool my_use_large_pages;
|
||||
|
@ -821,10 +822,6 @@ extern int packfrm(const void *, uint, const void **, uint *);
|
|||
extern int unpackfrm(const void **, uint *, const void *);
|
||||
|
||||
extern ha_checksum my_checksum(ha_checksum crc, const byte *mem, uint count);
|
||||
extern uint my_bit_log2(ulong value);
|
||||
extern uint32 my_round_up_to_next_power(uint32 v);
|
||||
extern uint my_count_bits(ulonglong v);
|
||||
extern uint my_count_bits_ushort(ushort v);
|
||||
extern void my_sleep(ulong m_seconds);
|
||||
extern ulong crc32(ulong crc, const uchar *buf, uint len);
|
||||
extern uint my_set_max_open_files(uint files);
|
||||
|
@ -840,7 +837,7 @@ extern int my_getncpus();
|
|||
#ifndef MAP_NOSYNC
|
||||
#define MAP_NOSYNC 0
|
||||
#endif
|
||||
#ifndef MAP_NORESERVE
|
||||
#ifndef MAP_NORESERVE
|
||||
#define MAP_NORESERVE 0 /* For irix and AIX */
|
||||
#endif
|
||||
|
||||
|
|
|
@ -31,7 +31,8 @@ libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c my_mmap.c \
|
|||
mf_tempdir.c my_lock.c mf_brkhant.c my_alarm.c \
|
||||
my_malloc.c my_realloc.c my_once.c mulalloc.c \
|
||||
my_alloc.c safemalloc.c my_new.cc \
|
||||
my_vle.c my_atomic.c \
|
||||
my_vle.c my_atomic.c lf_hash.c \
|
||||
lf_dynarray.c lf_alloc-pin.c \
|
||||
my_fopen.c my_fstream.c my_getsystime.c \
|
||||
my_error.c errors.c my_div.c my_messnc.c \
|
||||
mf_format.c mf_same.c mf_dirname.c mf_fn_ext.c \
|
||||
|
|
mysys/lf_alloc-pin.c (new file, 319 lines)
@ -0,0 +1,319 @@
|
|||
/* Copyright (C) 2000 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/*
|
||||
concurrent allocator based on pinning addresses
|
||||
|
||||
strictly speaking it's not lock-free, as it can be blocked
|
||||
if a thread's purgatory is full and all addresses from there
|
||||
are pinned.
|
||||
|
||||
But until the above happens, it's wait-free.
|
||||
|
||||
It can be made strictly wait-free by increasing purgatory size.
|
||||
If it's larger than pins_in_stack*LF_PINBOX_PINS, then apocalyptical
|
||||
condition above will never happen. But than the memory requirements
|
||||
will be O(pins_in_stack^2).
|
||||
|
||||
Note, that for large purgatory sizes it makes sense to remove
|
||||
purgatory array, and link objects in a list using embedded pointer.
|
||||
|
||||
TODO test with more than 256 threads
|
||||
TODO test w/o alloca
|
||||
*/
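As a rough usage sketch of the allocator described above (the my_node type, the node_alloc object and the function name are illustrative; the lf_alloc_* wrappers are the ones declared in include/lf.h in this patch):

#include <my_global.h>
#include <my_sys.h>
#include <lf.h>

typedef struct { void *link; int payload; } my_node;   /* hypothetical element type */

static LF_ALLOCATOR node_alloc;   /* lf_alloc_init(&node_alloc, sizeof(my_node))
                                     is assumed to have been called at startup      */

void example_thread(void)
{
  LF_PINS *pins;
  my_node *node;

  pins= lf_alloc_get_pins(&node_alloc);   /* a per-thread set of LF_PINBOX_PINS pins  */
  node= (my_node *)lf_alloc_new(pins);    /* pops from the free list or my_malloc()s  */

  /* ... publish node in a shared structure; readers pin it while dereferencing ... */

  lf_alloc_free(pins, node);   /* parks the address in this thread's purgatory; it is
                                  recycled into the free list only once no thread
                                  still has it pinned                                 */
  lf_alloc_put_pins(pins);     /* flushes the purgatory and gives the pins back       */
}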
|
||||
|
||||
#include <my_global.h>
|
||||
#include <my_sys.h>
|
||||
#include <lf.h>
|
||||
|
||||
#define LF_PINBOX_MAX_PINS 65536
|
||||
|
||||
static void _lf_pinbox_real_free(LF_PINS *pins);
|
||||
|
||||
void lf_pinbox_init(LF_PINBOX *pinbox, lf_pinbox_free_func *free_func,
|
||||
void *free_func_arg)
|
||||
{
|
||||
DBUG_ASSERT(sizeof(LF_PINS) == 128);
|
||||
lf_dynarray_init(&pinbox->pinstack, sizeof(LF_PINS));
|
||||
pinbox->pinstack_top_ver=0;
|
||||
pinbox->pins_in_stack=0;
|
||||
pinbox->free_func=free_func;
|
||||
pinbox->free_func_arg=free_func_arg;
|
||||
}
|
||||
|
||||
void lf_pinbox_end(LF_PINBOX *pinbox)
|
||||
{
|
||||
lf_dynarray_end(&pinbox->pinstack);
|
||||
}
|
||||
|
||||
LF_PINS *_lf_pinbox_get_pins(LF_PINBOX *pinbox)
|
||||
{
|
||||
uint32 pins, next, top_ver;
|
||||
LF_PINS *el;
|
||||
|
||||
top_ver=pinbox->pinstack_top_ver;
|
||||
do
|
||||
{
|
||||
if (!(pins=top_ver % LF_PINBOX_MAX_PINS))
|
||||
{
|
||||
pins=my_atomic_add32(&pinbox->pins_in_stack, 1)+1;
|
||||
el=(LF_PINS *)_lf_dynarray_lvalue(&pinbox->pinstack, pins);
|
||||
break;
|
||||
}
|
||||
el=(LF_PINS *)_lf_dynarray_value(&pinbox->pinstack, pins);
|
||||
next=el->link;
|
||||
} while (!my_atomic_cas32(&pinbox->pinstack_top_ver, &top_ver,
|
||||
top_ver-pins+next+LF_PINBOX_MAX_PINS));
|
||||
el->link=pins;
|
||||
el->purgatory_count=0;
|
||||
el->pinbox=pinbox;
|
||||
return el;
|
||||
}
|
||||
|
||||
void _lf_pinbox_put_pins(LF_PINS *pins)
|
||||
{
|
||||
LF_PINBOX *pinbox=pins->pinbox;
|
||||
uint32 top_ver, nr;
|
||||
nr=pins->link;
|
||||
#ifdef MY_LF_EXTRA_DEBUG
|
||||
{
|
||||
int i;
|
||||
for (i=0; i < LF_PINBOX_PINS; i++)
|
||||
assert(pins->pin[i] == 0);
|
||||
}
|
||||
#endif
|
||||
while (pins->purgatory_count)
|
||||
{
|
||||
_lf_pinbox_real_free(pins);
|
||||
if (pins->purgatory_count && my_getncpus() == 1)
|
||||
{
|
||||
my_atomic_rwlock_wrunlock(&pins->pinbox->pinstack.lock);
|
||||
pthread_yield();
|
||||
my_atomic_rwlock_wrlock(&pins->pinbox->pinstack.lock);
|
||||
}
|
||||
}
|
||||
top_ver=pinbox->pinstack_top_ver;
|
||||
if (nr == pinbox->pins_in_stack)
|
||||
{
|
||||
int32 tmp=nr;
|
||||
if (my_atomic_cas32(&pinbox->pins_in_stack, &tmp, tmp-1))
|
||||
goto ret;
|
||||
}
|
||||
|
||||
do
|
||||
{
|
||||
pins->link=top_ver % LF_PINBOX_MAX_PINS;
|
||||
} while (!my_atomic_cas32(&pinbox->pinstack_top_ver, &top_ver,
|
||||
top_ver-pins->link+nr+LF_PINBOX_MAX_PINS));
|
||||
ret:
|
||||
return;
|
||||
}
|
||||
|
||||
static int ptr_cmp(void **a, void **b)
|
||||
{
|
||||
return *a < *b ? -1 : *a == *b ? 0 : 1;
|
||||
}
|
||||
|
||||
void _lf_pinbox_free(LF_PINS *pins, void *addr)
|
||||
{
|
||||
while (pins->purgatory_count == LF_PURGATORY_SIZE)
|
||||
{
|
||||
_lf_pinbox_real_free(pins);
|
||||
if (pins->purgatory_count == LF_PURGATORY_SIZE && my_getncpus() == 1)
|
||||
{
|
||||
my_atomic_rwlock_wrunlock(&pins->pinbox->pinstack.lock);
|
||||
pthread_yield();
|
||||
my_atomic_rwlock_wrlock(&pins->pinbox->pinstack.lock);
|
||||
}
|
||||
}
|
||||
pins->purgatory[pins->purgatory_count++]=addr;
|
||||
}
|
||||
|
||||
struct st_harvester {
|
||||
void **granary;
|
||||
int npins;
|
||||
};
|
||||
|
||||
static int harvest_pins(LF_PINS *el, struct st_harvester *hv)
|
||||
{
|
||||
int i;
|
||||
LF_PINS *el_end= el+min(hv->npins, LF_DYNARRAY_LEVEL_LENGTH);
|
||||
for (; el < el_end; el++)
|
||||
{
|
||||
for (i= 0; i < LF_PINBOX_PINS; i++)
|
||||
{
|
||||
void *p= el->pin[i];
|
||||
if (p)
|
||||
*hv->granary++= p;
|
||||
}
|
||||
}
|
||||
hv->npins-= LF_DYNARRAY_LEVEL_LENGTH;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int match_pins(LF_PINS *el, void *addr)
|
||||
{
|
||||
int i;
|
||||
LF_PINS *el_end= el+LF_DYNARRAY_LEVEL_LENGTH;
|
||||
for (; el < el_end; el++)
|
||||
for (i= 0; i < LF_PINBOX_PINS; i++)
|
||||
if (el->pin[i] == addr)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void _lf_pinbox_real_free(LF_PINS *pins)
|
||||
{
|
||||
int npins;
|
||||
void **addr=0;
|
||||
void **start, **cur, **end=pins->purgatory+pins->purgatory_count;
|
||||
LF_PINBOX *pinbox=pins->pinbox;
|
||||
|
||||
npins=pinbox->pins_in_stack+1;
|
||||
|
||||
#ifdef HAVE_ALLOCA
|
||||
/* create a sorted list of pinned addresses, to speed up searches */
|
||||
if (sizeof(void *)*LF_PINBOX_PINS*npins < my_thread_stack_size)
|
||||
{
|
||||
struct st_harvester hv;
|
||||
addr= (void **) alloca(sizeof(void *)*LF_PINBOX_PINS*npins);
|
||||
hv.granary=addr;
|
||||
hv.npins=npins;
|
||||
_lf_dynarray_iterate(&pinbox->pinstack,
|
||||
(lf_dynarray_func)harvest_pins, &hv);
|
||||
|
||||
npins=hv.granary-addr;
|
||||
if (npins)
|
||||
qsort(addr, npins, sizeof(void *), (qsort_cmp)ptr_cmp);
|
||||
}
|
||||
#endif
|
||||
|
||||
start= cur= pins->purgatory;
|
||||
end= start+pins->purgatory_count;
|
||||
for (; cur < end; cur++)
|
||||
{
|
||||
if (npins)
|
||||
{
|
||||
if (addr)
|
||||
{
|
||||
void **a,**b,**c;
|
||||
for (a=addr, b=addr+npins-1, c=a+(b-a)/2; b-a>1; c=a+(b-a)/2)
|
||||
if (*cur == *c)
|
||||
a=b=c;
|
||||
else if (*cur > *c)
|
||||
a=c;
|
||||
else
|
||||
b=c;
|
||||
if (*cur == *a || *cur == *b)
|
||||
goto found;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (_lf_dynarray_iterate(&pinbox->pinstack,
|
||||
(lf_dynarray_func)match_pins, *cur))
|
||||
goto found;
|
||||
}
|
||||
}
|
||||
/* not pinned - freeing */
|
||||
pinbox->free_func(*cur, pinbox->free_func_arg);
|
||||
continue;
|
||||
found:
|
||||
/* pinned - keeping */
|
||||
*start++=*cur;
|
||||
}
|
||||
pins->purgatory_count=start-pins->purgatory;
|
||||
#ifdef MY_LF_EXTRA_DEBUG
|
||||
while (start < pins->purgatory + LF_PURGATORY_SIZE)
|
||||
*start++=0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void alloc_free(void *node, LF_ALLOCATOR *allocator)
|
||||
{
|
||||
void *tmp;
|
||||
tmp=allocator->top;
|
||||
do
|
||||
{
|
||||
(*(void **)node)=tmp;
|
||||
} while (!my_atomic_casptr((void **)&allocator->top, (void **)&tmp, node) &&
|
||||
LF_BACKOFF);
|
||||
}
|
||||
|
||||
LF_REQUIRE_PINS(1);
|
||||
void *_lf_alloc_new(LF_PINS *pins)
|
||||
{
|
||||
LF_ALLOCATOR *allocator=(LF_ALLOCATOR *)(pins->pinbox->free_func_arg);
|
||||
void *node;
|
||||
for (;;)
|
||||
{
|
||||
do
|
||||
{
|
||||
node=allocator->top;
|
||||
_lf_pin(pins, 0, node);
|
||||
} while (node !=allocator->top && LF_BACKOFF);
|
||||
if (!node)
|
||||
{
|
||||
if (!(node=my_malloc(allocator->element_size, MYF(MY_WME|MY_ZEROFILL))))
|
||||
goto ret;
|
||||
#ifdef MY_LF_EXTRA_DEBUG
|
||||
my_atomic_add32(&allocator->mallocs, 1);
|
||||
#endif
|
||||
goto ret;
|
||||
}
|
||||
if (my_atomic_casptr((void **)&allocator->top,
|
||||
(void *)&node, *(void **)node))
|
||||
goto ret;
|
||||
}
|
||||
ret:
|
||||
_lf_unpin(pins, 0);
|
||||
return node;
|
||||
}
|
||||
|
||||
void lf_alloc_init(LF_ALLOCATOR *allocator, uint size)
|
||||
{
|
||||
lf_pinbox_init(&allocator->pinbox,
|
||||
(lf_pinbox_free_func *)alloc_free, allocator);
|
||||
allocator->top=0;
|
||||
allocator->mallocs=0;
|
||||
allocator->element_size=size;
|
||||
DBUG_ASSERT(size >= (int)sizeof(void *));
|
||||
}
|
||||
|
||||
void lf_alloc_end(LF_ALLOCATOR *allocator)
|
||||
{
|
||||
void *el=allocator->top;
|
||||
while (el)
|
||||
{
|
||||
void *tmp=*(void **)el;
|
||||
my_free(el, MYF(0));
|
||||
el=tmp;
|
||||
}
|
||||
lf_pinbox_end(&allocator->pinbox);
|
||||
allocator->top=0;
|
||||
}
|
||||
|
||||
/*
|
||||
NOTE
|
||||
this is NOT thread-safe !!!
|
||||
*/
|
||||
uint lf_alloc_in_pool(LF_ALLOCATOR *allocator)
|
||||
{
|
||||
uint i;
|
||||
void *node;
|
||||
for (node=allocator->top, i=0; node; node=*(void **)node, i++) /* no op */;
|
||||
return i;
|
||||
}
|
||||
|
mysys/lf_dynarray.c (new file, 186 lines)
@ -0,0 +1,186 @@
|
|||
/* Copyright (C) 2000 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/*
|
||||
Analog of DYNAMIC_ARRAY that never reallocs
|
||||
(so no pointer into the array may ever become invalid).
|
||||
|
||||
Memory is allocated in non-contiguous chunks.
|
||||
This data structure is not space efficient for sparse arrays.
|
||||
|
||||
The number of elements is limited to 2^16
|
||||
|
||||
Every element is aligned to sizeof(element) boundary
|
||||
(to avoid false sharing if element is big enough).
|
||||
|
||||
Actually, it's wait-free, not lock-free ;-)
|
||||
*/
|
||||
|
||||
#undef DBUG_OFF
|
||||
#include <my_global.h>
|
||||
#include <strings.h>
|
||||
#include <my_sys.h>
|
||||
#include <lf.h>
|
||||
|
||||
void lf_dynarray_init(LF_DYNARRAY *array, uint element_size)
|
||||
{
|
||||
bzero(array, sizeof(*array));
|
||||
array->size_of_element=element_size;
|
||||
my_atomic_rwlock_init(&array->lock);
|
||||
}
|
||||
|
||||
static void recursive_free(void **alloc, int level)
|
||||
{
|
||||
if (!alloc) return;
|
||||
|
||||
if (level)
|
||||
{
|
||||
int i;
|
||||
for (i=0; i < LF_DYNARRAY_LEVEL_LENGTH; i++)
|
||||
recursive_free(alloc[i], level-1);
|
||||
my_free((void *)alloc, MYF(0));
|
||||
}
|
||||
else
|
||||
my_free(alloc[-1], MYF(0));
|
||||
}
|
||||
|
||||
void lf_dynarray_end(LF_DYNARRAY *array)
|
||||
{
|
||||
int i;
|
||||
for (i=0; i < LF_DYNARRAY_LEVELS; i++)
|
||||
recursive_free(array->level[i], i);
|
||||
my_atomic_rwlock_destroy(&array->lock);
|
||||
bzero(array, sizeof(*array));
|
||||
}
|
||||
|
||||
static const int dynarray_idxes_in_level[LF_DYNARRAY_LEVELS]=
|
||||
{
|
||||
0, /* +1 here to avoid -1's below */
|
||||
LF_DYNARRAY_LEVEL_LENGTH,
|
||||
LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH,
|
||||
LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH *
|
||||
LF_DYNARRAY_LEVEL_LENGTH
|
||||
};
|
||||
|
||||
void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
|
||||
{
|
||||
void * ptr, * volatile * ptr_ptr=0;
|
||||
int i;
|
||||
|
||||
for (i=3; i > 0; i--)
|
||||
{
|
||||
if (ptr_ptr || idx >= dynarray_idxes_in_level[i])
|
||||
{
|
||||
if (!ptr_ptr)
|
||||
{
|
||||
ptr_ptr=&array->level[i];
|
||||
idx-= dynarray_idxes_in_level[i];
|
||||
}
|
||||
ptr=*ptr_ptr;
|
||||
if (!ptr)
|
||||
{
|
||||
void *alloc=my_malloc(LF_DYNARRAY_LEVEL_LENGTH * sizeof(void *),
|
||||
MYF(MY_WME|MY_ZEROFILL));
|
||||
if (!alloc)
|
||||
return(NULL);
|
||||
if (my_atomic_casptr(ptr_ptr, &ptr, alloc))
|
||||
ptr= alloc;
|
||||
else
|
||||
my_free(alloc, MYF(0));
|
||||
}
|
||||
ptr_ptr=((void **)ptr) + idx / dynarray_idxes_in_level[i];
|
||||
idx%= dynarray_idxes_in_level[i];
|
||||
}
|
||||
}
|
||||
if (!ptr_ptr)
|
||||
ptr_ptr=&array->level[0];
|
||||
ptr=*ptr_ptr;
|
||||
if (!ptr)
|
||||
{
|
||||
void *alloc, *data;
|
||||
alloc=my_malloc(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
|
||||
max(array->size_of_element, sizeof(void *)),
|
||||
MYF(MY_WME|MY_ZEROFILL));
|
||||
if (!alloc)
|
||||
return(NULL);
|
||||
/* reserve the space for free() address */
|
||||
data= alloc + sizeof(void *);
|
||||
{ /* alignment */
|
||||
intptr mod= ((intptr)data) % array->size_of_element;
|
||||
if (mod)
|
||||
data+= array->size_of_element - mod;
|
||||
}
|
||||
((void **)data)[-1]=alloc; /* free() will need the original pointer */
|
||||
if (my_atomic_casptr(ptr_ptr, &ptr, data))
|
||||
ptr= data;
|
||||
else
|
||||
my_free(alloc, MYF(0));
|
||||
}
|
||||
return ptr + array->size_of_element * idx;
|
||||
}
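A worked example of the address arithmetic in _lf_dynarray_lvalue() above (editorial illustration with LF_DYNARRAY_LEVEL_LENGTH == 256; the concrete index is arbitrary):

/* idx = 70000:
     70000 >= 65536 (start offset of level 2), so ptr_ptr= &array->level[2],
     and idx -= 65536                               -> idx = 4464
     first hop:   4464 / 65536 = 0   slot 0 in the level-2 block,  idx %= 65536 -> 4464
     second hop:  4464 / 256   = 17  slot 17 in the next block,    idx %= 256   -> 112
   result: element 112 of that leaf block, i.e.
   ptr + array->size_of_element * 112                                                 */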
|
||||
|
||||
void *_lf_dynarray_value(LF_DYNARRAY *array, uint idx)
|
||||
{
|
||||
void * ptr, * volatile * ptr_ptr=0;
|
||||
int i;
|
||||
|
||||
for (i=3; i > 0; i--)
|
||||
{
|
||||
if (ptr_ptr || idx >= dynarray_idxes_in_level[i])
|
||||
{
|
||||
if (!ptr_ptr)
|
||||
{
|
||||
ptr_ptr=&array->level[i];
|
||||
idx-= dynarray_idxes_in_level[i];
|
||||
}
|
||||
ptr=*ptr_ptr;
|
||||
if (!ptr)
|
||||
return(NULL);
|
||||
ptr_ptr=((void **)ptr) + idx / dynarray_idxes_in_level[i];
|
||||
idx %= dynarray_idxes_in_level[i];
|
||||
}
|
||||
}
|
||||
if (!ptr_ptr)
|
||||
ptr_ptr=&array->level[0];
|
||||
ptr=*ptr_ptr;
|
||||
if (!ptr)
|
||||
return(NULL);
|
||||
return ptr + array->size_of_element * idx;
|
||||
}
|
||||
|
||||
static int recursive_iterate(LF_DYNARRAY *array, void *ptr, int level,
|
||||
lf_dynarray_func func, void *arg)
|
||||
{
|
||||
int res, i;
|
||||
if (!ptr)
|
||||
return 0;
|
||||
if (!level)
|
||||
return func(ptr, arg);
|
||||
for (i=0; i < LF_DYNARRAY_LEVEL_LENGTH; i++)
|
||||
if ((res=recursive_iterate(array, ((void **)ptr)[i], level-1, func, arg)))
|
||||
return res;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int _lf_dynarray_iterate(LF_DYNARRAY *array, lf_dynarray_func func, void *arg)
|
||||
{
|
||||
int i, res;
|
||||
for (i=0; i < LF_DYNARRAY_LEVELS; i++)
|
||||
if ((res=recursive_iterate(array, array->level[i], i, func, arg)))
|
||||
return res;
|
||||
return 0;
|
||||
}
|
||||
|
mysys/lf_hash.c (new file, 370 lines)
@ -0,0 +1,370 @@
|
|||
/* Copyright (C) 2000 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/*
|
||||
extensible hash
|
||||
|
||||
TODO
|
||||
dummy nodes use element_size=0
|
||||
try to get rid of dummy nodes ?
|
||||
*/
|
||||
#include <my_global.h>
|
||||
#include <my_sys.h>
|
||||
#include <my_bit.h>
|
||||
#include <lf.h>
|
||||
|
||||
LF_REQUIRE_PINS(3);
|
||||
|
||||
typedef struct {
|
||||
intptr volatile link;
|
||||
uint32 hashnr;
|
||||
const uchar *key;
|
||||
uint keylen;
|
||||
} LF_SLIST;
|
||||
|
||||
typedef struct {
|
||||
intptr volatile *prev;
|
||||
LF_SLIST *curr, *next;
|
||||
} CURSOR;
|
||||
|
||||
#define PTR(V) (LF_SLIST *)((V) & (~(intptr)1))
|
||||
#define DELETED(V) ((V) & 1)
|
||||
|
||||
/*
|
||||
RETURN
|
||||
0 - not found
|
||||
1 - found
|
||||
|
||||
NOTE
|
||||
cursor is positioned in either case
|
||||
pins[0..2] are used, they are NOT removed on return
|
||||
*/
|
||||
static int lfind(intptr volatile *head, uint32 hashnr,
|
||||
const uchar *key, uint keylen, CURSOR *cursor, LF_PINS *pins)
|
||||
{
|
||||
uint32 cur_hashnr;
|
||||
const uchar *cur_key;
|
||||
uint cur_keylen;
|
||||
intptr link;
|
||||
|
||||
retry:
|
||||
cursor->prev=head;
|
||||
do {
|
||||
cursor->curr=PTR(*cursor->prev);
|
||||
_lf_pin(pins,1,cursor->curr);
|
||||
} while(*cursor->prev != (intptr)cursor->curr && LF_BACKOFF);
|
||||
for (;;)
|
||||
{
|
||||
if (!cursor->curr)
|
||||
return 0;
|
||||
do { // XXX or goto retry ?
|
||||
link=cursor->curr->link;
|
||||
cursor->next=PTR(link);
|
||||
_lf_pin(pins, 0, cursor->next);
|
||||
} while(link != cursor->curr->link && LF_BACKOFF);
|
||||
cur_hashnr=cursor->curr->hashnr;
|
||||
cur_key=cursor->curr->key;
|
||||
cur_keylen=cursor->curr->keylen;
|
||||
if (*cursor->prev != (intptr)cursor->curr)
|
||||
{
|
||||
LF_BACKOFF;
|
||||
goto retry;
|
||||
}
|
||||
if (!DELETED(link))
|
||||
{
|
||||
if (cur_hashnr >= hashnr)
|
||||
{
|
||||
int r=1;
|
||||
if (cur_hashnr > hashnr || (r=memcmp(cur_key, key, keylen)) >= 0)
|
||||
return !r;
|
||||
}
|
||||
cursor->prev=&(cursor->curr->link);
|
||||
_lf_pin(pins, 2, cursor->curr);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (my_atomic_casptr((void **)cursor->prev,
|
||||
(void **)&cursor->curr, cursor->next))
|
||||
_lf_alloc_free(pins, cursor->curr);
|
||||
else
|
||||
{
|
||||
LF_BACKOFF;
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
cursor->curr=cursor->next;
|
||||
_lf_pin(pins, 1, cursor->curr);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
RETURN
|
||||
0 - inserted
|
||||
not 0 - a pointer to a conflict
|
||||
|
||||
NOTE
|
||||
it uses pins[0..2], on return all pins are removed.
|
||||
*/
|
||||
static LF_SLIST *linsert(LF_SLIST * volatile *head, LF_SLIST *node,
|
||||
LF_PINS *pins, uint flags)
|
||||
{
|
||||
CURSOR cursor;
|
||||
int res=-1;
|
||||
|
||||
do
|
||||
{
|
||||
if (lfind((intptr*)head, node->hashnr, node->key, node->keylen,
|
||||
&cursor, pins) &&
|
||||
(flags & LF_HASH_UNIQUE))
|
||||
res=0;
|
||||
else
|
||||
{
|
||||
node->link=(intptr)cursor.curr;
|
||||
assert(node->link != (intptr)node);
|
||||
assert(cursor.prev != &node->link);
|
||||
if (my_atomic_casptr((void **)cursor.prev, (void **)&cursor.curr, node))
|
||||
res=1;
|
||||
}
|
||||
} while (res == -1);
|
||||
_lf_unpin(pins, 0);
|
||||
_lf_unpin(pins, 1);
|
||||
_lf_unpin(pins, 2);
|
||||
return res ? 0 : cursor.curr;
|
||||
}
|
||||
|
||||
/*
|
||||
RETURN
|
||||
0 - ok
|
||||
1 - not found
|
||||
NOTE
|
||||
it uses pins[0..2], on return all pins are removed.
|
||||
*/
|
||||
static int ldelete(LF_SLIST * volatile *head, uint32 hashnr,
|
||||
const uchar *key, uint keylen, LF_PINS *pins)
|
||||
{
|
||||
CURSOR cursor;
|
||||
int res=-1;
|
||||
|
||||
do
|
||||
{
|
||||
if (!lfind((intptr *)head, hashnr, key, keylen, &cursor, pins))
|
||||
res= 1;
|
||||
else
|
||||
if (my_atomic_casptr((void **)&(cursor.curr->link),
|
||||
(void **)&cursor.next, 1+(char *)cursor.next))
|
||||
{
|
||||
if (my_atomic_casptr((void **)cursor.prev,
|
||||
(void **)&cursor.curr, cursor.next))
|
||||
_lf_alloc_free(pins, cursor.curr);
|
||||
else
|
||||
lfind((intptr *)head, hashnr, key, keylen, &cursor, pins);
|
||||
res= 0;
|
||||
}
|
||||
} while (res == -1);
|
||||
_lf_unpin(pins, 0);
|
||||
_lf_unpin(pins, 1);
|
||||
_lf_unpin(pins, 2);
|
||||
return res;
|
||||
}
|
||||
|
||||
/*
|
||||
RETURN
|
||||
0 - not found
|
||||
node - found
|
||||
NOTE
|
||||
it uses pins[0..2], on return the pin[2] keeps the node found
|
||||
all other pins are removed.
|
||||
*/
|
||||
static LF_SLIST *lsearch(LF_SLIST * volatile *head, uint32 hashnr,
|
||||
const uchar *key, uint keylen, LF_PINS *pins)
|
||||
{
|
||||
CURSOR cursor;
|
||||
int res=lfind((intptr *)head, hashnr, key, keylen, &cursor, pins);
|
||||
if (res) _lf_pin(pins, 2, cursor.curr);
|
||||
_lf_unpin(pins, 0);
|
||||
_lf_unpin(pins, 1);
|
||||
return res ? cursor.curr : 0;
|
||||
}
|
||||
|
||||
static inline const uchar* hash_key(const LF_HASH *hash,
|
||||
const uchar *record, uint *length)
|
||||
{
|
||||
if (hash->get_key)
|
||||
return (*hash->get_key)(record,length,0);
|
||||
*length=hash->key_length;
|
||||
return record + hash->key_offset;
|
||||
}
|
||||
|
||||
static inline uint calc_hash(LF_HASH *hash, const uchar *key, uint keylen)
|
||||
{
|
||||
ulong nr1=1, nr2=4;
|
||||
hash->charset->coll->hash_sort(hash->charset,key,keylen,&nr1,&nr2);
|
||||
return nr1 & INT_MAX32;
|
||||
}
|
||||
|
||||
#define MAX_LOAD 1
|
||||
static void initialize_bucket(LF_HASH *, LF_SLIST * volatile*, uint, LF_PINS *);
|
||||
|
||||
void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,
|
||||
uint key_offset, uint key_length, hash_get_key get_key,
|
||||
CHARSET_INFO *charset)
|
||||
{
|
||||
lf_alloc_init(&hash->alloc,sizeof(LF_SLIST)+element_size);
|
||||
lf_dynarray_init(&hash->array, sizeof(LF_SLIST **));
|
||||
hash->size=1;
|
||||
hash->count=0;
|
||||
hash->element_size=element_size;
|
||||
hash->flags=flags;
|
||||
hash->charset=charset;
|
||||
hash->key_offset=key_offset;
|
||||
hash->key_length=key_length;
|
||||
hash->get_key=get_key;
|
||||
DBUG_ASSERT(get_key ? !key_offset && !key_length : key_length);
|
||||
}
|
||||
|
||||
void lf_hash_end(LF_HASH *hash)
|
||||
{
|
||||
LF_SLIST *el=*(LF_SLIST **)_lf_dynarray_lvalue(&hash->array, 0);
|
||||
while (el)
|
||||
{
|
||||
intptr next=el->link;
|
||||
lf_alloc_real_free(&hash->alloc, el);
|
||||
el=(LF_SLIST *)next;
|
||||
}
|
||||
lf_alloc_end(&hash->alloc);
|
||||
lf_dynarray_end(&hash->array);
|
||||
}
|
||||
|
||||
/*
|
||||
NOTE
|
||||
see linsert() for pin usage
|
||||
*/
|
||||
int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
|
||||
{
|
||||
uint csize, bucket, hashnr, keylen;
|
||||
LF_SLIST *node, * volatile *el;
|
||||
const uchar *key;
|
||||
|
||||
key= hash_key(hash, data, &keylen);
|
||||
hashnr= calc_hash(hash, key, keylen);
|
||||
bucket= hashnr % hash->size;
|
||||
lf_lock_by_pins(pins);
|
||||
node=(LF_SLIST *)_lf_alloc_new(pins);
|
||||
memcpy(node+1, data, hash->element_size);
|
||||
el=_lf_dynarray_lvalue(&hash->array, bucket);
|
||||
if (*el == NULL)
|
||||
initialize_bucket(hash, el, bucket, pins);
|
||||
node->hashnr=my_reverse_bits(hashnr) | 1;
|
||||
node->key=((char *)(node+1))+(key-(uchar *)data);
|
||||
node->keylen=keylen;
|
||||
if (linsert(el, node, pins, hash->flags))
|
||||
{
|
||||
_lf_alloc_free(pins, node);
|
||||
lf_unlock_by_pins(pins);
|
||||
return 0;
|
||||
}
|
||||
csize= hash->size;
|
||||
if ((my_atomic_add32(&hash->count, 1)+1.0) / csize > MAX_LOAD)
|
||||
my_atomic_cas32(&hash->size, &csize, csize*2);
|
||||
#if 0
|
||||
node=*(LF_SLIST **)_lf_dynarray_lvalue(&hash->array, 0);
|
||||
hashnr=0;
|
||||
while (node)
|
||||
{
|
||||
assert (node->hashnr >= hashnr);
|
||||
hashnr=node->hashnr;
|
||||
node=(LF_SLIST *)node->link;
|
||||
}
|
||||
#endif
|
||||
lf_unlock_by_pins(pins);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
NOTE
|
||||
see ldelete() for pin usage
|
||||
*/
|
||||
int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const uchar *key, uint keylen)
|
||||
{
|
||||
LF_SLIST * volatile *el;
|
||||
uint bucket, hashnr=calc_hash(hash, key, keylen);
|
||||
|
||||
bucket= hashnr % hash->size;
|
||||
lf_lock_by_pins(pins);
|
||||
el=_lf_dynarray_lvalue(&hash->array, bucket);
|
||||
if (*el == NULL)
|
||||
initialize_bucket(hash, el, bucket, pins);
|
||||
if (ldelete(el, my_reverse_bits(hashnr) | 1, key, keylen, pins))
|
||||
{
|
||||
lf_unlock_by_pins(pins);
|
||||
return 0;
|
||||
}
|
||||
my_atomic_add32(&hash->count, -1);
|
||||
#if 0
|
||||
{
|
||||
LF_SLIST *node=*(LF_SLIST **)_lf_dynarray_lvalue(&hash->array, 0);
|
||||
hashnr=0;
|
||||
while (node)
|
||||
{
|
||||
assert (node->hashnr >= hashnr);
|
||||
hashnr=node->hashnr;
|
||||
node=(LF_SLIST *)node->link;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
lf_unlock_by_pins(pins);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
NOTE
|
||||
see lsearch() for pin usage
|
||||
*/
|
||||
int lf_hash_search(LF_HASH *hash, LF_PINS *pins, const uchar *key, uint keylen)
|
||||
{
|
||||
int res;
|
||||
LF_SLIST * volatile *el;
|
||||
uint bucket, hashnr=calc_hash(hash, key, keylen);
|
||||
|
||||
bucket= hashnr % hash->size;
|
||||
lf_lock_by_pins(pins);
|
||||
el=_lf_dynarray_lvalue(&hash->array, bucket);
|
||||
if (*el == NULL)
|
||||
initialize_bucket(hash, el, bucket, pins);
|
||||
res=NULL != lsearch(el, my_reverse_bits(hashnr) | 1, key, keylen, pins);
|
||||
lf_unlock_by_pins(pins);
|
||||
return res;
|
||||
}
|
||||
|
||||
static void initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node,
|
||||
uint bucket, LF_PINS *pins)
|
||||
{
|
||||
uint parent= my_clear_highest_bit(bucket);
|
||||
LF_SLIST *dummy=_lf_alloc_new(pins), **tmp=0, *cur;
|
||||
LF_SLIST * volatile *el=_lf_dynarray_lvalue(&hash->array, parent);
|
||||
if (*el == NULL && bucket)
|
||||
initialize_bucket(hash, el, parent, pins);
|
||||
dummy->hashnr=my_reverse_bits(bucket);
|
||||
LINT_INIT(dummy->key);
|
||||
LINT_INIT(dummy->keylen);
|
||||
if ((cur= linsert(el, dummy, pins, 0)))
|
||||
{
|
||||
_lf_alloc_free(pins, dummy);
|
||||
dummy= cur;
|
||||
}
|
||||
my_atomic_casptr((void **)node, (void **)&tmp, dummy);
|
||||
}
|
||||
|
|
@ -43,6 +43,7 @@
|
|||
#include <keycache.h>
|
||||
#include "my_static.h"
|
||||
#include <m_string.h>
|
||||
#include <my_bit.h>
|
||||
#include <errno.h>
|
||||
#include <stdarg.h>
|
||||
|
||||
|
|
|
@ -18,11 +18,10 @@
|
|||
#include <my_pthread.h>
|
||||
|
||||
#ifndef HAVE_INLINE
|
||||
/*
|
||||
the following will cause all inline functions to be instantiated
|
||||
*/
|
||||
/* the following will cause all inline functions to be instantiated */
|
||||
#define HAVE_INLINE
|
||||
#define static extern
|
||||
#undef STATIC_INLINE
|
||||
#define STATIC_INLINE extern
|
||||
#endif
|
||||
|
||||
#include <my_atomic.h>
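The HAVE_INLINE / STATIC_INLINE juggling above is the usual "compile the inline bodies exactly once" trick; schematically (a condensed sketch assembled from my_bit.h and mysys/my_bit.c in this patch, not literal file contents):

/* in the header (my_bit.h): */
#ifdef HAVE_INLINE
STATIC_INLINE uint my_bit_log2(ulong value)   /* normally "static inline"          */
{
  uint bit;
  for (bit=0 ; value > 1 ; value>>=1, bit++) ;
  return bit;
}
#else
extern uint my_bit_log2(ulong value);         /* declaration only                  */
#endif

/* in exactly one source file (mysys/my_bit.c): */
#ifndef HAVE_INLINE
#define HAVE_INLINE                           /* do compile the bodies here ...      */
#undef  STATIC_INLINE
#define STATIC_INLINE extern                  /* ... but emit them as extern symbols */
#endif
#include <my_bit.h>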
|
||||
|
|
mysys/my_bit.c (100 lines changed)
@ -14,23 +14,18 @@
|
|||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/* Some useful bit functions */
|
||||
#include <my_global.h>
|
||||
|
||||
#include "mysys_priv.h"
|
||||
#ifndef HAVE_INLINE
|
||||
/* the following will cause all inline functions to be instantiated */
|
||||
#define HAVE_INLINE
|
||||
#undef STATIC_INLINE
|
||||
#define STATIC_INLINE extern
|
||||
#endif
|
||||
|
||||
/*
|
||||
Find smallest X in 2^X >= value
|
||||
This can be used to divide a number with value by doing a shift instead
|
||||
*/
|
||||
#include <my_bit.h>
|
||||
|
||||
uint my_bit_log2(ulong value)
|
||||
{
|
||||
uint bit;
|
||||
for (bit=0 ; value > 1 ; value>>=1, bit++) ;
|
||||
return bit;
|
||||
}
|
||||
|
||||
static char nbits[256] = {
|
||||
const char _my_bits_nbits[256] = {
|
||||
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
|
||||
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
|
||||
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
|
||||
|
@ -49,60 +44,29 @@ static char nbits[256] = {
|
|||
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
|
||||
};
|
||||
|
||||
uint my_count_bits(ulonglong v)
|
||||
{
|
||||
#if SIZEOF_LONG_LONG > 4
|
||||
/* The following code is a bit faster on 16 bit machines than if we would
|
||||
only shift v */
|
||||
ulong v2=(ulong) (v >> 32);
|
||||
return (uint) (uchar) (nbits[(uchar) v] +
|
||||
nbits[(uchar) (v >> 8)] +
|
||||
nbits[(uchar) (v >> 16)] +
|
||||
nbits[(uchar) (v >> 24)] +
|
||||
nbits[(uchar) (v2)] +
|
||||
nbits[(uchar) (v2 >> 8)] +
|
||||
nbits[(uchar) (v2 >> 16)] +
|
||||
nbits[(uchar) (v2 >> 24)]);
|
||||
#else
|
||||
return (uint) (uchar) (nbits[(uchar) v] +
|
||||
nbits[(uchar) (v >> 8)] +
|
||||
nbits[(uchar) (v >> 16)] +
|
||||
nbits[(uchar) (v >> 24)]);
|
||||
#endif
|
||||
}
|
||||
|
||||
uint my_count_bits_ushort(ushort v)
|
||||
{
|
||||
return nbits[v];
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Next highest power of two
|
||||
|
||||
SYNOPSIS
|
||||
my_round_up_to_next_power()
|
||||
v Value to check
|
||||
|
||||
RETURN
|
||||
Next or equal power of 2
|
||||
Note: 0 will return 0
|
||||
|
||||
NOTES
|
||||
Algorithm by Sean Anderson, according to:
|
||||
http://graphics.stanford.edu/~seander/bithacks.html
|
||||
(Original code public domain)
|
||||
|
||||
Comments show how this works with 01100000000000000000000000001011
|
||||
perl -e 'print map{", 0x".unpack H2,pack B8,unpack b8,chr$_}(0..255)'
|
||||
*/
|
||||
const uchar _my_bits_reverse_table[256]={
|
||||
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30,
|
||||
0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98,
|
||||
0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64,
|
||||
0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4, 0x0C, 0x8C, 0x4C, 0xCC,
|
||||
0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC, 0x02,
|
||||
0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2,
|
||||
0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A,
|
||||
0xDA, 0x3A, 0xBA, 0x7A, 0xFA, 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6,
|
||||
0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E,
|
||||
0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81,
|
||||
0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71,
|
||||
0xF1, 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9,
|
||||
0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15,
|
||||
0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD,
|
||||
0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD, 0x03, 0x83, 0x43,
|
||||
0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
|
||||
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B,
|
||||
0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97,
|
||||
0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7, 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F,
|
||||
0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
|
||||
};
|
||||
|
||||
uint32 my_round_up_to_next_power(uint32 v)
|
||||
{
|
||||
v--; /* 01100000000000000000000000001010 */
|
||||
v|= v >> 1; /* 01110000000000000000000000001111 */
|
||||
v|= v >> 2; /* 01111100000000000000000000001111 */
|
||||
v|= v >> 4; /* 01111111110000000000000000001111 */
|
||||
v|= v >> 8; /* 01111111111111111100000000001111 */
|
||||
v|= v >> 16; /* 01111111111111111111111111111111 */
|
||||
return v+1; /* 10000000000000000000000000000000 */
|
||||
}
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
#include "mysys_priv.h"
|
||||
#include <my_bitmap.h>
|
||||
#include <m_string.h>
|
||||
#include <my_bit.h>
|
||||
|
||||
void create_last_word_mask(MY_BITMAP *map)
|
||||
{
|
||||
|
|
|
@ -44,6 +44,7 @@ static void netware_init();
|
|||
|
||||
my_bool my_init_done= 0;
|
||||
uint mysys_usage_id= 0; /* Incremented for each my_init() */
|
||||
ulong my_thread_stack_size= 65536;
|
||||
|
||||
static ulong atoi_octal(const char *str)
|
||||
{
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
|
||||
#include "mysql_priv.h"
|
||||
#include <m_ctype.h>
|
||||
#include <my_bit.h>
|
||||
#include <myisampack.h>
|
||||
#include "ha_myisam.h"
|
||||
#include <stdarg.h>
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include <hash.h>
|
||||
#include <time.h>
|
||||
#include <ft_global.h>
|
||||
#include <my_bit.h>
|
||||
|
||||
#include "sp_head.h"
|
||||
#include "sp_rcontext.h"
|
||||
|
|
|
@ -1516,7 +1516,7 @@ extern ulong max_connections,max_connect_errors, connect_timeout;
|
|||
extern ulong slave_net_timeout, slave_trans_retries;
|
||||
extern uint max_user_connections;
|
||||
extern ulong what_to_log,flush_time;
|
||||
extern ulong query_buff_size, thread_stack;
|
||||
extern ulong query_buff_size;
|
||||
extern ulong max_prepared_stmt_count, prepared_stmt_count;
|
||||
extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit;
|
||||
extern ulong max_binlog_size, max_relay_log_size;
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "mysql_priv.h"
|
||||
#include <m_ctype.h>
|
||||
#include <my_dir.h>
|
||||
#include <my_bit.h>
|
||||
#include "slave.h"
|
||||
#include "sql_repl.h"
|
||||
#include "rpl_filter.h"
|
||||
|
@ -486,7 +487,7 @@ uint volatile thread_count, thread_running;
|
|||
ulonglong thd_startup_options;
|
||||
ulong back_log, connect_timeout, concurrency, server_id;
|
||||
ulong table_cache_size, table_def_size;
|
||||
ulong thread_stack, what_to_log;
|
||||
ulong what_to_log;
|
||||
ulong query_buff_size, slow_launch_time, slave_open_temp_tables;
|
||||
ulong open_files_limit, max_binlog_size, max_relay_log_size;
|
||||
ulong slave_net_timeout, slave_trans_retries;
|
||||
|
@ -2149,7 +2150,7 @@ the thread stack. Please read http://www.mysql.com/doc/en/Linux.html\n\n",
|
|||
{
|
||||
fprintf(stderr,"thd=%p\n",thd);
|
||||
print_stacktrace(thd ? (gptr) thd->thread_stack : (gptr) 0,
|
||||
thread_stack);
|
||||
my_thread_stack_size);
|
||||
}
|
||||
if (thd)
|
||||
{
|
||||
|
@ -2281,9 +2282,9 @@ static void start_signal_handler(void)
|
|||
Peculiar things with ia64 platforms - it seems we only have half the
|
||||
stack size in reality, so we have to double it here
|
||||
*/
|
||||
pthread_attr_setstacksize(&thr_attr,thread_stack*2);
|
||||
pthread_attr_setstacksize(&thr_attr,my_thread_stack_size*2);
|
||||
#else
|
||||
pthread_attr_setstacksize(&thr_attr,thread_stack);
|
||||
pthread_attr_setstacksize(&thr_attr,my_thread_stack_size);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -3500,9 +3501,9 @@ int main(int argc, char **argv)
|
|||
Peculiar things with ia64 platforms - it seems we only have half the
|
||||
stack size in reality, so we have to double it here
|
||||
*/
|
||||
pthread_attr_setstacksize(&connection_attrib,thread_stack*2);
|
||||
pthread_attr_setstacksize(&connection_attrib,my_thread_stack_size*2);
|
||||
#else
|
||||
pthread_attr_setstacksize(&connection_attrib,thread_stack);
|
||||
pthread_attr_setstacksize(&connection_attrib,my_thread_stack_size);
|
||||
#endif
|
||||
#ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE
|
||||
{
|
||||
|
@ -3513,15 +3514,15 @@ int main(int argc, char **argv)
|
|||
stack_size/= 2;
|
||||
#endif
|
||||
/* We must check if stack_size = 0 as Solaris 2.9 can return 0 here */
|
||||
if (stack_size && stack_size < thread_stack)
|
||||
if (stack_size && stack_size < my_thread_stack_size)
|
||||
{
|
||||
if (global_system_variables.log_warnings)
|
||||
sql_print_warning("Asked for %ld thread stack, but got %ld",
|
||||
thread_stack, stack_size);
|
||||
my_thread_stack_size, stack_size);
|
||||
#if defined(__ia64__) || defined(__ia64)
|
||||
thread_stack= stack_size*2;
|
||||
my_thread_stack_size= stack_size*2;
|
||||
#else
|
||||
thread_stack= stack_size;
|
||||
my_thread_stack_size= stack_size;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
@ -6333,8 +6334,8 @@ The minimum value for this variable is 4096.",
|
|||
(gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG,
|
||||
DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
|
||||
{"thread_stack", OPT_THREAD_STACK,
|
||||
"The stack size for each thread.", (gptr*) &thread_stack,
|
||||
(gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
|
||||
"The stack size for each thread.", (gptr*) &my_thread_stack_size,
|
||||
(gptr*) &my_thread_stack_size, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
|
||||
1024L*128L, ~0L, 0, 1024, 0},
|
||||
{ "time_format", OPT_TIME_FORMAT,
|
||||
"The TIME format (for future).",
|
||||
|
|
|
@@ -1069,10 +1069,10 @@ SHOW_VAR init_vars[]= {
#ifdef HAVE_THR_SETCONCURRENCY
{"thread_concurrency", (char*) &concurrency, SHOW_LONG},
#endif
{"thread_stack", (char*) &thread_stack, SHOW_LONG},
{"thread_stack", (char*) &my_thread_stack_size, SHOW_LONG},
{sys_time_format.name, (char*) &sys_time_format, SHOW_SYS},
{"time_zone", (char*) &sys_time_zone, SHOW_SYS},
{sys_timed_mutexes.name, (char*) &sys_timed_mutexes, SHOW_SYS},
{sys_timed_mutexes.name, (char*) &sys_timed_mutexes, SHOW_SYS},
{sys_tmp_table_size.name, (char*) &sys_tmp_table_size, SHOW_SYS},
{sys_tmpdir.name, (char*) &sys_tmpdir, SHOW_SYS},
{sys_trans_alloc_block_size.name, (char*) &sys_trans_alloc_block_size,

@@ -5785,10 +5785,10 @@ bool check_stack_overrun(THD *thd, long margin,
long stack_used;
DBUG_ASSERT(thd == current_thd);
if ((stack_used=used_stack(thd->thread_stack,(char*) &stack_used)) >=
(long) (thread_stack - margin))
(long) (my_thread_stack_size - margin))
{
sprintf(errbuff[0],ER(ER_STACK_OVERRUN_NEED_MORE),
stack_used,thread_stack,margin);
stack_used,my_thread_stack_size,margin);
my_message(ER_STACK_OVERRUN_NEED_MORE,errbuff[0],MYF(0));
thd->fatal_error();
return 1;

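check_stack_overrun() compares how much stack the current thread has already used (the distance between the thread's recorded stack base and the address of a local variable) against the configured stack size minus a safety margin. A simplified sketch of the idea, with illustrative names; the server's used_stack() is more careful about per-platform stack growth direction:

    /* A simplified sketch of the overrun check, with illustrative names;
       stack_bottom stands in for thd->thread_stack. */
    #include <stdlib.h>

    static long used_stack(const char *stack_bottom, const char *current)
    {
      /* labs() makes this work whichever direction the stack grows */
      return labs((long) (current - stack_bottom));
    }

    static int stack_would_overrun(const char *stack_bottom,
                                   unsigned long stack_size, long margin)
    {
      char probe;                 /* address of a local marks current depth */
      return used_stack(stack_bottom, &probe) >= (long) (stack_size - margin);
    }
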
@@ -26,6 +26,7 @@
#include "sql_cursor.h"

#include <m_ctype.h>
#include <my_bit.h>
#include <hash.h>
#include <ft_global.h>

@@ -455,7 +455,7 @@ void mysql_print_status()
VOID(my_getwd(current_dir, sizeof(current_dir),MYF(0)));
printf("Current dir: %s\n", current_dir);
printf("Running threads: %d  Stack size: %ld\n", thread_count,
(long) thread_stack);
(long) my_thread_stack_size);
thr_print_locks(); // Write some debug info
#ifndef DBUG_OFF
print_cached_tables();
@@ -530,7 +530,7 @@ Estimated memory (with thread stack):    %ld\n",
(int) info.uordblks,
(int) info.fordblks,
(int) info.keepcost,
(long) (thread_count * thread_stack + info.hblkhd + info.arena));
(long) (thread_count * my_thread_stack_size + info.hblkhd + info.arena));
#endif
puts("");
}

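The "Estimated memory" figure printed above is simple arithmetic: one full stack per running thread plus what malloc currently holds (mallinfo's hblkhd and arena). A tiny illustrative sketch with made-up numbers, not the server's code:

    /* Illustrative only: the estimate is one full stack per thread plus
       whatever malloc currently holds (mallinfo's hblkhd + arena). */
    #include <stdio.h>

    static long estimated_memory(unsigned int threads, unsigned long stack_size,
                                 long heap_mmapped, long heap_arena)
    {
      return (long) (threads * stack_size) + heap_mmapped + heap_arena;
    }

    int main(void)
    {
      /* e.g. 100 connections with 256K stacks and ~8 MB of heap */
      printf("%ld bytes\n", estimated_memory(100, 256 * 1024UL, 0, 8L << 20));
      return 0;
    }
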
@@ -18,6 +18,7 @@

#include "ftdefs.h"
#include "sp_defs.h"
#include <my_bit.h>

#if defined(MSDOS) || defined(__WIN__)
#ifdef __WIN__
@@ -432,7 +433,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
share.state.rec_per_key_part[key_segs-1]=1L;
length+=key_length;
/* Get block length for key, if defined by user */
block_length= (keydef->block_length ?
block_length= (keydef->block_length ?
my_round_up_to_next_power(keydef->block_length) :
myisam_block_size);
block_length= max(block_length, MI_MIN_KEY_BLOCK_LENGTH);

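The key block length above is the user-supplied value rounded up to the next power of two (my_round_up_to_next_power), the default myisam_block_size otherwise, and never less than the minimum. A standalone sketch of such a round-up helper for 32-bit values; this is an illustrative reimplementation, not the server's function:

    /* An illustrative reimplementation of a round-up-to-power-of-two helper
       (the diff uses my_round_up_to_next_power); valid for 32-bit v >= 1. */
    #include <stdint.h>

    static uint32_t round_up_pow2(uint32_t v)
    {
      v--;              /* so exact powers of two map to themselves */
      v |= v >> 1;      /* smear the highest set bit downwards ...  */
      v |= v >> 2;
      v |= v >> 4;
      v |= v >> 8;
      v |= v >> 16;     /* ... until all lower bits are set         */
      return v + 1;     /* next power of two                        */
    }

    /* usage matching the hunk above (names illustrative):
       block_length= user_block_length ? round_up_pow2(user_block_length)
                                       : default_block_size;
       if (block_length < MIN_KEY_BLOCK_LENGTH)
         block_length= MIN_KEY_BLOCK_LENGTH;                         */
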
@@ -27,6 +27,7 @@
#endif
#include "myisamdef.h"
#include <m_ctype.h>
#include <my_bit.h>

#define STANDARD_LENGTH 37
#define MYISAM_KEYS 6

@@ -20,6 +20,7 @@
#include <m_ctype.h>
#include <stdarg.h>
#include <my_getopt.h>
#include <my_bit.h>
#ifdef HAVE_SYS_VADVICE_H
#include <sys/vadvise.h>
#endif

@@ -14,13 +14,20 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#include <my_global.h>
//#define MY_ATOMIC_MODE_RWLOCKS
//#define MY_ATOMIC_MODE_DUMMY

#include <tap.h>

#include <my_global.h>
#include <my_sys.h>
#include <my_atomic.h>
#include <lf.h>

int32 a32,b32,c32;
volatile uint32 a32,b32,c32;
my_atomic_rwlock_t rwl;
LF_ALLOCATOR lf_allocator;
LF_HASH lf_hash;

pthread_attr_t thr_attr;
pthread_mutex_t mutex;
@@ -30,9 +37,9 @@ int N;
/* add and sub a random number in a loop. Must get 0 at the end */
pthread_handler_t test_atomic_add_handler(void *arg)
{
int m=*(int *)arg;
int m=(*(int *)arg)/2;
int32 x;
for (x=((int)(&m)); m ; m--)
for (x=((int)(intptr)(&m)); m ; m--)
{
x=x*m+0x87654321;
my_atomic_rwlock_wrlock(&rwl);
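The test above relies on a simple invariant: if every thread adds and then subtracts the same pseudo-random values through a truly atomic primitive, the shared counter must end at exactly 0, and any lost update shows up as a non-zero result. A sketch of the same idea using GCC/Clang __atomic builtins rather than the my_atomic wrappers; names are illustrative:

    /* A sketch with GCC/Clang __atomic builtins instead of the my_atomic
       wrappers; names are illustrative. Each worker adds and then subtracts
       the same pseudo-random value, so a correct atomic leaves counter == 0. */
    #include <stdint.h>

    static int32_t counter;

    static void *add_sub_worker(void *arg)    /* pthread-style start routine */
    {
      int iterations= *(int *) arg;
      int32_t x= (int32_t) (intptr_t) &iterations;  /* cheap per-thread seed */
      int m;
      for (m= iterations; m; m--)
      {
        x= x * m + 0x87654321;                /* same mixing step as the test */
        __atomic_fetch_add(&counter, x, __ATOMIC_SEQ_CST);
        __atomic_fetch_sub(&counter, x, __ATOMIC_SEQ_CST);
      }
      return 0;
    }
    /* after joining all workers, counter must read 0 */
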
@@ -61,15 +68,9 @@ pthread_handler_t test_atomic_add_handler(void *arg)
pthread_handler_t test_atomic_swap_handler(void *arg)
{
int m=*(int *)arg;
int32 x;
uint32 x=my_atomic_add32(&b32, 1);

my_atomic_rwlock_wrlock(&rwl);
x=my_atomic_add32(&b32, 1);
my_atomic_rwlock_wrunlock(&rwl);

my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&a32, x);
my_atomic_rwlock_wrunlock(&rwl);

for (; m ; m--)
{
@@ -98,29 +99,29 @@ pthread_handler_t test_atomic_swap_handler(void *arg)

/*
same as test_atomic_add_handler, but my_atomic_add32 is emulated with
(slower) my_atomic_cas32
my_atomic_cas32 - notice that the slowdown is proportional to the
number of CPUs
*/
pthread_handler_t test_atomic_cas_handler(void *arg)
{
int m=*(int *)arg, ok;
int32 x,y;
for (x=((int)(&m)); m ; m--)
int m=(*(int *)arg)/2, ok=0;
int32 x, y;
for (x=((int)(intptr)(&m)); m ; m--)
{
my_atomic_rwlock_wrlock(&rwl);
y=my_atomic_load32(&a32);
my_atomic_rwlock_wrunlock(&rwl);

x=x*m+0x87654321;
do {
my_atomic_rwlock_wrlock(&rwl);
ok=my_atomic_cas32(&a32, &y, y+x);
my_atomic_rwlock_wrunlock(&rwl);
} while (!ok);
} while (!ok) ;
do {
my_atomic_rwlock_wrlock(&rwl);
ok=my_atomic_cas32(&a32, &y, y-x);
my_atomic_rwlock_wrunlock(&rwl);
} while (!ok);
} while (!ok) ;
}
pthread_mutex_lock(&mutex);
N--;
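The comment above points out that an atomic add can be emulated with compare-and-swap, at the cost of a retry loop that fails more often as the number of CPUs (and thus contention) grows. A sketch of that emulation with the GCC/Clang builtin; illustrative, not the my_atomic implementation:

    /* A sketch of fetch-and-add emulated with compare-and-swap, using the
       GCC/Clang builtin rather than my_atomic_cas32; illustrative only. */
    #include <stdint.h>

    static int32_t cas_fetch_add(int32_t *ptr, int32_t delta)
    {
      int32_t expected= __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
      /* on failure the builtin reloads the current value into 'expected',
         so the loop simply retries with fresh data; more CPUs means more
         failed attempts, hence the slowdown the comment above mentions */
      while (!__atomic_compare_exchange_n(ptr, &expected, expected + delta,
                                          0 /* strong */, __ATOMIC_SEQ_CST,
                                          __ATOMIC_SEQ_CST))
        ;
      return expected;              /* the value seen before our add */
    }
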
@@ -129,6 +130,128 @@ pthread_handler_t test_atomic_cas_handler(void *arg)
return 0;
}

/*
pin allocator - alloc and release an element in a loop
*/
pthread_handler_t test_lf_pinbox(void *arg)
{
int m=*(int *)arg;
int32 x=0;
LF_PINS *pins;

pins=lf_pinbox_get_pins(&lf_allocator.pinbox);

for (x=((int)(intptr)(&m)); m ; m--)
{
lf_pinbox_put_pins(pins);
pins=lf_pinbox_get_pins(&lf_allocator.pinbox);
}
lf_pinbox_put_pins(pins);
pthread_mutex_lock(&mutex);
N--;
if (!N)
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
return 0;
}

typedef union {
int32 data;
void *not_used; /* to guarantee sizeof(TLA) >= sizeof(void *) */
} TLA;

pthread_handler_t test_lf_alloc(void *arg)
{
int m=(*(int *)arg)/2;
int32 x,y=0;
LF_PINS *pins;

pins=lf_alloc_get_pins(&lf_allocator);

for (x=((int)(intptr)(&m)); m ; m--)
{
TLA *node1, *node2;
x=x*m+0x87654321;
node1=(TLA *)lf_alloc_new(pins);
node1->data=x;
y+=node1->data;
node1->data=0;
node2=(TLA *)lf_alloc_new(pins);
node2->data=x;
y-=node2->data;
node2->data=0;
lf_alloc_free(pins, node1);
lf_alloc_free(pins, node2);
}
lf_alloc_put_pins(pins);
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&a32, y);
my_atomic_rwlock_wrunlock(&rwl);
pthread_mutex_lock(&mutex);
N--;
if (!N)
{
diag("%d mallocs, %d pins in stack",
lf_allocator.mallocs, lf_allocator.pinbox.pins_in_stack);
#ifdef MY_LF_EXTRA_DEBUG
a32|=lf_allocator.mallocs - lf_alloc_in_pool(&lf_allocator);
#endif
pthread_cond_signal(&cond);
}
pthread_mutex_unlock(&mutex);
return 0;
}

#define N_TLH 1000
pthread_handler_t test_lf_hash(void *arg)
{
int m=(*(int *)arg)/(2*N_TLH);
int32 x,y,z,sum=0, ins=0;
LF_PINS *pins;

pins=lf_hash_get_pins(&lf_hash);

for (x=((int)(intptr)(&m)); m ; m--)
{
int i;
y=x;
for (i=0; i < N_TLH; i++)
{
x=x*(m+i)+0x87654321;
z=(x<0) ? -x : x;
if (lf_hash_insert(&lf_hash, pins, &z))
{
sum+=z;
ins++;
}
}
for (i=0; i < N_TLH; i++)
{
y=y*(m+i)+0x87654321;
z=(y<0) ? -y : y;
if (lf_hash_delete(&lf_hash, pins, (uchar *)&z, sizeof(z)))
sum-=z;
}
}
lf_hash_put_pins(pins);
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&a32, sum);
my_atomic_add32(&b32, ins);
my_atomic_rwlock_wrunlock(&rwl);
pthread_mutex_lock(&mutex);
N--;
if (!N)
{
diag("%d mallocs, %d pins in stack, %d hash size, %d inserts",
lf_hash.alloc.mallocs, lf_hash.alloc.pinbox.pins_in_stack,
lf_hash.size, b32);
a32|=lf_hash.count;
pthread_cond_signal(&cond);
}
pthread_mutex_unlock(&mutex);
return 0;
}

void test_atomic(const char *test, pthread_handler handler, int n, int m)
{
pthread_t t;
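The new handlers above all follow the same lock-free-allocator lifecycle: take per-thread pins, allocate, free, and return the pins; the TLA union comment explains why an element must be at least pointer-sized, since a freed element's own storage is reused as a free-list link. A compressed sketch of that lifecycle using the lf_* calls as they appear in this diff; the element type and function are illustrative and assume lf_alloc_init() has already run:

    /* A compressed sketch of the allocator lifecycle used by the handlers
       above; the lf_* calls are the ones visible in this diff, the element
       type and function are illustrative, and lf_alloc_init() is assumed
       to have run already. */
    #include <my_global.h>
    #include <lf.h>

    typedef union
    {
      int32 data;
      void *not_used;       /* keep the element at least pointer-sized */
    } ELEM;

    static void alloc_roundtrip(LF_ALLOCATOR *allocator)
    {
      LF_PINS *pins= lf_alloc_get_pins(allocator); /* per-thread pins */
      ELEM *e= (ELEM *) lf_alloc_new(pins);        /* may reuse a freed element */
      e->data= 42;
      lf_alloc_free(pins, e);                      /* back to the shared pool */
      lf_alloc_put_pins(pins);                     /* release the pins */
    }
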
@@ -141,23 +264,24 @@ void test_atomic(const char *test, pthread_handler handler, int n, int m)
diag("Testing %s with %d threads, %d iterations... ", test, n, m);
for (N=n ; n ; n--)
pthread_create(&t, &thr_attr, handler, &m);

pthread_mutex_lock(&mutex);
while (N)
pthread_cond_wait(&cond, &mutex);
pthread_mutex_unlock(&mutex);
now=my_getsystime()-now;
ok(a32 == 0, "tested %s in %g secs", test, ((double)now)/1e7);
ok(a32 == 0, "tested %s in %g secs (%d)", test, ((double)now)/1e7, a32);
}

int main()
{
int err;

diag("N CPUs: %d", my_getncpus());
my_init();

diag("N CPUs: %d, atomic ops: %s", my_getncpus(), MY_ATOMIC_MODE);
err= my_atomic_initialize();

plan(4);
plan(7);
ok(err == 0, "my_atomic_initialize() returned %d", err);

pthread_attr_init(&thr_attr);
@@ -165,15 +289,31 @@ int main()
pthread_mutex_init(&mutex, 0);
pthread_cond_init(&cond, 0);
my_atomic_rwlock_init(&rwl);
lf_alloc_init(&lf_allocator, sizeof(TLA));
lf_hash_init(&lf_hash, sizeof(int), LF_HASH_UNIQUE, 0, sizeof(int), 0,
&my_charset_bin);

test_atomic("my_atomic_add32", test_atomic_add_handler, 100,10000);
test_atomic("my_atomic_swap32", test_atomic_swap_handler, 100,10000);
test_atomic("my_atomic_cas32", test_atomic_cas_handler, 100,10000);
#ifdef MY_ATOMIC_MODE_RWLOCKS
#define CYCLES 10000
#else
#define CYCLES 1000000
#endif
#define THREADS 100

test_atomic("my_atomic_add32", test_atomic_add_handler, THREADS,CYCLES);
test_atomic("my_atomic_swap32", test_atomic_swap_handler, THREADS,CYCLES);
test_atomic("my_atomic_cas32", test_atomic_cas_handler, THREADS,CYCLES);
test_atomic("lf_pinbox", test_lf_pinbox, THREADS,CYCLES);
test_atomic("lf_alloc", test_lf_alloc, THREADS,CYCLES);
test_atomic("lf_hash", test_lf_hash, THREADS,CYCLES);

lf_hash_end(&lf_hash);
lf_alloc_end(&lf_allocator);
pthread_mutex_destroy(&mutex);
pthread_cond_destroy(&cond);
pthread_attr_destroy(&thr_attr);
my_atomic_rwlock_destroy(&rwl);
my_end(0);
return exit_status();
}

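test_atomic() above fans out n threads and then sleeps on a condition variable until the shared counter N reaches zero, which is why every handler ends with the same lock/decrement/signal sequence; CYCLES is lowered when atomics are emulated with rwlocks, since each operation then takes a lock. A minimal sketch of that completion-counting pattern in plain pthreads; names are illustrative:

    /* A minimal sketch of the completion-counting pattern in plain pthreads;
       names are illustrative. */
    #include <pthread.h>

    static pthread_mutex_t done_mutex= PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done_cond=  PTHREAD_COND_INITIALIZER;
    static int             running;              /* set to n before the run */

    static void worker_done(void)                /* end of every handler */
    {
      pthread_mutex_lock(&done_mutex);
      if (--running == 0)
        pthread_cond_signal(&done_cond);
      pthread_mutex_unlock(&done_mutex);
    }

    static void wait_for_workers(void)           /* the test driver */
    {
      pthread_mutex_lock(&done_mutex);
      while (running)
        pthread_cond_wait(&done_cond, &done_mutex);
      pthread_mutex_unlock(&done_mutex);
    }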