mariadb/mysys/safemalloc.c
Monty 17a87d6063 MDEV-10139 Support for SEQUENCE objects
Working features:
CREATE OR REPLACE [TEMPORARY] SEQUENCE [IF NOT EXISTS] name
    [ INCREMENT [ BY | = ] increment ]
    [ MINVALUE [=] minvalue | NO MINVALUE ]
    [ MAXVALUE [=] maxvalue | NO MAXVALUE ]
    [ START [ WITH | = ] start ] [ CACHE [=] cache ] [ [ NO ] CYCLE ]
    ENGINE=xxx COMMENT=".."
SELECT NEXT VALUE FOR sequence_name;
SELECT NEXTVAL(sequence_name);
SELECT PREVIOUS VALUE FOR sequence_name;
SELECT LASTVAL(sequence_name);

SHOW CREATE SEQUENCE sequence_name;
SHOW CREATE TABLE sequence_name;
CREATE TABLE sequence-structure ... SEQUENCE=1
ALTER TABLE sequence RENAME TO sequence2;
RENAME TABLE sequence TO sequence2;
DROP [TEMPORARY] SEQUENCE  [IF EXISTS] sequence_names
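Usage example (illustrative only; the sequence name s1 is made up, and the
returned values follow from the START WITH / INCREMENT options shown above):

CREATE SEQUENCE s1 START WITH 100 INCREMENT BY 10;
SELECT NEXT VALUE FOR s1;       -- 100
SELECT NEXT VALUE FOR s1;       -- 110
SELECT PREVIOUS VALUE FOR s1;   -- 110
SHOW CREATE SEQUENCE s1;
DROP SEQUENCE s1;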

Missing features
- SETVAL(value,sequence_name), to be used with replication.
- Check replication, including checking that sequence tables are marked
  not transactional.
- Check that a commit happens for NEXT VALUE that changes table data (may
  already work)
- ALTER SEQUENCE. ANSI SQL version of setval.
- Share identical sequence entries so that they are not added twice to the
  table list.
- testing insert/delete/update/truncate/load data
- Run and fix Alibaba sequence tests (part of mysql-test/suite/sql_sequence)
- Write documentation for NEXT VALUE / PREVIOUS_VALUE
- NEXTVAL in DEFAULT
  - Ensure that NEXTVAL in DEFAULT uses database from base table
- Two NEXTVAL calls for the same row should give the same answer.
- Oracle syntax sequence_table.nextval, without any FOR or FROM.
- Sequence tables are treated as 'not read constant tables' by SELECT; it would
  be better to have a separate list for sequence tables so that SELECT does not
  know about them, except when referred to with FROM.

Other things done:
- Improved output for safemalloc backtraces
- frm_type_enum changed to Table_type
- Removed lex->is_view and replaced it with lex->table_type. This allows
  us to more easily check whether an item is a view, sequence or table.
- Added table flag HA_CAN_TABLES_WITHOUT_ROLLBACK, needed for handlers
  that want to support sequences
- Added handler calls:
 - engine_name(), to simplify getting engine name for partition and sequences
 - update_first_row(), to be able to do efficient sequence implementations.
 - Made binlog_log_row() global to be able to call it from ha_sequence.cc
- Added handler variable: row_already_logged, to be able to flag that the
  changed row has already been logged to the replication log.
- Added CF_DB_CHANGE and CF_SCHEMA_CHANGE flags to simplify
  deny_updates_if_read_only_option()
- Added sp_add_cfetch() to avoid new conflicts in sql_yacc.yy
- Moved code for add_table_options() out from sql_show.cc::show_create_table()
- Added String::append_longlong() and used it in sql_show.cc to simplify code.
- Added an extra option to dd_frm_type() and ha_table_exists() to indicate if
  the table is a sequence. Needed by DROP SEQUENCE so that it does not drop a
  normal table.
2017-04-07 18:09:56 +04:00

/* Copyright (C) 2000 MySQL AB, 2011 Monty Program Ab

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

/********************************************************************
  memory debugger
  based on safemalloc, memory sub-system, written by Bjorn Benson
********************************************************************/
#include "mysys_priv.h"
#include <my_stacktrace.h> /* my_addr_resolve */
#if HAVE_EXECINFO_H
#include <execinfo.h>
#endif
/*
this can be set to 1 if we leak memory and know it
(to disable memory leak tests on exit)
*/
int sf_leaking_memory= 0;
#ifdef SAFEMALLOC
/* this mutex protects all sf_* variables, and nothing else*/
static pthread_mutex_t sf_mutex;
static int init_done= 0;
#ifndef SF_REMEMBER_FRAMES
#define SF_REMEMBER_FRAMES 8
#endif
/* ignore the first two frames (sf_malloc itself, and my_malloc) */
#define SF_FRAMES_SKIP 2
/*
  Structure that stores information of an allocated memory block
  The data is at &struct_adr+sizeof(struct irem)
  Note that sizeof(struct st_irem) % sizeof(double) == 0
*/
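/*
  Block layout produced by sf_malloc() below (illustration only):

    +----------------+-----------------------------+------------------+
    | struct st_irem | user data (datasize bytes)  | 4 MAGICEND bytes |
    +----------------+-----------------------------+------------------+
                     ^-- pointer returned to the caller (irem + 1)

  The 'marker' field (MAGICSTART) sits directly before the user data and
  catches underruns; the trailing MAGICEND0..MAGICEND3 bytes catch
  overruns. Both are verified in bad_ptr().
*/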
struct st_irem
{
  struct st_irem *next;              /* Linked list of structures       */
  struct st_irem *prev;              /* Other link                      */
  size_t datasize;                   /* Size requested                  */
#if SIZEOF_SIZE_T == 4
  size_t pad;                        /* Compensate 32bit datasize       */
#endif
#ifdef HAVE_BACKTRACE
  void *frame[SF_REMEMBER_FRAMES];   /* call stack                      */
#endif
  uint32 flags;                      /* Flags passed to malloc          */
  my_thread_id thread_id;            /* Which thread did the allocation */
  uint32 marker;                     /* Underrun marker value           */
};
static int sf_malloc_count= 0; /* Number of allocated chunks */
static void *sf_min_adress= (void*) (intptr)~0ULL,
            *sf_max_adress= 0;
static struct st_irem *sf_malloc_root = 0;
#define MAGICSTART 0x14235296 /* A magic value for underrun key */
#define MAGICEND0 0x68 /* Magic values for overrun keys */
#define MAGICEND1 0x34 /* " */
#define MAGICEND2 0x7A /* " */
#define MAGICEND3 0x15 /* " */
static int bad_ptr(const char *where, void *ptr);
static void free_memory(void *ptr);
static void sf_terminate();
/* Setup default call to get a thread id for the memory */
my_thread_id default_sf_malloc_dbug_id(void)
{
  return my_thread_dbug_id();
}
my_thread_id (*sf_malloc_dbug_id)(void)= default_sf_malloc_dbug_id;
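/*
  Note: this is a plain function pointer, so code outside this file may
  install its own id function by assigning to sf_malloc_dbug_id; the
  default above simply uses my_thread_dbug_id().
*/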
/**
allocates memory
*/
void *sf_malloc(size_t size, myf my_flags)
{
  struct st_irem *irem;
  uchar *data;
  /*
    this style of initialization looks race-condition prone,
    but it is safe under the assumption that a program does
    at least one malloc() while still being single threaded.
  */
  if (!init_done)
  {
    pthread_mutex_init(&sf_mutex, NULL);
    atexit(sf_terminate);
    init_done= 1;
  }
  irem= (struct st_irem *) malloc(sizeof(struct st_irem) + size + 4);
  if (!irem)
    return 0;
  /* we guarantee the alignment */
  compile_time_assert(sizeof(struct st_irem) % sizeof(double) == 0);
  /* Fill up the structure */
  data= (uchar*) (irem + 1);
  irem->datasize= size;
  irem->prev= 0;
  irem->flags= my_flags;
  irem->marker= MAGICSTART;
  irem->thread_id= sf_malloc_dbug_id();
  data[size + 0]= MAGICEND0;
  data[size + 1]= MAGICEND1;
  data[size + 2]= MAGICEND2;
  data[size + 3]= MAGICEND3;
#ifdef HAVE_BACKTRACE
  {
    void *frame[SF_REMEMBER_FRAMES + SF_FRAMES_SKIP];
    int frames= backtrace(frame, array_elements(frame));
    if (frames < SF_FRAMES_SKIP)
      frames= 0;
    else
    {
      frames-= SF_FRAMES_SKIP;
      memcpy(irem->frame, frame + SF_FRAMES_SKIP, sizeof(void*)*frames);
    }
    if (frames < SF_REMEMBER_FRAMES)
      irem->frame[frames]= 0;
  }
#endif
  pthread_mutex_lock(&sf_mutex);
  /* Add this structure to the linked list */
  if ((irem->next= sf_malloc_root))
    sf_malloc_root->prev= irem;
  sf_malloc_root= irem;
  /* Keep the statistics */
  sf_malloc_count++;
  set_if_smaller(sf_min_adress, (void*)data);
  set_if_bigger(sf_max_adress, (void*)data);
  pthread_mutex_unlock(&sf_mutex);
  TRASH_ALLOC(data, size);
  return data;
}
void *sf_realloc(void *ptr, size_t size, myf my_flags)
{
  char *data;
  if (!ptr)
    return sf_malloc(size, my_flags);
  if (bad_ptr("Reallocating", ptr))
    return 0;
  if ((data= sf_malloc(size, my_flags)))
  {
    struct st_irem *irem= (struct st_irem *)ptr - 1;
    set_if_smaller(size, irem->datasize);
    memcpy(data, ptr, size);
    free_memory(ptr);
  }
  return data;
}
void sf_free(void *ptr)
{
  if (!ptr || bad_ptr("Freeing", ptr))
    return;
  free_memory(ptr);
}
/**
  Return the size of a memory block and whether it is thread specific

  sf_malloc_usable_size()
  @param ptr                  Pointer to malloced block
  @param is_thread_specific   We will store 1 here if the block is marked as
                              MY_THREAD_SPECIFIC, otherwise 0
  @return Size of block
*/
size_t sf_malloc_usable_size(void *ptr, my_bool *is_thread_specific)
{
  struct st_irem *irem= (struct st_irem *)ptr - 1;
  DBUG_ENTER("sf_malloc_usable_size");
  *is_thread_specific= MY_TEST(irem->flags & MY_THREAD_SPECIFIC);
  DBUG_PRINT("exit", ("size: %lu  flags: %lu", (ulong) irem->datasize,
                      (ulong) irem->flags));
  DBUG_RETURN(irem->datasize);
}
#ifdef HAVE_BACKTRACE
static void print_stack(void **frame)
{
  const char *err;
  int i;
  if ((err= my_addr_resolve_init()))
  {
    fprintf(stderr, "(my_addr_resolve failure: %s)\n", err);
    return;
  }
  for (i= 0; i < SF_REMEMBER_FRAMES && frame[i]; i++)
  {
    my_addr_loc loc;
    if (i)
      fprintf(stderr, ", ");
    if (my_addr_resolve(frame[i], &loc))
      fprintf(stderr, "%p", frame[i]);
    else
      fprintf(stderr, "%s:%u", loc.file, loc.line);
  }
  fprintf(stderr, "\n");
}
#else
#define print_stack(X) fprintf(stderr, "???\n")
#endif
static void free_memory(void *ptr)
{
  struct st_irem *irem= (struct st_irem *)ptr - 1;
  if ((irem->flags & MY_THREAD_SPECIFIC) && irem->thread_id &&
      irem->thread_id != sf_malloc_dbug_id())
  {
    fprintf(stderr, "Warning: %4lu bytes freed by T@%lu, allocated by T@%lu at ",
            (ulong) irem->datasize,
            (ulong) sf_malloc_dbug_id(), (ulong) irem->thread_id);
    print_stack(irem->frame);
  }
  pthread_mutex_lock(&sf_mutex);
  /* Remove this structure from the linked list */
  if (irem->prev)
    irem->prev->next= irem->next;
  else
    sf_malloc_root= irem->next;
  if (irem->next)
    irem->next->prev= irem->prev;
  /* Handle the statistics */
  sf_malloc_count--;
  pthread_mutex_unlock(&sf_mutex);
  /* only trash the data and magic values, but keep the stack trace */
  TRASH_FREE((uchar*)(irem + 1) - 4, irem->datasize + 8);
  free(irem);
  return;
}
static void warn(const char *format,...)
{
  va_list args;
  DBUG_PRINT("error", ("%s", format));
  va_start(args,format);
  fflush(stderr);
  vfprintf(stderr, format, args);
  va_end(args);
#ifdef HAVE_BACKTRACE
  {
    void *frame[SF_REMEMBER_FRAMES + SF_FRAMES_SKIP];
    int frames= backtrace(frame, array_elements(frame));
    fprintf(stderr, " at ");
    if (frames < SF_REMEMBER_FRAMES + SF_FRAMES_SKIP)
      frame[frames]= 0;
    print_stack(frame + SF_FRAMES_SKIP);
  }
#endif
}
static int bad_ptr(const char *where, void *ptr)
{
  struct st_irem *irem= (struct st_irem *)ptr - 1;
  const uchar *magicend;
  if (((intptr) ptr) % sizeof(double))
  {
    warn("Error: %s wrong aligned pointer", where);
    return 1;
  }
  if (ptr < sf_min_adress || ptr > sf_max_adress)
  {
    warn("Error: %s pointer out of range", where);
    return 1;
  }
  if (irem->marker != MAGICSTART)
  {
    DBUG_PRINT("error", ("Unallocated data or underrun buffer %p", ptr));
    warn("Error: %s unallocated data or underrun buffer %p", where, ptr);
    return 1;
  }
  magicend= (uchar*)ptr + irem->datasize;
  if (magicend[0] != MAGICEND0 ||
      magicend[1] != MAGICEND1 ||
      magicend[2] != MAGICEND2 ||
      magicend[3] != MAGICEND3)
  {
    DBUG_PRINT("error", ("Overrun buffer %p", ptr));
    warn("Error: %s overrun buffer %p", where, ptr);
    fprintf(stderr, "Allocated at ");
    print_stack(irem->frame);
    return 1;
  }
  return 0;
}
/* check all allocated memory list for consistency */
static int sf_sanity()
{
  struct st_irem *irem;
  int flag= 0;
  int count= 0;
  pthread_mutex_lock(&sf_mutex);
  count= sf_malloc_count;
  for (irem= sf_malloc_root; irem && count > 0; count--, irem= irem->next)
    flag+= bad_ptr("Safemalloc", irem + 1);
  pthread_mutex_unlock(&sf_mutex);
  if (count || irem)
  {
    warn("Error: Safemalloc link list destroyed");
    return 1;
  }
  return 0;
}
/**
  report on all the memory pieces that have not been free'd

  @param id    Id of thread to report. 0 if all
*/
void sf_report_leaked_memory(my_thread_id id)
{
  size_t total= 0;
  struct st_irem *irem;
  sf_sanity();
  /* Report on all the memory that was allocated but not free'd */
  for (irem= sf_malloc_root; irem; irem= irem->next)
  {
    if (!id || (irem->thread_id == id && irem->flags & MY_THREAD_SPECIFIC))
    {
      my_thread_id tid = irem->thread_id && irem->flags & MY_THREAD_SPECIFIC ?
                         irem->thread_id : 0;
      fprintf(stderr, "Warning: %4lu bytes lost at %p, allocated by T@%llu at ",
              (ulong) irem->datasize, (char*) (irem + 1), tid);
      print_stack(irem->frame);
      total+= irem->datasize;
    }
  }
  if (total)
    fprintf(stderr, "Memory lost: %lu bytes in %d chunks\n",
            (ulong) total, sf_malloc_count);
  return;
}
static void sf_terminate()
{
  if (!sf_leaking_memory)
    sf_report_leaked_memory(0);
  pthread_mutex_destroy(&sf_mutex);
}
#endif