2009-11-30 16:55:03 +01:00
|
|
|
#ifndef MDL_H
|
|
|
|
#define MDL_H
|
|
|
|
/* Copyright (C) 2007-2008 MySQL AB
|
|
|
|
|
|
|
|
This program is free software; you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
|
|
|
the Free Software Foundation; version 2 of the License.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with this program; if not, write to the Free Software
|
|
|
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
|
|
|
|
|
|
|
|
|
|
|
#include "sql_plist.h"
|
|
|
|
#include <my_sys.h>
|
|
|
|
#include <m_string.h>
|
2009-12-02 17:31:57 +01:00
|
|
|
#include <mysql_com.h>
|
2009-11-30 16:55:03 +01:00
|
|
|
|
|
|
|
class THD;

/* Forward declarations for the metadata locking subsystem classes. */
class MDL_context;
class MDL_lock;
class MDL_ticket;
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-01 14:59:11 +01:00
|
|
|
/**
  Type of metadata lock request.

  - High-priority shared locks differ from ordinary shared locks by
    that they ignore pending requests for exclusive locks.
  - Upgradable shared locks can be later upgraded to exclusive
    (because of that their acquisition involves implicit
     acquisition of global intention-exclusive lock).

  @see Comments for can_grant_lock() and can_grant_global_lock() for details.
*/

enum enum_mdl_type {MDL_SHARED=0, MDL_SHARED_HIGH_PRIO,
                    MDL_SHARED_UPGRADABLE, MDL_EXCLUSIVE};


/** States which a metadata lock ticket can have. */

enum enum_mdl_state { MDL_PENDING, MDL_ACQUIRED };

/** Maximal length of key for metadata locking subsystem. */
#define MAX_MDLKEY_LENGTH (1 + NAME_LEN + 1 + NAME_LEN + 1)
|
2009-11-30 16:55:03 +01:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
2009-12-04 00:34:19 +01:00
|
|
|
Metadata lock object key.
|
2009-12-04 00:29:40 +01:00
|
|
|
|
2009-12-04 00:34:19 +01:00
|
|
|
A lock is requested or granted based on a fully qualified name and type.
|
|
|
|
E.g. They key for a table consists of <0 (=table)>+<database>+<table name>.
|
|
|
|
Elsewhere in the comments this triple will be referred to simply as "key"
|
|
|
|
or "name".
|
2009-11-30 16:55:03 +01:00
|
|
|
*/
|
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
class MDL_key
|
2009-11-30 16:55:03 +01:00
|
|
|
{
|
2009-12-04 00:29:40 +01:00
|
|
|
public:
|
2009-12-10 09:21:38 +01:00
|
|
|
/**
|
|
|
|
Object namespaces
|
|
|
|
|
|
|
|
Different types of objects exist in different namespaces
|
|
|
|
- TABLE is for tables and views.
|
|
|
|
- FUNCTION is for stored functions.
|
|
|
|
- PROCEDURE is for stored procedures.
|
|
|
|
- TRIGGER is for triggers.
|
|
|
|
Note that although there isn't metadata locking on triggers,
|
|
|
|
it's necessary to have a separate namespace for them since
|
|
|
|
MDL_key is also used outside of the MDL subsystem.
|
|
|
|
*/
|
|
|
|
enum enum_mdl_namespace { TABLE=0,
|
|
|
|
FUNCTION,
|
|
|
|
PROCEDURE,
|
|
|
|
TRIGGER };
|
|
|
|
|
2009-12-04 00:29:40 +01:00
|
|
|
const uchar *ptr() const { return (uchar*) m_ptr; }
|
|
|
|
uint length() const { return m_length; }
|
|
|
|
|
|
|
|
const char *db_name() const { return m_ptr + 1; }
|
|
|
|
uint db_name_length() const { return m_db_name_length; }
|
|
|
|
|
2009-12-09 17:11:26 +01:00
|
|
|
const char *name() const { return m_ptr + m_db_name_length + 2; }
|
|
|
|
uint name_length() const { return m_length - m_db_name_length - 3; }
|
|
|
|
|
|
|
|
enum_mdl_namespace mdl_namespace() const
|
|
|
|
{ return (enum_mdl_namespace)(m_ptr[0]); }
|
2009-11-30 16:55:03 +01:00
|
|
|
|
|
|
|
/**
|
2009-12-09 09:51:20 +01:00
|
|
|
Construct a metadata lock key from a triplet (mdl_namespace, database and name).
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-09 09:51:20 +01:00
|
|
|
@remark The key for a table is <mdl_namespace>+<database name>+<table name>
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-09 09:51:20 +01:00
|
|
|
@param mdl_namespace Id of namespace of object to be locked
|
|
|
|
@param db Name of database to which the object belongs
|
|
|
|
@param name Name of of the object
|
|
|
|
@param key Where to store the the MDL key.
|
2009-11-30 16:55:03 +01:00
|
|
|
*/
|
2009-12-09 09:51:20 +01:00
|
|
|
void mdl_key_init(enum_mdl_namespace mdl_namespace, const char *db, const char *name)
|
2009-12-04 00:29:40 +01:00
|
|
|
{
|
2009-12-09 09:51:20 +01:00
|
|
|
m_ptr[0]= (char) mdl_namespace;
|
2009-12-04 00:29:40 +01:00
|
|
|
m_db_name_length= (uint) (strmov(m_ptr + 1, db) - m_ptr - 1);
|
|
|
|
m_length= (uint) (strmov(m_ptr + m_db_name_length + 2, name) - m_ptr + 1);
|
|
|
|
}
|
2009-12-04 00:52:05 +01:00
|
|
|
void mdl_key_init(const MDL_key *rhs)
|
2009-12-04 00:29:40 +01:00
|
|
|
{
|
|
|
|
memcpy(m_ptr, rhs->m_ptr, rhs->m_length);
|
|
|
|
m_length= rhs->m_length;
|
|
|
|
m_db_name_length= rhs->m_db_name_length;
|
|
|
|
}
|
2009-12-04 00:52:05 +01:00
|
|
|
bool is_equal(const MDL_key *rhs) const
|
2009-12-04 00:29:40 +01:00
|
|
|
{
|
|
|
|
return (m_length == rhs->m_length &&
|
|
|
|
memcmp(m_ptr, rhs->m_ptr, m_length) == 0);
|
|
|
|
}
|
2009-12-04 00:52:05 +01:00
|
|
|
MDL_key(const MDL_key *rhs)
|
|
|
|
{
|
|
|
|
mdl_key_init(rhs);
|
|
|
|
}
|
2009-12-09 09:51:20 +01:00
|
|
|
MDL_key(enum_mdl_namespace namespace_arg, const char *db_arg, const char *name_arg)
|
2009-12-04 00:52:05 +01:00
|
|
|
{
|
2009-12-09 09:51:20 +01:00
|
|
|
mdl_key_init(namespace_arg, db_arg, name_arg);
|
2009-12-04 00:52:05 +01:00
|
|
|
}
|
|
|
|
MDL_key() {} /* To use when part of MDL_request. */
|
2009-12-09 09:51:20 +01:00
|
|
|
|
2009-12-04 00:29:40 +01:00
|
|
|
private:
|
|
|
|
char m_ptr[MAX_MDLKEY_LENGTH];
|
|
|
|
uint m_length;
|
|
|
|
uint m_db_name_length;
|
2009-12-04 00:52:05 +01:00
|
|
|
private:
|
|
|
|
MDL_key(const MDL_key &); /* not implemented */
|
|
|
|
MDL_key &operator=(const MDL_key &); /* not implemented */
|
2009-11-30 16:55:03 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2009-12-04 00:29:40 +01:00
|
|
|
|
2009-11-30 16:55:03 +01:00
|
|
|
/**
  Hook class which via its methods specifies which members
  of T should be used for participating in MDL lists.
*/

template <typename T, T* T::*next, T** T::*prev>
struct I_P_List_adapter
{
  /* Address of the member pointing at the next list element. */
  static inline T **next_ptr(T *el) { return &(el->*next); }

  /* Address of the member pointing back into the previous element. */
  static inline T ***prev_ptr(T *el) { return &(el->*prev); }
};
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
2009-12-04 00:34:19 +01:00
|
|
|
A pending metadata lock request.
|
2009-12-04 00:52:05 +01:00
|
|
|
|
|
|
|
A lock request and a granted metadata lock are represented by
|
|
|
|
different classes because they have different allocation
|
2009-12-04 00:34:19 +01:00
|
|
|
sites and hence different lifetimes. The allocation of lock requests is
|
|
|
|
controlled from outside of the MDL subsystem, while allocation of granted
|
|
|
|
locks (tickets) is controlled within the MDL subsystem.
|
2009-12-04 00:52:05 +01:00
|
|
|
|
|
|
|
MDL_request is a C structure, you don't need to call a constructor
|
|
|
|
or destructor for it.
|
2009-11-30 16:55:03 +01:00
|
|
|
*/
|
|
|
|
|
2009-12-04 00:57:01 +01:00
|
|
|
class MDL_request
|
2009-11-30 16:55:03 +01:00
|
|
|
{
|
2009-12-04 00:57:01 +01:00
|
|
|
public:
|
2009-12-04 00:29:40 +01:00
|
|
|
/** Type of metadata lock. */
|
|
|
|
enum enum_mdl_type type;
|
|
|
|
|
|
|
|
/**
|
2009-12-04 00:34:19 +01:00
|
|
|
Pointers for participating in the list of lock requests for this context.
|
2009-12-04 00:29:40 +01:00
|
|
|
*/
|
2009-12-08 10:57:07 +01:00
|
|
|
MDL_request *next_in_list;
|
|
|
|
MDL_request **prev_in_list;
|
|
|
|
/**
|
|
|
|
Pointer to the lock ticket object for this lock request.
|
|
|
|
Valid only if this lock request is satisfied.
|
|
|
|
*/
|
|
|
|
MDL_ticket *ticket;
|
|
|
|
|
2009-12-04 00:29:40 +01:00
|
|
|
/** A lock is requested based on a fully qualified name and type. */
|
2009-12-04 00:52:05 +01:00
|
|
|
MDL_key key;
|
|
|
|
|
2009-12-08 10:57:07 +01:00
|
|
|
public:
|
2009-12-10 09:21:38 +01:00
|
|
|
void init(MDL_key::enum_mdl_namespace namespace_arg,
|
|
|
|
const char *db_arg, const char *name_arg,
|
2009-12-08 10:57:07 +01:00
|
|
|
enum_mdl_type mdl_type_arg);
|
2009-12-09 17:11:26 +01:00
|
|
|
void init(const MDL_key *key_arg, enum_mdl_type mdl_type_arg);
|
2009-12-04 00:52:05 +01:00
|
|
|
/** Set type of lock request. Can be only applied to pending locks. */
|
|
|
|
inline void set_type(enum_mdl_type type_arg)
|
|
|
|
{
|
|
|
|
DBUG_ASSERT(ticket == NULL);
|
|
|
|
type= type_arg;
|
|
|
|
}
|
|
|
|
bool is_shared() const { return type < MDL_EXCLUSIVE; }
|
2009-12-04 00:29:40 +01:00
|
|
|
|
2009-12-10 09:21:38 +01:00
|
|
|
static MDL_request *create(MDL_key::enum_mdl_namespace mdl_namespace,
|
|
|
|
const char *db, const char *name,
|
|
|
|
enum_mdl_type mdl_type, MEM_ROOT *root);
|
2009-12-08 10:57:07 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
This is to work around the ugliness of TABLE_LIST
|
|
|
|
compiler-generated assignment operator. It is currently used
|
|
|
|
in several places to quickly copy "most" of the members of the
|
|
|
|
table list. These places currently never assume that the mdl
|
|
|
|
request is carried over to the new TABLE_LIST, or shared
|
|
|
|
between lists.
|
|
|
|
|
|
|
|
This method does not initialize the instance being assigned!
|
|
|
|
Use of init() for initialization after this assignment operator
|
|
|
|
is mandatory. Can only be used before the request has been
|
|
|
|
granted.
|
|
|
|
*/
|
|
|
|
MDL_request& operator=(const MDL_request &rhs)
|
|
|
|
{
|
|
|
|
ticket= NULL;
|
|
|
|
/* Do nothing, in particular, don't try to copy the key. */
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
/* Another piece of ugliness for TABLE_LIST constructor */
|
|
|
|
MDL_request() {}
|
2009-12-04 00:52:05 +01:00
|
|
|
|
2009-12-08 10:57:07 +01:00
|
|
|
MDL_request(const MDL_request *rhs)
|
|
|
|
:type(rhs->type),
|
|
|
|
ticket(NULL),
|
|
|
|
key(&rhs->key)
|
|
|
|
{}
|
2009-12-04 00:29:40 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
typedef void (*mdl_cached_object_release_hook)(void *);
|
|
|
|
|
2009-12-04 00:29:40 +01:00
|
|
|
/**
|
2009-12-04 00:34:19 +01:00
|
|
|
A granted metadata lock.
|
2009-12-04 00:29:40 +01:00
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
@warning MDL_ticket members are private to the MDL subsystem.
|
2009-12-04 00:29:40 +01:00
|
|
|
|
2009-12-04 00:34:19 +01:00
|
|
|
@note Multiple shared locks on a same object are represented by a
|
|
|
|
single ticket. The same does not apply for other lock types.
|
2009-12-04 00:29:40 +01:00
|
|
|
*/
|
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
class MDL_ticket
|
2009-12-04 00:29:40 +01:00
|
|
|
{
|
2009-12-04 00:52:05 +01:00
|
|
|
public:
|
2009-12-04 00:29:40 +01:00
|
|
|
/**
|
2009-12-04 00:34:19 +01:00
|
|
|
Pointers for participating in the list of lock requests for this context.
|
2009-12-04 00:29:40 +01:00
|
|
|
*/
|
2009-12-04 00:52:05 +01:00
|
|
|
MDL_ticket *next_in_context;
|
|
|
|
MDL_ticket **prev_in_context;
|
2009-12-04 00:29:40 +01:00
|
|
|
/**
|
2009-12-04 00:34:19 +01:00
|
|
|
Pointers for participating in the list of satisfied/pending requests
|
|
|
|
for the lock.
|
2009-12-04 00:29:40 +01:00
|
|
|
*/
|
2009-12-04 00:52:05 +01:00
|
|
|
MDL_ticket *next_in_lock;
|
|
|
|
MDL_ticket **prev_in_lock;
|
|
|
|
public:
|
|
|
|
bool has_pending_conflicting_lock() const;
|
|
|
|
|
|
|
|
void *get_cached_object();
|
|
|
|
void set_cached_object(void *cached_object,
|
|
|
|
mdl_cached_object_release_hook release_hook);
|
|
|
|
const MDL_context *get_ctx() const { return m_ctx; }
|
|
|
|
bool is_shared() const { return m_type < MDL_EXCLUSIVE; }
|
2009-12-09 10:44:01 +01:00
|
|
|
bool is_upgradable_or_exclusive() const
|
|
|
|
{
|
|
|
|
return m_type == MDL_SHARED_UPGRADABLE || m_type == MDL_EXCLUSIVE;
|
|
|
|
}
|
2009-12-04 00:52:05 +01:00
|
|
|
bool upgrade_shared_lock_to_exclusive();
|
|
|
|
void downgrade_exclusive_lock();
|
|
|
|
private:
|
|
|
|
friend class MDL_context;
|
|
|
|
|
|
|
|
MDL_ticket(MDL_context *ctx_arg, enum_mdl_type type_arg)
|
|
|
|
: m_type(type_arg),
|
|
|
|
m_state(MDL_PENDING),
|
|
|
|
m_ctx(ctx_arg),
|
|
|
|
m_lock(NULL)
|
|
|
|
{}
|
|
|
|
|
|
|
|
|
|
|
|
static MDL_ticket *create(MDL_context *ctx_arg, enum_mdl_type type_arg);
|
|
|
|
static void destroy(MDL_ticket *ticket);
|
|
|
|
private:
|
|
|
|
/** Type of metadata lock. */
|
|
|
|
enum enum_mdl_type m_type;
|
|
|
|
/** State of the metadata lock ticket. */
|
|
|
|
enum enum_mdl_state m_state;
|
|
|
|
|
2009-12-04 00:29:40 +01:00
|
|
|
/** Context of the owner of the metadata lock ticket. */
|
2009-12-04 00:52:05 +01:00
|
|
|
MDL_context *m_ctx;
|
2009-12-04 00:29:40 +01:00
|
|
|
|
|
|
|
/** Pointer to the lock object for this lock ticket. */
|
2009-12-04 00:52:05 +01:00
|
|
|
MDL_lock *m_lock;
|
|
|
|
private:
|
|
|
|
MDL_ticket(const MDL_ticket &); /* not implemented */
|
|
|
|
MDL_ticket &operator=(const MDL_ticket &); /* not implemented */
|
2009-12-30 18:53:30 +01:00
|
|
|
|
|
|
|
bool has_pending_conflicting_lock_impl() const;
|
2009-11-30 16:55:03 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2009-12-08 10:57:07 +01:00
|
|
|
/** Intrusive list of lock requests, linked through MDL_request members. */
typedef I_P_List<MDL_request, I_P_List_adapter<MDL_request,
                 &MDL_request::next_in_list,
                 &MDL_request::prev_in_list> >
        MDL_request_list;
|
|
|
|
|
2009-11-30 16:55:03 +01:00
|
|
|
/**
|
2009-12-04 00:34:19 +01:00
|
|
|
Context of the owner of metadata locks. I.e. each server
|
|
|
|
connection has such a context.
|
2009-11-30 16:55:03 +01:00
|
|
|
*/
|
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
class MDL_context
|
2009-11-30 16:55:03 +01:00
|
|
|
{
|
2009-12-04 00:52:05 +01:00
|
|
|
public:
|
|
|
|
typedef I_P_List<MDL_ticket,
|
|
|
|
I_P_List_adapter<MDL_ticket,
|
|
|
|
&MDL_ticket::next_in_context,
|
|
|
|
&MDL_ticket::prev_in_context> >
|
2009-12-04 00:29:40 +01:00
|
|
|
Ticket_list;
|
|
|
|
|
|
|
|
typedef Ticket_list::Iterator Ticket_iterator;
|
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
void init(THD *thd);
|
|
|
|
void destroy();
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-08 10:57:07 +01:00
|
|
|
bool try_acquire_shared_lock(MDL_request *mdl_request);
|
|
|
|
bool acquire_exclusive_lock(MDL_request *mdl_request);
|
|
|
|
bool acquire_exclusive_locks(MDL_request_list *requests);
|
|
|
|
bool try_acquire_exclusive_lock(MDL_request *mdl_request);
|
2009-12-04 00:52:05 +01:00
|
|
|
bool acquire_global_shared_lock();
|
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is maintaining the sentinel up to date when
someone release its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
with locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since HANDLER and LOCK TABLES
statement has never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
2009-12-22 17:09:15 +01:00
|
|
|
bool clone_ticket(MDL_request *mdl_request);
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-08 10:57:07 +01:00
|
|
|
bool wait_for_locks(MDL_request_list *requests);
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
void release_all_locks_for_name(MDL_ticket *ticket);
|
|
|
|
void release_lock(MDL_ticket *ticket);
|
|
|
|
void release_global_shared_lock();
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-10 09:21:38 +01:00
|
|
|
bool is_exclusive_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace,
|
2009-12-04 00:52:05 +01:00
|
|
|
const char *db,
|
|
|
|
const char *name);
|
2009-12-10 09:21:38 +01:00
|
|
|
bool is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace,
|
|
|
|
const char *db, const char *name);
|
2009-11-30 16:55:03 +01:00
|
|
|
|
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is maintaining the sentinel up to date when
someone release its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
with locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since HANDLER and LOCK TABLES
statement has never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
2009-12-22 17:09:15 +01:00
|
|
|
|
|
|
|
bool has_lock(MDL_ticket *mdl_savepoint, MDL_ticket *mdl_ticket);
|
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
inline bool has_locks() const
|
|
|
|
{
|
|
|
|
return !m_tickets.is_empty();
|
|
|
|
}
|
2009-11-30 16:55:03 +01:00
|
|
|
|
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is maintaining the sentinel up to date when
someone release its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
with locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since HANDLER and LOCK TABLES
statement has never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
2009-12-22 17:09:15 +01:00
|
|
|
MDL_ticket *mdl_savepoint()
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
NULL savepoint represents the start of the transaction.
|
|
|
|
Checking for m_lt_or_ha_sentinel also makes sure we never
|
|
|
|
return a pointer to HANDLER ticket as a savepoint.
|
|
|
|
*/
|
|
|
|
return m_tickets.front() == m_lt_or_ha_sentinel ? NULL : m_tickets.front();
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_lt_or_ha_sentinel()
|
2009-12-04 00:52:05 +01:00
|
|
|
{
|
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is maintaining the sentinel up to date when
someone release its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
with locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since HANDLER and LOCK TABLES
statement has never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
2009-12-22 17:09:15 +01:00
|
|
|
DBUG_ASSERT(m_lt_or_ha_sentinel == NULL);
|
|
|
|
m_lt_or_ha_sentinel= mdl_savepoint();
|
2009-12-04 00:52:05 +01:00
|
|
|
}
|
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is maintaining the sentinel up to date when
someone release its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
with locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since HANDLER and LOCK TABLES
statement has never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
2009-12-22 17:09:15 +01:00
|
|
|
MDL_ticket *lt_or_ha_sentinel() const { return m_lt_or_ha_sentinel; }
|
2009-11-30 16:55:03 +01:00
|
|
|
|
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is maintaining the sentinel up to date when
someone release its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
with locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since HANDLER and LOCK TABLES
statement has never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
2009-12-22 17:09:15 +01:00
|
|
|
void clear_lt_or_ha_sentinel()
|
|
|
|
{
|
|
|
|
m_lt_or_ha_sentinel= NULL;
|
|
|
|
}
|
|
|
|
void move_ticket_after_lt_or_ha_sentinel(MDL_ticket *mdl_ticket);
|
|
|
|
|
|
|
|
void release_transactional_locks();
|
2009-12-04 00:52:05 +01:00
|
|
|
void rollback_to_savepoint(MDL_ticket *mdl_savepoint);
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-30 18:53:30 +01:00
|
|
|
bool can_wait_lead_to_deadlock() const;
|
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
/** Return the THD this metadata lock context is associated with. */
inline THD *get_thd() const { return m_thd; }
|
2009-12-30 18:53:30 +01:00
|
|
|
|
|
|
|
/**
  TRUE if the owner of this context is currently blocked inside the
  MDL subsystem waiting for a lock (see m_is_waiting_in_mdl).
*/
bool is_waiting_in_mdl() const { return m_is_waiting_in_mdl; }
|
2009-12-04 00:52:05 +01:00
|
|
|
private:
|
|
|
|
Ticket_list m_tickets;
|
|
|
|
bool m_has_global_shared_lock;
|
2009-12-30 18:53:30 +01:00
|
|
|
/**
|
|
|
|
Indicates that the owner of this context is waiting in
|
|
|
|
wait_for_locks() method.
|
|
|
|
*/
|
|
|
|
bool m_is_waiting_in_mdl;
|
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is maintaining the sentinel up to date when
someone releases its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
with locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since HANDLER and LOCK TABLES
statements have never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
2009-12-22 17:09:15 +01:00
|
|
|
/**
|
|
|
|
This member has two uses:
|
|
|
|
1) When entering LOCK TABLES mode, remember the last taken
|
|
|
|
metadata lock. COMMIT/ROLLBACK must preserve these metadata
|
|
|
|
locks.
|
|
|
|
2) When we have open HANDLER tables, store the position
|
|
|
|
in the list beyond which we keep locks for HANDLER tables.
|
|
|
|
COMMIT/ROLLBACK must, again, preserve HANDLER metadata locks.
|
|
|
|
*/
|
|
|
|
MDL_ticket *m_lt_or_ha_sentinel;
|
2009-12-04 00:52:05 +01:00
|
|
|
THD *m_thd;
|
|
|
|
private:
|
|
|
|
void release_ticket(MDL_ticket *ticket);
|
2009-12-30 18:53:30 +01:00
|
|
|
bool can_wait_lead_to_deadlock_impl() const;
|
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is maintaining the sentinel up to date when
someone releases its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
with locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since HANDLER and LOCK TABLES
statements have never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
2009-12-22 17:09:15 +01:00
|
|
|
MDL_ticket *find_ticket(MDL_request *mdl_req,
|
|
|
|
bool *is_lt_or_ha);
|
|
|
|
void release_locks_stored_before(MDL_ticket *sentinel);
|
2009-12-04 00:52:05 +01:00
|
|
|
};
|
2009-11-30 16:55:03 +01:00
|
|
|
|
|
|
|
|
2009-12-04 00:52:05 +01:00
|
|
|
void mdl_init();
|
|
|
|
void mdl_destroy();
|
2009-11-30 16:55:03 +01:00
|
|
|
|
2009-12-02 17:31:57 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
Functions in the server's kernel used by metadata locking subsystem.
|
|
|
|
*/
|
|
|
|
|
|
|
|
extern bool mysql_notify_thread_having_shared_lock(THD *thd, THD *in_use);
|
|
|
|
extern void mysql_ha_flush(THD *thd);
|
2009-12-30 18:53:30 +01:00
|
|
|
extern void mysql_abort_transactions_with_shared_lock(const MDL_key *mdl_key);
|
2009-12-02 17:31:57 +01:00
|
|
|
extern "C" const char *set_thd_proc_info(THD *thd, const char *info,
|
|
|
|
const char *calling_function,
|
|
|
|
const char *calling_file,
|
|
|
|
const unsigned int calling_line);
|
|
|
|
#ifndef DBUG_OFF
|
|
|
|
extern pthread_mutex_t LOCK_open;
|
|
|
|
#endif
|
|
|
|
|
2009-11-30 16:55:03 +01:00
|
|
|
#endif
|