mirror of
https://github.com/MariaDB/server.git
synced 2025-01-17 20:42:30 +01:00
155e78f014
BitKeeper/deleted/.del-ex_access.wpj~3df6ae8c99bf7c5f: Delete: bdb/build_vxworks/ex_access/ex_access.wpj BitKeeper/deleted/.del-ex_btrec.wpj~a7622f1c6f432dc6: Delete: bdb/build_vxworks/ex_btrec/ex_btrec.wpj BitKeeper/deleted/.del-ex_dbclient.wpj~7345440f3b204cdd: Delete: bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj BitKeeper/deleted/.del-ex_env.wpj~fbe1ab10b04e8b74: Delete: bdb/build_vxworks/ex_env/ex_env.wpj BitKeeper/deleted/.del-ex_mpool.wpj~4479cfd5c45f327d: Delete: bdb/build_vxworks/ex_mpool/ex_mpool.wpj BitKeeper/deleted/.del-ex_tpcb.wpj~f78093006e14bf41: Delete: bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj BitKeeper/deleted/.del-db_buildall.dsp~bd749ff6da11682: Delete: bdb/build_win32/db_buildall.dsp BitKeeper/deleted/.del-cxx_app.cpp~ad8df8e0791011ed: Delete: bdb/cxx/cxx_app.cpp BitKeeper/deleted/.del-cxx_log.cpp~a50ff3118fe06952: Delete: bdb/cxx/cxx_log.cpp BitKeeper/deleted/.del-cxx_table.cpp~ecd751e79b055556: Delete: bdb/cxx/cxx_table.cpp BitKeeper/deleted/.del-namemap.txt~796a3acd3885d8fd: Delete: bdb/cxx/namemap.txt BitKeeper/deleted/.del-Design.fileop~3ca4da68f1727373: Delete: bdb/db/Design.fileop BitKeeper/deleted/.del-db185_int.h~61bee3736e7959ef: Delete: bdb/db185/db185_int.h BitKeeper/deleted/.del-acconfig.h~411e8854d67ad8b5: Delete: bdb/dist/acconfig.h BitKeeper/deleted/.del-mutex.m4~a13383cde18a64e1: Delete: bdb/dist/aclocal/mutex.m4 BitKeeper/deleted/.del-options.m4~b9d0ca637213750a: Delete: bdb/dist/aclocal/options.m4 BitKeeper/deleted/.del-programs.m4~3ce7890b47732b30: Delete: bdb/dist/aclocal/programs.m4 BitKeeper/deleted/.del-tcl.m4~f944e2db93c3b6db: Delete: bdb/dist/aclocal/tcl.m4 BitKeeper/deleted/.del-types.m4~59cae158c9a32cff: Delete: bdb/dist/aclocal/types.m4 BitKeeper/deleted/.del-script~d38f6d3a4f159cb4: Delete: bdb/dist/build/script BitKeeper/deleted/.del-configure.in~ac795a92c8fe049c: Delete: bdb/dist/configure.in BitKeeper/deleted/.del-ltconfig~66bbd007d8024af: Delete: bdb/dist/ltconfig BitKeeper/deleted/.del-rec_ctemp~a28554362534f00a: 
Delete: bdb/dist/rec_ctemp BitKeeper/deleted/.del-s_tcl~2ffe4326459fcd9f: Delete: bdb/dist/s_tcl BitKeeper/deleted/.del-.IGNORE_ME~d8148b08fa7d5d15: Delete: bdb/dist/template/.IGNORE_ME BitKeeper/deleted/.del-btree.h~179f2aefec1753d: Delete: bdb/include/btree.h BitKeeper/deleted/.del-cxx_int.h~6b649c04766508f8: Delete: bdb/include/cxx_int.h BitKeeper/deleted/.del-db.src~6b433ae615b16a8d: Delete: bdb/include/db.src BitKeeper/deleted/.del-db_185.h~ad8b373d9391d35c: Delete: bdb/include/db_185.h BitKeeper/deleted/.del-db_am.h~a714912b6b75932f: Delete: bdb/include/db_am.h BitKeeper/deleted/.del-db_cxx.h~fcafadf45f5d19e9: Delete: bdb/include/db_cxx.h BitKeeper/deleted/.del-db_dispatch.h~6844f20f7eb46904: Delete: bdb/include/db_dispatch.h BitKeeper/deleted/.del-db_int.src~419a3f48b6a01da7: Delete: bdb/include/db_int.src BitKeeper/deleted/.del-db_join.h~76f9747a42c3399a: Delete: bdb/include/db_join.h BitKeeper/deleted/.del-db_page.h~e302ca3a4db3abdc: Delete: bdb/include/db_page.h BitKeeper/deleted/.del-db_server_int.h~e1d20b6ba3bca1ab: Delete: bdb/include/db_server_int.h BitKeeper/deleted/.del-db_shash.h~5fbf2d696fac90f3: Delete: bdb/include/db_shash.h BitKeeper/deleted/.del-db_swap.h~1e60887550864a59: Delete: bdb/include/db_swap.h BitKeeper/deleted/.del-db_upgrade.h~c644eee73701fc8d: Delete: bdb/include/db_upgrade.h BitKeeper/deleted/.del-db_verify.h~b8d6c297c61f342e: Delete: bdb/include/db_verify.h BitKeeper/deleted/.del-debug.h~dc2b4f2cf27ccebc: Delete: bdb/include/debug.h BitKeeper/deleted/.del-hash.h~2aaa548b28882dfb: Delete: bdb/include/hash.h BitKeeper/deleted/.del-lock.h~a761c1b7de57b77f: Delete: bdb/include/lock.h BitKeeper/deleted/.del-log.h~ff20184238e35e4d: Delete: bdb/include/log.h BitKeeper/deleted/.del-mp.h~7e317597622f3411: Delete: bdb/include/mp.h BitKeeper/deleted/.del-mutex.h~d3ae7a2977a68137: Delete: bdb/include/mutex.h BitKeeper/deleted/.del-os.h~91867cc8757cd0e3: Delete: bdb/include/os.h BitKeeper/deleted/.del-os_jump.h~e1b939fa5151d4be: Delete: 
bdb/include/os_jump.h BitKeeper/deleted/.del-qam.h~6fad0c1b5723d597: Delete: bdb/include/qam.h BitKeeper/deleted/.del-queue.h~4c72c0826c123d5: Delete: bdb/include/queue.h BitKeeper/deleted/.del-region.h~513fe04d977ca0fc: Delete: bdb/include/region.h BitKeeper/deleted/.del-shqueue.h~525fc3e6c2025c36: Delete: bdb/include/shqueue.h BitKeeper/deleted/.del-tcl_db.h~c536fd61a844f23f: Delete: bdb/include/tcl_db.h BitKeeper/deleted/.del-txn.h~c8d94b221ec147e4: Delete: bdb/include/txn.h BitKeeper/deleted/.del-xa.h~ecc466493aae9d9a: Delete: bdb/include/xa.h BitKeeper/deleted/.del-DbRecoveryInit.java~756b52601a0b9023: Delete: bdb/java/src/com/sleepycat/db/DbRecoveryInit.java BitKeeper/deleted/.del-DbTxnRecover.java~74607cba7ab89d6d: Delete: bdb/java/src/com/sleepycat/db/DbTxnRecover.java BitKeeper/deleted/.del-lock_conflict.c~fc5e0f14cf597a2b: Delete: bdb/lock/lock_conflict.c BitKeeper/deleted/.del-log.src~53ac9e7b5cb023f2: Delete: bdb/log/log.src BitKeeper/deleted/.del-log_findckp.c~24287f008916e81f: Delete: bdb/log/log_findckp.c BitKeeper/deleted/.del-log_rec.c~d51711f2cac09297: Delete: bdb/log/log_rec.c BitKeeper/deleted/.del-log_register.c~b40bb4efac75ca15: Delete: bdb/log/log_register.c BitKeeper/deleted/.del-Design~b3d0f179f2767b: Delete: bdb/mp/Design BitKeeper/deleted/.del-os_finit.c~95dbefc6fe79b26c: Delete: bdb/os/os_finit.c BitKeeper/deleted/.del-os_abs.c~df95d1e7db81924: Delete: bdb/os_vxworks/os_abs.c BitKeeper/deleted/.del-os_finit.c~803b484bdb9d0122: Delete: bdb/os_vxworks/os_finit.c BitKeeper/deleted/.del-os_map.c~3a6d7926398b76d3: Delete: bdb/os_vxworks/os_map.c BitKeeper/deleted/.del-os_finit.c~19a227c6d3c78ad: Delete: bdb/os_win32/os_finit.c BitKeeper/deleted/.del-log-corruption.patch~1cf2ecc7c6408d5d: Delete: bdb/patches/log-corruption.patch BitKeeper/deleted/.del-Btree.pm~af6d0c5eaed4a98e: Delete: bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm BitKeeper/deleted/.del-BerkeleyDB.pm~7244036d4482643: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pm 
BitKeeper/deleted/.del-BerkeleyDB.pod~e7b18fd6132448e3: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pod BitKeeper/deleted/.del-Hash.pm~10292a26c06a5c95: Delete: bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm BitKeeper/deleted/.del-BerkeleyDB.pod.P~79f76a1495eda203: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pod.P BitKeeper/deleted/.del-BerkeleyDB.xs~80c99afbd98e392c: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.xs BitKeeper/deleted/.del-Changes~729c1891efa60de9: Delete: bdb/perl.BerkeleyDB/Changes BitKeeper/deleted/.del-MANIFEST~63a1e34aecf157a0: Delete: bdb/perl.BerkeleyDB/MANIFEST BitKeeper/deleted/.del-Makefile.PL~c68797707d8df87a: Delete: bdb/perl.BerkeleyDB/Makefile.PL BitKeeper/deleted/.del-README~5f2f579b1a241407: Delete: bdb/perl.BerkeleyDB/README BitKeeper/deleted/.del-Todo~dca3c66c193adda9: Delete: bdb/perl.BerkeleyDB/Todo BitKeeper/deleted/.del-config.in~ae81681e450e0999: Delete: bdb/perl.BerkeleyDB/config.in BitKeeper/deleted/.del-dbinfo~28ad67d83be4f68e: Delete: bdb/perl.BerkeleyDB/dbinfo BitKeeper/deleted/.del-mkconsts~543ab60669c7a04e: Delete: bdb/perl.BerkeleyDB/mkconsts BitKeeper/deleted/.del-mkpod~182c0ca54e439afb: Delete: bdb/perl.BerkeleyDB/mkpod BitKeeper/deleted/.del-5.004~e008cb5a48805543: Delete: bdb/perl.BerkeleyDB/patches/5.004 BitKeeper/deleted/.del-irix_6_5.pl~61662bb08afcdec8: Delete: bdb/perl.BerkeleyDB/hints/irix_6_5.pl BitKeeper/deleted/.del-solaris.pl~6771e7182394e152: Delete: bdb/perl.BerkeleyDB/hints/solaris.pl BitKeeper/deleted/.del-typemap~783b8f5295b05f3d: Delete: bdb/perl.BerkeleyDB/typemap BitKeeper/deleted/.del-5.004_01~6081ce2fff7b0bc: Delete: bdb/perl.BerkeleyDB/patches/5.004_01 BitKeeper/deleted/.del-5.004_02~87214eac35ad9e6: Delete: bdb/perl.BerkeleyDB/patches/5.004_02 BitKeeper/deleted/.del-5.004_03~9a672becec7cb40f: Delete: bdb/perl.BerkeleyDB/patches/5.004_03 BitKeeper/deleted/.del-5.004_04~e326cb51af09d154: Delete: bdb/perl.BerkeleyDB/patches/5.004_04 BitKeeper/deleted/.del-5.004_05~7ab457a1e41a92fe: Delete: 
bdb/perl.BerkeleyDB/patches/5.004_05 BitKeeper/deleted/.del-5.005~f9e2d59b5964cd4b: Delete: bdb/perl.BerkeleyDB/patches/5.005 BitKeeper/deleted/.del-5.005_01~3eb9fb7b5842ea8e: Delete: bdb/perl.BerkeleyDB/patches/5.005_01 BitKeeper/deleted/.del-5.005_02~67477ce0bef717cb: Delete: bdb/perl.BerkeleyDB/patches/5.005_02 BitKeeper/deleted/.del-5.005_03~c4c29a1fb21e290a: Delete: bdb/perl.BerkeleyDB/patches/5.005_03 BitKeeper/deleted/.del-5.6.0~e1fb9897d124ee22: Delete: bdb/perl.BerkeleyDB/patches/5.6.0 BitKeeper/deleted/.del-btree.t~e4a1a3c675ddc406: Delete: bdb/perl.BerkeleyDB/t/btree.t BitKeeper/deleted/.del-db-3.0.t~d2c60991d84558f2: Delete: bdb/perl.BerkeleyDB/t/db-3.0.t BitKeeper/deleted/.del-db-3.1.t~6ee88cd13f55e018: Delete: bdb/perl.BerkeleyDB/t/db-3.1.t BitKeeper/deleted/.del-db-3.2.t~f73b6461f98fd1cf: Delete: bdb/perl.BerkeleyDB/t/db-3.2.t BitKeeper/deleted/.del-destroy.t~cc6a2ae1980a2ecd: Delete: bdb/perl.BerkeleyDB/t/destroy.t BitKeeper/deleted/.del-env.t~a8604a4499c4bd07: Delete: bdb/perl.BerkeleyDB/t/env.t BitKeeper/deleted/.del-examples.t~2571b77c3cc75574: Delete: bdb/perl.BerkeleyDB/t/examples.t BitKeeper/deleted/.del-examples.t.T~8228bdd75ac78b88: Delete: bdb/perl.BerkeleyDB/t/examples.t.T BitKeeper/deleted/.del-examples3.t.T~66a186897a87026d: Delete: bdb/perl.BerkeleyDB/t/examples3.t.T BitKeeper/deleted/.del-examples3.t~fe3822ba2f2d7f83: Delete: bdb/perl.BerkeleyDB/t/examples3.t BitKeeper/deleted/.del-filter.t~f87b045c1b708637: Delete: bdb/perl.BerkeleyDB/t/filter.t BitKeeper/deleted/.del-hash.t~616bfb4d644de3a3: Delete: bdb/perl.BerkeleyDB/t/hash.t BitKeeper/deleted/.del-join.t~29fc39f74a83ca22: Delete: bdb/perl.BerkeleyDB/t/join.t BitKeeper/deleted/.del-mldbm.t~31f5015341eea040: Delete: bdb/perl.BerkeleyDB/t/mldbm.t BitKeeper/deleted/.del-queue.t~8f338034ce44a641: Delete: bdb/perl.BerkeleyDB/t/queue.t BitKeeper/deleted/.del-recno.t~d4ddbd3743add63e: Delete: bdb/perl.BerkeleyDB/t/recno.t BitKeeper/deleted/.del-strict.t~6885cdd2ea71ca2d: Delete: 
bdb/perl.BerkeleyDB/t/strict.t BitKeeper/deleted/.del-subdb.t~aab62a5d5864c603: Delete: bdb/perl.BerkeleyDB/t/subdb.t BitKeeper/deleted/.del-txn.t~65033b8558ae1216: Delete: bdb/perl.BerkeleyDB/t/txn.t BitKeeper/deleted/.del-unknown.t~f3710458682665e1: Delete: bdb/perl.BerkeleyDB/t/unknown.t BitKeeper/deleted/.del-Changes~436f74a5c414c65b: Delete: bdb/perl.DB_File/Changes BitKeeper/deleted/.del-DB_File.pm~ae0951c6c7665a82: Delete: bdb/perl.DB_File/DB_File.pm BitKeeper/deleted/.del-DB_File.xs~89e49a0b5556f1d8: Delete: bdb/perl.DB_File/DB_File.xs BitKeeper/deleted/.del-DB_File_BS~290fad5dbbb87069: Delete: bdb/perl.DB_File/DB_File_BS BitKeeper/deleted/.del-MANIFEST~90ee581572bdd4ac: Delete: bdb/perl.DB_File/MANIFEST BitKeeper/deleted/.del-Makefile.PL~ac0567bb5a377e38: Delete: bdb/perl.DB_File/Makefile.PL BitKeeper/deleted/.del-README~77e924a5a9bae6b3: Delete: bdb/perl.DB_File/README BitKeeper/deleted/.del-config.in~ab4c2792b86a810b: Delete: bdb/perl.DB_File/config.in BitKeeper/deleted/.del-dbinfo~461c43b30fab2cb: Delete: bdb/perl.DB_File/dbinfo BitKeeper/deleted/.del-dynixptx.pl~50dcddfae25d17e9: Delete: bdb/perl.DB_File/hints/dynixptx.pl BitKeeper/deleted/.del-typemap~55cffb3288a9e587: Delete: bdb/perl.DB_File/typemap BitKeeper/deleted/.del-version.c~a4df0e646f8b3975: Delete: bdb/perl.DB_File/version.c BitKeeper/deleted/.del-5.004_01~d6830d0082702af7: Delete: bdb/perl.DB_File/patches/5.004_01 BitKeeper/deleted/.del-5.004_02~78b082dc80c91031: Delete: bdb/perl.DB_File/patches/5.004_02 BitKeeper/deleted/.del-5.004~4411ec2e3c9e008b: Delete: bdb/perl.DB_File/patches/5.004 BitKeeper/deleted/.del-sco.pl~1e795fe14fe4dcfe: Delete: bdb/perl.DB_File/hints/sco.pl BitKeeper/deleted/.del-5.004_03~33f274648b160d95: Delete: bdb/perl.DB_File/patches/5.004_03 BitKeeper/deleted/.del-5.004_04~8f3d1b3cf18bb20a: Delete: bdb/perl.DB_File/patches/5.004_04 BitKeeper/deleted/.del-5.004_05~9c0f02e7331e142: Delete: bdb/perl.DB_File/patches/5.004_05 BitKeeper/deleted/.del-5.005~c2108cb2e3c8d951: 
Delete: bdb/perl.DB_File/patches/5.005 BitKeeper/deleted/.del-5.005_01~3b45e9673afc4cfa: Delete: bdb/perl.DB_File/patches/5.005_01 BitKeeper/deleted/.del-5.005_02~9fe5766bb02a4522: Delete: bdb/perl.DB_File/patches/5.005_02 BitKeeper/deleted/.del-5.005_03~ffa1c38c19ae72ea: Delete: bdb/perl.DB_File/patches/5.005_03 BitKeeper/deleted/.del-5.6.0~373be3a5ce47be85: Delete: bdb/perl.DB_File/patches/5.6.0 BitKeeper/deleted/.del-db-btree.t~3231595a1c241eb3: Delete: bdb/perl.DB_File/t/db-btree.t BitKeeper/deleted/.del-db-hash.t~7c4ad0c795c7fad2: Delete: bdb/perl.DB_File/t/db-hash.t BitKeeper/deleted/.del-db-recno.t~6c2d3d80b9ba4a50: Delete: bdb/perl.DB_File/t/db-recno.t BitKeeper/deleted/.del-db_server.sed~cdb00ebcd48a64e2: Delete: bdb/rpc_server/db_server.sed BitKeeper/deleted/.del-db_server_proc.c~d46c8f409c3747f4: Delete: bdb/rpc_server/db_server_proc.c BitKeeper/deleted/.del-db_server_svc.sed~3f5e59f334fa4607: Delete: bdb/rpc_server/db_server_svc.sed BitKeeper/deleted/.del-db_server_util.c~a809f3a4629acda: Delete: bdb/rpc_server/db_server_util.c BitKeeper/deleted/.del-log.tcl~ff1b41f1355b97d7: Delete: bdb/test/log.tcl BitKeeper/deleted/.del-mpool.tcl~b0df4dc1b04db26c: Delete: bdb/test/mpool.tcl BitKeeper/deleted/.del-mutex.tcl~52fd5c73a150565: Delete: bdb/test/mutex.tcl BitKeeper/deleted/.del-txn.tcl~c4ff071550b5446e: Delete: bdb/test/txn.tcl BitKeeper/deleted/.del-README~e800a12a5392010a: Delete: bdb/test/upgrade/README BitKeeper/deleted/.del-pack-2.6.6.pl~89d5076d758d3e98: Delete: bdb/test/upgrade/generate-2.X/pack-2.6.6.pl BitKeeper/deleted/.del-test-2.6.patch~4a52dc83d447547b: Delete: bdb/test/upgrade/generate-2.X/test-2.6.patch
886 lines
23 KiB
C
886 lines
23 KiB
C
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996-2002
 *	Sleepycat Software.  All rights reserved.
 */
|
|
|
|
#include "db_config.h"
|
|
|
|
#ifndef lint
|
|
static const char revid[] = "$Id: lock_deadlock.c,v 11.54 2002/08/06 05:05:21 bostic Exp $";
|
|
#endif /* not lint */
|
|
|
|
#ifndef NO_SYSTEM_INCLUDES
|
|
#include <sys/types.h>
|
|
|
|
#include <string.h>
|
|
#endif
|
|
|
|
#include "db_int.h"
|
|
#include "dbinc/db_shash.h"
|
|
#include "dbinc/lock.h"
|
|
#include "dbinc/txn.h"
|
|
#include "dbinc/rep.h"
|
|
|
|
/*
 * Waits-for matrix rows are arrays of u_int32_t words, 32 bits per locker.
 * M/D/S are word arrays, N is a bit index (ISSET/SET/CLR) or a word count
 * (CLEAR_MAP/OR_MAP).
 *
 * Fixes relative to the original:
 *  - 1U instead of 1 so shifting into bit 31 is not signed-shift UB.
 *  - CLEAR_MAP/OR_MAP wrapped in do { } while (0) so they behave as a
 *    single statement (safe in unbraced if/else bodies).
 *  - Every macro argument parenthesized (OR_MAP previously expanded D and
 *    S unparenthesized).
 */
#define	ISSET_MAP(M, N)	((M)[(N) / 32] & (1U << ((N) % 32)))

#define	CLEAR_MAP(M, N) do {						\
	u_int32_t __i;							\
	for (__i = 0; __i < (N); __i++)					\
		(M)[__i] = 0;						\
} while (0)

#define	SET_MAP(M, B)	((M)[(B) / 32] |= (1U << ((B) % 32)))
#define	CLR_MAP(M, B)	((M)[(B) / 32] &= ~(1U << ((B) % 32)))

#define	OR_MAP(D, S, N) do {						\
	u_int32_t __i;							\
	for (__i = 0; __i < (N); __i++)					\
		(D)[__i] |= (S)[__i];					\
} while (0)

/* Sentinel "no victim chosen" locker index. */
#define	BAD_KILLID	0xffffffff
|
|
|
|
/*
 * Per-locker snapshot built by __dd_build and consumed by __dd_find,
 * __dd_verify and __dd_abort.  One entry per master locker; a locker's
 * index in this array is its "deadlock detector id" (dd_id).
 */
typedef struct {
	int valid;		/* Set when this entry describes a live locker. */
	int self_wait;		/* Locker was found waiting on itself. */
	u_int32_t count;	/* Lock/write count, per the abort policy. */
	u_int32_t id;		/* Locker id in the lock region. */
	u_int32_t last_lock;	/* Region offset of the locker's last lock. */
	u_int32_t last_locker_id; /* Locker (or child) holding last_lock. */
	db_pgno_t pgno;		/* Page number from the last lock's object. */
} locker_info;
|
|
|
|
/* Forward declarations of the deadlock detector's internal helpers. */
static int __dd_abort __P((DB_ENV *, locker_info *));
static int __dd_build __P((DB_ENV *,
    u_int32_t, u_int32_t **, u_int32_t *, u_int32_t *, locker_info **));
static int __dd_find __P((DB_ENV *,
    u_int32_t *, locker_info *, u_int32_t, u_int32_t, u_int32_t ***));
static int __dd_isolder __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
static int __dd_verify __P((locker_info *, u_int32_t *, u_int32_t *,
    u_int32_t *, u_int32_t, u_int32_t, u_int32_t));

#ifdef DIAGNOSTIC
/* Dumps the waits-for matrix when DB_VERB_WAITSFOR verbosity is set. */
static void __dd_debug
    __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t, u_int32_t));
#endif
|
|
|
|
/*
|
|
* lock_detect --
|
|
*
|
|
* PUBLIC: int __lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
|
|
*/
|
|
int
|
|
__lock_detect(dbenv, flags, atype, abortp)
|
|
DB_ENV *dbenv;
|
|
u_int32_t flags, atype;
|
|
int *abortp;
|
|
{
|
|
DB_LOCKREGION *region;
|
|
DB_LOCKTAB *lt;
|
|
DB_TXNMGR *tmgr;
|
|
locker_info *idmap;
|
|
u_int32_t *bitmap, *copymap, **deadp, **free_me, *tmpmap;
|
|
u_int32_t i, keeper, killid, limit, nalloc, nlockers;
|
|
u_int32_t lock_max, txn_max;
|
|
int ret;
|
|
|
|
PANIC_CHECK(dbenv);
|
|
ENV_REQUIRES_CONFIG(dbenv,
|
|
dbenv->lk_handle, "DB_ENV->lock_detect", DB_INIT_LOCK);
|
|
|
|
/* Validate arguments. */
|
|
if ((ret = __db_fchk(dbenv, "DB_ENV->lock_detect", flags, 0)) != 0)
|
|
return (ret);
|
|
switch (atype) {
|
|
case DB_LOCK_DEFAULT:
|
|
case DB_LOCK_EXPIRE:
|
|
case DB_LOCK_MAXLOCKS:
|
|
case DB_LOCK_MINLOCKS:
|
|
case DB_LOCK_MINWRITE:
|
|
case DB_LOCK_OLDEST:
|
|
case DB_LOCK_RANDOM:
|
|
case DB_LOCK_YOUNGEST:
|
|
break;
|
|
default:
|
|
__db_err(dbenv,
|
|
"DB_ENV->lock_detect: unknown deadlock detection mode specified");
|
|
return (EINVAL);
|
|
}
|
|
|
|
/*
|
|
* If this environment is a replication client, then we must use the
|
|
* MINWRITE detection discipline.
|
|
*/
|
|
if (__rep_is_client(dbenv))
|
|
atype = DB_LOCK_MINWRITE;
|
|
|
|
free_me = NULL;
|
|
|
|
lt = dbenv->lk_handle;
|
|
if (abortp != NULL)
|
|
*abortp = 0;
|
|
|
|
/* Check if a detector run is necessary. */
|
|
LOCKREGION(dbenv, lt);
|
|
|
|
/* Make a pass only if auto-detect would run. */
|
|
region = lt->reginfo.primary;
|
|
|
|
if (region->need_dd == 0) {
|
|
UNLOCKREGION(dbenv, lt);
|
|
return (0);
|
|
}
|
|
|
|
/* Reset need_dd, so we know we've run the detector. */
|
|
region->need_dd = 0;
|
|
|
|
/* Build the waits-for bitmap. */
|
|
ret = __dd_build(dbenv, atype, &bitmap, &nlockers, &nalloc, &idmap);
|
|
lock_max = region->stat.st_cur_maxid;
|
|
UNLOCKREGION(dbenv, lt);
|
|
|
|
/*
|
|
* We need the cur_maxid from the txn region as well. In order
|
|
* to avoid tricky synchronization between the lock and txn
|
|
* regions, we simply unlock the lock region and then lock the
|
|
* txn region. This introduces a small window during which the
|
|
* transaction system could then wrap. We're willing to return
|
|
* the wrong answer for "oldest" or "youngest" in those rare
|
|
* circumstances.
|
|
*/
|
|
tmgr = dbenv->tx_handle;
|
|
if (tmgr != NULL) {
|
|
R_LOCK(dbenv, &tmgr->reginfo);
|
|
txn_max = ((DB_TXNREGION *)tmgr->reginfo.primary)->cur_maxid;
|
|
R_UNLOCK(dbenv, &tmgr->reginfo);
|
|
} else
|
|
txn_max = TXN_MAXIMUM;
|
|
if (ret != 0 || atype == DB_LOCK_EXPIRE)
|
|
return (ret);
|
|
|
|
if (nlockers == 0)
|
|
return (0);
|
|
#ifdef DIAGNOSTIC
|
|
if (FLD_ISSET(dbenv->verbose, DB_VERB_WAITSFOR))
|
|
__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc);
|
|
#endif
|
|
/* Now duplicate the bitmaps so we can verify deadlock participants. */
|
|
if ((ret = __os_calloc(dbenv, (size_t)nlockers,
|
|
sizeof(u_int32_t) * nalloc, ©map)) != 0)
|
|
goto err;
|
|
memcpy(copymap, bitmap, nlockers * sizeof(u_int32_t) * nalloc);
|
|
|
|
if ((ret = __os_calloc(dbenv, sizeof(u_int32_t), nalloc, &tmpmap)) != 0)
|
|
goto err1;
|
|
|
|
/* Find a deadlock. */
|
|
if ((ret =
|
|
__dd_find(dbenv, bitmap, idmap, nlockers, nalloc, &deadp)) != 0)
|
|
return (ret);
|
|
|
|
killid = BAD_KILLID;
|
|
free_me = deadp;
|
|
for (; *deadp != NULL; deadp++) {
|
|
if (abortp != NULL)
|
|
++*abortp;
|
|
killid = (u_int32_t)((*deadp - bitmap) / nalloc);
|
|
limit = killid;
|
|
keeper = BAD_KILLID;
|
|
|
|
if (atype == DB_LOCK_DEFAULT || atype == DB_LOCK_RANDOM)
|
|
goto dokill;
|
|
/*
|
|
* It's conceivable that under XA, the locker could
|
|
* have gone away.
|
|
*/
|
|
if (killid == BAD_KILLID)
|
|
break;
|
|
|
|
/*
|
|
* Start with the id that we know is deadlocked
|
|
* and then examine all other set bits and see
|
|
* if any are a better candidate for abortion
|
|
* and that they are genuinely part of the
|
|
* deadlock. The definition of "best":
|
|
* OLDEST: smallest id
|
|
* YOUNGEST: largest id
|
|
* MAXLOCKS: maximum count
|
|
* MINLOCKS: minimum count
|
|
* MINWRITE: minimum count
|
|
*/
|
|
|
|
for (i = (killid + 1) % nlockers;
|
|
i != limit;
|
|
i = (i + 1) % nlockers) {
|
|
if (!ISSET_MAP(*deadp, i))
|
|
continue;
|
|
switch (atype) {
|
|
case DB_LOCK_OLDEST:
|
|
if (__dd_isolder(idmap[killid].id,
|
|
idmap[i].id, lock_max, txn_max))
|
|
continue;
|
|
keeper = i;
|
|
break;
|
|
case DB_LOCK_YOUNGEST:
|
|
if (__dd_isolder(idmap[i].id,
|
|
idmap[killid].id, lock_max, txn_max))
|
|
continue;
|
|
keeper = i;
|
|
break;
|
|
case DB_LOCK_MAXLOCKS:
|
|
if (idmap[i].count < idmap[killid].count)
|
|
continue;
|
|
keeper = i;
|
|
break;
|
|
case DB_LOCK_MINLOCKS:
|
|
case DB_LOCK_MINWRITE:
|
|
if (idmap[i].count > idmap[killid].count)
|
|
continue;
|
|
keeper = i;
|
|
break;
|
|
default:
|
|
killid = BAD_KILLID;
|
|
ret = EINVAL;
|
|
goto dokill;
|
|
}
|
|
if (__dd_verify(idmap, *deadp,
|
|
tmpmap, copymap, nlockers, nalloc, i))
|
|
killid = i;
|
|
}
|
|
|
|
dokill: if (killid == BAD_KILLID)
|
|
continue;
|
|
|
|
/*
|
|
* There are cases in which our general algorithm will
|
|
* fail. Returning 1 from verify indicates that the
|
|
* particular locker is not only involved in a deadlock,
|
|
* but that killing him will allow others to make forward
|
|
* progress. Unfortunately, there are cases where we need
|
|
* to abort someone, but killing them will not necessarily
|
|
* ensure forward progress (imagine N readers all trying to
|
|
* acquire a write lock). In such a scenario, we'll have
|
|
* gotten all the way through the loop, we will have found
|
|
* someone to keep (keeper will be valid), but killid will
|
|
* still be the initial deadlocker. In this case, if the
|
|
* initial killid satisfies __dd_verify, kill it, else abort
|
|
* keeper and indicate that we need to run deadlock detection
|
|
* again.
|
|
*/
|
|
|
|
if (keeper != BAD_KILLID && killid == limit &&
|
|
__dd_verify(idmap, *deadp,
|
|
tmpmap, copymap, nlockers, nalloc, killid) == 0) {
|
|
LOCKREGION(dbenv, lt);
|
|
region->need_dd = 1;
|
|
UNLOCKREGION(dbenv, lt);
|
|
killid = keeper;
|
|
}
|
|
|
|
/* Kill the locker with lockid idmap[killid]. */
|
|
if ((ret = __dd_abort(dbenv, &idmap[killid])) != 0) {
|
|
/*
|
|
* It's possible that the lock was already aborted;
|
|
* this isn't necessarily a problem, so do not treat
|
|
* it as an error.
|
|
*/
|
|
if (ret == DB_ALREADY_ABORTED)
|
|
ret = 0;
|
|
else
|
|
__db_err(dbenv,
|
|
"warning: unable to abort locker %lx",
|
|
(u_long)idmap[killid].id);
|
|
} else if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
|
|
__db_err(dbenv,
|
|
"Aborting locker %lx", (u_long)idmap[killid].id);
|
|
}
|
|
__os_free(dbenv, tmpmap);
|
|
err1: __os_free(dbenv, copymap);
|
|
|
|
err: if (free_me != NULL)
|
|
__os_free(dbenv, free_me);
|
|
__os_free(dbenv, bitmap);
|
|
__os_free(dbenv, idmap);
|
|
|
|
return (ret);
|
|
}
|
|
|
|
/*
 * ========================================================================
 * Utilities
 */

/*
 * Sentinel dd_id assigned to child lockers in __dd_build; their waits-for
 * entries are folded into the master locker's row when encountered.
 */
# define DD_INVALID_ID ((u_int32_t) -1)
|
|
|
|
/*
 * __dd_build --
 *	Build the waits-for matrix for one detector pass.
 *
 *	On success with deadlock detection requested, returns via the out
 *	parameters: *bmp the nlockers-by-nentries bitmap matrix (caller
 *	frees), *nlockers the number of dd_ids assigned, *allocp the number
 *	of u_int32_t words per matrix row, *idmap the locker_info array
 *	(caller frees).
 *
 *	When atype == DB_LOCK_EXPIRE, only lock timeouts are checked: no
 *	matrix is built, nothing is allocated, and the out parameters are
 *	not written -- the caller must not use them in that case.
 *
 *	Caller must hold the lock region locked.
 */
static int
__dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
	DB_ENV *dbenv;
	u_int32_t atype, **bmp, *nlockers, *allocp;
	locker_info **idmap;
{
	struct __db_lock *lp;
	DB_LOCKER *lip, *lockerp, *child;
	DB_LOCKOBJ *op, *lo;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	locker_info *id_array;
	db_timeval_t now;
	u_int32_t *bitmap, count, dd, *entryp, id, ndx, nentries, *tmpmap;
	u_int8_t *pptr;
	int expire_only, is_first, need_timeout, ret;

	lt = dbenv->lk_handle;
	region = lt->reginfo.primary;
	LOCK_SET_TIME_INVALID(&now);
	need_timeout = 0;
	expire_only = atype == DB_LOCK_EXPIRE;

	/*
	 * While we always check for expired timeouts, if we are called
	 * with DB_LOCK_EXPIRE, then we are only checking for timeouts
	 * (i.e., not doing deadlock detection at all).  If we aren't
	 * doing real deadlock detection, then we can skip a significant,
	 * amount of the processing.  In particular we do not build
	 * the conflict array and our caller needs to expect this.
	 *
	 * Note the expire-only path skips all allocations below; the
	 * expire_only branches inside obj_loop never touch tmpmap,
	 * bitmap or id_array.
	 */
	if (expire_only) {
		count = 0;
		nentries = 0;
		goto obj_loop;
	}

	/*
	 * We'll check how many lockers there are, add a few more in for
	 * good measure and then allocate all the structures.  Then we'll
	 * verify that we have enough room when we go back in and get the
	 * mutex the second time.
	 */
retry:	count = region->stat.st_nlockers;

	if (count == 0) {
		*nlockers = 0;
		return (0);
	}

	if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
		__db_err(dbenv, "%lu lockers", (u_long)count);

	/* Slack so concurrent locker creation rarely forces a retry. */
	count += 20;
	nentries = ALIGN(count, 32) / 32;

	/*
	 * Allocate enough space for a count by count bitmap matrix.
	 *
	 * XXX
	 * We can probably save the malloc's between iterations just
	 * reallocing if necessary because count grew by too much.
	 */
	if ((ret = __os_calloc(dbenv, (size_t)count,
	    sizeof(u_int32_t) * nentries, &bitmap)) != 0)
		return (ret);

	if ((ret = __os_calloc(dbenv,
	    sizeof(u_int32_t), nentries, &tmpmap)) != 0) {
		__os_free(dbenv, bitmap);
		return (ret);
	}

	if ((ret = __os_calloc(dbenv,
	    (size_t)count, sizeof(locker_info), &id_array)) != 0) {
		__os_free(dbenv, bitmap);
		__os_free(dbenv, tmpmap);
		return (ret);
	}

	/*
	 * Now go back in and actually fill in the matrix.
	 */
	if (region->stat.st_nlockers > count) {
		/* Locker count outgrew our slack; start over. */
		__os_free(dbenv, bitmap);
		__os_free(dbenv, tmpmap);
		__os_free(dbenv, id_array);
		goto retry;
	}

	/*
	 * First we go through and assign each locker a deadlock detector id.
	 * Only master lockers get a dd_id; children inherit their master's
	 * id lazily in the loops below.
	 */
	for (id = 0, lip = SH_TAILQ_FIRST(&region->lockers, __db_locker);
	    lip != NULL;
	    lip = SH_TAILQ_NEXT(lip, ulinks, __db_locker)) {
		if (F_ISSET(lip, DB_LOCKER_INABORT))
			continue;
		if (lip->master_locker == INVALID_ROFF) {
			lip->dd_id = id++;
			id_array[lip->dd_id].id = lip->id;
			/* count seeds the victim-selection metric. */
			if (atype == DB_LOCK_MINLOCKS ||
			    atype == DB_LOCK_MAXLOCKS)
				id_array[lip->dd_id].count = lip->nlocks;
			if (atype == DB_LOCK_MINWRITE)
				id_array[lip->dd_id].count = lip->nwrites;
		} else
			lip->dd_id = DD_INVALID_ID;

	}

	/*
	 * We only need consider objects that have waiters, so we use
	 * the list of objects with waiters (dd_objs) instead of traversing
	 * the entire hash table.  For each object, we traverse the waiters
	 * list and add an entry in the waitsfor matrix for each waiter/holder
	 * combination.
	 */
obj_loop:
	for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
	    op != NULL; op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj)) {
		if (expire_only)
			goto look_waiters;
		CLEAR_MAP(tmpmap, nentries);

		/*
		 * First we go through and create a bit map that
		 * represents all the holders of this object.
		 */
		for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
		    lp != NULL;
		    lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
			LOCKER_LOCK(lt, region, lp->holder, ndx);
			if ((ret = __lock_getlocker(lt,
			    lp->holder, ndx, 0, &lockerp)) != 0)
				continue;
			if (F_ISSET(lockerp, DB_LOCKER_INABORT))
				continue;

			if (lockerp->dd_id == DD_INVALID_ID) {
				/* Child locker: fold into its master. */
				dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
				    lockerp->master_locker))->dd_id;
				lockerp->dd_id = dd;
				if (atype == DB_LOCK_MINLOCKS ||
				    atype == DB_LOCK_MAXLOCKS)
					id_array[dd].count += lockerp->nlocks;
				if (atype == DB_LOCK_MINWRITE)
					id_array[dd].count += lockerp->nwrites;

			} else
				dd = lockerp->dd_id;
			id_array[dd].valid = 1;

			/*
			 * If the holder has already been aborted, then
			 * we should ignore it for now.
			 */
			if (lp->status == DB_LSTAT_HELD)
				SET_MAP(tmpmap, dd);
		}

		/*
		 * Next, for each waiter, we set its row in the matrix
		 * equal to the map of holders we set up above.
		 */
look_waiters:
		for (is_first = 1,
		    lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
		    lp != NULL;
		    is_first = 0,
		    lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
			LOCKER_LOCK(lt, region, lp->holder, ndx);
			if ((ret = __lock_getlocker(lt,
			    lp->holder, ndx, 0, &lockerp)) != 0)
				continue;
			if (lp->status == DB_LSTAT_WAITING) {
				/* Expired waiters are woken, not graphed. */
				if (__lock_expired(dbenv,
				    &now, &lockerp->lk_expire)) {
					lp->status = DB_LSTAT_EXPIRED;
					MUTEX_UNLOCK(dbenv, &lp->mutex);
					continue;
				}
				need_timeout =
				    LOCK_TIME_ISVALID(&lockerp->lk_expire);
			}

			if (expire_only)
				continue;

			if (lockerp->dd_id == DD_INVALID_ID) {
				/* Child locker: fold into its master. */
				dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
				    lockerp->master_locker))->dd_id;
				lockerp->dd_id = dd;
				if (atype == DB_LOCK_MINLOCKS ||
				    atype == DB_LOCK_MAXLOCKS)
					id_array[dd].count += lockerp->nlocks;
				if (atype == DB_LOCK_MINWRITE)
					id_array[dd].count += lockerp->nwrites;
			} else
				dd = lockerp->dd_id;
			id_array[dd].valid = 1;

			/*
			 * If the transaction is pending abortion, then
			 * ignore it on this iteration.
			 */
			if (lp->status != DB_LSTAT_WAITING)
				continue;

			entryp = bitmap + (nentries * dd);
			OR_MAP(entryp, tmpmap, nentries);
			/*
			 * If this is the first waiter on the queue,
			 * then we remove the waitsfor relationship
			 * with oneself.  However, if it's anywhere
			 * else on the queue, then we have to keep
			 * it and we have an automatic deadlock.
			 */
			if (is_first) {
				if (ISSET_MAP(entryp, dd))
					id_array[dd].self_wait = 1;
				CLR_MAP(entryp, dd);
			}
		}
	}

	if (expire_only) {
		region->need_dd = need_timeout;
		return (0);
	}

	/* Now for each locker; record its last lock. */
	for (id = 0; id < count; id++) {
		if (!id_array[id].valid)
			continue;
		LOCKER_LOCK(lt, region, id_array[id].id, ndx);
		if ((ret = __lock_getlocker(lt,
		    id_array[id].id, ndx, 0, &lockerp)) != 0) {
			__db_err(dbenv,
			    "No locks for locker %lu", (u_long)id_array[id].id);
			continue;
		}

		/*
		 * If this is a master transaction, try to
		 * find one of its children's locks first,
		 * as they are probably more recent.
		 */
		child = SH_LIST_FIRST(&lockerp->child_locker, __db_locker);
		if (child != NULL) {
			do {
				lp = SH_LIST_FIRST(&child->heldby, __db_lock);
				if (lp != NULL &&
				    lp->status == DB_LSTAT_WAITING) {
					id_array[id].last_locker_id = child->id;
					goto get_lock;
				}
				child = SH_LIST_NEXT(
				    child, child_link, __db_locker);
			} while (child != NULL);
		}
		lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
		if (lp != NULL) {
			id_array[id].last_locker_id = lockerp->id;
get_lock:		id_array[id].last_lock = R_OFFSET(&lt->reginfo, lp);
			lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
			pptr = SH_DBT_PTR(&lo->lockobj);
			/*
			 * Record the page number when the lock object is
			 * large enough to hold one; presumably page locks
			 * start with a db_pgno_t -- see __dd_debug.
			 */
			if (lo->lockobj.size >= sizeof(db_pgno_t))
				memcpy(&id_array[id].pgno,
				    pptr, sizeof(db_pgno_t));
			else
				id_array[id].pgno = 0;
		}
	}

	/*
	 * Pass complete, reset the deadlock detector bit,
	 * unless we have pending timeouts.
	 */
	region->need_dd = need_timeout;

	/*
	 * Now we can release everything except the bitmap matrix that we
	 * created.
	 */
	*nlockers = id;
	*idmap = id_array;
	*bmp = bitmap;
	*allocp = nentries;
	__os_free(dbenv, tmpmap);
	return (0);
}
|
|
|
|
static int
|
|
__dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp)
|
|
DB_ENV *dbenv;
|
|
u_int32_t *bmp, nlockers, nalloc;
|
|
locker_info *idmap;
|
|
u_int32_t ***deadp;
|
|
{
|
|
u_int32_t i, j, k, *mymap, *tmpmap;
|
|
u_int32_t **retp;
|
|
int ndead, ndeadalloc, ret;
|
|
|
|
#undef INITIAL_DEAD_ALLOC
|
|
#define INITIAL_DEAD_ALLOC 8
|
|
|
|
ndeadalloc = INITIAL_DEAD_ALLOC;
|
|
ndead = 0;
|
|
if ((ret = __os_malloc(dbenv,
|
|
ndeadalloc * sizeof(u_int32_t *), &retp)) != 0)
|
|
return (ret);
|
|
|
|
/*
|
|
* For each locker, OR in the bits from the lockers on which that
|
|
* locker is waiting.
|
|
*/
|
|
for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nalloc) {
|
|
if (!idmap[i].valid)
|
|
continue;
|
|
for (j = 0; j < nlockers; j++) {
|
|
if (!ISSET_MAP(mymap, j))
|
|
continue;
|
|
|
|
/* Find the map for this bit. */
|
|
tmpmap = bmp + (nalloc * j);
|
|
OR_MAP(mymap, tmpmap, nalloc);
|
|
if (!ISSET_MAP(mymap, i))
|
|
continue;
|
|
|
|
/* Make sure we leave room for NULL. */
|
|
if (ndead + 2 >= ndeadalloc) {
|
|
ndeadalloc <<= 1;
|
|
/*
|
|
* If the alloc fails, then simply return the
|
|
* deadlocks that we already have.
|
|
*/
|
|
if (__os_realloc(dbenv,
|
|
ndeadalloc * sizeof(u_int32_t),
|
|
&retp) != 0) {
|
|
retp[ndead] = NULL;
|
|
*deadp = retp;
|
|
return (0);
|
|
}
|
|
}
|
|
retp[ndead++] = mymap;
|
|
|
|
/* Mark all participants in this deadlock invalid. */
|
|
for (k = 0; k < nlockers; k++)
|
|
if (ISSET_MAP(mymap, k))
|
|
idmap[k].valid = 0;
|
|
break;
|
|
}
|
|
}
|
|
retp[ndead] = NULL;
|
|
*deadp = retp;
|
|
return (0);
|
|
}
|
|
|
|
static int
|
|
__dd_abort(dbenv, info)
|
|
DB_ENV *dbenv;
|
|
locker_info *info;
|
|
{
|
|
struct __db_lock *lockp;
|
|
DB_LOCKER *lockerp;
|
|
DB_LOCKOBJ *sh_obj;
|
|
DB_LOCKREGION *region;
|
|
DB_LOCKTAB *lt;
|
|
u_int32_t ndx;
|
|
int ret;
|
|
|
|
lt = dbenv->lk_handle;
|
|
region = lt->reginfo.primary;
|
|
|
|
LOCKREGION(dbenv, lt);
|
|
|
|
/* Find the locker's last lock. */
|
|
LOCKER_LOCK(lt, region, info->last_locker_id, ndx);
|
|
if ((ret = __lock_getlocker(lt,
|
|
info->last_locker_id, ndx, 0, &lockerp)) != 0 || lockerp == NULL) {
|
|
if (ret == 0)
|
|
ret = DB_ALREADY_ABORTED;
|
|
goto out;
|
|
}
|
|
|
|
/* It's possible that this locker was already aborted. */
|
|
if ((lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock)) == NULL) {
|
|
ret = DB_ALREADY_ABORTED;
|
|
goto out;
|
|
}
|
|
if (R_OFFSET(<->reginfo, lockp) != info->last_lock ||
|
|
lockp->status != DB_LSTAT_WAITING) {
|
|
ret = DB_ALREADY_ABORTED;
|
|
goto out;
|
|
}
|
|
|
|
sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
|
|
SH_LIST_REMOVE(lockp, locker_links, __db_lock);
|
|
|
|
/* Abort lock, take it off list, and wake up this lock. */
|
|
SHOBJECT_LOCK(lt, region, sh_obj, ndx);
|
|
lockp->status = DB_LSTAT_ABORTED;
|
|
SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
|
|
|
|
/*
|
|
* Either the waiters list is now empty, in which case we remove
|
|
* it from dd_objs, or it is not empty, in which case we need to
|
|
* do promotion.
|
|
*/
|
|
if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
|
|
SH_TAILQ_REMOVE(®ion->dd_objs,
|
|
sh_obj, dd_links, __db_lockobj);
|
|
else
|
|
ret = __lock_promote(lt, sh_obj, 0);
|
|
MUTEX_UNLOCK(dbenv, &lockp->mutex);
|
|
|
|
region->stat.st_ndeadlocks++;
|
|
UNLOCKREGION(dbenv, lt);
|
|
|
|
return (0);
|
|
|
|
out: UNLOCKREGION(dbenv, lt);
|
|
return (ret);
|
|
}
|
|
|
|
#ifdef DIAGNOSTIC
|
|
static void
|
|
__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc)
|
|
DB_ENV *dbenv;
|
|
locker_info *idmap;
|
|
u_int32_t *bitmap, nlockers, nalloc;
|
|
{
|
|
u_int32_t i, j, *mymap;
|
|
char *msgbuf;
|
|
|
|
__db_err(dbenv, "Waitsfor array\nWaiter:\tWaiting on:");
|
|
|
|
/* Allocate space to print 10 bytes per item waited on. */
|
|
#undef MSGBUF_LEN
|
|
#define MSGBUF_LEN ((nlockers + 1) * 10 + 64)
|
|
if (__os_malloc(dbenv, MSGBUF_LEN, &msgbuf) != 0)
|
|
return;
|
|
|
|
for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nalloc) {
|
|
if (!idmap[i].valid)
|
|
continue;
|
|
sprintf(msgbuf, /* Waiter. */
|
|
"%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno);
|
|
for (j = 0; j < nlockers; j++)
|
|
if (ISSET_MAP(mymap, j))
|
|
sprintf(msgbuf, "%s %lx", msgbuf,
|
|
(u_long)idmap[j].id);
|
|
(void)sprintf(msgbuf,
|
|
"%s %lu", msgbuf, (u_long)idmap[i].last_lock);
|
|
__db_err(dbenv, msgbuf);
|
|
}
|
|
|
|
__os_free(dbenv, msgbuf);
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
* Given a bitmap that contains a deadlock, verify that the bit
|
|
* specified in the which parameter indicates a transaction that
|
|
* is actually deadlocked. Return 1 if really deadlocked, 0 otherwise.
|
|
* deadmap is the array that identified the deadlock.
|
|
* tmpmap is a copy of the initial bitmaps from the dd_build phase
|
|
* origmap is a temporary bit map into which we can OR things
|
|
* nlockers is the number of actual lockers under consideration
|
|
* nalloc is the number of words allocated for the bitmap
|
|
* which is the locker in question
|
|
*/
|
|
static int
|
|
__dd_verify(idmap, deadmap, tmpmap, origmap, nlockers, nalloc, which)
|
|
locker_info *idmap;
|
|
u_int32_t *deadmap, *tmpmap, *origmap;
|
|
u_int32_t nlockers, nalloc, which;
|
|
{
|
|
u_int32_t *tmap;
|
|
u_int32_t j;
|
|
int count;
|
|
|
|
memset(tmpmap, 0, sizeof(u_int32_t) * nalloc);
|
|
|
|
/*
|
|
* In order for "which" to be actively involved in
|
|
* the deadlock, removing him from the evaluation
|
|
* must remove the deadlock. So, we OR together everyone
|
|
* except which; if all the participants still have their
|
|
* bits set, then the deadlock persists and which does
|
|
* not participate. If the deadlock does not persist
|
|
* then "which" does participate.
|
|
*/
|
|
count = 0;
|
|
for (j = 0; j < nlockers; j++) {
|
|
if (!ISSET_MAP(deadmap, j) || j == which)
|
|
continue;
|
|
|
|
/* Find the map for this bit. */
|
|
tmap = origmap + (nalloc * j);
|
|
|
|
/*
|
|
* We special case the first waiter who is also a holder, so
|
|
* we don't automatically call that a deadlock. However, if
|
|
* it really is a deadlock, we need the bit set now so that
|
|
* we treat the first waiter like other waiters.
|
|
*/
|
|
if (idmap[j].self_wait)
|
|
SET_MAP(tmap, j);
|
|
OR_MAP(tmpmap, tmap, nalloc);
|
|
count++;
|
|
}
|
|
|
|
if (count == 1)
|
|
return (1);
|
|
|
|
/*
|
|
* Now check the resulting map and see whether
|
|
* all participants still have their bit set.
|
|
*/
|
|
for (j = 0; j < nlockers; j++) {
|
|
if (!ISSET_MAP(deadmap, j) || j == which)
|
|
continue;
|
|
if (!ISSET_MAP(tmpmap, j))
|
|
return (1);
|
|
}
|
|
return (0);
|
|
}
|
|
|
|
/*
|
|
* __dd_isolder --
|
|
*
|
|
* Figure out the relative age of two lockers. We make all lockers
|
|
* older than all transactions, because that's how it's worked
|
|
* historically (because lockers are lower ids).
|
|
*/
|
|
static int
|
|
__dd_isolder(a, b, lock_max, txn_max)
|
|
u_int32_t a, b;
|
|
u_int32_t lock_max, txn_max;
|
|
{
|
|
u_int32_t max;
|
|
|
|
/* Check for comparing lock-id and txnid. */
|
|
if (a <= DB_LOCK_MAXID && b > DB_LOCK_MAXID)
|
|
return (1);
|
|
if (b <= DB_LOCK_MAXID && a > DB_LOCK_MAXID)
|
|
return (0);
|
|
|
|
/* In the same space; figure out which one. */
|
|
max = txn_max;
|
|
if (a <= DB_LOCK_MAXID)
|
|
max = lock_max;
|
|
|
|
/*
|
|
* We can't get a 100% correct ordering, because we don't know
|
|
* where the current interval started and if there were older
|
|
* lockers outside the interval. We do the best we can.
|
|
*/
|
|
|
|
/*
|
|
* Check for a wrapped case with ids above max.
|
|
*/
|
|
if (a > max && b < max)
|
|
return (1);
|
|
if (b > max && a < max)
|
|
return (0);
|
|
|
|
return (a < b);
|
|
}
|