mirror of
https://github.com/MariaDB/server.git
synced 2025-01-19 05:22:25 +01:00
155e78f014
1079 lines
27 KiB
C
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1999-2002
 *	Sleepycat Software.  All rights reserved.
 *
 * $Id: hash_verify.c,v 1.53 2002/08/06 05:35:02 bostic Exp $
 */

#include "db_config.h"

#ifndef lint
static const char revid[] = "$Id: hash_verify.c,v 1.53 2002/08/06 05:35:02 bostic Exp $";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <string.h>
#endif

#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/db_verify.h"
#include "dbinc/btree.h"
#include "dbinc/hash.h"

static int __ham_dups_unsorted __P((DB *, u_int8_t *, u_int32_t));
static int __ham_vrfy_bucket __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
    u_int32_t));
static int __ham_vrfy_item __P((DB *,
    VRFY_DBINFO *, db_pgno_t, PAGE *, u_int32_t, u_int32_t));

/*
 * __ham_vrfy_meta --
 *	Verify the hash-specific part of a metadata page.
 *
 * Note that unlike btree, we don't save things off, because we
 * will need most everything again to verify each page and the
 * amount of state here is significant.
 *
 * PUBLIC: int __ham_vrfy_meta __P((DB *, VRFY_DBINFO *, HMETA *,
 * PUBLIC:     db_pgno_t, u_int32_t));
 */
int
__ham_vrfy_meta(dbp, vdp, m, pgno, flags)
	DB *dbp;
	VRFY_DBINFO *vdp;
	HMETA *m;
	db_pgno_t pgno;
	u_int32_t flags;
{
	HASH *hashp;
	VRFY_PAGEINFO *pip;
	int i, ret, t_ret, isbad;
	u_int32_t pwr, mbucket;
	u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));

	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
		return (ret);
	isbad = 0;

	hashp = dbp->h_internal;

	if (hashp != NULL && hashp->h_hash != NULL)
		hfunc = hashp->h_hash;
	else
		hfunc = __ham_func5;

	/*
	 * If we haven't already checked the common fields in pagezero,
	 * check them.
	 */
	if (!F_ISSET(pip, VRFY_INCOMPLETE) &&
	    (ret = __db_vrfy_meta(dbp, vdp, &m->dbmeta, pgno, flags)) != 0) {
		if (ret == DB_VERIFY_BAD)
			isbad = 1;
		else
			goto err;
	}

	/* h_charkey */
	if (!LF_ISSET(DB_NOORDERCHK))
		if (m->h_charkey != hfunc(dbp, CHARKEY, sizeof(CHARKEY))) {
			EPRINT((dbp->dbenv,
"Page %lu: database has different custom hash function; reverify with DB_NOORDERCHK set",
			    (u_long)pgno));
			/*
			 * Return immediately; this is probably a sign
			 * of user error rather than database corruption, so
			 * we want to avoid extraneous errors.
			 */
			isbad = 1;
			goto err;
		}

	/* max_bucket must be less than the last pgno. */
	if (m->max_bucket > vdp->last_pgno) {
		EPRINT((dbp->dbenv,
		    "Page %lu: Impossible max_bucket %lu on meta page",
		    (u_long)pgno, (u_long)m->max_bucket));
		/*
		 * Most other fields depend somehow on max_bucket, so
		 * we just return--there will be lots of extraneous
		 * errors.
		 */
		isbad = 1;
		goto err;
	}

	/*
	 * max_bucket, high_mask and low_mask: high_mask must be one
	 * less than the next power of two above max_bucket, and
	 * low_mask must be one less than the power of two below it.
	 */
	pwr = (m->max_bucket == 0) ? 1 : 1 << __db_log2(m->max_bucket + 1);
	if (m->high_mask != pwr - 1) {
		EPRINT((dbp->dbenv,
		    "Page %lu: incorrect high_mask %lu, should be %lu",
		    (u_long)pgno, (u_long)m->high_mask, (u_long)pwr - 1));
		isbad = 1;
	}
	pwr >>= 1;
	if (m->low_mask != pwr - 1) {
		EPRINT((dbp->dbenv,
		    "Page %lu: incorrect low_mask %lu, should be %lu",
		    (u_long)pgno, (u_long)m->low_mask, (u_long)pwr - 1));
		isbad = 1;
	}
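
	/*
	 * Worked example (illustrative, not from the original source):
	 * with max_bucket == 9, __db_log2(10) == 4 (taking __db_log2 as
	 * the ceiling of log base 2), so pwr == 16 and high_mask should
	 * be 0xf; after pwr >>= 1, low_mask should be 0x7.  Any other
	 * values mean the masks are inconsistent with max_bucket.
	 */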

	/* ffactor: no check possible. */
	pip->h_ffactor = m->ffactor;

	/*
	 * nelem: just make sure it's not astronomical for now. This is the
	 * same check that hash_upgrade does, since there was a bug in 2.X
	 * which could make nelem go "negative".
	 */
	if (m->nelem > 0x80000000) {
		EPRINT((dbp->dbenv,
		    "Page %lu: suspiciously high nelem of %lu",
		    (u_long)pgno, (u_long)m->nelem));
		isbad = 1;
		pip->h_nelem = 0;
	} else
		pip->h_nelem = m->nelem;

	/* flags */
	if (F_ISSET(&m->dbmeta, DB_HASH_DUP))
		F_SET(pip, VRFY_HAS_DUPS);
	if (F_ISSET(&m->dbmeta, DB_HASH_DUPSORT))
		F_SET(pip, VRFY_HAS_DUPSORT);
	/* XXX: Why is the DB_HASH_SUBDB flag necessary? */

	/* spares array */
	for (i = 0; m->spares[i] != 0 && i < NCACHED; i++) {
		/*
		 * We set mbucket to the maximum bucket that would use a given
		 * spares entry; we want to ensure that it's always less
		 * than last_pgno.
		 */
		mbucket = (1 << i) - 1;
		if (BS_TO_PAGE(mbucket, m->spares) > vdp->last_pgno) {
			EPRINT((dbp->dbenv,
			    "Page %lu: spares array entry %d is invalid",
			    (u_long)pgno, i));
			isbad = 1;
		}
	}
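
	/*
	 * Sketch of the check above (illustrative): spares[i] is the
	 * page-number offset applied to buckets in the i-th doubling,
	 * and BS_TO_PAGE(bucket, spares) adds that offset to the bucket
	 * number.  For i == 3 the largest bucket in the doubling is
	 * mbucket == (1 << 3) - 1 == 7, so 7 + spares[3] must not
	 * exceed last_pgno.
	 */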

err:	if ((t_ret =
	    __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
		ret = t_ret;
	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}

/*
 * __ham_vrfy --
 *	Verify hash page.
 *
 * PUBLIC: int __ham_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
 * PUBLIC:     u_int32_t));
 */
int
__ham_vrfy(dbp, vdp, h, pgno, flags)
	DB *dbp;
	VRFY_DBINFO *vdp;
	PAGE *h;
	db_pgno_t pgno;
	u_int32_t flags;
{
	VRFY_PAGEINFO *pip;
	u_int32_t ent, himark, inpend;
	db_indx_t *inp;
	int isbad, ret, t_ret;

	isbad = 0;
	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
		return (ret);

	/* Sanity check our flags and page type. */
	if ((ret = __db_fchk(dbp->dbenv, "__ham_vrfy",
	    flags, DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)) != 0)
		goto err;

	if (TYPE(h) != P_HASH) {
		TYPE_ERR_PRINT(dbp->dbenv, "__ham_vrfy", pgno, TYPE(h));
		DB_ASSERT(0);
		ret = EINVAL;
		goto err;
	}

	/* Verify and save off fields common to all PAGEs. */
	if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
		if (ret == DB_VERIFY_BAD)
			isbad = 1;
		else
			goto err;
	}

	/*
	 * Verify inp[].  Each offset from 0 to NUM_ENT(h) must be lower
	 * than the previous one, higher than the current end of the inp array,
	 * and lower than the page size.
	 *
	 * In any case, we return immediately if things are bad, as it would
	 * be unsafe to proceed.
	 */
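	/*
	 * Example (illustrative): on a 4096-byte page holding two items,
	 * a sane layout might have inp[0] == 4050 and inp[1] == 3900,
	 * with the inp array itself ending well below 3900: himark moves
	 * down with each item while inpend grows by sizeof(db_indx_t),
	 * and the two must never cross.
	 */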
	inp = P_INP(dbp, h);
	for (ent = 0, himark = dbp->pgsize,
	    inpend = (u_int32_t)((u_int8_t *)inp - (u_int8_t *)h);
	    ent < NUM_ENT(h); ent++)
		if (inp[ent] >= himark) {
			EPRINT((dbp->dbenv,
			    "Page %lu: item %lu is out of order or nonsensical",
			    (u_long)pgno, (u_long)ent));
			isbad = 1;
			goto err;
		} else if (inpend >= himark) {
			EPRINT((dbp->dbenv,
			    "Page %lu: entries array collided with data",
			    (u_long)pgno));
			isbad = 1;
			goto err;

		} else {
			himark = inp[ent];
			inpend += sizeof(db_indx_t);
			if ((ret = __ham_vrfy_item(
			    dbp, vdp, pgno, h, ent, flags)) != 0)
				goto err;
		}

err:	if ((t_ret =
	    __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
		ret = t_ret;
	return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
}

/*
 * __ham_vrfy_item --
 *	Given a hash page and an offset, sanity-check the item itself,
 *	and save off any overflow items or off-page dup children as necessary.
 */
static int
__ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
	DB *dbp;
	VRFY_DBINFO *vdp;
	db_pgno_t pgno;
	PAGE *h;
	u_int32_t i, flags;
{
	HOFFPAGE hop;
	HOFFDUP hod;
	VRFY_CHILDINFO child;
	VRFY_PAGEINFO *pip;
	db_indx_t offset, len, dlen, elen;
	int ret, t_ret;
	u_int8_t *databuf;

	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
		return (ret);

	switch (HPAGE_TYPE(dbp, h, i)) {
	case H_KEYDATA:
		/* Nothing to do here--everything but the type field is data */
		break;
	case H_DUPLICATE:
		/* Are we a datum or a key?  Better be the former. */
		if (i % 2 == 0) {
			EPRINT((dbp->dbenv,
			    "Page %lu: hash key stored as duplicate item %lu",
			    (u_long)pip->pgno, (u_long)i));
		}
		/*
		 * Dups are encoded as a series within a single HKEYDATA,
		 * in which each dup is surrounded by a copy of its length
		 * on either side (so that the series can be walked in either
		 * direction).  We loop through this series and make sure
		 * each dup is reasonable.
		 *
		 * Note that at this point, we've verified item i-1, so
		 * it's safe to use LEN_HKEYDATA (which looks at inp[i-1]).
		 */
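		/*
		 * Layout sketch (illustrative): a dup set holding "ab"
		 * and "cde" is stored as
		 *	2 'a' 'b' 2   3 'c' 'd' 'e' 3
		 * with each length a db_indx_t, so one element occupies
		 * DUP_SIZE(dlen) == dlen + 2 * sizeof(db_indx_t) bytes.
		 */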
		len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
		databuf = HKEYDATA_DATA(P_ENTRY(dbp, h, i));
		for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
			memcpy(&dlen, databuf + offset, sizeof(db_indx_t));

			/* Make sure the length is plausible. */
			if (offset + DUP_SIZE(dlen) > len) {
				EPRINT((dbp->dbenv,
			    "Page %lu: duplicate item %lu has bad length",
				    (u_long)pip->pgno, (u_long)i));
				ret = DB_VERIFY_BAD;
				goto err;
			}

			/*
			 * Make sure the second copy of the length is the
			 * same as the first.
			 */
			memcpy(&elen,
			    databuf + offset + dlen + sizeof(db_indx_t),
			    sizeof(db_indx_t));
			if (elen != dlen) {
				EPRINT((dbp->dbenv,
		    "Page %lu: duplicate item %lu has two different lengths",
				    (u_long)pip->pgno, (u_long)i));
				ret = DB_VERIFY_BAD;
				goto err;
			}
		}
		F_SET(pip, VRFY_HAS_DUPS);
		if (!LF_ISSET(DB_NOORDERCHK) &&
		    __ham_dups_unsorted(dbp, databuf, len))
			F_SET(pip, VRFY_DUPS_UNSORTED);
		break;
	case H_OFFPAGE:
		/* Offpage item.  Make sure pgno is sane, save off. */
		memcpy(&hop, P_ENTRY(dbp, h, i), HOFFPAGE_SIZE);
		if (!IS_VALID_PGNO(hop.pgno) || hop.pgno == pip->pgno ||
		    hop.pgno == PGNO_INVALID) {
			EPRINT((dbp->dbenv,
			    "Page %lu: offpage item %lu has bad pgno %lu",
			    (u_long)pip->pgno, (u_long)i, (u_long)hop.pgno));
			ret = DB_VERIFY_BAD;
			goto err;
		}
		memset(&child, 0, sizeof(VRFY_CHILDINFO));
		child.pgno = hop.pgno;
		child.type = V_OVERFLOW;
		child.tlen = hop.tlen;	/* This will get checked later. */
		if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
			goto err;
		break;
	case H_OFFDUP:
		/* Offpage duplicate item.  Same drill. */
		memcpy(&hod, P_ENTRY(dbp, h, i), HOFFDUP_SIZE);
		if (!IS_VALID_PGNO(hod.pgno) || hod.pgno == pip->pgno ||
		    hod.pgno == PGNO_INVALID) {
			EPRINT((dbp->dbenv,
			    "Page %lu: offpage item %lu has bad page number",
			    (u_long)pip->pgno, (u_long)i));
			ret = DB_VERIFY_BAD;
			goto err;
		}
		memset(&child, 0, sizeof(VRFY_CHILDINFO));
		child.pgno = hod.pgno;
		child.type = V_DUPLICATE;
		if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
			goto err;
		F_SET(pip, VRFY_HAS_DUPS);
		break;
	default:
		EPRINT((dbp->dbenv,
		    "Page %lu: item %lu has bad type",
		    (u_long)pip->pgno, (u_long)i));
		ret = DB_VERIFY_BAD;
		break;
	}

err:	if ((t_ret =
	    __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}

/*
 * __ham_vrfy_structure --
 *	Verify the structure of a hash database.
 *
 * PUBLIC: int __ham_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
 * PUBLIC:     u_int32_t));
 */
int
__ham_vrfy_structure(dbp, vdp, meta_pgno, flags)
	DB *dbp;
	VRFY_DBINFO *vdp;
	db_pgno_t meta_pgno;
	u_int32_t flags;
{
	DB *pgset;
	DB_MPOOLFILE *mpf;
	HMETA *m;
	PAGE *h;
	VRFY_PAGEINFO *pip;
	int isbad, p, ret, t_ret;
	db_pgno_t pgno;
	u_int32_t bucket, spares_entry;

	mpf = dbp->mpf;
	pgset = vdp->pgset;
	h = NULL;
	ret = isbad = 0;

	if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, &p)) != 0)
		return (ret);
	if (p != 0) {
		EPRINT((dbp->dbenv,
		    "Page %lu: Hash meta page referenced twice",
		    (u_long)meta_pgno));
		return (DB_VERIFY_BAD);
	}
	if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
		return (ret);

	/* Get the meta page;  we'll need it frequently. */
	if ((ret = mpf->get(mpf, &meta_pgno, 0, &m)) != 0)
		return (ret);

	/* Loop through bucket by bucket. */
	for (bucket = 0; bucket <= m->max_bucket; bucket++)
		if ((ret =
		    __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)) != 0) {
			if (ret == DB_VERIFY_BAD)
				isbad = 1;
			else
				goto err;
		}

	/*
	 * There may be unused hash pages corresponding to buckets
	 * that have been allocated but not yet used.  These may be
	 * part of the current doubling above max_bucket, or they may
	 * correspond to buckets that were used in a transaction
	 * that then aborted.
	 *
	 * Loop through them, as far as the spares array defines them,
	 * and make sure they're all empty.
	 *
	 * Note that this should be safe, since we've already verified
	 * that the spares array is sane.
	 */
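	/*
	 * In the loop condition below, spares_entry == __db_log2(bucket + 1)
	 * selects the doubling a bucket belongs to; the walk stops at the
	 * first doubling whose spares entry is zero, i.e. one for which no
	 * pages were ever allocated.
	 */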
	for (bucket = m->max_bucket + 1; spares_entry = __db_log2(bucket + 1),
	    spares_entry < NCACHED && m->spares[spares_entry] != 0; bucket++) {
		pgno = BS_TO_PAGE(bucket, m->spares);
		if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
			goto err;

		/* It's okay if these pages are totally zeroed;  unmark it. */
		F_CLR(pip, VRFY_IS_ALLZEROES);

		/* It's also OK if this page is simply invalid. */
		if (pip->type == P_INVALID) {
			if ((ret = __db_vrfy_putpageinfo(dbp->dbenv,
			    vdp, pip)) != 0)
				goto err;
			continue;
		}

		if (pip->type != P_HASH) {
			EPRINT((dbp->dbenv,
			    "Page %lu: hash bucket %lu maps to non-hash page",
			    (u_long)pgno, (u_long)bucket));
			isbad = 1;
		} else if (pip->entries != 0) {
			EPRINT((dbp->dbenv,
			    "Page %lu: non-empty page in unused hash bucket %lu",
			    (u_long)pgno, (u_long)bucket));
			isbad = 1;
		} else {
			if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
				goto err;
			if (p != 0) {
				EPRINT((dbp->dbenv,
				    "Page %lu: above max_bucket referenced",
				    (u_long)pgno));
				isbad = 1;
			} else {
				if ((ret =
				    __db_vrfy_pgset_inc(pgset, pgno)) != 0)
					goto err;
				if ((ret = __db_vrfy_putpageinfo(dbp->dbenv,
				    vdp, pip)) != 0)
					goto err;
				continue;
			}
		}

		/* If we got here, it's an error. */
		(void)__db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);
		goto err;
	}

err:	if ((t_ret = mpf->put(mpf, m, 0)) != 0)
		return (t_ret);
	if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0)
		return (t_ret);
	return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
}

/*
 * __ham_vrfy_bucket --
 *	Verify a given bucket.
 */
static int
__ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
	DB *dbp;
	VRFY_DBINFO *vdp;
	HMETA *m;
	u_int32_t bucket, flags;
{
	HASH *hashp;
	VRFY_CHILDINFO *child;
	VRFY_PAGEINFO *mip, *pip;
	int ret, t_ret, isbad, p;
	db_pgno_t pgno, next_pgno;
	DBC *cc;
	u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));

	isbad = 0;
	pip = NULL;
	cc = NULL;

	hashp = dbp->h_internal;
	if (hashp != NULL && hashp->h_hash != NULL)
		hfunc = hashp->h_hash;
	else
		hfunc = __ham_func5;

	if ((ret = __db_vrfy_getpageinfo(vdp, PGNO(m), &mip)) != 0)
		return (ret);

	/* Calculate the first pgno for this bucket. */
	pgno = BS_TO_PAGE(bucket, m->spares);

	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
		goto err;

	/* Make sure we got a plausible page number. */
	if (pgno > vdp->last_pgno || pip->type != P_HASH) {
		EPRINT((dbp->dbenv,
		    "Page %lu: impossible first page in bucket %lu",
		    (u_long)pgno, (u_long)bucket));
		/* Unsafe to continue. */
		isbad = 1;
		goto err;
	}

	if (pip->prev_pgno != PGNO_INVALID) {
		EPRINT((dbp->dbenv,
		    "Page %lu: first page in hash bucket %lu has a prev_pgno",
		    (u_long)pgno, (u_long)bucket));
		isbad = 1;
	}

	/*
	 * Set flags for dups and sorted dups.
	 */
	flags |= F_ISSET(mip, VRFY_HAS_DUPS) ? ST_DUPOK : 0;
	flags |= F_ISSET(mip, VRFY_HAS_DUPSORT) ? ST_DUPSORT : 0;

	/* Loop until we find a fatal bug, or until we run out of pages. */
	for (;;) {
		/* Provide feedback on our progress to the application. */
		if (!LF_ISSET(DB_SALVAGE))
			__db_vrfy_struct_feedback(dbp, vdp);

		if ((ret = __db_vrfy_pgset_get(vdp->pgset, pgno, &p)) != 0)
			goto err;
		if (p != 0) {
			EPRINT((dbp->dbenv,
			    "Page %lu: hash page referenced twice",
			    (u_long)pgno));
			isbad = 1;
			/* Unsafe to continue. */
			goto err;
		} else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, pgno)) != 0)
			goto err;

		/*
		 * Hash pages that nothing has ever hashed to may never
		 * have actually come into existence, and may appear to be
		 * entirely zeroed.  This is acceptable, and since there's
		 * no real way for us to know whether this has actually
		 * occurred, we clear the "wholly zeroed" flag on every
		 * hash page.  A wholly zeroed page, by nature, will appear
		 * to have no flags set and zero entries, so should
		 * otherwise verify correctly.
		 */
		F_CLR(pip, VRFY_IS_ALLZEROES);

		/* If we have dups, our meta page had better know about it. */
		if (F_ISSET(pip, VRFY_HAS_DUPS) &&
		    !F_ISSET(mip, VRFY_HAS_DUPS)) {
			EPRINT((dbp->dbenv,
		    "Page %lu: duplicates present in non-duplicate database",
			    (u_long)pgno));
			isbad = 1;
		}

		/*
		 * If the database has sorted dups, this page had better
		 * not have unsorted ones.
		 */
		if (F_ISSET(mip, VRFY_HAS_DUPSORT) &&
		    F_ISSET(pip, VRFY_DUPS_UNSORTED)) {
			EPRINT((dbp->dbenv,
			    "Page %lu: unsorted dups in sorted-dup database",
			    (u_long)pgno));
			isbad = 1;
		}

		/* Walk overflow chains and offpage dup trees. */
		if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
			goto err;
		for (ret = __db_vrfy_ccset(cc, pip->pgno, &child); ret == 0;
		    ret = __db_vrfy_ccnext(cc, &child))
			if (child->type == V_OVERFLOW) {
				if ((ret = __db_vrfy_ovfl_structure(dbp, vdp,
				    child->pgno, child->tlen, flags)) != 0) {
					if (ret == DB_VERIFY_BAD)
						isbad = 1;
					else
						goto err;
				}
			} else if (child->type == V_DUPLICATE) {
				if ((ret = __db_vrfy_duptype(dbp,
				    vdp, child->pgno, flags)) != 0) {
					isbad = 1;
					continue;
				}
				if ((ret = __bam_vrfy_subtree(dbp, vdp,
				    child->pgno, NULL, NULL,
				    flags | ST_RECNUM | ST_DUPSET | ST_TOPLEVEL,
				    NULL, NULL, NULL)) != 0) {
					if (ret == DB_VERIFY_BAD)
						isbad = 1;
					else
						goto err;
				}
			}
		if ((ret = __db_vrfy_ccclose(cc)) != 0)
			goto err;
		cc = NULL;

		/* If it's safe to check that things hash properly, do so. */
		if (isbad == 0 && !LF_ISSET(DB_NOORDERCHK) &&
		    (ret = __ham_vrfy_hashing(dbp, pip->entries,
		    m, bucket, pgno, flags, hfunc)) != 0) {
			if (ret == DB_VERIFY_BAD)
				isbad = 1;
			else
				goto err;
		}

		next_pgno = pip->next_pgno;
		ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);

		pip = NULL;
		if (ret != 0)
			goto err;

		if (next_pgno == PGNO_INVALID)
			break;	/* End of the bucket. */

		/* We already checked this, but just in case... */
		if (!IS_VALID_PGNO(next_pgno)) {
			DB_ASSERT(0);
			EPRINT((dbp->dbenv,
			    "Page %lu: hash page has bad next_pgno",
			    (u_long)pgno));
			isbad = 1;
			goto err;
		}

		if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
			goto err;

		if (pip->prev_pgno != pgno) {
			EPRINT((dbp->dbenv,
			    "Page %lu: hash page has bad prev_pgno",
			    (u_long)next_pgno));
			isbad = 1;
		}
		pgno = next_pgno;
	}

err:	if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
		ret = t_ret;
	if (mip != NULL && ((t_ret =
	    __db_vrfy_putpageinfo(dbp->dbenv, vdp, mip)) != 0) && ret == 0)
		ret = t_ret;
	if (pip != NULL && ((t_ret =
	    __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) && ret == 0)
		ret = t_ret;
	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}

/*
 * __ham_vrfy_hashing --
 *	Verify that all items on a given hash page hash correctly.
 *
 * PUBLIC: int __ham_vrfy_hashing __P((DB *,
 * PUBLIC:     u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t,
 * PUBLIC:     u_int32_t (*) __P((DB *, const void *, u_int32_t))));
 */
int
__ham_vrfy_hashing(dbp, nentries, m, thisbucket, pgno, flags, hfunc)
	DB *dbp;
	u_int32_t nentries;
	HMETA *m;
	u_int32_t thisbucket;
	db_pgno_t pgno;
	u_int32_t flags;
	u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
{
	DBT dbt;
	DB_MPOOLFILE *mpf;
	PAGE *h;
	db_indx_t i;
	int ret, t_ret, isbad;
	u_int32_t hval, bucket;

	mpf = dbp->mpf;
	ret = isbad = 0;

	memset(&dbt, 0, sizeof(DBT));
	F_SET(&dbt, DB_DBT_REALLOC);

	if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
		return (ret);

	for (i = 0; i < nentries; i += 2) {
		/*
		 * We've already verified the page integrity and that of any
		 * overflow chains linked off it;  it is therefore safe to use
		 * __db_ret.  It's also not all that much slower, since we have
		 * to copy every hash item to deal with alignment anyway;  we
		 * can tweak this a bit if this proves to be a bottleneck,
		 * but for now, take the easy route.
		 */
		if ((ret = __db_ret(dbp, h, i, &dbt, NULL, NULL)) != 0)
			goto err;
		hval = hfunc(dbp, dbt.data, dbt.size);

		bucket = hval & m->high_mask;
		if (bucket > m->max_bucket)
			bucket = bucket & m->low_mask;
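
		/*
		 * Worked example (illustrative): with high_mask == 0xf,
		 * low_mask == 0x7 and max_bucket == 9, a hash value of
		 * 0x2c gives bucket 0xc == 12; since 12 > 9, the item
		 * really belongs in bucket 12 & 0x7 == 4.
		 */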

		if (bucket != thisbucket) {
			EPRINT((dbp->dbenv,
			    "Page %lu: item %lu hashes incorrectly",
			    (u_long)pgno, (u_long)i));
			isbad = 1;
		}
	}

err:	if (dbt.data != NULL)
		__os_ufree(dbp->dbenv, dbt.data);
	if ((t_ret = mpf->put(mpf, h, 0)) != 0)
		return (t_ret);

	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}

/*
 * __ham_salvage --
 *	Safely dump out anything that looks like a key on an alleged
 *	hash page.
 *
 * PUBLIC: int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
 * PUBLIC:     void *, int (*)(void *, const void *), u_int32_t));
 */
int
__ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
	DB *dbp;
	VRFY_DBINFO *vdp;
	db_pgno_t pgno;
	PAGE *h;
	void *handle;
	int (*callback) __P((void *, const void *));
	u_int32_t flags;
{
	DBT dbt, unkdbt;
	db_pgno_t dpgno;
	int ret, err_ret, t_ret;
	u_int32_t himark, tlen;
	u_int8_t *hk;
	void *buf;
	u_int32_t dlen, len, i;

	memset(&dbt, 0, sizeof(DBT));
	dbt.flags = DB_DBT_REALLOC;

	memset(&unkdbt, 0, sizeof(DBT));
	unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1;
	unkdbt.data = "UNKNOWN";

	err_ret = 0;

	/*
	 * Allocate a buffer for overflow items.  Start at one page;
	 * __db_safe_goff will realloc as needed.
	 */
	if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &buf)) != 0)
		return (ret);

	himark = dbp->pgsize;
	for (i = 0;; i++) {
		/* If we're not aggressive, break when we hit NUM_ENT(h). */
		if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
			break;

		/* Verify the current item. */
		ret = __db_vrfy_inpitem(dbp,
		    h, pgno, i, 0, flags, &himark, NULL);
		/* If this returned a fatality, it's time to break. */
		if (ret == DB_VERIFY_FATAL)
			break;

		if (ret == 0) {
			hk = P_ENTRY(dbp, h, i);
			len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
			if ((u_int32_t)(hk + len - (u_int8_t *)h) >
			    dbp->pgsize) {
				/*
				 * Item is unsafely large;  either continue
				 * or set it to the whole page, depending on
				 * aggressiveness.
				 */
				if (!LF_ISSET(DB_AGGRESSIVE))
					continue;
				len = dbp->pgsize -
				    (u_int32_t)(hk - (u_int8_t *)h);
				err_ret = DB_VERIFY_BAD;
			}
			switch (HPAGE_PTYPE(hk)) {
			default:
				if (!LF_ISSET(DB_AGGRESSIVE))
					break;
				err_ret = DB_VERIFY_BAD;
				/* FALLTHROUGH */
			case H_KEYDATA:
keydata:			memcpy(buf, HKEYDATA_DATA(hk), len);
				dbt.size = len;
				dbt.data = buf;
				if ((ret = __db_prdbt(&dbt,
				    0, " ", handle, callback, 0, vdp)) != 0)
					err_ret = ret;
				break;
			case H_OFFPAGE:
				if (len < HOFFPAGE_SIZE) {
					err_ret = DB_VERIFY_BAD;
					continue;
				}
				memcpy(&dpgno,
				    HOFFPAGE_PGNO(hk), sizeof(dpgno));
				if ((ret = __db_safe_goff(dbp, vdp,
				    dpgno, &dbt, &buf, flags)) != 0) {
					err_ret = ret;
					(void)__db_prdbt(&unkdbt, 0, " ",
					    handle, callback, 0, vdp);
					break;
				}
				if ((ret = __db_prdbt(&dbt,
				    0, " ", handle, callback, 0, vdp)) != 0)
					err_ret = ret;
				break;
			case H_OFFDUP:
				if (len < HOFFPAGE_SIZE) {
					err_ret = DB_VERIFY_BAD;
					continue;
				}
				memcpy(&dpgno,
				    HOFFPAGE_PGNO(hk), sizeof(dpgno));
				/* UNKNOWN iff pgno is bad or we're a key. */
				if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) {
					if ((ret = __db_prdbt(&unkdbt, 0, " ",
					    handle, callback, 0, vdp)) != 0)
						err_ret = ret;
				} else if ((ret = __db_salvage_duptree(dbp,
				    vdp, dpgno, &dbt, handle, callback,
				    flags | SA_SKIPFIRSTKEY)) != 0)
					err_ret = ret;
				break;
			case H_DUPLICATE:
				/*
				 * We're a key; printing dups will seriously
				 * foul the output.  If we're being aggressive,
				 * pretend this is a key and let the app.
				 * programmer sort out the mess.
				 */
				if (i % 2 == 0) {
					err_ret = ret;
					if (LF_ISSET(DB_AGGRESSIVE))
						goto keydata;
					break;
				}

				/* Too small to have any data. */
				if (len <
				    HKEYDATA_SIZE(2 * sizeof(db_indx_t))) {
					err_ret = DB_VERIFY_BAD;
					continue;
				}

				/* Loop until we hit the total length. */
				for (tlen = 0; tlen + sizeof(db_indx_t) < len;
				    tlen += dlen) {
					tlen += sizeof(db_indx_t);
					memcpy(&dlen, hk, sizeof(db_indx_t));
					/*
					 * If dlen is too long, print all the
					 * rest of the dup set in a chunk.
					 */
					if (dlen + tlen > len)
						dlen = len - tlen;
					memcpy(buf, hk + tlen, dlen);
					dbt.size = dlen;
					dbt.data = buf;
					if ((ret = __db_prdbt(&dbt, 0, " ",
					    handle, callback, 0, vdp)) != 0)
						err_ret = ret;
					tlen += sizeof(db_indx_t);
				}
				break;
			}
		}
	}

	__os_free(dbp->dbenv, buf);
	if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
		return (t_ret);
	return ((ret == 0 && err_ret != 0) ? err_ret : ret);
}

/*
 * __ham_meta2pgset --
 *	Return the set of hash pages corresponding to the given
 *	known-good meta page.
 *
 * PUBLIC: int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
 * PUBLIC:     DB *));
 */
int
__ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
	DB *dbp;
	VRFY_DBINFO *vdp;
	HMETA *hmeta;
	u_int32_t flags;
	DB *pgset;
{
	DB_MPOOLFILE *mpf;
	PAGE *h;
	db_pgno_t pgno;
	u_int32_t bucket, totpgs;
	int ret, val;

	/*
	 * We don't really need flags, but leave them for consistency with
	 * __bam_meta2pgset.
	 */
	COMPQUIET(flags, 0);

	DB_ASSERT(pgset != NULL);

	mpf = dbp->mpf;
	totpgs = 0;

	/*
	 * Loop through all the buckets, pushing onto pgset the corresponding
	 * page(s) for each one.
	 */
	for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
		pgno = BS_TO_PAGE(bucket, hmeta->spares);

		/*
		 * We know the initial pgno is safe because the spares array has
		 * been verified.
		 *
		 * Safely walk the list of pages in this bucket.
		 */
		for (;;) {
			if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
				return (ret);
			if (TYPE(h) == P_HASH) {

				/*
				 * Make sure we don't go past the end of
				 * pgset.
				 */
				if (++totpgs > vdp->last_pgno) {
					(void)mpf->put(mpf, h, 0);
					return (DB_VERIFY_BAD);
				}
				if ((ret =
				    __db_vrfy_pgset_inc(pgset, pgno)) != 0) {
					(void)mpf->put(mpf, h, 0);
					return (ret);
				}

				pgno = NEXT_PGNO(h);
			} else
				pgno = PGNO_INVALID;

			if ((ret = mpf->put(mpf, h, 0)) != 0)
				return (ret);

			/* If the new pgno is wonky, go onto the next bucket. */
			if (!IS_VALID_PGNO(pgno) ||
			    pgno == PGNO_INVALID)
				break;

			/*
			 * If we've touched this page before, we have a cycle;
			 * go on to the next bucket.
			 */
			if ((ret = __db_vrfy_pgset_get(pgset, pgno, &val)) != 0)
				return (ret);
			if (val != 0)
				break;
		}
	}
	return (0);
}

/*
 * __ham_dups_unsorted --
 *	Takes a known-safe hash duplicate set and its total length.
 *	Returns 1 if there are out-of-order duplicates in this set,
 *	0 if there are not.
 */
static int
__ham_dups_unsorted(dbp, buf, len)
	DB *dbp;
	u_int8_t *buf;
	u_int32_t len;
{
	DBT a, b;
	db_indx_t offset, dlen;
	int (*func) __P((DB *, const DBT *, const DBT *));

	memset(&a, 0, sizeof(DBT));
	memset(&b, 0, sizeof(DBT));

	func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
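
	/*
	 * Note (added for clarity): when the application has not set a
	 * dup_compare function, __bam_defcmp does a plain lexicographic
	 * byte-by-byte comparison, which is the order sorted duplicate
	 * sets are expected to be stored in.
	 */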

	/*
	 * Loop through the dup set until we hit the end or we find
	 * a pair of dups that's out of order.  b is always the current
	 * dup, a the one before it.
	 */
	for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
		memcpy(&dlen, buf + offset, sizeof(db_indx_t));
		b.data = buf + offset + sizeof(db_indx_t);
		b.size = dlen;

		if (a.data != NULL && func(dbp, &a, &b) > 0)
			return (1);

		a.data = b.data;
		a.size = b.size;
	}

	return (0);
}