# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999-2002
#	Sleepycat Software.  All rights reserved.
#
# $Id: recd007.tcl,v 11.60 2002/08/08 15:38:07 bostic Exp $
#
# TEST	recd007
# TEST	File create/delete tests.
# TEST
# TEST	This is a recovery test for create/delete of databases.  We have
# TEST	hooks in the database so that we can abort the process at various
# TEST	points and make sure that the transaction doesn't commit.  We
# TEST	then need to recover and make sure the file is correctly existing
# TEST	or not, as the case may be.
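#
# NOTE: Each test case below is a {COPY ABORT} pair naming the point at
# which the library should save a copy of the file and the point at which
# it should abort the operation; the hooks are armed at run time with
# "$env test copy <loc>" and "$env test abort <loc>".  Assuming the usual
# test harness has been sourced, the test should also be runnable by hand,
# for example:
#
#	recd007 btree
#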
proc recd007 { method args} {
	global fixed_len
	source ./include.tcl

	set orig_fixed_len $fixed_len
	set opts [convert_args $method $args]
	set omethod [convert_method $method]

	puts "Recd007: $method operation/transaction tests"

	# Create the database and environment.
	env_cleanup $testdir

	set testfile recd007.db
	set flags "-create -txn -home $testdir"

	puts "\tRecd007.a: creating environment"
	set env_cmd "berkdb_env $flags"

	set env [eval $env_cmd]

	# We need to create a database to get the pagesize (either
	# the default or whatever might have been specified).
	# Then remove it so we can compute fixed_len and create the
	# real database.
	set oflags "-create $omethod -mode 0644 -env $env $opts $testfile"
	set db [eval {berkdb_open} $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set stat [$db stat]
	#
	# Compute the fixed_len based on the pagesize being used.
	# We want the fixed_len to be 1/4 the pagesize.
	#
	set pg [get_pagesize $stat]
	error_check_bad get_pagesize $pg -1
	set fixed_len [expr $pg / 4]
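	# For example, with a 4096-byte pagesize this makes fixed_len
	# 4096 / 4 = 1024 bytes.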
	error_check_good db_close [$db close] 0
	error_check_good dbremove [berkdb dbremove -env $env $testfile] 0
	error_check_good envclose [$env close] 0

	# Convert the args again because fixed_len is now real.
	set opts [convert_args $method ""]

	# List of recovery tests: {HOOKS MSG} pairs
	# Where each HOOK is a list of {COPY ABORT}
	#
	set rlist {
	{ {"none" "preopen"}	"Recd007.b0: none/preopen"}
	{ {"none" "postopen"}	"Recd007.b1: none/postopen"}
	{ {"none" "postlogmeta"}	"Recd007.b2: none/postlogmeta"}
	{ {"none" "postlog"}	"Recd007.b3: none/postlog"}
	{ {"none" "postsync"}	"Recd007.b4: none/postsync"}
	{ {"postopen" "none"}	"Recd007.c0: postopen/none"}
	{ {"postlogmeta" "none"}	"Recd007.c1: postlogmeta/none"}
	{ {"postlog" "none"}	"Recd007.c2: postlog/none"}
	{ {"postsync" "none"}	"Recd007.c3: postsync/none"}
	{ {"postopen" "postopen"}	"Recd007.d: postopen/postopen"}
	{ {"postopen" "postlogmeta"}	"Recd007.e: postopen/postlogmeta"}
	{ {"postopen" "postlog"}	"Recd007.f: postopen/postlog"}
	{ {"postlog" "postlog"}	"Recd007.g: postlog/postlog"}
	{ {"postlogmeta" "postlogmeta"}	"Recd007.h: postlogmeta/postlogmeta"}
	{ {"postlogmeta" "postlog"}	"Recd007.i: postlogmeta/postlog"}
	{ {"postlog" "postsync"}	"Recd007.j: postlog/postsync"}
	{ {"postsync" "postsync"}	"Recd007.k: postsync/postsync"}
	}

	# Now run through the operation table and run each of the
	# recovery tests.
	foreach pair $rlist {
		set cmd [lindex $pair 0]
		set msg [lindex $pair 1]
		file_recover_create $testdir $env_cmd $omethod \
		    $opts $testfile $cmd $msg
	}

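	# For example, the first {HOOKS MSG} entry above expands to a call
	# roughly like this (a sketch, not executed here):
	#
	#	file_recover_create $testdir $env_cmd $omethod \
	#	    $opts recd007.db {"none" "preopen"} \
	#	    "Recd007.b0: none/preopen"
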
	set rlist {
	{ {"none" "predestroy"}	"Recd007.l0: none/predestroy"}
	{ {"none" "postdestroy"}	"Recd007.l1: none/postdestroy"}
	{ {"predestroy" "none"}	"Recd007.m0: predestroy/none"}
	{ {"postdestroy" "none"}	"Recd007.m1: postdestroy/none"}
	{ {"predestroy" "predestroy"}	"Recd007.n: predestroy/predestroy"}
	{ {"predestroy" "postdestroy"}	"Recd007.o: predestroy/postdestroy"}
	{ {"postdestroy" "postdestroy"}	"Recd007.p: postdestroy/postdestroy"}
	}
	foreach op { dbremove dbrename dbtruncate } {
		foreach pair $rlist {
			set cmd [lindex $pair 0]
			set msg [lindex $pair 1]
			file_recover_delete $testdir $env_cmd $omethod \
			    $opts $testfile $cmd $msg $op
		}
	}

	if { $is_windows_test != 1 } {
		set env_cmd "berkdb_env_noerr $flags"
		do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
	}

	puts "\tRecd007.r: Verify db_printlog can read logfile"
	set tmpfile $testdir/printlog.out
	set stat [catch {exec $util_path/db_printlog -h $testdir \
	    > $tmpfile} ret]
	error_check_good db_printlog $stat 0
	fileremove $tmpfile
}

proc file_recover_create { dir env_cmd method opts dbfile cmd msg } {
	#
	# We run this test on each of these scenarios:
	# 1. Creating just a database
	# 2. Creating a database with a subdb
	# 3. Creating a 2nd subdb in a database
	puts "\t$msg create with a database"
	do_file_recover_create $dir $env_cmd $method $opts $dbfile \
	    0 $cmd $msg
	if { [is_queue $method] == 1 } {
		puts "\tSkipping subdatabase tests for method $method"
		return
	}
	puts "\t$msg create with a database and subdb"
	do_file_recover_create $dir $env_cmd $method $opts $dbfile \
	    1 $cmd $msg
	puts "\t$msg create with a database and 2nd subdb"
	do_file_recover_create $dir $env_cmd $method $opts $dbfile \
	    2 $cmd $msg

}

proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
	global log_log_record_types
	source ./include.tcl

	# Keep track of the log types we've seen
	if { $log_log_record_types == 1} {
		logtrack_read $dir
	}

	env_cleanup $dir
	set dflags "-dar"
	# Open the environment and set the copy/abort locations
	set env [eval $env_cmd]
	set copy [lindex $cmd 0]
	set abort [lindex $cmd 1]
	error_check_good copy_location [is_valid_create_loc $copy] 1
	error_check_good abort_location [is_valid_create_loc $abort] 1

	if {([string first "logmeta" $copy] != -1 || \
	    [string first "logmeta" $abort] != -1) && \
	    [is_btree $method] == 0 } {
		puts "\tSkipping for method $method"
		$env test copy none
		$env test abort none
		error_check_good env_close [$env close] 0
		return
	}

	# Basically non-existence is our initial state.  When we
	# abort, it is also our final state.
	#
	switch $sub {
		0 {
			set oflags "-create $method -auto_commit -mode 0644 \
			    -env $env $opts $dbfile"
		}
		1 {
			set oflags "-create $method -auto_commit -mode 0644 \
			    -env $env $opts $dbfile sub0"
		}
		2 {
			#
			# If we are aborting here, then we need to
			# create a first subdb, then create a second
			#
			set oflags "-create $method -auto_commit -mode 0644 \
			    -env $env $opts $dbfile sub0"
			set db [eval {berkdb_open} $oflags]
			error_check_good db_open [is_valid_db $db] TRUE
			error_check_good db_close [$db close] 0
			set init_file $dir/$dbfile.init
			catch { file copy -force $dir/$dbfile $init_file } res
			set oflags "-create $method -auto_commit -mode 0644 \
			    -env $env $opts $dbfile sub1"
		}
		default {
			puts "\tBad value $sub for sub"
			return
		}
	}
	#
	# Set our locations to copy and abort
	#
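	# ($env test copy <loc> asks the library to save a copy of the file,
	# as <file>.afterop, when it reaches that point in the operation, and
	# $env test abort <loc> makes the operation fail at that point;
	# "none" leaves the hook disarmed.)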
	set ret [eval $env test copy $copy]
	error_check_good test_copy $ret 0
	set ret [eval $env test abort $abort]
	error_check_good test_abort $ret 0

	puts "\t\tExecuting command"
	set ret [catch {eval {berkdb_open} $oflags} db]

	# Sync the mpool so any changes to the file that are
	# in mpool get written to the disk file before the
	# diff.
	$env mpool_sync

	#
	# If we don't abort, then we expect success.
	# If we abort, we expect no file created.
	#
	if {[string first "none" $abort] == -1} {
		#
		# Operation was aborted, verify it does
		# not exist.
		#
		puts "\t\tCommand executed and aborted."
		error_check_bad db_open $ret 0

		#
		# Check that the file does not exist.  Final state.
		#
		if { $sub != 2 } {
			error_check_good db_open:exists \
			    [file exists $dir/$dbfile] 0
		} else {
			error_check_good \
			    diff(init,postcreate):diff($init_file,$dir/$dbfile)\
			    [dbdump_diff $dflags $init_file $dir $dbfile] 0
		}
	} else {
		#
		# Operation was committed, verify it exists.
		#
		puts "\t\tCommand executed and committed."
		error_check_good db_open [is_valid_db $db] TRUE
		error_check_good db_close [$db close] 0

		#
		# Check that the file exists.
		#
		error_check_good db_open [file exists $dir/$dbfile] 1
		set init_file $dir/$dbfile.init
		catch { file copy -force $dir/$dbfile $init_file } res

		if { [is_queue $method] == 1 } {
			copy_extent_file $dir $dbfile init
		}
	}
	error_check_good env_close [$env close] 0

	#
	# Run recovery here.  Should be a no-op.  Verify that
	# the file still doesn't exist or change (depending on sub)
	# when we are done.
	#
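	# (db_recover -c requests catastrophic recovery against the
	# environment in $dir; since the create either committed or was
	# aborted cleanly, it is expected to succeed without changing the
	# outcome checked below.)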
	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	if { $sub != 2 && [string first "none" $abort] == -1} {
		#
		# Operation was aborted, verify it still does
		# not exist.  Only done with file creations.
		#
		error_check_good after_recover1 [file exists $dir/$dbfile] 0
	} else {
		#
		# Operation was committed or just a subdb was aborted.
		# Verify it did not change.
		#
		error_check_good \
		    diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
		#
		# Need a new copy to get the right LSN into the file.
		#
		catch { file copy -force $dir/$dbfile $init_file } res

		if { [is_queue $method] == 1 } {
			copy_extent_file $dir $dbfile init
		}
	}

	# If we didn't make a copy, then we are done.
	#
	if {[string first "none" $copy] != -1} {
		return
	}

	#
	# Now move the .afterop file to $dbfile.  Run recovery again.
	#
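	# (copy_afterop, defined at the bottom of this file, renames any
	# <name>.afterop copies made by the copy hook back to <name>, so
	# this recovery pass starts from the state captured at the copy
	# point.)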
	copy_afterop $dir

	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	if { $sub != 2 && [string first "none" $abort] == -1} {
		#
		# Operation was aborted, verify it still does
		# not exist.  Only done with file creations.
		#
		error_check_good after_recover2 [file exists $dir/$dbfile] 0
	} else {
		#
		# Operation was committed or just a subdb was aborted.
		# Verify it did not change.
		#
		error_check_good \
		    diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
	}

}

proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op } {
	#
	# We run this test on each of these scenarios:
	# 1. Deleting/Renaming just a database
	# 2. Deleting/Renaming a database with a subdb
	# 3. Deleting/Renaming a 2nd subdb in a database
	puts "\t$msg $op with a database"
	do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
	    0 $cmd $msg $op
	if { [is_queue $method] == 1 } {
		puts "\tSkipping subdatabase tests for method $method"
		return
	}
	puts "\t$msg $op with a database and subdb"
	do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
	    1 $cmd $msg $op
	puts "\t$msg $op with a database and 2nd subdb"
	do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
	    2 $cmd $msg $op

}

proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
	global log_log_record_types
	source ./include.tcl

	# Keep track of the log types we've seen
	if { $log_log_record_types == 1} {
		logtrack_read $dir
	}

	env_cleanup $dir
	# Open the environment and set the copy/abort locations
	set env [eval $env_cmd]
	set copy [lindex $cmd 0]
	set abort [lindex $cmd 1]
	error_check_good copy_location [is_valid_delete_loc $copy] 1
	error_check_good abort_location [is_valid_delete_loc $abort] 1

	if { [is_record_based $method] == 1 } {
		set key1 1
		set key2 2
	} else {
		set key1 recd007_key1
		set key2 recd007_key2
	}
	set data1 recd007_data0
	set data2 recd007_data1
	set data3 NEWrecd007_data2

	#
	# Depending on what sort of subdb we want, if any, our
	# args to the open call will be different (and if we
	# want a 2nd subdb, we create the first one here).
	#
	# XXX
	# For dbtruncate, we want oflags to have "$env" in it,
	# not have the value currently in 'env'.  That is why
	# the '$' is protected below.  Later on we use oflags
	# but with a new $env we just opened.
	#
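	# A minimal sketch of the difference (hypothetical values, for
	# illustration only):
	#	set env ENV0
	#	set a "-env \$env"   ;# a holds the literal string "-env $env"
	#	set b "-env $env"    ;# b holds "-env ENV0" immediately
	# Because of the backslash, [eval {berkdb_open} $oflags] substitutes
	# $env only at eval time, so the same oflags string still works after
	# we close this env handle and open a new one.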
	switch $sub {
		0 {
			set subdb ""
			set new $dbfile.new
			set dflags "-dar"
			set oflags "-create $method -auto_commit -mode 0644 \
			    -env \$env $opts $dbfile"
		}
		1 {
			set subdb sub0
			set new $subdb.new
			set dflags ""
			set oflags "-create $method -auto_commit -mode 0644 \
			    -env \$env $opts $dbfile $subdb"
		}
		2 {
			#
			# If we are aborting here, then we need to
			# create a first subdb, then create a second
			#
			set subdb sub1
			set new $subdb.new
			set dflags ""
			set oflags "-create $method -auto_commit -mode 0644 \
			    -env \$env $opts $dbfile sub0"
			set db [eval {berkdb_open} $oflags]
			error_check_good db_open [is_valid_db $db] TRUE
			set txn [$env txn]
			set ret [$db put -txn $txn $key1 $data1]
			error_check_good db_put $ret 0
			error_check_good commit [$txn commit] 0
			error_check_good db_close [$db close] 0
			set oflags "-create $method -auto_commit -mode 0644 \
			    -env \$env $opts $dbfile $subdb"
		}
		default {
			puts "\tBad value $sub for sub"
			return
		}
	}

	#
	# Set our locations to copy and abort
	#
	set ret [eval $env test copy $copy]
	error_check_good test_copy $ret 0
	set ret [eval $env test abort $abort]
	error_check_good test_abort $ret 0

	#
	# Open our db, add some data, close and copy as our
	# init file.
	#
	set db [eval {berkdb_open} $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set txn [$env txn]
	set ret [$db put -txn $txn $key1 $data1]
	error_check_good db_put $ret 0
	set ret [$db put -txn $txn $key2 $data2]
	error_check_good db_put $ret 0
	error_check_good commit [$txn commit] 0
	error_check_good db_close [$db close] 0

	$env mpool_sync

	set init_file $dir/$dbfile.init
	catch { file copy -force $dir/$dbfile $init_file } res

	if { [is_queue $method] == 1} {
		copy_extent_file $dir $dbfile init
	}

	#
	# If we don't abort, then we expect success.
	# If we abort, we expect no file removed.
	#
	switch $op {
		"dbrename" {
			set ret [catch { eval {berkdb} $op -env $env -auto_commit \
			    $dbfile $subdb $new } remret]
		}
		"dbremove" {
			set ret [catch { eval {berkdb} $op -env $env -auto_commit \
			    $dbfile $subdb } remret]
		}
		"dbtruncate" {
			set txn [$env txn]
			set db [eval {berkdb_open_noerr -env} \
			    $env -auto_commit $dbfile $subdb]
			error_check_good dbopen [is_valid_db $db] TRUE
			error_check_good txnbegin [is_valid_txn $txn $env] TRUE
			set ret [catch {$db truncate -txn $txn} remret]
		}
	}
	$env mpool_sync
	if { $abort == "none" } {
		if { $op == "dbtruncate" } {
			error_check_good txncommit [$txn commit] 0
			error_check_good dbclose [$db close] 0
		}
		#
		# Operation was committed, verify it.
		#
		puts "\t\tCommand executed and committed."
		error_check_good $op $ret 0
		#
		# If a dbtruncate, check that truncate returned the number
		# of items previously in the database.
		#
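		# (The two items are the $key1/$key2 pairs put into the
		# database above.)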
		if { [string compare $op "dbtruncate"] == 0 } {
			error_check_good remret $remret 2
		}
		recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
	} else {
		#
		# Operation was aborted, verify it did not change.
		#
		if { $op == "dbtruncate" } {
			error_check_good txnabort [$txn abort] 0
			error_check_good dbclose [$db close] 0
		}
		puts "\t\tCommand executed and aborted."
		error_check_good $op $ret 1

		#
		# Check that the file exists.  Final state.
		# Compare against initial file.
		#
		error_check_good post$op.1 [file exists $dir/$dbfile] 1
		error_check_good \
		    diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
	}
	$env mpool_sync
	error_check_good env_close [$env close] 0
	catch { file copy -force $dir/$dbfile $init_file } res
	if { [is_queue $method] == 1} {
		copy_extent_file $dir $dbfile init
	}


	#
	# Run recovery here.  Should be a no-op.  Verify that
	# the file still doesn't exist or change (depending on abort)
	# when we are done.
	#
	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}

	puts "complete"

	if { $abort == "none" } {
		#
		# Operation was committed.
		#
		set env [eval $env_cmd]
		recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
		error_check_good env_close [$env close] 0
	} else {
		#
		# Operation was aborted, verify it did not change.
		#
		berkdb debug_check
		error_check_good \
		    diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
	}

	#
	# If we didn't make a copy, then we are done.
	#
	if {[string first "none" $copy] != -1} {
		return
	}

	#
	# Now restore the .afterop file(s) to their original name.
	# Run recovery again.
	#
	copy_afterop $dir

	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"

	if { [string first "none" $abort] != -1} {
		set env [eval $env_cmd]
		recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
		error_check_good env_close [$env close] 0
	} else {
		#
		# Operation was aborted, verify it did not change.
		#
		error_check_good \
		    diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
	}

}

#
# This function tests a specific case of recovering after a db removal.
# This is for SR #2538.  Basically we want to test the following sequence:
# - Make an env.
# - Make/close a db.
# - Remove the db.
# - Create another db of same name.
# - Sync db but leave open.
# - Run recovery.
# - Verify no recovery errors and that new db is there.
proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
	global log_log_record_types
	source ./include.tcl

	# Keep track of the log types we've seen
	if { $log_log_record_types == 1} {
		logtrack_read $dir
	}
	set omethod [convert_method $method]

	puts "\tRecd007.q: Delete and recreate a database"
	env_cleanup $dir
	# Open the environment.
	set env [eval $env_cmd]
	error_check_good env_open [is_valid_env $env] TRUE

	if { [is_record_based $method] == 1 } {
		set key 1
	} else {
		set key recd007_key
	}
	set data1 recd007_data
	set data2 NEWrecd007_data2

	set oflags \
	    "-create $omethod -auto_commit -mode 0644 $opts $dbfile"

	#
	# Open our db, add some data, close and copy as our
	# init file.
	#
	set db [eval {berkdb_open_noerr} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set txn [$env txn]
	set ret [$db put -txn $txn $key $data1]
	error_check_good db_put $ret 0
	error_check_good commit [$txn commit] 0
	error_check_good db_close [$db close] 0

	set ret \
	    [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]

	#
	# Operation was committed, verify it does
	# not exist.
	#
	puts "\t\tCommand executed and committed."
	error_check_good dbremove $ret 0
	error_check_good dbremove.1 [file exists $dir/$dbfile] 0

	#
	# Now create a new db with the same name.
	#
	set db [eval {berkdb_open_noerr} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set txn [$env txn]
	set ret [$db put -txn $txn $key [chop_data $method $data2]]
	error_check_good db_put $ret 0
	error_check_good commit [$txn commit] 0
	error_check_good db_sync [$db sync] 0

	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	error_check_good db_recover $stat 0
	error_check_good db_recover.1 [file exists $dir/$dbfile] 1
	#
	# Since we ran recovery on the open db/env, we need to
	# catch these calls.  Basically they are there to clean
	# up the Tcl widgets.
	#
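	# (Running recovery while this process still holds open db/env
	# handles invalidates those handles, so the close calls below are
	# expected to fail with an error mentioning recovery; we only check
	# the error text.)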
	set stat [catch {$db close} ret]
	error_check_bad dbclose_after_remove $stat 0
	error_check_good dbclose_after_remove [is_substr $ret recovery] 1
	set stat [catch {$env close} ret]
	error_check_bad envclose_after_remove $stat 0
	error_check_good envclose_after_remove [is_substr $ret recovery] 1

	#
	# Reopen env and db and verify 2nd database is there.
	#
	set env [eval $env_cmd]
	error_check_good env_open [is_valid_env $env] TRUE
	set db [eval {berkdb_open} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set ret [$db get $key]
	error_check_good dbget [llength $ret] 1
	set kd [lindex $ret 0]
	error_check_good key [lindex $kd 0] $key
	error_check_good data2 [lindex $kd 1] [pad_data $method $data2]

	error_check_good dbclose [$db close] 0
	error_check_good envclose [$env close] 0
}

proc is_valid_create_loc { loc } {
	switch $loc {
		none -
		preopen -
		postopen -
		postlogmeta -
		postlog -
		postsync
			{ return 1 }
		default
			{ return 0 }
	}
}

proc is_valid_delete_loc { loc } {
	switch $loc {
		none -
		predestroy -
		postdestroy -
		postremcall
			{ return 1 }
		default
			{ return 0 }
	}
}

# Do a logical diff on the db dump files.  We expect that either
# the files are identical, or if they differ, that it is exactly
# just a free/invalid page.
# Return 1 if they are different, 0 if logically the same (or identical).
#
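# A typical call, mirroring the uses above (shown only as an example):
#
#	dbdump_diff "-dar" $dir/recd007.db.init $dir recd007.db
#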
proc dbdump_diff { flags initfile dir dbfile } {
	source ./include.tcl

	set initdump $initfile.dump
	set dbdump $dbfile.dump

	set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
	    $initfile} ret]
	error_check_good dbdump.init $stat 0

	# Do a dump without the freelist which should eliminate any
	# recovery differences.
	set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
	    $dir/$dbfile} ret]
	error_check_good dbdump.db $stat 0

	set stat [filecmp $dir/$dbdump $initdump]

	if {$stat == 0} {
		return 0
	}
	puts "diff: $dbdump $initdump gives:\n$ret"
	return 1
}

proc recd007_check { op sub dir dbfile subdb new env oflags } {
	#
	# No matter how many subdbs we have, dbtruncate will always
	# have a file, and if we open our particular db, it should
	# have no entries.
	#
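	# Expected end state checked below:
	#	dbremove:   the file (or last subdb) is gone
	#	dbrename:   the old name is gone and <name>.new exists
	#	dbtruncate: the file/subdb still exists but holds no entries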
	if { $sub == 0 } {
		if { $op == "dbremove" } {
			error_check_good $op:not-exist \
			    [file exists $dir/$dbfile] 0
		} elseif { $op == "dbrename"} {
			error_check_good $op:exist \
			    [file exists $dir/$dbfile] 0
			error_check_good $op:exist2 \
			    [file exists $dir/$dbfile.new] 1
		} else {
			error_check_good $op:exist \
			    [file exists $dir/$dbfile] 1
			set db [eval {berkdb_open} $oflags]
			error_check_good db_open [is_valid_db $db] TRUE
			set dbc [$db cursor]
			error_check_good dbc_open \
			    [is_valid_cursor $dbc $db] TRUE
			set ret [$dbc get -first]
			error_check_good dbget1 [llength $ret] 0
			error_check_good dbc_close [$dbc close] 0
			error_check_good db_close [$db close] 0
		}
		return
	} else {
		set t1 $dir/t1
		#
		# If we have subdbs, check that all but the last one
		# are there, and the last one is correctly operated on.
		#
		set db [berkdb_open -rdonly -env $env $dbfile]
		error_check_good dbopen [is_valid_db $db] TRUE
		set c [eval {$db cursor}]
		error_check_good db_cursor [is_valid_cursor $c $db] TRUE
		set d [$c get -last]
		if { $op == "dbremove" } {
			if { $sub == 1 } {
				error_check_good subdb:rem [llength $d] 0
			} else {
				error_check_bad subdb:rem [llength $d] 0
				set sdb [lindex [lindex $d 0] 0]
				error_check_bad subdb:rem1 $sdb $subdb
			}
		} elseif { $op == "dbrename"} {
			set sdb [lindex [lindex $d 0] 0]
			error_check_good subdb:ren $sdb $new
			if { $sub != 1 } {
				set d [$c get -prev]
				error_check_bad subdb:ren [llength $d] 0
				set sdb [lindex [lindex $d 0] 0]
				error_check_good subdb:ren1 \
				    [is_substr "new" $sdb] 0
			}
		} else {
			set sdb [lindex [lindex $d 0] 0]
			set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
			error_check_good db_open [is_valid_db $dbt] TRUE
			set dbc [$dbt cursor]
			error_check_good dbc_open \
			    [is_valid_cursor $dbc $dbt] TRUE
			set ret [$dbc get -first]
			error_check_good dbget2 [llength $ret] 0
			error_check_good dbc_close [$dbc close] 0
			error_check_good db_close [$dbt close] 0
			if { $sub != 1 } {
				set d [$c get -prev]
				error_check_bad subdb:ren [llength $d] 0
				set sdb [lindex [lindex $d 0] 0]
				set dbt [berkdb_open -rdonly -env $env \
				    $dbfile $sdb]
				error_check_good db_open [is_valid_db $dbt] TRUE
				set dbc [$db cursor]
				error_check_good dbc_open \
				    [is_valid_cursor $dbc $db] TRUE
				set ret [$dbc get -first]
				error_check_bad dbget3 [llength $ret] 0
				error_check_good dbc_close [$dbc close] 0
				error_check_good db_close [$dbt close] 0
			}
		}
		error_check_good dbcclose [$c close] 0
		error_check_good db_close [$db close] 0
	}
}

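# Rename any <name>.afterop copies (made by the "copy" test hook) back to
# <name>, so a subsequent recovery run sees the post-operation image.  If
# no .afterop files exist there is nothing to do.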
proc copy_afterop { dir } {
	set r [catch { set filecopy [glob $dir/*.afterop] } res]
	if { $r == 1 } {
		return
	}
	foreach f $filecopy {
		set orig [string range $f 0 \
		    [expr [string last "." $f] - 1]]
		catch { file rename -force $f $orig} res
	}
}