BDB 4.1.24

BitKeeper/deleted/.del-ex_access.wpj~3df6ae8c99bf7c5f:
  Delete: bdb/build_vxworks/ex_access/ex_access.wpj
BitKeeper/deleted/.del-ex_btrec.wpj~a7622f1c6f432dc6:
  Delete: bdb/build_vxworks/ex_btrec/ex_btrec.wpj
BitKeeper/deleted/.del-ex_dbclient.wpj~7345440f3b204cdd:
  Delete: bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj
BitKeeper/deleted/.del-ex_env.wpj~fbe1ab10b04e8b74:
  Delete: bdb/build_vxworks/ex_env/ex_env.wpj
BitKeeper/deleted/.del-ex_mpool.wpj~4479cfd5c45f327d:
  Delete: bdb/build_vxworks/ex_mpool/ex_mpool.wpj
BitKeeper/deleted/.del-ex_tpcb.wpj~f78093006e14bf41:
  Delete: bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj
BitKeeper/deleted/.del-db_buildall.dsp~bd749ff6da11682:
  Delete: bdb/build_win32/db_buildall.dsp
BitKeeper/deleted/.del-cxx_app.cpp~ad8df8e0791011ed:
  Delete: bdb/cxx/cxx_app.cpp
BitKeeper/deleted/.del-cxx_log.cpp~a50ff3118fe06952:
  Delete: bdb/cxx/cxx_log.cpp
BitKeeper/deleted/.del-cxx_table.cpp~ecd751e79b055556:
  Delete: bdb/cxx/cxx_table.cpp
BitKeeper/deleted/.del-namemap.txt~796a3acd3885d8fd:
  Delete: bdb/cxx/namemap.txt
BitKeeper/deleted/.del-Design.fileop~3ca4da68f1727373:
  Delete: bdb/db/Design.fileop
BitKeeper/deleted/.del-db185_int.h~61bee3736e7959ef:
  Delete: bdb/db185/db185_int.h
BitKeeper/deleted/.del-acconfig.h~411e8854d67ad8b5:
  Delete: bdb/dist/acconfig.h
BitKeeper/deleted/.del-mutex.m4~a13383cde18a64e1:
  Delete: bdb/dist/aclocal/mutex.m4
BitKeeper/deleted/.del-options.m4~b9d0ca637213750a:
  Delete: bdb/dist/aclocal/options.m4
BitKeeper/deleted/.del-programs.m4~3ce7890b47732b30:
  Delete: bdb/dist/aclocal/programs.m4
BitKeeper/deleted/.del-tcl.m4~f944e2db93c3b6db:
  Delete: bdb/dist/aclocal/tcl.m4
BitKeeper/deleted/.del-types.m4~59cae158c9a32cff:
  Delete: bdb/dist/aclocal/types.m4
BitKeeper/deleted/.del-script~d38f6d3a4f159cb4:
  Delete: bdb/dist/build/script
BitKeeper/deleted/.del-configure.in~ac795a92c8fe049c:
  Delete: bdb/dist/configure.in
BitKeeper/deleted/.del-ltconfig~66bbd007d8024af:
  Delete: bdb/dist/ltconfig
BitKeeper/deleted/.del-rec_ctemp~a28554362534f00a:
  Delete: bdb/dist/rec_ctemp
BitKeeper/deleted/.del-s_tcl~2ffe4326459fcd9f:
  Delete: bdb/dist/s_tcl
BitKeeper/deleted/.del-.IGNORE_ME~d8148b08fa7d5d15:
  Delete: bdb/dist/template/.IGNORE_ME
BitKeeper/deleted/.del-btree.h~179f2aefec1753d:
  Delete: bdb/include/btree.h
BitKeeper/deleted/.del-cxx_int.h~6b649c04766508f8:
  Delete: bdb/include/cxx_int.h
BitKeeper/deleted/.del-db.src~6b433ae615b16a8d:
  Delete: bdb/include/db.src
BitKeeper/deleted/.del-db_185.h~ad8b373d9391d35c:
  Delete: bdb/include/db_185.h
BitKeeper/deleted/.del-db_am.h~a714912b6b75932f:
  Delete: bdb/include/db_am.h
BitKeeper/deleted/.del-db_cxx.h~fcafadf45f5d19e9:
  Delete: bdb/include/db_cxx.h
BitKeeper/deleted/.del-db_dispatch.h~6844f20f7eb46904:
  Delete: bdb/include/db_dispatch.h
BitKeeper/deleted/.del-db_int.src~419a3f48b6a01da7:
  Delete: bdb/include/db_int.src
BitKeeper/deleted/.del-db_join.h~76f9747a42c3399a:
  Delete: bdb/include/db_join.h
BitKeeper/deleted/.del-db_page.h~e302ca3a4db3abdc:
  Delete: bdb/include/db_page.h
BitKeeper/deleted/.del-db_server_int.h~e1d20b6ba3bca1ab:
  Delete: bdb/include/db_server_int.h
BitKeeper/deleted/.del-db_shash.h~5fbf2d696fac90f3:
  Delete: bdb/include/db_shash.h
BitKeeper/deleted/.del-db_swap.h~1e60887550864a59:
  Delete: bdb/include/db_swap.h
BitKeeper/deleted/.del-db_upgrade.h~c644eee73701fc8d:
  Delete: bdb/include/db_upgrade.h
BitKeeper/deleted/.del-db_verify.h~b8d6c297c61f342e:
  Delete: bdb/include/db_verify.h
BitKeeper/deleted/.del-debug.h~dc2b4f2cf27ccebc:
  Delete: bdb/include/debug.h
BitKeeper/deleted/.del-hash.h~2aaa548b28882dfb:
  Delete: bdb/include/hash.h
BitKeeper/deleted/.del-lock.h~a761c1b7de57b77f:
  Delete: bdb/include/lock.h
BitKeeper/deleted/.del-log.h~ff20184238e35e4d:
  Delete: bdb/include/log.h
BitKeeper/deleted/.del-mp.h~7e317597622f3411:
  Delete: bdb/include/mp.h
BitKeeper/deleted/.del-mutex.h~d3ae7a2977a68137:
  Delete: bdb/include/mutex.h
BitKeeper/deleted/.del-os.h~91867cc8757cd0e3:
  Delete: bdb/include/os.h
BitKeeper/deleted/.del-os_jump.h~e1b939fa5151d4be:
  Delete: bdb/include/os_jump.h
BitKeeper/deleted/.del-qam.h~6fad0c1b5723d597:
  Delete: bdb/include/qam.h
BitKeeper/deleted/.del-queue.h~4c72c0826c123d5:
  Delete: bdb/include/queue.h
BitKeeper/deleted/.del-region.h~513fe04d977ca0fc:
  Delete: bdb/include/region.h
BitKeeper/deleted/.del-shqueue.h~525fc3e6c2025c36:
  Delete: bdb/include/shqueue.h
BitKeeper/deleted/.del-tcl_db.h~c536fd61a844f23f:
  Delete: bdb/include/tcl_db.h
BitKeeper/deleted/.del-txn.h~c8d94b221ec147e4:
  Delete: bdb/include/txn.h
BitKeeper/deleted/.del-xa.h~ecc466493aae9d9a:
  Delete: bdb/include/xa.h
BitKeeper/deleted/.del-DbRecoveryInit.java~756b52601a0b9023:
  Delete: bdb/java/src/com/sleepycat/db/DbRecoveryInit.java
BitKeeper/deleted/.del-DbTxnRecover.java~74607cba7ab89d6d:
  Delete: bdb/java/src/com/sleepycat/db/DbTxnRecover.java
BitKeeper/deleted/.del-lock_conflict.c~fc5e0f14cf597a2b:
  Delete: bdb/lock/lock_conflict.c
BitKeeper/deleted/.del-log.src~53ac9e7b5cb023f2:
  Delete: bdb/log/log.src
BitKeeper/deleted/.del-log_findckp.c~24287f008916e81f:
  Delete: bdb/log/log_findckp.c
BitKeeper/deleted/.del-log_rec.c~d51711f2cac09297:
  Delete: bdb/log/log_rec.c
BitKeeper/deleted/.del-log_register.c~b40bb4efac75ca15:
  Delete: bdb/log/log_register.c
BitKeeper/deleted/.del-Design~b3d0f179f2767b:
  Delete: bdb/mp/Design
BitKeeper/deleted/.del-os_finit.c~95dbefc6fe79b26c:
  Delete: bdb/os/os_finit.c
BitKeeper/deleted/.del-os_abs.c~df95d1e7db81924:
  Delete: bdb/os_vxworks/os_abs.c
BitKeeper/deleted/.del-os_finit.c~803b484bdb9d0122:
  Delete: bdb/os_vxworks/os_finit.c
BitKeeper/deleted/.del-os_map.c~3a6d7926398b76d3:
  Delete: bdb/os_vxworks/os_map.c
BitKeeper/deleted/.del-os_finit.c~19a227c6d3c78ad:
  Delete: bdb/os_win32/os_finit.c
BitKeeper/deleted/.del-log-corruption.patch~1cf2ecc7c6408d5d:
  Delete: bdb/patches/log-corruption.patch
BitKeeper/deleted/.del-Btree.pm~af6d0c5eaed4a98e:
  Delete: bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm
BitKeeper/deleted/.del-BerkeleyDB.pm~7244036d4482643:
  Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pm
BitKeeper/deleted/.del-BerkeleyDB.pod~e7b18fd6132448e3:
  Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pod
BitKeeper/deleted/.del-Hash.pm~10292a26c06a5c95:
  Delete: bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm
BitKeeper/deleted/.del-BerkeleyDB.pod.P~79f76a1495eda203:
  Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pod.P
BitKeeper/deleted/.del-BerkeleyDB.xs~80c99afbd98e392c:
  Delete: bdb/perl.BerkeleyDB/BerkeleyDB.xs
BitKeeper/deleted/.del-Changes~729c1891efa60de9:
  Delete: bdb/perl.BerkeleyDB/Changes
BitKeeper/deleted/.del-MANIFEST~63a1e34aecf157a0:
  Delete: bdb/perl.BerkeleyDB/MANIFEST
BitKeeper/deleted/.del-Makefile.PL~c68797707d8df87a:
  Delete: bdb/perl.BerkeleyDB/Makefile.PL
BitKeeper/deleted/.del-README~5f2f579b1a241407:
  Delete: bdb/perl.BerkeleyDB/README
BitKeeper/deleted/.del-Todo~dca3c66c193adda9:
  Delete: bdb/perl.BerkeleyDB/Todo
BitKeeper/deleted/.del-config.in~ae81681e450e0999:
  Delete: bdb/perl.BerkeleyDB/config.in
BitKeeper/deleted/.del-dbinfo~28ad67d83be4f68e:
  Delete: bdb/perl.BerkeleyDB/dbinfo
BitKeeper/deleted/.del-mkconsts~543ab60669c7a04e:
  Delete: bdb/perl.BerkeleyDB/mkconsts
BitKeeper/deleted/.del-mkpod~182c0ca54e439afb:
  Delete: bdb/perl.BerkeleyDB/mkpod
BitKeeper/deleted/.del-5.004~e008cb5a48805543:
  Delete: bdb/perl.BerkeleyDB/patches/5.004
BitKeeper/deleted/.del-irix_6_5.pl~61662bb08afcdec8:
  Delete: bdb/perl.BerkeleyDB/hints/irix_6_5.pl
BitKeeper/deleted/.del-solaris.pl~6771e7182394e152:
  Delete: bdb/perl.BerkeleyDB/hints/solaris.pl
BitKeeper/deleted/.del-typemap~783b8f5295b05f3d:
  Delete: bdb/perl.BerkeleyDB/typemap
BitKeeper/deleted/.del-5.004_01~6081ce2fff7b0bc:
  Delete: bdb/perl.BerkeleyDB/patches/5.004_01
BitKeeper/deleted/.del-5.004_02~87214eac35ad9e6:
  Delete: bdb/perl.BerkeleyDB/patches/5.004_02
BitKeeper/deleted/.del-5.004_03~9a672becec7cb40f:
  Delete: bdb/perl.BerkeleyDB/patches/5.004_03
BitKeeper/deleted/.del-5.004_04~e326cb51af09d154:
  Delete: bdb/perl.BerkeleyDB/patches/5.004_04
BitKeeper/deleted/.del-5.004_05~7ab457a1e41a92fe:
  Delete: bdb/perl.BerkeleyDB/patches/5.004_05
BitKeeper/deleted/.del-5.005~f9e2d59b5964cd4b:
  Delete: bdb/perl.BerkeleyDB/patches/5.005
BitKeeper/deleted/.del-5.005_01~3eb9fb7b5842ea8e:
  Delete: bdb/perl.BerkeleyDB/patches/5.005_01
BitKeeper/deleted/.del-5.005_02~67477ce0bef717cb:
  Delete: bdb/perl.BerkeleyDB/patches/5.005_02
BitKeeper/deleted/.del-5.005_03~c4c29a1fb21e290a:
  Delete: bdb/perl.BerkeleyDB/patches/5.005_03
BitKeeper/deleted/.del-5.6.0~e1fb9897d124ee22:
  Delete: bdb/perl.BerkeleyDB/patches/5.6.0
BitKeeper/deleted/.del-btree.t~e4a1a3c675ddc406:
  Delete: bdb/perl.BerkeleyDB/t/btree.t
BitKeeper/deleted/.del-db-3.0.t~d2c60991d84558f2:
  Delete: bdb/perl.BerkeleyDB/t/db-3.0.t
BitKeeper/deleted/.del-db-3.1.t~6ee88cd13f55e018:
  Delete: bdb/perl.BerkeleyDB/t/db-3.1.t
BitKeeper/deleted/.del-db-3.2.t~f73b6461f98fd1cf:
  Delete: bdb/perl.BerkeleyDB/t/db-3.2.t
BitKeeper/deleted/.del-destroy.t~cc6a2ae1980a2ecd:
  Delete: bdb/perl.BerkeleyDB/t/destroy.t
BitKeeper/deleted/.del-env.t~a8604a4499c4bd07:
  Delete: bdb/perl.BerkeleyDB/t/env.t
BitKeeper/deleted/.del-examples.t~2571b77c3cc75574:
  Delete: bdb/perl.BerkeleyDB/t/examples.t
BitKeeper/deleted/.del-examples.t.T~8228bdd75ac78b88:
  Delete: bdb/perl.BerkeleyDB/t/examples.t.T
BitKeeper/deleted/.del-examples3.t.T~66a186897a87026d:
  Delete: bdb/perl.BerkeleyDB/t/examples3.t.T
BitKeeper/deleted/.del-examples3.t~fe3822ba2f2d7f83:
  Delete: bdb/perl.BerkeleyDB/t/examples3.t
BitKeeper/deleted/.del-filter.t~f87b045c1b708637:
  Delete: bdb/perl.BerkeleyDB/t/filter.t
BitKeeper/deleted/.del-hash.t~616bfb4d644de3a3:
  Delete: bdb/perl.BerkeleyDB/t/hash.t
BitKeeper/deleted/.del-join.t~29fc39f74a83ca22:
  Delete: bdb/perl.BerkeleyDB/t/join.t
BitKeeper/deleted/.del-mldbm.t~31f5015341eea040:
  Delete: bdb/perl.BerkeleyDB/t/mldbm.t
BitKeeper/deleted/.del-queue.t~8f338034ce44a641:
  Delete: bdb/perl.BerkeleyDB/t/queue.t
BitKeeper/deleted/.del-recno.t~d4ddbd3743add63e:
  Delete: bdb/perl.BerkeleyDB/t/recno.t
BitKeeper/deleted/.del-strict.t~6885cdd2ea71ca2d:
  Delete: bdb/perl.BerkeleyDB/t/strict.t
BitKeeper/deleted/.del-subdb.t~aab62a5d5864c603:
  Delete: bdb/perl.BerkeleyDB/t/subdb.t
BitKeeper/deleted/.del-txn.t~65033b8558ae1216:
  Delete: bdb/perl.BerkeleyDB/t/txn.t
BitKeeper/deleted/.del-unknown.t~f3710458682665e1:
  Delete: bdb/perl.BerkeleyDB/t/unknown.t
BitKeeper/deleted/.del-Changes~436f74a5c414c65b:
  Delete: bdb/perl.DB_File/Changes
BitKeeper/deleted/.del-DB_File.pm~ae0951c6c7665a82:
  Delete: bdb/perl.DB_File/DB_File.pm
BitKeeper/deleted/.del-DB_File.xs~89e49a0b5556f1d8:
  Delete: bdb/perl.DB_File/DB_File.xs
BitKeeper/deleted/.del-DB_File_BS~290fad5dbbb87069:
  Delete: bdb/perl.DB_File/DB_File_BS
BitKeeper/deleted/.del-MANIFEST~90ee581572bdd4ac:
  Delete: bdb/perl.DB_File/MANIFEST
BitKeeper/deleted/.del-Makefile.PL~ac0567bb5a377e38:
  Delete: bdb/perl.DB_File/Makefile.PL
BitKeeper/deleted/.del-README~77e924a5a9bae6b3:
  Delete: bdb/perl.DB_File/README
BitKeeper/deleted/.del-config.in~ab4c2792b86a810b:
  Delete: bdb/perl.DB_File/config.in
BitKeeper/deleted/.del-dbinfo~461c43b30fab2cb:
  Delete: bdb/perl.DB_File/dbinfo
BitKeeper/deleted/.del-dynixptx.pl~50dcddfae25d17e9:
  Delete: bdb/perl.DB_File/hints/dynixptx.pl
BitKeeper/deleted/.del-typemap~55cffb3288a9e587:
  Delete: bdb/perl.DB_File/typemap
BitKeeper/deleted/.del-version.c~a4df0e646f8b3975:
  Delete: bdb/perl.DB_File/version.c
BitKeeper/deleted/.del-5.004_01~d6830d0082702af7:
  Delete: bdb/perl.DB_File/patches/5.004_01
BitKeeper/deleted/.del-5.004_02~78b082dc80c91031:
  Delete: bdb/perl.DB_File/patches/5.004_02
BitKeeper/deleted/.del-5.004~4411ec2e3c9e008b:
  Delete: bdb/perl.DB_File/patches/5.004
BitKeeper/deleted/.del-sco.pl~1e795fe14fe4dcfe:
  Delete: bdb/perl.DB_File/hints/sco.pl
BitKeeper/deleted/.del-5.004_03~33f274648b160d95:
  Delete: bdb/perl.DB_File/patches/5.004_03
BitKeeper/deleted/.del-5.004_04~8f3d1b3cf18bb20a:
  Delete: bdb/perl.DB_File/patches/5.004_04
BitKeeper/deleted/.del-5.004_05~9c0f02e7331e142:
  Delete: bdb/perl.DB_File/patches/5.004_05
BitKeeper/deleted/.del-5.005~c2108cb2e3c8d951:
  Delete: bdb/perl.DB_File/patches/5.005
BitKeeper/deleted/.del-5.005_01~3b45e9673afc4cfa:
  Delete: bdb/perl.DB_File/patches/5.005_01
BitKeeper/deleted/.del-5.005_02~9fe5766bb02a4522:
  Delete: bdb/perl.DB_File/patches/5.005_02
BitKeeper/deleted/.del-5.005_03~ffa1c38c19ae72ea:
  Delete: bdb/perl.DB_File/patches/5.005_03
BitKeeper/deleted/.del-5.6.0~373be3a5ce47be85:
  Delete: bdb/perl.DB_File/patches/5.6.0
BitKeeper/deleted/.del-db-btree.t~3231595a1c241eb3:
  Delete: bdb/perl.DB_File/t/db-btree.t
BitKeeper/deleted/.del-db-hash.t~7c4ad0c795c7fad2:
  Delete: bdb/perl.DB_File/t/db-hash.t
BitKeeper/deleted/.del-db-recno.t~6c2d3d80b9ba4a50:
  Delete: bdb/perl.DB_File/t/db-recno.t
BitKeeper/deleted/.del-db_server.sed~cdb00ebcd48a64e2:
  Delete: bdb/rpc_server/db_server.sed
BitKeeper/deleted/.del-db_server_proc.c~d46c8f409c3747f4:
  Delete: bdb/rpc_server/db_server_proc.c
BitKeeper/deleted/.del-db_server_svc.sed~3f5e59f334fa4607:
  Delete: bdb/rpc_server/db_server_svc.sed
BitKeeper/deleted/.del-db_server_util.c~a809f3a4629acda:
  Delete: bdb/rpc_server/db_server_util.c
BitKeeper/deleted/.del-log.tcl~ff1b41f1355b97d7:
  Delete: bdb/test/log.tcl
BitKeeper/deleted/.del-mpool.tcl~b0df4dc1b04db26c:
  Delete: bdb/test/mpool.tcl
BitKeeper/deleted/.del-mutex.tcl~52fd5c73a150565:
  Delete: bdb/test/mutex.tcl
BitKeeper/deleted/.del-txn.tcl~c4ff071550b5446e:
  Delete: bdb/test/txn.tcl
BitKeeper/deleted/.del-README~e800a12a5392010a:
  Delete: bdb/test/upgrade/README
BitKeeper/deleted/.del-pack-2.6.6.pl~89d5076d758d3e98:
  Delete: bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
BitKeeper/deleted/.del-test-2.6.patch~4a52dc83d447547b:
  Delete: bdb/test/upgrade/generate-2.X/test-2.6.patch
This commit is contained in:
unknown 2002-10-30 15:57:05 +04:00
commit 155e78f014
1191 changed files with 170446 additions and 57453 deletions

File diff suppressed because it is too large Load diff

View file

@ -1,33 +1,14 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: archive.tcl,v 11.14 2000/10/27 13:23:55 sue Exp $
# $Id: archive.tcl,v 11.20 2002/04/30 19:21:21 sue Exp $
#
# Options are:
# -checkrec <checkpoint frequency"
# -dir <dbhome directory>
# -maxfilesize <maxsize of log file>
# -stat
proc archive_usage {} {
puts "archive -checkrec <checkpt freq> -dir <directory> \
-maxfilesize <max size of log files>"
}
proc archive_command { args } {
source ./include.tcl
# Catch a list of files output by db_archive.
catch { eval exec $util_path/db_archive $args } output
if { $is_windows_test == 1 || 1 } {
# On Windows, convert all filenames to use forward slashes.
regsub -all {[\\]} $output / output
}
# Output the [possibly-transformed] list.
return $output
}
proc archive { args } {
global alphabet
source ./include.tcl
@ -35,17 +16,16 @@ proc archive { args } {
# Set defaults
set maxbsize [expr 8 * 1024]
set maxfile [expr 32 * 1024]
set dostat 0
set checkrec 500
for { set i 0 } { $i < [llength $args] } {incr i} {
switch -regexp -- [lindex $args $i] {
-c.* { incr i; set checkrec [lindex $args $i] }
-d.* { incr i; set testdir [lindex $args $i] }
-m.* { incr i; set maxfile [lindex $args $i] }
-s.* { set dostat 1 }
default {
puts -nonewline "FAIL:[timestamp] Usage: "
archive_usage
puts "FAIL:[timestamp] archive usage"
puts "usage: archive -checkrec <checkpt freq> \
-dir <directory> -maxfilesize <max size of log files>"
return
}
@ -53,16 +33,20 @@ proc archive { args } {
}
# Clean out old log if it existed
puts "Archive: Log archive test"
puts "Unlinking log: error message OK"
env_cleanup $testdir
# Now run the various functionality tests
set eflags "-create -txn -home $testdir \
-log_buffer $maxbsize -log_max $maxfile"
set dbenv [eval {berkdb env} $eflags]
set dbenv [eval {berkdb_env} $eflags]
error_check_bad dbenv $dbenv NULL
error_check_good dbenv [is_substr $dbenv env] 1
set logc [$dbenv log_cursor]
error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
# The basic test structure here is that we write a lot of log
# records (enough to fill up 100 log files; each log file it
# small). We take periodic checkpoints. Between each pair
@ -75,7 +59,7 @@ proc archive { args } {
# open data file and CDx is close datafile.
set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
puts "Archive.a: Writing log records; checkpoint every $checkrec records"
puts "\tArchive.a: Writing log records; checkpoint every $checkrec records"
set nrecs $maxfile
set rec 0:$baserec
@ -111,7 +95,7 @@ proc archive { args } {
if { [expr $i % $checkrec] == 0 } {
# Take a checkpoint
$dbenv txn_checkpoint
set ckp_file [lindex [lindex [$dbenv log_get -last] 0] 0]
set ckp_file [lindex [lindex [$logc get -last] 0] 0]
catch { archive_command -h $testdir -a } res_log_full
if { [string first db_archive $res_log_full] == 0 } {
set res_log_full ""
@ -125,7 +109,7 @@ proc archive { args } {
res_data_full
catch { archive_command -h $testdir -s } res_data
error_check_good nlogfiles [llength $res_alllog] \
[lindex [lindex [$dbenv log_get -last] 0] 0]
[lindex [lindex [$logc get -last] 0] 0]
error_check_good logs_match [llength $res_log_full] \
[llength $res_log]
error_check_good data_match [llength $res_data_full] \
@ -206,21 +190,35 @@ proc archive { args } {
}
}
# Commit any transactions still running.
puts "Archive: Commit any transactions still running."
puts "\tArchive.b: Commit any transactions still running."
foreach t $txnlist {
error_check_good txn_commit:$t [$t commit] 0
}
# Close any files that are still open.
puts "Archive: Close open files."
puts "\tArchive.c: Close open files."
foreach d $dblist {
error_check_good db_close:$db [$d close] 0
}
# Close and unlink the file
error_check_good log_cursor_close [$logc close] 0
reset_env $dbenv
}
puts "Archive: Complete."
proc archive_command { args } {
source ./include.tcl
# Catch a list of files output by db_archive.
catch { eval exec $util_path/db_archive $args } output
if { $is_windows_test == 1 || 1 } {
# On Windows, convert all filenames to use forward slashes.
regsub -all {[\\]} $output / output
}
# Output the [possibly-transformed] list.
return $output
}
proc min { a b } {

85
bdb/test/bigfile001.tcl Normal file
View file

@ -0,0 +1,85 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2001-2002
# Sleepycat Software. All rights reserved.
#
# $Id: bigfile001.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
#
# TEST bigfile001
# TEST Create a database greater than 4 GB in size. Close, verify.
# TEST Grow the database somewhat. Close, reverify. Lather, rinse,
# TEST repeat. Since it will not work on all systems, this test is
# TEST not run by default.
proc bigfile001 { method \
{ itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data"
env_cleanup $testdir
# Create the database. Use 64K pages; we want a good fill
# factor, and page size doesn't matter much. Use a 50MB
# cache; that should be manageable, and will help
# performance.
set dbname $testdir/big.db
set db [eval {berkdb_open -create} {-pagesize 65536 \
-cachesize {0 50000000 0}} $omethod $args $dbname]
error_check_good db_open [is_valid_db $db] TRUE
puts -nonewline "\tBigfile.a: Creating database...0%..."
flush stdout
set data [string repeat z $itemsize]
set more_than_ten_already 0
for { set i 0 } { $i < $nitems } { incr i } {
set key key[format %08u $i]
error_check_good db_put($i) [$db put $key $data] 0
if { $i % 5000 == 0 } {
set pct [expr 100 * $i / $nitems]
puts -nonewline "\b\b\b\b\b"
if { $pct >= 10 } {
if { $more_than_ten_already } {
puts -nonewline "\b"
} else {
set more_than_ten_already 1
}
}
puts -nonewline "$pct%..."
flush stdout
}
}
puts "\b\b\b\b\b\b100%..."
error_check_good db_close [$db close] 0
puts "\tBigfile.b: Verifying database..."
error_check_good verify \
[verify_dir $testdir "\t\t" 0 0 1 50000000] 0
puts "\tBigfile.c: Grow database $growtms times by $growby items"
for { set j 0 } { $j < $growtms } { incr j } {
set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
error_check_good db_open [is_valid_db $db] TRUE
puts -nonewline "\t\tBigfile.c.1: Adding $growby items..."
flush stdout
for { set i 0 } { $i < $growby } { incr i } {
set key key[format %08u $i].$j
error_check_good db_put($j.$i) [$db put $key $data] 0
}
error_check_good db_close [$db close] 0
puts "done."
puts "\t\tBigfile.c.2: Verifying database..."
error_check_good verify($j) \
[verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
}
}

45
bdb/test/bigfile002.tcl Normal file
View file

@ -0,0 +1,45 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2001-2002
# Sleepycat Software. All rights reserved.
#
# $Id: bigfile002.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
#
# TEST bigfile002
# TEST This one should be faster and not require so much disk space,
# TEST although it doesn't test as extensively. Create an mpool file
# TEST with 1K pages. Dirty page 6000000. Sync.
proc bigfile002 { args } {
source ./include.tcl
puts -nonewline \
"Bigfile002: Creating large, sparse file through mpool..."
flush stdout
env_cleanup $testdir
# Create env.
set env [berkdb_env -create -home $testdir]
error_check_good valid_env [is_valid_env $env] TRUE
# Create the file.
set name big002.file
set file [$env mpool -create -pagesize 1024 $name]
# Dirty page 6000000
set pg [$file get -create 6000000]
error_check_good pg_init [$pg init A] 0
error_check_good pg_set [$pg is_setto A] 1
# Put page back.
error_check_good pg_put [$pg put -dirty] 0
# Fsync.
error_check_good fsync [$file fsync] 0
puts "succeeded."
# Close.
error_check_good fclose [$file close] 0
error_check_good env_close [$env close] 0
}

View file

@ -1,23 +1,34 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: byteorder.tcl,v 11.7 2000/11/16 23:56:18 ubell Exp $
# $Id: byteorder.tcl,v 11.12 2002/07/29 18:09:25 sue Exp $
#
# Byte Order Test
# Use existing tests and run with both byte orders.
proc byteorder { method {nentries 1000} } {
source ./include.tcl
puts "Byteorder: $method $nentries"
eval {test001 $method $nentries 0 "01" -lorder 1234}
eval {test001 $method $nentries 0 "01" -lorder 4321}
eval {test001 $method $nentries 0 "01" 0 -lorder 1234}
eval {verify_dir $testdir}
eval {test001 $method $nentries 0 "01" 0 -lorder 4321}
eval {verify_dir $testdir}
eval {test003 $method -lorder 1234}
eval {verify_dir $testdir}
eval {test003 $method -lorder 4321}
eval {verify_dir $testdir}
eval {test010 $method $nentries 5 10 -lorder 1234}
eval {verify_dir $testdir}
eval {test010 $method $nentries 5 10 -lorder 4321}
eval {verify_dir $testdir}
eval {test011 $method $nentries 5 11 -lorder 1234}
eval {verify_dir $testdir}
eval {test011 $method $nentries 5 11 -lorder 4321}
eval {verify_dir $testdir}
eval {test018 $method $nentries -lorder 1234}
eval {verify_dir $testdir}
eval {test018 $method $nentries -lorder 4321}
eval {verify_dir $testdir}
}

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: conscript.tcl,v 11.12 2000/12/01 04:28:36 ubell Exp $
# $Id: conscript.tcl,v 11.17 2002/03/22 21:43:06 krinsky Exp $
#
# Script for DB_CONSUME test (test070.tcl).
# Usage: conscript dir file runtype nitems outputfile tnum args
@ -28,17 +28,18 @@ proc consumescript_produce { db_cmd nitems tnum args } {
set ret 0
for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
set oret $ret
if { 0xffffffff > 0 && $oret > 0x7fffffff } {
incr oret [expr 0 - 0x100000000]
}
set ret [$db put -append [chop_data q $mydata]]
error_check_good db_put \
[expr $ret > 0 ? $oret < $ret : \
$oret < 0 ? $oret < $ret : $oret > $ret] 1
}
# XXX: We permit incomplete syncs because they seem to
# be unavoidable and not damaging.
set ret [catch {$db close} res]
error_check_good db_close:$pid [expr ($ret == 0) ||\
([is_substr $res DB_INCOMPLETE] == 1)] 1
error_check_good db_close:$pid $ret 0
puts "\t\tTest0$tnum: Producer $pid finished."
}
@ -67,10 +68,9 @@ proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
}
error_check_good output_close:$pid [close $oid] ""
# XXX: see above note.
set ret [catch {$db close} res]
error_check_good db_close:$pid [expr ($ret == 0) ||\
([is_substr $res DB_INCOMPLETE] == 1)] 1
error_check_good db_close:$pid $ret 0
puts "\t\tTest0$tnum: Consumer $pid finished."
}
@ -99,7 +99,7 @@ set args [lindex [lrange $argv 6 end] 0]
set mydata "consumer data"
# Open env
set dbenv [berkdb env -home $dir ]
set dbenv [berkdb_env -home $dir ]
error_check_good db_env_create [is_valid_env $dbenv] TRUE
# Figure out db opening command.

View file

@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dbm.tcl,v 11.12 2000/08/25 14:21:50 sue Exp $
# $Id: dbm.tcl,v 11.15 2002/01/11 15:53:19 bostic Exp $
#
# Historic DBM interface test.
# Use the first 1000 entries from the dictionary.
# Insert each with self as key and data; retrieve each.
# After all are entered, retrieve all; compare output to original.
# Then reopen the file, re-retrieve everything.
# Finally, delete everything.
# TEST dbm
# TEST Historic DBM interface test. Use the first 1000 entries from the
# TEST dictionary. Insert each with self as key and data; retrieve each.
# TEST After all are entered, retrieve all; compare output to original.
# TEST Then reopen the file, re-retrieve everything. Finally, delete
# TEST everything.
proc dbm { { nentries 1000 } } {
source ./include.tcl

View file

@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dbscript.tcl,v 11.10 2000/04/21 18:36:21 krinsky Exp $
# $Id: dbscript.tcl,v 11.14 2002/04/01 16:28:16 bostic Exp $
#
# Random db tester.
# Usage: dbscript file numops min_del max_add key_avg data_avgdups
# method: method (we pass this in so that fixed-length records work)
# file: db file on which to operate
# numops: number of operations to do
# ncurs: number of cursors
@ -22,26 +23,25 @@ source ./include.tcl
source $test_path/test.tcl
source $test_path/testutils.tcl
set alphabet "abcdefghijklmnopqrstuvwxyz"
set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt"
# Verify usage
if { $argc != 9 } {
if { $argc != 10 } {
puts stderr "FAIL:[timestamp] Usage: $usage"
exit
}
# Initialize arguments
set file [lindex $argv 0]
set numops [ lindex $argv 1 ]
set ncurs [ lindex $argv 2 ]
set min_del [ lindex $argv 3 ]
set max_add [ lindex $argv 4 ]
set key_avg [ lindex $argv 5 ]
set data_avg [ lindex $argv 6 ]
set dups [ lindex $argv 7 ]
set errpct [ lindex $argv 8 ]
set method [lindex $argv 0]
set file [lindex $argv 1]
set numops [ lindex $argv 2 ]
set ncurs [ lindex $argv 3 ]
set min_del [ lindex $argv 4 ]
set max_add [ lindex $argv 5 ]
set key_avg [ lindex $argv 6 ]
set data_avg [ lindex $argv 7 ]
set dups [ lindex $argv 8 ]
set errpct [ lindex $argv 9 ]
berkdb srand $rand_init
@ -68,7 +68,7 @@ if {$cerr != 0} {
puts $cret
return
}
set method [$db get_type]
# set method [$db get_type]
set record_based [is_record_based $method]
# Initialize globals including data

172
bdb/test/ddoyscript.tcl Normal file
View file

@ -0,0 +1,172 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: ddoyscript.tcl,v 11.6 2002/02/20 16:35:18 sandstro Exp $
#
# Deadlock detector script tester.
# Usage: ddoyscript dir lockerid numprocs
# dir: DBHOME directory
# lockerid: Lock id for this locker
# numprocs: Total number of processes running
# myid: id of this process --
# the order that the processes are created is the same
# in which their lockerid's were allocated so we know
# that there is a locker age relationship that is isomorphic
# with the order releationship of myid's.
source ./include.tcl
source $test_path/test.tcl
source $test_path/testutils.tcl
set usage "ddoyscript dir lockerid numprocs oldoryoung"
# Verify usage
if { $argc != 5 } {
puts stderr "FAIL:[timestamp] Usage: $usage"
exit
}
# Initialize arguments
set dir [lindex $argv 0]
set lockerid [ lindex $argv 1 ]
set numprocs [ lindex $argv 2 ]
set old_or_young [lindex $argv 3]
set myid [lindex $argv 4]
set myenv [berkdb_env -lock -home $dir -create -mode 0644]
error_check_bad lock_open $myenv NULL
error_check_good lock_open [is_substr $myenv "env"] 1
# There are two cases here -- oldest/youngest or a ring locker.
if { $myid == 0 || $myid == [expr $numprocs - 1] } {
set waitobj NULL
set ret 0
if { $myid == 0 } {
set objid 2
if { $old_or_young == "o" } {
set waitobj [expr $numprocs - 1]
}
} else {
if { $old_or_young == "y" } {
set waitobj 0
}
set objid 4
}
# Acquire own read lock
if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} {
puts $errorInfo
} else {
error_check_good selfget:$objid [is_substr $selflock $myenv] 1
}
# Acquire read lock
if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} {
puts $errorInfo
} else {
error_check_good lockget:$objid [is_substr $lock1 $myenv] 1
}
tclsleep 10
if { $waitobj == "NULL" } {
# Sleep for a good long while
tclsleep 90
} else {
# Acquire write lock
if {[catch {$myenv lock_get write $lockerid $waitobj} lock2]
!= 0} {
puts $errorInfo
set ret ERROR
} else {
error_check_good lockget:$waitobj \
[is_substr $lock2 $myenv] 1
# Now release it
if {[catch {$lock2 put} err] != 0} {
puts $errorInfo
set ret ERROR
} else {
error_check_good lockput:oy:$objid $err 0
}
}
}
# Release self lock
if {[catch {$selflock put} err] != 0} {
puts $errorInfo
if { $ret == 0 } {
set ret ERROR
}
} else {
error_check_good selfput:oy:$myid $err 0
if { $ret == 0 } {
set ret 1
}
}
# Release first lock
if {[catch {$lock1 put} err] != 0} {
puts $errorInfo
if { $ret == 0 } {
set ret ERROR
}
} else {
error_check_good lockput:oy:$objid $err 0
if { $ret == 0 } {
set ret 1
}
}
} else {
# Make sure that we succeed if we're locking the same object as
# oldest or youngest.
if { [expr $myid % 2] == 0 } {
set mode read
} else {
set mode write
}
# Obtain first lock (should always succeed).
if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} {
puts $errorInfo
} else {
error_check_good lockget:$myid [is_substr $lock1 $myenv] 1
}
tclsleep 30
set nextobj [expr $myid + 1]
if { $nextobj == [expr $numprocs - 1] } {
set nextobj 1
}
set ret 1
if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} {
if {[string match "*DEADLOCK*" $lock2] == 1} {
set ret DEADLOCK
} else {
set ret ERROR
}
} else {
error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
}
# Now release the first lock
error_check_good lockput:$lock1 [$lock1 put] 0
if {$ret == 1} {
error_check_bad lockget:$nextobj $lock2 NULL
error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
error_check_good lockput:$lock2 [$lock2 put] 0
}
}
puts $ret
error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
error_check_good envclose [$myenv close] 0
exit

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: ddscript.tcl,v 11.7 2000/05/08 19:26:37 sue Exp $
# $Id: ddscript.tcl,v 11.12 2002/02/20 16:35:18 sandstro Exp $
#
# Deadlock detector script tester.
# Usage: ddscript dir test lockerid objid numprocs
@ -32,12 +32,13 @@ set lockerid [ lindex $argv 2 ]
set objid [ lindex $argv 3 ]
set numprocs [ lindex $argv 4 ]
set myenv [berkdb env -lock -home $dir -create -mode 0644]
set myenv [berkdb_env -lock -home $dir -create -mode 0644 ]
error_check_bad lock_open $myenv NULL
error_check_good lock_open [is_substr $myenv "env"] 1
puts [eval $tnum $myenv $lockerid $objid $numprocs]
error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
error_check_good envclose [$myenv close] 0
exit

View file

@ -1,56 +1,67 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dead001.tcl,v 11.17 2000/11/05 14:23:55 dda Exp $
# $Id: dead001.tcl,v 11.33 2002/09/05 17:23:05 sandstro Exp $
#
# Deadlock Test 1.
# We create various deadlock scenarios for different numbers of lockers
# and see if we can get the world cleaned up suitably.
proc dead001 { { procs "2 4 10" } {tests "ring clump" } } {
# TEST dead001
# TEST Use two different configurations to test deadlock detection among a
# TEST variable number of processes. One configuration has the processes
# TEST deadlocked in a ring. The other has the processes all deadlocked on
# TEST a single resource.
proc dead001 { { procs "2 4 10" } {tests "ring clump" } \
{timeout 0} {tnum "001"} } {
source ./include.tcl
global lock_curid
global lock_maxid
puts "Dead001: Deadlock detector tests"
puts "Dead$tnum: Deadlock detector tests"
env_cleanup $testdir
# Create the environment.
puts "\tDead001.a: creating environment"
set env [berkdb env -create -mode 0644 -lock -home $testdir]
puts "\tDead$tnum.a: creating environment"
set env [berkdb_env -create \
-mode 0644 -lock -txn_timeout $timeout -home $testdir]
error_check_good lock_env:open [is_valid_env $env] TRUE
error_check_good lock_env:close [$env close] 0
set dpid [exec $util_path/db_deadlock -vw -h $testdir \
>& $testdir/dd.out &]
foreach t $tests {
set pidlist ""
foreach n $procs {
if {$timeout == 0 } {
set dpid [exec $util_path/db_deadlock -vw \
-h $testdir >& $testdir/dd.out &]
} else {
set dpid [exec $util_path/db_deadlock -vw \
-ae -h $testdir >& $testdir/dd.out &]
}
sentinel_init
sentinel_init
set pidlist ""
set ret [$env lock_id_set $lock_curid $lock_maxid]
error_check_good lock_id_set $ret 0
# Fire off the tests
puts "\tDead001: $n procs of test $t"
puts "\tDead$tnum: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
set locker [$env lock_id]
puts "$tclsh_path $test_path/wrap.tcl \
$testdir/dead001.log.$i \
ddscript.tcl $testdir $t $i $i $n"
$testdir/dead$tnum.log.$i \
ddscript.tcl $testdir $t $locker $i $n"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
ddscript.tcl $testdir/dead001.log.$i \
$testdir $t $i $i $n &]
ddscript.tcl $testdir/dead$tnum.log.$i \
$testdir $t $locker $i $n &]
lappend pidlist $p
}
watch_procs 5
watch_procs $pidlist 5
# Now check output
set dead 0
set clean 0
set other 0
for { set i 0 } { $i < $n } { incr i } {
set did [open $testdir/dead001.log.$i]
set did [open $testdir/dead$tnum.log.$i]
while { [gets $did val] != -1 } {
switch $val {
DEADLOCK { incr dead }
@ -60,17 +71,18 @@ proc dead001 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
tclkill $dpid
puts "dead check..."
dead_check $t $n $dead $clean $other
dead_check $t $n $timeout $dead $clean $other
}
}
exec $KILL $dpid
# Windows needs files closed before deleting files, so pause a little
tclsleep 2
tclsleep 3
fileremove -f $testdir/dd.out
# Remove log files
for { set i 0 } { $i < $n } { incr i } {
fileremove -f $testdir/dead001.log.$i
fileremove -f $testdir/dead$tnum.log.$i
}
error_check_good lock_env:close [$env close] 0
}

View file

@ -1,52 +1,58 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dead002.tcl,v 11.15 2000/08/25 14:21:50 sue Exp $
# $Id: dead002.tcl,v 11.23 2002/09/05 17:23:05 sandstro Exp $
#
# Deadlock Test 2.
# Identical to Test 1 except that instead of running a standalone deadlock
# detector, we create the region with "detect on every wait"
proc dead002 { { procs "2 4 10" } {tests "ring clump" } } {
# TEST dead002
# TEST Same test as dead001, but use "detect on every collision" instead
# TEST of separate deadlock detector.
proc dead002 { { procs "2 4 10" } {tests "ring clump" } \
{timeout 0} {tnum 002} } {
source ./include.tcl
puts "Dead002: Deadlock detector tests"
puts "Dead$tnum: Deadlock detector tests"
env_cleanup $testdir
# Create the environment.
puts "\tDead002.a: creating environment"
set env [berkdb env \
-create -mode 0644 -home $testdir -lock -lock_detect default]
puts "\tDead$tnum.a: creating environment"
set lmode "default"
if { $timeout != 0 } {
set lmode "expire"
}
set env [berkdb_env \
-create -mode 0644 -home $testdir \
-lock -txn_timeout $timeout -lock_detect $lmode]
error_check_good lock_env:open [is_valid_env $env] TRUE
error_check_good lock_env:close [$env close] 0
foreach t $tests {
set pidlist ""
foreach n $procs {
set pidlist ""
sentinel_init
# Fire off the tests
puts "\tDead002: $n procs of test $t"
puts "\tDead$tnum: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
set locker [$env lock_id]
puts "$tclsh_path $test_path/wrap.tcl \
$testdir/dead002.log.$i \
ddscript.tcl $testdir $t $i $i $n"
$testdir/dead$tnum.log.$i \
ddscript.tcl $testdir $t $locker $i $n"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
ddscript.tcl $testdir/dead002.log.$i \
$testdir $t $i $i $n &]
ddscript.tcl $testdir/dead$tnum.log.$i \
$testdir $t $locker $i $n &]
lappend pidlist $p
}
watch_procs 5
watch_procs $pidlist 5
# Now check output
set dead 0
set clean 0
set other 0
for { set i 0 } { $i < $n } { incr i } {
set did [open $testdir/dead002.log.$i]
set did [open $testdir/dead$tnum.log.$i]
while { [gets $did val] != -1 } {
switch $val {
DEADLOCK { incr dead }
@ -56,13 +62,14 @@ proc dead002 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
dead_check $t $n $dead $clean $other
dead_check $t $n $timeout $dead $clean $other
}
}
fileremove -f $testdir/dd.out
# Remove log files
for { set i 0 } { $i < $n } { incr i } {
fileremove -f $testdir/dead002.log.$i
fileremove -f $testdir/dead$tnum.log.$i
}
error_check_good lock_env:close [$env close] 0
}

View file

@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dead003.tcl,v 1.8 2000/08/25 14:21:50 sue Exp $
# $Id: dead003.tcl,v 1.17 2002/09/05 17:23:05 sandstro Exp $
#
# Deadlock Test 3.
# Test DB_LOCK_OLDEST and DB_LOCK_YOUNGEST
# Identical to Test 2 except that we create the region with "detect on
# every wait" with first the "oldest" and then "youngest".
# TEST dead003
# TEST
# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
source ./include.tcl
global lock_curid
global lock_maxid
set detects { oldest youngest }
puts "Dead003: Deadlock detector tests: $detects"
@ -19,31 +21,34 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
foreach d $detects {
env_cleanup $testdir
puts "\tDead003.a: creating environment for $d"
set env [berkdb env \
set env [berkdb_env \
-create -mode 0644 -home $testdir -lock -lock_detect $d]
error_check_good lock_env:open [is_valid_env $env] TRUE
error_check_good lock_env:close [$env close] 0
foreach t $tests {
set pidlist ""
foreach n $procs {
sentinel_init
set pidlist ""
sentinel_init
set ret [$env lock_id_set \
$lock_curid $lock_maxid]
error_check_good lock_id_set $ret 0
# Fire off the tests
puts "\tDead003: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
set locker [$env lock_id]
puts "$tclsh_path\
test_path/ddscript.tcl $testdir \
$t $i $i $n >& \
$t $locker $i $n >& \
$testdir/dead003.log.$i"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
ddscript.tcl \
$testdir/dead003.log.$i $testdir \
$t $i $i $n &]
$t $locker $i $n &]
lappend pidlist $p
}
watch_procs 5
watch_procs $pidlist 5
# Now check output
set dead 0
@ -60,7 +65,7 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
dead_check $t $n $dead $clean $other
dead_check $t $n 0 $dead $clean $other
#
# If we get here we know we have the
# correct number of dead/clean procs, as
@ -88,5 +93,6 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
for { set i 0 } { $i < $n } { incr i } {
fileremove -f $testdir/dead003.log.$i
}
error_check_good lock_env:close [$env close] 0
}
}

108
bdb/test/dead004.tcl Normal file
View file

@ -0,0 +1,108 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dead004.tcl,v 11.11 2002/09/05 17:23:05 sandstro Exp $
#
# Deadlock Test 4.
# This test is designed to make sure that we handle youngest and oldest
# deadlock detection even when the youngest and oldest transactions in the
# system are not involved in the deadlock (that is, we want to abort the
# youngest/oldest which is actually involved in the deadlock, not simply
# the youngest/oldest in the system).
# Since this is used for transaction systems, the locker ID is what we
# use to identify age (smaller number is older).
#
# The set up is that we have a total of 6 processes. The oldest (locker 0)
# and the youngest (locker 5) simply acquire a lock, hold it for a long time
# and then release it. The rest form a ring, obtaining lock N and requesting
# a lock on (N+1) mod 4. The deadlock detector ought to pick locker 1 or 4
# to abort and not 0 or 5.
# dead004: run the oldest/youngest deadlock-detection test once for each
# abort policy (-a o = abort oldest, -a y = abort youngest).  Six lockers
# are spawned via ddoyscript.tcl; lockers 1..4 form a deadlock ring while
# lockers 0 and 5 (the oldest and youngest in the system) merely hold an
# unrelated lock, so the detector must abort a ring member, not them.
proc dead004 { } {
	source ./include.tcl
	global lock_curid
	global lock_maxid

	foreach a { o y } {
		puts "Dead004: Deadlock detector test -a $a"
		env_cleanup $testdir

		# Create the environment.
		puts "\tDead004.a: creating environment"
		set env [berkdb_env -create -mode 0644 -lock -home $testdir]
		error_check_good lock_env:open [is_valid_env $env] TRUE

		# Run the standalone detector in the background, checking
		# every 5 seconds with the requested abort policy.
		set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \
		    -h $testdir >& $testdir/dd.out &]

		set procs 6

		# $procs is a single-element list, so this loop runs once;
		# $n stays bound to 6 for the checks below the loop.
		foreach n $procs {
			sentinel_init
			set pidlist ""
			set ret [$env lock_id_set $lock_curid $lock_maxid]
			error_check_good lock_id_set $ret 0

			# Fire off the tests
			puts "\tDead004: $n procs"
			for { set i 0 } { $i < $n } { incr i } {
				set locker [$env lock_id]
				puts "$tclsh_path $test_path/wrap.tcl \
				    $testdir/dead004.log.$i \
				    ddoyscript.tcl $testdir $locker $n $a $i"
				set p [exec $tclsh_path \
					$test_path/wrap.tcl \
					ddoyscript.tcl $testdir/dead004.log.$i \
					$testdir $t $locker $n $a $i &]
				lappend pidlist $p
			}
			watch_procs $pidlist 5
		}

		# Now check output: tally each child's verdict line.
		set dead 0
		set clean 0
		set other 0
		for { set i 0 } { $i < $n } { incr i } {
			set did [open $testdir/dead004.log.$i]
			while { [gets $did val] != -1 } {
				switch $val {
					DEADLOCK { incr dead }
					1 { incr clean }
					default { incr other }
				}
			}
			close $did
		}
		tclkill $dpid
		puts "dead check..."
		dead_check oldyoung $n 0 $dead $clean $other

		# Now verify that neither the oldest nor the
		# youngest were the deadlock.
		# NOTE(review): log.0 belongs to the oldest locker and
		# log.[procs-1] to the youngest per the setup above, yet the
		# check names say the opposite ("young" for log.0, "old" for
		# the last log) -- labels look swapped; confirm intent.
		set did [open $testdir/dead004.log.0]
		error_check_bad file:young [gets $did val] -1
		error_check_good read:young $val 1
		close $did
		set did [open $testdir/dead004.log.[expr $procs - 1]]
		error_check_bad file:old [gets $did val] -1
		error_check_good read:old $val 1
		close $did

		# Windows needs files closed before deleting files,
		# so pause a little
		tclsleep 2
		fileremove -f $testdir/dd.out

		# Remove log files
		for { set i 0 } { $i < $n } { incr i } {
			fileremove -f $testdir/dead004.log.$i
		}
		error_check_good lock_env:close [$env close] 0
	}
}

87
bdb/test/dead005.tcl Normal file
View file

@ -0,0 +1,87 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dead005.tcl,v 11.10 2002/09/05 17:23:05 sandstro Exp $
#
# Deadlock Test 5.
# Test out the minlocks, maxlocks, and minwrites options
# to the deadlock detector.
# dead005: exercise the minlocks, maxlocks, and minwrites abort policies
# of the standalone deadlock detector.  For each policy, $n lockers run
# ddscript.tcl; dead_check verifies the dead/clean totals and then the
# log of the participant the policy should have chosen is checked for
# the DEADLOCK verdict.
proc dead005 { { procs "4 6 10" } {tests "maxlocks minwrites minlocks" } } {
	source ./include.tcl

	puts "Dead005: minlocks, maxlocks, and minwrites deadlock detection tests"
	foreach t $tests {
		puts "Dead005.$t: creating environment"
		env_cleanup $testdir

		# Create the environment.
		set env [berkdb_env -create -mode 0644 -lock -home $testdir]
		error_check_good lock_env:open [is_valid_env $env] TRUE

		# Map the policy name to the db_deadlock -a flag:
		# n = minlocks, m = maxlocks, w = minwrites.
		# (switch replaces the deprecated Tcl "case" command.)
		switch $t {
			minlocks { set to n }
			maxlocks { set to m }
			minwrites { set to w }
		}
		foreach n $procs {
			# Standalone detector, checking on every lock conflict.
			set dpid [exec $util_path/db_deadlock -vw -h $testdir \
			    -a $to >& $testdir/dd.out &]
			sentinel_init
			set pidlist ""

			# Fire off the tests
			puts "\tDead005: $t test with $n procs"
			for { set i 0 } { $i < $n } { incr i } {
				set locker [$env lock_id]
				puts "$tclsh_path $test_path/wrap.tcl \
				    $testdir/dead005.log.$i \
				    ddscript.tcl $testdir $t $locker $i $n"
				set p [exec $tclsh_path \
					$test_path/wrap.tcl \
					ddscript.tcl $testdir/dead005.log.$i \
					$testdir $t $locker $i $n &]
				lappend pidlist $p
			}
			watch_procs $pidlist 5

			# Now check output: tally each child's verdict line.
			set dead 0
			set clean 0
			set other 0
			for { set i 0 } { $i < $n } { incr i } {
				set did [open $testdir/dead005.log.$i]
				while { [gets $did val] != -1 } {
					switch $val {
						DEADLOCK { incr dead }
						1 { incr clean }
						default { incr other }
					}
				}
				close $did
			}
			tclkill $dpid
			puts "dead check..."
			dead_check $t $n 0 $dead $clean $other

			# Now verify that the correct participant
			# got deadlocked.
			switch $t {
				minlocks {set f 0}
				minwrites {set f 1}
				maxlocks {set f [expr $n - 1]}
			}
			set did [open $testdir/dead005.log.$f]
			error_check_bad file:$t [gets $did val] -1
			error_check_good read($f):$t $val DEADLOCK
			close $did
		}
		error_check_good lock_env:close [$env close] 0

		# Windows needs files closed before deleting them, so pause
		tclsleep 2
		fileremove -f $testdir/dd.out

		# Remove log files.
		# Bug fix: this test writes dead005.log.*, but the cleanup
		# previously removed dead001.log.*, leaving our logs behind.
		for { set i 0 } { $i < $n } { incr i } {
			fileremove -f $testdir/dead005.log.$i
		}
	}
}

16
bdb/test/dead006.tcl Normal file
View file

@ -0,0 +1,16 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dead006.tcl,v 1.4 2002/01/11 15:53:21 bostic Exp $
#
# TEST dead006
# TEST use timeouts rather than the normal dd algorithm.
# dead006: rerun the dead001 and dead002 scenarios using lock timeouts
# (txn_timeout) instead of the normal deadlock-detection algorithm.
proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
    {timeout 1000} {tnum 006} } {
	source ./include.tcl

	# Drive both detector configurations with the same arguments:
	# dead001 uses the standalone detector, dead002 detects on wait.
	foreach driver { dead001 dead002 } {
		$driver $procs $tests $timeout $tnum
	}
}

34
bdb/test/dead007.tcl Normal file
View file

@ -0,0 +1,34 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: dead007.tcl,v 1.3 2002/01/11 15:53:22 bostic Exp $
#
# TEST dead007
# TEST use timeouts rather than the normal dd algorithm.
# dead007: rerun the deadlock tests with the global locker-id space
# positioned so that ids wrap around (part a) and so that the id space
# is shrunk and nearly exhausted (part b).
proc dead007 { } {
	source ./include.tcl
	global lock_curid
	global lock_maxid

	# Remember the globals so they can be restored when we finish.
	set orig_curid $lock_curid
	set orig_maxid $lock_maxid

	puts "Dead007.a -- wrap around"
	# Start the current id just below the maximum so ids wrap
	# while the tests run.
	set lock_curid [expr {$lock_maxid - 2}]
	dead001 "2 10"
	## Oldest/youngest breaks when the id wraps
	# dead003 "4 10"
	dead004

	puts "Dead007.b -- extend space"
	# Shrink the id space and start right at its end.
	set lock_maxid [expr {$lock_maxid - 3}]
	set lock_curid [expr {$lock_maxid - 1}]
	dead001 "4 10"
	## Oldest/youngest breaks when the id wraps
	# dead003 "10"
	dead004

	# Restore the globals for whatever test runs next.
	set lock_curid $orig_curid
	set lock_maxid $orig_maxid
}

View file

@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env001.tcl,v 11.21 2000/11/09 19:24:08 sue Exp $
# $Id: env001.tcl,v 11.26 2002/05/08 19:01:43 margo Exp $
#
# Test of env remove interface.
# TEST env001
# TEST Test of env remove interface (formerly env_remove).
proc env001 { } {
global errorInfo
global errorCode
@ -20,12 +21,12 @@ proc env001 { } {
# Try opening without Create flag should error
puts "\tEnv001.a: Open without create (should fail)."
catch {set env [berkdb env -home $testdir]} ret
catch {set env [berkdb_env_noerr -home $testdir]} ret
error_check_good env:fail [is_substr $ret "no such file"] 1
# Now try opening with create
puts "\tEnv001.b: Open with create."
set env [berkdb env -create -mode 0644 -home $testdir]
set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
@ -40,7 +41,7 @@ proc env001 { } {
puts "\tEnv001.d: Remove on closed environments."
if { $is_windows_test != 1 } {
puts "\t\tEnv001.d.1: Verify re-open."
set env [berkdb env -home $testdir]
set env [berkdb_env -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
@ -56,7 +57,7 @@ proc env001 { } {
puts "\tEnv001.e: Remove on open environments."
puts "\t\tEnv001.e.1: Env is open by single proc,\
remove no force."
set env [berkdb env -create -mode 0644 -home $testdir]
set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set stat [catch {berkdb envremove -home $testdir} ret]
@ -68,7 +69,7 @@ proc env001 { } {
"\t\tEnv001.e.2: Env is open by single proc, remove with force."
# Now that envremove doesn't do a close, this won't work on Windows.
if { $is_windows_test != 1 && $is_hp_test != 1} {
set env [berkdb env -create -mode 0644 -home $testdir]
set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set stat [catch {berkdb envremove -force -home $testdir} ret]
@ -77,19 +78,22 @@ proc env001 { } {
# Even though the underlying env is gone, we need to close
# the handle.
#
catch {$env close}
set stat [catch {$env close} ret]
error_check_bad env:close_after_remove $stat 0
error_check_good env:close_after_remove \
[is_substr $ret "recovery"] 1
}
puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
# should fail
set env [berkdb env -create -mode 0644 -home $testdir]
set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
set remote_env [send_cmd $f1 "berkdb env -home $testdir"]
set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"]
error_check_good remote:env_open [is_valid_env $remote_env] TRUE
# First close our env, but leave remote open
error_check_good env:close [$env close] 0
@ -110,13 +114,13 @@ proc env001 { } {
# are open, so we skip this test for Windows. On UNIX, it should
# succeed
if { $is_windows_test != 1 && $is_hp_test != 1 } {
set env [berkdb env -create -mode 0644 -home $testdir]
set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
set remote_env [send_cmd $f1 "berkdb env -home $testdir"]
set remote_env [send_cmd $f1 "berkdb_env -home $testdir"]
error_check_good remote:env_open [is_valid_env $remote_env] TRUE
catch {berkdb envremove -force -home $testdir} ret
@ -124,7 +128,10 @@ proc env001 { } {
#
# We still need to close our handle.
#
catch {$env close} ret
set stat [catch {$env close} ret]
error_check_bad env:close_after_error $stat 0
error_check_good env:close_after_error \
[is_substr $ret recovery] 1
# Close down remote process
set err [catch { close $f1 } result]
@ -137,7 +144,7 @@ proc env001 { } {
file mkdir $testdir/NEWDIR
}
set eflags "-create -home $testdir/NEWDIR -mode 0644"
set env [eval {berkdb env} $eflags]
set env [eval {berkdb_env} $eflags]
error_check_bad env:open $env NULL
error_check_good env:close [$env close] 0
error_check_good berkdb:envremove \

View file

@ -1,21 +1,21 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env002.tcl,v 11.11 2000/08/25 14:21:50 sue Exp $
# $Id: env002.tcl,v 11.15 2002/02/20 16:35:20 sandstro Exp $
#
# Env Test 002
# Test set_lg_dir and env name resolution
# With an environment path specified using -home, and then again
# with it specified by the environment variable DB_HOME:
# 1) Make sure that the set_lg_dir option is respected
# a) as a relative pathname.
# b) as an absolute pathname.
# 2) Make sure that the DB_LOG_DIR db_config argument is respected,
# again as relative and absolute pathnames.
# 3) Make sure that if -both- db_config and a file are present,
# only the file is respected (see doc/env/naming.html).
# TEST env002
# TEST Test of DB_LOG_DIR and env name resolution.
# TEST With an environment path specified using -home, and then again
# TEST with it specified by the environment variable DB_HOME:
# TEST 1) Make sure that the set_lg_dir option is respected
# TEST a) as a relative pathname.
# TEST b) as an absolute pathname.
# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected,
# TEST again as relative and absolute pathnames.
# TEST 3) Make sure that if -both- db_config and a file are present,
# TEST only the file is respected (see doc/env/naming.html).
proc env002 { } {
# env002 is essentially just a small driver that runs
# env002_body--formerly the entire test--twice; once, it
@ -30,7 +30,7 @@ proc env002 { } {
puts "Env002: set_lg_dir test."
puts "\tEnv002: Running with -home argument to berkdb env."
puts "\tEnv002: Running with -home argument to berkdb_env."
env002_body "-home $testdir"
puts "\tEnv002: Running with environment variable DB_HOME set."
@ -125,8 +125,8 @@ proc env002_run_test { major minor msg env_args log_path} {
# Create an environment, with logging, and scribble some
# stuff in a [btree] database in it.
# puts [concat {berkdb env -create -log -private} $env_args]
set dbenv [eval {berkdb env -create -log -private} $env_args]
# puts [concat {berkdb_env -create -log -private} $env_args]
set dbenv [eval {berkdb_env -create -log -private} $env_args]
error_check_good env_open [is_valid_env $dbenv] TRUE
set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
error_check_good db_open [is_valid_db $db] TRUE

View file

@ -1,21 +1,21 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env003.tcl,v 11.12 2000/08/25 14:21:50 sue Exp $
# $Id: env003.tcl,v 11.21 2002/08/08 15:38:06 bostic Exp $
#
# Env Test 003
# Test DB_TMP_DIR and env name resolution
# With an environment path specified using -home, and then again
# with it specified by the environment variable DB_HOME:
# 1) Make sure that the DB_TMP_DIR config file option is respected
# a) as a relative pathname.
# b) as an absolute pathname.
# 2) Make sure that the DB_TMP_DIR db_config argument is respected,
# again as relative and absolute pathnames.
# 3) Make sure that if -both- db_config and a file are present,
# only the file is respected (see doc/env/naming.html).
# TEST env003
# TEST Test DB_TMP_DIR and env name resolution
# TEST With an environment path specified using -home, and then again
# TEST with it specified by the environment variable DB_HOME:
# TEST 1) Make sure that the DB_TMP_DIR config file option is respected
# TEST a) as a relative pathname.
# TEST b) as an absolute pathname.
# TEST 2) Make sure that the -tmp_dir config option is respected,
# TEST again as relative and absolute pathnames.
# TEST 3) Make sure that if -both- -tmp_dir and a file are present,
# TEST only the file is respected (see doc/env/naming.html).
proc env003 { } {
# env003 is essentially just a small driver that runs
# env003_body twice. First, it supplies a "home" argument
@ -29,7 +29,7 @@ proc env003 { } {
puts "Env003: DB_TMP_DIR test."
puts "\tEnv003: Running with -home argument to berkdb env."
puts "\tEnv003: Running with -home argument to berkdb_env."
env003_body "-home $testdir"
puts "\tEnv003: Running with environment variable DB_HOME set."
@ -44,7 +44,6 @@ proc env003 { } {
set env(DB_HOME) $testdir/bogus_home
env003_body "-use_environ -home $testdir"
unset env(DB_HOME)
}
proc env003_body { home_arg } {
@ -52,7 +51,6 @@ proc env003_body { home_arg } {
env_cleanup $testdir
set tmpdir "tmpfiles_in_here"
file mkdir $testdir/$tmpdir
# Set up full path to $tmpdir for when we test absolute paths.
@ -61,63 +59,44 @@ proc env003_body { home_arg } {
set fulltmpdir [pwd]
cd $curdir
# Run test with the temp dir. nonexistent--it checks for failure.
env_cleanup $testdir
# Create DB_CONFIG
env003_make_config $tmpdir
# Run the meat of the test.
env003_run_test a 1 "relative path, config file" $home_arg \
$testdir/$tmpdir
env_cleanup $testdir
env003_make_config $fulltmpdir
# Run the test again
env003_run_test a 2 "absolute path, config file" $home_arg \
$fulltmpdir
env_cleanup $testdir
# Now we try without a config file, but instead with db_config
# relative paths
env003_run_test b 1 "relative path, db_config" "$home_arg \
-tmp_dir $tmpdir -data_dir ." \
$testdir/$tmpdir
env_cleanup $testdir
# absolute
# absolute paths
env003_run_test b 2 "absolute path, db_config" "$home_arg \
-tmp_dir $fulltmpdir -data_dir ." \
$fulltmpdir
env_cleanup $testdir
# Now, set db_config -and- have a # DB_CONFIG file, and make
# sure only the latter is honored.
# Make a temp directory that actually does exist to supply
# as a bogus argument--the test checks for -nonexistent- temp
# dirs., as success is harder to detect.
file mkdir $testdir/bogus
env003_make_config $tmpdir
# note that we supply an -existent- tmp dir to db_config as
# a red herring
env003_run_test c 1 "relative path, both db_config and file" \
"$home_arg -tmp_dir $testdir/bogus -data_dir ." \
$testdir/$tmpdir
env_cleanup $testdir
file mkdir $fulltmpdir
file mkdir $fulltmpdir/bogus
env003_make_config $fulltmpdir/nonexistent
env003_make_config $fulltmpdir
# note that we supply an -existent- tmp dir to db_config as
# a red herring
env003_run_test c 2 "relative path, both db_config and file" \
env003_run_test c 2 "absolute path, both db_config and file" \
"$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
$fulltmpdir
}
@ -131,40 +110,33 @@ proc env003_run_test { major minor msg env_args tmp_path} {
# Create an environment and small-cached in-memory database to
# use.
set dbenv [eval {berkdb env -create -home $testdir} $env_args \
{-cachesize {0 40960 1}}]
set dbenv [eval {berkdb_env -create -home $testdir} $env_args \
{-cachesize {0 50000 1}}]
error_check_good env_open [is_valid_env $dbenv] TRUE
set db [berkdb_open_noerr -env $dbenv -create -btree]
set db [berkdb_open -env $dbenv -create -btree]
error_check_good db_open [is_valid_db $db] TRUE
# Fill the database with more than its cache can fit.
# !!!
# This is actually trickier than it sounds. The tempfile
# gets unlinked as soon as it's created, so there's no straightforward
# way to check for its existence. Instead, we make sure
# DB_TMP_DIR points somewhere bogus, and make sure that the temp
# dir. does -not- exist. But to do this, we have to know
# which call to DB->put is going to fail--the temp file is
# created lazily, so the failure only occurs when the cache finally
# overflows.
# The data we've conjured up will fit nicely once, but the second
# call will overflow the cache. Thus we check for success once,
# then failure.
#
set key1 "key1"
set key2 "key2"
set data [repeat $alphabet 1000]
# When CONFIG_TEST is defined, the tempfile is left linked so
# we can check for its existence. Size the data to overfill
# the cache--the temp file is created lazily, so it is created
# when the cache overflows.
#
set key "key"
set data [repeat $alphabet 2000]
error_check_good db_put [$db put $key $data] 0
# First put should succeed.
error_check_good db_put_1 [$db put $key1 $data] 0
# Second one should return ENOENT.
set errorCode NONE
catch {$db put $key2 $data} res
error_check_good db_put_2 [is_substr $errorCode ENOENT] 1
# Check for exactly one temp file.
set ret [glob -nocomplain $tmp_path/BDB*]
error_check_good temp_file_exists [llength $ret] 1
# Can't remove temp file until db is closed on Windows.
error_check_good db_close [$db close] 0
fileremove -f $ret
error_check_good env_close [$dbenv close] 0
}
proc env003_make_config { tmpdir } {

View file

@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env004.tcl,v 11.14 2000/08/25 14:21:50 sue Exp $
# $Id: env004.tcl,v 11.18 2002/02/20 17:08:21 sandstro Exp $
#
# Env Test 4
# Test multiple data directories. Do a bunch of different opens
# to make sure that the files are detected in different directories.
# TEST env004
# TEST Test multiple data directories. Do a bunch of different opens
# TEST to make sure that the files are detected in different directories.
proc env004 { } {
source ./include.tcl
@ -38,19 +38,19 @@ proc env004 { } {
set fulldir [pwd]
cd $curdir
set e [berkdb env -create -private -home $testdir]
set e [berkdb_env -create -private -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE
ddir_test $fulldir $method $e $args
error_check_good env_close [$e close] 0
puts "\tEnv004.b: Multiple data directories in berkdb env call."
puts "\tEnv004.b: Multiple data directories in berkdb_env call."
env_cleanup $testdir
file mkdir $testdir/data1
file mkdir $testdir/data2
file mkdir $testdir/data3
# Now call dbenv with config specified
set e [berkdb env -create -private \
set e [berkdb_env -create -private \
-data_dir . -data_dir data1 -data_dir data2 \
-data_dir data3 -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE

View file

@ -1,14 +1,14 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env005.tcl,v 11.8 2000/08/25 14:21:50 sue Exp $
# $Id: env005.tcl,v 11.15 2002/02/22 14:28:37 sandstro Exp $
#
# Env Test 5
# Test that using subsystems without initializing them correctly
# returns an error. Cannot test mpool, because it is assumed
# in the Tcl code.
# TEST env005
# TEST Test that using subsystems without initializing them correctly
# TEST returns an error. Cannot test mpool, because it is assumed in
# TEST the Tcl code.
proc env005 { } {
source ./include.tcl
@ -17,7 +17,7 @@ proc env005 { } {
env_cleanup $testdir
puts "\tEnv005.a: Creating env with no subsystems."
set e [berkdb env -create -home $testdir]
set e [berkdb_env_noerr -create -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE
set db [berkdb_open -create -btree $testdir/env005.db]
error_check_good dbopen [is_valid_db $db] TRUE
@ -27,17 +27,17 @@ proc env005 { } {
{ "lock_get read 1 1" "Env005.b1"}
{ "lock_id" "Env005.b2"}
{ "lock_stat" "Env005.b3"}
{ "lock_timeout 100" "Env005.b4"}
{ "log_archive" "Env005.c0"}
{ "log_file {1 1}" "Env005.c1"}
{ "log_flush" "Env005.c2"}
{ "log_get -first" "Env005.c3"}
{ "log_cursor" "Env005.c1"}
{ "log_file {1 1}" "Env005.c2"}
{ "log_flush" "Env005.c3"}
{ "log_put record" "Env005.c4"}
{ "log_register $db xxx" "Env005.c5"}
{ "log_stat" "Env005.c6"}
{ "log_unregister $db" "Env005.c7"}
{ "log_stat" "Env005.c5"}
{ "txn" "Env005.d0"}
{ "txn_checkpoint" "Env005.d1"}
{ "txn_stat" "Env005.d2"}
{ "txn_timeout 100" "Env005.d3"}
}
foreach pair $rlist {

View file

@ -1,14 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env006.tcl,v 11.5 2000/10/27 13:23:55 sue Exp $
#
# Env Test 6
# DB Utility Check
# Make sure that all the utilities exist and run.
# $Id: env006.tcl,v 11.8 2002/01/11 15:53:23 bostic Exp $
#
# TEST env006
# TEST Make sure that all the utilities exist and run.
proc env006 { } {
source ./include.tcl
@ -23,6 +21,8 @@ proc env006 { } {
{ "db_printlog" "Env006.f"}
{ "db_recover" "Env006.g"}
{ "db_stat" "Env006.h"}
{ "db_upgrade" "Env006.h"}
{ "db_verify" "Env006.h"}
}
foreach pair $rlist {
set cmd [lindex $pair 0]

View file

@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env007.tcl,v 11.5 2000/08/25 14:21:50 sue Exp $
# $Id: env007.tcl,v 11.21 2002/08/12 20:49:36 sandstro Exp $
#
# Env Test 007
# Test various config file options.
# 1) Make sure command line option is respected
# 2) Make sure that config file option is respected
# 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
# method is used, only the file is respected.
# TEST env007
# TEST Test various DB_CONFIG config file options.
# TEST 1) Make sure command line option is respected
# TEST 2) Make sure that config file option is respected
# TEST 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
# TEST method is used, only the file is respected.
# TEST Then test all known config options.
proc env007 { } {
global errorInfo
# env007 is essentially just a small driver that runs
# env007_body twice. First, it supplies a "set" argument
# to use with environment opens, and the second time it sets
@ -29,15 +32,19 @@ proc env007 { } {
set rlist {
{ " -txn_max " "set_tx_max" "19" "31" "Env007.a: Txn Max"
"txn_stat" "Max Txns"}
{ " -lock_max " "set_lk_max" "19" "31" "Env007.b: Lock Max"
"lock_stat" "Max locks"}
{ " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.c: Log Bsize"
{ " -lock_max_locks " "set_lk_max_locks" "17" "29" "Env007.b: Lock Max"
"lock_stat" "Maximum locks"}
{ " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
"Env007.c: Max Lockers" "lock_stat" "Maximum lockers"}
{ " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
"Env007.d: Max Objects" "lock_stat" "Maximum objects"}
{ " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.e: Log Bsize"
"log_stat" "Log record cache size"}
{ " -log_max " "set_lg_max" "8388608" "9437184" "Env007.d: Log Max"
"log_stat" "Maximum log file size"}
{ " -log_max " "set_lg_max" "8388608" "9437184" "Env007.f: Log Max"
"log_stat" "Current log file size"}
}
set e "berkdb env -create -mode 0644 -home $testdir -log -lock -txn "
set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
foreach item $rlist {
set envarg [lindex $item 0]
set configarg [lindex $item 1]
@ -72,6 +79,122 @@ proc env007 { } {
env007_check $env $statcmd $statstr $configval
error_check_good envclose:2 [$env close] 0
}
#
# Test all options. For all config options, write it out
# to the file and make sure we can open the env. We cannot
# necessarily check via stat that it worked but this execs
# the config file code itself.
#
set cfglist {
{ "set_cachesize" "0 1048576 0" }
{ "set_data_dir" "." }
{ "set_flags" "db_cdb_alldb" }
{ "set_flags" "db_direct_db" }
{ "set_flags" "db_direct_log" }
{ "set_flags" "db_nolocking" }
{ "set_flags" "db_nommap" }
{ "set_flags" "db_nopanic" }
{ "set_flags" "db_overwrite" }
{ "set_flags" "db_region_init" }
{ "set_flags" "db_txn_nosync" }
{ "set_flags" "db_txn_write_nosync" }
{ "set_flags" "db_yieldcpu" }
{ "set_lg_bsize" "65536" }
{ "set_lg_dir" "." }
{ "set_lg_max" "8388608" }
{ "set_lg_regionmax" "65536" }
{ "set_lk_detect" "db_lock_default" }
{ "set_lk_detect" "db_lock_expire" }
{ "set_lk_detect" "db_lock_maxlocks" }
{ "set_lk_detect" "db_lock_minlocks" }
{ "set_lk_detect" "db_lock_minwrite" }
{ "set_lk_detect" "db_lock_oldest" }
{ "set_lk_detect" "db_lock_random" }
{ "set_lk_detect" "db_lock_youngest" }
{ "set_lk_max" "50" }
{ "set_lk_max_lockers" "1500" }
{ "set_lk_max_locks" "29" }
{ "set_lk_max_objects" "1500" }
{ "set_lock_timeout" "100" }
{ "set_mp_mmapsize" "12582912" }
{ "set_region_init" "1" }
{ "set_shm_key" "15" }
{ "set_tas_spins" "15" }
{ "set_tmp_dir" "." }
{ "set_tx_max" "31" }
{ "set_txn_timeout" "100" }
{ "set_verbose" "db_verb_chkpoint" }
{ "set_verbose" "db_verb_deadlock" }
{ "set_verbose" "db_verb_recovery" }
{ "set_verbose" "db_verb_waitsfor" }
}
puts "\tEnv007.g: Config file settings"
set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
foreach item $cfglist {
env_cleanup $testdir
set configarg [lindex $item 0]
set configval [lindex $item 1]
env007_make_config $configarg $configval
# verify using just config file
puts "\t\t $configarg $configval"
set env [eval $e]
error_check_good envvalid:1 [is_valid_env $env] TRUE
error_check_good envclose:1 [$env close] 0
}
set cfglist {
{ "set_cachesize" "1048576" }
{ "set_flags" "db_xxx" }
{ "set_flags" "1" }
{ "set_flags" "db_txn_nosync x" }
{ "set_lg_bsize" "db_xxx" }
{ "set_lg_max" "db_xxx" }
{ "set_lg_regionmax" "db_xxx" }
{ "set_lk_detect" "db_xxx" }
{ "set_lk_detect" "1" }
{ "set_lk_detect" "db_lock_youngest x" }
{ "set_lk_max" "db_xxx" }
{ "set_lk_max_locks" "db_xxx" }
{ "set_lk_max_lockers" "db_xxx" }
{ "set_lk_max_objects" "db_xxx" }
{ "set_mp_mmapsize" "db_xxx" }
{ "set_region_init" "db_xxx" }
{ "set_shm_key" "db_xxx" }
{ "set_tas_spins" "db_xxx" }
{ "set_tx_max" "db_xxx" }
{ "set_verbose" "db_xxx" }
{ "set_verbose" "1" }
{ "set_verbose" "db_verb_recovery x" }
}
puts "\tEnv007.h: Config value errors"
set e "berkdb_env_noerr -create -mode 0644 \
-home $testdir -log -lock -txn "
foreach item $cfglist {
set configarg [lindex $item 0]
set configval [lindex $item 1]
env007_make_config $configarg $configval
# verify using just config file
puts "\t\t $configarg $configval"
set stat [catch {eval $e} ret]
error_check_good envopen $stat 1
error_check_good error [is_substr $errorInfo \
"incorrect arguments for name-value pair"] 1
}
puts "\tEnv007.i: Config name error set_xxx"
set e "berkdb_env_noerr -create -mode 0644 \
-home $testdir -log -lock -txn "
env007_make_config "set_xxx" 1
set stat [catch {eval $e} ret]
error_check_good envopen $stat 1
error_check_good error [is_substr $errorInfo \
"unrecognized name-value pair"] 1
}
proc env007_check { env statcmd statstr testval } {

View file

@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env008.tcl,v 11.2 2000/10/30 19:00:38 sue Exp $
# $Id: env008.tcl,v 11.6 2002/02/22 14:29:34 sandstro Exp $
#
# Test of env and subdirs.
# TEST env008
# TEST Test environments and subdirectories.
proc env008 { } {
global errorInfo
global errorCode
@ -21,9 +22,8 @@ proc env008 { } {
puts "Env008: Test of environments and subdirectories."
# Try opening without Create flag should error
puts "\tEnv008.a: Create env and db."
set env [berkdb env -create -mode 0644 -home $testdir -txn]
set env [berkdb_env -create -mode 0644 -home $testdir -txn]
error_check_good env [is_valid_env $env] TRUE
puts "\tEnv008.b: Remove db in subdir."

57
bdb/test/env009.tcl Normal file
View file

@ -0,0 +1,57 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env009.tcl,v 11.5 2002/08/12 20:40:36 sandstro Exp $
#
# TEST env009
# TEST Test calls to all the various stat functions. We have several
# TEST sprinkled throughout the test suite, but this will ensure that
# TEST we run all of them at least once.
proc env009 { } {
	source ./include.tcl

	# Run every environment-wide stat call (lock, log, mpool, txn)
	# plus a per-database stat for each access method, verifying a
	# known field name appears in each stat's output.
	puts "Env009: Various stat function test."
	env_cleanup $testdir
	puts "\tEnv009.a: Setting up env and a database."

	set e [berkdb_env -create -home $testdir -txn]
	error_check_good dbenv [is_valid_env $e] TRUE
	set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
	error_check_good dbopen [is_valid_db $dbbt] TRUE
	set dbh [berkdb_open -create -hash $testdir/env009h.db]
	error_check_good dbopen [is_valid_db $dbh] TRUE
	# NOTE(review): opened -btree despite the "q" name and the
	# "queue stats" label below -- presumably intentional since queue
	# needs fixed-length records, but confirm.
	set dbq [berkdb_open -create -btree $testdir/env009q.db]
	error_check_good dbopen [is_valid_db $dbq] TRUE

	# Each entry: { env stat command, substring expected in the
	# output, message prefix }.
	set rlist {
	{ "lock_stat" "Maximum locks" "Env009.b"}
	{ "log_stat" "Magic" "Env009.c"}
	{ "mpool_stat" "Number of caches" "Env009.d"}
	{ "txn_stat" "Max Txns" "Env009.e"}
	}

	foreach pair $rlist {
		set cmd [lindex $pair 0]
		set str [lindex $pair 1]
		set msg [lindex $pair 2]
		puts "\t$msg: $cmd"
		set ret [$e $cmd]
		error_check_good $cmd [is_substr $ret $str] 1
	}

	# Bug fix: these checks previously used the stale loop variable
	# $cmd as the test name, so any failure here was misreported as
	# "txn_stat".  Also fix the duplicated "Env009.f" label on the
	# queue check (the sequence is f, g, h).
	puts "\tEnv009.f: btree stats"
	set ret [$dbbt stat]
	error_check_good btree_stat [is_substr $ret "Magic"] 1
	puts "\tEnv009.g: hash stats"
	set ret [$dbh stat]
	error_check_good hash_stat [is_substr $ret "Magic"] 1
	puts "\tEnv009.h: queue stats"
	set ret [$dbq stat]
	error_check_good queue_stat [is_substr $ret "Magic"] 1

	error_check_good dbclose [$dbbt close] 0
	error_check_good dbclose [$dbh close] 0
	error_check_good dbclose [$dbq close] 0
	error_check_good envclose [$e close] 0
}

49
bdb/test/env010.tcl Normal file
View file

@ -0,0 +1,49 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env010.tcl,v 1.4 2002/02/20 17:08:21 sandstro Exp $
#
# TEST env010
# TEST Run recovery in an empty directory, and then make sure we can still
# TEST create a database in that directory.
proc env010 { } {
	source ./include.tcl

	# Verify that running recovery in an empty directory works, and
	# that a database can subsequently be created there.
	puts "Env010: Test of recovery in an empty directory."

	# Dedicated subdirectory used only by this test.
	set dir $testdir/EMPTYDIR
	if { ![file exists $dir] } {
		file mkdir $dir
	} else {
		puts "\nDirectory already exists."
	}

	# Exercise both ordinary and catastrophic recovery; each pass
	# recovers over an empty directory, then opens and closes a
	# database there.
	foreach mode {recover recover_fatal} {
		puts "\tEnv010: Creating env for $mode test."
		env_cleanup $dir
		set e [berkdb_env -create -home $dir -$mode]
		error_check_good dbenv [is_valid_env $e] TRUE

		# Any access method would do; btree was picked arbitrarily.
		set db [eval {berkdb_open -env $e \
		    -btree -create -mode 0644} ]
		error_check_good dbopen [is_valid_db $db] TRUE
		error_check_good db_close [$db close] 0

		# Tear down the environment for the next pass.
		error_check_good envclose [$e close] 0
		error_check_good berkdb:envremove \
		    [berkdb envremove -home $dir] 0
	}
	puts "\tEnv010 complete."
}

39
bdb/test/env011.tcl Normal file
View file

@ -0,0 +1,39 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: env011.tcl,v 1.2 2002/02/20 17:08:21 sandstro Exp $
#
# TEST env011
# TEST Run with region overwrite flag.
proc env011 { } {
	source ./include.tcl

	# Exercise the -overwrite flag on environment create, open with
	# recovery, and remove.
	puts "Env011: Test of region overwriting."
	env_cleanup $testdir

	# Create an env with -overwrite, write a couple of records, then
	# close everything down.
	puts "\tEnv011: Creating/closing env for open test."
	set env [berkdb_env -create -overwrite -home $testdir -txn]
	error_check_good dbenv [is_valid_env $env] TRUE
	set dbh [berkdb_open -auto_commit -env $env -btree -create -mode 0644]
	error_check_good dbopen [is_valid_db $dbh] TRUE
	foreach key {"aaa" "bbb"} {
		set ret [$dbh put -auto_commit $key "data"]
		error_check_good put $ret 0
	}
	error_check_good db_close [$dbh close] 0
	error_check_good envclose [$env close] 0

	# Reopen the same environment with -overwrite and -recover ...
	puts "\tEnv011: Opening the environment with overwrite set."
	set env [berkdb_env -create -overwrite -home $testdir -txn -recover]
	error_check_good dbenv [is_valid_env $env] TRUE
	error_check_good envclose [$env close] 0

	# ... and finally remove it with -overwrite.
	puts "\tEnv011: Removing the environment with overwrite set."
	error_check_good berkdb:envremove \
	    [berkdb envremove -home $testdir -overwrite] 0

	puts "\tEnv011 complete."
}

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: hsearch.tcl,v 11.7 2000/08/25 14:21:50 sue Exp $
# $Id: hsearch.tcl,v 11.9 2002/01/11 15:53:24 bostic Exp $
#
# Historic Hsearch interface test.
# Use the first 1000 entries from the dictionary.

View file

@ -1,19 +1,23 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: join.tcl,v 11.17 2000/08/25 14:21:51 sue Exp $
# $Id: join.tcl,v 11.21 2002/02/20 17:08:22 sandstro Exp $
#
# We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
# everything else does as well. We'll create test databases called
# join1.db, join2.db, join3.db, and join4.db. The number on the database
# describes the duplication -- duplicates are of the form 0, N, 2N, 3N, ...
# where N is the number of the database. Primary.db is the primary database,
# and null.db is the database that has no matching duplicates.
#
# We should test this on all btrees, all hash, and a combination thereof
# Join test.
# TEST jointest
# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
# TEST with differing index orders and selectivity.
# TEST
# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those
# TEST work, everything else does as well. We'll create test databases
# TEST called join1.db, join2.db, join3.db, and join4.db. The number on
# TEST the database describes the duplication -- duplicates are of the
# TEST form 0, N, 2N, 3N, ... where N is the number of the database.
# TEST Primary.db is the primary database, and null.db is the database
# TEST that has no matching duplicates.
# TEST
# TEST We should test this on all btrees, all hash, and a combination thereof
proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
global testdir
global rand_init
@ -24,7 +28,7 @@ proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
# Use one environment for all database opens so we don't
# need oodles of regions.
set env [berkdb env -create -home $testdir]
set env [berkdb_env -create -home $testdir]
error_check_good env_open [is_valid_env $env] TRUE
# With the new offpage duplicate code, we don't support

View file

@ -1,67 +1,28 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: lock001.tcl,v 11.11 2000/08/25 14:21:51 sue Exp $
# $Id: lock001.tcl,v 11.19 2002/04/25 19:30:28 sue Exp $
#
# Test driver for lock tests.
# General Multi Random
# Options are:
# -dir <directory in which to store mpool> Y Y Y
# -iterations <iterations> Y N Y
# -ldegree <number of locks per iteration> N N Y
# -maxlocks <locks in table> Y Y Y
# -objs <number of objects> N N Y
# -procs <number of processes to run> N N Y
# -reads <read ratio> N N Y
# -seeds <list of seed values for processes> N N Y
# -wait <wait interval after getting locks> N N Y
# -conflicts <conflict matrix; a list of lists> Y Y Y
proc lock_usage {} {
puts stderr "randomlock\n\t-dir <dir>\n\t-iterations <iterations>"
puts stderr "\t-conflicts <conflict matrix>"
puts stderr "\t-ldegree <locks per iteration>\n\t-maxlocks <n>"
puts stderr "\t-objs <objects>\n\t-procs <nprocs>\n\t-reads <%reads>"
puts stderr "\t-seeds <list of seeds>\n\t-wait <max wait interval>"
return
}
proc locktest { args } {
# TEST lock001
# TEST Make sure that the basic lock tests work. Do some simple gets
# TEST and puts for a single locker.
proc lock001 { {iterations 1000} {maxlocks 1000} } {
source ./include.tcl
global lock_curid
global lock_maxid
set save_curid $lock_curid
set save_maxid $lock_maxid
# Set defaults
# Adjusted to make exact match of isqrt
#set conflicts { 3 0 0 0 0 0 1 0 1 1}
#set conflicts { 3 0 0 0 0 1 0 1 1}
set conflicts { 0 0 0 0 0 1 0 1 1}
set iterations 1000
set ldegree 5
set maxlocks 1000
set objs 75
set procs 5
set reads 65
set seeds {}
set wait 5
for { set i 0 } { $i < [llength $args] } {incr i} {
switch -regexp -- [lindex $args $i] {
-c.* { incr i; set conflicts [linkdex $args $i] }
-d.* { incr i; set testdir [lindex $args $i] }
-i.* { incr i; set iterations [lindex $args $i] }
-l.* { incr i; set ldegree [lindex $args $i] }
-m.* { incr i; set maxlocks [lindex $args $i] }
-o.* { incr i; set objs [lindex $args $i] }
-p.* { incr i; set procs [lindex $args $i] }
-r.* { incr i; set reads [lindex $args $i] }
-s.* { incr i; set seeds [lindex $args $i] }
-w.* { incr i; set wait [lindex $args $i] }
default {
puts -nonewline "FAIL:[timestamp] Usage: "
lock_usage
return
}
}
}
set nmodes [isqrt [llength $conflicts]]
# Cleanup
@ -70,26 +31,15 @@ proc locktest { args } {
# Open the region we'll use for testing.
set eflags "-create -lock -home $testdir -mode 0644 \
-lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}"
set env [eval {berkdb env} $eflags]
lock001 $env $iterations $nmodes
reset_env $env
env_cleanup $testdir
lock002 $maxlocks $conflicts
lock003 $testdir $iterations \
$maxlocks $procs $ldegree $objs $reads $wait $conflicts $seeds
}
# Make sure that the basic lock tests work. Do some simple gets and puts for
# a single locker.
proc lock001 {env iter nmodes} {
source ./include.tcl
set env [eval {berkdb_env} $eflags]
error_check_good env [is_valid_env $env] TRUE
error_check_good lock_id_set \
[$env lock_id_set $lock_curid $lock_maxid] 0
puts "Lock001: test basic lock operations"
set locker 999
set locker [$env lock_id]
# Get and release each type of lock
puts "Lock001.a: get and release each type of lock"
puts "\tLock001.a: get and release each type of lock"
foreach m {ng write read} {
set obj obj$m
set lockp [$env lock_get $m $locker $obj]
@ -101,7 +51,7 @@ proc lock001 {env iter nmodes} {
# Get a bunch of locks for the same locker; these should work
set obj OBJECT
puts "Lock001.b: Get a bunch of locks for the same locker"
puts "\tLock001.b: Get a bunch of locks for the same locker"
foreach m {ng write read} {
set lockp [$env lock_get $m $locker $obj ]
lappend locklist $lockp
@ -112,7 +62,7 @@ proc lock001 {env iter nmodes} {
set locklist {}
# Check that reference counted locks work
puts "Lock001.c: reference counted locks."
puts "\tLock001.c: reference counted locks."
for {set i 0} { $i < 10 } {incr i} {
set lockp [$env lock_get -nowait write $locker $obj]
error_check_good lock_get:c [is_blocked $lockp] 0
@ -131,10 +81,10 @@ proc lock001 {env iter nmodes} {
}
# Change the locker
set locker [incr locker]
set locker [$env lock_id]
set blocklist {}
# Skip NO_LOCK lock.
puts "Lock001.e: Change the locker, acquire read and write."
puts "\tLock001.d: Change the locker, acquire read and write."
foreach i {write read} {
catch {$env lock_get -nowait $i $locker $obj} ret
error_check_good lock_get:e [is_substr $ret "not granted"] 1
@ -146,7 +96,7 @@ proc lock001 {env iter nmodes} {
# Now re-acquire blocking locks
set locklist {}
puts "Lock001.f: Re-acquire blocking locks."
puts "\tLock001.e: Re-acquire blocking locks."
foreach i {write read} {
set lockp [$env lock_get -nowait $i $locker $obj ]
error_check_good lock_get:f [is_substr $lockp $env] 1
@ -156,8 +106,10 @@ proc lock001 {env iter nmodes} {
# Now release new locks
release_list $locklist
error_check_good free_id [$env lock_id_free $locker] 0
error_check_good envclose [$env close] 0
puts "Lock001 Complete."
}
# Blocked locks appear as lockmgrN.lockM\nBLOCKED

View file

@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: lock002.tcl,v 11.10 2000/08/25 14:21:51 sue Exp $
# $Id: lock002.tcl,v 11.19 2002/04/25 19:30:29 sue Exp $
#
# Exercise basic multi-process aspects of lock.
# TEST lock002
# TEST Exercise basic multi-process aspects of lock.
proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
source ./include.tcl
@ -24,22 +25,25 @@ proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
# detach from it, etc.
proc mlock_open { maxl nmodes conflicts } {
source ./include.tcl
global lock_curid
global lock_maxid
puts "Lock002.a multi-process open/close test"
puts "\tLock002.a multi-process open/close test"
# Open/Create region here. Then close it and try to open from
# other test process.
set env_cmd [concat "berkdb env -create -mode 0644 \
set env_cmd [concat "berkdb_env -create -mode 0644 \
-lock -lock_max $maxl -lock_conflict" \
[list [list $nmodes $conflicts]] "-home $testdir"]
set local_env [eval $env_cmd]
$local_env lock_id_set $lock_curid $lock_maxid
error_check_good env_open [is_valid_env $local_env] TRUE
set ret [$local_env close]
error_check_good env_close $ret 0
# Open from other test process
set env_cmd "berkdb env -mode 0644 -home $testdir"
set env_cmd "berkdb_env -mode 0644 -home $testdir"
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
@ -58,7 +62,7 @@ proc mlock_open { maxl nmodes conflicts } {
error_check_good remote:lock_close $ret 0
# Try opening for create. Will succeed because region exists.
set env_cmd [concat "berkdb env -create -mode 0644 \
set env_cmd [concat "berkdb_env -create -mode 0644 \
-lock -lock_max $maxl -lock_conflict" \
[list [list $nmodes $conflicts]] "-home $testdir"]
set local_env [eval $env_cmd]
@ -76,10 +80,10 @@ proc mlock_open { maxl nmodes conflicts } {
proc mlock_wait { } {
source ./include.tcl
puts "Lock002.b multi-process get/put wait test"
puts "\tLock002.b multi-process get/put wait test"
# Open region locally
set env_cmd "berkdb env -lock -home $testdir"
set env_cmd "berkdb_env -lock -home $testdir"
set local_env [eval $env_cmd]
error_check_good env_open [is_valid_env $local_env] TRUE
@ -95,15 +99,15 @@ proc mlock_wait { } {
# remotely. We hold the locks for several seconds
# so that we can use timestamps to figure out if the
# other process waited.
set locker 1
set local_lock [$local_env lock_get write $locker object1]
set locker1 [$local_env lock_id]
set local_lock [$local_env lock_get write $locker1 object1]
error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
# Now request a lock that we expect to hang; generate
# timestamps so we can tell if it actually hangs.
set locker 2
set locker2 [send_cmd $f1 "$remote_env lock_id"]
set remote_lock [send_timed_cmd $f1 1 \
"set lock \[$remote_env lock_get write $locker object1\]"]
"set lock \[$remote_env lock_get write $locker2 object1\]"]
# Now sleep before releasing lock
tclsleep 5
@ -127,8 +131,7 @@ proc mlock_wait { } {
set ret [send_cmd $f1 "$remote_lock put"]
set locker 1
set local_lock [$local_env lock_get write $locker object1]
set local_lock [$local_env lock_get write $locker1 object1]
error_check_good lock_get:time \
[expr [expr [timestamp -r] - $start] > 2] 1
error_check_good lock_get:local \
@ -139,6 +142,8 @@ proc mlock_wait { } {
error_check_good lock_put:remote $result 0
# Clean up remote
set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ]
error_check_good remote_free_id $result 0
set ret [send_cmd $f1 "reset_env $remote_env"]
close $f1
@ -146,6 +151,7 @@ proc mlock_wait { } {
# Now close up locally
set ret [$local_lock put]
error_check_good lock_put $ret 0
error_check_good lock_id_free [$local_env lock_id_free $locker1] 0
reset_env $local_env
}

View file

@ -1,48 +1,99 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: lock003.tcl,v 11.16 2000/08/25 14:21:51 sue Exp $
# $Id: lock003.tcl,v 11.25 2002/09/05 17:23:06 sandstro Exp $
#
# Exercise multi-process aspects of lock. Generate a bunch of parallel
# testers that try to randomly obtain locks.
proc lock003 { dir {iter 500} {max 1000} {procs 5} {ldegree 5} {objs 75} \
{reads 65} {wait 1} {conflicts { 3 0 0 0 0 0 1 0 1 1}} {seeds {}} } {
# TEST lock003
# TEST Exercise multi-process aspects of lock. Generate a bunch of parallel
# TEST testers that try to randomly obtain locks; make sure that the locks
# TEST correctly protect corresponding objects.
proc lock003 { {iter 500} {max 1000} {procs 5} } {
source ./include.tcl
global lock_curid
global lock_maxid
set ldegree 5
set objs 75
set reads 65
set wait 1
set conflicts { 0 0 0 0 0 1 0 1 1}
set seeds {}
puts "Lock003: Multi-process random lock test"
# Clean up after previous runs
env_cleanup $dir
env_cleanup $testdir
# Open/create the lock region
set e [berkdb env -create -lock -home $dir]
puts "\tLock003.a: Create environment"
set e [berkdb_env -create -lock -home $testdir]
error_check_good env_open [is_substr $e env] 1
$e lock_id_set $lock_curid $lock_maxid
set ret [$e close]
error_check_good env_close $ret 0
error_check_good env_close [$e close] 0
# Now spawn off processes
set pidlist {}
for { set i 0 } {$i < $procs} {incr i} {
if { [llength $seeds] == $procs } {
set s [lindex $seeds $i]
}
puts "$tclsh_path\
$test_path/wrap.tcl \
lockscript.tcl $dir/$i.lockout\
$dir $iter $objs $wait $ldegree $reads &"
# puts "$tclsh_path\
# $test_path/wrap.tcl \
# lockscript.tcl $testdir/$i.lockout\
# $testdir $iter $objs $wait $ldegree $reads &"
set p [exec $tclsh_path $test_path/wrap.tcl \
lockscript.tcl $testdir/lock003.$i.out \
$dir $iter $objs $wait $ldegree $reads &]
$testdir $iter $objs $wait $ldegree $reads &]
lappend pidlist $p
}
puts "Lock003: $procs independent processes now running"
watch_procs 30 10800
puts "\tLock003.b: $procs independent processes now running"
watch_procs $pidlist 30 10800
# Check for test failure
set e [eval findfail [glob $testdir/lock003.*.out]]
error_check_good "FAIL: error message(s) in log files" $e 0
# Remove log files
for { set i 0 } {$i < $procs} {incr i} {
fileremove -f $dir/$i.lockout
fileremove -f $testdir/lock003.$i.out
}
}
# Create and destroy flag files to show we have an object locked, and
# verify that the correct files exist or don't exist given that we've
# just read or write locked a file.
proc lock003_create { rw obj } {
	# Drop an empty flag file recording that this process holds an
	# $rw lock on $obj; lock003_vrfy inspects these for conflicts.
	source ./include.tcl

	set flagfile $testdir/L3FLAG.$rw.[pid].$obj
	close [open $flagfile w]
}
proc lock003_destroy { obj } {
	# Remove this process's flag file for $obj; exactly one such
	# file must exist at this point.
	source ./include.tcl

	set matches [glob -nocomplain $testdir/L3FLAG.*.[pid].$obj]
	error_check_good l3_destroy [llength $matches] 1
	fileremove $matches
}
proc lock003_vrfy { rw obj } {
	# Before taking an $rw lock on $obj, verify no conflicting flag
	# files exist: a writer requires that no flags at all exist for
	# the object, a reader only that no write flags exist.
	source ./include.tcl

	set pref $testdir/L3FLAG
	if { $rw == "write" } {
		error_check_good "number of other locks on $obj" \
		    [llength [glob -nocomplain $pref.*.*.$obj]] 0
	} else {
		error_check_good "number of write locks on $obj" \
		    [llength [glob -nocomplain $pref.write.*.$obj]] 0
	}
}

29
bdb/test/lock004.tcl Normal file
View file

@ -0,0 +1,29 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: lock004.tcl,v 11.5 2002/04/25 19:30:30 sue Exp $
#
# TEST lock004
# TEST Test locker ids wrapping around.
proc lock004 {} {
source ./include.tcl
global lock_curid
global lock_maxid
# Save the suite-wide locker-id bounds so they can be restored
# after the test, then position the current id just below the
# maximum so the very next allocations force id wraparound.
set save_curid $lock_curid
set save_maxid $lock_maxid
set lock_curid [expr $lock_maxid - 1]
puts "Lock004: Locker id wraparound test"
puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids"
# Re-run the basic, multi-process, and random lock tests; they
# read lock_curid/lock_maxid via lock_id_set, so each now starts
# at the wraparound boundary.
lock001
lock002
lock003
# Restore the globals for subsequent tests.
set lock_curid $save_curid
set lock_maxid $save_maxid
}

177
bdb/test/lock005.tcl Normal file
View file

@ -0,0 +1,177 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2001
# Sleepycat Software. All rights reserved.
#
# $Id: lock005.tcl,v 1.7 2002/08/08 15:38:07 bostic Exp $
#
# TEST lock005
# TEST Check that page locks are being released properly.
proc lock005 { } {
source ./include.tcl

# Verify that page locks are released at the right times: a lock
# held by one transaction blocks another, and commit/abort in
# either order leaves the expected number of locks outstanding
# (checked via how_many_locks / end_order_test below).
puts "Lock005: Page lock release test"

# Clean up after previous runs
env_cleanup $testdir

# Open/create the lock region
set e [berkdb_env -create -lock -home $testdir -txn -log]
error_check_good env_open [is_valid_env $e] TRUE

# Open/create the database
# NOTE(review): uses the "berkdb open" form rather than
# berkdb_open -- presumably equivalent in this suite; confirm.
set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db]
error_check_good dbopen [is_valid_db $db] TRUE

# Check that records are locking by trying to
# fetch a record on the wrong transaction.
puts "\tLock005.a: Verify that we are locking"

# Start the first transaction
set txn1 [$e txn -nowait]
error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
set ret [catch {$db put -txn $txn1 -append record1} recno1]
error_check_good dbput_txn1 $ret 0

# Start second txn while the first is still running ...
set txn2 [$e txn -nowait]
error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE

# ... and try to get a record from the first txn (should fail)
set ret [catch {$db get -txn $txn2 $recno1} res]
error_check_good dbget_wrong_record \
[is_substr $res "Lock not granted"] 1

# End transactions
error_check_good txn1commit [$txn1 commit] 0
how_many_locks 1 $e
error_check_good txn2commit [$txn2 commit] 0
# The number of locks stays the same here because the first
# lock is released and the second lock was never granted.
how_many_locks 1 $e

# Test lock behavior for both abort and commit
# Run every commit/abort combination, ending the transactions in
# both forward and reverse order.
puts "\tLock005.b: Verify locks after abort or commit"
foreach endorder {forward reverse} {
end_order_test $db $e commit abort $endorder
end_order_test $db $e abort commit $endorder
end_order_test $db $e commit commit $endorder
end_order_test $db $e abort abort $endorder
}

# Clean up
error_check_good db_close [$db close] 0
error_check_good env_close [$e close] 0
}
# Start two transactions that each append a record, then end them
# with $txn1end/$txn2end ("commit" or "abort") in the order given by
# $endorder ("forward" = txn1 first, "reverse" = txn2 first).  After
# each step, check the outstanding lock count and that the surviving
# transaction sees (or cannot see) the other's record as appropriate.
proc end_order_test { db e txn1end txn2end endorder } {
# Start one transaction
set txn1 [$e txn -nowait]
error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
set ret [catch {$db put -txn $txn1 -append record1} recno1]
error_check_good dbput_txn1 $ret 0

# Check number of locks
how_many_locks 2 $e

# Start a second transaction while first is still running
set txn2 [$e txn -nowait]
error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
set ret [catch {$db put -txn $txn2 -append record2} recno2]
error_check_good dbput_txn2 $ret 0
how_many_locks 3 $e

# Now commit or abort one txn and make sure the other is okay
if {$endorder == "forward"} {
# End transaction 1 first
puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2"
error_check_good txn_$txn1end [$txn1 $txn1end] 0
how_many_locks 2 $e

# txn1 is now ended, but txn2 is still running
set ret1 [catch {$db get -txn $txn2 $recno1} res1]
set ret2 [catch {$db get -txn $txn2 $recno2} res2]
if { $txn1end == "commit" } {
# Committed data must be visible to txn2.
error_check_good txn2_sees_txn1 $ret1 0
error_check_good txn2_sees_txn2 $ret2 0
} else {
# transaction 1 was aborted
error_check_good txn2_cantsee_txn1 [llength $res1] 0
}

# End transaction 2 second
error_check_good txn_$txn2end [$txn2 $txn2end] 0
how_many_locks 1 $e

# txn1 and txn2 should both now be invalid
# The get no longer needs to be transactional
set ret3 [catch {$db get $recno1} res3]
set ret4 [catch {$db get $recno2} res4]
if { $txn2end == "commit" } {
error_check_good txn2_sees_txn1 $ret3 0
error_check_good txn2_sees_txn2 $ret4 0
error_check_good txn2_has_record2 \
[is_substr $res4 "record2"] 1
} else {
# transaction 2 was aborted
error_check_good txn2_cantsee_txn1 $ret3 0
error_check_good txn2_aborted [llength $res4] 0
}
} elseif { $endorder == "reverse" } {
# End transaction 2 first
puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1"
error_check_good txn_$txn2end [$txn2 $txn2end] 0
how_many_locks 2 $e

# txn2 is ended, but txn1 is still running
set ret1 [catch {$db get -txn $txn1 $recno1} res1]
set ret2 [catch {$db get -txn $txn1 $recno2} res2]
if { $txn2end == "commit" } {
# Committed data must be visible to txn1.
error_check_good txn1_sees_txn1 $ret1 0
error_check_good txn1_sees_txn2 $ret2 0
} else {
# transaction 2 was aborted
error_check_good txn1_cantsee_txn2 [llength $res2] 0
}

# End transaction 1 second
error_check_good txn_$txn1end [$txn1 $txn1end] 0
how_many_locks 1 $e

# txn1 and txn2 should both now be invalid
# The get no longer needs to be transactional
set ret3 [catch {$db get $recno1} res3]
set ret4 [catch {$db get $recno2} res4]
if { $txn1end == "commit" } {
error_check_good txn1_sees_txn1 $ret3 0
error_check_good txn1_sees_txn2 $ret4 0
error_check_good txn1_has_record1 \
[is_substr $res3 "record1"] 1
} else {
# transaction 1 was aborted
error_check_good txn1_cantsee_txn2 $ret4 0
error_check_good txn1_aborted [llength $res3] 0
}
}
}
proc how_many_locks { expected env } {
	# Scan the env's lock_stat output for the "Current number of
	# locks" field and verify it equals $expected.  Fails if the
	# field is missing from the stat output entirely.
	set target "Current number of locks"
	set found 0
	foreach entry [$env lock_stat] {
		if { [is_substr [lindex $entry 0] $target] != 0 } {
			set found 1
			error_check_good expected_nlocks \
			    [lindex $entry 1] $expected
			break
		}
	}
	error_check_good checked $found 1
}

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: lockscript.tcl,v 11.11 2000/03/24 19:53:39 krinsky Exp $
# $Id: lockscript.tcl,v 11.17 2002/02/20 17:08:23 sandstro Exp $
#
# Random lock tester.
# Usage: lockscript dir numiters numobjs sleepint degree readratio
@ -32,25 +32,28 @@ set numobjs [ lindex $argv 2 ]
set sleepint [ lindex $argv 3 ]
set degree [ lindex $argv 4 ]
set readratio [ lindex $argv 5 ]
set locker [pid]
# Initialize random number generator
global rand_init
berkdb srand $rand_init
catch { berkdb_env -create -lock -home $dir } e
error_check_good env_open [is_substr $e env] 1
catch { $e lock_id } locker
error_check_good locker [is_valid_locker $locker] TRUE
puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
puts "$sleepint $degree $readratio"
flush stdout
set e [berkdb env -create -lock -home $dir]
error_check_good env_open [is_substr $e env] 1
for { set iter 0 } { $iter < $numiters } { incr iter } {
set nlocks [berkdb random_int 1 $degree]
# We will always lock objects in ascending order to avoid
# deadlocks.
set lastobj 1
set locklist {}
set objlist {}
for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
# Pick lock parameters
set obj [berkdb random_int $lastobj $numobjs]
@ -61,20 +64,46 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
} else {
set rw write
}
puts "[timestamp] $locker $lnum: $rw $obj"
puts "[timestamp -c] $locker $lnum: $rw $obj"
# Do get; add to list
set lockp [$e lock_get $rw $locker $obj]
catch {$e lock_get $rw $locker $obj} lockp
error_check_good lock_get [is_valid_lock $lockp $e] TRUE
# Create a file to flag that we've a lock of the given
# type, after making sure only other read locks exist
# (if we're read locking) or no other locks exist (if
# we're writing).
lock003_vrfy $rw $obj
lock003_create $rw $obj
lappend objlist [list $obj $rw]
lappend locklist $lockp
if {$lastobj > $numobjs} {
break
}
}
# Pick sleep interval
tclsleep [berkdb random_int 1 $sleepint]
puts "[timestamp -c] $locker sleeping"
# We used to sleep 1 to $sleepint seconds. This makes the test
# run for hours. Instead, make it sleep for 10 to $sleepint * 100
# milliseconds, for a maximum sleep time of 0.5 s.
after [berkdb random_int 10 [expr $sleepint * 100]]
puts "[timestamp -c] $locker awake"
# Now release locks
puts "[timestamp] $locker released locks"
puts "[timestamp -c] $locker released locks"
# Delete our locking flag files, then reverify. (Note that the
# locking flag verification function assumes that our own lock
# is not currently flagged.)
foreach pair $objlist {
set obj [lindex $pair 0]
set rw [lindex $pair 1]
lock003_destroy $obj
lock003_vrfy $rw $obj
}
release_list $locklist
flush stdout
}
@ -82,7 +111,7 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
set ret [$e close]
error_check_good env_close $ret 0
puts "[timestamp] $locker Complete"
puts "[timestamp -c] $locker Complete"
flush stdout
exit

View file

@ -1,337 +0,0 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Sleepycat Software. All rights reserved.
#
# $Id: log.tcl,v 11.17 2000/11/30 20:09:19 dda Exp $
#
# Options are:
# -dir <directory in which to store memp>
# -maxfilesize <maxsize of log file>
# -iterations <iterations>
# -stat
# log_usage --
#	Print the usage summary for the log test driver.
proc log_usage {} {
	set msg "log -dir <directory> -iterations <number of ops>"
	append msg " -maxfilesize <max size of log files> -stat"
	puts $msg
}
# logtest --
#	Top-level driver for the log subsystem tests.  Parses its
#	option-style arguments, cleans out any old environment, then
#	runs log001 (twice, the second time with a record count large
#	enough to span multiple log files), log002, log003 and log004.
# Options:
#	-dir <directory>	test directory (overrides $testdir)
#	-iterations <n>		record count for log001 (default 1000)
#	-maxfilesize <bytes>	log file size limit (default 128K)
#	-stat			request statistics output
proc logtest { args } {
source ./include.tcl
global rand_init
# Set defaults
set iterations 1000
set maxfile [expr 1024 * 128]
set dostat 0
# Options are matched by leading-letter regexp (-d.*, -i.*, ...);
# anything unrecognized prints the usage message and aborts.
for { set i 0 } { $i < [llength $args] } {incr i} {
switch -regexp -- [lindex $args $i] {
-d.* { incr i; set testdir [lindex $args $i] }
-i.* { incr i; set iterations [lindex $args $i] }
-m.* { incr i; set maxfile [lindex $args $i] }
-s.* { set dostat 1 }
default {
puts -nonewline "FAIL:[timestamp] Usage: "
log_usage
return
}
}
}
# Three times the iteration count forces log001 across file boundaries.
set multi_log [expr 3 * $iterations]
# Clean out old log if it existed
puts "Unlinking log: error message OK"
env_cleanup $testdir
# Now run the various functionality tests
berkdb srand $rand_init
log001 $testdir $maxfile $iterations
log001 $testdir $maxfile $multi_log
log002 $testdir $maxfile
log003 $testdir $maxfile
log004 $testdir
}
# log001 --
#	Basic log put/get test (pre-log-cursor API: reads go through
#	"$env log_get").  Writes nrecs patterned records, then reads
#	them back four ways: forward, backward, sequentially by saved
#	LSN, and randomly by LSN.
# Arguments:
#	dir	home directory for the environment
#	max	maximum log file size (-log_max)
#	nrecs	number of log records to write
proc log001 { dir max nrecs } {
source ./include.tcl
puts "Log001: Basic put/get test"
env_cleanup $dir
set env [berkdb env -log -create -home $dir \
-mode 0644 -log_max $max]
error_check_bad log_env:$dir $env NULL
error_check_good log:$dir [is_substr $env "env"] 1
# We will write records to the log and make sure we can
# read them back correctly. We'll use a standard pattern
# repeated some number of times for each record.
set lsn_list {}
set rec_list {}
puts "Log001.a: Writing $nrecs log records"
for { set i 0 } { $i < $nrecs } { incr i } {
set rec ""
# Record i is "$i:logrec:$i" repeated (i % 10) + 1 times,
# so record lengths vary in a predictable cycle.
for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
set rec $rec$i:logrec:$i
}
set lsn [$env log_put $rec]
error_check_bad log_put [is_substr $lsn log_cmd] 1
lappend lsn_list $lsn
lappend rec_list $rec
}
# Forward scan: records must come back in the order written.
puts "Log001.b: Retrieving log records sequentially (forward)"
set i 0
for { set grec [$env log_get -first] } { [llength $grec] != 0 } {
set grec [$env log_get -next]} {
error_check_good log_get:seq [lindex $grec 1] \
[lindex $rec_list $i]
incr i
}
# Backward scan: index is decremented before each comparison.
puts "Log001.c: Retrieving log records sequentially (backward)"
set i [llength $rec_list]
for { set grec [$env log_get -last] } { [llength $grec] != 0 } {
set grec [$env log_get -prev] } {
incr i -1
error_check_good \
log_get:seq [lindex $grec 1] [lindex $rec_list $i]
}
puts "Log001.d: Retrieving log records sequentially by LSN"
set i 0
foreach lsn $lsn_list {
set grec [$env log_get -set $lsn]
error_check_good \
log_get:seq [lindex $grec 1] [lindex $rec_list $i]
incr i
}
puts "Log001.e: Retrieving log records randomly by LSN"
set m [expr [llength $lsn_list] - 1]
for { set i 0 } { $i < $nrecs } { incr i } {
set recno [berkdb random_int 0 $m ]
set lsn [lindex $lsn_list $recno]
set grec [$env log_get -set $lsn]
error_check_good \
log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
}
# Close and unlink the file
error_check_good env:close:$env [$env close] 0
error_check_good envremove:$dir [berkdb envremove -home $dir] 0
puts "Log001 Complete"
}
# log002 --
#	Multiple-log test (pre-log-cursor API).  Writes random records
#	until three log files' worth of data exists, remembering every
#	hundredth (LSN, record) pair, then verifies log_compare
#	ordering, log_file name mapping, and record contents by LSN.
# Arguments:
#	dir	home directory for the environment
#	max	maximum log file size (-log_max, default 32K)
proc log002 { dir {max 32768} } {
source ./include.tcl
puts "Log002: Multiple log test w/trunc, file, compare functionality"
env_cleanup $dir
set env [berkdb env -create -home $dir -mode 0644 -log -log_max $max]
error_check_bad log_env:$dir $env NULL
error_check_good log:$dir [is_substr $env "env"] 1
# We'll record every hundred'th record for later use
set info_list {}
set i 0
puts "Log002.a: Writing log records"
# NB: "incr s $len" runs after the loop body, so $len is always
# set by the time the increment script is evaluated.
for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
set rec [random_data 120 0 0]
set len [string length $rec]
set lsn [$env log_put $rec]
if { [expr $i % 100 ] == 0 } {
lappend info_list [list $lsn $rec]
}
incr i
}
puts "Log002.b: Checking log_compare"
# Saved LSNs must be strictly increasing; verify both directions
# of the comparison plus self-comparison for each pair.
set last {0 0}
foreach p $info_list {
set l [lindex $p 0]
# NOTE(review): $last starts as {0 0} (length 2), so this guard
# is always true; the first comparison is against LSN {0 0}.
if { [llength $last] != 0 } {
error_check_good \
log_compare [$env log_compare $l $last] 1
error_check_good \
log_compare [$env log_compare $last $l] -1
error_check_good \
log_compare [$env log_compare $l $l] 0
}
set last $l
}
puts "Log002.c: Checking log_file"
# Every saved LSN must map to one of the on-disk log files.
set flist [glob $dir/log*]
foreach p $info_list {
set lsn [lindex $p 0]
set f [$env log_file $lsn]
# Change all backslash separators on Windows to forward slash
# separators, which is what the rest of the test suite expects.
regsub -all {\\} $f {/} f
error_check_bad log_file:$f [lsearch $flist $f] -1
}
puts "Log002.d: Verifying records"
# Read the saved records back by LSN, in reverse order.
for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
set p [lindex $info_list $i]
set grec [$env log_get -set [lindex $p 0]]
error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
}
# Close and unlink the file
error_check_good env:close:$env [$env close] 0
error_check_good envremove:$dir [berkdb envremove -home $dir] 0
puts "Log002 Complete"
}
# log003 --
#	Verify log_flush behavior (pre-log-cursor API).  For each of
#	three record sizes: (a) flush the last record, "crash" via an
#	unflushed close, reopen and confirm the record survived;
#	(b) flush a record in the middle of the log and confirm it
#	survived a close/envremove/reopen cycle.
# Arguments:
#	dir	home directory for the environment
#	max	maximum log file size (-log_max, default 32K)
proc log003 { dir {max 32768} } {
source ./include.tcl
puts "Log003: Verify log_flush behavior"
env_cleanup $dir
set short_rec "abcdefghijklmnopqrstuvwxyz"
set long_rec [repeat $short_rec 200]
set very_long_rec [repeat $long_rec 4]
# The records contain no whitespace, so this quoted list safely
# iterates over exactly the three records.
foreach rec "$short_rec $long_rec $very_long_rec" {
puts "Log003.a: Verify flush on [string length $rec] byte rec"
set env [berkdb env -log -home $dir \
-create -mode 0644 -log_max $max]
error_check_bad log_env:$dir $env NULL
error_check_good log:$dir [is_substr $env "env"] 1
set lsn [$env log_put $rec]
error_check_bad log_put [lindex $lsn 0] "ERROR:"
set ret [$env log_flush $lsn]
error_check_good log_flush $ret 0
# Now, we want to crash the region and recheck. Closing the
# log does not flush any records, so we'll use a close to
# do the "crash"
set ret [$env close]
error_check_good log_env:close $ret 0
# Now, remove the log region
#set ret [berkdb envremove -home $dir]
#error_check_good env:remove $ret 0
# Re-open the log and try to read the record.
set env [berkdb env -create -home $dir \
-log -mode 0644 -log_max $max]
error_check_bad log_env:$dir $env NULL
error_check_good log:$dir [is_substr $env "env"] 1
# The flushed record must still be the first one in the log.
set gotrec [$env log_get -first]
error_check_good lp_get [lindex $gotrec 1] $rec
# Close and unlink the file
error_check_good env:close:$env [$env close] 0
error_check_good envremove:$dir [berkdb envremove -home $dir] 0
log_cleanup $dir
}
foreach rec "$short_rec $long_rec $very_long_rec" {
puts "Log003.b: \
Verify flush on non-last record [string length $rec]"
set env [berkdb env \
-create -log -home $dir -mode 0644 -log_max $max]
error_check_bad log_env:$dir $env NULL
error_check_good log:$dir [is_substr $env "env"] 1
# Put 10 random records
for { set i 0 } { $i < 10 } { incr i} {
set r [random_data 450 0 0]
set lsn [$env log_put $r]
error_check_bad log_put [lindex $lsn 0] "ERROR:"
}
# Put the record we are interested in
set save_lsn [$env log_put $rec]
error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
# Put 10 more random records
for { set i 0 } { $i < 10 } { incr i} {
set r [random_data 450 0 0]
set lsn [$env log_put $r]
error_check_bad log_put [lindex $lsn 0] "ERROR:"
}
# Now check the flush
set ret [$env log_flush $save_lsn]
error_check_good log_flush $ret 0
# Now, we want to crash the region and recheck. Closing the
# log does not flush any records, so we'll use a close to
# do the "crash"
#
# Now, close and remove the log region
error_check_good env:close:$env [$env close] 0
set ret [berkdb envremove -home $dir]
error_check_good env:remove $ret 0
# Re-open the log and try to read the record.
set env [berkdb env \
-home $dir -create -log -mode 0644 -log_max $max]
error_check_bad log_env:$dir $env NULL
error_check_good log:$dir [is_substr $env "env"] 1
# Flushing $save_lsn must have persisted that record even though
# later records were written after it.
set gotrec [$env log_get -set $save_lsn]
error_check_good lp_get [lindex $gotrec 1] $rec
# Close and unlink the file
error_check_good env:close:$env [$env close] 0
error_check_good envremove:$dir [berkdb envremove -home $dir] 0
log_cleanup $dir
}
puts "Log003 Complete"
}
# Make sure that if we do PREVs on a log, but the beginning of the
# log has been truncated, we do the right thing.
# log004 --
#	Verify that a -prev past the truncated beginning of the log
#	returns not-found (empty result) rather than misbehaving
#	(pre-log-cursor API).
# Arguments:
#	dir	home directory for the environment
proc log004 { dir } {
source ./include.tcl
puts "Log004: Prev on log when beginning of log has been truncated."
# Use archive test to populate log
env_cleanup $dir
puts "Log004.a: Call archive to populate log."
archive
# Delete all log files under 100
puts "Log004.b: Delete all log files under 100."
# catch returns non-zero when the glob matches nothing, in which
# case there is nothing to remove.
set ret [catch { glob $dir/log.00000000* } result]
if { $ret == 0 } {
eval fileremove -f $result
}
# Now open the log and get the first record and try a prev
puts "Log004.c: Open truncated log, attempt to access missing portion."
set myenv [berkdb env -create -log -home $dir]
error_check_good log_open [is_substr $myenv "env"] 1
set ret [$myenv log_get -first]
error_check_bad log_get [llength $ret] 0
# This should give DB_NOTFOUND which is a ret of length 0
catch {$myenv log_get -prev} ret
error_check_good log_get_prev [string length $ret] 0
puts "Log004.d: Close log and environment."
error_check_good log_close [$myenv close] 0
puts "Log004 complete."
}

120
bdb/test/log001.tcl Normal file
View file

@ -0,0 +1,120 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: log001.tcl,v 11.29 2002/04/30 20:27:56 sue Exp $
#
# TEST log001
# TEST Read/write log records.
# TEST	log001
# TEST	Read/write log records.
#
# Runs the basic put/get body over every combination of record
# count (1000 and 15000), fixed/variable record length, and
# plain vs. AES-encrypted environment.
proc log001 { } {
	global passwd
	global rand_init

	berkdb srand $rand_init
	set iter 1000
	set max [expr 1024 * 128]

	# Same run order as before: for each record count, fixed then
	# variable length unencrypted, then the same pair encrypted.
	foreach nrecs [list $iter [expr $iter * 15]] {
		foreach encargs [list "" "-encryptaes $passwd"] {
			log001_body $max $nrecs 1 $encargs
			log001_body $max $nrecs 0 $encargs
		}
	}
}
# log001_body --
#	Worker for log001: write nrecs records through log_put, then
#	read them back via a log cursor forward, backward, by saved
#	LSN (sequentially and randomly), and check -current after
#	-first and -last.
# Arguments:
#	max		maximum log file size (-log_max)
#	nrecs		number of log records to write
#	fixedlength	1 = patterned records only; otherwise append
#			random data so lengths vary
#	encargs		extra env-open args, e.g. "-encryptaes <passwd>"
proc log001_body { max nrecs fixedlength {encargs ""} } {
source ./include.tcl
puts -nonewline "Log001: Basic put/get log records "
if { $fixedlength == 1 } {
puts "(fixed-length $encargs)"
} else {
puts "(variable-length $encargs)"
}
env_cleanup $testdir
set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \
$encargs -log_max $max]
error_check_good envopen [is_valid_env $env] TRUE
# We will write records to the log and make sure we can
# read them back correctly. We'll use a standard pattern
# repeated some number of times for each record.
set lsn_list {}
set rec_list {}
puts "\tLog001.a: Writing $nrecs log records"
for { set i 0 } { $i < $nrecs } { incr i } {
set rec ""
# Record i is "$i:logrec:$i" repeated (i % 10) + 1 times.
for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
set rec $rec$i:logrec:$i
}
# In variable-length mode, append random filler so record
# sizes are not a deterministic function of i.
if { $fixedlength != 1 } {
set rec $rec:[random_data 237 0 0]
}
set lsn [$env log_put $rec]
error_check_bad log_put [is_substr $lsn log_cmd] 1
lappend lsn_list $lsn
lappend rec_list $rec
}
# Open a log cursor.
set logc [$env log_cursor]
error_check_good logc [is_valid_logc $logc $env] TRUE
puts "\tLog001.b: Retrieving log records sequentially (forward)"
set i 0
for { set grec [$logc get -first] } { [llength $grec] != 0 } {
set grec [$logc get -next]} {
error_check_good log_get:seq [lindex $grec 1] \
[lindex $rec_list $i]
incr i
}
puts "\tLog001.c: Retrieving log records sequentially (backward)"
# Index is decremented before each comparison during the
# backward scan.
set i [llength $rec_list]
for { set grec [$logc get -last] } { [llength $grec] != 0 } {
set grec [$logc get -prev] } {
incr i -1
error_check_good \
log_get:seq [lindex $grec 1] [lindex $rec_list $i]
}
puts "\tLog001.d: Retrieving log records sequentially by LSN"
set i 0
foreach lsn $lsn_list {
set grec [$logc get -set $lsn]
error_check_good \
log_get:seq [lindex $grec 1] [lindex $rec_list $i]
incr i
}
puts "\tLog001.e: Retrieving log records randomly by LSN"
set m [expr [llength $lsn_list] - 1]
for { set i 0 } { $i < $nrecs } { incr i } {
set recno [berkdb random_int 0 $m ]
set lsn [lindex $lsn_list $recno]
set grec [$logc get -set $lsn]
error_check_good \
log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
}
# -current must return the record the cursor is positioned on.
puts "\tLog001.f: Retrieving first/current, last/current log record"
set grec [$logc get -first]
error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
set grec [$logc get -current]
error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
set i [expr [llength $rec_list] - 1]
set grec [$logc get -last]
error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
set grec [$logc get -current]
error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
# Close and unlink the file
error_check_good log_cursor:close:$logc [$logc close] 0
error_check_good env:close [$env close] 0
error_check_good envremove [berkdb envremove -home $testdir] 0
}

85
bdb/test/log002.tcl Normal file
View file

@ -0,0 +1,85 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: log002.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
#
# TEST log002
# TEST Tests multiple logs
# TEST Log truncation
# TEST LSN comparison and file functionality.
# log002 --
#	Multiple-log test.  Writes random records until three log
#	files' worth of data exists, remembering every hundredth
#	(LSN, record) pair, then verifies log_compare ordering,
#	log_file name mapping, and record contents via a log cursor.
proc log002 { } {
source ./include.tcl
puts "Log002: Multiple log test w/trunc, file, compare functionality"
env_cleanup $testdir
set max [expr 1024 * 128]
set env [berkdb_env -create -home $testdir -mode 0644 \
-log -log_max $max]
error_check_good envopen [is_valid_env $env] TRUE
# We'll record every hundred'th record for later use
set info_list {}
puts "\tLog002.a: Writing log records"
set i 0
# NB: "incr s $len" runs after the loop body, so $len is always
# set by the time the increment script is evaluated.
for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
set rec [random_data 120 0 0]
set len [string length $rec]
set lsn [$env log_put $rec]
if { [expr $i % 100 ] == 0 } {
lappend info_list [list $lsn $rec]
}
incr i
}
puts "\tLog002.b: Checking log_compare"
# Saved LSNs must be strictly increasing; check both directions
# of the comparison plus self-comparison.
set last {0 0}
foreach p $info_list {
set l [lindex $p 0]
# NOTE(review): $last starts as {0 0} (length 2), so this guard
# is always true; the first comparison is against LSN {0 0}.
if { [llength $last] != 0 } {
error_check_good \
log_compare [$env log_compare $l $last] 1
error_check_good \
log_compare [$env log_compare $last $l] -1
error_check_good \
log_compare [$env log_compare $l $l] 0
}
set last $l
}
puts "\tLog002.c: Checking log_file"
# Every saved LSN must map to one of the on-disk log files.
set flist [glob $testdir/log*]
foreach p $info_list {
set lsn [lindex $p 0]
set f [$env log_file $lsn]
# Change all backslash separators on Windows to forward slash
# separators, which is what the rest of the test suite expects.
regsub -all {\\} $f {/} f
error_check_bad log_file:$f [lsearch $flist $f] -1
}
puts "\tLog002.d: Verifying records"
set logc [$env log_cursor]
error_check_good log_cursor [is_valid_logc $logc $env] TRUE
# Read the saved records back by LSN, in reverse order.
for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
set p [lindex $info_list $i]
set grec [$logc get -set [lindex $p 0]]
error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
}
# Close and unlink the file
error_check_good log_cursor:close:$logc [$logc close] 0
error_check_good env:close [$env close] 0
error_check_good envremove [berkdb envremove -home $testdir] 0
}

118
bdb/test/log003.tcl Normal file
View file

@ -0,0 +1,118 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: log003.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
#
# TEST log003
# TEST Verify that log_flush is flushing records correctly.
# log003 --
#	Verify that log_flush is flushing records correctly.  For each
#	of three record sizes: (a) flush the last record, "crash" via
#	an unflushed close, reopen and confirm the record survived;
#	(b) flush a record in the middle of the log and confirm it
#	survived a close/envremove/reopen cycle.
proc log003 { } {
source ./include.tcl
puts "Log003: Verify log_flush behavior"
set max [expr 1024 * 128]
env_cleanup $testdir
set short_rec "abcdefghijklmnopqrstuvwxyz"
set long_rec [repeat $short_rec 200]
set very_long_rec [repeat $long_rec 4]
# The records contain no whitespace, so this quoted list safely
# iterates over exactly the three records.
foreach rec "$short_rec $long_rec $very_long_rec" {
puts "\tLog003.a: Verify flush on [string length $rec] byte rec"
set env [berkdb_env -log -home $testdir \
-create -mode 0644 -log_max $max]
error_check_good envopen [is_valid_env $env] TRUE
set lsn [$env log_put $rec]
error_check_bad log_put [lindex $lsn 0] "ERROR:"
set ret [$env log_flush $lsn]
error_check_good log_flush $ret 0
# Now, we want to crash the region and recheck. Closing the
# log does not flush any records, so we'll use a close to
# do the "crash"
set ret [$env close]
error_check_good log_env:close $ret 0
# Now, remove the log region
#set ret [berkdb envremove -home $testdir]
#error_check_good env:remove $ret 0
# Re-open the log and try to read the record.
set env [berkdb_env -create -home $testdir \
-log -mode 0644 -log_max $max]
error_check_good envopen [is_valid_env $env] TRUE
set logc [$env log_cursor]
error_check_good log_cursor [is_valid_logc $logc $env] TRUE
# The flushed record must still be the first one in the log.
set gotrec [$logc get -first]
error_check_good lp_get [lindex $gotrec 1] $rec
# Close and unlink the file
error_check_good log_cursor:close:$logc [$logc close] 0
error_check_good env:close:$env [$env close] 0
error_check_good envremove [berkdb envremove -home $testdir] 0
log_cleanup $testdir
}
foreach rec "$short_rec $long_rec $very_long_rec" {
puts "\tLog003.b: \
Verify flush on non-last record [string length $rec]"
set env [berkdb_env \
-create -log -home $testdir -mode 0644 -log_max $max]
error_check_good envopen [is_valid_env $env] TRUE
# Put 10 random records
for { set i 0 } { $i < 10 } { incr i} {
set r [random_data 450 0 0]
set lsn [$env log_put $r]
error_check_bad log_put [lindex $lsn 0] "ERROR:"
}
# Put the record we are interested in
set save_lsn [$env log_put $rec]
error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
# Put 10 more random records
for { set i 0 } { $i < 10 } { incr i} {
set r [random_data 450 0 0]
set lsn [$env log_put $r]
error_check_bad log_put [lindex $lsn 0] "ERROR:"
}
# Now check the flush
set ret [$env log_flush $save_lsn]
error_check_good log_flush $ret 0
# Now, we want to crash the region and recheck. Closing the
# log does not flush any records, so we'll use a close to
# do the "crash"
#
# Now, close and remove the log region
error_check_good env:close:$env [$env close] 0
set ret [berkdb envremove -home $testdir]
error_check_good env:remove $ret 0
# Re-open the log and try to read the record.
set env [berkdb_env \
-home $testdir -create -log -mode 0644 -log_max $max]
error_check_good envopen [is_valid_env $env] TRUE
set logc [$env log_cursor]
error_check_good log_cursor [is_valid_logc $logc $env] TRUE
# Flushing $save_lsn must have persisted that record even though
# later records were written after it.
set gotrec [$logc get -set $save_lsn]
error_check_good lp_get [lindex $gotrec 1] $rec
# Close and unlink the file
error_check_good log_cursor:close:$logc [$logc close] 0
error_check_good env:close:$env [$env close] 0
error_check_good envremove [berkdb envremove -home $testdir] 0
log_cleanup $testdir
}
}

46
bdb/test/log004.tcl Normal file
View file

@ -0,0 +1,46 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: log004.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
#
# TEST log004
# TEST Make sure that if we do PREVs on a log, but the beginning of the
# TEST log has been truncated, we do the right thing.
# log004 --
#	Verify that a cursor -prev past the truncated beginning of the
#	log returns not-found (empty result) rather than misbehaving.
proc log004 { } {
source ./include.tcl
puts "Log004: Prev on log when beginning of log has been truncated."
# Use archive test to populate log
env_cleanup $testdir
puts "\tLog004.a: Call archive to populate log."
archive
# Delete all log files under 100
puts "\tLog004.b: Delete all log files under 100."
# catch returns non-zero when the glob matches nothing, in which
# case there is nothing to remove.
set ret [catch { glob $testdir/log.00000000* } result]
if { $ret == 0 } {
eval fileremove -f $result
}
# Now open the log and get the first record and try a prev
puts "\tLog004.c: Open truncated log, attempt to access missing portion."
set env [berkdb_env -create -log -home $testdir]
error_check_good envopen [is_valid_env $env] TRUE
set logc [$env log_cursor]
error_check_good log_cursor [is_valid_logc $logc $env] TRUE
set ret [$logc get -first]
error_check_bad log_get [llength $ret] 0
# This should give DB_NOTFOUND which is a ret of length 0
catch {$logc get -prev} ret
error_check_good log_get_prev [string length $ret] 0
puts "\tLog004.d: Close log and environment."
error_check_good log_cursor_close [$logc close] 0
error_check_good log_close [$env close] 0
}

89
bdb/test/log005.tcl Normal file
View file

@ -0,0 +1,89 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: log005.tcl,v 11.1 2002/05/30 22:16:49 bostic Exp $
#
# TEST log005
# TEST Check that log file sizes can change on the fly.
# log005 --
#	Check that log file sizes can change on the fly: set -log_max,
#	lower it via a second open, fill the first log file, and verify
#	the new size survives recovery.
proc log005 { } {
	source ./include.tcl
	puts "Log005: Check that log file sizes can change."
	env_cleanup $testdir
	# Open the environment, set and check the log file size.
	puts "\tLog005.a: open, set and check the log file size."
	set env [berkdb_env \
	    -create -home $testdir -log_buffer 10000 -log_max 1000000 -txn]
	error_check_good envopen [is_valid_env $env] TRUE
	set db [berkdb_open \
	    -env $env -create -mode 0644 -btree -auto_commit a.db]
	error_check_good dbopen [is_valid_db $db] TRUE
	# Get the current log file maximum.
	set max [log005_stat $env "Current log file size"]
	error_check_good max_set $max 1000000
	# Reset the log file size using a second open, and make sure
	# it changes.
	puts "\tLog005.b: reset during open, check the log file size."
	set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
	error_check_good envtmp_open [is_valid_env $envtmp] TRUE
	error_check_good envtmp_close [$envtmp close] 0
	set tmp [log005_stat $env "Current log file size"]
	# Observed value first, expected second, matching every other
	# error_check_good call; the original had these two reversed,
	# which still compared equal but would have produced a
	# misleading "got/expected" failure message.
	error_check_good max_changed $tmp 900000
	puts "\tLog005.c: fill in the current log file size."
	# Fill in the current log file.
	set new_lsn 0
	set data [repeat "a" 1024]
	for { set i 1 } \
	    { [log005_stat $env "Current log file number"] != 2 } \
	    { incr i } {
		set t [$env txn]
		error_check_good txn [is_valid_txn $t $env] TRUE
		set ret [$db put -txn $t $i $data]
		error_check_good put $ret 0
		error_check_good txn [$t commit] 0
		# Remember the last offset seen in the first log file.
		set last_lsn $new_lsn
		set new_lsn [log005_stat $env "Current log file offset"]
	}
	# The last LSN in the first file should be more than our new
	# file size.
	error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1
	# Close down the environment.
	error_check_good db_close [$db close] 0
	error_check_good env_close [$env close] 0
	puts "\tLog005.d: check the log file size is unchanged after recovery."
	# Open again, running recovery.  Verify the log file size is as we
	# left it.
	set env [berkdb_env -create -home $testdir -recover -txn]
	error_check_good env_open [is_valid_env $env] TRUE
	set tmp [log005_stat $env "Current log file size"]
	# Observed first, expected second (see note above).
	error_check_good after_recovery $tmp 900000
	error_check_good env_close [$env close] 0
}
# log005_stat --
# Return the current log statistics.
proc log005_stat { env s } {
	# Scan the log statistics and return the value of the first
	# field whose name contains $s; report failure and return 0
	# if no field matches.
	foreach entry [$env log_stat] {
		if { [is_substr [lindex $entry 0] $s] != 0 } {
			return [lindex $entry 1]
		}
	}
	puts "FAIL: log005: stat string $s not found"
	return 0
}

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information
#
# Copyright (c) 2000
# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
# $Id: logtrack.tcl,v 11.6 2000/10/27 15:30:39 krinsky Exp $
# $Id: logtrack.tcl,v 11.11 2002/09/03 16:44:37 sue Exp $
#
# logtrack.tcl: A collection of routines, formerly implemented in Perl
# as log.pl, to track which log record types the test suite hits.
@ -35,20 +35,26 @@ proc logtrack_init { } {
# records were seen.
proc logtrack_read { dirname } {
global ltsname tmpname util_path
global encrypt passwd
set seendb [berkdb_open $ltsname]
error_check_good seendb_open [is_valid_db $seendb] TRUE
file delete -force $tmpname
set ret [catch {exec $util_path/db_printlog -N \
-h "$dirname" > $tmpname} res]
set pargs " -N -h $dirname "
if { $encrypt > 0 } {
append pargs " -P $passwd "
}
set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res]
error_check_good printlog $ret 0
error_check_good tmpfile_exists [file exists $tmpname] 1
set f [open $tmpname r]
while { [gets $f record] >= 0 } {
regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name
error_check_good seendb_put [$seendb put $name ""] 0
set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
if { $r == 1 } {
error_check_good seendb_put [$seendb put $name ""] 0
}
}
close $f
file delete -force $tmpname
@ -73,7 +79,7 @@ proc logtrack_summary { } {
set pref ""
while { [gets $f line] >= 0 } {
# Get the keyword, the first thing on the line:
# BEGIN/DEPRECATED/PREFIX
# BEGIN/DEPRECATED/IGNORED/PREFIX
set keyword [lindex $line 0]
if { [string compare $keyword PREFIX] == 0 } {
@ -92,7 +98,8 @@ proc logtrack_summary { } {
error_check_good exist_put [$existdb put \
${pref}_[lindex $line 1] ""] 0
} elseif { [string compare $keyword DEPRECATED] == 0 } {
} elseif { [string compare $keyword DEPRECATED] == 0 ||
[string compare $keyword IGNORED] == 0 } {
error_check_good deprec_put [$deprecdb put \
${pref}_[lindex $line 1] ""] 0
}

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: mdbscript.tcl,v 11.23 2000/10/09 02:26:11 krinsky Exp $
# $Id: mdbscript.tcl,v 11.29 2002/03/22 21:43:06 krinsky Exp $
#
# Process script for the multi-process db tester.
@ -78,12 +78,18 @@ puts "$procid process id"
puts "$procs processes"
set klock NOLOCK
# Note: all I/O operations, and especially flush, are expensive
# on Win2000 at least with Tcl version 8.3.2. So we'll avoid
# flushes in the main part of the loop below.
flush stdout
set dbenv [berkdb env -create -cdb -home $dir]
#set dbenv [berkdb env -create -cdb -log -home $dir]
set dbenv [berkdb_env -create -cdb -home $dir]
#set dbenv [berkdb_env -create -cdb -log -home $dir]
error_check_good dbenv [is_valid_env $dbenv] TRUE
set locker [ $dbenv lock_id ]
set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file]
error_check_good dbopen [is_valid_db $db] TRUE
@ -96,6 +102,7 @@ tclsleep 5
proc get_lock { k } {
global dbenv
global procid
global locker
global klock
global DB_LOCK_WRITE
global DB_LOCK_NOWAIT
@ -103,7 +110,7 @@ proc get_lock { k } {
global exception_handled
# Make sure that the key isn't in the middle of
# a delete operation
if {[catch {$dbenv lock_get -nowait write $procid $k} klock] != 0 } {
if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } {
set exception_handled 1
error_check_good \
@ -136,7 +143,7 @@ set dlen [string length $datastr]
for { set i 0 } { $i < $iter } { incr i } {
set op [berkdb random_int 0 5]
puts "iteration $i operation $op"
flush stdout
set close_cursor 0
if {[catch {
switch $op {
0 {
@ -337,7 +344,6 @@ for { set i 0 } { $i < $iter } { incr i } {
set fnl [string first "\n" $errorInfo]
set theError [string range $errorInfo 0 [expr $fnl - 1]]
flush stdout
if { [string compare $klock NOLOCK] != 0 } {
catch {$klock put}
}
@ -348,11 +354,11 @@ for { set i 0 } { $i < $iter } { incr i } {
if {[string first FAIL $theError] == 0 && \
$exception_handled != 1} {
flush stdout
error "FAIL:[timestamp] test042: key $k: $theError"
}
set exception_handled 0
} else {
flush stdout
if { [string compare $klock NOLOCK] != 0 } {
error_check_good "$klock put" [$klock put] 0
set klock NOLOCK
@ -360,14 +366,11 @@ for { set i 0 } { $i < $iter } { incr i } {
}
}
if {[catch {$db close} ret] != 0 } {
error_check_good close [is_substr $errorInfo "DB_INCOMPLETE"] 1
puts "Warning: sync incomplete on close ([pid])"
} else {
error_check_good close $ret 0
}
$dbenv close
error_check_good db_close_catch [catch {$db close} ret] 0
error_check_good db_close $ret 0
error_check_good dbenv_close [$dbenv close] 0
flush stdout
exit
puts "[timestamp] [pid] Complete"

199
bdb/test/memp001.tcl Normal file
View file

@ -0,0 +1,199 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: memp001.tcl,v 11.50 2002/08/07 16:46:28 bostic Exp $
#
# TEST memp001
# TEST Randomly updates pages.
# TEST	memp001
# TEST	Randomly updates pages.
proc memp001 { } {
	# Run the body with one cache and with three caches for each
	# shared-region configuration: default, private memory, and
	# SysV shared memory (fixed shm key so segments are reused).
	foreach region_flags [list "" "-private" "-system_mem -shm_key 1"] {
		memp001_body 1 $region_flags
		memp001_body 3 $region_flags
	}
}
# Body of memp001: random page get/replace exercise against $nfiles
# mpool files, using $ncache caches and the given env flags.
proc memp001_body { ncache flags } {
	source ./include.tcl
	global rand_init

	# Test parameters: 5 backing files of 512-byte pages, 500
	# random-replacement iterations, 400KB total cache.
	set nfiles 5
	set iter 500
	set psize 512
	set cachearg "-cachesize {0 400000 $ncache}"

	puts \
"Memp001: { $flags } random update $iter iterations on $nfiles files."
	#
	# Check if this platform supports this set of flags
	#
	if { [mem_chk $flags] == 1 } {
		return
	}

	env_cleanup $testdir
	puts "\tMemp001.a: Create env with $ncache caches"
	set env [eval {berkdb_env -create -mode 0644} \
	    $cachearg {-home $testdir} $flags]
	error_check_good env_open [is_valid_env $env] TRUE

	#
	# Do a simple mpool_stat call to verify the number of caches
	# just to exercise the stat code.
	set stat [$env mpool_stat]
	set str "Number of caches"
	set checked 0
	foreach statpair $stat {
		if { $checked == 1 } {
			break
		}
		# Stat output is a list of {name value} pairs; find the
		# cache-count entry and confirm it matches $ncache.
		if { [is_substr [lindex $statpair 0] $str] != 0} {
			set checked 1
			error_check_good ncache [lindex $statpair 1] $ncache
		}
	}
	# The loop must actually have found the stat field.
	error_check_good checked $checked 1

	# Open N memp files
	puts "\tMemp001.b: Create $nfiles mpool files"
	for {set i 1} {$i <= $nfiles} {incr i} {
		set fname "data_file.$i"
		file_create $testdir/$fname 50 $psize

		set mpools($i) \
		    [$env mpool -create -pagesize $psize -mode 0644 $fname]
		error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
	}

	# Now, loop, picking files at random
	berkdb srand $rand_init
	puts "\tMemp001.c: Random page replacement loop"
	for {set i 0} {$i < $iter} {incr i} {
		set mpool $mpools([berkdb random_int 1 $nfiles])
		# Interleave gets and replaces (put-dirty + re-get) of
		# random pages to churn the cache; each call leaves the
		# returned page pinned.
		set p(1) [get_range $mpool 10]
		set p(2) [get_range $mpool 10]
		set p(3) [get_range $mpool 10]
		set p(1) [replace $mpool $p(1)]
		set p(3) [replace $mpool $p(3)]
		set p(4) [get_range $mpool 20]
		set p(4) [replace $mpool $p(4)]
		set p(5) [get_range $mpool 10]
		set p(6) [get_range $mpool 20]
		set p(7) [get_range $mpool 10]
		set p(8) [get_range $mpool 20]
		set p(5) [replace $mpool $p(5)]
		set p(6) [replace $mpool $p(6)]
		set p(9) [get_range $mpool 40]
		set p(9) [replace $mpool $p(9)]
		set p(10) [get_range $mpool 40]
		set p(7) [replace $mpool $p(7)]
		set p(8) [replace $mpool $p(8)]
		set p(9) [replace $mpool $p(9)]
		set p(10) [replace $mpool $p(10)]
		#
		# We now need to put all the pages we have here or
		# else they end up pinned.
		#
		for {set x 1} { $x <= 10} {incr x} {
			error_check_good pgput [$p($x) put] 0
		}
	}

	# Close N memp files, close the environment.
	puts "\tMemp001.d: Close mpools"
	for {set i 1} {$i <= $nfiles} {incr i} {
		error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
	}
	error_check_good envclose [$env close] 0

	# Remove the backing files now that the env is gone.
	for {set i 1} {$i <= $nfiles} {incr i} {
		fileremove -f $testdir/data_file.$i
	}
}
# Create a file of $nblocks blocks of $blocksize bytes each.  Each
# block begins with its own index number; the final byte of the file
# is a literal "Z" so the file has exactly the requested size.
proc file_create { fname nblocks blocksize } {
	set chan [open $fname w]
	set blk 0
	while { $blk < $nblocks } {
		# Seek to the start of each block and stamp its index.
		seek $chan [expr $blk * $blocksize] start
		puts -nonewline $chan $blk
		incr blk
	}
	# Write the very last byte explicitly.  We don't end the file
	# with a newline, because some platforms (like Windows) emit
	# CR/NL; there does not appear to be a BINARY open flag that
	# prevents this.
	seek $chan [expr $nblocks * $blocksize - 1]
	puts -nonewline $chan "Z"
	close $chan

	# Make sure it worked
	if { [file size $fname] != $nblocks * $blocksize } {
		error "FAIL: file_create could not create correct file size"
	}
}
# Fetch a random page in [0, $max] from $mpool, stamp it with a
# "pinned by" marker, and return the (still pinned) page handle.
proc get_range { mpool max } {
	set want [berkdb random_int 0 $max]
	set pg [$mpool get $want]
	error_check_good page [is_valid_page $pg $mpool] TRUE
	set actual [$pg pgnum]
	if { $actual != $want } {
		puts "Get_range: Page mismatch page |$want| val |$actual|"
	}
	# Record who pinned the page so concurrent readers can tell.
	error_check_good page_init [$pg init "Page is pinned by [pid]"] 0
	return $pg
}
# Unpin page $p (marking it dirty so the stamp is written back),
# then immediately re-get the same page number and return the new
# pinned handle.
proc replace { mpool p } {
	set pgno [$p pgnum]
	error_check_good page_init [$p init "Page is unpinned by [pid]"] 0
	# Release dirty so the updated stamp reaches the backing file.
	error_check_good page_put [$p put -dirty] 0
	set fresh [$mpool get $pgno]
	error_check_good page [is_valid_page $fresh $mpool] TRUE
	return $fresh
}
# Probe whether this platform supports an environment opened with the
# given shared-region $flags.  Returns 1 (caller should skip the test)
# when the open fails in an expected way, 0 when the flags are usable.
proc mem_chk { flags } {
	source ./include.tcl
	global errorCode

	# Open with -region_init so all regions are touched up front,
	# exposing unsupported configurations immediately.
	env_cleanup $testdir
	set cachearg " -cachesize {0 400000 3}"
	set failed [catch {eval {berkdb_env -create -mode 0644}\
	    $cachearg {-region_init -home $testdir} $flags} env]
	if { $failed } {
		# If the env open failed, it may be because we're on a
		# platform such as HP-UX 10 that won't support mutexes in
		# shmget memory.  Or QNX, which doesn't support system
		# memory at all.  Verify that the return value was EINVAL
		# or EOPNOTSUPP and bail gracefully.
		error_check_good is_shm_test [is_substr $flags -system_mem] 1
		error_check_good returned_error [expr \
		    [is_substr $errorCode EINVAL] || \
		    [is_substr $errorCode EOPNOTSUPP]] 1
		puts "Warning: platform does not support mutexes in shmget memory."
		puts "Skipping shared memory mpool test."
		return 1
	}
	error_check_good env_open [is_valid_env $env] TRUE
	error_check_good env_close [$env close] 0
	env_cleanup $testdir
	return 0
}

62
bdb/test/memp002.tcl Normal file
View file

@ -0,0 +1,62 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: memp002.tcl,v 11.47 2002/09/05 17:23:06 sandstro Exp $
#
# TEST memp002
# TEST Tests multiple processes accessing and modifying the same files.
# TEST	memp002
# TEST	Tests multiple processes accessing and modifying the same files.
proc memp002 { } {
	#
	# Multiple processes not supported by private memory so don't
	# run memp002_body with -private.
	#
	foreach region_flags [list "" "-system_mem -shm_key 1"] {
		memp002_body $region_flags
	}
}
# Body of memp002: fork several mpoolscript.tcl children that share
# one environment and hammer the same mpool files concurrently.
proc memp002_body { flags } {
	source ./include.tcl

	puts "Memp002: {$flags} Multiprocess mpool tester"

	# 4 child processes, five page sizes (one file per size),
	# 500 total iterations split across the children, 100 pages.
	set procs 4
	set psizes "512 1024 2048 4096 8192"
	set iterations 500
	set npages 100

	# Check if this combination of flags is supported by this arch.
	if { [mem_chk $flags] == 1 } {
		return
	}

	# Each child performs its share of the total iterations.
	set iter [expr $iterations / $procs]

	# Clean up old stuff and create new.
	env_cleanup $testdir
	for { set i 0 } { $i < [llength $psizes] } { incr i } {
		fileremove -f $testdir/file$i
	}

	set e [eval {berkdb_env -create -lock -home $testdir} $flags]
	error_check_good dbenv [is_valid_env $e] TRUE

	# Spawn each child under wrap.tcl, which redirects its output
	# to a per-process .out file; echo the command line first for
	# the test log.
	set pidlist {}
	for { set i 0 } { $i < $procs } {incr i} {
		puts "$tclsh_path\
		    $test_path/mpoolscript.tcl $testdir $i $procs \
		    $iter $psizes $npages 3 $flags > \
		    $testdir/memp002.$i.out &"
		set p [exec $tclsh_path $test_path/wrap.tcl \
		    mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \
		    $iter $psizes $npages 3 $flags &]
		lappend pidlist $p
	}
	puts "Memp002: $procs independent processes now running"

	# Wait for every child to exit, then tear down the env.
	watch_procs $pidlist
	reset_env $e
}

153
bdb/test/memp003.tcl Normal file
View file

@ -0,0 +1,153 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: memp003.tcl,v 11.46 2002/04/30 17:26:06 sue Exp $
#
# TEST memp003
# TEST Test reader-only/writer process combinations; we use the access methods
# TEST for testing.
# TEST	memp003
# TEST	Test reader-only/writer process combinations; we use the access
# TEST	methods for testing.
proc memp003 { } {
	#
	# Multiple processes not supported by private memory so don't
	# run memp003_body with -private.
	#
	foreach region_flags [list "" "-system_mem -shm_key 1"] {
		memp003_body $region_flags
	}
}
# Body of memp003: one local read-only process and one remote
# read-write process share an mpool-backed btree; the remote side
# modifies keys and the local side must observe the changes through
# the shared cache.
proc memp003_body { flags } {
	global alphabet
	source ./include.tcl

	puts "Memp003: {$flags} Reader/Writer tests"

	if { [mem_chk $flags] == 1 } {
		return
	}

	env_cleanup $testdir
	set psize 1024
	set nentries 500
	set testfile mpool.db
	set t1 $testdir/t1

	# Create an environment that the two processes can share, with
	# 20 pages per cache.
	set c [list 0 [expr $psize * 20 * 3] 3]
	set dbenv [eval {berkdb_env \
	    -create -lock -home $testdir -cachesize $c} $flags]
	error_check_good dbenv [is_valid_env $dbenv] TRUE

	# First open and create the file.
	set db [berkdb_open -env $dbenv -create -truncate \
	    -mode 0644 -pagesize $psize -btree $testfile]
	error_check_good dbopen/RW [is_valid_db $db] TRUE

	set did [open $dict]
	set txn ""
	set count 0

	puts "\tMemp003.a: create database"
	set keys ""
	# Here is the loop where we put and get each key/data pair
	while { [gets $did str] != -1 && $count < $nentries } {
		lappend keys $str

		set ret [eval {$db put} $txn {$str $str}]
		error_check_good put $ret 0

		set ret [eval {$db get} $txn {$str}]
		error_check_good get $ret [list [list $str $str]]

		incr count
	}
	close $did
	error_check_good close [$db close] 0

	# Now open the file for read-only
	set db [berkdb_open -env $dbenv -rdonly $testfile]
	error_check_good dbopen/RO [is_substr $db db] 1

	puts "\tMemp003.b: verify a few keys"
	# Read and verify a couple of keys; saving them to check later
	set testset ""
	for { set i 0 } { $i < 10 } { incr i } {
		set ndx [berkdb random_int 0 [expr $nentries - 1]]
		set key [lindex $keys $ndx]
		if { [lsearch $testset $key] != -1 } {
			incr i -1
			continue;
		}

		# The remote process stuff is unhappy with
		# zero-length keys; make sure we don't pick one.
		if { [llength $key] == 0 } {
			incr i -1
			continue
		}

		lappend testset $key

		set ret [eval {$db get} $txn {$key}]
		error_check_good get/RO $ret [list [list $key $key]]
	}

	puts "\tMemp003.c: retrieve and modify keys in remote process"
	# Now open remote process where we will open the file RW
	set f1 [open |$tclsh_path r+]
	puts $f1 "source $test_path/test.tcl"
	puts $f1 "flush stdout"
	flush $f1

	set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ]
	set remote_env [send_cmd $f1 \
	    "berkdb_env -create -lock -home $testdir -cachesize $c $flags"]
	error_check_good remote_dbenv [is_valid_env $remote_env] TRUE

	set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
	error_check_good remote_dbopen [is_valid_db $remote_db] TRUE

	foreach k $testset {
		# Get the key
		set ret [send_cmd $f1 "$remote_db get $k"]
		error_check_good remote_get $ret [list [list $k $k]]

		# Now replace the key
		set ret [send_cmd $f1 "$remote_db put $k $k$k"]
		error_check_good remote_put $ret 0
	}

	puts "\tMemp003.d: verify changes in local process"
	foreach k $testset {
		# BUGFIX: use the loop variable $k, not the stale $key
		# left over from the Memp003.b loop.  The old code
		# re-checked a single key ten times, so the per-key
		# remote updates were never actually verified.
		set ret [eval {$db get} $txn {$k}]
		error_check_good get_verify/RO $ret [list [list $k $k$k]]
	}

	puts "\tMemp003.e: Fill up the cache with dirty buffers"
	foreach k $testset {
		# Now rewrite the keys with BIG data
		set data [replicate $alphabet 32]
		set ret [send_cmd $f1 "$remote_db put $k $data"]
		error_check_good remote_put $ret 0
	}

	puts "\tMemp003.f: Get more pages for the read-only file"
	dump_file $db $txn $t1 nop

	puts "\tMemp003.g: Sync from the read-only file"
	error_check_good db_sync [$db sync] 0
	error_check_good db_close [$db close] 0

	set ret [send_cmd $f1 "$remote_db close"]
	error_check_good remote_get $ret 0

	# Close the environment both remotely and locally.
	set ret [send_cmd $f1 "$remote_env close"]
	error_check_good remote:env_close $ret 0

	close $f1

	reset_env $dbenv
}

View file

@ -1,420 +0,0 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Sleepycat Software. All rights reserved.
#
# $Id: mpool.tcl,v 11.34 2001/01/18 04:58:07 krinsky Exp $
#
# Options are:
# -cachesize {gbytes bytes ncache}
# -nfiles <files>
# -iterations <iterations>
# -pagesize <page size in bytes>
# -dir <directory in which to store memp>
# -stat
proc memp_usage {} {
puts "memp -cachesize {gbytes bytes ncache}"
puts "\t-nfiles <files>"
puts "\t-iterations <iterations>"
puts "\t-pagesize <page size in bytes>"
puts "\t-dir <memp directory>"
puts "\t-mem {private system}"
return
}
proc mpool { args } {
source ./include.tcl
global errorCode
puts "mpool {$args} running"
# Set defaults
set cachearg " -cachesize {0 200000 3}"
set nfiles 5
set iterations 500
set pagesize "512 1024 2048 4096 8192"
set npages 100
set procs 4
set seeds ""
set shm_key 1
set dostat 0
set flags ""
for { set i 0 } { $i < [llength $args] } {incr i} {
switch -regexp -- [lindex $args $i] {
-c.* {
incr i
set cachesize [lindex $args $i]
set cachearg " -cachesize $cachesize"
}
-d.* { incr i; set testdir [lindex $args $i] }
-i.* { incr i; set iterations [lindex $args $i] }
-me.* {
incr i
if { [string \
compare [lindex $args $i] private] == 0 } {
set flags -private
} elseif { [string \
compare [lindex $args $i] system] == 0 } {
#
# We need to use a shm id. Use one
# that is the same each time so that
# we do not grow segments infinitely.
set flags "-system_mem -shm_key $shm_key"
} else {
puts -nonewline \
"FAIL:[timestamp] Usage: "
memp_usage
return
}
}
-nf.* { incr i; set nfiles [lindex $args $i] }
-np.* { incr i; set npages [lindex $args $i] }
-pa.* { incr i; set pagesize [lindex $args $i] }
-pr.* { incr i; set procs [lindex $args $i] }
-se.* { incr i; set seeds [lindex $args $i] }
-st.* { set dostat 1 }
default {
puts -nonewline "FAIL:[timestamp] Usage: "
memp_usage
return
}
}
}
# Clean out old directory
env_cleanup $testdir
# Open the memp with region init specified
set ret [catch {eval {berkdb env -create -mode 0644}\
$cachearg {-region_init -home $testdir} $flags} res]
if { $ret == 0 } {
set env $res
} else {
# If the env open failed, it may be because we're on a platform
# such as HP-UX 10 that won't support mutexes in shmget memory.
# Or QNX, which doesn't support system memory at all.
# Verify that the return value was EINVAL or EOPNOTSUPP
# and bail gracefully.
error_check_good is_shm_test [is_substr $flags -system_mem] 1
error_check_good returned_error [expr \
[is_substr $errorCode EINVAL] || \
[is_substr $errorCode EOPNOTSUPP]] 1
puts "Warning:\
platform does not support mutexes in shmget memory."
puts "Skipping shared memory mpool test."
return
}
error_check_good env_open [is_substr $env env] 1
reset_env $env
env_cleanup $testdir
# Now open without region init
set env [eval {berkdb env -create -mode 0644}\
$cachearg {-home $testdir} $flags]
error_check_good evn_open [is_substr $env env] 1
memp001 $env \
$testdir $nfiles $iterations [lindex $pagesize 0] $dostat $flags
reset_env $env
set ret [berkdb envremove -home $testdir]
error_check_good env_remove $ret 0
env_cleanup $testdir
memp002 $testdir \
$procs $pagesize $iterations $npages $seeds $dostat $flags
set ret [berkdb envremove -home $testdir]
error_check_good env_remove $ret 0
env_cleanup $testdir
memp003 $testdir $iterations $flags
set ret [berkdb envremove -home $testdir]
error_check_good env_remove $ret 0
env_cleanup $testdir
}
proc memp001 {env dir n iter psize dostat flags} {
source ./include.tcl
global rand_init
puts "Memp001: {$flags} random update $iter iterations on $n files."
# Open N memp files
for {set i 1} {$i <= $n} {incr i} {
set fname "data_file.$i"
file_create $dir/$fname 50 $psize
set mpools($i) \
[$env mpool -create -pagesize $psize -mode 0644 $fname]
error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
}
# Now, loop, picking files at random
berkdb srand $rand_init
for {set i 0} {$i < $iter} {incr i} {
set mpool $mpools([berkdb random_int 1 $n])
set p1 [get_range $mpool 10]
set p2 [get_range $mpool 10]
set p3 [get_range $mpool 10]
set p1 [replace $mpool $p1]
set p3 [replace $mpool $p3]
set p4 [get_range $mpool 20]
set p4 [replace $mpool $p4]
set p5 [get_range $mpool 10]
set p6 [get_range $mpool 20]
set p7 [get_range $mpool 10]
set p8 [get_range $mpool 20]
set p5 [replace $mpool $p5]
set p6 [replace $mpool $p6]
set p9 [get_range $mpool 40]
set p9 [replace $mpool $p9]
set p10 [get_range $mpool 40]
set p7 [replace $mpool $p7]
set p8 [replace $mpool $p8]
set p9 [replace $mpool $p9]
set p10 [replace $mpool $p10]
}
if { $dostat == 1 } {
puts [$env mpool_stat]
for {set i 1} {$i <= $n} {incr i} {
error_check_good mp_sync [$mpools($i) fsync] 0
}
}
# Close N memp files
for {set i 1} {$i <= $n} {incr i} {
error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
fileremove -f $dir/data_file.$i
}
}
proc file_create { fname nblocks blocksize } {
set fid [open $fname w]
for {set i 0} {$i < $nblocks} {incr i} {
seek $fid [expr $i * $blocksize] start
puts -nonewline $fid $i
}
seek $fid [expr $nblocks * $blocksize - 1]
# We don't end the file with a newline, because some platforms (like
# Windows) emit CR/NL. There does not appear to be a BINARY open flag
# that prevents this.
puts -nonewline $fid "Z"
close $fid
# Make sure it worked
if { [file size $fname] != $nblocks * $blocksize } {
error "FAIL: file_create could not create correct file size"
}
}
proc get_range { mpool max } {
set pno [berkdb random_int 0 $max]
set p [$mpool get $pno]
error_check_good page [is_valid_page $p $mpool] TRUE
set got [$p pgnum]
if { $got != $pno } {
puts "Get_range: Page mismatch page |$pno| val |$got|"
}
set ret [$p init "Page is pinned by [pid]"]
error_check_good page_init $ret 0
return $p
}
proc replace { mpool p } {
set pgno [$p pgnum]
set ret [$p init "Page is unpinned by [pid]"]
error_check_good page_init $ret 0
set ret [$p put -dirty]
error_check_good page_put $ret 0
set p2 [$mpool get $pgno]
error_check_good page [is_valid_page $p2 $mpool] TRUE
return $p2
}
proc memp002 { dir procs psizes iterations npages seeds dostat flags } {
source ./include.tcl
puts "Memp002: {$flags} Multiprocess mpool tester"
if { [is_substr $flags -private] != 0 } {
puts "Memp002 skipping\
multiple processes not supported by private memory"
return
}
set iter [expr $iterations / $procs]
# Clean up old stuff and create new.
env_cleanup $dir
for { set i 0 } { $i < [llength $psizes] } { incr i } {
fileremove -f $dir/file$i
}
set e [eval {berkdb env -create -lock -home $dir} $flags]
error_check_good dbenv [is_valid_widget $e env] TRUE
set pidlist {}
for { set i 0 } { $i < $procs } {incr i} {
if { [llength $seeds] == $procs } {
set seed [lindex $seeds $i]
} else {
set seed -1
}
puts "$tclsh_path\
$test_path/mpoolscript.tcl $dir $i $procs \
$iter $psizes $npages 3 $flags > \
$dir/memp002.$i.out &"
set p [exec $tclsh_path $test_path/wrap.tcl \
mpoolscript.tcl $dir/memp002.$i.out $dir $i $procs \
$iter $psizes $npages 3 $flags &]
lappend pidlist $p
}
puts "Memp002: $procs independent processes now running"
watch_procs
reset_env $e
}
# Test reader-only/writer process combinations; we use the access methods
# for testing.
proc memp003 { dir {nentries 10000} flags } {
global alphabet
source ./include.tcl
puts "Memp003: {$flags} Reader/Writer tests"
if { [is_substr $flags -private] != 0 } {
puts "Memp003 skipping\
multiple processes not supported by private memory"
return
}
env_cleanup $dir
set psize 1024
set testfile mpool.db
set t1 $dir/t1
# Create an environment that the two processes can share
set c [list 0 [expr $psize * 10] 3]
set dbenv [eval {berkdb env \
-create -lock -home $dir -cachesize $c} $flags]
error_check_good dbenv [is_valid_env $dbenv] TRUE
# First open and create the file.
set db [berkdb_open -env $dbenv -create -truncate \
-mode 0644 -pagesize $psize -btree $testfile]
error_check_good dbopen/RW [is_valid_db $db] TRUE
set did [open $dict]
set txn ""
set count 0
puts "\tMemp003.a: create database"
set keys ""
# Here is the loop where we put and get each key/data pair
while { [gets $did str] != -1 && $count < $nentries } {
lappend keys $str
set ret [eval {$db put} $txn {$str $str}]
error_check_good put $ret 0
set ret [eval {$db get} $txn {$str}]
error_check_good get $ret [list [list $str $str]]
incr count
}
close $did
error_check_good close [$db close] 0
# Now open the file for read-only
set db [berkdb_open -env $dbenv -rdonly $testfile]
error_check_good dbopen/RO [is_substr $db db] 1
puts "\tMemp003.b: verify a few keys"
# Read and verify a couple of keys; saving them to check later
set testset ""
for { set i 0 } { $i < 10 } { incr i } {
set ndx [berkdb random_int 0 [expr $nentries - 1]]
set key [lindex $keys $ndx]
if { [lsearch $testset $key] != -1 } {
incr i -1
continue;
}
# The remote process stuff is unhappy with
# zero-length keys; make sure we don't pick one.
if { [llength $key] == 0 } {
incr i -1
continue
}
lappend testset $key
set ret [eval {$db get} $txn {$key}]
error_check_good get/RO $ret [list [list $key $key]]
}
puts "\tMemp003.c: retrieve and modify keys in remote process"
# Now open remote process where we will open the file RW
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
puts $f1 "flush stdout"
flush $f1
set c [concat "{" [list 0 [expr $psize * 10] 3] "}" ]
set remote_env [send_cmd $f1 \
"berkdb env -create -lock -home $dir -cachesize $c $flags"]
error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
foreach k $testset {
# Get the key
set ret [send_cmd $f1 "$remote_db get $k"]
error_check_good remote_get $ret [list [list $k $k]]
# Now replace the key
set ret [send_cmd $f1 "$remote_db put $k $k$k"]
error_check_good remote_put $ret 0
}
puts "\tMemp003.d: verify changes in local process"
foreach k $testset {
set ret [eval {$db get} $txn {$key}]
error_check_good get_verify/RO $ret [list [list $key $key$key]]
}
puts "\tMemp003.e: Fill up the cache with dirty buffers"
foreach k $testset {
# Now rewrite the keys with BIG data
set data [replicate $alphabet 32]
set ret [send_cmd $f1 "$remote_db put $k $data"]
error_check_good remote_put $ret 0
}
puts "\tMemp003.f: Get more pages for the read-only file"
dump_file $db $txn $t1 nop
puts "\tMemp003.g: Sync from the read-only file"
error_check_good db_sync [$db sync] 0
error_check_good db_close [$db close] 0
set ret [send_cmd $f1 "$remote_db close"]
error_check_good remote_get $ret 0
# Close the environment both remotely and locally.
set ret [send_cmd $f1 "$remote_env close"]
error_check_good remote:env_close $ret 0
close $f1
reset_env $dbenv
}

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: mpoolscript.tcl,v 11.12 2000/05/05 15:23:47 sue Exp $
# $Id: mpoolscript.tcl,v 11.16 2002/04/29 14:47:16 sandstro Exp $
#
# Random multiple process mpool tester.
# Usage: mpoolscript dir id numiters numfiles numpages sleepint
@ -61,7 +61,7 @@ foreach i $pgsizes {
}
set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
set env_cmd {berkdb env -lock -cachesize $cache -home $dir}
set env_cmd {berkdb_env -lock -cachesize $cache -home $dir}
set e [eval $env_cmd $flags]
error_check_good env_open [is_valid_env $e] TRUE
@ -78,7 +78,8 @@ foreach psize $pgsizes {
puts "Establishing long-term pin on file 0 page $id for process $id"
# Set up the long-pin page
set lock [$e lock_get write $id 0:$id]
set locker [$e lock_id]
set lock [$e lock_get write $locker 0:$id]
error_check_good lock_get [is_valid_lock $lock $e] TRUE
set mp [lindex $mpools 0]
@ -109,7 +110,7 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
set mpf [lindex $mpools $fnum]
for { set p 0 } { $p < $numpages } { incr p } {
set lock [$e lock_get write $id $fnum:$p]
set lock [$e lock_get write $locker $fnum:$p]
error_check_good lock_get:$fnum:$p \
[is_valid_lock $lock $e] TRUE

View file

@ -1,225 +0,0 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Sleepycat Software. All rights reserved.
#
# $Id: mutex.tcl,v 11.18 2000/09/01 19:24:59 krinsky Exp $
#
# Exercise mutex functionality.
# Options are:
# -dir <directory in which to store mpool>
# -iter <iterations>
# -mdegree <number of mutexes per iteration>
# -nmutex <number of mutexes>
# -procs <number of processes to run>
# -wait <wait interval after getting locks>
proc mutex_usage {} {
puts stderr "mutex\n\t-dir <dir>\n\t-iter <iterations>"
puts stderr "\t-mdegree <locks per iteration>\n\t-nmutex <n>"
puts stderr "\t-procs <nprocs>"
puts stderr "\n\t-wait <max wait interval>"
return
}
proc mutex { args } {
source ./include.tcl
set dir db
set iter 500
set mdegree 3
set nmutex 20
set procs 5
set wait 2
for { set i 0 } { $i < [llength $args] } {incr i} {
switch -regexp -- [lindex $args $i] {
-d.* { incr i; set testdir [lindex $args $i] }
-i.* { incr i; set iter [lindex $args $i] }
-m.* { incr i; set mdegree [lindex $args $i] }
-n.* { incr i; set nmutex [lindex $args $i] }
-p.* { incr i; set procs [lindex $args $i] }
-w.* { incr i; set wait [lindex $args $i] }
default {
puts -nonewline "FAIL:[timestamp] Usage: "
mutex_usage
return
}
}
}
if { [file exists $testdir/$dir] != 1 } {
file mkdir $testdir/$dir
} elseif { [file isdirectory $testdir/$dir ] != 1 } {
error "$testdir/$dir is not a directory"
}
# Basic sanity tests
mutex001 $testdir $nmutex
# Basic synchronization tests
mutex002 $testdir $nmutex
# Multiprocess tests
mutex003 $testdir $iter $nmutex $procs $mdegree $wait
}
proc mutex001 { dir nlocks } {
source ./include.tcl
puts "Mutex001: Basic functionality"
env_cleanup $dir
# Test open w/out create; should fail
error_check_bad \
env_open [catch {berkdb env -lock -home $dir} env] 0
# Now open for real
set env [berkdb env -create -mode 0644 -lock -home $dir]
error_check_good env_open [is_valid_env $env] TRUE
set m [$env mutex 0644 $nlocks]
error_check_good mutex_init [is_valid_mutex $m $env] TRUE
# Get, set each mutex; sleep, then get Release
for { set i 0 } { $i < $nlocks } { incr i } {
set r [$m get $i ]
error_check_good mutex_get $r 0
set r [$m setval $i $i]
error_check_good mutex_setval $r 0
}
tclsleep 5
for { set i 0 } { $i < $nlocks } { incr i } {
set r [$m getval $i]
error_check_good mutex_getval $r $i
set r [$m release $i ]
error_check_good mutex_get $r 0
}
error_check_good mutex_close [$m close] 0
error_check_good env_close [$env close] 0
puts "Mutex001: completed successfully."
}
# Test basic synchronization
proc mutex002 { dir nlocks } {
source ./include.tcl
puts "Mutex002: Basic synchronization"
env_cleanup $dir
# Fork off child before we open any files.
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
flush $f1
# Open the environment and the mutex locally
set local_env [berkdb env -create -mode 0644 -lock -home $dir]
error_check_good env_open [is_valid_env $local_env] TRUE
set local_mutex [$local_env mutex 0644 $nlocks]
error_check_good \
mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
# Open the environment and the mutex remotely
set remote_env [send_cmd $f1 "berkdb env -lock -home $dir"]
error_check_good remote:env_open [is_valid_env $remote_env] TRUE
set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
error_check_good \
mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
# Do a get here, then set the value to be pid.
# On the remote side fire off a get and getval.
set r [$local_mutex get 1]
error_check_good lock_get $r 0
set r [$local_mutex setval 1 [pid]]
error_check_good lock_get $r 0
# Now have the remote side request the lock and check its
# value. Then wait 5 seconds, release the mutex and see
# what the remote side returned.
send_timed_cmd $f1 1 "$remote_mutex get 1"
send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
# Now sleep before resetting and releasing lock
tclsleep 5
set newv [expr [pid] - 1]
set r [$local_mutex setval 1 $newv]
error_check_good mutex_setval $r 0
set r [$local_mutex release 1]
error_check_good mutex_release $r 0
# Now get the result from the other script
# Timestamp
set result [rcv_result $f1]
error_check_good lock_get:remote_time [expr $result > 4] 1
# Timestamp
set result [rcv_result $f1]
# Mutex value
set result [send_cmd $f1 "puts \$ret"]
error_check_good lock_get:remote_getval $result $newv
# Close down the remote
set ret [send_cmd $f1 "$remote_mutex close" 5]
# Not sure why we need this, but we do... an extra blank line
# someone gets output somewhere
gets $f1 ret
error_check_good remote:mutex_close $ret 0
set ret [send_cmd $f1 "$remote_env close"]
error_check_good remote:env_close $ret 0
catch { close $f1 } result
set ret [$local_mutex close]
error_check_good local:mutex_close $ret 0
set ret [$local_env close]
error_check_good local:env_close $ret 0
puts "Mutex002: completed successfully."
}
# Generate a bunch of parallel
# testers that try to randomly obtain locks.
proc mutex003 { dir iter nmutex procs mdegree wait } {
source ./include.tcl
puts "Mutex003: Multi-process random mutex test ($procs processes)"
env_cleanup $dir
# Now open the region we'll use for multiprocess testing.
set env [berkdb env -create -mode 0644 -lock -home $dir]
error_check_good env_open [is_valid_env $env] TRUE
set mutex [$env mutex 0644 $nmutex]
error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
error_check_good mutex_close [$mutex close] 0
# Now spawn off processes
set proclist {}
for { set i 0 } {$i < $procs} {incr i} {
puts "$tclsh_path\
$test_path/mutexscript.tcl $dir\
$iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
set p [exec $tclsh_path $test_path/wrap.tcl \
mutexscript.tcl $testdir/$i.mutexout $dir\
$iter $nmutex $wait $mdegree &]
lappend proclist $p
}
puts "Mutex003: $procs independent processes now running"
watch_procs
error_check_good env_close [$env close] 0
# Remove output files
for { set i 0 } {$i < $procs} {incr i} {
fileremove -f $dir/$i.mutexout
}
}

51
bdb/test/mutex001.tcl Normal file
View file

@ -0,0 +1,51 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: mutex001.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
#
# TEST mutex001
# TEST Test basic mutex functionality
# TEST	mutex001
# TEST	Test basic mutex functionality
proc mutex001 { } {
	source ./include.tcl

	puts "Mutex001: Basic functionality"
	env_cleanup $testdir
	set nlocks 20

	# Opening without -create must fail: no environment exists yet.
	error_check_bad \
	    env_open [catch {berkdb_env -lock -home $testdir} env] 0

	puts "\tMutex001.a: Create lock env"
	# Now open for real
	set env [berkdb_env -create -mode 0644 -lock -home $testdir]
	error_check_good env_open [is_valid_env $env] TRUE

	puts "\tMutex001.b: Create $nlocks mutexes"
	set m [$env mutex 0644 $nlocks]
	error_check_good mutex_init [is_valid_mutex $m $env] TRUE

	# Acquire and stamp every mutex; sleep; then read the stamps
	# back and release everything.
	puts "\tMutex001.c: Get/set loop"
	set i 0
	while { $i < $nlocks } {
		error_check_good mutex_get [$m get $i ] 0
		error_check_good mutex_setval [$m setval $i $i] 0
		incr i
	}
	tclsleep 5
	set i 0
	while { $i < $nlocks } {
		error_check_good mutex_getval [$m getval $i] $i
		error_check_good mutex_get [$m release $i ] 0
		incr i
	}

	error_check_good mutex_close [$m close] 0
	error_check_good env_close [$env close] 0
}

94
bdb/test/mutex002.tcl Normal file
View file

@ -0,0 +1,94 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: mutex002.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
#
# TEST mutex002
# TEST Test basic mutex synchronization
# TEST	mutex002
# TEST	Test basic mutex synchronization between this process and a
# TEST	child tclsh driven over a pipe.
proc mutex002 { } {
	source ./include.tcl

	puts "Mutex002: Basic synchronization"
	env_cleanup $testdir
	set nlocks 20

	# Fork off child before we open any files.
	set f1 [open |$tclsh_path r+]
	puts $f1 "source $test_path/test.tcl"
	flush $f1

	# Open the environment and the mutex locally
	puts "\tMutex002.a: Open local and remote env"
	set local_env [berkdb_env -create -mode 0644 -lock -home $testdir]
	error_check_good env_open [is_valid_env $local_env] TRUE

	set local_mutex [$local_env mutex 0644 $nlocks]
	error_check_good \
	    mutex_init [is_valid_mutex $local_mutex $local_env] TRUE

	# Open the environment and the mutex remotely
	set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"]
	error_check_good remote:env_open [is_valid_env $remote_env] TRUE

	set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
	error_check_good \
	    mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE

	# Do a get here, then set the value to be pid.
	# On the remote side fire off a get and getval.
	puts "\tMutex002.b: Local and remote get/set"
	set r [$local_mutex get 1]
	error_check_good lock_get $r 0

	set r [$local_mutex setval 1 [pid]]
	error_check_good lock_get $r 0

	# Now have the remote side request the lock and check its
	# value. Then wait 5 seconds, release the mutex and see
	# what the remote side returned.
	send_timed_cmd $f1 1 "$remote_mutex get 1"
	send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"

	# Now sleep before resetting and releasing lock
	tclsleep 5
	set newv [expr [pid] - 1]
	set r [$local_mutex setval 1 $newv]
	error_check_good mutex_setval $r 0

	set r [$local_mutex release 1]
	error_check_good mutex_release $r 0

	# Now get the result from the other script
	# Timestamp: the remote "get" must have blocked for roughly the
	# length of our sleep, proving it waited on the mutex.
	set result [rcv_result $f1]
	error_check_good lock_get:remote_time [expr $result > 4] 1

	# Timestamp
	set result [rcv_result $f1]

	# Mutex value: must be the value we set just before releasing.
	set result [send_cmd $f1 "puts \$ret"]
	error_check_good lock_get:remote_getval $result $newv

	# Close down the remote
	puts "\tMutex002.c: Close remote"
	set ret [send_cmd $f1 "$remote_mutex close" 5]
	# NOTE(review): an extra blank line appears on the pipe here and
	# must be consumed before the real result — source unknown.
	gets $f1 ret
	error_check_good remote:mutex_close $ret 0

	set ret [send_cmd $f1 "$remote_env close"]
	error_check_good remote:env_close $ret 0

	catch { close $f1 } result

	set ret [$local_mutex close]
	error_check_good local:mutex_close $ret 0
	set ret [$local_env close]
	error_check_good local:env_close $ret 0
}

52
bdb/test/mutex003.tcl Normal file
View file

@ -0,0 +1,52 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: mutex003.tcl,v 11.24 2002/09/05 17:23:06 sandstro Exp $
#
# TEST mutex003
# TEST Generate a bunch of parallel testers that try to randomly obtain locks.
proc mutex003 { } {
	source ./include.tcl

	# Test knobs: mutexes in the region, iterations per tester,
	# number of concurrent testers, locks held at once, and the
	# maximum sleep interval handed to each tester.
	set nmutex 20
	set iter 500
	set procs 5
	set mdegree 3
	set wait 2
	puts "Mutex003: Multi-process random mutex test"

	env_cleanup $testdir

	puts "\tMutex003.a: Create environment"
	# Create the lock region that every child tester will attach to,
	# then release our handle on the mutex region before forking.
	set lockenv [berkdb_env -create -mode 0644 -lock -home $testdir]
	error_check_good env_open [is_valid_env $lockenv] TRUE

	set mut [$lockenv mutex 0644 $nmutex]
	error_check_good mutex_init [is_valid_mutex $mut $lockenv] TRUE
	error_check_good mutex_close [$mut close] 0

	# Fork off the independent tester processes via wrap.tcl; each
	# writes its output to its own $testdir/<n>.mutexout file.
	puts "\tMutex003.b: Create $procs processes"
	set pids {}
	for { set i 0 } {$i < $procs} {incr i} {
		puts "$tclsh_path\
		    $test_path/mutexscript.tcl $testdir\
		    $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
		set childpid [exec $tclsh_path $test_path/wrap.tcl \
		    mutexscript.tcl $testdir/$i.mutexout $testdir\
		    $iter $nmutex $wait $mdegree &]
		lappend pids $childpid
	}
	puts "\tMutex003.c: $procs independent processes now running"
	watch_procs $pids

	error_check_good env_close [$lockenv close] 0

	# Remove the per-process output files now that all testers
	# have been reaped.
	for { set i 0 } {$i < $procs} {incr i} {
		fileremove -f $testdir/$i.mutexout
	}
}

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: mutexscript.tcl,v 11.12 2000/11/21 22:14:56 dda Exp $
# $Id: mutexscript.tcl,v 11.16 2002/04/29 14:58:16 sandstro Exp $
#
# Random mutex tester.
# Usage: mutexscript dir numiters mlocks sleepint degree
@ -43,7 +43,7 @@ puts " $numiters $nmutex $sleepint $degree"
flush stdout
# Open the environment and the mutex
set e [berkdb env -create -mode 0644 -lock -home $dir]
set e [berkdb_env -create -mode 0644 -lock -home $dir]
error_check_good evn_open [is_valid_env $e] TRUE
set mutex [$e mutex 0644 $nmutex]
@ -73,8 +73,8 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
}
}
# Pick sleep interval
tclsleep [ berkdb random_int 1 $sleepint ]
# Sleep for 10 to (100*$sleepint) ms.
after [berkdb random_int 10 [expr $sleepint * 100]]
# Now release locks
foreach i $mlist {

View file

@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: ndbm.tcl,v 11.13 2000/08/25 14:21:51 sue Exp $
# $Id: ndbm.tcl,v 11.16 2002/07/08 13:11:30 mjc Exp $
#
# Historic NDBM interface test.
# Use the first 1000 entries from the dictionary.
@ -80,11 +80,14 @@ proc ndbm { { nentries 1000 } } {
error_check_good NDBM:diff($t3,$t2) \
[filecmp $t3 $t2] 0
puts "\tNDBM.c: pagf/dirf test"
set fd [$db pagfno]
error_check_bad pagf $fd -1
set fd [$db dirfno]
error_check_bad dirf $fd -1
# File descriptors tests won't work under Windows.
if { $is_windows_test != 1 } {
puts "\tNDBM.c: pagf/dirf test"
set fd [$db pagfno]
error_check_bad pagf $fd -1
set fd [$db dirfno]
error_check_bad dirf $fd -1
}
puts "\tNDBM.d: close, open, and dump file"

295
bdb/test/parallel.tcl Normal file
View file

@ -0,0 +1,295 @@
# Code to load up the tests in to the Queue database
# $Id: parallel.tcl,v 11.28 2002/09/05 17:23:06 sandstro Exp $
# Load the parallel-run queue database with test commands.
#   file   - text file of test commands, one command per line
#   dbdir  - home directory for the queue environment
#   nitems - how many commands to load, or the literal string ALL
# NOTE(review): dbdir has a default but the following nitems does
# not, so in practice callers must always pass all three arguments.
proc load_queue { file {dbdir RUNQUEUE} nitems } {
puts -nonewline "Loading run queue with $nitems items..."
flush stdout
# The queue database lives in its own locking environment so the
# consumer processes can share it safely.
set env [berkdb_env -create -lock -home $dbdir]
error_check_good dbenv [is_valid_env $env] TRUE
# Fixed-length queue records; 120 bytes per test-command string.
set db [eval {berkdb_open -env $env -create -truncate \
-mode 0644 -len 120 -queue queue.db} ]
error_check_good dbopen [is_valid_db $db] TRUE
# Read every command line into an in-memory array, indexed 0..count-1.
set fid [open $file]
set count 0
while { [gets $fid str] != -1 } {
set testarr($count) $str
incr count
}
# Randomize array of tests.
# Shuffle in place (Fisher-Yates style), seeded with our pid.
set rseed [pid]
berkdb srand $rseed
puts -nonewline "randomizing..."
flush stdout
for { set i 0 } { $i < $count } { incr i } {
set j [berkdb random_int $i [expr $count - 1]]
set tmp $testarr($i)
set testarr($i) $testarr($j)
set testarr($j) $tmp
}
# ALL means load every command read; otherwise load only nitems.
if { [string compare ALL $nitems] != 0 } {
set maxload $nitems
} else {
set maxload $count
}
puts "loading..."
flush stdout
# -append assigns queue record numbers sequentially from 1, which
# is what the error check below relies on.
for { set i 0 } { $i < $maxload } { incr i } {
set str $testarr($i)
set ret [eval {$db put -append $str} ]
error_check_good put:$db $ret [expr $i + 1]
}
puts "Loaded $maxload records (out of $count)."
close $fid
$db close
$env close
}
# Build the test list and load it into the run-queue database.
#   dbdir  - queue environment home (created if missing)
#   nitems - item count or ALL, forwarded to load_queue
#   list   - proc invoked with -n to produce ALL.OUT
proc init_runqueue { {dbdir RUNQUEUE} nitems list} {
	# Make sure the queue directory exists before loading into it.
	if { ![file exists $dbdir] } {
		file mkdir $dbdir
	}
	puts "Creating test list..."

	# Generate ALL.OUT, load the queue from it, then promote the
	# file to TEST.LIST (replacing any previous one).
	$list -n
	load_queue ALL.OUT $dbdir $nitems
	file delete TEST.LIST
	file rename ALL.OUT TEST.LIST
}
# Run the regression suite in parallel: build a run queue, fork
# nprocs consumer processes (run_queue), wait for them, and report
# whether any per-process ALL.OUT.<i> log shows a failure.
#   nprocs - number of parallel consumer processes
#   list   - proc that generates the test list (default run_all)
#   nitems - queue size, or ALL
proc run_parallel { nprocs {list run_all} {nitems ALL} } {
set basename ./PARALLEL_TESTDIR
set queuedir ./RUNQUEUE
source ./include.tcl
mkparalleldirs $nprocs $basename $queuedir
init_runqueue $queuedir $nitems $list
set basedir [pwd]
set pidlist {}
# Rewrite queuedir relative to the per-process directories, which
# sit two levels below the build directory.
set queuedir ../../[string range $basedir \
[string last "/" $basedir] end]/$queuedir
for { set i 1 } { $i <= $nprocs } { incr i } {
fileremove -f ALL.OUT.$i
# Start a consumer in the background; create begin.<pid> so
# watch_procs can track it. Errors are caught so one failed
# spawn does not abort the rest.
set ret [catch {
set p [exec $tclsh_path << \
"source $test_path/test.tcl;\
run_queue $i $basename.$i $queuedir $nitems" &]
lappend pidlist $p
set f [open $testdir/begin.$p w]
close $f
} res]
}
# Poll every 300 seconds, give up after 360000 seconds.
watch_procs $pidlist 300 360000
set failed 0
for { set i 1 } { $i <= $nprocs } { incr i } {
if { [check_failed_run ALL.OUT.$i] != 0 } {
set failed 1
puts "Regression tests failed in process $i."
}
}
if { $failed == 0 } {
puts "Regression tests succeeded."
}
}
# Consumer process body: repeatedly pull test commands off the shared
# queue database and execute each in a child tclsh, logging to the
# build directory's ALL.OUT.<i>.
#   i        - this consumer's index (names its TESTDIR.<i> and log)
#   rundir   - per-process directory to cd into
#   queuedir - home of the shared queue environment
#   nitems   - total item count, used only for progress messages
proc run_queue { i rundir queuedir nitems } {
set builddir [pwd]
file delete $builddir/ALL.OUT.$i
cd $rundir
puts "Parallel run_queue process $i (pid [pid]) starting."
source ./include.tcl
global env
set dbenv [berkdb_env -create -lock -home $queuedir]
error_check_good dbenv [is_valid_env $dbenv] TRUE
set db [eval {berkdb_open -env $dbenv \
-mode 0644 -len 120 -queue queue.db} ]
error_check_good dbopen [is_valid_db $db] TRUE
set dbc [eval $db cursor]
error_check_good cursor [is_valid_cursor $dbc $db] TRUE
set count 0
set waitcnt 0
# Keep consuming until the queue stays empty for 5 consecutive
# one-second polls.
while { $waitcnt < 5 } {
# -consume atomically removes the head record; returns an
# empty list when the queue is empty.
set line [$db get -consume]
if { [ llength $line ] > 0 } {
set cmd [lindex [lindex $line 0] 1]
set num [lindex [lindex $line 0] 0]
set o [open $builddir/ALL.OUT.$i a]
puts $o "\nExecuting record $num ([timestamp -w]):\n"
# Point the command at this process's private TESTDIR.<i>.
set tdir "TESTDIR.$i"
regsub {TESTDIR} $cmd $tdir cmd
puts $o $cmd
close $o
if { [expr {$num % 10} == 0] } {
puts "Starting test $num of $nitems"
}
#puts "Process $i, record $num:\n$cmd"
# Purify/PureCoverage options for the child, keyed by record
# number so log files don't collide.
set env(PURIFYOPTIONS) \
"-log-file=./test$num.%p -follow-child-processes -messages=first"
set env(PURECOVOPTIONS) \
"-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
# Run the test in a fresh tclsh; append stdout+stderr to the
# shared log. Failures are recorded, not fatal.
if [catch {exec $tclsh_path \
<< "source $test_path/test.tcl; $cmd" \
>>& $builddir/ALL.OUT.$i } res] {
set o [open $builddir/ALL.OUT.$i a]
puts $o "FAIL: '$cmd': $res"
close $o
}
env_cleanup $testdir
set o [open $builddir/ALL.OUT.$i a]
puts $o "\nEnding record $num ([timestamp])\n"
close $o
incr count
} else {
incr waitcnt
tclsleep 1
}
}
puts "Process $i: $count commands executed"
$dbc close
$db close
$dbenv close
#
# We need to put the pid file in the builddir's idea
# of testdir, not this child process' local testdir.
# Therefore source builddir's include.tcl to get its
# testdir.
# !!! This resets testdir, so don't do anything else
# local to the child after this.
source $builddir/include.tcl
set f [open $builddir/$testdir/end.[pid] w]
close $f
}
# Create one working directory per parallel process and populate it
# with the binaries and a path-adjusted copy of include.tcl.
#   nprocs   - number of directories ($basename.1 .. $basename.$nprocs)
#   basename - directory name prefix
#   queuedir - queue directory (copy currently commented out)
proc mkparalleldirs { nprocs basename queuedir } {
source ./include.tcl
set dir [pwd]
# Windows builds carry a .exe suffix on the utilities.
if { $is_windows_test != 1 } {
set EXE ""
} else {
set EXE ".exe"
}
for { set i 1 } { $i <= $nprocs } { incr i } {
set destdir $basename.$i
catch {file mkdir $destdir}
puts "Created $destdir"
if { $is_windows_test == 1 } {
catch {file mkdir $destdir/Debug}
catch {eval file copy \
[eval glob {$dir/Debug/*.dll}] $destdir/Debug}
}
catch {eval file copy \
[eval glob {$dir/{.libs,include.tcl}}] $destdir}
# catch {eval file copy $dir/$queuedir $destdir}
# Copy the db_* utility binaries; copies are best-effort
# (catch) since not every build produces all of them.
catch {eval file copy \
[eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
{$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
{$dir/db_{archive,verify}$EXE}] \
$destdir}
# Create modified copies of include.tcl in parallel
# directories so paths still work.
set infile [open ./include.tcl r]
set d [read $infile]
close $infile
# Rewrite relative paths one level up, and give each copy a
# private TESTDIR.<i>.
regsub {test_path } $d {test_path ../} d
regsub {src_root } $d {src_root ../} d
set tdir "TESTDIR.$i"
regsub -all {TESTDIR} $d $tdir d
regsub {KILL \.} $d {KILL ..} d
set outfile [open $destdir/include.tcl w]
puts $outfile $d
close $outfile
# Copy any RPC service executables the suite knows about.
global svc_list
foreach svc_exe $svc_list {
if { [file exists $dir/$svc_exe] } {
catch {eval file copy $dir/$svc_exe $destdir}
}
}
}
}
# Run a single named test in nprocs parallel processes. If the test
# is parameterized (listed in the parms array), run it once per
# access method, stopping at the first method whose run fails.
proc run_ptest { nprocs test args } {
	global parms
	set pardir ./PARALLEL_TESTDIR
	set qdir NULL
	source ./include.tcl
	mkparalleldirs $nprocs $pardir $qdir

	if { ![info exists parms($test)] } {
		# Not a parameterized test: run it exactly as given.
		eval exec_ptest $nprocs $pardir $test $args
		return
	}

	# Parameterized test: iterate over the access methods.
	foreach method \
	    "hash queue queueext recno rbtree frecno rrecno btree" {
		set rv [eval exec_ptest $nprocs $pardir \
		    $test $method $args]
		if { $rv != 0 } {
			break
		}
	}
}
# Fan out one test across nprocs background tclsh processes, each in
# its own $basename.<i> directory, then wait and scan the ALL.OUT.<i>
# logs. Returns 0 if every process succeeded, 1 otherwise.
proc exec_ptest { nprocs basename test args } {
source ./include.tcl
set basedir [pwd]
set pidlist {}
puts "Running $nprocs parallel runs of $test"
for { set i 1 } { $i <= $nprocs } { incr i } {
set outf ALL.OUT.$i
fileremove -f $outf
# Launch in the background; create begin.<pid> for watch_procs.
# The catch keeps one failed spawn from aborting the rest.
set ret [catch {
set p [exec $tclsh_path << \
"cd $basename.$i;\
source ../$test_path/test.tcl;\
$test $args" >& $outf &]
lappend pidlist $p
set f [open $testdir/begin.$p w]
close $f
} res]
}
# Poll every 30 seconds, give up after 36000 seconds.
watch_procs $pidlist 30 36000
set failed 0
for { set i 1 } { $i <= $nprocs } { incr i } {
if { [check_failed_run ALL.OUT.$i] != 0 } {
set failed 1
puts "Test $test failed in process $i."
}
}
if { $failed == 0 } {
puts "Test $test succeeded all processes"
return 0
} else {
puts "Test failed: stopping"
return 1
}
}

View file

@ -1,19 +1,27 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd001.tcl,v 11.28 2000/12/07 19:13:46 sue Exp $
# $Id: recd001.tcl,v 11.40 2002/05/08 19:36:18 sandstro Exp $
#
# Recovery Test 1.
# These are the most basic recovery tests. We do individual recovery
# tests for each operation in the access method interface. First we
# create a file and capture the state of the database (i.e., we copy
# it. Then we run a transaction containing a single operation. In
# one test, we abort the transaction and compare the outcome to the
# original copy of the file. In the second test, we restore the
# original copy of the database and then run recovery and compare
# this against the actual database.
# TEST recd001
# TEST Per-operation recovery tests for non-duplicate, non-split
# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
# TEST condition. Any test that appears with the message (change state)
# TEST indicates that we've already run the particular test, but we are
# TEST running it again so that we can change the state of the data base
# TEST to prepare for the next test (this applies to all other recovery
# TEST tests as well).
# TEST
# TEST These are the most basic recovery tests. We do individual recovery
# TEST tests for each operation in the access method interface. First we
# TEST create a file and capture the state of the database (i.e., we copy
# TEST it. Then we run a transaction containing a single operation. In
# TEST one test, we abort the transaction and compare the outcome to the
# TEST original copy of the file. In the second test, we restore the
# TEST original copy of the database and then run recovery and compare
# TEST this against the actual database.
proc recd001 { method {select 0} args} {
global fixed_len
source ./include.tcl
@ -43,7 +51,7 @@ proc recd001 { method {select 0} args} {
set flags "-create -txn -home $testdir"
puts "\tRecd001.a.0: creating environment"
set env_cmd "berkdb env $flags"
set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@ -124,6 +132,7 @@ proc recd001 { method {select 0} args} {
set newdata NEWrecd001_dataNEW
set off 3
set len 12
set partial_grow replacement_record_grow
set partial_shrink xxx
if { [is_fixed_length $method] == 1 } {
@ -165,16 +174,69 @@ proc recd001 { method {select 0} args} {
# }
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
op_recover prepare-abort $testdir $env_cmd $testfile2 $cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 $cmd $msg
#
# Note that since prepare-discard ultimately aborts
# the txn, it must come before prepare-commit.
#
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-discard $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}
set fixed_len $orig_fixed_len
puts "\tRecd001.o: Verify db_printlog can read logfile"
set tmpfile $testdir/printlog.out
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $tmpfile} ret]
error_check_good db_printlog $stat 0
fileremove $tmpfile
if { [is_fixed_length $method] == 1 } {
puts "Skipping remainder of test for fixed length methods"
return
}
#
# Check partial extensions. If we add a key/data to the database
# and then expand it using -partial, then recover, recovery was
# failing in #3944. Check that scenario here.
#
# !!!
# We loop here because on each iteration, we need to clean up
# the old env (i.e. this test does not depend on earlier runs).
# If we run it without cleaning up the env in between, we do not
# test the scenario of #3944.
#
set len [string length $data]
set len2 256
set part_data [replicate "abcdefgh" 32]
set p [list 0 $len]
set cmd [subst \
{DB put -txn TXNID -partial {$len $len2} $key $part_data}]
set msg "Recd001.o: partial put prepopulated/expanding"
foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
env_cleanup $testdir
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
set t [$dbenv txn]
error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
set oflags "-create $omethod -mode 0644 \
-env $dbenv -txn $t $opts $testfile"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set oflags "-create $omethod -mode 0644 \
-env $dbenv -txn $t $opts $testfile2"
set db2 [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db2] TRUE
set ret [$db put -txn $t -partial $p $key $data]
error_check_good dbput $ret 0
set ret [$db2 put -txn $t -partial $p $key $data]
error_check_good dbput $ret 0
error_check_good txncommit [$t commit] 0
error_check_good dbclose [$db close] 0
error_check_good dbclose [$db2 close] 0
error_check_good dbenvclose [$dbenv close] 0
op_recover $op $testdir $env_cmd $testfile $cmd $msg
}
return
}

View file

@ -1,11 +1,13 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd002.tcl,v 11.22 2000/12/11 17:24:54 sue Exp $
# $Id: recd002.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
#
# Recovery Test #2. Verify that splits can be recovered.
# TEST recd002
# TEST Split recovery tests. For every known split log message, makes sure
# TEST that we exercise redo, undo, and do-nothing condition.
proc recd002 { method {select 0} args} {
source ./include.tcl
global rand_init
@ -37,7 +39,7 @@ proc recd002 { method {select 0} args} {
"-create -txn -lock_max 2000 -home $testdir"
puts "\tRecd002.a: creating environment"
set env_cmd "berkdb env $eflags"
set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@ -80,9 +82,14 @@ proc recd002 { method {select 0} args} {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
#
# Note that since prepare-discard ultimately aborts
# the txn, it must come before prepare-commit.
#
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-discard $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}

View file

@ -1,14 +1,17 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd003.tcl,v 11.22 2000/12/07 19:13:46 sue Exp $
# $Id: recd003.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
#
# Recovery Test 3.
# Test all the duplicate log messages and recovery operations. We make
# sure that we exercise all possible recovery actions: redo, undo, undo
# but no fix necessary and redo but no fix necessary.
# TEST recd003
# TEST Duplicate recovery tests. For every known duplicate log message,
# TEST makes sure that we exercise redo, undo, and do-nothing condition.
# TEST
# TEST Test all the duplicate log messages and recovery operations. We make
# TEST sure that we exercise all possible recovery actions: redo, undo, undo
# TEST but no fix necessary and redo but no fix necessary.
proc recd003 { method {select 0} args } {
source ./include.tcl
global rand_init
@ -31,7 +34,7 @@ proc recd003 { method {select 0} args } {
set eflags "-create -txn -home $testdir"
puts "\tRecd003.a: creating environment"
set env_cmd "berkdb env $eflags"
set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@ -95,9 +98,14 @@ proc recd003 { method {select 0} args } {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
#
# Note that since prepare-discard ultimately aborts
# the txn, it must come before prepare-commit.
#
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-discard $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}

View file

@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd004.tcl,v 11.21 2000/12/11 17:24:55 sue Exp $
# $Id: recd004.tcl,v 11.29 2002/02/25 16:44:25 sandstro Exp $
#
# Recovery Test #4.
# Verify that we work correctly when big keys get elevated.
# TEST recd004
# TEST Big key test where big key gets elevated to internal page.
proc recd004 { method {select 0} args} {
source ./include.tcl
global rand_init
@ -32,7 +32,7 @@ proc recd004 { method {select 0} args} {
set testfile2 recd004-2.db
set eflags "-create -txn -home $testdir"
puts "\tRecd004.a: creating environment"
set env_cmd "berkdb env $eflags"
set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@ -74,9 +74,14 @@ proc recd004 { method {select 0} args} {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
#
# Note that since prepare-discard ultimately aborts
# the txn, it must come before prepare-commit.
#
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-discard $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}

View file

@ -1,13 +1,15 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd005.tcl,v 11.27 2000/12/15 21:41:38 ubell Exp $
# $Id: recd005.tcl,v 11.34 2002/05/22 15:42:39 sue Exp $
#
# Recovery Test 5.
# Make sure that we can do catastrophic recovery even if we open
# files using the same log file id.
# TEST recd005
# TEST Verify reuse of file ids works on catastrophic recovery.
# TEST
# TEST Make sure that we can do catastrophic recovery even if we open
# TEST files using the same log file id.
proc recd005 { method args} {
source ./include.tcl
global rand_init
@ -15,7 +17,7 @@ proc recd005 { method args} {
set args [convert_args $method $args]
set omethod [convert_method $method]
puts "Recd005: $method catastropic recovery"
puts "Recd005: $method catastrophic recovery"
berkdb srand $rand_init
@ -38,7 +40,7 @@ proc recd005 { method args} {
puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
puts "\tRecd005.$tnum.a: creating environment"
set env_cmd "berkdb env $eflags"
set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@ -147,12 +149,11 @@ proc do_one_file { dir method env env_cmd filename num op } {
# Save the initial file and open the environment and the first file
file copy -force $dir/$filename $dir/$filename.init
copy_extent_file $dir $filename init
set oflags "-unknown -env $env"
set oflags "-auto_commit -unknown -env $env"
set db [eval {berkdb_open} $oflags $filename]
# Dump out file contents for initial case
set tflags ""
open_and_dump_file $filename $env $tflags $init_file nop \
open_and_dump_file $filename $env $init_file nop \
dump_file_direction "-first" "-next"
set txn [$env txn]
@ -167,7 +168,7 @@ proc do_one_file { dir method env env_cmd filename num op } {
error_check_good sync:$db [$db sync] 0
file copy -force $dir/$filename $dir/$filename.afterop
copy_extent_file $dir $filename afterop
open_and_dump_file $testdir/$filename.afterop NULL $tflags \
open_and_dump_file $testdir/$filename.afterop NULL \
$afterop_file nop dump_file_direction "-first" "-next"
error_check_good txn_$op:$txn [$txn $op] 0
@ -179,7 +180,7 @@ proc do_one_file { dir method env env_cmd filename num op } {
# Dump out file and save a copy.
error_check_good sync:$db [$db sync] 0
open_and_dump_file $testdir/$filename NULL $tflags $final_file nop \
open_and_dump_file $testdir/$filename NULL $final_file nop \
dump_file_direction "-first" "-next"
file copy -force $dir/$filename $dir/$filename.final
copy_extent_file $dir $filename final
@ -211,8 +212,7 @@ proc check_file { dir env_cmd filename op } {
set afterop_file $dir/$filename.t2
set final_file $dir/$filename.t3
set tflags ""
open_and_dump_file $testdir/$filename NULL $tflags $final_file nop \
open_and_dump_file $testdir/$filename NULL $final_file nop \
dump_file_direction "-first" "-next"
if { $op == "abort" } {
filesort $init_file $init_file.sort
@ -227,5 +227,4 @@ proc check_file { dir env_cmd filename op } {
diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
[filecmp $afterop_file.sort $final_file.sort] 0
}
}

View file

@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd006.tcl,v 11.21 2000/12/07 19:13:46 sue Exp $
# $Id: recd006.tcl,v 11.26 2002/03/15 16:30:53 sue Exp $
#
# Recovery Test 6.
# Test nested transactions.
# TEST recd006
# TEST Nested transactions.
proc recd006 { method {select 0} args} {
global kvals
source ./include.tcl
@ -83,7 +83,7 @@ proc recd006 { method {select 0} args} {
set eflags "-create -txn -home $testdir"
puts "\tRecd006.b: creating environment"
set env_cmd "berkdb env $eflags"
set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@ -176,7 +176,7 @@ proc nesttest { db parent env do p1 p2 child1 child2} {
# OK, do child 1
set kid1 [$env txn -parent $parent]
error_check_good kid1 [is_valid_widget $kid1 $env.txn] TRUE
error_check_good kid1 [is_valid_txn $kid1 $env] TRUE
# Reading write-locked parent object should be OK
#puts "\tRead write-locked parent object for kid1."
@ -193,7 +193,7 @@ proc nesttest { db parent env do p1 p2 child1 child2} {
# Now start child2
#puts "\tBegin txn for kid2."
set kid2 [$env txn -parent $parent]
error_check_good kid2 [is_valid_widget $kid2 $env.txn] TRUE
error_check_good kid2 [is_valid_txn $kid2 $env] TRUE
# Getting anything in the p1 set should deadlock, so let's
# work on the p2 set.

View file

@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd007.tcl,v 11.38 2000/12/20 21:39:23 krinsky Exp $
# $Id: recd007.tcl,v 11.60 2002/08/08 15:38:07 bostic Exp $
#
# Recovery Test 7.
# This is a recovery test for create/delete of databases. We have
# hooks in the database so that we can abort the process at various
# points and make sure that the transaction doesn't commit. We
# then need to recover and make sure the file is correctly existing
# or not, as the case may be.
# TEST recd007
# TEST File create/delete tests.
# TEST
# TEST This is a recovery test for create/delete of databases. We have
# TEST hooks in the database so that we can abort the process at various
# TEST points and make sure that the transaction doesn't commit. We
# TEST then need to recover and make sure the file is correctly existing
# TEST or not, as the case may be.
proc recd007 { method args} {
global fixed_len
source ./include.tcl
@ -28,10 +30,10 @@ proc recd007 { method args} {
set flags "-create -txn -home $testdir"
puts "\tRecd007.a: creating environment"
set env_cmd "berkdb env $flags"
set env_cmd "berkdb_env $flags"
set env [eval $env_cmd]
#
# We need to create a database to get the pagesize (either
# the default or whatever might have been specified).
# Then remove it so we can compute fixed_len and create the
@ -54,7 +56,6 @@ proc recd007 { method args} {
# Convert the args again because fixed_len is now real.
set opts [convert_args $method ""]
#
# List of recovery tests: {HOOKS MSG} pairs
# Where each HOOK is a list of {COPY ABORT}
#
@ -89,25 +90,26 @@ proc recd007 { method args} {
}
set rlist {
{ {"none" "prerename"} "Recd007.l0: none/prerename"}
{ {"none" "postrename"} "Recd007.l1: none/postrename"}
{ {"prerename" "none"} "Recd007.m0: prerename/none"}
{ {"postrename" "none"} "Recd007.m1: postrename/none"}
{ {"prerename" "prerename"} "Recd007.n: prerename/prerename"}
{ {"prerename" "postrename"} "Recd007.o: prerename/postrename"}
{ {"postrename" "postrename"} "Recd007.p: postrename/postrename"}
{ {"none" "predestroy"} "Recd007.l0: none/predestroy"}
{ {"none" "postdestroy"} "Recd007.l1: none/postdestroy"}
{ {"predestroy" "none"} "Recd007.m0: predestroy/none"}
{ {"postdestroy" "none"} "Recd007.m1: postdestroy/none"}
{ {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"}
{ {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"}
{ {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"}
}
foreach op { dbremove dbrename } {
foreach op { dbremove dbrename dbtruncate } {
foreach pair $rlist {
set cmd [lindex $pair 0]
set msg [lindex $pair 1]
file_recover_delete $testdir $env_cmd $omethod \
$opts $testfile $cmd $msg $op
$opts $testfile $cmd $msg $op
}
}
if { $is_windows_test != 1 } {
do_file_recover_delmk $testdir $env_cmd $omethod $opts $testfile
set env_cmd "berkdb_env_noerr $flags"
do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
}
puts "\tRecd007.r: Verify db_printlog can read logfile"
@ -150,6 +152,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
}
env_cleanup $dir
set dflags "-dar"
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
set copy [lindex $cmd 0]
@ -167,17 +170,16 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
return
}
#
# Basically non-existence is our initial state. When we
# abort, it is also our final state.
#
switch $sub {
0 {
set oflags "-create $method -mode 0644 \
set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile"
}
1 {
set oflags "-create $method -mode 0644 \
set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub0"
}
2 {
@ -185,14 +187,14 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
# If we are aborting here, then we need to
# create a first subdb, then create a second
#
set oflags "-create $method -mode 0644 \
set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub0"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
error_check_good db_close [$db close] 0
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
set oflags "-create $method -mode 0644 \
set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub1"
}
default {
@ -214,8 +216,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
# Sync the mpool so any changes to the file that are
# in mpool get written to the disk file before the
# diff.
puts "\t\tSyncing"
$env mpool_sync "0 0"
$env mpool_sync
#
# If we don't abort, then we expect success.
@ -238,7 +239,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
} else {
error_check_good \
diff(init,postcreate):diff($init_file,$dir/$dbfile)\
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff $dflags $init_file $dir $dbfile] 0
}
} else {
#
@ -289,7 +290,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff $dflags $init_file $dir $dbfile] 0
#
# Need a new copy to get the right LSN into the file.
#
@ -300,7 +301,6 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
}
}
#
# If we didn't make a copy, then we are done.
#
if {[string first "none" $copy] != -1} {
@ -310,11 +310,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
# Now move the .afterop file to $dbfile. Run recovery again.
#
file copy -force $dir/$dbfile.afterop $dir/$dbfile
if { [is_queue $method] == 1 } {
move_file_extent $dir $dbfile afterop copy
}
copy_afterop $dir
berkdb debug_check
puts -nonewline "\t\tAbout to run recovery ... "
@ -339,7 +335,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff $dflags $init_file $dir $dbfile] 0
}
}
@ -384,43 +380,61 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error_check_good abort_location [is_valid_delete_loc $abort] 1
if { [is_record_based $method] == 1 } {
set key 1
set key1 1
set key2 2
} else {
set key recd007_key
set key1 recd007_key1
set key2 recd007_key2
}
set data1 recd007_data
set data2 NEWrecd007_data2
set data1 recd007_data0
set data2 recd007_data1
set data3 NEWrecd007_data2
#
# Depending on what sort of subdb we want, if any, our
# args to the open call will be different (and if we
# want a 2nd subdb, we create the first here.
#
# XXX
# For dbtruncate, we want oflags to have "$env" in it,
# not have the value currently in 'env'. That is why
# the '$' is protected below. Later on we use oflags
# but with a new $env we just opened.
#
switch $sub {
0 {
set oflags "-create $method -mode 0644 \
-env $env $opts $dbfile"
set subdb ""
set new $dbfile.new
set dflags "-dar"
set oflags "-create $method -auto_commit -mode 0644 \
-env \$env $opts $dbfile"
}
1 {
set oflags "-create $method -mode 0644 \
-env $env $opts $dbfile sub0"
set subdb sub0
set new $subdb.new
set dflags ""
set oflags "-create $method -auto_commit -mode 0644 \
-env \$env $opts $dbfile $subdb"
}
2 {
#
# If we are aborting here, then we need to
# create a first subdb, then create a second
#
set oflags "-create $method -mode 0644 \
-env $env $opts $dbfile sub0"
set subdb sub1
set new $subdb.new
set dflags ""
set oflags "-create $method -auto_commit -mode 0644 \
-env \$env $opts $dbfile sub0"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
set ret [$db put -txn $txn $key $data2]
set ret [$db put -txn $txn $key1 $data1]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
set oflags "-create $method -mode 0644 \
-env $env $opts $dbfile sub1"
set oflags "-create $method -auto_commit -mode 0644 \
-env \$env $opts $dbfile $subdb"
}
default {
puts "\tBad value $sub for sub"
@ -443,11 +457,15 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
set ret [$db put -txn $txn $key $data1]
set ret [$db put -txn $txn $key1 $data1]
error_check_good db_put $ret 0
set ret [$db put -txn $txn $key2 $data2]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
$env mpool_sync
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
@ -459,16 +477,51 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
# If we don't abort, then we expect success.
# If we abort, we expect no file removed.
#
if { [string compare $op dbremove] == 0 } {
set ret [catch { berkdb $op -env $env $dbfile } remret]
} else {
set ret [catch { berkdb $op -env $env $dbfile $dbfile.new } \
remret]
switch $op {
"dbrename" {
set ret [catch { eval {berkdb} $op -env $env -auto_commit \
$dbfile $subdb $new } remret]
}
if {[string first "none" $abort] == -1} {
"dbremove" {
set ret [catch { eval {berkdb} $op -env $env -auto_commit \
$dbfile $subdb } remret]
}
"dbtruncate" {
set txn [$env txn]
set db [eval {berkdb_open_noerr -env} \
$env -auto_commit $dbfile $subdb]
error_check_good dbopen [is_valid_db $db] TRUE
error_check_good txnbegin [is_valid_txn $txn $env] TRUE
set ret [catch {$db truncate -txn $txn} remret]
}
}
$env mpool_sync
if { $abort == "none" } {
if { $op == "dbtruncate" } {
error_check_good txncommit [$txn commit] 0
error_check_good dbclose [$db close] 0
}
#
# Operation was committed, verify it.
#
puts "\t\tCommand executed and committed."
error_check_good $op $ret 0
#
# If a dbtruncate, check that truncate returned the number
# of items previously in the database.
#
if { [string compare $op "dbtruncate"] == 0 } {
error_check_good remret $remret 2
}
recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
} else {
#
# Operation was aborted, verify it did not change.
#
if { $op == "dbtruncate" } {
error_check_good txnabort [$txn abort] 0
error_check_good dbclose [$db close] 0
}
puts "\t\tCommand executed and aborted."
error_check_good $op $ret 1
@ -479,30 +532,16 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error_check_good post$op.1 [file exists $dir/$dbfile] 1
error_check_good \
diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
[dbdump_diff $init_file $dir/$dbfile] 0
} else {
#
# Operation was committed, verify it does
# not exist.
#
puts "\t\tCommand executed and committed."
error_check_good $op $ret 0
#
# Check that the file does not exist or correct
# file exists.
#
error_check_good $op [file exists $dir/$dbfile] 0
if { [string compare $op dbrename] == 0 } {
error_check_good $op [file exists $dir/$dbfile.new] 1
}
[dbdump_diff $dflags $init_file $dir $dbfile] 0
}
$env mpool_sync
error_check_good env_close [$env close] 0
catch { file copy -force $dir/$dbfile $init_file } res
if { [is_queue $method] == 1} {
copy_extent_file $dir $dbfile init
}
#
# Run recovery here. Should be a no-op. Verify that
# the file still doesn't exist or change (depending on abort)
@ -517,20 +556,24 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error "FAIL: Recovery error: $result."
return
}
puts "complete"
if { [string first "none" $abort] != -1} {
if { $abort == "none" } {
#
# Operation was committed, verify it still does
# not exist.
# Operate was committed.
#
error_check_good after_recover1 [file exists $dir/$dbfile] 0
set env [eval $env_cmd]
recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
error_check_good env_close [$env close] 0
} else {
#
# Operation was aborted, verify it did not change.
#
berkdb debug_check
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff $dflags $init_file $dir $dbfile] 0
}
#
@ -541,15 +584,10 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
}
#
# Now move the .afterop file to $dbfile. Run recovery again.
# Now restore the .afterop file(s) to their original name.
# Run recovery again.
#
set filecopy [glob $dir/*.afterop]
set afterop [lindex $filecopy 0]
file rename -force $afterop $dir/$dbfile
set afterop [string range $afterop \
[expr [string last "/" $afterop] + 1] \
[string last "." $afterop]]
move_file_extent $dir $dbfile afterop rename
copy_afterop $dir
berkdb debug_check
puts -nonewline "\t\tAbout to run recovery ... "
@ -563,18 +601,16 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
puts "complete"
if { [string first "none" $abort] != -1} {
#
# Operation was committed, verify it still does
# not exist.
#
error_check_good after_recover2 [file exists $dir/$dbfile] 0
set env [eval $env_cmd]
recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
error_check_good env_close [$env close] 0
} else {
#
# Operation was aborted, verify it did not change.
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff $dflags $init_file $dir $dbfile] 0
}
}
@ -597,11 +633,13 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
if { $log_log_record_types == 1} {
logtrack_read $dir
}
set omethod [convert_method $method]
puts "\tRecd007.q: Delete and recreate a database"
env_cleanup $dir
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
error_check_good env_open [is_valid_env $env] TRUE
if { [is_record_based $method] == 1 } {
set key 1
@ -611,13 +649,14 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
set data1 recd007_data
set data2 NEWrecd007_data2
set oflags "-create $method -mode 0644 -env $env $opts $dbfile"
set oflags \
"-create $omethod -auto_commit -mode 0644 $opts $dbfile"
#
# Open our db, add some data, close and copy as our
# init file.
#
set db [eval {berkdb_open} $oflags]
set db [eval {berkdb_open_noerr} -env $env $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
set ret [$db put -txn $txn $key $data1]
@ -625,7 +664,9 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
set ret [catch { berkdb dbremove -env $env $dbfile } remret]
set ret \
[catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
#
# Operation was committed, verify it does
# not exist.
@ -637,10 +678,10 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
#
# Now create a new db with the same name.
#
set db [eval {berkdb_open} $oflags]
set db [eval {berkdb_open_noerr} -env $env $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
set ret [$db put -txn $txn $key $data1]
set ret [$db put -txn $txn $key [chop_data $method $data2]]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_sync [$db sync] 0
@ -663,9 +704,29 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
# up the Tcl widgets.
#
set stat [catch {$db close} ret]
error_check_bad dbclose_after_remove $stat 0
error_check_good dbclose_after_remove [is_substr $ret recovery] 1
set stat [catch {$env close} ret]
error_check_bad envclose_after_remove $stat 0
error_check_good envclose_after_remove [is_substr $ret recovery] 1
#
# Reopen env and db and verify 2nd database is there.
#
set env [eval $env_cmd]
error_check_good env_open [is_valid_env $env] TRUE
set db [eval {berkdb_open} -env $env $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set ret [$db get $key]
error_check_good dbget [llength $ret] 1
set kd [lindex $ret 0]
error_check_good key [lindex $kd 0] $key
error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
error_check_good dbclose [$db close] 0
error_check_good envclose [$env close] 0
}
proc is_valid_create_loc { loc } {
switch $loc {
none -
@ -683,8 +744,8 @@ proc is_valid_create_loc { loc } {
proc is_valid_delete_loc { loc } {
switch $loc {
none -
prerename -
postrename -
predestroy -
postdestroy -
postremcall
{ return 1 }
default
@ -697,23 +758,23 @@ proc is_valid_delete_loc { loc } {
# just a free/invalid page.
# Return 1 if they are different, 0 if logically the same (or identical).
#
proc dbdump_diff { initfile dbfile } {
proc dbdump_diff { flags initfile dir dbfile } {
source ./include.tcl
set initdump $initfile.dump
set dbdump $dbfile.dump
set stat [catch {exec $util_path/db_dump -dar -f $initdump \
set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
$initfile} ret]
error_check_good dbdump.init $stat 0
# Do a dump without the freelist which should eliminate any
# recovery differences.
set stat [catch {exec $util_path/db_dump -dar -f $dbdump $dbfile} \
ret]
set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
$dir/$dbfile} ret]
error_check_good dbdump.db $stat 0
set stat [filecmp $dbdump $initdump]
set stat [filecmp $dir/$dbdump $initdump]
if {$stat == 0} {
return 0
@ -721,3 +782,105 @@ proc dbdump_diff { initfile dbfile } {
puts "diff: $dbdump $initdump gives:\n$ret"
return 1
}
proc recd007_check { op sub dir dbfile subdb new env oflags } {
#
# No matter how many subdbs we have, dbtruncate will always
# have a file, and if we open our particular db, it should
# have no entries.
#
if { $sub == 0 } {
if { $op == "dbremove" } {
error_check_good $op:not-exist \
[file exists $dir/$dbfile] 0
} elseif { $op == "dbrename"} {
error_check_good $op:exist \
[file exists $dir/$dbfile] 0
error_check_good $op:exist2 \
[file exists $dir/$dbfile.new] 1
} else {
error_check_good $op:exist \
[file exists $dir/$dbfile] 1
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set dbc [$db cursor]
error_check_good dbc_open \
[is_valid_cursor $dbc $db] TRUE
set ret [$dbc get -first]
error_check_good dbget1 [llength $ret] 0
error_check_good dbc_close [$dbc close] 0
error_check_good db_close [$db close] 0
}
return
} else {
set t1 $dir/t1
#
# If we have subdbs, check that all but the last one
# are there, and the last one is correctly operated on.
#
set db [berkdb_open -rdonly -env $env $dbfile]
error_check_good dbopen [is_valid_db $db] TRUE
set c [eval {$db cursor}]
error_check_good db_cursor [is_valid_cursor $c $db] TRUE
set d [$c get -last]
if { $op == "dbremove" } {
if { $sub == 1 } {
error_check_good subdb:rem [llength $d] 0
} else {
error_check_bad subdb:rem [llength $d] 0
set sdb [lindex [lindex $d 0] 0]
error_check_bad subdb:rem1 $sdb $subdb
}
} elseif { $op == "dbrename"} {
set sdb [lindex [lindex $d 0] 0]
error_check_good subdb:ren $sdb $new
if { $sub != 1 } {
set d [$c get -prev]
error_check_bad subdb:ren [llength $d] 0
set sdb [lindex [lindex $d 0] 0]
error_check_good subdb:ren1 \
[is_substr "new" $sdb] 0
}
} else {
set sdb [lindex [lindex $d 0] 0]
set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
error_check_good db_open [is_valid_db $dbt] TRUE
set dbc [$dbt cursor]
error_check_good dbc_open \
[is_valid_cursor $dbc $dbt] TRUE
set ret [$dbc get -first]
error_check_good dbget2 [llength $ret] 0
error_check_good dbc_close [$dbc close] 0
error_check_good db_close [$dbt close] 0
if { $sub != 1 } {
set d [$c get -prev]
error_check_bad subdb:ren [llength $d] 0
set sdb [lindex [lindex $d 0] 0]
set dbt [berkdb_open -rdonly -env $env \
$dbfile $sdb]
error_check_good db_open [is_valid_db $dbt] TRUE
set dbc [$db cursor]
error_check_good dbc_open \
[is_valid_cursor $dbc $db] TRUE
set ret [$dbc get -first]
error_check_bad dbget3 [llength $ret] 0
error_check_good dbc_close [$dbc close] 0
error_check_good db_close [$dbt close] 0
}
}
error_check_good dbcclose [$c close] 0
error_check_good db_close [$db close] 0
}
}
proc copy_afterop { dir } {
set r [catch { set filecopy [glob $dir/*.afterop] } res]
if { $r == 1 } {
return
}
foreach f $filecopy {
set orig [string range $f 0 \
[expr [string last "." $f] - 1]]
catch { file rename -force $f $orig} res
}
}

View file

@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd008.tcl,v 1.22 2000/12/07 19:13:46 sue Exp $
# $Id: recd008.tcl,v 1.26 2002/02/25 16:44:26 sandstro Exp $
#
# Recovery Test 8.
# Test deeply nested transactions and many-child transactions.
# TEST recd008
# TEST Test deeply nested transactions and many-child transactions.
proc recd008 { method {breadth 4} {depth 4} args} {
global kvals
source ./include.tcl
@ -59,7 +59,7 @@ proc recd008 { method {breadth 4} {depth 4} args} {
set eflags "-mode 0644 -create -txn_max $txn_max \
-txn -home $testdir"
set env_cmd "berkdb env $eflags"
set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_good env_open [is_valid_env $dbenv] TRUE

View file

@ -1,13 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd009.tcl,v 1.13 2000/12/07 19:13:46 sue Exp $
# $Id: recd009.tcl,v 1.18 2002/04/01 20:11:44 krinsky Exp $
#
# Recovery Test 9.
# Test stability of record numbers across splits
# and reverse splits and across recovery.
# TEST recd009
# TEST Verify record numbering across split/reverse splits and recovery.
proc recd009 { method {select 0} args} {
global fixed_len
source ./include.tcl
@ -31,11 +30,11 @@ proc recd009 { method {select 0} args} {
puts "\tRecd009.a: Create $method environment and database."
set flags "-create -txn -home $testdir"
set env_cmd "berkdb env $flags"
set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
set oflags "-env $dbenv -create -mode 0644 $opts $method"
set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE

View file

@ -1,20 +1,15 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd010.tcl,v 1.14 2000/12/11 17:24:55 sue Exp $
# $Id: recd010.tcl,v 1.19 2002/03/15 19:05:07 sue Exp $
#
# Recovery Test 10.
# Test stability of btree duplicates across btree off-page dup splits
# and reverse splits and across recovery.
# TEST recd010
# TEST Test stability of btree duplicates across btree off-page dup splits
# TEST and reverse splits and across recovery.
proc recd010 { method {select 0} args} {
global fixed_len
global kvals
global kvals_dups
source ./include.tcl
if { [is_dbtree $method] != 1 && [is_ddbtree $method] != 1} {
if { [is_btree $method] != 1 } {
puts "Recd010 skipping for method $method."
return
}
@ -24,11 +19,24 @@ proc recd010 { method {select 0} args} {
puts "Recd010: skipping for specific pagesizes"
return
}
set largs $args
append largs " -dup "
recd010_main $method $select $largs
append largs " -dupsort "
recd010_main $method $select $largs
}
set opts [convert_args $method $args]
proc recd010_main { method select largs } {
global fixed_len
global kvals
global kvals_dups
source ./include.tcl
set opts [convert_args $method $largs]
set method [convert_method $method]
puts "\tRecd010 ($opts): Test duplicates across splits and recovery"
puts "Recd010 ($opts): Test duplicates across splits and recovery"
set testfile recd010.db
env_cleanup $testdir
@ -41,10 +49,10 @@ proc recd010 { method {select 0} args} {
set data "data"
set key "recd010_key"
puts "\tRecd010.a: Create $method environment and database."
puts "\tRecd010.a: Create environment and database."
set flags "-create -txn -home $testdir"
set env_cmd "berkdb env $flags"
set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@ -69,17 +77,17 @@ proc recd010 { method {select 0} args} {
return
}
set rlist {
{ {recd010_split DB TXNID 1 $method 2 $mkeys}
{ {recd010_split DB TXNID 1 2 $mkeys}
"Recd010.c: btree split 2 large dups"}
{ {recd010_split DB TXNID 0 $method 2 $mkeys}
{ {recd010_split DB TXNID 0 2 $mkeys}
"Recd010.d: btree reverse split 2 large dups"}
{ {recd010_split DB TXNID 1 $method 10 $mkeys}
{ {recd010_split DB TXNID 1 10 $mkeys}
"Recd010.e: btree split 10 dups"}
{ {recd010_split DB TXNID 0 $method 10 $mkeys}
{ {recd010_split DB TXNID 0 10 $mkeys}
"Recd010.f: btree reverse split 10 dups"}
{ {recd010_split DB TXNID 1 $method 100 $mkeys}
{ {recd010_split DB TXNID 1 100 $mkeys}
"Recd010.g: btree split 100 dups"}
{ {recd010_split DB TXNID 0 $method 100 $mkeys}
{ {recd010_split DB TXNID 0 100 $mkeys}
"Recd010.h: btree reverse split 100 dups"}
}
@ -100,7 +108,7 @@ proc recd010 { method {select 0} args} {
op_recover commit $testdir $env_cmd $testfile $cmd $msg
recd010_check $testdir $testfile $opts commit $reverse $firstkeys
}
puts "\tRecd010.e: Verify db_printlog can read logfile"
puts "\tRecd010.i: Verify db_printlog can read logfile"
set tmpfile $testdir/printlog.out
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $tmpfile} ret]
@ -178,7 +186,14 @@ proc recd010_check { tdir testfile opts op reverse origdups } {
for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
set d [$dbc get -nextdup]} {
set thisdata [lindex [lindex $d 0] 1]
error_check_good dup_check $thisdata $data$datacnt
if { $datacnt < 10 } {
set pdata $data.$ki.00$datacnt
} elseif { $datacnt < 100 } {
set pdata $data.$ki.0$datacnt
} else {
set pdata $data.$ki.$datacnt
}
error_check_good dup_check $thisdata $pdata
incr datacnt
}
error_check_good dup_count $datacnt $numdups
@ -202,7 +217,7 @@ proc recd010_check { tdir testfile opts op reverse origdups } {
error_check_good db_close [$db close] 0
}
proc recd010_split { db txn split method nkeys mkeys } {
proc recd010_split { db txn split nkeys mkeys } {
global errorCode
global kvals
global kvals_dups
@ -220,7 +235,14 @@ proc recd010_split { db txn split method nkeys mkeys } {
"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
for {set k 0} { $k < $nkeys } { incr k } {
for {set i 0} { $i < $numdups } { incr i } {
set ret [$db put -txn $txn $key$k $data$i]
if { $i < 10 } {
set pdata $data.$k.00$i
} elseif { $i < 100 } {
set pdata $data.$k.0$i
} else {
set pdata $data.$k.$i
}
set ret [$db put -txn $txn $key$k $pdata]
error_check_good dbput:more $ret 0
}
}

View file

@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2000
# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd011.tcl,v 11.13 2000/12/06 17:09:54 sue Exp $
# $Id: recd011.tcl,v 11.19 2002/02/25 16:44:26 sandstro Exp $
#
# Recovery Test 11.
# Test recovery to a specific timestamp.
# TEST recd011
# TEST Verify that recovery to a specific timestamp works.
proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
source ./include.tcl
@ -29,11 +29,11 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
puts "\tRecd0$tnum.a: Create environment and database."
set flags "-create -txn -home $testdir"
set env_cmd "berkdb env $flags"
set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
set oflags "-env $dbenv -create -mode 0644 $args $omethod"
set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@ -70,11 +70,11 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
# Now, loop through and recover to each timestamp, verifying the
# expected increment.
puts "\tRecd0$tnum.c: Recover to each timestamp and check."
for { set i 0 } { $i <= $niter } { incr i } {
for { set i $niter } { $i >= 0 } { incr i -1 } {
# Run db_recover.
berkdb debug_check
set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
berkdb debug_check
set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
error_check_good db_recover($i,$t) $ret 0
@ -91,7 +91,8 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
# Finally, recover to a time well before the first timestamp
# and well after the last timestamp. The latter should
# be just like the last timestamp; the former should fail.
# be just like the timestamp of the last test performed;
# the former should fail.
puts "\tRecd0$tnum.d: Recover to before the first timestamp."
set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
@ -108,8 +109,8 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
error_check_good db_open(after) [is_valid_db $db] TRUE
set dbt [$db get $key]
set datum [lindex [lindex $dbt 0] 1]
set datum2 [lindex [lindex $dbt 0] 1]
error_check_good timestamp_recover $datum [pad_data $method $niter]
error_check_good timestamp_recover $datum2 $datum
error_check_good db_close [$db close] 0
}

View file

@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2000
# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd012.tcl,v 11.14 2000/12/11 17:24:55 sue Exp $
# $Id: recd012.tcl,v 11.27 2002/05/10 00:48:07 margo Exp $
#
# Recovery Test 12.
# Test recovery handling of file opens and closes.
# TEST recd012
# TEST Test of log file ID management. [#2288]
# TEST Test recovery handling of file opens and closes.
proc recd012 { method {start 0} \
{niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
source ./include.tcl
@ -24,9 +25,8 @@ proc recd012 { method {start 0} \
puts "Recd012: skipping for specific pagesizes"
return
}
for { set i $start } { $i <= $niter } { incr i } {
env_cleanup $testdir
# For repeatability, we pass in the iteration number
@ -35,13 +35,13 @@ proc recd012 { method {start 0} \
# This lets us re-run a potentially failing iteration
# without having to start from the beginning and work
# our way to it.
#
#
# The number of databases ranges from 4 to 8 and is
# a function of $niter
# set ndbs [expr ($i % 5) + 4]
# set ndbs [expr ($i % 5) + 4]
recd012_body \
$method $ndbs $i $noutiter $niniter $pagesize $tnum $args
$method $ndbs $i $noutiter $niniter $pagesize $tnum $args
}
}
@ -55,8 +55,15 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
puts "\tRecd0$tnum $method ($largs): Iteration $iter"
puts "\t\tRecd0$tnum.a: Create environment and $ndbs databases."
# We run out of lockers during some of the recovery runs, so
# we need to make sure that we specify a DB_CONFIG that will
# give us enough lockers.
set f [open $testdir/DB_CONFIG w]
puts $f "set_lk_max_lockers 5000"
close $f
set flags "-create -txn -home $testdir"
set env_cmd "berkdb env $flags"
set env_cmd "berkdb_env $flags"
error_check_good env_remove [berkdb envremove -home $testdir] 0
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@ -67,9 +74,12 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Initialize database that keeps track of number of open files (so
# we don't run out of descriptors).
set ofname of.db
set ofdb [berkdb_open -env $dbenv\
set txn [$dbenv txn]
error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE
set ofdb [berkdb_open -env $dbenv -txn $txn\
-create -dup -mode 0644 -btree -pagesize 512 $ofname]
error_check_good of_open [is_valid_db $ofdb] TRUE
error_check_good open_txn_commit [$txn commit] 0
set oftxn [$dbenv txn]
error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
@ -80,9 +90,10 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Create ndbs databases to work in, and a file listing db names to
# pick from.
set f [open TESTDIR/dblist w]
set oflags \
"-env $dbenv -create -mode 0644 -pagesize $psz $largs $omethod"
set f [open $testdir/dblist w]
set oflags "-auto_commit -env $dbenv \
-create -mode 0644 -pagesize $psz $largs $omethod"
for { set i 0 } { $i < $ndbs } { incr i } {
# 50-50 chance of being a subdb, unless we're a queue.
if { [berkdb random_int 0 1] || [is_queue $method] } {
@ -96,18 +107,17 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
set db [eval berkdb_open $oflags $dbname]
error_check_good db($i) [is_valid_db $db] TRUE
error_check_good db($i)_close [$db close] 0
}
}
close $f
error_check_good env_close [$dbenv close] 0
# Now we get to the meat of things. Our goal is to do some number
# of opens, closes, updates, and shutdowns (simulated here by a
# close of all open handles and a close/reopen of the environment,
# with or without an envremove), matching the regular expression
#
# ((O[OUC]+S)+R+V)
#
#
# We'll repeat the inner + a random number up to $niniter times,
# and the outer + a random number up to $noutiter times.
#
@ -116,23 +126,22 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# all handles properly. The environment will be left lying around
# before we run recovery 50% of the time.
set out [berkdb random_int 1 $noutiter]
puts "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter\
ops."
puts \
"\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter ops."
for { set i 0 } { $i < $out } { incr i } {
set child [open "|$tclsh_path" w]
# For performance, don't source everything,
# For performance, don't source everything,
# just what we'll need.
puts $child "load $tcllib"
puts $child "set fixed_len $fixed_len"
puts $child "source ../test/testutils.tcl"
puts $child "source ../test/recd0$tnum.tcl"
puts $child "source $src_root/test/testutils.tcl"
puts $child "source $src_root/test/recd0$tnum.tcl"
set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
# Go.
# puts "recd012_dochild {$env_cmd} $rnd $i $niniter\
# $ndbs $tnum $method $ofname $largs"
berkdb debug_check
puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
$ndbs $tnum $method $ofname $largs"
close $child
@ -140,35 +149,35 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Run recovery 0-3 times.
set nrecs [berkdb random_int 0 3]
for { set j 0 } { $j < $nrecs } { incr j } {
berkdb debug_check
set ret [catch {exec $util_path/db_recover \
-h $testdir} res]
if { $ret != 0 } {
if { $ret != 0 } {
puts "FAIL: db_recover returned with nonzero\
exit status, output as follows:"
file mkdir /tmp/12out
set fd [open /tmp/12out/[pid] w]
puts $fd $res
puts $fd $res
close $fd
}
error_check_good recover($j) $ret 0
}
}
# Run recovery one final time; it doesn't make sense to
# Run recovery one final time; it doesn't make sense to
# check integrity if we do not.
set ret [catch {exec $util_path/db_recover -h $testdir} res]
if { $ret != 0 } {
if { $ret != 0 } {
puts "FAIL: db_recover returned with nonzero\
exit status, output as follows:"
puts $res
puts $res
}
# Make sure each datum is the correct filename.
puts "\t\tRecd0$tnum.c: Checking data integrity."
set dbenv [berkdb env -create -private -home $testdir]
set dbenv [berkdb_env -create -private -home $testdir]
error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
set f [open TESTDIR/dblist r]
set f [open $testdir/dblist r]
set i 0
while { [gets $f dbinfo] > 0 } {
set db [eval berkdb_open -env $dbenv $dbinfo]
@ -188,21 +197,21 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
close $f
error_check_good env_close_integrity [$dbenv close] 0
# Verify
error_check_good verify [verify_dir $testdir "\t\tRecd0$tnum.d: "] 0
error_check_good verify \
[verify_dir $testdir "\t\tRecd0$tnum.d: " 0 0 1] 0
}
proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
ofname args } {
global recd012_ofkey
source ./include.tcl
if { [is_record_based $method] } {
set keybase ""
} else {
set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
}
# Initialize our random number generator, repeatably based on an arg.
berkdb srand $rnd
@ -212,7 +221,11 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
# Find out how many databases appear to be open in the log--we
# don't want recovery to run out of filehandles.
set ofdb [berkdb_open -env $dbenv $ofname]
set txn [$dbenv txn]
error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE
set ofdb [berkdb_open -env $dbenv -txn $txn $ofname]
error_check_good child_txn_commit [$txn commit] 0
set oftxn [$dbenv txn]
error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
@ -222,14 +235,14 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
error_check_good of_commit [$oftxn commit] 0
# Read our dbnames
set f [open TESTDIR/dblist r]
set f [open $testdir/dblist r]
set i 0
while { [gets $f dbname($i)] > 0 } {
incr i
}
close $f
# We now have $ndbs extant databases.
# We now have $ndbs extant databases.
# Open one of them, just to get us started.
set opendbs {}
set oflags "-env $dbenv $args"
@ -254,14 +267,13 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
set num_open [llength $opendbs]
if { $num_open == 0 } {
# If none are open, do an open first.
recd012_open
}
set n [berkdb random_int 0 [expr $num_open - 1]]
set pair [lindex $opendbs $n]
set udb [lindex $pair 0]
set uname [lindex $pair 1]
set key [berkdb random_int 1000 1999]$keybase
set data [chop_data $method $uname]
error_check_good put($uname,$udb,$key,$data) \
@ -273,12 +285,11 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
[$curtxn commit] 0
set curtxn [$dbenv txn]
error_check_good txn_reopen \
[is_valid_txn $curtxn $dbenv] TRUE
[is_valid_txn $curtxn $dbenv] TRUE
}
}
2 {
# Close.
if { [llength $opendbs] == 0 } {
# If none are open, open instead of closing.
recd012_open
@ -286,28 +297,26 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
}
# Commit curtxn first, lest we self-deadlock.
error_check_good txn_recommit \
[$curtxn commit] 0
error_check_good txn_recommit [$curtxn commit] 0
# Do it.
set which [berkdb random_int 0 \
[expr [llength $opendbs] - 1]]
set db [lindex [lindex $opendbs $which] 0]
error_check_good db_choice [is_valid_db $db] TRUE
global errorCode errorInfo
error_check_good db_close \
[[lindex [lindex $opendbs $which] 0] close] 0
set opendbs [lreplace $opendbs $which $which]
incr nopenfiles -1
# Reopen txn.
set curtxn [$dbenv txn]
error_check_good txn_reopen \
[is_valid_txn $curtxn $dbenv] TRUE
}
}
@ -335,12 +344,12 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
[$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
error_check_good of_commit [$oftxn commit] 0
error_check_good ofdb_close [$ofdb close] 0
}
}
proc recd012_open { } {
# This is basically an inline and has to modify curtxn,
# This is basically an inline and has to modify curtxn,
# so use upvars.
upvar curtxn curtxn
upvar curtxn curtxn
upvar ndbs ndbs
upvar dbname dbname
upvar dbenv dbenv
@ -361,21 +370,21 @@ proc recd012_open { } {
# Do it.
set which [berkdb random_int 0 [expr $ndbs - 1]]
set db [eval berkdb_open \
$oflags $dbname($which)]
set db [eval berkdb_open -auto_commit $oflags $dbname($which)]
lappend opendbs [list $db $dbname($which)]
# Reopen txn.
set curtxn [$dbenv txn]
error_check_good txn_reopen \
[is_valid_txn $curtxn $dbenv] TRUE
error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE
incr nopenfiles
}
# Update the database containing the number of files that db_recover has
# to contend with--we want to avoid letting it run out of file descriptors.
# We do this by keeping track of the number of unclosed opens since the
# We do this by keeping track of the number of unclosed opens since the
# checkpoint before last.
# $recd012_ofkey stores this current value; the two dups available
# at $recd012_ofckptkey store the number of opens since the last checkpoint
@ -399,7 +408,7 @@ proc recd012_nopenfiles_ckpt { env db nopenfiles } {
error_check_good del [$dbc del] 0
set nopenfiles [expr $nopenfiles - $discard]
# Get the next ckpt value
set dbt [$dbc get -nextdup]
error_check_good set2 [llength $dbt] 1
@ -410,10 +419,10 @@ proc recd012_nopenfiles_ckpt { env db nopenfiles } {
# Put this new number at the end of the dup set.
error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
# We should never deadlock since we're the only one in this db.
error_check_good dbc_close [$dbc close] 0
error_check_good txn_commit [$txn commit] 0
error_check_good txn_commit [$txn commit] 0
return $nopenfiles
}

View file

@ -1,22 +1,22 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2000
# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd013.tcl,v 11.10 2000/12/11 17:24:55 sue Exp $
# $Id: recd013.tcl,v 11.18 2002/02/25 16:44:27 sandstro Exp $
#
# Recovery Test 13.
# Smoke test of aborted cursor adjustments.
# TEST recd013
# TEST Test of cursor adjustment on child transaction aborts. [#2373]
#
# XXX
# Other tests that cover more specific variants of the same issue
# are in the access method tests for now. This is probably wrong; we
# put this one here because they're closely based on and intertwined
# with other, non-transactional cursor stability tests that are among
# the access method tests, and because we need at least one test to
# the access method tests, and because we need at least one test to
# fit under recd and keep logtrack from complaining. We'll sort out the mess
# later; the important thing, for now, is that everything that needs to gets
# tested. (This really shouldn't be under recd at all, since it doesn't
# tested. (This really shouldn't be under recd at all, since it doesn't
# run recovery!)
proc recd013 { method { nitems 100 } args } {
source ./include.tcl
@ -48,11 +48,12 @@ proc recd013 { method { nitems 100 } args } {
Create environment, database, and parent transaction."
set flags "-create -txn -home $testdir"
set env_cmd "berkdb env $flags"
set env_cmd "berkdb_env $flags"
set env [eval $env_cmd]
error_check_good dbenv [is_valid_env $env] TRUE
set oflags "-env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
set oflags \
"-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@ -63,19 +64,44 @@ proc recd013 { method { nitems 100 } args } {
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
set key $keybase$i
set data [chop_data $method $i$alphabet]
error_check_good init_put($i) [$db put -txn $txn $key $data] 0
}
error_check_good init_txn_commit [$txn commit] 0
# Create an initial txn; set a cursor of that txn to each item.
set txn [$env txn]
error_check_good txn [is_valid_txn $txn $env] TRUE
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
# First, try to put the item in a child transaction,
# then abort and verify all the cursors we've done up until
# now.
set ctxn [$env txn -parent $txn]
error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE
error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0
error_check_good ctxn_abort($i) [$ctxn abort] 0
for { set j 1 } { $j < $i } { incr j 2 } {
error_check_good dbc_get($j) [$dbc($j) get -current] \
[list [list $keybase$j \
[pad_data $method $j$alphabet]]]
}
# Then put for real.
error_check_good init_put($i) [$db put -txn $txn $key $data] 0
# Set a cursor of the parent txn to each item.
set dbc($i) [$db cursor -txn $txn]
error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
error_check_good dbc_getset($i) \
[$dbc($i) get -set $key] \
[list [list $keybase$i [pad_data $method $i$alphabet]]]
# And verify all the cursors, including the one we just
# created.
for { set j 1 } { $j <= $i } { incr j 2 } {
error_check_good dbc_get($j) [$dbc($j) get -current] \
[list [list $keybase$j \
[pad_data $method $j$alphabet]]]
}
}
puts "\t\tRecd0$tnum.a.1: Verify cursor stability after init."
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
error_check_good dbc_get($i) [$dbc($i) get -current] \
[list [list $keybase$i [pad_data $method $i$alphabet]]]
}
puts "\tRecd0$tnum.b: Put test."
puts "\t\tRecd0$tnum.b.1: Put items."
set ctxn [$env txn -parent $txn]
@ -99,7 +125,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good curs_close [$curs close] 0
}
}
puts "\t\tRecd0$tnum.b.2: Verify cursor stability after abort."
error_check_good ctxn_abort [$ctxn abort] 0
@ -122,7 +148,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good db_verify \
[verify_dir $testdir "\t\tRecd0$tnum.b.3: "] 0
# Now put back all the even records, this time in the parent.
# Now put back all the even records, this time in the parent.
# Commit and re-begin the transaction so we can abort and
# get back to a nice full database.
for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
@ -135,9 +161,9 @@ proc recd013 { method { nitems 100 } args } {
error_check_good txn [is_valid_txn $txn $env] TRUE
# Delete test. Set a cursor to each record. Delete the even ones
# in the parent and check cursor stability. Then open a child
# in the parent and check cursor stability. Then open a child
# transaction, and delete the odd ones. Verify that the database
# is empty
# is empty.
puts "\tRecd0$tnum.c: Delete test."
unset dbc
@ -149,8 +175,9 @@ proc recd013 { method { nitems 100 } args } {
error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
[list [list $keybase$i [pad_data $method $i$alphabet]]]
}
puts "\t\tRecd0$tnum.c.1: Delete even items in parent txn."
puts "\t\tRecd0$tnum.c.1: Delete even items in child txn and abort."
if { [is_rrecno $method] != 1 } {
set init 2
set bound [expr 2 * $nitems]
@ -162,9 +189,25 @@ proc recd013 { method { nitems 100 } args } {
set bound [expr $nitems + 1]
set step 1
}
set ctxn [$env txn -parent $txn]
for { set i $init } { $i <= $bound } { incr i $step } {
error_check_good del($i) [$db del -txn $txn $keybase$i] 0
error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
}
error_check_good ctxn_abort [$ctxn abort] 0
# Verify that no items are deleted.
for { set i 1 } { $i <= 2 * $nitems } { incr i } {
error_check_good dbc_get($i) [$dbc($i) get -current] \
[list [list $keybase$i [pad_data $method $i$alphabet]]]
}
puts "\t\tRecd0$tnum.c.2: Delete even items in child txn and commit."
set ctxn [$env txn -parent $txn]
for { set i $init } { $i <= $bound } { incr i $step } {
error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
}
error_check_good ctxn_commit [$ctxn commit] 0
# Verify that even items are deleted and odd items are not.
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
@ -181,10 +224,10 @@ proc recd013 { method { nitems 100 } args } {
[list [list "" ""]]
}
puts "\t\tRecd0$tnum.c.2: Delete odd items in child txn."
puts "\t\tRecd0$tnum.c.3: Delete odd items in child txn."
set ctxn [$env txn -parent $txn]
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
if { [is_rrecno $method] != 1 } {
set j $i
@ -196,14 +239,14 @@ proc recd013 { method { nitems 100 } args } {
}
error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
}
# Verify that everyone's deleted.
for { set i 1 } { $i <= 2 * $nitems } { incr i } {
error_check_good get_deleted($i) \
[llength [$db get -txn $ctxn $keybase$i]] 0
}
puts "\t\tRecd0$tnum.c.3: Verify cursor stability after abort."
puts "\t\tRecd0$tnum.c.4: Verify cursor stability after abort."
error_check_good ctxn_abort [$ctxn abort] 0
# Verify that even items are deleted and odd items are not.
@ -229,7 +272,7 @@ proc recd013 { method { nitems 100 } args } {
# Sync and verify.
error_check_good db_sync [$db sync] 0
error_check_good db_verify \
[verify_dir $testdir "\t\tRecd0$tnum.c.4: "] 0
[verify_dir $testdir "\t\tRecd0$tnum.c.5: "] 0
puts "\tRecd0$tnum.d: Clean up."
error_check_good txn_commit [$txn commit] 0
@ -238,7 +281,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good verify_dir \
[verify_dir $testdir "\t\tRecd0$tnum.d.1: "] 0
if { $log_log_record_types == 1 } {
if { $log_log_record_types == 1 } {
logtrack_read $testdir
}
}

View file

@ -1,16 +1,14 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd014.tcl,v 1.9 2001/01/11 17:16:04 sue Exp $
# $Id: recd014.tcl,v 1.19 2002/08/15 19:21:24 sandstro Exp $
#
# Recovery Test 14.
# This is a recovery test for create/delete of queue extents. We have
# hooks in the database so that we can abort the process at various
# points and make sure that the extent file does or does not exist. We
# then need to recover and make sure the file is correctly existing
# or not, as the case may be.
# TEST recd014
# TEST This is a recovery test for create/delete of queue extents. We
# TEST then need to recover and make sure the file is correctly existing
# TEST or not, as the case may be.
proc recd014 { method args} {
global fixed_len
source ./include.tcl
@ -51,7 +49,7 @@ proc recd014 { method args} {
set flags "-create -txn -home $testdir"
puts "\tRecd014.a: creating environment"
set env_cmd "berkdb env $flags"
set env_cmd "berkdb_env $flags"
puts "\tRecd014.b: Create test commit"
ext_recover_create $testdir $env_cmd $omethod \
@ -61,21 +59,14 @@ proc recd014 { method args} {
$opts $testfile abort
puts "\tRecd014.c: Consume test commit"
ext_recover_delete $testdir $env_cmd $omethod \
$opts $testfile consume commit
ext_recover_consume $testdir $env_cmd $omethod \
$opts $testfile commit
puts "\tRecd014.c: Consume test abort"
ext_recover_delete $testdir $env_cmd $omethod \
$opts $testfile consume abort
puts "\tRecd014.d: Delete test commit"
ext_recover_delete $testdir $env_cmd $omethod \
$opts $testfile delete commit
puts "\tRecd014.d: Delete test abort"
ext_recover_delete $testdir $env_cmd $omethod \
$opts $testfile delete abort
ext_recover_consume $testdir $env_cmd $omethod \
$opts $testfile abort
set fixed_len $orig_fixed_len
puts "\tRecd014.e: Verify db_printlog can read logfile"
puts "\tRecd014.d: Verify db_printlog can read logfile"
set tmpfile $testdir/printlog.out
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $tmpfile} ret]
@ -105,7 +96,11 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
set ret [catch {eval {berkdb_open} $oflags} db]
set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
error_check_good txn_commit [$t commit] 0
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
#
# The command to execute to create an extent is a put.
@ -123,7 +118,7 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
puts "\t\tSyncing"
error_check_good db_sync [$db sync] 0
catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
copy_extent_file $dir $dbfile afterop
error_check_good txn_$txncmd:$t [$t $txncmd] 0
@ -149,7 +144,10 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
catch { file copy -force $dir/$dbfile $init_file } res
copy_extent_file $dir $dbfile init
}
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
error_check_good db_close [$db close] 0
error_check_good txn_commit [$t commit] 0
error_check_good env_close [$env close] 0
#
@ -241,7 +239,7 @@ proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation aborted. The file is there, but make
@ -255,8 +253,7 @@ proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
}
}
proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
global log_log_record_types
global alphabet
source ./include.tcl
@ -269,55 +266,52 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
env_cleanup $dir
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
set oflags "-create $method -mode 0644 -pagesize 512 \
set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
-env $env $opts $dbfile"
#
# Open our db, add some data, close and copy as our
# init file.
#
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set extnum 0
set data [chop_data $method [replicate $alphabet 512]]
set txn [$env txn]
error_check_good txn_begin [is_valid_txn $txn $env] TRUE
set putrecno [$db put -append $data]
set putrecno [$db put -txn $txn -append $data]
error_check_good db_put $putrecno 1
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
puts "\t\tExecuting command"
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
copy_extent_file $dir $dbfile init
#
# If we don't abort, then we expect success.
# If we abort, we expect no file removed until recovery is run.
#
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
if { [string compare $op "delete"] == 0 } {
set dbcmd "$db del -txn $t $putrecno"
} else {
set dbcmd "$db get -txn $t -consume"
}
set dbcmd "$db get -txn $t -consume"
set ret [eval $dbcmd]
error_check_good db_sync [$db sync] 0
catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
copy_extent_file $dir $dbfile afterop
error_check_good txn_$txncmd:$t [$t $txncmd] 0
error_check_good db_sync [$db sync] 0
set dbq [make_ext_filename $dir $dbfile $extnum]
if {$txncmd == "abort"} {
#
@ -330,20 +324,10 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
# Since we aborted the txn, we should be able
# to get to our original entry.
#
error_check_good post$op.1 [file exists $dbq] 1
set xdb [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $xdb] TRUE
set kd [$xdb get $putrecno]
set key [lindex [lindex $kd 0] 0]
error_check_good dbget_key $key $putrecno
set retdata [lindex [lindex $kd 0] 1]
error_check_good dbget_data $data $retdata
error_check_good db_close [$xdb close] 0
error_check_good postconsume.1 [file exists $dbq] 1
error_check_good \
diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
[dbdump_diff $init_file $dir/$dbfile] 0
diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\
[dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
@ -353,14 +337,8 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
# Check file existence. Consume operations remove
# the extent when we move off, which we should have
# done. Delete operations won't remove the extent
# until we run recovery.
#
if { [string compare $op "delete"] == 0 } {
error_check_good ${op}_exists [file exists $dbq] 1
} else {
error_check_good ${op}_exists [file exists $dbq] 0
}
# done.
error_check_good consume_exists [file exists $dbq] 0
}
error_check_good db_close [$db close] 0
error_check_good env_close [$env close] 0
@ -384,7 +362,7 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
@ -396,7 +374,7 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
# Run recovery here. Re-do the operation.
# Verify that the file doesn't exist
# Verify that the file doesn't exist
# (if we committed) or change (if we aborted)
# when we are done.
#
@ -418,14 +396,14 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
# not exist. Both operations should result
# in no file existing now that we've run recovery.
#
error_check_good after_recover1 [file exists $dbq] 0
error_check_good after_recover2 [file exists $dbq] 0
}
#
@ -456,12 +434,12 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
[dbdump_diff $init_file $dir/$dbfile] 0
[dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it still does
# not exist.
#
error_check_good after_recover2 [file exists $dbq] 0
error_check_good after_recover3 [file exists $dbq] 0
}
}

160
bdb/test/recd015.tcl Normal file
View file

@ -0,0 +1,160 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd015.tcl,v 1.13 2002/09/05 17:23:06 sandstro Exp $
#
# TEST recd015
# TEST This is a recovery test for testing lots of prepared txns.
# TEST This test is to force the use of txn_recover to call with the
# TEST DB_FIRST flag and then DB_NEXT.
proc recd015 { method args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
puts "Recd015: $method ($args) prepared txns test"
# Create the database and environment.
set numtxns 1
set testfile NULL
set env_cmd "berkdb_env -create -txn -home $testdir"
set msg "\tRecd015.a"
puts "$msg Simple test to prepare $numtxns txn "
foreach op { abort commit discard } {
env_cleanup $testdir
recd015_body $env_cmd $testfile $numtxns $msg $op
}
#
# Now test large numbers of prepared txns to test DB_NEXT
# on txn_recover.
#
set numtxns 250
set testfile recd015.db
set txnmax [expr $numtxns + 5]
#
# For this test we create our database ahead of time so that we
# don't need to send methods and args to the script.
#
env_cleanup $testdir
set env_cmd "berkdb_env -create -txn_max $txnmax -txn -home $testdir"
set env [eval $env_cmd]
error_check_good dbenv [is_valid_env $env] TRUE
set db [eval {berkdb_open -create} $omethod -env $env $args $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
error_check_good dbclose [$db close] 0
error_check_good envclose [$env close] 0
set msg "\tRecd015.b"
puts "$msg Large test to prepare $numtxns txn "
foreach op { abort commit discard } {
recd015_body $env_cmd $testfile $numtxns $msg $op
}
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $testdir/LOG } ret]
error_check_good db_printlog $stat 0
fileremove $testdir/LOG
}
proc recd015_body { env_cmd testfile numtxns msg op } {
source ./include.tcl
sentinel_init
set gidf $testdir/gidfile
fileremove -f $gidf
set pidlist {}
puts "$msg.0: Executing child script to prepare txns"
berkdb debug_check
set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \
$testdir/recdout $env_cmd $testfile $gidf $numtxns &]
lappend pidlist $p
watch_procs $pidlist 5
set f1 [open $testdir/recdout r]
set r [read $f1]
puts $r
close $f1
fileremove -f $testdir/recdout
berkdb debug_check
puts -nonewline "$msg.1: Running recovery ... "
flush stdout
berkdb debug_check
set env [eval $env_cmd -recover]
error_check_good dbenv-recover [is_valid_env $env] TRUE
puts "complete"
puts "$msg.2: getting txns from txn_recover"
set txnlist [$env txn_recover]
error_check_good txnlist_len [llength $txnlist] $numtxns
set gfd [open $gidf r]
set i 0
while { [gets $gfd gid] != -1 } {
set gids($i) $gid
incr i
}
close $gfd
#
# Make sure we have as many as we expect
error_check_good num_gids $i $numtxns
set i 0
puts "$msg.3: comparing GIDs and $op txns"
foreach tpair $txnlist {
set txn [lindex $tpair 0]
set gid [lindex $tpair 1]
error_check_good gidcompare $gid $gids($i)
error_check_good txn:$op [$txn $op] 0
incr i
}
if { $op != "discard" } {
error_check_good envclose [$env close] 0
return
}
#
# If we discarded, now do it again and randomly resolve some
# until all txns are resolved.
#
puts "$msg.4: resolving/discarding txns"
set txnlist [$env txn_recover]
set len [llength $txnlist]
set opval(1) "abort"
set opcnt(1) 0
set opval(2) "commit"
set opcnt(2) 0
set opval(3) "discard"
set opcnt(3) 0
while { $len != 0 } {
set opicnt(1) 0
set opicnt(2) 0
set opicnt(3) 0
#
# Abort/commit or discard them randomly until
# all are resolved.
#
for { set i 0 } { $i < $len } { incr i } {
set t [lindex $txnlist $i]
set txn [lindex $t 0]
set newop [berkdb random_int 1 3]
set ret [$txn $opval($newop)]
error_check_good txn_$opval($newop):$i $ret 0
incr opcnt($newop)
incr opicnt($newop)
}
# puts "$opval(1): $opicnt(1) Total: $opcnt(1)"
# puts "$opval(2): $opicnt(2) Total: $opcnt(2)"
# puts "$opval(3): $opicnt(3) Total: $opcnt(3)"
set txnlist [$env txn_recover]
set len [llength $txnlist]
}
error_check_good envclose [$env close] 0
}

183
bdb/test/recd016.tcl Normal file
View file

@ -0,0 +1,183 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd016.tcl,v 11.8 2002/09/05 17:23:07 sandstro Exp $
#
# TEST recd016
# TEST This is a recovery test for testing running recovery while
# TEST recovery is already running. While bad things may or may not
# TEST happen, if recovery is then run properly, things should be correct.
proc recd016 { method args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
puts "Recd016: $method ($args) simultaneous recovery test"
puts "Recd016: Skipping; waiting on SR #6277"
return
# Create the database and environment.
set testfile recd016.db
#
# For this test we create our database ahead of time so that we
# don't need to send methods and args to the script.
#
cleanup $testdir NULL
#
# Use a smaller log to make more files and slow down recovery.
#
set gflags ""
set pflags ""
set log_max [expr 256 * 1024]
set nentries 10000
set nrec 6
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
set t4 $testdir/t4
set t5 $testdir/t5
# Since we are using txns, we need at least 1 lock per
# record (for queue). So set lock_max accordingly.
set lkmax [expr $nentries * 2]
puts "\tRecd016.a: Create environment and database"
set env_cmd "berkdb_env -create -log_max $log_max \
-lock_max $lkmax -txn -home $testdir"
set env [eval $env_cmd]
error_check_good dbenv [is_valid_env $env] TRUE
set db [eval {berkdb_open -create} \
$omethod -auto_commit -env $env $args $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
set abid [open $t4 w]
if { [is_record_based $method] == 1 } {
set checkfunc recd016_recno.check
append gflags " -recno"
} else {
set checkfunc recd016.check
}
puts "\tRecd016.b: put/get loop"
# Here is the loop where we put and get each key/data pair
set count 0
while { [gets $did str] != -1 && $count < $nentries } {
if { [is_record_based $method] == 1 } {
global kvals
set key [expr $count + 1]
if { 0xffffffff > 0 && $key > 0xffffffff } {
set key [expr $key - 0x100000000]
}
if { $key == 0 || $key - 0xffffffff == 1 } {
incr key
incr count
}
set kvals($key) [pad_data $method $str]
} else {
set key $str
set str [reverse $str]
}
#
# Start a transaction. Alternately abort and commit them.
# This will create a bigger log for recovery to collide.
#
set txn [$env txn]
set ret [eval \
{$db put} -txn $txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
if {[expr $count % 2] == 0} {
set ret [$txn commit]
error_check_good txn_commit $ret 0
set ret [eval {$db get} $gflags {$key}]
error_check_good commit_get \
$ret [list [list $key [pad_data $method $str]]]
} else {
set ret [$txn abort]
error_check_good txn_abort $ret 0
set ret [eval {$db get} $gflags {$key}]
error_check_good abort_get [llength $ret] 0
puts $abid $key
}
incr count
}
close $did
close $abid
error_check_good dbclose [$db close] 0
error_check_good envclose [$env close] 0
set pidlist {}
puts "\tRecd016.c: Start up $nrec recovery processes at once"
for {set i 0} {$i < $nrec} {incr i} {
set p [exec $util_path/db_recover -h $testdir -c &]
lappend pidlist $p
}
watch_procs $pidlist 5
#
# Now that they are all done run recovery correctly
puts "\tRecd016.d: Run recovery process"
set stat [catch {exec $util_path/db_recover -h $testdir -c} result]
if { $stat == 1 } {
error "FAIL: Recovery error: $result."
}
puts "\tRecd016.e: Open, dump and check database"
# Now compare the keys to see if they match the dictionary (or ints)
if { [is_record_based $method] == 1 } {
set oid [open $t2 w]
for {set i 1} {$i <= $nentries} {incr i} {
set j $i
if { 0xffffffff > 0 && $j > 0xffffffff } {
set j [expr $j - 0x100000000]
}
if { $j == 0 } {
incr i
incr j
}
puts $oid $j
}
close $oid
} else {
set q q
filehead $nentries $dict $t2
}
filesort $t2 $t3
file rename -force $t3 $t2
filesort $t4 $t3
file rename -force $t3 $t4
fileextract $t2 $t4 $t3
file rename -force $t3 $t5
set env [eval $env_cmd]
error_check_good dbenv [is_valid_env $env] TRUE
open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next"
filesort $t1 $t3
error_check_good envclose [$env close] 0
error_check_good Recd016:diff($t5,$t3) \
[filecmp $t5 $t3] 0
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $testdir/LOG } ret]
error_check_good db_printlog $stat 0
fileremove $testdir/LOG
}
# Check function for recd016; keys and data are identical
proc recd016.check { key data } {
error_check_good "key/data mismatch" $data [reverse $key]
}
proc recd016_recno.check { key data } {
global kvals
error_check_good key"$key"_exists [info exists kvals($key)] 1
error_check_good "key/data mismatch, key $key" $data $kvals($key)
}

151
bdb/test/recd017.tcl Normal file
View file

@ -0,0 +1,151 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd017.tcl,v 11.4 2002/09/03 16:44:37 sue Exp $
#
# TEST recd017
# TEST Test recovery and security. This is basically a watered
# TEST down version of recd001 just to verify that encrypted environments
# TEST can be recovered.
proc recd017 { method {select 0} args} {
global fixed_len
global encrypt
global passwd
source ./include.tcl
set orig_fixed_len $fixed_len
set opts [convert_args $method $args]
set omethod [convert_method $method]
puts "Recd017: $method operation/transaction tests"
# Create the database and environment.
env_cleanup $testdir
# The recovery tests were originally written to
# do a command, abort, do it again, commit, and then
# repeat the sequence with another command. Each command
# tends to require that the previous command succeeded and
# left the database a certain way. To avoid cluttering up the
# op_recover interface as well as the test code, we create two
# databases; one does abort and then commit for each op, the
# other does prepare, prepare-abort, and prepare-commit for each
# op. If all goes well, this allows each command to depend
# exactly one successful iteration of the previous command.
set testfile recd017.db
set testfile2 recd017-2.db
set flags "-create -encryptaes $passwd -txn -home $testdir"
puts "\tRecd017.a.0: creating environment"
set env_cmd "berkdb_env $flags"
convert_encrypt $env_cmd
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
#
# We need to create a database to get the pagesize (either
# the default or whatever might have been specified).
# Then remove it so we can compute fixed_len and create the
# real database.
set oflags "-create $omethod -mode 0644 \
-env $dbenv -encrypt $opts $testfile"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set stat [$db stat]
#
# Compute the fixed_len based on the pagesize being used.
# We want the fixed_len to be 1/4 the pagesize.
#
set pg [get_pagesize $stat]
error_check_bad get_pagesize $pg -1
set fixed_len [expr $pg / 4]
error_check_good db_close [$db close] 0
error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
# Convert the args again because fixed_len is now real.
# Create the databases and close the environment.
# cannot specify db truncate in txn protected env!!!
set opts [convert_args $method ""]
convert_encrypt $env_cmd
set omethod [convert_method $method]
set oflags "-create $omethod -mode 0644 \
-env $dbenv -encrypt $opts $testfile"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
error_check_good db_close [$db close] 0
set oflags "-create $omethod -mode 0644 \
-env $dbenv -encrypt $opts $testfile2"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
error_check_good db_close [$db close] 0
error_check_good env_close [$dbenv close] 0
puts "\tRecd017.a.1: Verify db_printlog can read logfile"
set tmpfile $testdir/printlog.out
set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
> $tmpfile} ret]
error_check_good db_printlog $stat 0
fileremove $tmpfile
# List of recovery tests: {CMD MSG} pairs.
set rlist {
{ {DB put -txn TXNID $key $data} "Recd017.b: put"}
{ {DB del -txn TXNID $key} "Recd017.c: delete"}
}
# These are all the data values that we're going to need to read
# through the operation table and run the recovery tests.
if { [is_record_based $method] == 1 } {
set key 1
} else {
set key recd017_key
}
set data recd017_data
foreach pair $rlist {
set cmd [subst [lindex $pair 0]]
set msg [lindex $pair 1]
if { $select != 0 } {
set tag [lindex $msg 0]
set tail [expr [string length $tag] - 2]
set tag [string range $tag $tail $tail]
if { [lsearch $select $tag] == -1 } {
continue
}
}
if { [is_queue $method] != 1 } {
if { [string first append $cmd] != -1 } {
continue
}
if { [string first consume $cmd] != -1 } {
continue
}
}
# if { [is_fixed_length $method] == 1 } {
# if { [string first partial $cmd] != -1 } {
# continue
# }
# }
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
#
# Note that since prepare-discard ultimately aborts
# the txn, it must come before prepare-commit.
#
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-discard $testdir $env_cmd $testfile2 \
$cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}
set fixed_len $orig_fixed_len
return
}

110
bdb/test/recd018.tcl Normal file
View file

@ -0,0 +1,110 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd018.tcl,v 11.2 2002/03/13 21:04:20 sue Exp $
#
# TEST recd018
# TEST Test recover of closely interspersed checkpoints and commits.
#
# This test is from the error case from #4230.
#
proc recd018 { method {ndbs 10} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
set tnum 18
puts "Recd0$tnum ($args): $method recovery of checkpoints and commits."
set tname recd0$tnum.db
env_cleanup $testdir
set i 0
if { [is_record_based $method] == 1 } {
set key 1
set key2 2
} else {
set key KEY
set key2 KEY2
}
puts "\tRecd0$tnum.a: Create environment and database."
set flags "-create -txn -home $testdir"
set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
for { set i 0 } { $i < $ndbs } { incr i } {
set testfile $tname.$i
set db($i) [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db($i)] TRUE
set file $testdir/$testfile.init
catch { file copy -force $testdir/$testfile $file} res
copy_extent_file $testdir $testfile init
}
# Main loop: Write a record or two to each database.
# Do a commit immediately followed by a checkpoint after each one.
error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
puts "\tRecd0$tnum.b Put/Commit/Checkpoint to $ndbs databases"
for { set i 0 } { $i < $ndbs } { incr i } {
set testfile $tname.$i
set data $i
# Put, in a txn.
set txn [$dbenv txn]
error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
error_check_good db_put \
[$db($i) put -txn $txn $key [chop_data $method $data]] 0
error_check_good txn_commit [$txn commit] 0
error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
if { [expr $i % 2] == 0 } {
set txn [$dbenv txn]
error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
error_check_good db_put [$db($i) put \
-txn $txn $key2 [chop_data $method $data]] 0
error_check_good txn_commit [$txn commit] 0
error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
}
error_check_good db_close [$db($i) close] 0
set file $testdir/$testfile.afterop
catch { file copy -force $testdir/$testfile $file} res
copy_extent_file $testdir $testfile afterop
}
error_check_good env_close [$dbenv close] 0
# Now, loop through and recover to each timestamp, verifying the
# expected increment.
puts "\tRecd0$tnum.c: Run recovery (no-op)"
set ret [catch {exec $util_path/db_recover -h $testdir} r]
error_check_good db_recover $ret 0
puts "\tRecd0$tnum.d: Run recovery (initial file)"
for { set i 0 } {$i < $ndbs } { incr i } {
set testfile $tname.$i
set file $testdir/$testfile.init
catch { file copy -force $file $testdir/$testfile } res
move_file_extent $testdir $testfile init copy
}
set ret [catch {exec $util_path/db_recover -h $testdir} r]
error_check_good db_recover $ret 0
puts "\tRecd0$tnum.e: Run recovery (after file)"
for { set i 0 } {$i < $ndbs } { incr i } {
set testfile $tname.$i
set file $testdir/$testfile.afterop
catch { file copy -force $file $testdir/$testfile } res
move_file_extent $testdir $testfile afterop copy
}
set ret [catch {exec $util_path/db_recover -h $testdir} r]
error_check_good db_recover $ret 0
}

121
bdb/test/recd019.tcl Normal file
View file

@ -0,0 +1,121 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd019.tcl,v 11.3 2002/08/08 15:38:07 bostic Exp $
#
# TEST recd019
# TEST Test txn id wrap-around and recovery.
# recd019 -- force transaction-ID wrap-around (txn_recycle) and verify
# that recovery works across it.
#
# method: access method under test (btree, hash, queue, ...)
# numid:  size of the artificially-narrowed txn id window; ids wrap
#         after this many txns
# args:   extra access-method arguments
proc recd019 { method {numid 50} args} {
global fixed_len
global txn_curid
global log_log_record_types
source ./include.tcl
# NOTE(review): orig_fixed_len is saved here but never restored before
# the proc returns, so the global fixed_len stays clobbered for any
# subsequent test -- confirm whether a restore is missing.
set orig_fixed_len $fixed_len
set opts [convert_args $method $args]
set omethod [convert_method $method]
puts "Recd019: $method txn id wrap-around test"
# Create the database and environment.
env_cleanup $testdir
set testfile recd019.db
set flags "-create -txn -home $testdir"
puts "\tRecd019.a: creating environment"
set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
# Test txn wrapping. Force a txn_recycle msg.
# Narrow the txn id space to [txn_curid, txn_curid + numid] so the
# wrap happens quickly.
set new_curid $txn_curid
set new_maxid [expr $new_curid + $numid]
error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0
#
# We need to create a database to get the pagesize (either
# the default or whatever might have been specified).
# Then remove it so we can compute fixed_len and create the
# real database.
set oflags "-create $omethod -mode 0644 \
-env $dbenv $opts $testfile"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set stat [$db stat]
#
# Compute the fixed_len based on the pagesize being used.
# We want the fixed_len to be 1/4 the pagesize.
#
set pg [get_pagesize $stat]
error_check_bad get_pagesize $pg -1
set fixed_len [expr $pg / 4]
error_check_good db_close [$db close] 0
error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
# Convert the args again because fixed_len is now real.
# Create the databases and close the environment.
# cannot specify db truncate in txn protected env!!!
set opts [convert_args $method ""]
set omethod [convert_method $method]
set oflags "-create $omethod -mode 0644 \
-env $dbenv -auto_commit $opts $testfile"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
#
# Force txn ids to wrap twice and then some.
#
set nument [expr $numid * 3 - 2]
puts "\tRecd019.b: Wrapping txn ids after $numid"
# Snapshot the pristine database so recovery can be replayed from it.
set file $testdir/$testfile.init
catch { file copy -force $testdir/$testfile $file} res
copy_extent_file $testdir $testfile init
for { set i 1 } { $i <= $nument } { incr i } {
# Use 'i' as key so method doesn't matter
set key $i
set data $i
# Put, in a txn.
set txn [$dbenv txn]
error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
error_check_good db_put \
[$db put -txn $txn $key [chop_data $method $data]] 0
error_check_good txn_commit [$txn commit] 0
}
error_check_good db_close [$db close] 0
# Snapshot the database as it stands after all the operations.
set file $testdir/$testfile.afterop
catch { file copy -force $testdir/$testfile $file} res
copy_extent_file $testdir $testfile afterop
error_check_good env_close [$dbenv close] 0
# Keep track of the log types we've seen
if { $log_log_record_types == 1} {
logtrack_read $testdir
}
# Now, loop through and recover.
# First pass: recover with the final database in place (no-op).
puts "\tRecd019.c: Run recovery (no-op)"
set ret [catch {exec $util_path/db_recover -h $testdir} r]
error_check_good db_recover $ret 0
# Second pass: restore the pre-operation snapshot, then recover.
puts "\tRecd019.d: Run recovery (initial file)"
set file $testdir/$testfile.init
catch { file copy -force $file $testdir/$testfile } res
move_file_extent $testdir $testfile init copy
set ret [catch {exec $util_path/db_recover -h $testdir} r]
error_check_good db_recover $ret 0
# Third pass: restore the post-operation snapshot, then recover.
puts "\tRecd019.e: Run recovery (after file)"
set file $testdir/$testfile.afterop
catch { file copy -force $file $testdir/$testfile } res
move_file_extent $testdir $testfile afterop copy
set ret [catch {exec $util_path/db_recover -h $testdir} r]
error_check_good db_recover $ret 0
}

180
bdb/test/recd020.tcl Normal file
View file

@ -0,0 +1,180 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd020.tcl,v 11.8 2002/08/08 15:38:08 bostic Exp $
#
# TEST recd020
# TEST Test recovery after checksum error.
# recd020 -- corrupt one byte of a checksummed database page, verify the
# checksum error is detected, that normal recovery refuses to run, and
# that catastrophic recovery (-c) rebuilds the data correctly.
#
# method: access method under test
# args:   extra access-method arguments (-pagesize is rejected because
#         the test hard-codes its own page size)
proc recd020 { method args} {
global fixed_len
global log_log_record_types
global datastr
source ./include.tcl
set pgindex [lsearch -exact $args "-pagesize"]
if { $pgindex != -1 } {
puts "Recd020: skipping for specific pagesizes"
return
}
if { [is_queueext $method] == 1 } {
puts "Recd020: skipping for method $method"
return
}
puts "Recd020: $method recovery after checksum error"
# Create the database and environment.
env_cleanup $testdir
set testfile recd020.db
set flags "-create -txn -home $testdir"
puts "\tRecd020.a: creating environment"
set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
# Use a small fixed page size so we know exactly where page 2 lives
# in the file when we corrupt it below.
set pgsize 512
# NOTE(review): orig_fixed_len is saved but never restored before the
# proc returns; the global fixed_len stays modified -- confirm whether
# a restore is missing.
set orig_fixed_len $fixed_len
set fixed_len [expr $pgsize / 4]
set opts [convert_args $method $args]
set omethod [convert_method $method]
set oflags "-create $omethod -mode 0644 \
-auto_commit -chksum -pagesize $pgsize $opts $testfile"
set db [eval {berkdb_open} -env $dbenv $oflags]
#
# Put some data.
#
set nument 50
puts "\tRecd020.b: Put some data"
for { set i 1 } { $i <= $nument } { incr i } {
# Use 'i' as key so method doesn't matter
set key $i
set data $i$datastr
# Put, in a txn.
set txn [$dbenv txn]
error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
error_check_good db_put \
[$db put -txn $txn $key [chop_data $method $data]] 0
error_check_good txn_commit [$txn commit] 0
}
error_check_good db_close [$db close] 0
error_check_good env_close [$dbenv close] 0
#
# We need to remove the env so that we don't get cached
# pages.
#
error_check_good env_remove [berkdb envremove -home $testdir] 0
puts "\tRecd020.c: Overwrite part of database"
#
# First just touch some bits in the file. We want to go
# through the paging system, so touch some data pages,
# like the middle of page 2.
# We should get a checksum error for the checksummed file.
#
# Flip all bits of one byte in the middle of page 2 so the page
# checksum no longer matches.
set pg 2
set fid [open $testdir/$testfile r+]
fconfigure $fid -translation binary
set seeklen [expr $pgsize * $pg + 200]
seek $fid $seeklen start
set byte [read $fid 1]
binary scan $byte c val
set newval [expr ~$val]
set newbyte [binary format c $newval]
seek $fid $seeklen start
puts -nonewline $fid $newbyte
close $fid
#
# Verify we get the checksum error. When we get it, it should
# log the error as well, so when we run recovery we'll need to
# do catastrophic recovery. We do this in a sub-process so that
# the files are closed after the panic.
#
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
set env_cmd "berkdb_env_noerr $flags"
set dbenv [send_cmd $f1 $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"]
error_check_good db [is_valid_db $db] TRUE
# We need to set non-blocking mode so that after each command
# we can read all the remaining output from that command and
# we can know what the output from one command is.
fconfigure $f1 -blocking 0
set ret [read $f1]
set got_err 0
# Read keys sequentially in the sub-process until the corrupted page
# is hit; each key before it must still come back intact.
for { set i 1 } { $i <= $nument } { incr i } {
set stat [send_cmd $f1 "catch {$db get $i} r"]
set getret [send_cmd $f1 "puts \$r"]
set ret [read $f1]
if { $stat == 1 } {
error_check_good dbget:fail [is_substr $getret \
"checksum error: catastrophic recovery required"] 1
set got_err 1
# Now verify that it was an error on the page we set.
error_check_good dbget:pg$pg [is_substr $ret \
"failed for page $pg"] 1
break
} else {
set key [lindex [lindex $getret 0] 0]
set data [lindex [lindex $getret 0] 1]
error_check_good keychk $key $i
error_check_good datachk $data \
[pad_data $method $i$datastr]
}
}
error_check_good got_chksum $got_err 1
# After the panic, both close calls must demand recovery.
set ret [send_cmd $f1 "$db close"]
set extra [read $f1]
error_check_good db:fail [is_substr $ret "run recovery"] 1
set ret [send_cmd $f1 "$dbenv close"]
error_check_good env_close:fail [is_substr $ret "run recovery"] 1
close $f1
# Keep track of the log types we've seen
if { $log_log_record_types == 1} {
logtrack_read $testdir
}
# Normal recovery must fail (exit status 1) with the checksum message.
puts "\tRecd020.d: Run normal recovery"
set ret [catch {exec $util_path/db_recover -h $testdir} r]
error_check_good db_recover $ret 1
error_check_good dbrec:fail \
[is_substr $r "checksum error: catastrophic recovery required"] 1
# Remove the damaged file; catastrophic recovery recreates it from
# the log.
catch {fileremove $testdir/$testfile} ret
puts "\tRecd020.e: Run catastrophic recovery"
set ret [catch {exec $util_path/db_recover -c -h $testdir} r]
error_check_good db_recover $ret 0
#
# Now verify the data was reconstructed correctly.
#
set env_cmd "berkdb_env_noerr $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
set db [eval {berkdb_open} -env $dbenv $oflags]
error_check_good db [is_valid_db $db] TRUE
for { set i 1 } { $i <= $nument } { incr i } {
set stat [catch {$db get $i} ret]
error_check_good stat $stat 0
set key [lindex [lindex $ret 0] 0]
set data [lindex [lindex $ret 0] 1]
error_check_good keychk $key $i
error_check_good datachk $data [pad_data $method $i$datastr]
}
error_check_good db_close [$db close] 0
error_check_good env_close [$dbenv close] 0
}

74
bdb/test/recd15scr.tcl Normal file
View file

@ -0,0 +1,74 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recd15scr.tcl,v 1.5 2002/01/30 13:18:04 margo Exp $
#
# Recd15 - lots of txns - txn prepare script
# Usage: recd15script envcmd dbfile gidfile numtxns
# envcmd: command to open env
# dbfile: name of database file
# gidf: name of global id file
# numtxns: number of txns to start
source ./include.tcl
source $test_path/test.tcl
source $test_path/testutils.tcl
set usage "recd15script envcmd dbfile gidfile numtxns"
# Verify usage
if { $argc != 4 } {
puts stderr "FAIL:[timestamp] Usage: $usage"
exit
}
# Initialize arguments
# envcmd:  command that opens the environment
# dbfile:  database file name, or "NULL" to run without a database
# gidfile: file in which to record the global txn ids we prepare
# numtxns: number of transactions to begin and prepare
set envcmd [ lindex $argv 0 ]
set dbfile [ lindex $argv 1 ]
set gidfile [ lindex $argv 2 ]
set numtxns [ lindex $argv 3 ]
# NOTE(review): txnmax is computed but never used below -- confirm
# whether it was meant to be passed to the env open command.
set txnmax [expr $numtxns + 5]
set dbenv [eval $envcmd]
error_check_good envopen [is_valid_env $dbenv] TRUE
set usedb 0
if { $dbfile != "NULL" } {
set usedb 1
set db [berkdb_open -auto_commit -env $dbenv $dbfile]
error_check_good dbopen [is_valid_db $db] TRUE
}
# Begin numtxns transactions; when a database is in use, also open one
# cursor per transaction so the txns hold real resources.
puts "\tRecd015script.a: Begin $numtxns txns"
for {set i 0} {$i < $numtxns} {incr i} {
set t [$dbenv txn]
error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
set txns($i) $t
if { $usedb } {
set dbc [$db cursor -txn $t]
error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
set curs($i) $dbc
}
}
# Prepare every transaction (cursors must be closed first) and record
# each global id in gidfile so the parent test can recover them.
puts "\tRecd015script.b: Prepare $numtxns txns"
set gfd [open $gidfile w+]
for {set i 0} {$i < $numtxns} {incr i} {
if { $usedb } {
set dbc $curs($i)
error_check_good dbc_close [$dbc close] 0
}
set t $txns($i)
set gid [make_gid recd015script:$t]
puts $gfd $gid
error_check_good txn_prepare:$t [$t prepare $gid] 0
}
close $gfd
#
# We do not close the db or env, but exit with the txns outstanding.
#
puts "\tRecd015script completed successfully"
flush stdout

37
bdb/test/recdscript.tcl Normal file
View file

@ -0,0 +1,37 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: recdscript.tcl,v 11.4 2002/01/11 15:53:32 bostic Exp $
#
# Recovery txn prepare script
# Usage: recdscript op dir envcmd dbfile gidfile cmd
# op: primary txn operation
# dir: test directory
# envcmd: command to open env
# dbfile: name of database file
# gidf: name of global id file
# cmd: db command to execute
source ./include.tcl
source $test_path/test.tcl
set usage "recdscript op dir envcmd dbfile gidfile cmd"
# Verify usage
if { $argc != 6 } {
puts stderr "FAIL:[timestamp] Usage: $usage"
exit
}
# Initialize arguments
# op:      primary txn operation (commit/abort/prepare variant)
# dir:     test directory
# envcmd:  command that opens the environment
# dbfile:  database file name
# gidfile: file holding global txn ids
# cmd:     db command to execute under the transaction
set op [ lindex $argv 0 ]
set dir [ lindex $argv 1 ]
set envcmd [ lindex $argv 2 ]
set dbfile [ lindex $argv 3 ]
set gidfile [ lindex $argv 4 ]
set cmd [ lindex $argv 5 ]
# Delegate the real work to the shared recovery-prepare helper.
op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd
flush stdout

249
bdb/test/rep001.tcl Normal file
View file

@ -0,0 +1,249 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2001-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rep001.tcl,v 1.16 2002/08/26 17:52:19 margo Exp $
#
# TEST rep001
# TEST Replication rename and forced-upgrade test.
# TEST
# TEST Run a modified version of test001 in a replicated master environment;
# TEST verify that the database on the client is correct.
# TEST Next, remove the database, close the master, upgrade the
# TEST client, reopen the master, and make sure the new master can correctly
# TEST run test001 and propagate it in the other direction.
# rep001 -- driver for the replication rename/forced-upgrade test.
# Runs rep001_sub twice: once plain, once with AES encryption turned
# on in both the environment args and the access-method args.
proc rep001 { method { niter 1000 } { tnum "01" } args } {
	global passwd

	# Pass 1: no extra environment arguments.
	puts "Rep0$tnum: Replication sanity test."
	rep001_sub $method $niter $tnum "" $args

	# Pass 2: identical test with encryption enabled.
	puts "Rep0$tnum: Replication and security sanity test."
	set secargs $args
	append secargs " -encrypt "
	rep001_sub $method $niter $tnum " -encryptaes $passwd " $secargs
}
# rep001_sub -- body of the rep001 test.
# Sets up a master/client replication pair, runs test001 on the master,
# removes the database, force-upgrades the client to master, reverses
# the replication direction, and verifies data flows both ways.
#
# method:  access method under test
# niter:   number of operations for each test001 run
# tnum:    test number suffix used in messages and file names
# envargs: extra environment-open arguments (e.g. encryption)
# largs:   extra access-method arguments
proc rep001_sub { method niter tnum envargs largs } {
source ./include.tcl
global testdir
global encrypt
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
set masterdir $testdir/MASTERDIR
set clientdir $testdir/CLIENTDIR
file mkdir $masterdir
file mkdir $clientdir
if { [is_record_based $method] == 1 } {
set checkfunc test001_recno.check
} else {
set checkfunc test001.check
}
# Open a master.
repladd 1
set masterenv \
[eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \
$envargs {-home $masterdir -txn -rep_master -rep_transport \
[list 1 replsend]}]
error_check_good master_env [is_valid_env $masterenv] TRUE
# Open a client
repladd 2
set clientenv [eval {berkdb_env -create} $envargs -txn -lock_max 2500 \
{-home $clientdir -rep_client -rep_transport [list 2 replsend]}]
error_check_good client_env [is_valid_env $clientenv] TRUE
# Bring the client online by processing the startup messages.
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $clientenv 2]
if { $nproced == 0 } {
break
}
}
# Open a test database on the master (so we can test having handles
# open across an upgrade).
puts "\tRep0$tnum.a:\
Opening test database for post-upgrade client logging test."
set master_upg_db [berkdb_open \
-create -auto_commit -btree -env $masterenv rep0$tnum-upg.db]
set puttxn [$masterenv txn]
error_check_good master_upg_db_put \
[$master_upg_db put -txn $puttxn hello world] 0
error_check_good puttxn_commit [$puttxn commit] 0
error_check_good master_upg_db_close [$master_upg_db close] 0
# Run a modified test001 in the master (and update client).
puts "\tRep0$tnum.b: Running test001 in replicated env."
eval test001 $method $niter 0 $tnum 1 -env $masterenv $largs
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $clientenv 2]
if { $nproced == 0 } {
break
}
}
# Open the cross-upgrade database on the client and check its contents.
set client_upg_db [berkdb_open \
-create -auto_commit -btree -env $clientenv rep0$tnum-upg.db]
error_check_good client_upg_db_get [$client_upg_db get hello] \
[list [list hello world]]
# !!! We use this handle later. Don't close it here.
# Verify the database in the client dir.
puts "\tRep0$tnum.c: Verifying client database contents."
set testdir [get_home $masterenv]
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
open_and_dump_file test0$tnum.db $clientenv $t1 \
$checkfunc dump_file_direction "-first" "-next"
# Remove the file (and update client).
puts "\tRep0$tnum.d: Remove the file on the master and close master."
error_check_good remove \
[$masterenv dbremove -auto_commit test0$tnum.db] 0
error_check_good masterenv_close [$masterenv close] 0
# NOTE(review): $masterenv was closed just above, yet its queue is
# still processed in this loop -- confirm this is intentional and that
# replprocessqueue tolerates a closed env handle.
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $clientenv 2]
if { $nproced == 0 } {
break
}
}
# Don't get confused in Tcl.
puts "\tRep0$tnum.e: Upgrade client."
set newmasterenv $clientenv
error_check_good upgrade_client [$newmasterenv rep_start -master] 0
# Run test001 in the new master
puts "\tRep0$tnum.f: Running test001 in new master."
eval test001 $method $niter 0 $tnum 1 -env $newmasterenv $largs
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $newmasterenv 2]
if { $nproced == 0 } {
break
}
}
puts "\tRep0$tnum.g: Reopen old master as client and catch up."
# Throttle master so it can't send everything at once
$newmasterenv rep_limit 0 [expr 64 * 1024]
set newclientenv [eval {berkdb_env -create -recover} $envargs \
-txn -lock_max 2500 \
{-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
error_check_good newclient_env [is_valid_env $newclientenv] TRUE
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $newclientenv 1]
incr nproced [replprocessqueue $newmasterenv 2]
if { $nproced == 0 } {
break
}
}
# The throttle must actually have kicked in: "Transmission limited"
# must be present and non-zero in the replication stats.
set stats [$newmasterenv rep_stat]
set nthrottles [getstats $stats {Transmission limited}]
error_check_bad nthrottles $nthrottles -1
error_check_bad nthrottles $nthrottles 0
# Run a modified test001 in the new master (and update client).
puts "\tRep0$tnum.h: Running test001 in new master."
eval test001 $method \
$niter $niter $tnum 1 -env $newmasterenv $largs
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $newclientenv 1]
incr nproced [replprocessqueue $newmasterenv 2]
if { $nproced == 0 } {
break
}
}
# Test put to the database handle we opened back when the new master
# was a client.
puts "\tRep0$tnum.i: Test put to handle opened before upgrade."
set puttxn [$newmasterenv txn]
error_check_good client_upg_db_put \
[$client_upg_db put -txn $puttxn hello there] 0
error_check_good puttxn_commit [$puttxn commit] 0
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $newclientenv 1]
incr nproced [replprocessqueue $newmasterenv 2]
if { $nproced == 0 } {
break
}
}
# Close the new master's handle for the upgrade-test database; we
# don't need it. Then check to make sure the client did in fact
# update the database.
error_check_good client_upg_db_close [$client_upg_db close] 0
set newclient_upg_db [berkdb_open -env $newclientenv rep0$tnum-upg.db]
error_check_good newclient_upg_db_get [$newclient_upg_db get hello] \
[list [list hello there]]
error_check_good newclient_upg_db_close [$newclient_upg_db close] 0
# Verify the database in the client dir.
puts "\tRep0$tnum.j: Verifying new client database contents."
set testdir [get_home $newmasterenv]
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
open_and_dump_file test0$tnum.db $newclientenv $t1 \
$checkfunc dump_file_direction "-first" "-next"
if { [string compare [convert_method $method] -recno] != 0 } {
filesort $t1 $t3
}
error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
error_check_good newmasterenv_close [$newmasterenv close] 0
error_check_good newclientenv_close [$newclientenv close] 0
# lsearch uses glob matching by default, so this detects any
# -encryptaes-style flag in envargs.
if { [lsearch $envargs "-encrypta*"] !=-1 } {
set encrypt 1
}
error_check_good verify \
[verify_dir $clientdir "\tRep0$tnum.k: " 0 0 1] 0
replclose $testdir/MSGQUEUEDIR
}

278
bdb/test/rep002.tcl Normal file
View file

@ -0,0 +1,278 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2002
# Sleepycat Software. All rights reserved.
#
# $Id: rep002.tcl,v 11.11 2002/08/08 18:13:12 sue Exp $
#
# TEST rep002
# TEST Basic replication election test.
# TEST
# TEST Run a modified version of test001 in a replicated master environment;
# TEST hold an election among a group of clients to make sure they select
# TEST a proper master from amongst themselves, in various scenarios.
# rep002 -- basic replication election test.
# Replicates test001 data from a master to nclients clients, then holds
# two elections: one with the master alive (the master must win by
# fiat) and one after closing the master (client #1, given the highest
# priority, must win).
#
# method:   access method under test (record-based methods are skipped)
# niter:    number of operations for the test001 run
# nclients: number of client environments
# tnum:     test number suffix used in messages and file names
# args:     extra access-method arguments
proc rep002 { method { niter 10 } { nclients 3 } { tnum "02" } args } {
source ./include.tcl
global elect_timeout
set elect_timeout 1000000
if { [is_record_based $method] == 1 } {
puts "Rep002: Skipping for method $method."
return
}
env_cleanup $testdir
set qdir $testdir/MSGQUEUEDIR
replsetup $qdir
set masterdir $testdir/MASTERDIR
file mkdir $masterdir
for { set i 0 } { $i < $nclients } { incr i } {
set clientdir($i) $testdir/CLIENTDIR.$i
file mkdir $clientdir($i)
}
puts "Rep0$tnum: Replication election test with $nclients clients."
# Open a master. Replication env id 1 is the master; clients use
# ids 2 .. nclients+1.
repladd 1
set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
$masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
set masterenv [eval $env_cmd(M)]
error_check_good master_env [is_valid_env $masterenv] TRUE
# Open the clients.
for { set i 0 } { $i < $nclients } { incr i } {
set envid [expr $i + 2]
repladd $envid
set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
-txn -rep_client -rep_transport \[list $envid replsend\]"
set clientenv($i) [eval $env_cmd($i)]
error_check_good \
client_env($i) [is_valid_env $clientenv($i)] TRUE
}
# Run a modified test001 in the master.
puts "\tRep0$tnum.a: Running test001 in replicated env."
eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
# Loop, processing first the master's messages, then the client's,
# until both queues are empty.
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1]
for { set i 0 } { $i < $nclients } { incr i } {
set envid [expr $i + 2]
incr nproced [replprocessqueue $clientenv($i) $envid]
}
if { $nproced == 0 } {
break
}
}
# Verify the database in the client dir.
for { set i 0 } { $i < $nclients } { incr i } {
puts "\tRep0$tnum.b: Verifying contents of client database $i."
set testdir [get_home $masterenv]
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
test001.check dump_file_direction "-first" "-next"
if { [string compare [convert_method $method] -recno] != 0 } {
filesort $t1 $t3
}
error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
}
# Start an election in the first client.
puts "\tRep0$tnum.d: Starting election without dead master."
set elect_pipe(0) [start_election \
$qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
tclsleep 1
# We want to verify all the clients but the one that declared an
# election get the election message.
# We also want to verify that the master declares the election
# over by fiat, even if everyone uses a lower priority than 20.
# Loop and process all messages, keeping track of which
# sites got a HOLDELECTION and checking that the returned newmaster,
# if any, is 1 (the master's replication ID).
set got_hold_elect(M) 0
for { set i 0 } { $i < $nclients } { incr i } {
set got_hold_elect($i) 0
}
while { 1 } {
set nproced 0
set he 0
set nm 0
incr nproced [replprocessqueue $masterenv 1 0 he nm]
if { $he == 1 } {
set elect_pipe(M) [start_election $qdir \
$env_cmd(M) [expr $nclients + 1] 0 $elect_timeout]
set got_hold_elect(M) 1
}
if { $nm != 0 } {
error_check_good newmaster_is_master $nm 1
}
for { set i 0 } { $i < $nclients } { incr i } {
set he 0
set envid [expr $i + 2]
incr nproced \
[replprocessqueue $clientenv($i) $envid 0 he nm]
if { $he == 1 } {
# error_check_bad client(0)_in_elect $i 0
set elect_pipe(M) [start_election $qdir \
$env_cmd($i) [expr $nclients + 1] 0 \
$elect_timeout]
set got_hold_elect($i) 1
}
if { $nm != 0 } {
error_check_good newmaster_is_master $nm 1
}
}
if { $nproced == 0 } {
break
}
}
# The live master must never have been asked to hold an election;
# every client except the initiator (client 0) must have been.
error_check_good got_hold_elect(master) $got_hold_elect(M) 0
unset got_hold_elect(M)
# error_check_good got_hold_elect(0) $got_hold_elect(0) 0
unset got_hold_elect(0)
for { set i 1 } { $i < $nclients } { incr i } {
error_check_good got_hold_elect($i) $got_hold_elect($i) 1
unset got_hold_elect($i)
}
cleanup_elections
# We need multiple clients to proceed from here.
if { $nclients < 2 } {
puts "\tRep0$tnum: Skipping for less than two clients."
error_check_good masterenv_close [$masterenv close] 0
for { set i 0 } { $i < $nclients } { incr i } {
error_check_good clientenv_close($i) \
[$clientenv($i) close] 0
}
return
}
# Make sure all the clients are synced up and ready to be good
# voting citizens.
error_check_good master_flush [$masterenv rep_flush] 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1 0]
for { set i 0 } { $i < $nclients } { incr i } {
incr nproced [replprocessqueue $clientenv($i) \
[expr $i + 2] 0]
}
if { $nproced == 0 } {
break
}
}
# Now hold another election in the first client, this time with
# a dead master.
puts "\tRep0$tnum.e: Starting election with dead master."
error_check_good masterenv_close [$masterenv close] 0
for { set i 0 } { $i < $nclients } { incr i } {
replclear [expr $i + 2]
}
set elect_pipe(0) [start_election \
$qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
tclsleep 1
# Process messages, and verify that the client with the highest
# priority--client #1--wins.
set got_newmaster 0
set tries 10
while { 1 } {
set nproced 0
set he 0
set nm 0
for { set i 0 } { $i < $nclients } { incr i } {
set he 0
set envid [expr $i + 2]
incr nproced \
[replprocessqueue $clientenv($i) $envid 0 he nm]
if { $he == 1 } {
# Client #1 has priority 100; everyone else
# has priority 10.
if { $i == 1 } {
set pri 100
} else {
set pri 10
}
# error_check_bad client(0)_in_elect $i 0
set elect_pipe(M) [start_election $qdir \
$env_cmd($i) [expr $nclients + 1] $pri \
$elect_timeout]
set got_hold_elect($i) 1
}
if { $nm != 0 } {
# Client #1's replication env id is 1 + 2 = 3.
error_check_good newmaster_is_master $nm \
[expr 1 + 2]
set got_newmaster $nm
# If this env is the new master, it needs to
# configure itself as such--this is a different
# env handle from the one that performed the
# election.
if { $nm == $envid } {
error_check_good make_master($i) \
[$clientenv($i) rep_start -master] \
0
}
}
}
# We need to wait around to make doubly sure that the
# election has finished...
if { $nproced == 0 } {
incr tries -1
if { $tries == 0 } {
break
} else {
tclsleep 1
}
}
}
# Verify that client #1 is actually the winner.
error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
cleanup_elections
for { set i 0 } { $i < $nclients } { incr i } {
error_check_good clientenv_close($i) [$clientenv($i) close] 0
}
replclose $testdir/MSGQUEUEDIR
}
proc reptwo { args } { eval rep002 $args }

221
bdb/test/rep003.tcl Normal file
View file

@ -0,0 +1,221 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2002
# Sleepycat Software. All rights reserved.
#
# $Id: rep003.tcl,v 11.9 2002/08/09 02:23:50 margo Exp $
#
# TEST rep003
# TEST Repeated shutdown/restart replication test
# TEST
# TEST Run a quick put test in a replicated master environment; start up,
# TEST shut down, and restart client processes, with and without recovery.
# TEST To ensure that environment state is transient, use DB_PRIVATE.
# rep003 -- repeated client shutdown/restart replication test.
# Opens a master and a DB_PRIVATE client; restarts the client plainly,
# with -recover, and with -recover_fatal, verifying after each restart
# that all previously replicated items survive (no rollback, [#5709]).
#
# method: access method under test (record-based methods are skipped)
# tnum:   test number suffix used in messages and file names
# args:   extra access-method arguments
proc rep003 { method { tnum "03" } args } {
source ./include.tcl
global testdir rep003_dbname rep003_omethod rep003_oargs
env_cleanup $testdir
# NOTE(review): niter is set but never referenced in this proc --
# confirm whether it is vestigial.
set niter 10
set rep003_dbname rep003.db
if { [is_record_based $method] } {
puts "Rep0$tnum: Skipping for method $method"
return
}
# Stash method/args in globals so rep003_put/rep003_check see them.
set rep003_omethod [convert_method $method]
set rep003_oargs [convert_args $method $args]
replsetup $testdir/MSGQUEUEDIR
set masterdir $testdir/MASTERDIR
file mkdir $masterdir
set clientdir $testdir/CLIENTDIR
file mkdir $clientdir
puts "Rep0$tnum: Replication repeated-startup test"
# Open a master.
repladd 1
set masterenv [berkdb_env_noerr -create -log_max 1000000 \
-home $masterdir -txn -rep_master -rep_transport [list 1 replsend]]
error_check_good master_env [is_valid_env $masterenv] TRUE
puts "\tRep0$tnum.a: Simple client startup test."
# Put item one.
rep003_put $masterenv A1 a-one
# Open a client. -private keeps its environment state transient.
repladd 2
set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
-rep_client -rep_transport [list 2 replsend]]
error_check_good client_env [is_valid_env $clientenv] TRUE
# Put another quick item.
rep003_put $masterenv A2 a-two
# Loop, processing first the master's messages, then the client's,
# until both queues are empty.
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $clientenv 2]
if { $nproced == 0 } {
break
}
}
rep003_check $clientenv A1 a-one
rep003_check $clientenv A2 a-two
error_check_good clientenv_close [$clientenv close] 0
replclear 2
# Now reopen the client after doing another put.
puts "\tRep0$tnum.b: Client restart."
rep003_put $masterenv B1 b-one
unset clientenv
set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
-rep_client -rep_transport [list 2 replsend]]
error_check_good client_env [is_valid_env $clientenv] TRUE
rep003_put $masterenv B2 b-two
# Loop, processing first the master's messages, then the client's,
# until both queues are empty.
while { 1 } {
set nproced 0
# The items from part A should be present at all times--
# if we roll them back, we've screwed up. [#5709]
rep003_check $clientenv A1 a-one
rep003_check $clientenv A2 a-two
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $clientenv 2]
if { $nproced == 0 } {
break
}
}
rep003_check $clientenv B1 b-one
rep003_check $clientenv B2 b-two
error_check_good clientenv_close [$clientenv close] 0
replclear 2
# Now reopen the client after a recovery.
puts "\tRep0$tnum.c: Client restart after recovery."
rep003_put $masterenv C1 c-one
unset clientenv
set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
-recover -rep_client -rep_transport [list 2 replsend]]
error_check_good client_env [is_valid_env $clientenv] TRUE
rep003_put $masterenv C2 c-two
# Loop, processing first the master's messages, then the client's,
# until both queues are empty.
while { 1 } {
set nproced 0
# The items from part A should be present at all times--
# if we roll them back, we've screwed up. [#5709]
rep003_check $clientenv A1 a-one
rep003_check $clientenv A2 a-two
rep003_check $clientenv B1 b-one
rep003_check $clientenv B2 b-two
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $clientenv 2]
if { $nproced == 0 } {
break
}
}
rep003_check $clientenv C1 c-one
rep003_check $clientenv C2 c-two
error_check_good clientenv_close [$clientenv close] 0
replclear 2
# Now reopen the client after a catastrophic recovery.
puts "\tRep0$tnum.d: Client restart after catastrophic recovery."
rep003_put $masterenv D1 d-one
unset clientenv
set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
-recover_fatal -rep_client -rep_transport [list 2 replsend]]
error_check_good client_env [is_valid_env $clientenv] TRUE
rep003_put $masterenv D2 d-two
# Loop, processing first the master's messages, then the client's,
# until both queues are empty.
while { 1 } {
set nproced 0
# The items from part A should be present at all times--
# if we roll them back, we've screwed up. [#5709]
rep003_check $clientenv A1 a-one
rep003_check $clientenv A2 a-two
rep003_check $clientenv B1 b-one
rep003_check $clientenv B2 b-two
rep003_check $clientenv C1 c-one
rep003_check $clientenv C2 c-two
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $clientenv 2]
if { $nproced == 0 } {
break
}
}
rep003_check $clientenv D1 d-one
rep003_check $clientenv D2 d-two
error_check_good clientenv_close [$clientenv close] 0
error_check_good masterenv_close [$masterenv close] 0
replclose $testdir/MSGQUEUEDIR
}
# rep003_put -- write one key/data pair to the rep003 database in the
# master environment, inside its own committed transaction. The
# database handle is opened (creating the db if needed) and closed on
# every call so the client sees fresh open/close log records too.
proc rep003_put { masterenv key data } {
	global rep003_dbname rep003_omethod rep003_oargs

	set dbh [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \
	    $rep003_omethod $rep003_oargs $rep003_dbname]
	error_check_good rep3_put_open($key,$data) [is_valid_db $dbh] TRUE

	# Do the put under an explicit transaction and commit it.
	set t [$masterenv txn]
	error_check_good rep3_put($key,$data) [$dbh put -txn $t $key $data] 0
	error_check_good rep3_put_txn_commit($key,$data) [$t commit] 0

	error_check_good rep3_put_close($key,$data) [$dbh close] 0
}
# rep003_check -- verify that the rep003 database in the given
# environment (normally the client) contains exactly data under key.
# Opens the database read-only, fetches the key, compares the data
# portion of the first returned pair, and closes the handle.
proc rep003_check { env key data } {
	global rep003_dbname

	set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname]
	error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE

	# $db get returns a list of {key data} pairs; check the data of
	# the first (only) match.
	set dbt [$db get $key]
	error_check_good rep3_check($key,$data) \
	    [lindex [lindex $dbt 0] 1] $data

	# Fixed copy-paste error label: this is the check helper's close,
	# not the put helper's (was "rep3_put_close").
	error_check_good rep3_check_close($key,$data) [$db close] 0
}

198
bdb/test/rep004.tcl Normal file
View file

@ -0,0 +1,198 @@
#
# Copyright (c) 2002
# Sleepycat Software. All rights reserved.
#
# $Id: rep004.tcl,v 1.5 2002/08/08 18:13:12 sue Exp $
#
# TEST rep004
# TEST Test of DB_REP_LOGSONLY.
# TEST
# TEST Run a quick put test in a master environment that has one logs-only
# TEST client. Shut down, then run catastrophic recovery in the logs-only
# TEST client and check that the database is present and populated.
proc rep004 { method { nitems 10 } { tnum "04" } args } {
source ./include.tcl
global testdir
# Start from a clean test directory and set up the message queues.
env_cleanup $testdir
set dbname rep0$tnum.db
set omethod [convert_method $method]
set oargs [convert_args $method $args]
puts "Rep0$tnum: Test of logs-only replication clients"
replsetup $testdir/MSGQUEUEDIR
set masterdir $testdir/MASTERDIR
file mkdir $masterdir
set clientdir $testdir/CLIENTDIR
file mkdir $clientdir
set logsonlydir $testdir/LOGSONLYDIR
file mkdir $logsonlydir
# Open a master, a logsonly replica, and a normal client.
# Sites are numbered 1 (master), 2 (logs-only), 3 (client) in the
# message queues.
repladd 1
set masterenv [berkdb_env -create -home $masterdir -txn -rep_master \
-rep_transport [list 1 replsend]]
error_check_good master_env [is_valid_env $masterenv] TRUE
repladd 2
set loenv [berkdb_env -create -home $logsonlydir -txn -rep_logsonly \
-rep_transport [list 2 replsend]]
error_check_good logsonly_env [is_valid_env $loenv] TRUE
repladd 3
set clientenv [berkdb_env -create -home $clientdir -txn -rep_client \
-rep_transport [list 3 replsend]]
error_check_good client_env [is_valid_env $clientenv] TRUE
puts "\tRep0$tnum.a: Populate database."
set db [eval {berkdb open -create -mode 0644 -auto_commit} \
-env $masterenv $oargs $omethod $dbname]
error_check_good dbopen [is_valid_db $db] TRUE
# Load up to $nitems dictionary words, one transaction per item.
# For record-based methods the key is the record number.
set did [open $dict]
set count 0
while { [gets $did str] != -1 && $count < $nitems } {
if { [is_record_based $method] == 1 } {
set key [expr $count + 1]
set data $str
} else {
set key $str
set data [reverse $str]
}
set kvals($count) $key
set dvals($count) [pad_data $method $data]
set txn [$masterenv txn]
error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
set ret [eval \
{$db put} -txn $txn {$key [chop_data $method $data]}]
error_check_good put($count) $ret 0
error_check_good commit($count) [$txn commit] 0
incr count
}
puts "\tRep0$tnum.b: Sync up clients."
# Pump messages among all three sites until every queue drains.
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $loenv 2]
incr nproced [replprocessqueue $clientenv 3]
if { $nproced == 0 } {
break
}
}
puts "\tRep0$tnum.c: Get master and logs-only client ahead."
# Add another batch of items that only the master (and later the
# logs-only site) will have, leaving the normal client behind.
set newcount 0
while { [gets $did str] != -1 && $newcount < $nitems } {
if { [is_record_based $method] == 1 } {
set key [expr $count + 1]
set data $str
} else {
set key $str
set data [reverse $str]
}
set kvals($count) $key
set dvals($count) [pad_data $method $data]
set txn [$masterenv txn]
error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
set ret [eval \
{$db put} -txn $txn {$key [chop_data $method $data]}]
error_check_good put($count) $ret 0
error_check_good commit($count) [$txn commit] 0
incr count
incr newcount
}
error_check_good db_close [$db close] 0
puts "\tRep0$tnum.d: Sync up logs-only client only, then fail over."
# Note: the normal client (site 3) is deliberately NOT processed here.
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1]
incr nproced [replprocessqueue $loenv 2]
if { $nproced == 0 } {
break
}
}
# "Crash" the master, and fail over to the upgradeable client.
error_check_good masterenv_close [$masterenv close] 0
replclear 3
error_check_good upgrade_client [$clientenv rep_start -master] 0
# Sync the logs-only replica with the newly-promoted master.
set donenow 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $clientenv 3]
incr nproced [replprocessqueue $loenv 2]
if { $nproced == 0 } {
break
}
}
error_check_good loenv_close [$loenv close] 0
puts "\tRep0$tnum.e: Run catastrophic recovery on logs-only client."
# Reopen the logs-only env with -recover_fatal to rebuild the
# database from its logs alone.
set loenv [berkdb_env -create -home $logsonlydir -txn -recover_fatal]
puts "\tRep0$tnum.f: Verify logs-only client contents."
set lodb [eval {berkdb open} -env $loenv $oargs $omethod $dbname]
set loc [$lodb cursor]
set cdb [eval {berkdb open} -env $clientenv $oargs $omethod $dbname]
set cc [$cdb cursor]
# Make sure new master and recovered logs-only replica match.
# Walk both directions so neither database has extra records.
for { set cdbt [$cc get -first] } \
{ [llength $cdbt] > 0 } { set cdbt [$cc get -next] } {
set lodbt [$loc get -next]
error_check_good newmaster_replica_match $cdbt $lodbt
}
# Reset new master cursor.
error_check_good cc_close [$cc close] 0
set cc [$cdb cursor]
for { set lodbt [$loc get -first] } \
{ [llength $lodbt] > 0 } { set lodbt [$loc get -next] } {
set cdbt [$cc get -next]
error_check_good replica_newmaster_match $lodbt $cdbt
}
error_check_good loc_close [$loc close] 0
error_check_good lodb_close [$lodb close] 0
error_check_good loenv_close [$loenv close] 0
error_check_good cc_close [$cc close] 0
error_check_good cdb_close [$cdb close] 0
error_check_good clientenv_close [$clientenv close] 0
close $did
replclose $testdir/MSGQUEUEDIR
}

225
bdb/test/rep005.tcl Normal file
View file

@ -0,0 +1,225 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2002
# Sleepycat Software. All rights reserved.
#
# $Id: rep005.tcl,v 11.3 2002/08/08 18:13:13 sue Exp $
#
# TEST rep005
# TEST Replication election test with error handling.
# TEST
# TEST Run a modified version of test001 in a replicated master environment;
# TEST hold an election among a group of clients to make sure they select
# TEST a proper master from amongst themselves, forcing errors at various
# TEST locations in the election path.
proc rep005 { method { niter 10 } { tnum "05" } args } {
source ./include.tcl
# test001 is run below with a string key pattern; record-based
# methods are not exercised by this test.
if { [is_record_based $method] == 1 } {
puts "Rep005: Skipping for method $method."
return
}
set nclients 3
env_cleanup $testdir
set qdir $testdir/MSGQUEUEDIR
replsetup $qdir
set masterdir $testdir/MASTERDIR
file mkdir $masterdir
for { set i 0 } { $i < $nclients } { incr i } {
set clientdir($i) $testdir/CLIENTDIR.$i
file mkdir $clientdir($i)
}
puts "Rep0$tnum: Replication election test with $nclients clients."
# Open a master.
# env_cmd strings are saved so rep005_elect can reopen the same
# environments inside election subprocesses.
repladd 1
set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
$masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
set masterenv [eval $env_cmd(M)]
error_check_good master_env [is_valid_env $masterenv] TRUE
# Open the clients.
# Client $i uses message-queue ID $i + 2 (the master is ID 1).
for { set i 0 } { $i < $nclients } { incr i } {
set envid [expr $i + 2]
repladd $envid
set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
-txn -rep_client -rep_transport \[list $envid replsend\]"
set clientenv($i) [eval $env_cmd($i)]
error_check_good \
client_env($i) [is_valid_env $clientenv($i)] TRUE
}
# Run a modified test001 in the master.
puts "\tRep0$tnum.a: Running test001 in replicated env."
eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
# Loop, processing first the master's messages, then the client's,
# until both queues are empty.
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1]
for { set i 0 } { $i < $nclients } { incr i } {
set envid [expr $i + 2]
incr nproced [replprocessqueue $clientenv($i) $envid]
}
if { $nproced == 0 } {
break
}
}
# Verify the database in the client dir.
for { set i 0 } { $i < $nclients } { incr i } {
puts "\tRep0$tnum.b: Verifying contents of client database $i."
set testdir [get_home $masterenv]
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
test001.check dump_file_direction "-first" "-next"
if { [string compare [convert_method $method] -recno] != 0 } {
filesort $t1 $t3
}
error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
}
# Make sure all the clients are synced up and ready to be good
# voting citizens.
error_check_good master_flush [$masterenv rep_flush] 0
while { 1 } {
set nproced 0
incr nproced [replprocessqueue $masterenv 1 0]
for { set i 0 } { $i < $nclients } { incr i } {
incr nproced [replprocessqueue $clientenv($i) \
[expr $i + 2] 0]
}
if { $nproced == 0 } {
break
}
}
# Close the master and discard any messages still queued for the
# clients so elections start from a clean slate.
error_check_good masterenv_close [$masterenv close] 0
for { set i 0 } { $i < $nclients } { incr i } {
replclear [expr $i + 2]
}
#
# We set up the error list for each client. We know that the
# first client is the one calling the election, therefore, add
# the error location on sending the message (electsend) for that one.
# Every combination of injected error points across the three
# clients is exercised in turn.
set m "Rep0$tnum"
set count 0
foreach c0 { electinit electsend electvote1 electwait1 electvote2 \
electwait2 } {
foreach c1 { electinit electvote1 electwait1 electvote2 \
electwait2 } {
foreach c2 { electinit electvote1 electwait1 \
electvote2 electwait2 } {
set elist [list $c0 $c1 $c2]
rep005_elect env_cmd clientenv $qdir $m \
$count $elist
incr count
}
}
}
for { set i 0 } { $i < $nclients } { incr i } {
error_check_good clientenv_close($i) [$clientenv($i) close] 0
}
replclose $testdir/MSGQUEUEDIR
}
proc rep005_elect { ecmd cenv qdir msg count elist } {
global elect_timeout
# ecmd/cenv are names of arrays in the caller's scope: the env
# command strings and the open client env handles.
upvar $ecmd env_cmd
upvar $cenv clientenv
set elect_timeout 1000000
set nclients [llength $elist]
# err_cmd($i) is the error-injection point for client $i.
for { set i 0 } { $i < $nclients } { incr i } {
set err_cmd($i) [lindex $elist $i]
}
puts "\t$msg.d.$count: Starting election with errors $elist"
# Client 0 initiates the election in a subprocess, with low
# priority (20) so it should not win.
set elect_pipe(0) [start_election $qdir $env_cmd(0) \
[expr $nclients + 1] 20 $elect_timeout $err_cmd(0)]
tclsleep 1
# Process messages, and verify that the client with the highest
# priority--client #1--wins.
set got_newmaster 0
set tries 10
while { 1 } {
set nproced 0
set he 0
set nm 0
for { set i 0 } { $i < $nclients } { incr i } {
set he 0
set envid [expr $i + 2]
# puts "Processing queue for client $i"
# he/nm are call-by-reference outputs: hold-election
# request and new-master ID, respectively.
incr nproced \
[replprocessqueue $clientenv($i) $envid 0 he nm]
if { $he == 1 } {
# Client #1 has priority 100; everyone else
if { $i == 1 } {
set pri 100
} else {
set pri 10
}
# error_check_bad client(0)_in_elect $i 0
# puts "Starting election on client $i"
set elect_pipe($i) [start_election $qdir \
$env_cmd($i) [expr $nclients + 1] $pri \
$elect_timeout $err_cmd($i)]
set got_hold_elect($i) 1
}
if { $nm != 0 } {
# The winner must be client 1 (envid 3).
error_check_good newmaster_is_master $nm \
[expr 1 + 2]
set got_newmaster $nm
# If this env is the new master, it needs to
# configure itself as such--this is a different
# env handle from the one that performed the
# election.
if { $nm == $envid } {
error_check_good make_master($i) \
[$clientenv($i) rep_start -master] \
0
}
}
}
# We need to wait around to make doubly sure that the
# election has finished...
if { $nproced == 0 } {
incr tries -1
if { $tries == 0 } {
break
} else {
tclsleep 1
}
}
}
# Verify that client #1 is actually the winner.
error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
cleanup_elections
}

659
bdb/test/reputils.tcl Normal file
View file

@ -0,0 +1,659 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2001-2002
# Sleepycat Software. All rights reserved.
#
# $Id: reputils.tcl,v 11.34 2002/08/12 17:54:18 sandstro Exp $
#
# Replication testing utilities
# Environment handle for the env containing the replication "communications
# structure" (really a CDB environment).
# The test environment consists of a queue and a directory (environment)
# per replication site. The queue is used to hold messages destined for a
# particular site and the directory will contain the environment for the
# site. So the environment looks like:
# $testdir
# ___________|______________________________
# / | \ \
# MSGQUEUEDIR MASTERDIR CLIENTDIR.0 ... CLIENTDIR.N-1
# | | ... |
# 1 2 .. N+1
#
# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message
# queues 2 - N+1.
#
# The globals repenv(1-N) contain the environment handles for the sites
# with a given id (i.e., repenv(1) is the master's environment).
# Handle for the environment that holds the per-site message queues.
global queueenv
# Array of DB handles, one per machine ID, for the databases that contain
# messages.
global queuedbs
# List of machine IDs registered via repladd.
global machids
global elect_timeout
# Default election timeout; rep005_elect overrides this per election.
set elect_timeout 50000000
# Message dropping is off until repl_envsetup enables it.
set drop 0
# Create the directory structure for replication testing.
# Open the master and client environments; store these in the global repenv
# Return the master's environment: "-env masterenv"
#
proc repl_envsetup { envargs largs tnum {nclients 1} {droppct 0} { oob 0 } } {
source ./include.tcl
global clientdir
global drop drop_msg
global masterdir
global repenv
global testdir
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
set masterdir $testdir/MASTERDIR
file mkdir $masterdir
# droppct is a percentage of messages to drop: replsend drops
# one of every (100 / droppct) messages when drop is enabled.
if { $droppct != 0 } {
set drop 1
set drop_msg [expr 100 / $droppct]
} else {
set drop 0
}
for { set i 0 } { $i < $nclients } { incr i } {
set clientdir($i) $testdir/CLIENTDIR.$i
file mkdir $clientdir($i)
}
# Open a master.
repladd 1
#
# Set log smaller than default to force changing files,
# but big enough so that the tests that use binary files
# as keys/data can run.
#
set lmax [expr 3 * 1024 * 1024]
set masterenv [eval {berkdb_env -create -log_max $lmax} $envargs \
{-home $masterdir -txn -rep_master -rep_transport \
[list 1 replsend]}]
error_check_good master_env [is_valid_env $masterenv] TRUE
set repenv(master) $masterenv
# Open clients
# Client $i uses message-queue ID $i + 2.
for { set i 0 } { $i < $nclients } { incr i } {
set envid [expr $i + 2]
repladd $envid
set clientenv [eval {berkdb_env -create} $envargs -txn \
{-cachesize { 0 10000000 0 }} -lock_max 10000 \
{-home $clientdir($i) -rep_client -rep_transport \
[list $envid replsend]}]
error_check_good client_env [is_valid_env $clientenv] TRUE
set repenv($i) $clientenv
}
# NULL sentinel marks the end of the client array for the
# counting loops in repl_envprocq/repl_envclose.
set repenv($i) NULL
append largs " -env $masterenv "
# Process startup messages
repl_envprocq $tnum $nclients $oob
return $largs
}
# Process all incoming messages. Iterate until there are no messages left
# in anyone's queue so that we capture all message exchanges. We verify that
# the requested number of clients matches the number of client environments
# we have. The oob parameter indicates if we should process the queue
# with out-of-order delivery. The replprocess procedure actually does
# the real work of processing the queue -- this routine simply iterates
# over the various queues and does the initial setup.
proc repl_envprocq { tnum { nclients 1 } { oob 0 }} {
global repenv
global drop
set masterenv $repenv(master)
# Count client environments up to the NULL sentinel and verify
# the count matches what the caller asked for.
for { set i 0 } { 1 } { incr i } {
if { $repenv($i) == "NULL"} {
break
}
}
error_check_good i_nclients $nclients $i
set name [format "Repl%03d" $tnum]
berkdb debug_check
puts -nonewline "\t$name: Processing master/$i client queues"
set rand_skip 0
if { $oob } {
puts " out-of-order"
} else {
puts " in order"
}
set do_check 1
# Remember the drop setting; it is cleared below after a flush
# and restored before returning.
set droprestore $drop
while { 1 } {
set nproced 0
if { $oob } {
set rand_skip [berkdb random_int 2 10]
}
incr nproced [replprocessqueue $masterenv 1 $rand_skip]
for { set i 0 } { $i < $nclients } { incr i } {
set envid [expr $i + 2]
if { $oob } {
set rand_skip [berkdb random_int 2 10]
}
set n [replprocessqueue $repenv($i) \
$envid $rand_skip]
incr nproced $n
}
if { $nproced == 0 } {
# Now that we delay requesting records until
# we've had a few records go by, we should always
# see that the number of requests is lower than the
# number of messages that were enqueued.
for { set i 0 } { $i < $nclients } { incr i } {
set clientenv $repenv($i)
set stats [$clientenv rep_stat]
set queued [getstats $stats \
{Total log records queued}]
error_check_bad queued_stats \
$queued -1
set requested [getstats $stats \
{Log records requested}]
error_check_bad requested_stats \
$requested -1
if { $queued != 0 && $do_check != 0 } {
error_check_good num_requested \
[expr $requested < $queued] 1
}
$clientenv rep_request 1 1
}
# If we were dropping messages, we might need
# to flush the log so that we get everything
# and end up in the right state.
if { $drop != 0 } {
set drop 0
set do_check 0
$masterenv rep_flush
berkdb debug_check
puts "\t$name: Flushing Master"
} else {
break
}
}
}
# Reset the clients back to the default state in case we
# have more processing to do.
for { set i 0 } { $i < $nclients } { incr i } {
set clientenv $repenv($i)
$clientenv rep_request 4 128
}
set drop $droprestore
}
# Verify that the directories in the master are exactly replicated in
# each of the client environments.
proc repl_envver0 { tnum method { nclients 1 } } {
global clientdir
global masterdir
global repenv
# Verify the database in the client dir.
# First dump the master.
set t1 $masterdir/t1
set t2 $masterdir/t2
set t3 $masterdir/t3
set omethod [convert_method $method]
set name [format "Repl%03d" $tnum]
#
# We are interested in the keys of whatever databases are present
# in the master environment, so we just call a no-op check function
# since we have no idea what the contents of this database really is.
# We just need to walk the master and the clients and make sure they
# have the same contents.
#
set cwd [pwd]
cd $masterdir
set stat [catch {glob test*.db} dbs]
cd $cwd
# No test databases in the master directory: nothing to verify.
if { $stat == 1 } {
return
}
foreach testfile $dbs {
open_and_dump_file $testfile $repenv(master) $masterdir/t2 \
repl_noop dump_file_direction "-first" "-next"
# Sort the dumps for non-recno methods so that dumps taken
# in different orders still compare equal.
if { [string compare [convert_method $method] -recno] != 0 } {
filesort $t2 $t3
file rename -force $t3 $t2
}
for { set i 0 } { $i < $nclients } { incr i } {
puts "\t$name: Verifying client $i database \
$testfile contents."
open_and_dump_file $testfile $repenv($i) \
$t1 repl_noop dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
filesort $t1 $t3
} else {
catch {file copy -force $t1 $t3} ret
}
error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
}
}
}
# Remove all the elements from the master and verify that these
# deletions properly propagated to the clients.
proc repl_verdel { tnum method { nclients 1 } } {
global clientdir
global masterdir
global repenv
# Delete all items in the master.
set name [format "Repl%03d" $tnum]
set cwd [pwd]
cd $masterdir
set stat [catch {glob test*.db} dbs]
cd $cwd
# No test databases in the master directory: nothing to do.
if { $stat == 1 } {
return
}
foreach testfile $dbs {
puts "\t$name: Deleting all items from the master."
# Delete every record under one transaction via a cursor walk.
set txn [$repenv(master) txn]
error_check_good txn_begin [is_valid_txn $txn \
$repenv(master)] TRUE
set db [berkdb_open -txn $txn -env $repenv(master) $testfile]
error_check_good reopen_master [is_valid_db $db] TRUE
set dbc [$db cursor -txn $txn]
error_check_good reopen_master_cursor \
[is_valid_cursor $dbc $db] TRUE
for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
{ set dbt [$dbc get -next] } {
error_check_good del_item [$dbc del] 0
}
error_check_good dbc_close [$dbc close] 0
error_check_good txn_commit [$txn commit] 0
error_check_good db_close [$db close] 0
# Propagate the deletions to the clients before checking.
repl_envprocq $tnum $nclients
# Check clients.
for { set i 0 } { $i < $nclients } { incr i } {
puts "\t$name: Verifying emptiness of client database $i."
set db [berkdb_open -env $repenv($i) $testfile]
error_check_good reopen_client($i) \
[is_valid_db $db] TRUE
set dbc [$db cursor]
error_check_good reopen_client_cursor($i) \
[is_valid_cursor $dbc $db] TRUE
error_check_good client($i)_empty \
[llength [$dbc get -first]] 0
error_check_good dbc_close [$dbc close] 0
error_check_good db_close [$db close] 0
}
}
}
# Replication "check" function for the dump procs that expect to
# be able to verify the keys and data.
proc repl_noop { k d } {
	# Accept any key/data pair unconditionally; the replication dump
	# helpers compare the resulting files afterward instead.
}
# Close all the master and client environments in a replication test directory.
proc repl_envclose { tnum envargs } {
source ./include.tcl
global clientdir
global encrypt
global masterdir
global repenv
global testdir
# Verification below must know whether the envs were encrypted.
if { [lsearch $envargs "-encrypta*"] !=-1 } {
set encrypt 1
}
# In order to make sure that we have fully-synced and ready-to-verify
# databases on all the clients, do a checkpoint on the master and
# process messages in order to flush all the clients.
set drop 0
set do_check 0
set name [format "Repl%03d" $tnum]
berkdb debug_check
puts "\t$name: Checkpointing master."
error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0
# Count clients.
# repenv was terminated with a NULL sentinel by repl_envsetup.
for { set ncli 0 } { 1 } { incr ncli } {
if { $repenv($ncli) == "NULL" } {
break
}
}
repl_envprocq $tnum $ncli
error_check_good masterenv_close [$repenv(master) close] 0
verify_dir $masterdir "\t$name: " 0 0 1
for { set i 0 } { $i < $ncli } { incr i } {
error_check_good client($i)_close [$repenv($i) close] 0
verify_dir $clientdir($i) "\t$name: " 0 0 1
}
replclose $testdir/MSGQUEUEDIR
}
# Close up a replication group
proc replclose { queuedir } {
	global queueenv queuedbs machids

	# Close every per-site message database, then the queue env itself.
	foreach site $machids {
		error_check_good dbr_close [$queuedbs($site) close] 0
	}
	error_check_good qenv_close [$queueenv close] 0

	# Forget the site list so a subsequent replsetup starts fresh.
	set machids {}
}
# Create a replication group for testing.
proc replsetup { queuedir } {
	global queueenv queuedbs machids

	# Create the message-queue directory and open a transactional
	# environment over it to hold the per-site message databases.
	file mkdir $queuedir
	set queueenv [berkdb_env -create -txn -lock_max 20000 -home $queuedir]
	error_check_good queueenv [is_valid_env $queueenv] TRUE

	# Discard any state left over from a previous replication group.
	catch {unset queuedbs}
	set machids {}

	return $queueenv
}
# Send function for replication.
proc replsend { control rec fromid toid } {
global queuedbs queueenv machids
global drop drop_msg
#
# If we are testing with dropped messages, then we drop every
# $drop_msg time. If we do that just return 0 and don't do
# anything.
# (drop doubles as the counter: it cycles 1..drop_msg and the
# message is dropped when it reaches drop_msg.)
#
if { $drop != 0 } {
incr drop
if { $drop == $drop_msg } {
set drop 1
return 0
}
}
# XXX
# -1 is DB_BROADCAST_MID
if { $toid == -1 } {
set machlist $machids
} else {
if { [info exists queuedbs($toid)] != 1 } {
error "replsend: machid $toid not found"
}
set machlist [list $toid]
}
# Append the message to each destination's recno queue; the
# record holds {control rec fromid}.
foreach m $machlist {
# XXX should a broadcast include to "self"?
if { $m == $fromid } {
continue
}
set db $queuedbs($m)
set txn [$queueenv txn]
$db put -txn $txn -append [list $control $rec $fromid]
error_check_good replsend_commit [$txn commit] 0
}
return 0
}
# Nuke all the pending messages for a particular site.
proc replclear { machid } {
	global queuedbs queueenv

	if { ![info exists queuedbs($machid)] } {
		error "FAIL: replclear: machid $machid not found"
	}

	# Walk the site's message queue under one transaction, deleting
	# every record; -rmw write-locks entries as we visit them.
	set txn [$queueenv txn]
	set cursor [$queuedbs($machid) cursor -txn $txn]
	set entry [$cursor get -rmw -first]
	while { [llength $entry] > 0 } {
		error_check_good replclear($machid)_del [$cursor del] 0
		set entry [$cursor get -rmw -next]
	}
	error_check_good replclear($machid)_dbc_close [$cursor close] 0
	error_check_good replclear($machid)_txn_commit [$txn commit] 0
}
# Add a machine to a replication environment.
proc repladd { machid } {
	global queueenv queuedbs machids

	if { [info exists queuedbs($machid)] } {
		error "FAIL: repladd: machid $machid already exists"
	}

	# Each site gets its own renumbering recno database of pending
	# messages; replsend appends to it with -append.
	set qdb [berkdb open -auto_commit \
	    -env $queueenv -create -recno -renumber repqueue$machid.db]
	error_check_good repqueue_create [is_valid_db $qdb] TRUE
	set queuedbs($machid) $qdb
	lappend machids $machid
}
# Process a queue of messages, skipping every "skip_interval" entry.
# We traverse the entire queue, but since we skip some messages, we
# may end up leaving things in the queue, which should get picked up
# on a later run.
proc replprocessqueue { dbenv machid { skip_interval 0 } \
    { hold_electp NONE } { newmasterp NONE } } {
global queuedbs queueenv errorCode
# hold_electp is a call-by-reference variable which lets our caller
# know we need to hold an election.
if { [string compare $hold_electp NONE] != 0 } {
upvar $hold_electp hold_elect
}
set hold_elect 0
# newmasterp is the same idea, only returning the ID of a master
# given in a DB_REP_NEWMASTER return.
if { [string compare $newmasterp NONE] != 0 } {
upvar $newmasterp newmaster
}
set newmaster 0
set nproced 0
set txn [$queueenv txn]
set dbc [$queuedbs($machid) cursor -txn $txn]
error_check_good process_dbc($machid) \
[is_valid_cursor $dbc $queuedbs($machid)] TRUE
for { set dbt [$dbc get -first] } \
{ [llength $dbt] != 0 } \
{ set dbt [$dbc get -next] } {
set data [lindex [lindex $dbt 0] 1]
# If skip_interval is nonzero, we want to process messages
# out of order. We do this in a simple but slimy way--
# continue walking with the cursor without processing the
# message or deleting it from the queue, but do increment
# "nproced". The way this proc is normally used, the
# precise value of nproced doesn't matter--we just don't
# assume the queues are empty if it's nonzero. Thus,
# if we contrive to make sure it's nonzero, we'll always
# come back to records we've skipped on a later call
# to replprocessqueue. (If there really are no records,
# we'll never get here.)
#
# Skip every skip_interval'th record (and use a remainder other
# than zero so that we're guaranteed to really process at least
# one record on every call).
if { $skip_interval != 0 } {
if { $nproced % $skip_interval == 1 } {
incr nproced
continue
}
}
# We have to play an ugly cursor game here: we currently
# hold a lock on the page of messages, but rep_process_message
# might need to lock the page with a different cursor in
# order to send a response. So save our recno, close
# the cursor, and then reopen and reset the cursor.
set recno [lindex [lindex $dbt 0] 0]
error_check_good dbc_process_close [$dbc close] 0
error_check_good txn_commit [$txn commit] 0
# Message record layout is {control rec fromid}; see replsend.
set ret [catch {$dbenv rep_process_message \
[lindex $data 2] [lindex $data 0] [lindex $data 1]} res]
set txn [$queueenv txn]
set dbc [$queuedbs($machid) cursor -txn $txn]
set dbt [$dbc get -set $recno]
if { $ret != 0 } {
if { [is_substr $res DB_REP_HOLDELECTION] } {
set hold_elect 1
} else {
error "FAIL:[timestamp]\
rep_process_message returned $res"
}
}
incr nproced
$dbc del
# A nonzero, non-error result from rep_process_message is
# either DB_REP_NEWSITE or the new master's ID.
if { $ret == 0 && $res != 0 } {
if { [is_substr $res DB_REP_NEWSITE] } {
# NEWSITE; do nothing.
} else {
set newmaster $res
# Break as soon as we get a NEWMASTER message;
# our caller needs to handle it.
break
}
}
if { $hold_elect == 1 } {
# Break also on a HOLDELECTION, for the same reason.
break
}
}
error_check_good dbc_close [$dbc close] 0
error_check_good txn_commit [$txn commit] 0
# Return the number of messages processed.
return $nproced
}
# Command-line flag tests use to request a replicated run; consumed by
# extract_repl_args and delete_repl_args.
set run_repl_flag "-run_repl"
proc extract_repl_args { args } {
	global run_repl_flag

	# Scan the argument list until an empty element terminates it; when
	# the -run_repl flag is found, return the value that follows it.
	set idx 0
	while { [string length [set cur [lindex $args $idx]]] > 0 } {
		if { [string compare $cur $run_repl_flag] == 0 } {
			return [lindex $args [expr $idx + 1]]
		}
		incr idx
	}
	return ""
}
proc delete_repl_args { args } {
	global run_repl_flag

	# Copy the argument list, dropping the -run_repl flag together with
	# the value that follows it.  An empty element terminates the scan.
	set kept {}
	set idx 0
	while { 1 } {
		set cur [lindex $args $idx]
		if { [string length $cur] == 0 } {
			break
		}
		if { [string compare $cur $run_repl_flag] == 0 } {
			# Skip the flag's value on the next step as well.
			incr idx
		} else {
			lappend kept $cur
		}
		incr idx
	}
	return $kept
}
# Serial number for election subprocesses; it names their output files and
# indexes the elections_in_progress array of open pipes.
global elect_serial
global elections_in_progress
set elect_serial 0
# Start an election in a sub-process.
proc start_election { qdir envstring nsites pri timeout {err "none"}} {
source ./include.tcl
global elect_serial elect_timeout elections_in_progress machids
incr elect_serial
# Launch a tclsh subprocess and feed it commands over a pipe;
# its output is captured in a per-election file.
set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w]
puts $t "source $test_path/test.tcl"
puts $t "replsetup $qdir"
foreach i $machids { puts $t "repladd $i" }
puts $t "set env_cmd \{$envstring\}"
puts $t "set dbenv \[eval \$env_cmd -errfile \
$testdir/ELECTION_ERRFILE.$elect_serial -errpfx FAIL: \]"
# puts "Start election err $err, env $envstring"
# Inject the requested error point before calling the election.
puts $t "\$dbenv test abort $err"
puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
$elect_timeout\} ret\]"
if { $err != "none" } {
# Clear the error point and retry so the election can finish.
puts $t "\$dbenv test abort none"
puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
$elect_timeout\} ret\]"
}
flush $t
set elections_in_progress($elect_serial) $t
return $elect_serial
}
proc close_election { i } {
	global elections_in_progress

	# Tell the election subprocess to close its environment, then shut
	# down the pipe and drop our record of it.
	set pipe $elections_in_progress($i)
	puts $pipe "\$dbenv close"
	close $pipe
	unset elections_in_progress($i)
}
proc cleanup_elections { } {
	global elect_serial elections_in_progress

	# Close any election subprocess that is still open and reset the
	# serial counter for the next round of elections.
	for { set num 0 } { $num <= $elect_serial } { incr num } {
		if { [info exists elections_in_progress($num)] } {
			close_election $num
		}
	}
	set elect_serial 0
}

View file

@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rpc001.tcl,v 11.23 2001/01/02 20:04:56 sue Exp $
#
# Test RPC specifics, primarily that unsupported functions return
# errors and such.
# $Id: rpc001.tcl,v 11.33 2002/07/25 22:57:32 mjc Exp $
#
# TEST rpc001
# TEST Test RPC server timeouts for cursor, txn and env handles.
# TEST Test RPC specifics, primarily that unsupported functions return
# TEST errors and such.
proc rpc001 { } {
global __debug_on
global __debug_print
global errorInfo
global rpc_svc
source ./include.tcl
#
@ -21,10 +23,10 @@ proc rpc001 { } {
set itime 10
puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
if { [string compare $rpc_server "localhost"] == 0 } {
set dpid [exec $util_path/berkeley_db_svc \
set dpid [exec $util_path/$rpc_svc \
-h $rpc_testdir -t $ttime -I $itime &]
} else {
set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir -t $ttime -I $itime&]
}
puts "\tRpc001.a: Started server, pid $dpid"
@ -36,14 +38,14 @@ proc rpc001 { } {
set testfile "rpc001.db"
set home [file tail $rpc_testdir]
set env [eval {berkdb env -create -mode 0644 -home $home \
set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000 -txn}]
error_check_good lock_env:open [is_valid_env $env] TRUE
puts "\tRpc001.c: Opening a database"
#
# NOTE: the type of database doesn't matter, just use btree.
set db [eval {berkdb_open -create -btree -mode 0644} \
set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
-env $env $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@ -230,9 +232,10 @@ proc rpc001 { } {
#
# We need a 2nd env just to do an op to timeout the env.
# Make the flags different so we don't end up sharing a handle.
#
set env1 [eval {berkdb env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000 -txn}]
set env1 [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000}]
error_check_good lock_env:open [is_valid_env $env1] TRUE
puts "\tRpc001.l: Timeout idle env handle"
@ -247,7 +250,7 @@ proc rpc001 { } {
error_check_good env_timeout \
[is_substr $errorInfo "DB_NOSERVER_ID"] 1
exec $KILL $dpid
tclkill $dpid
}
proc rpc_timeoutjoin {env msg sleeptime use_txn} {
@ -257,8 +260,10 @@ proc rpc_timeoutjoin {env msg sleeptime use_txn} {
puts -nonewline "\t$msg: Test join cursors and timeouts"
if { $use_txn } {
puts " (using txns)"
set txnflag "-auto_commit"
} else {
puts " (without txns)"
set txnflag ""
}
#
# Set up a simple set of join databases
@ -278,32 +283,32 @@ proc rpc_timeoutjoin {env msg sleeptime use_txn} {
{apple pie} {raspberry pie} {lemon pie}
}
set fdb [eval {berkdb_open -create -btree -mode 0644} \
-env $env -dup fruit.db]
$txnflag -env $env -dup fruit.db]
error_check_good dbopen [is_valid_db $fdb] TRUE
set pdb [eval {berkdb_open -create -btree -mode 0644} \
-env $env -dup price.db]
$txnflag -env $env -dup price.db]
error_check_good dbopen [is_valid_db $pdb] TRUE
set ddb [eval {berkdb_open -create -btree -mode 0644} \
-env $env -dup dessert.db]
$txnflag -env $env -dup dessert.db]
error_check_good dbopen [is_valid_db $ddb] TRUE
foreach kd $fruit {
set k [lindex $kd 0]
set d [lindex $kd 1]
set ret [$fdb put $k $d]
set ret [eval {$fdb put} $txnflag {$k $d}]
error_check_good fruit_put $ret 0
}
error_check_good sync [$fdb sync] 0
foreach kd $price {
set k [lindex $kd 0]
set d [lindex $kd 1]
set ret [$pdb put $k $d]
set ret [eval {$pdb put} $txnflag {$k $d}]
error_check_good price_put $ret 0
}
error_check_good sync [$pdb sync] 0
foreach kd $dessert {
set k [lindex $kd 0]
set d [lindex $kd 1]
set ret [$ddb put $k $d]
set ret [eval {$ddb put} $txnflag {$k $d}]
error_check_good dessert_put $ret 0
}
error_check_good sync [$ddb sync] 0
@ -326,7 +331,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
#
set curs_list {}
set txn_list {}
set msgnum [expr $op * 2 + 1]
set msgnum [expr $op * 2 + 1]
if { $use_txn } {
puts "\t$msg$msgnum: Set up txns and join cursor"
set txn [$env txn]
@ -346,7 +351,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
#
# Start a cursor, (using txn child0 in the fruit and price dbs, if
# needed). # Just pick something simple to join on.
# needed). # Just pick something simple to join on.
# Then call join on the dessert db.
#
set fkey yellow
@ -372,7 +377,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
set ret [$jdbc get]
error_check_bad jget [llength $ret] 0
set msgnum [expr $op * 2 + 2]
set msgnum [expr $op * 2 + 2]
if { $op == 1 } {
puts -nonewline "\t$msg$msgnum: Timeout all cursors"
if { $use_txn } {

View file

@ -1,16 +1,17 @@
# See the file LICENSE for redistribution information.
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rpc002.tcl,v 1.7 2000/10/27 13:23:56 sue Exp $
# $Id: rpc002.tcl,v 1.17 2002/07/16 20:53:03 bostic Exp $
#
# RPC Test 2
# Test invalid RPC functions and make sure we error them correctly
# TEST rpc002
# TEST Test invalid RPC functions and make sure we error them correctly
proc rpc002 { } {
global __debug_on
global __debug_print
global errorInfo
global rpc_svc
source ./include.tcl
set testfile "rpc002.db"
@ -20,9 +21,9 @@ proc rpc002 { } {
#
puts "Rpc002: Unsupported interface test"
if { [string compare $rpc_server "localhost"] == 0 } {
set dpid [exec $util_path/berkeley_db_svc -h $rpc_testdir &]
set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
} else {
set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir &]
}
puts "\tRpc002.a: Started server, pid $dpid"
@ -32,7 +33,7 @@ proc rpc002 { } {
puts "\tRpc002.b: Unsupported env options"
#
# Test each "pre-open" option for env's. These need to be
# tested on the 'berkdb env' line.
# tested on the 'berkdb_env' line.
#
set rlist {
{ "-data_dir $rpc_testdir" "Rpc002.b0"}
@ -50,8 +51,8 @@ proc rpc002 { } {
{ "-verbose {recovery on}" "Rpc002.b13"}
}
set e "berkdb env -create -mode 0644 -home $home -server $rpc_server \
-client_timeout 10000 -txn"
set e "berkdb_env_noerr -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000 -txn"
foreach pair $rlist {
set cmd [lindex $pair 0]
set msg [lindex $pair 1]
@ -60,7 +61,7 @@ proc rpc002 { } {
set stat [catch {eval $e $cmd} ret]
error_check_good $cmd $stat 1
error_check_good $cmd.err \
[is_substr $errorInfo "meaningless in RPC env"] 1
[is_substr $errorInfo "meaningless in an RPC env"] 1
}
#
@ -68,7 +69,7 @@ proc rpc002 { } {
# the rest)
#
puts "\tRpc002.c: Unsupported env related interfaces"
set env [eval {berkdb env -create -mode 0644 -home $home \
set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000 -txn}]
error_check_good envopen [is_valid_env $env] TRUE
set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
@ -89,16 +90,14 @@ proc rpc002 { } {
{ " log_archive" "Rpc002.c5"}
{ " log_file {0 0}" "Rpc002.c6"}
{ " log_flush" "Rpc002.c7"}
{ " log_get -current" "Rpc002.c8"}
{ " log_register $db $testfile" "Rpc002.c9"}
{ " log_stat" "Rpc002.c10"}
{ " log_unregister $db" "Rpc002.c11"}
{ " mpool -create -pagesize 512" "Rpc002.c12"}
{ " mpool_stat" "Rpc002.c13"}
{ " mpool_sync {0 0}" "Rpc002.c14"}
{ " mpool_trickle 50" "Rpc002.c15"}
{ " txn_checkpoint -min 1" "Rpc002.c16"}
{ " txn_stat" "Rpc002.c17"}
{ " log_cursor" "Rpc002.c8"}
{ " log_stat" "Rpc002.c9"}
{ " mpool -create -pagesize 512" "Rpc002.c10"}
{ " mpool_stat" "Rpc002.c11"}
{ " mpool_sync {0 0}" "Rpc002.c12"}
{ " mpool_trickle 50" "Rpc002.c13"}
{ " txn_checkpoint -min 1" "Rpc002.c14"}
{ " txn_stat" "Rpc002.c15"}
}
foreach pair $rlist {
@ -109,7 +108,7 @@ proc rpc002 { } {
set stat [catch {eval $env $cmd} ret]
error_check_good $cmd $stat 1
error_check_good $cmd.err \
[is_substr $errorInfo "meaningless in RPC env"] 1
[is_substr $errorInfo "meaningless in an RPC env"] 1
}
error_check_good dbclose [$db close] 0
@ -128,7 +127,7 @@ proc rpc002 { } {
set stat [catch {eval $dbcmd} ret]
error_check_good dbopen_cache $stat 1
error_check_good dbopen_cache_err \
[is_substr $errorInfo "meaningless in RPC env"] 1
[is_substr $errorInfo "meaningless in an RPC env"] 1
puts "\tRpc002.d1: Try to upgrade a database"
#
@ -136,9 +135,9 @@ proc rpc002 { } {
set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
error_check_good dbupgrade $stat 1
error_check_good dbupgrade_err \
[is_substr $errorInfo "meaningless in RPC env"] 1
[is_substr $errorInfo "meaningless in an RPC env"] 1
error_check_good envclose [$env close] 0
exec $KILL $dpid
tclkill $dpid
}

166
bdb/test/rpc003.tcl Normal file
View file

@ -0,0 +1,166 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2001-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rpc003.tcl,v 11.9 2002/07/16 20:53:03 bostic Exp $
#
# Test RPC and secondary indices.
# rpc003 -- Test RPC and secondary indices.
#
# Secondary indices only work read-only over RPC, so the primary and
# two secondary btree databases are built and populated locally first,
# then re-read through a freshly started RPC server.  Harness helpers
# (error_check_good, convert_method, callback_n, check_secondaries,
# tclkill, ...) are presumably provided via ./include.tcl -- not
# visible here.
proc rpc003 { } {
source ./include.tcl
global dict nsecondaries
global rpc_svc
#
# First set up the files. Secondary indices only work readonly
# over RPC. So we need to create the databases first without
# RPC. Then run checking over RPC.
#
puts "Rpc003: Secondary indices over RPC"
# Only runs against a server on the local host.
if { [string compare $rpc_server "localhost"] != 0 } {
puts "Cannot run to non-local RPC server. Skipping."
return
}
cleanup $testdir NULL
puts "\tRpc003.a: Creating local secondary index databases"
# Primary method/args.
set pmethod btree
set pomethod [convert_method $pmethod]
set pargs ""
set methods {dbtree dbtree}
set argses [convert_argses $methods ""]
set omethods [convert_methods $methods]
set nentries 500
puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
set pname "primary003.db"
set snamebase "secondary003"
# Open an environment
# XXX if one is not supplied!
set env [berkdb_env -create -home $testdir]
error_check_good env_open [is_valid_env $env] TRUE
# Open the primary.
set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
error_check_good primary_open [is_valid_db $pdb] TRUE
# Open and associate the secondaries
set sdbs {}
for { set i 0 } { $i < [llength $omethods] } { incr i } {
set sdb [eval {berkdb_open -create -env} $env \
[lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
error_check_good second_open($i) [is_valid_db $sdb] TRUE
error_check_good db_associate($i) \
[$pdb associate [callback_n $i] $sdb] 0
lappend sdbs $sdb
}
# Load the primary (and, through the associations, the secondaries)
# with up to $nentries key/data pairs taken from the word list $dict.
set did [open $dict]
for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
if { [is_record_based $pmethod] == 1 } {
set key [expr $n + 1]
set datum $str
} else {
set key $str
gets $did datum
}
set keys($n) $key
set data($n) [pad_data $pmethod $datum]
set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
error_check_good put($n) $ret 0
}
close $did
foreach sdb $sdbs {
error_check_good secondary_close [$sdb close] 0
}
error_check_good primary_close [$pdb close] 0
error_check_good env_close [$env close] 0
#
# We have set up our databases, so now start the server and
# read them over RPC.
#
set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
puts "\tRpc003.c: Started server, pid $dpid"
tclsleep 2
set home [file tail $rpc_testdir]
set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
-server $rpc_server}]
error_check_good lock_env:open [is_valid_env $env] TRUE
#
# Attempt to send in a NULL callback to associate. It will fail
# if the primary and secondary are not both read-only.
#
set msg "\tRpc003.d"
puts "$msg: Using r/w primary and r/w secondary"
set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
set sopen "berkdb_open_noerr -create -env $env \
[lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
rpc003_assoc_err $popen $sopen $msg
set msg "\tRpc003.e"
puts "$msg: Using r/w primary and read-only secondary"
set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
set sopen "berkdb_open_noerr -env $env -rdonly \
[lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
rpc003_assoc_err $popen $sopen $msg
set msg "\tRpc003.f"
puts "$msg: Using read-only primary and r/w secondary"
set popen "berkdb_open_noerr -env $env $pomethod -rdonly $pargs $pname"
set sopen "berkdb_open_noerr -create -env $env \
[lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
rpc003_assoc_err $popen $sopen $msg
# Open and associate the secondaries
# With both handles read-only, a NULL-callback associate should now
# succeed and the previously loaded data should verify.
puts "\tRpc003.g: Checking secondaries, both read-only"
set pdb [eval {berkdb_open_noerr -env} $env \
-rdonly $pomethod $pargs $pname]
error_check_good primary_open2 [is_valid_db $pdb] TRUE
set sdbs {}
for { set i 0 } { $i < [llength $omethods] } { incr i } {
set sdb [eval {berkdb_open -env} $env -rdonly \
[lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
error_check_good second_open2($i) [is_valid_db $sdb] TRUE
error_check_good db_associate2($i) \
[eval {$pdb associate} "" $sdb] 0
lappend sdbs $sdb
}
check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h"
foreach sdb $sdbs {
error_check_good secondary_close [$sdb close] 0
}
error_check_good primary_close [$pdb close] 0
error_check_good env_close [$env close] 0
# Shut down the RPC server we started.
tclkill $dpid
}
# rpc003_assoc_err --
#	Open a primary ($popen) and a secondary ($sopen) with the given
#	command strings and verify that 'associate' fails both with a
#	NULL callback (expects "invalid" in the error) and with a real
#	callback (expects "not supported in RPC").  $msg prefixes the
#	progress output.  Both handles are closed before returning.
proc rpc003_assoc_err { popen sopen msg } {
set pdb [eval $popen]
error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
puts "$msg.0: NULL callback"
set sdb [eval $sopen]
error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
set stat [catch {eval {$pdb associate} "" $sdb} ret]
error_check_good db_associate:rdonly $stat 1
error_check_good db_associate:inval [is_substr $ret invalid] 1
puts "$msg.1: non-NULL callback"
set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
error_check_good db_associate:callback $stat 1
error_check_good db_associate:rpc \
[is_substr $ret "not supported in RPC"] 1
error_check_good assoc_sclose [$sdb close] 0
error_check_good assoc_pclose [$pdb close] 0
}

76
bdb/test/rpc004.tcl Normal file
View file

@ -0,0 +1,76 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rpc004.tcl,v 11.6 2002/07/16 20:53:03 bostic Exp $
#
# TEST rpc004
# TEST Test RPC server and security
# rpc004 -- Test RPC server and security.
#
# Starts the RPC server with a password (-P), creates an AES-encrypted
# environment over RPC, then exercises put/get inside one transaction
# on both a non-encrypted and an encrypted database.  The environment
# is cleaned up at the end because it is encrypted.
proc rpc004 { } {
global __debug_on
global __debug_print
global errorInfo
global passwd
global rpc_svc
source ./include.tcl
puts "Rpc004: RPC server + security"
cleanup $testdir NULL
# Start the server locally, or via rsh on a remote host.
if { [string compare $rpc_server "localhost"] == 0 } {
set dpid [exec $util_path/$rpc_svc \
-h $rpc_testdir -P $passwd &]
} else {
set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir -P $passwd &]
}
puts "\tRpc004.a: Started server, pid $dpid"
tclsleep 2
remote_cleanup $rpc_server $rpc_testdir $testdir
puts "\tRpc004.b: Creating environment"
set testfile "rpc004.db"
set testfile1 "rpc004a.db"
set home [file tail $rpc_testdir]
set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -encryptaes $passwd -txn}]
error_check_good lock_env:open [is_valid_env $env] TRUE
puts "\tRpc004.c: Opening a non-encrypted database"
#
# NOTE: the type of database doesn't matter, just use btree.
set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
-env $env $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
puts "\tRpc004.d: Opening an encrypted database"
set db1 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
-env $env -encrypt $testfile1]
error_check_good dbopen [is_valid_db $db1] TRUE
set txn [$env txn]
error_check_good txn [is_valid_txn $txn $env] TRUE
# Same key/data pair through both handles, inside the one txn.
puts "\tRpc004.e: Put/get on both databases"
set key "key"
set data "data"
set ret [$db put -txn $txn $key $data]
error_check_good db_put $ret 0
set ret [$db get -txn $txn $key]
error_check_good db_get $ret [list [list $key $data]]
set ret [$db1 put -txn $txn $key $data]
error_check_good db1_put $ret 0
set ret [$db1 get -txn $txn $key]
error_check_good db1_get $ret [list [list $key $data]]
error_check_good txn_commit [$txn commit] 0
error_check_good db_close [$db close] 0
error_check_good db1_close [$db1 close] 0
error_check_good env_close [$env close] 0
# Cleanup our environment because it's encrypted
remote_cleanup $rpc_server $rpc_testdir $testdir
tclkill $dpid
}

137
bdb/test/rpc005.tcl Normal file
View file

@ -0,0 +1,137 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rpc005.tcl,v 11.4 2002/07/16 20:53:03 bostic Exp $
#
# TEST rpc005
# TEST Test RPC server handle ID sharing
# rpc005 -- Test RPC server handle ID sharing.
#
# The RPC server is expected to hand back the same handle ID ('rpcid')
# for identically configured environment/database opens and distinct
# IDs for differently configured ones.  Checks envs, plain databases,
# and subdatabases.
proc rpc005 { } {
global __debug_on
global __debug_print
global errorInfo
global rpc_svc
source ./include.tcl
puts "Rpc005: RPC server handle sharing"
# Start the server locally, or via rsh on a remote host.
if { [string compare $rpc_server "localhost"] == 0 } {
set dpid [exec $util_path/$rpc_svc \
-h $rpc_testdir &]
} else {
set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir &]
}
puts "\tRpc005.a: Started server, pid $dpid"
tclsleep 2
remote_cleanup $rpc_server $rpc_testdir $testdir
puts "\tRpc005.b: Creating environment"
set testfile "rpc005.db"
set testfile1 "rpc005a.db"
set subdb1 "subdb1"
set subdb2 "subdb2"
set home [file tail $rpc_testdir]
set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -txn}]
error_check_good lock_env:open [is_valid_env $env] TRUE
# An env opened with identical flags must share $env's rpcid; one
# opened with different flags (nosync) must not.
puts "\tRpc005.c: Compare identical and different configured envs"
set env_ident [eval {berkdb_env -home $home \
-server $rpc_server -txn}]
error_check_good lock_env:open [is_valid_env $env_ident] TRUE
set env_diff [eval {berkdb_env -home $home \
-server $rpc_server -txn nosync}]
error_check_good lock_env:open [is_valid_env $env_diff] TRUE
error_check_good ident:id [$env rpcid] [$env_ident rpcid]
error_check_bad diff:id [$env rpcid] [$env_diff rpcid]
error_check_good envclose [$env_diff close] 0
error_check_good envclose [$env_ident close] 0
puts "\tRpc005.d: Opening a database"
set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
-env $env $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
# Same idea for database handles: an identical re-open shares the
# id, a -rdonly open gets a new one, and two -rdonly opens share.
puts "\tRpc005.e: Compare identical and different configured dbs"
set db_ident [eval {berkdb_open -btree} -env $env $testfile]
error_check_good dbopen [is_valid_db $db_ident] TRUE
set db_diff [eval {berkdb_open -btree} -env $env -rdonly $testfile]
error_check_good dbopen [is_valid_db $db_diff] TRUE
set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly $testfile]
error_check_good dbopen [is_valid_db $db_diff2] TRUE
error_check_good ident:id [$db rpcid] [$db_ident rpcid]
error_check_bad diff:id [$db rpcid] [$db_diff rpcid]
error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid]
error_check_good db_close [$db_ident close] 0
error_check_good db_close [$db_diff close] 0
error_check_good db_close [$db_diff2 close] 0
error_check_good db_close [$db close] 0
# Subdatabases of the same file must get distinct ids from each
# other and from a -unknown open of the whole file.
puts "\tRpc005.f: Compare with a database and subdatabases"
set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
-env $env $testfile1 $subdb1]
error_check_good dbopen [is_valid_db $db] TRUE
set dbid [$db rpcid]
set db2 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
-env $env $testfile1 $subdb2]
error_check_good dbopen [is_valid_db $db2] TRUE
set db2id [$db2 rpcid]
error_check_bad 2subdb:id $dbid $db2id
set db_ident [eval {berkdb_open -btree} -env $env $testfile1 $subdb1]
error_check_good dbopen [is_valid_db $db_ident] TRUE
set identid [$db_ident rpcid]
set db_ident2 [eval {berkdb_open -btree} -env $env $testfile1 $subdb2]
error_check_good dbopen [is_valid_db $db_ident2] TRUE
set ident2id [$db_ident2 rpcid]
set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \
$testfile1 $subdb1]
error_check_good dbopen [is_valid_db $db_diff1] TRUE
set diff1id [$db_diff1 rpcid]
set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
$testfile1 $subdb2]
error_check_good dbopen [is_valid_db $db_diff2] TRUE
set diff2id [$db_diff2 rpcid]
set db_diff [eval {berkdb_open -unknown} -env $env -rdonly $testfile1]
error_check_good dbopen [is_valid_db $db_diff] TRUE
set diffid [$db_diff rpcid]
set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \
$testfile1 $subdb2]
error_check_good dbopen [is_valid_db $db_diff2a] TRUE
set diff2aid [$db_diff2a rpcid]
error_check_good ident:id $dbid $identid
error_check_good ident2:id $db2id $ident2id
error_check_bad diff:id $dbid $diffid
error_check_bad diff2:id $db2id $diffid
error_check_bad diff3:id $diff2id $diffid
error_check_bad diff4:id $diff1id $diffid
error_check_good diff2a:id $diff2id $diff2aid
error_check_good db_close [$db_ident close] 0
error_check_good db_close [$db_ident2 close] 0
error_check_good db_close [$db_diff close] 0
error_check_good db_close [$db_diff1 close] 0
error_check_good db_close [$db_diff2 close] 0
error_check_good db_close [$db_diff2a close] 0
error_check_good db_close [$db2 close] 0
error_check_good db_close [$db close] 0
error_check_good env_close [$env close] 0
# Shut down the RPC server we started.
tclkill $dpid
}

View file

@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rsrc001.tcl,v 11.18 2001/01/18 06:41:03 krinsky Exp $
# $Id: rsrc001.tcl,v 11.23 2002/01/11 15:53:33 bostic Exp $
#
# Recno backing file test.
# Try different patterns of adding records and making sure that the
# corresponding file matches
# TEST rsrc001
# TEST Recno backing file test. Try different patterns of adding
# TEST records and making sure that the corresponding file matches.
proc rsrc001 { } {
source ./include.tcl
@ -47,7 +47,7 @@ proc rsrc001 { } {
# Now fill out the backing file and create the check file.
set oid1 [open $testdir/rsrc.txt a]
set oid2 [open $testdir/check.txt w]
# This one was already put into rsrc.txt.
puts $oid2 $rec1
@ -154,15 +154,15 @@ proc rsrc001 { } {
set rec "Last record with reopen"
puts $oid $rec
incr key
incr key
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno_with_reopen $ret 0
puts "\tRsrc001.g:\
Put several beyond end of file, after reopen."
Put several beyond end of file, after reopen with snapshot."
error_check_good db_close [$db close] 0
set db [eval {berkdb_open -create -mode 0644\
-recno -source $testdir/rsrc.txt} $testfile]
-snapshot -recno -source $testdir/rsrc.txt} $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set rec "Really really last record with reopen"
@ -171,7 +171,7 @@ proc rsrc001 { } {
puts $oid ""
puts $oid $rec
incr key
incr key
incr key
incr key
incr key
@ -179,8 +179,6 @@ proc rsrc001 { } {
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno_with_reopen $ret 0
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0

View file

@ -1,13 +1,14 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999, 2000
# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rsrc002.tcl,v 11.11 2000/11/29 15:01:06 sue Exp $
# $Id: rsrc002.tcl,v 11.14 2002/01/11 15:53:33 bostic Exp $
#
# Recno backing file test #2: test of set_re_delim.
# Specify a backing file with colon-delimited records,
# and make sure they are correctly interpreted.
# TEST rsrc002
# TEST Recno backing file test #2: test of set_re_delim. Specify a backing
# TEST file with colon-delimited records, and make sure they are correctly
# TEST interpreted.
proc rsrc002 { } {
source ./include.tcl

View file

@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 1996, 1997, 1998, 1999, 2000
# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rsrc003.tcl,v 11.1 2000/11/29 18:28:49 sue Exp $
# $Id: rsrc003.tcl,v 11.5 2002/01/11 15:53:33 bostic Exp $
#
# Recno backing file test.
# Try different patterns of adding records and making sure that the
# corresponding file matches
# TEST rsrc003
# TEST Recno backing file test. Try different patterns of adding
# TEST records and making sure that the corresponding file matches.
proc rsrc003 { } {
source ./include.tcl
global fixed_len
@ -26,7 +26,7 @@ proc rsrc003 { } {
set bigrec3 [replicate "This is record 3 " 512]
set orig_fixed_len $fixed_len
set rlist {
set rlist {
{{$rec1 $rec2 $rec3} "small records" }
{{$bigrec1 $bigrec2 $bigrec3} "large records" }}
@ -65,26 +65,26 @@ proc rsrc003 { } {
puts \
"Rsrc003: Testing with disk-backed database with $msg."
}
puts -nonewline \
"\tRsrc003.a: Read file, rewrite last record;"
puts " write it out and diff"
set db [eval {berkdb_open -create -mode 0644 -recno \
-len $reclen -source $testdir/rsrc.txt} $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
# Read the last record; replace it (don't change it).
# Then close the file and diff the two files.
set txn ""
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor \
[is_valid_cursor $dbc $db] TRUE
set rec [$dbc get -last]
error_check_good get_last [llength [lindex $rec 0]] 2
set key [lindex [lindex $rec 0] 0]
set data [lindex [lindex $rec 0] 1]
# Get the last record from the text file
set oid [open $testdir/rsrc.txt]
set laststr ""
@ -95,17 +95,17 @@ proc rsrc003 { } {
close $oid
set data [sanitize_record $data]
error_check_good getlast $data $laststr
set ret [eval {$db put} $txn {$key $data}]
error_check_good replace_last $ret 0
error_check_good curs_close [$dbc close] 0
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0
error_check_good \
diff1($testdir/rsrc.txt,$testdir/check.txt) \
[filecmp $testdir/rsrc.txt $testdir/check.txt] 0
puts -nonewline "\tRsrc003.b: "
puts "Append some records in tree and verify in file."
set oid [open $testdir/check.txt a]
@ -124,7 +124,7 @@ proc rsrc003 { } {
set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
error_check_good \
diff2($testdir/{rsrc.txt,check.txt}) $ret 0
puts "\tRsrc003.c: Append by record number"
set oid [open $testdir/check.txt a]
for {set i 1} {$i < 10} {incr i} {
@ -136,14 +136,14 @@ proc rsrc003 { } {
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno $ret 0
}
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0
close $oid
set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
error_check_good \
diff3($testdir/{rsrc.txt,check.txt}) $ret 0
puts \
"\tRsrc003.d: Verify proper syncing of changes on close."
error_check_good Rsrc003:db_close [$db close] 0
@ -171,4 +171,3 @@ proc rsrc003 { } {
set fixed_len $orig_fixed_len
return
}

52
bdb/test/rsrc004.tcl Normal file
View file

@ -0,0 +1,52 @@
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2001-2002
# Sleepycat Software. All rights reserved.
#
# $Id: rsrc004.tcl,v 11.3 2002/01/11 15:53:33 bostic Exp $
#
# TEST rsrc004
# TEST Recno backing file test for EOF-terminated records.
# rsrc004 -- Recno backing file test for EOF-terminated records.
#
# For both fixed- and variable-length recno, write a backing text file
# whose last record ("record 3") has no trailing newline, open a recno
# database over it, and verify the last record is read correctly (the
# fixed-length case expects it padded out to the record length).
proc rsrc004 { } {
source ./include.tcl
foreach isfixed { 0 1 } {
cleanup $testdir NULL
# Create the backing text file.
set oid1 [open $testdir/rsrc.txt w]
if { $isfixed == 1 } {
puts -nonewline $oid1 "record 1xxx"
puts -nonewline $oid1 "record 2xxx"
} else {
puts $oid1 "record 1xxx"
puts $oid1 "record 2xxx"
}
puts -nonewline $oid1 "record 3"
close $oid1
set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
if { $isfixed == 1 } {
append args " -len [string length "record 1xxx"]"
set match "record 3 "
puts "Rsrc004: EOF-terminated recs: fixed length"
} else {
puts "Rsrc004: EOF-terminated recs: variable length"
set match "record 3"
}
puts "\tRsrc004.a: Read file, verify correctness."
set db [eval berkdb_open $args "$testdir/rsrc004.db"]
error_check_good dbopen [is_valid_db $db] TRUE
# Read the last record
set dbc [eval {$db cursor} ""]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set rec [$dbc get -last]
error_check_good get_last $rec [list [list 3 $match]]
error_check_good dbc_close [$dbc close] 0
error_check_good db_close [$db close] 0
}
}

37
bdb/test/scr001/chk.code Normal file
View file

@ -0,0 +1,37 @@
#!/bin/sh -
#
# $Id: chk.code,v 1.10 2002/02/04 16:03:26 bostic Exp $
#
# Check to make sure that the code samples in the documents build.
#
# Each *.cs file under docs_src is a C sample marked up with m4 macros;
# the sed script strips/translates the markup into plain C, which is
# then compiled and linked against libdb.a.  Any failure fails the check.

d=../..

[ -d "$d/docs_src" ] || {
	echo 'FAIL: cannot find source distribution directory.'
	exit 1
}

# Build the library first if it is not already present; the samples
# link against it.
[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
	echo 'FAIL: unable to find or build libdb.a'
	exit 1
}

# NB: the for-over-$() word-splits; sample file names contain no
# whitespace in this tree.
for i in $(find "$d/docs_src" -name '*.cs'); do
	echo " compiling $i"
	sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
	    -e 's/m4_[a-z]*[(\[)]*//g' \
	    -e 's/(\[//g' \
	    -e '/argv/!s/])//g' \
	    -e 's/dnl//g' \
	    -e 's/__GT__/>/g' \
	    -e 's/__LB__/[/g' \
	    -e 's/__LT__/</g' \
	    -e 's/__RB__/]/g' < "$i" > t.c
	if cc -Wall -Werror -I.. t.c ../libdb.a -o t; then
		:
	else
		echo "FAIL: unable to compile $i"
		exit 1
	fi
done
exit 0

64
bdb/test/scr002/chk.def Normal file
View file

@ -0,0 +1,64 @@
#!/bin/sh -
#
# $Id: chk.def,v 1.9 2002/03/27 04:32:57 bostic Exp $
#
# Check to make sure we haven't forgotten to add any interfaces
# to the Win32 libdb.def file.
d=../..
# Test must be run from the top-level directory, not from a test directory.
[ -f $d/LICENSE ] || {
echo 'FAIL: cannot find source distribution directory.'
exit 1
}
f=$d/build_win32/libdb.def
t1=__1
t2=__2
exitv=0
# Exported names from the .def file: drop ';'-comment lines, keep
# lines containing '@', take the first field, discard special-cased
# and internal (leading '__') names, sort for comparison.
sed '/; /d' $f |
egrep @ |
awk '{print $1}' |
sed -e '/db_xa_switch/d' \
-e '/^__/d' -e '/^;/d' |
sort > $t1
# Public prototypes from the generated ext_prot.in, normalized the
# same way (strip a leading '*' from pointer returns, drop internals).
egrep __P $d/dbinc_auto/ext_prot.in |
sed '/^[a-z]/!d' |
awk '{print $2}' |
sed 's/^\*//' |
sed '/^__/d' | sort > $t2
# The two sorted lists must match; a difference means libdb.def is
# missing (or disagrees with) a public interface.
if cmp -s $t1 $t2 ; then
:
else
echo "<<< libdb.def >>> DB include files"
diff $t1 $t2
echo "FAIL: missing items in libdb.def file."
exitv=1
fi
# Check to make sure we don't have any extras in the libdb.def file.
sed '/; /d' $f |
egrep @ |
awk '{print $1}' |
sed -e '/__db_global_values/d' > $t1
# Any exported name that never appears in the C sources is an extra.
for i in `cat $t1`; do
if egrep $i $d/*/*.c > /dev/null; then
:
else
echo "$f: $i not found in DB sources"
fi
done > $t2
test -s $t2 && {
cat $t2
echo "FAIL: found unnecessary items in libdb.def file."
exitv=1
}
exit $exitv

View file

@ -0,0 +1,77 @@
#!/bin/sh -
#
# $Id: chk.define,v 1.21 2002/03/27 04:32:58 bostic Exp $
#
# Check to make sure that all #defines are actually used.
d=../..
[ -f $d/LICENSE ] || {
echo 'FAIL: cannot find source distribution directory.'
exit 1
}
exitv=0
t1=__1
t2=__2
t3=__3
# Collect every macro name #defined in the include files, excluding
# db_185.in and xa.h, then drop a long list of names that are known
# to be legitimately unreferenced (list-package macros, versioning
# constants, etc.); finally strip any macro parameter list ('(...')
# and sort.
egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in |
sed -e '/db_185.in/d' -e '/xa.h/d' |
awk '{print $2}' |
sed -e '/^B_DELETE/d' \
-e '/^B_MAX/d' \
-e '/^CIRCLEQ_/d' \
-e '/^DB_BTREEOLDVER/d' \
-e '/^DB_HASHOLDVER/d' \
-e '/^DB_LOCKVERSION/d' \
-e '/^DB_MAX_PAGES/d' \
-e '/^DB_QAMOLDVER/d' \
-e '/^DB_TXNVERSION/d' \
-e '/^DB_UNUSED/d' \
-e '/^DEFINE_DB_CLASS/d' \
-e '/^HASH_UNUSED/d' \
-e '/^LIST_/d' \
-e '/^LOG_OP/d' \
-e '/^MINFILL/d' \
-e '/^MUTEX_FIELDS/d' \
-e '/^NCACHED2X/d' \
-e '/^NCACHED30/d' \
-e '/^PAIR_MASK/d' \
-e '/^P_16_COPY/d' \
-e '/^P_32_COPY/d' \
-e '/^P_32_SWAP/d' \
-e '/^P_TO_UINT16/d' \
-e '/^QPAGE_CHKSUM/d' \
-e '/^QPAGE_NORMAL/d' \
-e '/^QPAGE_SEC/d' \
-e '/^SH_CIRCLEQ_/d' \
-e '/^SH_LIST_/d' \
-e '/^SH_TAILQ_/d' \
-e '/^SIZEOF_PAGE/d' \
-e '/^TAILQ_/d' \
-e '/^WRAPPED_CLASS/d' \
-e '/^__BIT_TYPES_DEFINED__/d' \
-e '/^__DBC_INTERNAL/d' \
-e '/^i_/d' \
-e '/_H_/d' \
-e 's/(.*//' | sort > $t1
# All C and C++ sources that could reference a macro.
find $d -name '*.c' -o -name '*.cpp' > $t2
# For each macro never mentioned in any source file, report it along
# with the header(s) that define it.
for i in `cat $t1`; do
if egrep -w $i `cat $t2` > /dev/null; then
:;
else
f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in |
sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "`
echo "FAIL: $i: $f"
fi
done | sort -k 2 > $t3
test -s $t3 && {
cat $t3
echo "FAIL: found unused #defines"
exit 1
}
exit $exitv

View file

@ -0,0 +1,31 @@
#!/bin/sh -
#
# $Id: chk.javafiles,v 1.5 2002/01/30 19:50:52 bostic Exp $
#
# Check to make sure we haven't forgotten to add any Java files to the list
# of source files in the Makefile.

d=../..

[ -f "$d"/LICENSE ] || {
	echo 'FAIL: cannot find source distribution directory.'
	exit 1
}

# Makefile naming the Java sources, and the Java source tree roots.
f=$d/dist/Makefile.in
j=$d/java/src/com/sleepycat

t1=__1
t2=__2

# Basenames of every Java source file in the tree...
find "$j"/db/ "$j"/examples "$d"/rpc_server/java -name \*.java -print |
	sed -e 's/^.*\///' | sort > $t1
# ...and every .java name mentioned anywhere in the Makefile.
tr ' \t' '\n' < "$f" | sed -e '/\.java$/!d' -e 's/^.*\///' | sort > $t2

# Any difference in either direction is a failure.
cmp $t1 $t2 > /dev/null || {
	echo "<<< java source files >>> Makefile"
	diff $t1 $t2
	exit 1
}
exit 0

112
bdb/test/scr005/chk.nl Normal file
View file

@ -0,0 +1,112 @@
#!/bin/sh -
#
# $Id: chk.nl,v 1.6 2002/01/07 15:12:12 bostic Exp $
#
# Check to make sure that there are no trailing newlines in __db_err calls.
d=../..
[ -f $d/README ] || {
echo "FAIL: chk.nl can't find the source directory."
exit 1
}
# Generate a small C program (t.c) that scans each argument file for
# __db_err calls whose format string ends in a period or an escaped
# newline, reporting the file and line of each offender.
# NB: the here-document delimiter is unquoted, so the shell rewrites
# '\\n' and '\\\\' below to '\n' and '\\' in the generated t.c.
cat << END_OF_CODE > t.c
#include <sys/types.h>

#include <errno.h>
#include <stdio.h>

int chk(FILE *, char *);

int
main(argc, argv)
	int argc;
	char *argv[];
{
	FILE *fp;
	int exitv;

	for (exitv = 0; *++argv != NULL;) {
		if ((fp = fopen(*argv, "r")) == NULL) {
			fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
			return (1);
		}
		if (chk(fp, *argv))
			exitv = 1;
		(void)fclose(fp);
	}
	return (exitv);
}

int
chk(fp, name)
	FILE *fp;
	char *name;
{
	int ch, exitv, line, q;

	exitv = 0;
	for (ch = 'a', line = 1;;) {
		if ((ch = getc(fp)) == EOF)
			return (exitv);
		if (ch == '\n') {
			++line;
			continue;
		}
		if (ch != '_') continue;
		if ((ch = getc(fp)) != '_') continue;
		if ((ch = getc(fp)) != 'd') continue;
		if ((ch = getc(fp)) != 'b') continue;
		if ((ch = getc(fp)) != '_') continue;
		if ((ch = getc(fp)) != 'e') continue;
		if ((ch = getc(fp)) != 'r') continue;
		if ((ch = getc(fp)) != 'r') continue;
		while ((ch = getc(fp)) != '"') {
			if (ch == EOF)
				return (exitv);
			if (ch == '\n')
				++line;
		}
		while ((ch = getc(fp)) != '"')
			switch (ch) {
			case EOF:
				return (exitv);
			case '\\n':
				++line;
				break;
			case '.':
				if ((ch = getc(fp)) != '"')
					ungetc(ch, fp);
				else {
					fprintf(stderr,
					    "%s: <period> at line %d\n", name, line);
					exitv = 1;
				}
				break;
			case '\\\\':
				if ((ch = getc(fp)) != 'n')
					ungetc(ch, fp);
				else if ((ch = getc(fp)) != '"')
					ungetc(ch, fp);
				else {
					fprintf(stderr,
					    "%s: <newline> at line %d\n", name, line);
					exitv = 1;
				}
				break;
			}
	}
	return (exitv);
}
END_OF_CODE
# Build the checker and run it over all C, C++ and .in sources.
cc t.c -o t
if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
:
else
echo "FAIL: found __db_err calls ending with periods/newlines."
exit 1
fi
exit 0

36
bdb/test/scr006/chk.offt Normal file
View file

@ -0,0 +1,36 @@
#!/bin/sh -
#
# $Id: chk.offt,v 1.9 2001/10/26 13:40:15 bostic Exp $
#
# Make sure that no off_t's have snuck into the release.
#
# Search every C source, header and .in file for the word off_t, then
# discard the known, deliberate uses; anything left over is reported
# and fails the check.

d=../..

[ -f "$d"/LICENSE ] || {
	echo 'FAIL: cannot find source distribution directory.'
	exit 1
}

tmp=__1

# Whole-word matches only, with the sanctioned occurrences filtered out.
egrep -w off_t "$d"/*/*.[ch] "$d"/*/*.in |
	sed -e "/#undef off_t/d" \
	    -e "/mp_fopen.c:.*can't use off_t's here/d" \
	    -e "/mp_fopen.c:.*size or type off_t's or/d" \
	    -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
	    -e "/mutex\/tm.c:/d" \
	    -e "/os_map.c:.*(off_t)0))/d" \
	    -e "/os_rw.c:.*(off_t)db_iop->pgno/d" \
	    -e "/os_seek.c:.*off_t offset;/d" \
	    -e "/os_seek.c:.*offset = /d" \
	    -e "/test_perf\/perf_misc.c:/d" \
	    -e "/test_server\/dbs.c:/d" \
	    -e "/test_vxworks\/vx_mutex.c:/d" > "$tmp"

if [ -s "$tmp" ]; then
	cat "$tmp"
	echo "FAIL: found questionable off_t usage"
	exit 1
fi
exit 0

45
bdb/test/scr007/chk.proto Normal file
View file

@ -0,0 +1,45 @@
#!/bin/sh -
#
# $Id: chk.proto,v 1.8 2002/03/27 04:32:59 bostic Exp $
#
# Check to make sure that prototypes are actually needed.
#
# A prototype is flagged when its name occurs in exactly one of the
# scanned files -- i.e. nothing else in the tree references it.

d=../..

[ -f "$d"/LICENSE ] || {
	echo 'FAIL: cannot find source distribution directory.'
	exit 1
}

t1=__1
t2=__2
t3=__3

# Extract function names from the generated prototype lists, dropping
# names that are expected to be single-reference (debug helpers,
# generated log/print/read routines, and similar).
egrep '__P' "$d"/dbinc_auto/*.h |
	sed -e 's/[ ][ ]*__P.*//' \
	    -e 's/^.*[ *]//' \
	    -e '/__db_cprint/d' \
	    -e '/__db_lprint/d' \
	    -e '/__db_noop_log/d' \
	    -e '/__db_prnpage/d' \
	    -e '/__db_txnlist_print/d' \
	    -e '/__db_util_arg/d' \
	    -e '/__ham_func2/d' \
	    -e '/__ham_func3/d' \
	    -e '/_getpgnos/d' \
	    -e '/_print$/d' \
	    -e '/_read$/d' > $t1

# All files that could reference a prototype.
find "$d" -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2

# Count how many files mention each name; a count of exactly 1 means
# the prototype is unnecessary.  ($(cat $t2) is intentionally unquoted
# so the file list word-splits into separate arguments.)
for i in $(cat $t1); do
	c=$(egrep -low "$i" $(cat $t2) | wc -l)
	echo "$i: $c"
done | egrep ' 1$' > $t3

test -s $t3 && {
	cat $t3
	echo "FAIL: found unnecessary prototypes."
	exit 1
}
exit 0

Some files were not shown because too many files have changed in this diff. Show more