diff --git a/BUILD/Makefile.am b/BUILD/Makefile.am
index a5f3623c25e..667d7c396b3 100644
--- a/BUILD/Makefile.am
+++ b/BUILD/Makefile.am
@@ -38,7 +38,6 @@ EXTRA_DIST = FINISH.sh \
 compile-pentium-debug-max \
 compile-pentium-debug-max-no-embedded \
 compile-pentium-debug-max-no-ndb \
-compile-pentium-debug-no-bdb \
 compile-pentium-debug-openssl \
 compile-pentium-debug-yassl \
 compile-pentium-gcov \
diff --git a/BUILD/compile-dist b/BUILD/compile-dist
index 15032d79d78..874500977b7 100755
--- a/BUILD/compile-dist
+++ b/BUILD/compile-dist
@@ -52,7 +52,6 @@ fi
 --with-csv-storage-engine \
 --with-example-storage-engine \
 --with-federated-storage-engine \
---with-berkeley-db \
 --with-innodb \
 --with-ssl \
 --enable-thread-safe-client \
diff --git a/BUILD/compile-pentium-debug-no-bdb b/BUILD/compile-pentium-debug-no-bdb
deleted file mode 100755
index d7e70f868cc..00000000000
--- a/BUILD/compile-pentium-debug-no-bdb
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/sh
-
-path=`dirname $0`
-. "$path/SETUP.sh"
-
-extra_flags="$pentium_cflags $debug_cflags"
-extra_configs="$pentium_configs $debug_configs --without-berkeley-db $static_link"
-
-. "$path/FINISH.sh"
diff --git a/BUILD/compile-solaris-sparc-purify b/BUILD/compile-solaris-sparc-purify
index 29cf5671432..8c24b0db98c 100755
--- a/BUILD/compile-solaris-sparc-purify
+++ b/BUILD/compile-solaris-sparc-purify
@@ -37,7 +37,7 @@ gmake -k clean || true
 path=`dirname $0`
 . "$path/autorun.sh"
-CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-berkeley-db --with-embedded-server --with-innodb $EXTRA_CONFIG_FLAGS
+CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-embedded-server --with-innodb $EXTRA_CONFIG_FLAGS
 gmake -j 4
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5edc33b9f5a..ec744e975c7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -131,9 +131,6 @@ ADD_SUBDIRECTORY(client)
 IF(WITH_ARCHIVE_STORAGE_ENGINE)
   ADD_SUBDIRECTORY(storage/archive)
 ENDIF(WITH_ARCHIVE_STORAGE_ENGINE)
-IF(WITH_BERKELEY_STORAGE_ENGINE)
-  ADD_SUBDIRECTORY(storage/bdb)
-ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
 IF(WITH_BLACKHOLE_STORAGE_ENGINE)
   ADD_SUBDIRECTORY(storage/blackhole)
 ENDIF(WITH_BLACKHOLE_STORAGE_ENGINE)
diff --git a/configure.in b/configure.in
index 2eb21ad701b..4b1141006b5 100644
--- a/configure.in
+++ b/configure.in
@@ -32,7 +32,6 @@ sinclude(config/ac-macros/check_cpu.m4)
 sinclude(config/ac-macros/character_sets.m4)
 sinclude(config/ac-macros/compiler_flag.m4)
 sinclude(config/ac-macros/plugins.m4)
-sinclude(config/ac-macros/ha_berkeley.m4)
 sinclude(config/ac-macros/ha_ndbcluster.m4)
 sinclude(config/ac-macros/large_file.m4)
 sinclude(config/ac-macros/misc.m4)
@@ -2142,12 +2141,6 @@ MYSQL_CHECK_SSL
 # functions tested above
 #--------------------------------------------------------------------
-MYSQL_STORAGE_ENGINE(berkeley, berkeley-db, [BerkeleyDB Storage Engine],
- [Transactional Tables using BerkeleyDB], [max,max-no-ndb])
-MYSQL_PLUGIN_DIRECTORY(berkeley,[storage/bdb])
-MYSQL_PLUGIN_STATIC(berkeley, [[\$(bdb_libs_with_path)]])
-MYSQL_PLUGIN_ACTIONS(berkeley, [MYSQL_SETUP_BERKELEY_DB])
-
 MYSQL_STORAGE_ENGINE(blackhole,,[Blackhole Storage Engine],
 [Basic Write-only Read-never tables], [max,max-no-ndb])
 MYSQL_PLUGIN_DIRECTORY(blackhole, [storage/blackhole])
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index 210bad20024..804ab4ad3f2 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -16,7 +16,6 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
 ${CMAKE_SOURCE_DIR}/sql
 ${CMAKE_SOURCE_DIR}/regex
 ${CMAKE_SOURCE_DIR}/extra/yassl/include
- ${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
 ${CMAKE_SOURCE_DIR}/zlib
 )
@@ -84,9 +83,6 @@ ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
 IF(WITH_INNOBASE_STORAGE_ENGINE)
   ADD_DEPENDENCIES(mysqlserver innobase)
 ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
-IF(WITH_BERKELEY_STORAGE_ENGINE)
-  ADD_DEPENDENCIES(mysqlserver bdb)
-ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
 ADD_LIBRARY(libmysqld MODULE cmake_dummy.c libmysqld.def)
 TARGET_LINK_LIBRARIES(libmysqld wsock32)
diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am
index befbd3fad3a..9e85c46575d 100644
--- a/libmysqld/Makefile.am
+++ b/libmysqld/Makefile.am
@@ -45,7 +45,7 @@ noinst_HEADERS = embedded_priv.h emb_qcache.h
 sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
 ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
- ha_innodb.cc ha_berkeley.cc ha_federated.cc ha_ndbcluster.cc \
+ ha_innodb.cc ha_federated.cc ha_ndbcluster.cc \
 ha_ndbcluster_binlog.cc ha_partition.cc \
 handler.cc sql_handler.cc \
 hostname.cc init.cc password.c \
@@ -96,10 +96,6 @@ yassl_inc_libs= $(top_srcdir)/extra/yassl/src/.libs/libyassl.a \
 endif
 # Storage engine specific compilation options
-
-ha_berkeley.o: ha_berkeley.cc
-	$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
-
 ha_ndbcluster.o:ha_ndbcluster.cc
 	$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
diff --git a/mysql-test/extra/binlog_tests/binlog.test b/mysql-test/extra/binlog_tests/binlog.test
index 993b3fbf634..fe66647ec68 100644
--- a/mysql-test/extra/binlog_tests/binlog.test
+++ b/mysql-test/extra/binlog_tests/binlog.test
@@ -3,7 +3,6 @@ #
 -- source include/not_embedded.inc
--- source include/have_bdb.inc
 -- source include/have_innodb.inc
 -- source include/have_debug.inc
@@ -12,7 +11,7 @@ drop table if exists t1, t2;
 --enable_warnings
 reset master;
-create table t1 (a int) engine=bdb;
+create table t1 (a int) engine=innodb;
 create table t2 (a int) engine=innodb;
 begin;
 insert t1 values (5);
diff --git a/mysql-test/include/have_bdb.inc b/mysql-test/include/have_bdb.inc
deleted file mode 100644
index 3f7377e7515..00000000000
--- a/mysql-test/include/have_bdb.inc
+++ /dev/null
@@ -1,4 +0,0 @@
--- require r/have_bdb.require
-disable_query_log;
-show variables like "have_bdb";
-enable_query_log;
diff --git a/mysql-test/install_test_db.sh b/mysql-test/install_test_db.sh
index 9006957019a..d7c19438836 100644
--- a/mysql-test/install_test_db.sh
+++ b/mysql-test/install_test_db.sh
@@ -80,7 +80,7 @@ basedir=.
 EXTRA_ARG="--language=../sql/share/english/ --character-sets-dir=../sql/share/charsets/"
 fi
-mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --skip-bdb --tmpdir=. $EXTRA_ARG"
+mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --tmpdir=. $EXTRA_ARG"
 echo "running $mysqld_boot"
 if $scriptdir/mysql_create_system_tables test $mdata $hostname | $mysqld_boot
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 45accee56fb..0d8739a4613 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -2122,7 +2122,6 @@ sub install_db ($$) {
 mtr_add_arg($args, "--datadir=%s", $data_dir);
 mtr_add_arg($args, "--skip-innodb");
 mtr_add_arg($args, "--skip-ndbcluster");
- mtr_add_arg($args, "--skip-bdb");
 mtr_add_arg($args, "--tmpdir=.");
 if ( ! $opt_netware )
@@ -2215,7 +2214,6 @@ basedir = $path_my_basedir
 server_id = $server_id
 skip-stack-trace
 skip-innodb
-skip-bdb
 skip-ndbcluster
 EOF
 ;
@@ -2629,7 +2627,6 @@ sub mysqld_arguments ($$$$$) {
 if ( $opt_valgrind_mysqld )
 {
 mtr_add_arg($args, "%s--skip-safemalloc", $prefix);
- mtr_add_arg($args, "%s--skip-bdb", $prefix);
 }
 my $pidfile;
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index b74965b706e..f2200c4be07 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -536,8 +536,8 @@ while test $# -gt 0; do
 --valgrind | --valgrind-all)
 find_valgrind;
 VALGRIND=$FIND_VALGRIND
- EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc --skip-bdb"
- EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc --skip-bdb"
+ EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc"
+ EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc"
 SLEEP_TIME_AFTER_RESTART=10
 SLEEP_TIME_FOR_DELETE=60
 USE_RUNNING_SERVER=0
diff --git a/mysql-test/r/backup.result b/mysql-test/r/backup.result
index a4d1b18fe61..b299940e332 100644
--- a/mysql-test/r/backup.result
+++ b/mysql-test/r/backup.result
@@ -6,26 +6,26 @@ Table Op Msg_type Msg_text
 test.t4 backup error Failed copying .frm file (errno: X)
 test.t4 backup status Operation failed
 Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
 Error 1 Can't create/write to file 'MYSQLTEST_VARDIR/bogus/t4.frm' (Errcode: X)
 backup table t4 to '../tmp';
 Table Op Msg_type Msg_text
 test.t4 backup status OK
 Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
 backup table t4 to '../tmp';
 Table Op Msg_type Msg_text
 test.t4 backup error Failed copying .frm file (errno: X)
 test.t4 backup status Operation failed
 Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead Error 1 Can't create/write to file 'MYSQLTEST_VARDIR/tmp/t4.frm' (Errcode: X) drop table t4; restore table t4 from '../tmp'; Table Op Msg_type Msg_text test.t4 restore status OK Warnings: -Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead select count(*) from t4; count(*) 0 @@ -35,19 +35,19 @@ backup table t1 to '../tmp'; Table Op Msg_type Msg_text test.t1 backup status OK Warnings: -Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead drop table t1; restore table t1 from '../bogus'; Table Op Msg_type Msg_text t1 restore error Failed copying .frm file Warnings: -Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead Error 29 File 'MYSQLTEST_VARDIR/bogus/t1.frm' not found (Errcode: X) restore table t1 from '../tmp'; Table Op Msg_type Msg_text test.t1 restore status OK Warnings: -Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead select n from t1; n 23 @@ -62,7 +62,7 @@ Table Op Msg_type Msg_text test.t2 backup status OK test.t3 backup status OK Warnings: -Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead drop table t1,t2,t3; restore table t1,t2,t3 from '../tmp'; Table Op Msg_type Msg_text @@ -70,7 +70,7 @@ test.t1 restore status OK test.t2 restore status OK test.t3 restore status OK Warnings: -Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead select n from t1; n 23 @@ -91,7 +91,7 @@ restore table t1 from '../tmp'; Table Op Msg_type Msg_text test.t1 restore status OK Warnings: -Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead rename table t1 to t5; lock tables t5 write; backup table t5 to '../tmp'; @@ -99,7 +99,7 @@ unlock tables; Table Op Msg_type Msg_text test.t5 backup status OK Warnings: -Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. 
Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead drop table t5; DROP TABLE IF EXISTS `t+1`; CREATE TABLE `t+1` (c1 INT); @@ -108,13 +108,13 @@ BACKUP TABLE `t+1` TO '../tmp'; Table Op Msg_type Msg_text test.t+1 backup status OK Warnings: -Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead DROP TABLE `t+1`; RESTORE TABLE `t+1` FROM '../tmp'; Table Op Msg_type Msg_text test.t+1 restore status OK Warnings: -Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead. +Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead SELECT * FROM `t+1`; c1 1 diff --git a/mysql-test/r/bdb-alter-table-1.result b/mysql-test/r/bdb-alter-table-1.result deleted file mode 100644 index 0401002f1f3..00000000000 --- a/mysql-test/r/bdb-alter-table-1.result +++ /dev/null @@ -1,11 +0,0 @@ -drop table if exists t1; -create table t1(objid BIGINT not null, tablename varchar(64), oid BIGINT not null, test BIGINT, PRIMARY KEY (objid), UNIQUE(tablename)) engine=BDB; -insert into t1 values(1, 't1',4,9); -insert into t1 values(2, 'metatable',1,9); -insert into t1 values(3, 'metaindex',1,9 ); -select * from t1; -objid tablename oid test -1 t1 4 9 -2 metatable 1 9 -3 metaindex 1 9 -alter table t1 drop column test; diff --git a/mysql-test/r/bdb-alter-table-2.result b/mysql-test/r/bdb-alter-table-2.result deleted file mode 100644 index c23b5ba0031..00000000000 --- a/mysql-test/r/bdb-alter-table-2.result +++ /dev/null @@ -1,6 +0,0 @@ -select * from t1; -objid tablename oid -1 t1 4 -2 metatable 1 -3 metaindex 1 -drop table t1; diff --git a/mysql-test/r/bdb-crash.result b/mysql-test/r/bdb-crash.result deleted file mode 100644 index 51b15d08859..00000000000 --- a/mysql-test/r/bdb-crash.result +++ /dev/null @@ -1,39 +0,0 @@ -drop table if exists t1; -CREATE TABLE t1 ( -ChargeID int(10) unsigned NOT NULL auto_increment, -ServiceID int(10) unsigned DEFAULT '0' NOT NULL, -ChargeDate date DEFAULT '0000-00-00' NOT NULL, -ChargeAmount decimal(20,2) DEFAULT '0.00' NOT NULL, -FedTaxes decimal(20,2) DEFAULT '0.00' NOT NULL, -ProvTaxes decimal(20,2) DEFAULT '0.00' NOT NULL, -ChargeStatus enum('New','Auth','Unauth','Sale','Denied','Refund') -DEFAULT 'New' NOT NULL, -ChargeAuthorizationMessage text, -ChargeComment text, -ChargeTimeStamp varchar(20), -PRIMARY KEY (ChargeID), -KEY ServiceID (ServiceID), -KEY ChargeDate (ChargeDate) -) engine=BDB; -BEGIN; -INSERT INTO t1 -VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now'); -COMMIT; -BEGIN; -UPDATE t1 SET ChargeAuthorizationMessage = 'blablabla' WHERE -ChargeID = 1; -COMMIT; -INSERT INTO t1 -VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now'); -select * from t1; -ChargeID ServiceID ChargeDate ChargeAmount FedTaxes ProvTaxes ChargeStatus ChargeAuthorizationMessage ChargeComment ChargeTimeStamp -1 1 2001-03-01 1.00 1.00 1.00 New blablabla NULL now -2 1 2001-03-01 1.00 1.00 1.00 New NULL NULL now -drop table t1; -create table t1 (a int) engine=bdb; -set autocommit=0; -insert into t1 values(1); -analyze table t1; -Table Op 
Msg_type Msg_text -test.t1 analyze status OK -drop table t1; diff --git a/mysql-test/r/bdb-deadlock.result b/mysql-test/r/bdb-deadlock.result deleted file mode 100644 index 9394c90ff00..00000000000 --- a/mysql-test/r/bdb-deadlock.result +++ /dev/null @@ -1,31 +0,0 @@ -drop table if exists t1,t2; -create table t1 (id integer, x integer) engine=BDB; -create table t2 (id integer, x integer) engine=BDB; -insert into t1 values(0, 0); -insert into t2 values(0, 0); -set autocommit=0; -update t1 set x = 1 where id = 0; -set autocommit=0; -update t2 set x = 1 where id = 0; -select x from t1 where id = 0; -select x from t2 where id = 0; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction -commit; -x -1 -commit; -select * from t1; -id x -0 1 -select * from t2; -id x -0 1 -commit; -select * from t1; -id x -0 1 -select * from t2; -id x -0 1 -commit; -drop table t1,t2; diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result deleted file mode 100644 index f2d9c5704bb..00000000000 --- a/mysql-test/r/bdb.result +++ /dev/null @@ -1,1984 +0,0 @@ -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8; -create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) engine=bdb; -insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'), (2, 'Erik'), (3, 'Sasha'), (3, 'Jeremy'), (4, 'Matt'); -select id, code, name from t1 order by id; -id code name -1 1 Tim -2 1 Monty -3 2 David -4 2 Erik -5 3 Sasha -6 3 Jeremy -7 4 Matt -update ignore t1 set id = 8, name = 'Sinisa' where id < 3; -select id, code, name from t1 order by id; -id code name -2 1 Monty -3 2 David -4 2 Erik -5 3 Sasha -6 3 Jeremy -7 4 Matt -8 1 Sinisa -update ignore t1 set id = id + 10, name = 'Ralph' where id < 4; -select id, code, name from t1 order by id; -id code name -3 2 David -4 2 Erik -5 3 Sasha -6 3 Jeremy -7 4 Matt -8 1 Sinisa -12 1 Ralph -drop table t1; -CREATE TABLE t1 ( -id int(11) NOT NULL auto_increment, -parent_id int(11) DEFAULT '0' NOT NULL, -level tinyint(4) DEFAULT '0' NOT NULL, -PRIMARY KEY (id), -KEY parent_id (parent_id), -KEY level (level) -) engine=bdb; -INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1),(179,5,2); -update t1 set parent_id=parent_id+100; -select * from t1 where parent_id=102; -id parent_id level -8 102 2 -9 102 2 -15 102 2 -update t1 set id=id+1000; -update t1 set id=1024 where id=1009; -ERROR 23000: Duplicate entry '1024' for key 'PRIMARY' -select * from t1; -id parent_id level -1001 100 0 -1002 101 1 -1003 101 1 -1004 101 1 -1005 101 1 -1006 101 1 -1007 101 1 -1008 102 2 -1009 102 2 -1015 102 2 -1016 103 2 -1017 103 2 -1018 103 2 -1019 103 2 -1020 103 2 -1021 104 2 -1022 104 2 -1024 104 2 -1025 105 2 -1026 105 2 -1027 105 2 -1028 105 2 -1029 105 2 -1030 105 2 -1031 106 2 -1032 106 2 -1033 106 2 -1034 106 2 -1035 106 2 -1036 107 2 -1037 107 2 -1038 107 2 -1040 107 2 -1157 100 0 -1179 105 2 -1183 104 2 -1193 105 2 -1202 107 2 -1203 107 2 -update ignore t1 set id=id+1; -select * from t1; -id parent_id level -1001 100 0 -1002 101 1 -1003 101 1 -1004 101 1 -1005 101 1 -1006 101 1 -1007 101 1 -1008 102 2 -1010 102 2 -1015 102 2 -1016 103 2 -1017 103 2 -1018 103 2 -1019 103 2 
-1020 103 2 -1021 104 2 -1023 104 2 -1024 104 2 -1025 105 2 -1026 105 2 -1027 105 2 -1028 105 2 -1029 105 2 -1030 105 2 -1031 106 2 -1032 106 2 -1033 106 2 -1034 106 2 -1035 106 2 -1036 107 2 -1037 107 2 -1039 107 2 -1041 107 2 -1158 100 0 -1180 105 2 -1184 104 2 -1194 105 2 -1202 107 2 -1204 107 2 -update ignore t1 set id=1023 where id=1010; -select * from t1 where parent_id=102 order by parent_id,id; -id parent_id level -1008 102 2 -1010 102 2 -1015 102 2 -explain select level from t1 where level=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref level level 1 const X Using index -explain select level,id from t1 where level=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref level level 1 const X Using index -explain select level,id,parent_id from t1 where level=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref level level 1 const X -select level,id from t1 where level=1; -level id -1 1002 -1 1003 -1 1004 -1 1005 -1 1006 -1 1007 -select level,id,parent_id from t1 where level=1; -level id parent_id -1 1002 101 -1 1003 101 -1 1004 101 -1 1005 101 -1 1006 101 -1 1007 101 -optimize table t1; -Table Op Msg_type Msg_text -test.t1 optimize status OK -show keys from t1; -Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment -t1 0 PRIMARY 1 id A 39 NULL NULL BTREE -t1 1 parent_id 1 parent_id A 9 NULL NULL BTREE -t1 1 level 1 level A 3 NULL NULL BTREE -drop table t1; -CREATE TABLE t1 ( -gesuchnr int(11) DEFAULT '0' NOT NULL, -benutzer_id int(11) DEFAULT '0' NOT NULL, -PRIMARY KEY (gesuchnr,benutzer_id) -) engine=BDB; -replace into t1 (gesuchnr,benutzer_id) values (2,1); -replace into t1 (gesuchnr,benutzer_id) values (1,1); -replace into t1 (gesuchnr,benutzer_id) values (1,1); -select * from t1; -gesuchnr benutzer_id -1 1 -2 1 -drop table t1; -create table t1 (id int not null primary key, x int not null, key (x)) engine=bdb; -insert into t1 (id, x) values (1, 1); -replace into t1 (id, x) values (1, 2); -select * from t1; -id x -1 2 -drop table t1; -create table t1 (a int) engine=bdb; -insert into t1 values (1), (2); -optimize table t1; -Table Op Msg_type Msg_text -test.t1 optimize status OK -delete from t1 where a = 1; -select * from t1; -a -2 -check table t1; -Table Op Msg_type Msg_text -test.t1 check note The storage engine for the table doesn't support check -drop table t1; -create table t1 (a int,b varchar(20)) engine=bdb; -insert into t1 values (1,""), (2,"testing"); -delete from t1 where a = 1; -select * from t1; -a b -2 testing -create index skr on t1 (a); -insert into t1 values (3,""), (4,"testing"); -analyze table t1; -Table Op Msg_type Msg_text -test.t1 analyze status OK -show keys from t1; -Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment -t1 1 skr 1 a A 3 NULL NULL YES BTREE -drop table t1; -create table t1 (a int,b varchar(20),key(a)) engine=bdb; -insert into t1 values (1,""), (2,"testing"); -select * from t1 where a = 1; -a b -1 -drop table t1; -create table t1 (a char(10) not null, b int not null auto_increment, primary key(a,b)) engine=BDB; -insert into t1 values ("a",1),("b",2),("a",2),("c",1); -insert into t1 values ("a",NULL),("b",NULL),("c",NULL),("e",NULL); -insert into t1 (a) values ("a"),("b"),("c"),("d"); -insert into t1 (a) values ('k'),('d'); -insert into t1 (a) values ("a"); -insert into t1 values ("d",last_insert_id()); -select * from t1; -a 
b -a 1 -a 2 -a 3 -a 4 -a 5 -b 2 -b 3 -b 4 -c 1 -c 2 -c 3 -d 1 -d 2 -d 5 -e 1 -k 1 -flush tables; -select count(*) from t1; -count(*) -16 -drop table t1; -create table t1 (n int not null primary key) engine=bdb; -set autocommit=0; -insert into t1 values (4); -rollback; -select n, "after rollback" from t1; -n after rollback -insert into t1 values (4); -commit; -select n, "after commit" from t1; -n after commit -4 after commit -commit; -insert into t1 values (5); -insert into t1 values (4); -ERROR 23000: Duplicate entry '4' for key 'PRIMARY' -commit; -select n, "after commit" from t1; -n after commit -4 after commit -5 after commit -set autocommit=1; -insert into t1 values (6); -insert into t1 values (4); -ERROR 23000: Duplicate entry '4' for key 'PRIMARY' -select n from t1; -n -4 -5 -6 -set autocommit=0; -begin; -savepoint `my_savepoint`; -insert into t1 values (7); -savepoint `savept2`; -insert into t1 values (3); -select n from t1; -n -3 -4 -5 -6 -7 -savepoint savept3; -rollback to savepoint savept2; -rollback to savepoint savept3; -ERROR 42000: SAVEPOINT savept3 does not exist -rollback to savepoint savept2; -release savepoint `my_savepoint`; -select n from t1; -n -4 -5 -6 -7 -rollback to savepoint `my_savepoint`; -ERROR 42000: SAVEPOINT my_savepoint does not exist -rollback to savepoint savept2; -ERROR 42000: SAVEPOINT savept2 does not exist -insert into t1 values (8); -savepoint sv; -commit; -savepoint sv; -set autocommit=1; -rollback; -drop table t1; -create table t1 ( id int NOT NULL PRIMARY KEY, nom varchar(64)) engine=BDB; -begin; -insert into t1 values(1,'hamdouni'); -select id as afterbegin_id,nom as afterbegin_nom from t1; -afterbegin_id afterbegin_nom -1 hamdouni -rollback; -select id as afterrollback_id,nom as afterrollback_nom from t1; -afterrollback_id afterrollback_nom -set autocommit=0; -insert into t1 values(2,'mysql'); -select id as afterautocommit0_id,nom as afterautocommit0_nom from t1; -afterautocommit0_id afterautocommit0_nom -2 mysql -rollback; -select id as afterrollback_id,nom as afterrollback_nom from t1; -afterrollback_id afterrollback_nom -set autocommit=1; -drop table t1; -CREATE TABLE t1 (id char(8) not null primary key, val int not null) engine=bdb; -insert into t1 values ('pippo', 12); -insert into t1 values ('pippo', 12); -ERROR 23000: Duplicate entry 'pippo' for key 'PRIMARY' -delete from t1; -delete from t1 where id = 'pippo'; -select * from t1; -id val -insert into t1 values ('pippo', 12); -set autocommit=0; -delete from t1; -rollback; -select * from t1; -id val -pippo 12 -delete from t1; -commit; -select * from t1; -id val -drop table t1; -set autocommit=1; -CREATE TABLE t1 (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR(64)) ENGINE=BDB; -INSERT INTO t1 VALUES (1, 'Jochen'); -select * from t1; -ID NAME -1 Jochen -drop table t1; -CREATE TABLE t1 ( _userid VARCHAR(60) NOT NULL PRIMARY KEY) ENGINE=BDB; -set autocommit=0; -INSERT INTO t1 SET _userid='marc@anyware.co.uk'; -COMMIT; -SELECT * FROM t1; -_userid -marc@anyware.co.uk -SELECT _userid FROM t1 WHERE _userid='marc@anyware.co.uk'; -_userid -marc@anyware.co.uk -drop table t1; -set autocommit=1; -CREATE TABLE t1 ( -user_id int(10) DEFAULT '0' NOT NULL, -name varchar(100), -phone varchar(100), -ref_email varchar(100) DEFAULT '' NOT NULL, -detail varchar(200), -PRIMARY KEY (user_id,ref_email) -)engine=bdb; -INSERT INTO t1 VALUES (10292,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10292,'shirish','2333604','shirish@yahoo.com','ddsds'),(10292,'sonali','323232','sonali@bolly.com','filmstar'); -select * 
from t1 where user_id=10292; -user_id name phone ref_email detail -10292 sanjeev 29153373 sansh777@hotmail.com xxx -10292 shirish 2333604 shirish@yahoo.com ddsds -10292 sonali 323232 sonali@bolly.com filmstar -INSERT INTO t1 VALUES (10291,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10293,'shirish','2333604','shirish@yahoo.com','ddsds'); -select * from t1 where user_id=10292; -user_id name phone ref_email detail -10292 sanjeev 29153373 sansh777@hotmail.com xxx -10292 shirish 2333604 shirish@yahoo.com ddsds -10292 sonali 323232 sonali@bolly.com filmstar -select * from t1 where user_id>=10292; -user_id name phone ref_email detail -10292 sanjeev 29153373 sansh777@hotmail.com xxx -10292 shirish 2333604 shirish@yahoo.com ddsds -10292 sonali 323232 sonali@bolly.com filmstar -10293 shirish 2333604 shirish@yahoo.com ddsds -select * from t1 where user_id>10292; -user_id name phone ref_email detail -10293 shirish 2333604 shirish@yahoo.com ddsds -select * from t1 where user_id<10292; -user_id name phone ref_email detail -10291 sanjeev 29153373 sansh777@hotmail.com xxx -drop table t1; -CREATE TABLE t1 (a int not null, b int not null,c int not null, -key(a),primary key(a,b), unique(c),key(a),unique(b)); -show index from t1; -Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment -t1 0 PRIMARY 1 a A NULL NULL NULL BTREE -t1 0 PRIMARY 2 b A 0 NULL NULL BTREE -t1 0 c 1 c A 0 NULL NULL BTREE -t1 0 b 1 b A 0 NULL NULL BTREE -t1 1 a 1 a A NULL NULL NULL BTREE -t1 1 a_2 1 a A NULL NULL NULL BTREE -drop table t1; -create table t1 (col1 int not null, col2 char(4) not null, primary key(col1)); -alter table t1 engine=BDB; -insert into t1 values ('1','1'),('5','2'),('2','3'),('3','4'),('4','4'); -select * from t1; -col1 col2 -1 1 -2 3 -3 4 -4 4 -5 2 -update t1 set col2='7' where col1='4'; -select * from t1; -col1 col2 -1 1 -2 3 -3 4 -4 7 -5 2 -alter table t1 add co3 int not null; -select * from t1; -col1 col2 co3 -1 1 0 -2 3 0 -3 4 0 -4 7 0 -5 2 0 -update t1 set col2='9' where col1='2'; -select * from t1; -col1 col2 co3 -1 1 0 -2 9 0 -3 4 0 -4 7 0 -5 2 0 -drop table t1; -create table t1 (a int not null , b int, primary key (a)) engine = BDB; -create table t2 (a int not null , b int, primary key (a)) engine = myisam; -insert into t1 VALUES (1,3) , (2,3), (3,3); -select * from t1; -a b -1 3 -2 3 -3 3 -insert into t2 select * from t1; -select * from t2; -a b -1 3 -2 3 -3 3 -delete from t1 where b = 3; -select * from t1; -a b -insert into t1 select * from t2; -select * from t1; -a b -1 3 -2 3 -3 3 -select * from t2; -a b -1 3 -2 3 -3 3 -drop table t1,t2; -CREATE TABLE t1 ( -id int(11) NOT NULL auto_increment, -ggid varchar(32) binary DEFAULT '' NOT NULL, -email varchar(64) DEFAULT '' NOT NULL, -passwd varchar(32) binary DEFAULT '' NOT NULL, -PRIMARY KEY (id), -UNIQUE ggid (ggid) -) ENGINE=BDB; -insert into t1 (ggid,passwd) values ('test1','xxx'); -insert into t1 (ggid,passwd) values ('test2','yyy'); -insert into t1 (ggid,passwd) values ('test2','this will fail'); -ERROR 23000: Duplicate entry 'test2' for key 'ggid' -insert into t1 (ggid,id) values ('this will fail',1); -ERROR 23000: Duplicate entry '1' for key 'PRIMARY' -select * from t1 where ggid='test1'; -id ggid email passwd -1 test1 xxx -select * from t1 where passwd='xxx'; -id ggid email passwd -1 test1 xxx -select * from t1 where id=2; -id ggid email passwd -2 test2 yyy -replace into t1 (ggid,id) values ('this will work',1); -replace into t1 (ggid,passwd) values ('test2','this will work'); -update 
t1 set id=100,ggid='test2' where id=1; -ERROR 23000: Duplicate entry 'test2' for key 'ggid' -select * from t1; -id ggid email passwd -1 this will work -3 test2 this will work -select * from t1 where id=1; -id ggid email passwd -1 this will work -select * from t1 where id=999; -id ggid email passwd -drop table t1; -CREATE TABLE t1 ( -user_name varchar(12), -password text, -subscribed char(1), -user_id int(11) DEFAULT '0' NOT NULL, -quota bigint(20), -weight double, -access_date date, -access_time time, -approved datetime, -dummy_primary_key int(11) NOT NULL auto_increment, -PRIMARY KEY (dummy_primary_key) -) ENGINE=BDB; -INSERT INTO t1 VALUES ('user_0','somepassword','N',0,0,0,'2000-09-07','23:06:59','2000-09-07 23:06:59',1); -INSERT INTO t1 VALUES ('user_1','somepassword','Y',1,1,1,'2000-09-07','23:06:59','2000-09-07 23:06:59',2); -INSERT INTO t1 VALUES ('user_2','somepassword','N',2,2,1.4142135623731,'2000-09-07','23:06:59','2000-09-07 23:06:59',3); -INSERT INTO t1 VALUES ('user_3','somepassword','Y',3,3,1.7320508075689,'2000-09-07','23:06:59','2000-09-07 23:06:59',4); -INSERT INTO t1 VALUES ('user_4','somepassword','N',4,4,2,'2000-09-07','23:06:59','2000-09-07 23:06:59',5); -select user_name, password , subscribed, user_id, quota, weight, access_date, access_time, approved, dummy_primary_key from t1 order by user_name; -user_name password subscribed user_id quota weight access_date access_time approved dummy_primary_key -user_0 somepassword N 0 0 0 2000-09-07 23:06:59 2000-09-07 23:06:59 1 -user_1 somepassword Y 1 1 1 2000-09-07 23:06:59 2000-09-07 23:06:59 2 -user_2 somepassword N 2 2 1.4142135623731 2000-09-07 23:06:59 2000-09-07 23:06:59 3 -user_3 somepassword Y 3 3 1.7320508075689 2000-09-07 23:06:59 2000-09-07 23:06:59 4 -user_4 somepassword N 4 4 2 2000-09-07 23:06:59 2000-09-07 23:06:59 5 -drop table t1; -CREATE TABLE t1 ( -id int(11) NOT NULL auto_increment, -parent_id int(11) DEFAULT '0' NOT NULL, -level tinyint(4) DEFAULT '0' NOT NULL, -KEY (id), -KEY parent_id (parent_id), -KEY level (level) -) engine=bdb; -INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1); -INSERT INTO t1 values (179,5,2); -update t1 set parent_id=parent_id+100; -select * from t1 where parent_id=102; -id parent_id level -8 102 2 -9 102 2 -15 102 2 -update t1 set id=id+1000; -update t1 set id=1024 where id=1009; -select * from t1; -id parent_id level -1001 100 0 -1003 101 1 -1004 101 1 -1008 102 2 -1024 102 2 -1017 103 2 -1022 104 2 -1024 104 2 -1028 105 2 -1029 105 2 -1030 105 2 -1031 106 2 -1032 106 2 -1033 106 2 -1203 107 2 -1202 107 2 -1020 103 2 -1157 100 0 -1193 105 2 -1040 107 2 -1002 101 1 -1015 102 2 -1006 101 1 -1034 106 2 -1035 106 2 -1016 103 2 -1007 101 1 -1036 107 2 -1018 103 2 -1026 105 2 -1027 105 2 -1183 104 2 -1038 107 2 -1025 105 2 -1037 107 2 -1021 104 2 -1019 103 2 -1005 101 1 -1179 105 2 -update ignore t1 set id=id+1; -select * from t1; -id parent_id level -1002 100 0 -1004 101 1 -1005 101 1 -1009 102 2 -1025 102 2 -1018 103 2 -1023 104 2 -1025 104 2 -1029 105 2 -1030 105 2 -1031 105 2 -1032 106 2 -1033 106 2 -1034 106 2 -1204 107 2 -1203 107 2 -1021 103 2 -1158 100 0 -1194 105 2 -1041 107 2 -1003 101 1 -1016 102 2 -1007 101 1 -1035 106 2 -1036 106 2 -1017 103 2 -1008 101 1 -1037 107 
2 -1019 103 2 -1027 105 2 -1028 105 2 -1184 104 2 -1039 107 2 -1026 105 2 -1038 107 2 -1022 104 2 -1020 103 2 -1006 101 1 -1180 105 2 -update ignore t1 set id=1023 where id=1010; -select * from t1 where parent_id=102; -id parent_id level -1009 102 2 -1025 102 2 -1016 102 2 -explain select level from t1 where level=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref level level 1 const X Using index -select level,id from t1 where level=1; -level id -1 1004 -1 1005 -1 1003 -1 1007 -1 1008 -1 1006 -select level,id,parent_id from t1 where level=1; -level id parent_id -1 1004 101 -1 1005 101 -1 1003 101 -1 1007 101 -1 1008 101 -1 1006 101 -select level,id from t1 where level=1 order by id; -level id -1 1003 -1 1004 -1 1005 -1 1006 -1 1007 -1 1008 -delete from t1 where level=1; -select * from t1; -id parent_id level -1002 100 0 -1009 102 2 -1025 102 2 -1018 103 2 -1023 104 2 -1025 104 2 -1029 105 2 -1030 105 2 -1031 105 2 -1032 106 2 -1033 106 2 -1034 106 2 -1204 107 2 -1203 107 2 -1021 103 2 -1158 100 0 -1194 105 2 -1041 107 2 -1016 102 2 -1035 106 2 -1036 106 2 -1017 103 2 -1037 107 2 -1019 103 2 -1027 105 2 -1028 105 2 -1184 104 2 -1039 107 2 -1026 105 2 -1038 107 2 -1022 104 2 -1020 103 2 -1180 105 2 -drop table t1; -CREATE TABLE t1 ( -sca_code char(6) NOT NULL, -cat_code char(6) NOT NULL, -sca_desc varchar(50), -lan_code char(2) NOT NULL, -sca_pic varchar(100), -sca_sdesc varchar(50), -sca_sch_desc varchar(16), -PRIMARY KEY (sca_code, cat_code, lan_code), -INDEX sca_pic (sca_pic) -) engine = bdb ; -INSERT INTO t1 ( sca_code, cat_code, sca_desc, lan_code, sca_pic, sca_sdesc, sca_sch_desc) VALUES ( 'PD', 'J', 'PENDANT', 'EN', NULL, NULL, 'PENDANT'),( 'RI', 'J', 'RING', 'EN', NULL, NULL, 'RING'),( 'QQ', 'N', 'RING', 'EN', 'not null', NULL, 'RING'); -select count(*) from t1 where sca_code = 'PD'; -count(*) -1 -select count(*) from t1 where sca_code <= 'PD'; -count(*) -1 -select count(*) from t1 where sca_pic is null; -count(*) -2 -alter table t1 drop index sca_pic, add index sca_pic (cat_code, sca_pic); -select count(*) from t1 where sca_code='PD' and sca_pic is null; -count(*) -1 -select count(*) from t1 where cat_code='E'; -count(*) -0 -alter table t1 drop index sca_pic, add index (sca_pic, cat_code); -select count(*) from t1 where sca_code='PD' and sca_pic is null; -count(*) -1 -select count(*) from t1 where sca_pic >= 'n'; -count(*) -1 -select sca_pic from t1 where sca_pic is null; -sca_pic -NULL -NULL -update t1 set sca_pic="test" where sca_pic is null; -delete from t1 where sca_code='pd'; -drop table t1; -set @a:=now(); -CREATE TABLE t1 (a int not null, b timestamp not null, primary key (a)) engine=bdb; -insert into t1 (a) values(1),(2),(3); -select t1.a from t1 natural join t1 as t2 where t1.b >= @a order by t1.a; -a -1 -2 -3 -select a from t1 natural join t1 as t2 where b >= @a order by a; -a -1 -2 -3 -update t1 set a=5 where a=1; -select a from t1; -a -2 -3 -5 -drop table t1; -flush logs; -create table t1 (b blob, i int, key (b(100)), key (i), key (i, b(20))) engine=bdb; -insert into t1 values ('this is a blob', 1), (null, -1), (null, null),("",1),("",2),("",3); -select b from t1 where b = 'this is a blob'; -b -this is a blob -select * from t1 where b like 't%'; -b i -this is a blob 1 -select b, i from t1 where b is not null; -b i -this is a blob 1 - 1 - 2 - 3 -select * from t1 where b is null and i > 0; -b i -select * from t1 where i is NULL; -b i -NULL NULL -update t1 set b='updated' where i=1; -select * from t1; -b i -updated 1 -NULL -1 -NULL NULL 
-updated 1 - 2 - 3 -drop table t1; -create table t1 (a varchar(100) not null, primary key(a), b int not null) engine=bdb; -insert into t1 values("hello",1),("world",2); -select * from t1 order by b desc; -a b -world 2 -hello 1 -optimize table t1; -Table Op Msg_type Msg_text -test.t1 optimize status OK -show keys from t1; -Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment -t1 0 PRIMARY 1 a A 2 NULL NULL BTREE -drop table t1; -create table t1 (i int, j int )ENGINE=BDB; -insert into t1 values (1,2); -select * from t1 where i=1 and j=2; -i j -1 2 -create index ax1 on t1 (i,j); -select * from t1 where i=1 and j=2; -i j -1 2 -drop table t1; -create table t1 -( -branch_id int auto_increment primary key, -branch_name varchar(255) not null, -branch_active int not null default 1, -unique branch_name(branch_name), -index branch_active(branch_active) -) engine=bdb; -create table t2 -( -target_id int auto_increment primary key, -target_name varchar(255) not null, -target_active int not null default 1, -unique target_name(target_name), -index target_active(target_active) -) engine=bdb; -create table t3 -( -platform_id int auto_increment primary key, -platform_name varchar(255) not null, -platform_active int not null default 1, -unique platform_name(platform_name), -index platform_active(platform_active) -) engine=bdb; -create table t4 -( -product_id int auto_increment primary key, -product_name varchar(255) not null, -version_file varchar(255) not null, -product_active int not null default 1, -unique product_name(product_name), -index product_active(product_active) -) engine=bdb; -create table t5 -( -product_file_id int auto_increment primary key, -product_id int not null, -file_name varchar(255) not null, -/* cvs module used to find the file version */ -module_name varchar(255) not null, -/* flag whether the file is still included in the product */ -file_included int not null default 1, -unique product_file(product_id,file_name), -index file_included(file_included) -) engine=bdb; -create table t6 -( -file_platform_id int auto_increment primary key, -product_file_id int not null, -platform_id int not null, -branch_id int not null, -/* filename in the build system */ -build_filename varchar(255) not null, -/* default filename in the build archive */ -archive_filename varchar(255) not null, -unique file_platform(product_file_id,platform_id,branch_id) -) engine=bdb; -create table t8 -( -archive_id int auto_increment primary key, -branch_id int not null, -target_id int not null, -platform_id int not null, -product_id int not null, -status_id int not null default 1, -unique archive(branch_id,target_id,platform_id,product_id), -index status_id(status_id) -) engine=bdb; -create table t7 -( -build_id int auto_increment primary key, -branch_id int not null, -target_id int not null, -build_number int not null, -build_date date not null, -/* build system tag, e.g. 'rmanight-022301-1779' */ -build_tag varchar(255) not null, -/* path relative to the build archive root, e.g. 
'current' */ -build_path text not null, -unique build(branch_id,target_id,build_number) -) engine=bdb; -insert into t1 (branch_name) -values ('RealMedia'); -insert into t1 (branch_name) -values ('RP8REV'); -insert into t1 (branch_name) -values ('SERVER_8_0_GOLD'); -insert into t2 (target_name) -values ('rmanight'); -insert into t2 (target_name) -values ('playerall'); -insert into t2 (target_name) -values ('servproxyall'); -insert into t3 (platform_name) -values ('linux-2.0-libc6-i386'); -insert into t3 (platform_name) -values ('win32-i386'); -insert into t4 (product_name, version_file) -values ('realserver', 'servinst'); -insert into t4 (product_name, version_file) -values ('realproxy', 'prxyinst'); -insert into t4 (product_name, version_file) -values ('realplayer', 'playinst'); -insert into t4 (product_name, version_file) -values ('plusplayer', 'plusinst'); -create temporary table tmp1 -select branch_id, target_id, platform_id, product_id -from t1, t2, t3, t4 ; -create temporary table tmp2 -select tmp1.branch_id, tmp1.target_id, tmp1.platform_id, tmp1.product_id -from tmp1 left join t8 -using (branch_id,target_id,platform_id,product_id) -where t8.archive_id is null ; -insert into t8 -(branch_id, target_id, platform_id, product_id, status_id) -select branch_id, target_id, platform_id, product_id, 1 -from tmp2 ; -drop table tmp1 ; -drop table tmp2 ; -insert into t5 (product_id, file_name, module_name) -values (1, 'servinst', 'server'); -insert into t5 (product_id, file_name, module_name) -values (2, 'prxyinst', 'server'); -insert into t5 (product_id, file_name, module_name) -values (3, 'playinst', 'rpapp'); -insert into t5 (product_id, file_name, module_name) -values (4, 'plusinst', 'rpapp'); -insert into t6 -(product_file_id,platform_id,branch_id,build_filename,archive_filename) -values (1, 2, 3, 'servinst.exe', 'win32-servinst.exe'); -insert into t6 -(product_file_id,platform_id,branch_id,build_filename,archive_filename) -values (1, 1, 3, 'v80_linux-2.0-libc6-i386_servinst.bin', 'linux2-servinst.exe'); -insert into t6 -(product_file_id,platform_id,branch_id,build_filename,archive_filename) -values (3, 2, 2, 'playinst.exe', 'win32-playinst.exe'); -insert into t6 -(product_file_id,platform_id,branch_id,build_filename,archive_filename) -values (4, 2, 2, 'playinst.exe', 'win32-playinst.exe'); -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (2, 2, 1071, 'playerall-022101-1071', '2001-02-21', 'current'); -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (2, 2, 1072, 'playerall-022201-1072', '2001-02-22', 'current'); -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (3, 3, 388, 'servproxyall-022201-388', '2001-02-22', 'current'); -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (3, 3, 389, 'servproxyall-022301-389', '2001-02-23', 'current'); -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (4, 4, 100, 'foo target-010101-100', '2001-01-01', 'current'); -update t8 -set status_id=2 -where branch_id=2 and target_id=2 and platform_id=2 and product_id=1; -select t7.build_path -from -t1, -t7, -t2, -t3, -t4, -t5, -t6 -where -t7.branch_id = t1.branch_id and -t7.target_id = t2.target_id and -t5.product_id = t4.product_id and -t6.product_file_id = t5.product_file_id and -t6.platform_id = t3.platform_id and -t6.branch_id = t6.branch_id and -t7.build_id = 1 and -t4.product_id = 3 and -t5.file_name 
= 'playinst' and -t3.platform_id = 2; -build_path -current -drop table t1, t2, t3, t4, t5, t6, t7, t8; -CREATE TABLE t1 ( -a tinytext NOT NULL, -b tinyint(3) unsigned NOT NULL default '0', -PRIMARY KEY (a(32),b) -) ENGINE=BDB; -INSERT INTO t1 VALUES ('a',1),('a',2); -SELECT * FROM t1 WHERE a='a' AND b=2; -a b -a 2 -SELECT * FROM t1 WHERE a='a' AND b in (2); -a b -a 2 -SELECT * FROM t1 WHERE a='a' AND b in (1,2); -a b -a 1 -a 2 -drop table t1; -CREATE TABLE t1 ( -a int3 unsigned NOT NULL, -b int1 unsigned NOT NULL, -UNIQUE (a, b) -) ENGINE = BDB; -INSERT INTO t1 VALUES (1, 1); -SELECT MIN(B),MAX(b) FROM t1 WHERE t1.a = 1; -MIN(B) MAX(b) -1 1 -drop table t1; -create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=bdb; -insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); -LOCK TABLES t1 WRITE; -insert into t1 values (99,1,2,'D'),(1,1,2,'D'); -ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' -select id from t1; -id -0 -1 -2 -select id from t1; -id -0 -1 -2 -UNLOCK TABLES; -DROP TABLE t1; -create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=bdb; -insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); -LOCK TABLES t1 WRITE; -begin; -insert into t1 values (99,1,2,'D'),(1,1,2,'D'); -ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' -select id from t1; -id -0 -1 -2 -insert ignore into t1 values (100,1,2,'D'),(1,1,99,'D'); -commit; -select id,id3 from t1; -id id3 -0 0 -1 1 -2 2 -100 2 -UNLOCK TABLES; -DROP TABLE t1; -CREATE TABLE t1 (SYAIN_NO char(5) NOT NULL default '', KINMU_DATE char(6) NOT NULL default '', PRIMARY KEY (SYAIN_NO,KINMU_DATE)) ENGINE=BerkeleyDB; -CREATE TABLE t2 ( SYAIN_NO char(5) NOT NULL default '',STR_DATE char(8) NOT NULL default '',PRIMARY KEY (SYAIN_NO,STR_DATE) ) ENGINE=BerkeleyDB; -select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO; -KINMU_DATE -select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO; -KINMU_DATE -DROP TABLE t1,t2; -create table t1 (a int(11) not null, b int(11) not null, unique (a,b)) engine=bdb; -insert into t1 values (1,1), (1,2); -select * from t1 where a = 1; -a b -1 1 -1 2 -select t1.*, t2.* from t1, t1 t2 where t1.a = t2.a and t2.a = 1; -a b a b -1 1 1 1 -1 1 1 2 -1 2 1 1 -1 2 1 2 -select * from t1 where a = 1; -a b -1 1 -1 2 -drop table t1; -create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=bdb; -insert into t1 values (0,0,0,'ABCDEFGHIJ'); -create table t2 (id int NOT NULL,primary key (id)) engine=bdb; -LOCK TABLES t1 WRITE, t2 WRITE; -insert into t2 values(1); -SELECT t1.* FROM t1 WHERE id IN (1); -id id2 id3 dummy1 -SELECT t1.* FROM t2 left outer join t1 on (t1.id=t2.id); -id id2 id3 dummy1 -NULL NULL NULL NULL -delete from t1 where id3 >= 0 and id3 <= 0; -drop table t1,t2; -CREATE TABLE t1 (i varchar(48) NOT NULL default '', p varchar(255) default NULL,s varchar(48) NOT NULL default '', PRIMARY KEY (i), UNIQUE(p,s)) ENGINE=BDB; -INSERT INTO t1 VALUES ('00000000-e6c4ddeaa6-003b8-83458387','programs/xxxxxxxx.wmv','00000000-e6c4ddeb32-003bc-83458387'); -SELECT * FROM t1 WHERE p='programs/xxxxxxxx.wmv'; -i p s -00000000-e6c4ddeaa6-003b8-83458387 programs/xxxxxxxx.wmv 
00000000-e6c4ddeb32-003bc-83458387 -drop table t1; -CREATE TABLE t1 ( STR_DATE varchar(8) NOT NULL default '',INFO_NOTE varchar(200) default NULL,PRIMARY KEY (STR_DATE) ) ENGINE=BerkeleyDB; -select INFO_NOTE from t1 where STR_DATE = '20010610'; -INFO_NOTE -select INFO_NOTE from t1 where STR_DATE < '20010610'; -INFO_NOTE -select INFO_NOTE from t1 where STR_DATE > '20010610'; -INFO_NOTE -drop table t1; -create table t1 (a int not null, b int, primary key (a)) engine =bdb; -create table t2 (a int not null, b int, primary key (a)) engine =bdb; -insert into t1 values (2, 3),(1, 7),(10, 7); -insert into t2 values (2, 3),(1, 7),(10, 7); -select * from t1; -a b -1 7 -2 3 -10 7 -select * from t2; -a b -1 7 -2 3 -10 7 -delete t1, t2 from t1, t2 where t1.a = t2.a; -select * from t1; -a b -select * from t2; -a b -select * from t2; -a b -drop table t1,t2; -create table t1 (x int not null, index(x)) engine=bdb; -insert into t1 values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); -select * from t1 where x <= 10 and x >= 7; -x -7 -8 -9 -10 -select * from t1 where x <= 10 and x >= 7 order by x; -x -7 -8 -9 -10 -select * from t1 where x <= 10 and x >= 7 order by x desc; -x -10 -9 -8 -7 -select * from t1 where x <= 8 and x >= 5 order by x desc; -x -8 -7 -6 -5 -select * from t1 where x < 8 and x > 5 order by x desc; -x -7 -6 -drop table t1; -create table t1 ( c char(8) not null ) engine=bdb; -insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); -insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F'); -alter table t1 add b char(8) not null; -alter table t1 add a char(8) not null; -alter table t1 add primary key (a,b,c); -update t1 set a=c, b=c; -create table t2 (c char(8) not null, b char(8) not null, a char(8) not null, primary key(a,b,c)) engine=bdb; -insert into t2 select * from t1; -delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; -drop table t1,t2; -create table t1 (a char(10), key(a), b int not null, key(b)) engine=bdb; -insert into t1 values ('a',1),('A',2); -explain select a from t1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 2 -select a from t1; -a -a -A -explain select b from t1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL b 4 NULL 2 Using index -select b from t1; -b -1 -2 -alter table t1 modify a char(10) binary; -explain select a from t1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL a 11 NULL 2 Using index -select a from t1; -a -A -a -drop table t1; -create table t1( -pk1 text not null, pk2 text not null, pk3 char(4), -key1 int, key2 int, -primary key(pk1(4), pk2(4), pk3), key(key1), key(key2) -) engine=bdb; -insert into t1 values (concat('aaa-', repeat('A', 4000)), -concat('eee-', repeat('e', 4000)), 'a++a', 1, 1); -insert into t1 values (concat('bbb-', repeat('B', 4000)), -concat('ggg-', repeat('G', 4000)), 'b++b', 1, 1); -select substring(pk1, 1, 4), substring(pk1, 4001), -substring(pk2, 1, 4), substring(pk2, 4001), pk3, key1, key2 -from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -substring(pk1, 1, 4) substring(pk1, 4001) substring(pk2, 1, 4) substring(pk2, 4001) pk3 key1 key2 -aaa- AAAA eee- eeee a++a 1 1 -bbb- BBBB ggg- GGGG b++b 1 1 -drop table t1; -create table t1 ( -pk1 varchar(8) not null default '', -pk2 varchar(4) not null default '', -key1 int(11) default null, -key2 int(11) default null, -primary key (pk1,pk2), -key key1 (key1), -key key2 (key2)) engine=bdb; -insert into t1 
values ('','empt',2,2), ('a','a--a',2,2), -('bb','b--b',2,2), ('ccc','c--c',2,2), ('dddd','d--d',2,2); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -pk1 pk2 key1 key2 - empt 2 2 -a a--a 2 2 -bb b--b 2 2 -ccc c--c 2 2 -dddd d--d 2 2 -drop table t1; -set autocommit=0; -create table t1(b varchar(30)) engine=bdb; -insert into t1 values ('one'); -commit; -select b FROM t1 outer_table where -exists (select 'two' from t1 where 'two' = outer_table.b); -b -drop table t1; -set autocommit=1; -create table t1(a int primary key, b varchar(30)) engine=bdb; -insert into t1 values (1,'one'), (2,'two'), (3,'three'), (4,'four'); -create table t2 like t1; -insert t2 select * from t1; -select a from t1 where a in (select a from t2); -a -1 -2 -3 -4 -delete from t2; -insert into t2 (a, b) -select a, b from t1 where (a, b) in (select a, b from t1); -select * from t2; -a b -1 one -2 two -3 three -4 four -drop table t1, t2; -create table t1 (a int, b varchar(30), primary key(a)) engine = bdb; -insert into t1 values (1,'one'); -commit; -truncate t1; -select * from t1; -a b -drop table t1; -SET NAMES utf8; -create table t1 (a varchar(255) character set utf8) engine=bdb; -set @a:= convert(repeat(_latin1 0xFF, 255) using utf8); -insert into t1 values (@a); -select a, length(a), char_length(a) from t1; -a length(a) char_length(a) -ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ 510 255 -drop table t1; -SET NAMES latin1; -CREATE TABLE t1 ( -id int unsigned NOT NULL auto_increment, -list_id smallint unsigned NOT NULL, -term TEXT NOT NULL, -PRIMARY KEY(id), -INDEX(list_id, term(4)) -) ENGINE=BDB CHARSET=utf8; -INSERT INTO t1 SET list_id = 1, term = "letterc"; -INSERT INTO t1 SET list_id = 1, term = "letterb"; -INSERT INTO t1 SET list_id = 1, term = "lettera"; -INSERT INTO t1 SET list_id = 1, term = "letterd"; -SELECT id FROM t1 WHERE (list_id = 1) AND (term = "letterc"); -id -1 -SELECT id FROM t1 WHERE (list_id = 1) AND (term = "letterb"); -id -2 -SELECT id FROM t1 WHERE (list_id = 1) AND (term = "lettera"); -id -3 -SELECT id FROM t1 WHERE (list_id = 1) AND (term = "letterd"); -id -4 -DROP TABLE t1; -create table t1 (a int, key(a)) engine=bdb; -create table t2 (b int, key(b)) engine=bdb; -insert into t1 values (1),(1),(2),(3),(4); -insert into t2 values (1),(5),(6),(7); -delete from t1 where (a in (select b from t2)); -select count(*) from t1; -count(*) -3 -insert into t1 set a=(select b from t2); -ERROR 21000: Subquery returns more than 1 row -select count(*) from t1; -count(*) -3 -update t1 set a = a + 1 where (a in (select b from t2)); -select count(*) from t1; -count(*) -3 -drop table t1, t2; -End of 4.1 tests -create temporary table t1 (a int, primary key(a)) engine=bdb; -select * from t1; -a -alter table t1 add b int; -select * from t1; -a b -drop table t1; -set storage_engine=bdb; -drop table if exists t1,t2,t3; ---- Testing varchar --- ---- Testing varchar --- -create table t1 (v varchar(10), c char(10), t text); -insert into t1 values('+ ', '+ ', '+ '); -set @a=repeat(' ',20); -insert into t1 values (concat('+',@a),concat('+',@a),concat('+',@a)); -Warnings: -Note 1265 Data truncated for column 'v' at row 1 -select concat('*',v,'*',c,'*',t,'*') from t1; -concat('*',v,'*',c,'*',t,'*') -*+ *+*+ * -*+ *+*+ * -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` 
varchar(10) DEFAULT NULL, - `c` char(10) DEFAULT NULL, - `t` text -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -create table t2 like t1; -show create table t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `v` varchar(10) DEFAULT NULL, - `c` char(10) DEFAULT NULL, - `t` text -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -create table t3 select * from t1; -show create table t3; -Table Create Table -t3 CREATE TABLE `t3` ( - `v` varchar(10) DEFAULT NULL, - `c` char(10) DEFAULT NULL, - `t` text -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -alter table t1 modify c varchar(10); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` varchar(10) DEFAULT NULL, - `c` varchar(10) DEFAULT NULL, - `t` text -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -alter table t1 modify v char(10); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` char(10) DEFAULT NULL, - `c` varchar(10) DEFAULT NULL, - `t` text -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -alter table t1 modify t varchar(10); -Warnings: -Note 1265 Data truncated for column 't' at row 2 -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` char(10) DEFAULT NULL, - `c` varchar(10) DEFAULT NULL, - `t` varchar(10) DEFAULT NULL -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -select concat('*',v,'*',c,'*',t,'*') from t1; -concat('*',v,'*',c,'*',t,'*') -*+*+*+ * -*+*+*+ * -drop table t1,t2,t3; -create table t1 (v varchar(10), c char(10), t text, key(v), key(c), key(t(10))); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` varchar(10) DEFAULT NULL, - `c` char(10) DEFAULT NULL, - `t` text, - KEY `v` (`v`), - KEY `c` (`c`), - KEY `t` (`t`(10)) -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -select count(*) from t1; -count(*) -270 -insert into t1 values(concat('a',char(1)),concat('a',char(1)),concat('a',char(1))); -select count(*) from t1 where v='a'; -count(*) -10 -select count(*) from t1 where c='a'; -count(*) -10 -select count(*) from t1 where t='a'; -count(*) -10 -select count(*) from t1 where v='a '; -count(*) -10 -select count(*) from t1 where c='a '; -count(*) -10 -select count(*) from t1 where t='a '; -count(*) -10 -select count(*) from t1 where v between 'a' and 'a '; -count(*) -10 -select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; -count(*) -10 -select count(*) from t1 where v like 'a%'; -count(*) -11 -select count(*) from t1 where c like 'a%'; -count(*) -11 -select count(*) from t1 where t like 'a%'; -count(*) -11 -select count(*) from t1 where v like 'a %'; -count(*) -9 -explain select count(*) from t1 where v='a '; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 13 const # Using where -explain select count(*) from t1 where c='a '; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref c c 11 const # Using where -explain select count(*) from t1 where t='a '; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range t t 13 NULL # Using where -explain select count(*) from t1 where v like 'a%'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 13 NULL # Using where -explain select count(*) from t1 where v between 'a' and 'a '; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 13 const # Using where -explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 
ref v v 13 const # Using where -alter table t1 add unique(v); -ERROR 23000: Duplicate entry '{ ' for key 'v_2' -alter table t1 add key(v); -select concat('*',v,'*',c,'*',t,'*') as qq from t1 where v='a'; -qq -*a*a*a* -*a *a*a * -*a *a*a * -*a *a*a * -*a *a*a * -*a *a*a * -*a *a*a * -*a *a*a * -*a *a*a * -*a *a*a * -explain select * from t1 where v='a'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v,v_2 # 13 const # Using where -select v,count(*) from t1 group by v limit 10; -v count(*) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select v,count(t) from t1 group by v limit 10; -v count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select v,count(c) from t1 group by v limit 10; -v count(c) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select sql_big_result v,count(t) from t1 group by v limit 10; -v count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select sql_big_result v,count(c) from t1 group by v limit 10; -v count(c) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select c,count(*) from t1 group by c limit 10; -c count(*) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select c,count(t) from t1 group by c limit 10; -c count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select sql_big_result c,count(t) from t1 group by c limit 10; -c count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select t,count(*) from t1 group by t limit 10; -t count(*) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select t,count(t) from t1 group by t limit 10; -t count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select sql_big_result t,count(t) from t1 group by t limit 10; -t count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -alter table t1 modify v varchar(300), drop key v, drop key v_2, add key v (v); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` varchar(300) DEFAULT NULL, - `c` char(10) DEFAULT NULL, - `t` text, - KEY `c` (`c`), - KEY `t` (`t`(10)), - KEY `v` (`v`) -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -select count(*) from t1 where v='a'; -count(*) -10 -select count(*) from t1 where v='a '; -count(*) -10 -select count(*) from t1 where v between 'a' and 'a '; -count(*) -10 -select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; -count(*) -10 -select count(*) from t1 where v like 'a%'; -count(*) -11 -select count(*) from t1 where v like 'a %'; -count(*) -9 -explain select count(*) from t1 where v='a '; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 303 const # Using where -explain select count(*) from t1 where v like 'a%'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 303 NULL # Using where -explain select count(*) from t1 where v between 'a' and 'a '; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 303 const # Using where -explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 303 const # Using where -explain select * from t1 where v='a'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 303 const # Using where -select v,count(*) from t1 group by v limit 10; -v count(*) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select v,count(t) from t1 
group by v limit 10; -v count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select sql_big_result v,count(t) from t1 group by v limit 10; -v count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -alter table t1 drop key v, add key v (v(30)); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` varchar(300) DEFAULT NULL, - `c` char(10) DEFAULT NULL, - `t` text, - KEY `c` (`c`), - KEY `t` (`t`(10)), - KEY `v` (`v`(30)) -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -select count(*) from t1 where v='a'; -count(*) -10 -select count(*) from t1 where v='a '; -count(*) -10 -select count(*) from t1 where v between 'a' and 'a '; -count(*) -10 -select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; -count(*) -10 -select count(*) from t1 where v like 'a%'; -count(*) -11 -select count(*) from t1 where v like 'a %'; -count(*) -9 -explain select count(*) from t1 where v='a '; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 33 const # Using where -explain select count(*) from t1 where v like 'a%'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 33 NULL # Using where -explain select count(*) from t1 where v between 'a' and 'a '; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 33 const # Using where -explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 33 const # Using where -explain select * from t1 where v='a'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref v v 33 const # Using where -select v,count(*) from t1 group by v limit 10; -v count(*) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select v,count(t) from t1 group by v limit 10; -v count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select sql_big_result v,count(t) from t1 group by v limit 10; -v count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -alter table t1 modify v varchar(600), drop key v, add key v (v); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` varchar(600) DEFAULT NULL, - `c` char(10) DEFAULT NULL, - `t` text, - KEY `c` (`c`), - KEY `t` (`t`(10)), - KEY `v` (`v`) -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -select v,count(*) from t1 group by v limit 10; -v count(*) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select v,count(t) from t1 group by v limit 10; -v count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -select sql_big_result v,count(t) from t1 group by v limit 10; -v count(t) -a 1 -a 10 -b 10 -c 10 -d 10 -e 10 -f 10 -g 10 -h 10 -i 10 -drop table t1; -create table t1 (a char(10), unique (a)); -insert into t1 values ('a '); -insert into t1 values ('a '); -ERROR 23000: Duplicate entry 'a' for key 'a' -alter table t1 modify a varchar(10); -insert into t1 values ('a '),('a '),('a '),('a '); -ERROR 23000: Duplicate entry 'a ' for key 'a' -insert into t1 values ('a '); -ERROR 23000: Duplicate entry 'a ' for key 'a' -insert into t1 values ('a '); -ERROR 23000: Duplicate entry 'a ' for key 'a' -insert into t1 values ('a '); -ERROR 23000: Duplicate entry 'a ' for key 'a' -update t1 set a='a ' where a like 'a%'; -select concat(a,'.') from t1; -concat(a,'.') -a . -update t1 set a='abc ' where a like 'a '; -select concat(a,'.') from t1; -concat(a,'.') -a . 
-update t1 set a='a ' where a like 'a %'; -select concat(a,'.') from t1; -concat(a,'.') -a . -update t1 set a='a ' where a like 'a '; -select concat(a,'.') from t1; -concat(a,'.') -a . -drop table t1; -create table t1 (v varchar(10), c char(10), t text, key(v(5)), key(c(5)), key(t(5))); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` varchar(10) DEFAULT NULL, - `c` char(10) DEFAULT NULL, - `t` text, - KEY `v` (`v`(5)), - KEY `c` (`c`(5)), - KEY `t` (`t`(5)) -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -drop table t1; -create table t1 (v char(10) character set utf8); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` char(10) CHARACTER SET utf8 DEFAULT NULL -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -drop table t1; -create table t1 (v varchar(10), c char(10)) row_format=fixed; -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` varchar(10) DEFAULT NULL, - `c` char(10) DEFAULT NULL -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED -insert into t1 values('a','a'),('a ','a '); -select concat('*',v,'*',c,'*') from t1; -concat('*',v,'*',c,'*') -*a*a* -*a *a* -drop table t1; -create table t1 (v varchar(65530), key(v(10))); -insert into t1 values(repeat('a',65530)); -select length(v) from t1 where v=repeat('a',65530); -length(v) -65530 -drop table t1; -create table t1(a int, b varchar(12), key ba(b, a)); -insert into t1 values (1, 'A'), (20, NULL); -explain select * from t1 where a=20 and b is null; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref ba ba 20 const,const 1 Using where -select * from t1 where a=20 and b is null; -a b -20 NULL -drop table t1; -create table t1 (v varchar(65530), key(v)); -Warnings: -Warning 1071 Specified key was too long; max key length is MAX_KEY_LENGTH bytes -drop table if exists t1; -create table t1 (v varchar(65536)); -Warnings: -Note 1246 Converting column 'v' from VARCHAR to TEXT -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` mediumtext -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -drop table t1; -create table t1 (v varchar(65530) character set utf8); -Warnings: -Note 1246 Converting column 'v' from VARCHAR to TEXT -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `v` mediumtext CHARACTER SET utf8 -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -drop table t1; -set storage_engine=MyISAM; -create table t1 (a varchar(255) character set utf8, -b varchar(255) character set utf8, -c varchar(255) character set utf8, -d varchar(255) character set utf8, -key (a,b,c,d)) engine=bdb; -drop table t1; -create table t1 (a varchar(255) character set utf8, -b varchar(255) character set utf8, -c varchar(255) character set utf8, -d varchar(255) character set utf8, -e varchar(255) character set utf8, -key (a,b,c,d,e)) engine=bdb; -ERROR 42000: Specified key was too long; max key length is 3072 bytes -set autocommit=0; -create table t1 (a int) engine=bdb; -commit; -alter table t1 add primary key(a); -drop table t1; -End of 5.0 tests -create table t1 (a int) engine=bdb; -set session transaction isolation level repeatable read; -set transaction isolation level serializable; -begin; -select @@tx_isolation; -@@tx_isolation -SERIALIZABLE -insert into t1 values (1); -set transaction isolation level read committed; -ERROR 25001: Transaction isolation level can't be changed while a transaction is in progress -rollback; -begin; -select @@tx_isolation; -@@tx_isolation -REPEATABLE-READ -insert into t1 values (1); -rollback; -drop table t1; 
-End of 5.1 tests diff --git a/mysql-test/r/bdb_cache.result b/mysql-test/r/bdb_cache.result deleted file mode 100644 index 6506ce0412a..00000000000 --- a/mysql-test/r/bdb_cache.result +++ /dev/null @@ -1,99 +0,0 @@ -drop table if exists t1, t2, t3; -flush status; -set autocommit=0; -create table t1 (a int not null) engine=bdb; -insert into t1 values (1),(2),(3); -select * from t1; -a -1 -2 -3 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 0 -drop table t1; -set autocommit=1; -create table t1 (a int not null) engine=bdb; -begin; -insert into t1 values (1),(2),(3); -select * from t1; -a -1 -2 -3 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 0 -drop table t1; -create table t1 (a int not null) engine=bdb; -create table t2 (a int not null) engine=bdb; -create table t3 (a int not null) engine=bdb; -insert into t1 values (1),(2); -insert into t2 values (1),(2); -insert into t3 values (1),(2); -select * from t1; -a -1 -2 -select * from t2; -a -1 -2 -select * from t3; -a -1 -2 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 3 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -begin; -select * from t1; -a -1 -2 -select * from t2; -a -1 -2 -select * from t3; -a -1 -2 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 3 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -insert into t1 values (3); -insert into t2 values (3); -insert into t1 values (4); -select * from t1; -a -1 -2 -3 -4 -select * from t2; -a -1 -2 -3 -select * from t3; -a -1 -2 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 3 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -commit; -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -drop table if exists t1, t2, t3; diff --git a/mysql-test/r/bdb_gis.result b/mysql-test/r/bdb_gis.result deleted file mode 100644 index 4a3752e5426..00000000000 --- a/mysql-test/r/bdb_gis.result +++ /dev/null @@ -1,462 +0,0 @@ -SET storage_engine=bdb; -DROP TABLE IF EXISTS t1, gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry; -CREATE TABLE gis_point (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT); -CREATE TABLE gis_line (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g LINESTRING); -CREATE TABLE gis_polygon (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POLYGON); -CREATE TABLE gis_multi_point (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTIPOINT); -CREATE TABLE gis_multi_line (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTILINESTRING); -CREATE TABLE gis_multi_polygon (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTIPOLYGON); -CREATE TABLE gis_geometrycollection (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g GEOMETRYCOLLECTION); -CREATE TABLE gis_geometry (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g GEOMETRY); -SHOW CREATE TABLE gis_point; -Table Create Table -gis_point CREATE TABLE `gis_point` ( - `fid` int(11) NOT NULL AUTO_INCREMENT, - `g` point DEFAULT NULL, - PRIMARY KEY (`fid`) -) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 -SHOW FIELDS FROM gis_point; -Field Type Null Key Default Extra -fid int(11) NO PRI NULL auto_increment -g point YES NULL -SHOW FIELDS FROM gis_line; -Field Type Null Key Default Extra -fid int(11) NO PRI NULL auto_increment -g linestring YES NULL -SHOW FIELDS FROM gis_polygon; -Field Type Null Key Default Extra -fid 
int(11) NO PRI NULL auto_increment -g polygon YES NULL -SHOW FIELDS FROM gis_multi_point; -Field Type Null Key Default Extra -fid int(11) NO PRI NULL auto_increment -g multipoint YES NULL -SHOW FIELDS FROM gis_multi_line; -Field Type Null Key Default Extra -fid int(11) NO PRI NULL auto_increment -g multilinestring YES NULL -SHOW FIELDS FROM gis_multi_polygon; -Field Type Null Key Default Extra -fid int(11) NO PRI NULL auto_increment -g multipolygon YES NULL -SHOW FIELDS FROM gis_geometrycollection; -Field Type Null Key Default Extra -fid int(11) NO PRI NULL auto_increment -g geometrycollection YES NULL -SHOW FIELDS FROM gis_geometry; -Field Type Null Key Default Extra -fid int(11) NO PRI NULL auto_increment -g geometry YES NULL -INSERT INTO gis_point VALUES -(101, PointFromText('POINT(10 10)')), -(102, PointFromText('POINT(20 10)')), -(103, PointFromText('POINT(20 20)')), -(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)')))); -INSERT INTO gis_line VALUES -(105, LineFromText('LINESTRING(0 0,0 10,10 0)')), -(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')), -(107, LineStringFromWKB(LineString(Point(10, 10), Point(40, 10)))); -INSERT INTO gis_polygon VALUES -(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')), -(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')), -(110, PolyFromWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))); -INSERT INTO gis_multi_point VALUES -(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')), -(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')), -(113, MPointFromWKB(MultiPoint(Point(3, 6), Point(4, 10)))); -INSERT INTO gis_multi_line VALUES -(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')), -(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')), -(116, MLineFromWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))); -INSERT INTO gis_multi_polygon VALUES -(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')), -(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')), -(119, MPolyFromWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))); -INSERT INTO gis_geometrycollection VALUES -(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')), -(121, GeometryFromWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9))))); -INSERT into gis_geometry SELECT * FROM gis_point; -INSERT into gis_geometry SELECT * FROM gis_line; -INSERT into gis_geometry SELECT * FROM gis_polygon; -INSERT into gis_geometry SELECT * FROM gis_multi_point; -INSERT into gis_geometry SELECT * FROM gis_multi_line; -INSERT into gis_geometry SELECT * FROM gis_multi_polygon; -INSERT into gis_geometry SELECT * FROM gis_geometrycollection; -SELECT fid, AsText(g) FROM gis_point ORDER by fid; -fid AsText(g) -101 POINT(10 10) -102 POINT(20 10) -103 POINT(20 20) -104 POINT(10 20) -SELECT fid, AsText(g) FROM gis_line ORDER by fid; -fid AsText(g) -105 LINESTRING(0 0,0 10,10 0) -106 LINESTRING(10 10,20 10,20 20,10 20,10 10) -107 LINESTRING(10 10,40 10) -SELECT fid, AsText(g) FROM gis_polygon ORDER by fid; -fid AsText(g) -108 POLYGON((10 10,20 10,20 20,10 20,10 10)) -109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10)) -110 POLYGON((0 
0,30 0,30 30,0 0)) -SELECT fid, AsText(g) FROM gis_multi_point ORDER by fid; -fid AsText(g) -111 MULTIPOINT(0 0,10 10,10 20,20 20) -112 MULTIPOINT(1 1,11 11,11 21,21 21) -113 MULTIPOINT(3 6,4 10) -SELECT fid, AsText(g) FROM gis_multi_line ORDER by fid; -fid AsText(g) -114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48)) -115 MULTILINESTRING((10 48,10 21,10 0)) -116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7)) -SELECT fid, AsText(g) FROM gis_multi_polygon ORDER by fid; -fid AsText(g) -117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) -118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) -119 MULTIPOLYGON(((0 3,3 3,3 0,0 3))) -SELECT fid, AsText(g) FROM gis_geometrycollection ORDER by fid; -fid AsText(g) -120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10)) -121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9)) -SELECT fid, AsText(g) FROM gis_geometry ORDER by fid; -fid AsText(g) -101 POINT(10 10) -102 POINT(20 10) -103 POINT(20 20) -104 POINT(10 20) -105 LINESTRING(0 0,0 10,10 0) -106 LINESTRING(10 10,20 10,20 20,10 20,10 10) -107 LINESTRING(10 10,40 10) -108 POLYGON((10 10,20 10,20 20,10 20,10 10)) -109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10)) -110 POLYGON((0 0,30 0,30 30,0 0)) -111 MULTIPOINT(0 0,10 10,10 20,20 20) -112 MULTIPOINT(1 1,11 11,11 21,21 21) -113 MULTIPOINT(3 6,4 10) -114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48)) -115 MULTILINESTRING((10 48,10 21,10 0)) -116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7)) -117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) -118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) -119 MULTIPOLYGON(((0 3,3 3,3 0,0 3))) -120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10)) -121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9)) -SELECT fid, Dimension(g) FROM gis_geometry ORDER by fid; -fid Dimension(g) -101 0 -102 0 -103 0 -104 0 -105 1 -106 1 -107 1 -108 2 -109 2 -110 2 -111 0 -112 0 -113 0 -114 1 -115 1 -116 1 -117 2 -118 2 -119 2 -120 1 -121 1 -SELECT fid, GeometryType(g) FROM gis_geometry ORDER by fid; -fid GeometryType(g) -101 POINT -102 POINT -103 POINT -104 POINT -105 LINESTRING -106 LINESTRING -107 LINESTRING -108 POLYGON -109 POLYGON -110 POLYGON -111 MULTIPOINT -112 MULTIPOINT -113 MULTIPOINT -114 MULTILINESTRING -115 MULTILINESTRING -116 MULTILINESTRING -117 MULTIPOLYGON -118 MULTIPOLYGON -119 MULTIPOLYGON -120 GEOMETRYCOLLECTION -121 GEOMETRYCOLLECTION -SELECT fid, IsEmpty(g) FROM gis_geometry ORDER by fid; -fid IsEmpty(g) -101 0 -102 0 -103 0 -104 0 -105 0 -106 0 -107 0 -108 0 -109 0 -110 0 -111 0 -112 0 -113 0 -114 0 -115 0 -116 0 -117 0 -118 0 -119 0 -120 0 -121 0 -SELECT fid, AsText(Envelope(g)) FROM gis_geometry ORDER by fid; -fid AsText(Envelope(g)) -101 POLYGON((10 10,10 10,10 10,10 10,10 10)) -102 POLYGON((20 10,20 10,20 10,20 10,20 10)) -103 POLYGON((20 20,20 20,20 20,20 20,20 20)) -104 POLYGON((10 20,10 20,10 20,10 20,10 20)) -105 POLYGON((0 0,10 0,10 10,0 10,0 0)) -106 POLYGON((10 10,20 10,20 20,10 20,10 10)) -107 POLYGON((10 10,40 10,40 10,10 10,10 10)) -108 POLYGON((10 10,20 10,20 20,10 20,10 10)) -109 POLYGON((0 0,50 0,50 50,0 50,0 0)) -110 POLYGON((0 0,30 0,30 30,0 30,0 0)) -111 POLYGON((0 0,20 0,20 20,0 20,0 0)) -112 POLYGON((1 1,21 1,21 21,1 21,1 1)) -113 POLYGON((3 6,4 6,4 10,3 10,3 6)) -114 POLYGON((10 0,16 0,16 48,10 48,10 0)) 
-115 POLYGON((10 0,10 0,10 48,10 48,10 0)) -116 POLYGON((1 2,21 2,21 8,1 8,1 2)) -117 POLYGON((28 0,84 0,84 42,28 42,28 0)) -118 POLYGON((28 0,84 0,84 42,28 42,28 0)) -119 POLYGON((0 0,3 0,3 3,0 3,0 0)) -120 POLYGON((0 0,10 0,10 10,0 10,0 0)) -121 POLYGON((3 6,44 6,44 9,3 9,3 6)) -explain extended select Dimension(g), GeometryType(g), IsEmpty(g), AsText(Envelope(g)) from gis_geometry; -id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE gis_geometry ALL NULL NULL NULL NULL 21 100.00 -Warnings: -Note 1003 select dimension(`test`.`gis_geometry`.`g`) AS `Dimension(g)`,geometrytype(`test`.`gis_geometry`.`g`) AS `GeometryType(g)`,isempty(`test`.`gis_geometry`.`g`) AS `IsEmpty(g)`,astext(envelope(`test`.`gis_geometry`.`g`)) AS `AsText(Envelope(g))` from `test`.`gis_geometry` -SELECT fid, X(g) FROM gis_point ORDER by fid; -fid X(g) -101 10 -102 20 -103 20 -104 10 -SELECT fid, Y(g) FROM gis_point ORDER by fid; -fid Y(g) -101 10 -102 10 -103 20 -104 20 -explain extended select X(g),Y(g) FROM gis_point; -id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE gis_point ALL NULL NULL NULL NULL 4 100.00 -Warnings: -Note 1003 select x(`test`.`gis_point`.`g`) AS `X(g)`,y(`test`.`gis_point`.`g`) AS `Y(g)` from `test`.`gis_point` -SELECT fid, AsText(StartPoint(g)) FROM gis_line ORDER by fid; -fid AsText(StartPoint(g)) -105 POINT(0 0) -106 POINT(10 10) -107 POINT(10 10) -SELECT fid, AsText(EndPoint(g)) FROM gis_line ORDER by fid; -fid AsText(EndPoint(g)) -105 POINT(10 0) -106 POINT(10 10) -107 POINT(40 10) -SELECT fid, GLength(g) FROM gis_line ORDER by fid; -fid GLength(g) -105 24.142135623731 -106 40 -107 30 -SELECT fid, NumPoints(g) FROM gis_line ORDER by fid; -fid NumPoints(g) -105 3 -106 5 -107 2 -SELECT fid, AsText(PointN(g, 2)) FROM gis_line ORDER by fid; -fid AsText(PointN(g, 2)) -105 POINT(0 10) -106 POINT(20 10) -107 POINT(40 10) -SELECT fid, IsClosed(g) FROM gis_line ORDER by fid; -fid IsClosed(g) -105 0 -106 1 -107 0 -explain extended select AsText(StartPoint(g)),AsText(EndPoint(g)),GLength(g),NumPoints(g),AsText(PointN(g, 2)),IsClosed(g) FROM gis_line; -id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE gis_line ALL NULL NULL NULL NULL 3 100.00 -Warnings: -Note 1003 select astext(startpoint(`test`.`gis_line`.`g`)) AS `AsText(StartPoint(g))`,astext(endpoint(`test`.`gis_line`.`g`)) AS `AsText(EndPoint(g))`,glength(`test`.`gis_line`.`g`) AS `GLength(g)`,numpoints(`test`.`gis_line`.`g`) AS `NumPoints(g)`,astext(pointn(`test`.`gis_line`.`g`,2)) AS `AsText(PointN(g, 2))`,isclosed(`test`.`gis_line`.`g`) AS `IsClosed(g)` from `test`.`gis_line` -SELECT fid, AsText(Centroid(g)) FROM gis_polygon ORDER by fid; -fid AsText(Centroid(g)) -108 POINT(15 15) -109 POINT(25.416666666667 25.416666666667) -110 POINT(20 10) -SELECT fid, Area(g) FROM gis_polygon ORDER by fid; -fid Area(g) -108 100 -109 2400 -110 450 -SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon ORDER by fid; -fid AsText(ExteriorRing(g)) -108 LINESTRING(10 10,20 10,20 20,10 20,10 10) -109 LINESTRING(0 0,50 0,50 50,0 50,0 0) -110 LINESTRING(0 0,30 0,30 30,0 0) -SELECT fid, NumInteriorRings(g) FROM gis_polygon ORDER by fid; -fid NumInteriorRings(g) -108 0 -109 1 -110 0 -SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon ORDER by fid; -fid AsText(InteriorRingN(g, 1)) -108 NULL -109 LINESTRING(10 10,20 10,20 20,10 20,10 10) -110 NULL -explain extended select 
AsText(Centroid(g)),Area(g),AsText(ExteriorRing(g)),NumInteriorRings(g),AsText(InteriorRingN(g, 1)) FROM gis_polygon; -id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE gis_polygon ALL NULL NULL NULL NULL 3 100.00 -Warnings: -Note 1003 select astext(centroid(`test`.`gis_polygon`.`g`)) AS `AsText(Centroid(g))`,area(`test`.`gis_polygon`.`g`) AS `Area(g)`,astext(exteriorring(`test`.`gis_polygon`.`g`)) AS `AsText(ExteriorRing(g))`,numinteriorrings(`test`.`gis_polygon`.`g`) AS `NumInteriorRings(g)`,astext(interiorringn(`test`.`gis_polygon`.`g`,1)) AS `AsText(InteriorRingN(g, 1))` from `test`.`gis_polygon` -SELECT fid, IsClosed(g) FROM gis_multi_line ORDER by fid; -fid IsClosed(g) -114 0 -115 0 -116 0 -SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon ORDER by fid; -fid AsText(Centroid(g)) -117 POINT(55.588527753042 17.426536064114) -118 POINT(55.588527753042 17.426536064114) -119 POINT(2 2) -SELECT fid, Area(g) FROM gis_multi_polygon ORDER by fid; -fid Area(g) -117 1684.5 -118 1684.5 -119 4.5 -SELECT fid, NumGeometries(g) from gis_multi_point ORDER by fid; -fid NumGeometries(g) -111 4 -112 4 -113 2 -SELECT fid, NumGeometries(g) from gis_multi_line ORDER by fid; -fid NumGeometries(g) -114 2 -115 1 -116 2 -SELECT fid, NumGeometries(g) from gis_multi_polygon ORDER by fid; -fid NumGeometries(g) -117 2 -118 2 -119 1 -SELECT fid, NumGeometries(g) from gis_geometrycollection ORDER by fid; -fid NumGeometries(g) -120 2 -121 2 -explain extended SELECT fid, NumGeometries(g) from gis_multi_point; -id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE gis_multi_point ALL NULL NULL NULL NULL 3 100.00 -Warnings: -Note 1003 select `test`.`gis_multi_point`.`fid` AS `fid`,numgeometries(`test`.`gis_multi_point`.`g`) AS `NumGeometries(g)` from `test`.`gis_multi_point` -SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point ORDER by fid; -fid AsText(GeometryN(g, 2)) -111 POINT(10 10) -112 POINT(11 11) -113 POINT(4 10) -SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line ORDER by fid; -fid AsText(GeometryN(g, 2)) -114 LINESTRING(16 0,16 23,16 48) -115 NULL -116 LINESTRING(2 5,5 8,21 7) -SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon ORDER by fid; -fid AsText(GeometryN(g, 2)) -117 POLYGON((59 18,67 18,67 13,59 13,59 18)) -118 POLYGON((59 18,67 18,67 13,59 13,59 18)) -119 NULL -SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection ORDER by fid; -fid AsText(GeometryN(g, 2)) -120 LINESTRING(0 0,10 10) -121 LINESTRING(3 6,7 9) -SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection ORDER by fid; -fid AsText(GeometryN(g, 1)) -120 POINT(0 0) -121 POINT(44 6) -explain extended SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point; -id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE gis_multi_point ALL NULL NULL NULL NULL 3 100.00 -Warnings: -Note 1003 select `test`.`gis_multi_point`.`fid` AS `fid`,astext(geometryn(`test`.`gis_multi_point`.`g`,2)) AS `AsText(GeometryN(g, 2))` from `test`.`gis_multi_point` -SELECT g1.fid as first, g2.fid as second, -Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o, -Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t, -Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r -FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second; -first second w c o e d t i r -120 120 1 1 0 1 0 0 1 0 -120 121 0 0 0 0 0 0 1 0 -121 120 0 0 1 0 0 0 1 0 -121 121 1 1 0 1 0 0 1 0 -explain 
extended SELECT g1.fid as first, g2.fid as second, -Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o, -Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t, -Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r -FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second; -id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE g1 ALL NULL NULL NULL NULL 2 100.00 Using temporary; Using filesort -1 SIMPLE g2 ALL NULL NULL NULL NULL 2 100.00 -Warnings: -Note 1003 select `test`.`g1`.`fid` AS `first`,`test`.`g2`.`fid` AS `second`,within(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `w`,contains(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `c`,overlaps(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `o`,equals(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `e`,disjoint(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `d`,touches(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `t`,intersects(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `i`,crosses(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `r` from `test`.`gis_geometrycollection` `g1` join `test`.`gis_geometrycollection` `g2` order by `test`.`g1`.`fid`,`test`.`g2`.`fid` -DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry; -CREATE TABLE t1 ( -a INTEGER PRIMARY KEY AUTO_INCREMENT, -gp point, -ln linestring, -pg polygon, -mp multipoint, -mln multilinestring, -mpg multipolygon, -gc geometrycollection, -gm geometry -); -SHOW FIELDS FROM t1; -Field Type Null Key Default Extra -a int(11) NO PRI NULL auto_increment -gp point YES NULL -ln linestring YES NULL -pg polygon YES NULL -mp multipoint YES NULL -mln multilinestring YES NULL -mpg multipolygon YES NULL -gc geometrycollection YES NULL -gm geometry YES NULL -ALTER TABLE t1 ADD fid INT; -SHOW FIELDS FROM t1; -Field Type Null Key Default Extra -a int(11) NO PRI NULL auto_increment -gp point YES NULL -ln linestring YES NULL -pg polygon YES NULL -mp multipoint YES NULL -mln multilinestring YES NULL -mpg multipolygon YES NULL -gc geometrycollection YES NULL -gm geometry YES NULL -fid int(11) YES NULL -DROP TABLE t1; -create table t1 (pk integer primary key auto_increment, a geometry not null); -insert into t1 (a) values (GeomFromText('Point(1 2)')); -insert into t1 (a) values ('Garbage'); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -insert IGNORE into t1 (a) values ('Garbage'); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -drop table t1; -create table t1 (pk integer primary key auto_increment, fl geometry); -insert into t1 (fl) values (1); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -insert into t1 (fl) values (1.11); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -insert into t1 (fl) values ("qwerty"); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -insert into t1 (fl) values (pointfromtext('point(1,1)')); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -drop table t1; diff --git a/mysql-test/r/binlog_row_binlog.result b/mysql-test/r/binlog_row_binlog.result index 17c1d171b5c..28f2284d3d2 100644 --- a/mysql-test/r/binlog_row_binlog.result +++ b/mysql-test/r/binlog_row_binlog.result @@ -1,6 +1,6 @@ drop table if exists t1, t2; reset master; -create table t1 (a int) engine=bdb; +create table t1 (a int) engine=innodb; create table t2 (a int) engine=innodb; begin; insert t1 values (5); @@ 
-10,12 +10,12 @@ insert t2 values (5); commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=bdb +master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb master-bin.000001 # Query 1 # use `test`; BEGIN master-bin.000001 # Table_map 1 # table_id: # (test.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; COMMIT +master-bin.000001 # Xid 1 # COMMIT /* xid= */ master-bin.000001 # Query 1 # use `test`; BEGIN master-bin.000001 # Table_map 1 # table_id: # (test.t2) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F diff --git a/mysql-test/r/binlog_stm_binlog.result b/mysql-test/r/binlog_stm_binlog.result index 4e23db4828f..60735be3ac6 100644 --- a/mysql-test/r/binlog_stm_binlog.result +++ b/mysql-test/r/binlog_stm_binlog.result @@ -12,7 +12,7 @@ master-bin.000001 367 Xid 1 394 COMMIT /* XID */ drop table t1; drop table if exists t1, t2; reset master; -create table t1 (a int) engine=bdb; +create table t1 (a int) engine=innodb; create table t2 (a int) engine=innodb; begin; insert t1 values (5); @@ -22,11 +22,11 @@ insert t2 values (5); commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=bdb +master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb master-bin.000001 # Query 1 # use `test`; BEGIN master-bin.000001 # Query 1 # use `test`; insert t1 values (5) -master-bin.000001 # Query 1 # use `test`; COMMIT +master-bin.000001 # Xid 1 # COMMIT /* xid= */ master-bin.000001 # Query 1 # use `test`; BEGIN master-bin.000001 # Query 1 # use `test`; insert t2 values (5) master-bin.000001 # Xid 1 # COMMIT /* xid= */ diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result index 3dcf88b8df5..2f82cb2aecb 100644 --- a/mysql-test/r/ctype_utf8.result +++ b/mysql-test/r/ctype_utf8.result @@ -479,7 +479,7 @@ drop table t1; create table t1 ( c char(10) character set utf8, unique key a (c(1)) -) engine=bdb; +) engine=innodb; insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f'); insert into t1 values ('aa'); ERROR 23000: Duplicate entry 'aa' for key 'a' @@ -637,7 +637,7 @@ drop table t1; create table t1 ( c char(10) character set utf8 collate utf8_bin, unique key a (c(1)) -) engine=bdb; +) engine=innodb; insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f'); insert into t1 values ('aa'); ERROR 23000: Duplicate entry 'aa' for key 'a' @@ -707,7 +707,7 @@ drop table t1; create table t1 ( str varchar(255) character set utf8 not null, key str (str(2)) -) engine=bdb; +) engine=innodb; INSERT INTO t1 VALUES ('str'); INSERT INTO t1 VALUES ('str2'); select * from t1 where str='str'; @@ -796,7 +796,7 @@ insert into t1 values(1,'foo'),(2,'foobar'); select * from t1 where b like 'foob%'; a b 2 foobar -alter table t1 engine=bdb; +alter table t1 engine=innodb; select * from t1 where b like 'foob%'; a b 2 foobar diff --git a/mysql-test/r/have_bdb.require b/mysql-test/r/have_bdb.require deleted file mode 100644 index 969cd6863db..00000000000 --- a/mysql-test/r/have_bdb.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -have_bdb YES diff --git a/mysql-test/r/im_options.result b/mysql-test/r/im_options.result 
index cb678581533..f35f226f665 100644 --- a/mysql-test/r/im_options.result +++ b/mysql-test/r/im_options.result @@ -43,7 +43,6 @@ character-sets-dir option_value basedir option_value skip-stack-trace option_value skip-innodb option_value -skip-bdb option_value skip-ndbcluster option_value nonguarded option_value log-output option_value @@ -64,7 +63,6 @@ character-sets-dir option_value basedir option_value skip-stack-trace option_value skip-innodb option_value -skip-bdb option_value skip-ndbcluster option_value nonguarded option_value log-output option_value diff --git a/mysql-test/r/im_utils.result b/mysql-test/r/im_utils.result index ee70d7950f8..bbd377a4af3 100644 --- a/mysql-test/r/im_utils.result +++ b/mysql-test/r/im_utils.result @@ -22,7 +22,6 @@ basedir VALUE server_id VALUE skip-stack-trace VALUE skip-innodb VALUE -skip-bdb VALUE skip-ndbcluster VALUE log-output VALUE SHOW INSTANCE OPTIONS mysqld2; @@ -41,7 +40,6 @@ basedir VALUE server_id VALUE skip-stack-trace VALUE skip-innodb VALUE -skip-bdb VALUE skip-ndbcluster VALUE nonguarded VALUE log-output VALUE diff --git a/mysql-test/r/index_merge_bdb.result b/mysql-test/r/index_merge_bdb.result deleted file mode 100644 index 3113bf95d3a..00000000000 --- a/mysql-test/r/index_merge_bdb.result +++ /dev/null @@ -1,136 +0,0 @@ -drop table if exists t1; -create table t1 ( -pk int primary key, -key1 int, -key2 int, -filler char(200), -filler2 char(200), -index(key1), -index(key2) -) engine=bdb; -select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 ); -pk key1 key2 filler filler2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -9 9 9 filler-data filler-data-2 -10 10 10 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -5 5 5 filler-data filler-data-2 -6 6 6 filler-data filler-data-2 -7 7 7 filler-data filler-data-2 -8 8 8 filler-data filler-data-2 -set @maxv=1000; -select * from t1 where -(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) -or key1=18 or key1=60; -pk key1 key2 filler filler2 -18 18 18 filler-data filler-data-2 -60 60 60 filler-data filler-data-2 -1 1 1 filler-data filler-data-2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -11 11 11 filler-data filler-data-2 -12 12 12 filler-data filler-data-2 -13 13 13 filler-data filler-data-2 -14 14 14 filler-data filler-data-2 -50 50 50 filler-data filler-data-2 -51 51 51 filler-data filler-data-2 -52 52 52 filler-data filler-data-2 -53 53 53 filler-data filler-data-2 -54 54 54 filler-data filler-data-2 -991 991 991 filler-data filler-data-2 -992 992 992 filler-data filler-data-2 -993 993 993 filler-data filler-data-2 -994 994 994 filler-data filler-data-2 -995 995 995 filler-data filler-data-2 -996 996 996 filler-data filler-data-2 -997 997 997 filler-data filler-data-2 -998 998 998 filler-data filler-data-2 -999 999 999 filler-data filler-data-2 -1000 1000 1000 filler-data filler-data-2 -select * from t1 where -(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) -or key1 < 3 or key1 > @maxv-11; -pk key1 key2 filler filler2 -990 990 990 filler-data filler-data-2 -1 1 1 filler-data filler-data-2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -11 11 11 filler-data filler-data-2 -12 12 12 filler-data filler-data-2 -13 13 13 filler-data filler-data-2 -14 14 14 filler-data filler-data-2 -50 50 50 filler-data filler-data-2 -51 51 51 filler-data filler-data-2 -52 52 52 filler-data 
filler-data-2 -53 53 53 filler-data filler-data-2 -54 54 54 filler-data filler-data-2 -991 991 991 filler-data filler-data-2 -992 992 992 filler-data filler-data-2 -993 993 993 filler-data filler-data-2 -994 994 994 filler-data filler-data-2 -995 995 995 filler-data filler-data-2 -996 996 996 filler-data filler-data-2 -997 997 997 filler-data filler-data-2 -998 998 998 filler-data filler-data-2 -999 999 999 filler-data filler-data-2 -1000 1000 1000 filler-data filler-data-2 -select * from t1 where -(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) -or -(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10); -pk key1 key2 filler filler2 -1 1 1 filler-data filler-data-2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -11 11 11 filler-data filler-data-2 -12 12 12 filler-data filler-data-2 -13 13 13 filler-data filler-data-2 -14 14 14 filler-data filler-data-2 -50 50 50 filler-data filler-data-2 -51 51 51 filler-data filler-data-2 -52 52 52 filler-data filler-data-2 -53 53 53 filler-data filler-data-2 -54 54 54 filler-data filler-data-2 -991 991 991 filler-data filler-data-2 -992 992 992 filler-data filler-data-2 -993 993 993 filler-data filler-data-2 -994 994 994 filler-data filler-data-2 -995 995 995 filler-data filler-data-2 -996 996 996 filler-data filler-data-2 -997 997 997 filler-data filler-data-2 -998 998 998 filler-data filler-data-2 -999 999 999 filler-data filler-data-2 -1000 1000 1000 filler-data filler-data-2 -select * from t1 where -(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) -or -(key1 < 5) or (key1 > @maxv-10); -pk key1 key2 filler filler2 -1 1 1 filler-data filler-data-2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -991 991 991 filler-data filler-data-2 -992 992 992 filler-data filler-data-2 -993 993 993 filler-data filler-data-2 -994 994 994 filler-data filler-data-2 -995 995 995 filler-data filler-data-2 -996 996 996 filler-data filler-data-2 -997 997 997 filler-data filler-data-2 -998 998 998 filler-data filler-data-2 -999 999 999 filler-data filler-data-2 -1000 1000 1000 filler-data filler-data-2 -11 11 11 filler-data filler-data-2 -12 12 12 filler-data filler-data-2 -13 13 13 filler-data filler-data-2 -14 14 14 filler-data filler-data-2 -50 50 50 filler-data filler-data-2 -51 51 51 filler-data filler-data-2 -52 52 52 filler-data filler-data-2 -53 53 53 filler-data filler-data-2 -54 54 54 filler-data filler-data-2 -drop table t1; diff --git a/mysql-test/r/log_tables.result b/mysql-test/r/log_tables.result index 8b0a5ea6a20..638c05dd712 100644 --- a/mysql-test/r/log_tables.result +++ b/mysql-test/r/log_tables.result @@ -29,13 +29,13 @@ on (mysql.general_log.command_type = join_test.command_type) drop table join_test; flush logs; lock tables mysql.general_log WRITE; -ERROR HY000: You can't write-lock a log table. Only read access is possible. +ERROR HY000: You can't write-lock a log table. Only read access is possible lock tables mysql.slow_log WRITE; -ERROR HY000: You can't write-lock a log table. Only read access is possible. +ERROR HY000: You can't write-lock a log table. Only read access is possible lock tables mysql.general_log READ; -ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead. +ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead lock tables mysql.slow_log READ; -ERROR HY000: You can't use usual read lock with log tables. 
Try READ LOCAL instead. +ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL; unlock tables; lock tables mysql.general_log READ LOCAL; @@ -161,13 +161,13 @@ TIMESTAMP USER_HOST THREAD_ID 1 Query set global slow_query_log='ON' TIMESTAMP USER_HOST THREAD_ID 1 Query select * from mysql.general_log flush logs; lock tables mysql.general_log WRITE; -ERROR HY000: You can't write-lock a log table. Only read access is possible. +ERROR HY000: You can't write-lock a log table. Only read access is possible lock tables mysql.slow_log WRITE; -ERROR HY000: You can't write-lock a log table. Only read access is possible. +ERROR HY000: You can't write-lock a log table. Only read access is possible lock tables mysql.general_log READ; -ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead. +ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead lock tables mysql.slow_log READ; -ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead. +ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL; unlock tables; set global general_log='OFF'; diff --git a/mysql-test/r/multi_update.result b/mysql-test/r/multi_update.result index 8791b8cc080..3ed0222fcdb 100644 --- a/mysql-test/r/multi_update.result +++ b/mysql-test/r/multi_update.result @@ -492,7 +492,7 @@ create table t2 like t1; insert into t2 select * from t1; delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; drop table t1,t2; -create table t1 ( c char(8) not null ) engine=bdb; +create table t1 ( c char(8) not null ) engine=innodb; insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F'); alter table t1 add b char(8) not null; diff --git a/mysql-test/r/ndb_lock.result b/mysql-test/r/ndb_lock.result index 197995505a1..668c26aad03 100644 --- a/mysql-test/r/ndb_lock.result +++ b/mysql-test/r/ndb_lock.result @@ -64,17 +64,26 @@ pk u o insert into t1 values (1,1,1); drop table t1; create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb; -insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3); +insert into t1 values (1,'one',1); begin; select * from t1 where x = 1 for update; x y z 1 one 1 begin; +select * from t1 where x = 1 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +rollback; +rollback; +insert into t1 values (2,'two',2),(3,"three",3); +begin; +select * from t1 where x = 1 for update; +x y z +1 one 1 +select * from t1 where x = 1 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction select * from t1 where x = 2 for update; x y z 2 two 2 -select * from t1 where x = 1 for update; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction rollback; commit; begin; diff --git a/mysql-test/r/ps_6bdb.result b/mysql-test/r/ps_6bdb.result deleted file mode 100644 index 94e6da6ccd1..00000000000 --- a/mysql-test/r/ps_6bdb.result +++ /dev/null @@ -1,3120 +0,0 @@ -use test; -drop table if exists t1, t9 ; -create table t1 -( -a int, b varchar(30), -primary key(a) -) engine = 'BDB' ; -create table t9 -( -c1 tinyint, c2 smallint, c3 mediumint, c4 int, -c5 integer, c6 bigint, c7 float, c8 double, -c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), -c13 date, c14 datetime, c15 timestamp, 
c16 time, -c17 year, c18 tinyint, c19 bool, c20 char, -c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, -c25 blob, c26 text, c27 mediumblob, c28 mediumtext, -c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), -c32 set('monday', 'tuesday', 'wednesday'), -primary key(c1) -) engine = 'BDB' ; -delete from t1 ; -insert into t1 values (1,'one'); -insert into t1 values (2,'two'); -insert into t1 values (3,'three'); -insert into t1 values (4,'four'); -commit ; -delete from t9 ; -insert into t9 -set c1= 1, c2= 1, c3= 1, c4= 1, c5= 1, c6= 1, c7= 1, c8= 1, c9= 1, -c10= 1, c11= 1, c12 = 1, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=true, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='one', c32= 'monday'; -insert into t9 -set c1= 9, c2= 9, c3= 9, c4= 9, c5= 9, c6= 9, c7= 9, c8= 9, c9= 9, -c10= 9, c11= 9, c12 = 9, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=false, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='two', c32= 'tuesday'; -commit ; -test_sequence ------- simple select tests ------ -prepare stmt1 from ' select * from t9 order by c1 ' ; -execute stmt1; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def test t9 t9 c1 c1 1 4 1 N 49155 0 63 -def test t9 t9 c2 c2 2 6 1 Y 32768 0 63 -def test t9 t9 c3 c3 9 9 1 Y 32768 0 63 -def test t9 t9 c4 c4 3 11 1 Y 32768 0 63 -def test t9 t9 c5 c5 3 11 1 Y 32768 0 63 -def test t9 t9 c6 c6 8 20 1 Y 32768 0 63 -def test t9 t9 c7 c7 4 12 1 Y 32768 31 63 -def test t9 t9 c8 c8 5 22 1 Y 32768 31 63 -def test t9 t9 c9 c9 5 22 1 Y 32768 31 63 -def test t9 t9 c10 c10 5 22 1 Y 32768 31 63 -def test t9 t9 c11 c11 246 9 6 Y 0 4 63 -def test t9 t9 c12 c12 246 10 6 Y 0 4 63 -def test t9 t9 c13 c13 10 10 10 Y 128 0 63 -def test t9 t9 c14 c14 12 19 19 Y 128 0 63 -def test t9 t9 c15 c15 7 19 19 N 1249 0 63 -def test t9 t9 c16 c16 11 8 8 Y 128 0 63 -def test t9 t9 c17 c17 13 4 4 Y 32864 0 63 -def test t9 t9 c18 c18 1 4 1 Y 32768 0 63 -def test t9 t9 c19 c19 1 1 1 Y 32768 0 63 -def test t9 t9 c20 c20 254 1 1 Y 0 0 8 -def test t9 t9 c21 c21 254 10 10 Y 0 0 8 -def test t9 t9 c22 c22 253 30 30 Y 0 0 8 -def test t9 t9 c23 c23 252 255 8 Y 144 0 63 -def test t9 t9 c24 c24 252 255 8 Y 16 0 8 -def test t9 t9 c25 c25 252 65535 4 Y 144 0 63 -def test t9 t9 c26 c26 252 65535 4 Y 16 0 8 -def test t9 t9 c27 c27 252 16777215 10 Y 144 0 63 -def test t9 t9 c28 c28 252 16777215 10 Y 16 0 8 -def test t9 t9 c29 c29 252 4294967295 8 Y 144 0 63 -def test t9 t9 c30 c30 252 4294967295 8 Y 16 0 8 -def test t9 t9 c31 c31 254 5 3 Y 256 0 8 -def test t9 t9 c32 c32 254 24 7 Y 2048 0 8 -c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 c19 c20 c21 c22 c23 c24 c25 c26 c27 c28 c29 c30 c31 c32 -1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday -9 9 9 9 9 9 9 9 9 9 9.0000 9.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 0 a 123456789a 
123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext two tuesday -set @arg00='SELECT' ; -@arg00 a from t1 where a=1; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '@arg00 a from t1 where a=1' at line 1 -prepare stmt1 from ' ? a from t1 where a=1 '; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '? a from t1 where a=1' at line 1 -set @arg00=1 ; -select @arg00, b from t1 where a=1 ; -@arg00 b -1 one -prepare stmt1 from ' select ?, b from t1 where a=1 ' ; -execute stmt1 using @arg00 ; -? b -1 one -set @arg00='lion' ; -select @arg00, b from t1 where a=1 ; -@arg00 b -lion one -prepare stmt1 from ' select ?, b from t1 where a=1 ' ; -execute stmt1 using @arg00 ; -? b -lion one -set @arg00=NULL ; -select @arg00, b from t1 where a=1 ; -@arg00 b -NULL one -prepare stmt1 from ' select ?, b from t1 where a=1 ' ; -execute stmt1 using @arg00 ; -? b -NULL one -set @arg00=1 ; -select b, a - @arg00 from t1 where a=1 ; -b a - @arg00 -one 0 -prepare stmt1 from ' select b, a - ? from t1 where a=1 ' ; -execute stmt1 using @arg00 ; -b a - ? -one 0 -set @arg00=null ; -select @arg00 as my_col ; -my_col -NULL -prepare stmt1 from ' select ? as my_col'; -execute stmt1 using @arg00 ; -my_col -NULL -select @arg00 + 1 as my_col ; -my_col -NULL -prepare stmt1 from ' select ? + 1 as my_col'; -execute stmt1 using @arg00 ; -my_col -NULL -select 1 + @arg00 as my_col ; -my_col -NULL -prepare stmt1 from ' select 1 + ? as my_col'; -execute stmt1 using @arg00 ; -my_col -NULL -set @arg00='MySQL' ; -select substr(@arg00,1,2) from t1 where a=1 ; -substr(@arg00,1,2) -My -prepare stmt1 from ' select substr(?,1,2) from t1 where a=1 ' ; -execute stmt1 using @arg00 ; -substr(?,1,2) -My -set @arg00=3 ; -select substr('MySQL',@arg00,5) from t1 where a=1 ; -substr('MySQL',@arg00,5) -SQL -prepare stmt1 from ' select substr(''MySQL'',?,5) from t1 where a=1 ' ; -execute stmt1 using @arg00 ; -substr('MySQL',?,5) -SQL -select substr('MySQL',1,@arg00) from t1 where a=1 ; -substr('MySQL',1,@arg00) -MyS -prepare stmt1 from ' select substr(''MySQL'',1,?) from t1 where a=1 ' ; -execute stmt1 using @arg00 ; -substr('MySQL',1,?) -MyS -set @arg00='MySQL' ; -select a , concat(@arg00,b) from t1 order by a; -a concat(@arg00,b) -1 MySQLone -2 MySQLtwo -3 MySQLthree -4 MySQLfour -prepare stmt1 from ' select a , concat(?,b) from t1 order by a ' ; -execute stmt1 using @arg00; -a concat(?,b) -1 MySQLone -2 MySQLtwo -3 MySQLthree -4 MySQLfour -select a , concat(b,@arg00) from t1 order by a ; -a concat(b,@arg00) -1 oneMySQL -2 twoMySQL -3 threeMySQL -4 fourMySQL -prepare stmt1 from ' select a , concat(b,?) from t1 order by a ' ; -execute stmt1 using @arg00; -a concat(b,?) -1 oneMySQL -2 twoMySQL -3 threeMySQL -4 fourMySQL -set @arg00='MySQL' ; -select group_concat(@arg00,b order by a) from t1 -group by 'a' ; -group_concat(@arg00,b order by a) -MySQLone,MySQLtwo,MySQLthree,MySQLfour -prepare stmt1 from ' select group_concat(?,b order by a) from t1 -group by ''a'' ' ; -execute stmt1 using @arg00; -group_concat(?,b order by a) -MySQLone,MySQLtwo,MySQLthree,MySQLfour -select group_concat(b,@arg00 order by a) from t1 -group by 'a' ; -group_concat(b,@arg00 order by a) -oneMySQL,twoMySQL,threeMySQL,fourMySQL -prepare stmt1 from ' select group_concat(b,? 
order by a) from t1 -group by ''a'' ' ; -execute stmt1 using @arg00; -group_concat(b,? order by a) -oneMySQL,twoMySQL,threeMySQL,fourMySQL -set @arg00='first' ; -set @arg01='second' ; -set @arg02=NULL; -select @arg00, @arg01 from t1 where a=1 ; -@arg00 @arg01 -first second -prepare stmt1 from ' select ?, ? from t1 where a=1 ' ; -execute stmt1 using @arg00, @arg01 ; -? ? -first second -execute stmt1 using @arg02, @arg01 ; -? ? -NULL second -execute stmt1 using @arg00, @arg02 ; -? ? -first NULL -execute stmt1 using @arg02, @arg02 ; -? ? -NULL NULL -drop table if exists t5 ; -create table t5 (id1 int(11) not null default '0', -value2 varchar(100), value1 varchar(100)) ; -insert into t5 values (1,'hh','hh'),(2,'hh','hh'), -(1,'ii','ii'),(2,'ii','ii') ; -prepare stmt1 from ' select id1,value1 from t5 where id1=? or value1=? order by id1,value1 ' ; -set @arg00=1 ; -set @arg01='hh' ; -execute stmt1 using @arg00, @arg01 ; -id1 value1 -1 hh -1 ii -2 hh -drop table t5 ; -drop table if exists t5 ; -create table t5(session_id char(9) not null) ; -insert into t5 values ('abc') ; -prepare stmt1 from ' select * from t5 -where ?=''1111'' and session_id = ''abc'' ' ; -set @arg00='abc' ; -execute stmt1 using @arg00 ; -session_id -set @arg00='1111' ; -execute stmt1 using @arg00 ; -session_id -abc -set @arg00='abc' ; -execute stmt1 using @arg00 ; -session_id -drop table t5 ; -set @arg00='FROM' ; -select a @arg00 t1 where a=1 ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '@arg00 t1 where a=1' at line 1 -prepare stmt1 from ' select a ? t1 where a=1 ' ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '? t1 where a=1' at line 1 -set @arg00='t1' ; -select a from @arg00 where a=1 ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '@arg00 where a=1' at line 1 -prepare stmt1 from ' select a from ? where a=1 ' ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '? where a=1' at line 1 -set @arg00='WHERE' ; -select a from t1 @arg00 a=1 ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '@arg00 a=1' at line 1 -prepare stmt1 from ' select a from t1 ? a=1 ' ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '? a=1' at line 1 -set @arg00=1 ; -select a FROM t1 where a=@arg00 ; -a -1 -prepare stmt1 from ' select a FROM t1 where a=? ' ; -execute stmt1 using @arg00 ; -a -1 -set @arg00=1000 ; -execute stmt1 using @arg00 ; -a -set @arg00=NULL ; -select a FROM t1 where a=@arg00 ; -a -prepare stmt1 from ' select a FROM t1 where a=? ' ; -execute stmt1 using @arg00 ; -a -set @arg00=4 ; -select a FROM t1 where a=sqrt(@arg00) ; -a -2 -prepare stmt1 from ' select a FROM t1 where a=sqrt(?) ' ; -execute stmt1 using @arg00 ; -a -2 -set @arg00=NULL ; -select a FROM t1 where a=sqrt(@arg00) ; -a -prepare stmt1 from ' select a FROM t1 where a=sqrt(?) ' ; -execute stmt1 using @arg00 ; -a -set @arg00=2 ; -set @arg01=3 ; -select a FROM t1 where a in (@arg00,@arg01) order by a; -a -2 -3 -prepare stmt1 from ' select a FROM t1 where a in (?,?) 
order by a '; -execute stmt1 using @arg00, @arg01; -a -2 -3 -set @arg00= 'one' ; -set @arg01= 'two' ; -set @arg02= 'five' ; -prepare stmt1 from ' select b FROM t1 where b in (?,?,?) order by b ' ; -execute stmt1 using @arg00, @arg01, @arg02 ; -b -one -two -prepare stmt1 from ' select b FROM t1 where b like ? '; -set @arg00='two' ; -execute stmt1 using @arg00 ; -b -two -set @arg00='tw%' ; -execute stmt1 using @arg00 ; -b -two -set @arg00='%wo' ; -execute stmt1 using @arg00 ; -b -two -set @arg00=null ; -insert into t9 set c1= 0, c5 = NULL ; -select c5 from t9 where c5 > NULL ; -c5 -prepare stmt1 from ' select c5 from t9 where c5 > ? '; -execute stmt1 using @arg00 ; -c5 -select c5 from t9 where c5 < NULL ; -c5 -prepare stmt1 from ' select c5 from t9 where c5 < ? '; -execute stmt1 using @arg00 ; -c5 -select c5 from t9 where c5 = NULL ; -c5 -prepare stmt1 from ' select c5 from t9 where c5 = ? '; -execute stmt1 using @arg00 ; -c5 -select c5 from t9 where c5 <=> NULL ; -c5 -NULL -prepare stmt1 from ' select c5 from t9 where c5 <=> ? '; -execute stmt1 using @arg00 ; -c5 -NULL -delete from t9 where c1= 0 ; -set @arg00='>' ; -select a FROM t1 where a @arg00 1 ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '@arg00 1' at line 1 -prepare stmt1 from ' select a FROM t1 where a ? 1 ' ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '? 1' at line 1 -set @arg00=1 ; -select a,b FROM t1 where a is not NULL -AND b is not NULL group by a - @arg00 ; -a b -1 one -2 two -3 three -4 four -prepare stmt1 from ' select a,b FROM t1 where a is not NULL -AND b is not NULL group by a - ? ' ; -execute stmt1 using @arg00 ; -a b -1 one -2 two -3 three -4 four -set @arg00='two' ; -select a,b FROM t1 where a is not NULL -AND b is not NULL having b <> @arg00 order by a ; -a b -1 one -3 three -4 four -prepare stmt1 from ' select a,b FROM t1 where a is not NULL -AND b is not NULL having b <> ? order by a ' ; -execute stmt1 using @arg00 ; -a b -1 one -3 three -4 four -set @arg00=1 ; -select a,b FROM t1 where a is not NULL -AND b is not NULL order by a - @arg00 ; -a b -1 one -2 two -3 three -4 four -prepare stmt1 from ' select a,b FROM t1 where a is not NULL -AND b is not NULL order by a - ? ' ; -execute stmt1 using @arg00 ; -a b -1 one -2 two -3 three -4 four -set @arg00=2 ; -select a,b from t1 order by 2 ; -a b -4 four -1 one -3 three -2 two -prepare stmt1 from ' select a,b from t1 -order by ? '; -execute stmt1 using @arg00; -a b -4 four -1 one -3 three -2 two -set @arg00=1 ; -execute stmt1 using @arg00; -a b -1 one -2 two -3 three -4 four -set @arg00=0 ; -execute stmt1 using @arg00; -ERROR 42S22: Unknown column '?' in 'order clause' -set @arg00=1; -prepare stmt1 from ' select a,b from t1 order by a -limit 1 '; -execute stmt1 ; -a b -1 one -prepare stmt1 from ' select a,b from t1 order by a limit ? '; -execute stmt1 using @arg00; -a b -1 one -set @arg00='b' ; -set @arg01=0 ; -set @arg02=2 ; -set @arg03=2 ; -select sum(a), @arg00 from t1 where a > @arg01 -and b is not null group by substr(b,@arg02) -having sum(a) <> @arg03 ; -sum(a) @arg00 -3 b -1 b -4 b -prepare stmt1 from ' select sum(a), ? from t1 where a > ? -and b is not null group by substr(b,?) -having sum(a) <> ? '; -execute stmt1 using @arg00, @arg01, @arg02, @arg03; -sum(a) ? 
-3 b -1 b -4 b -test_sequence ------- join tests ------ -select first.a as a1, second.a as a2 -from t1 first, t1 second -where first.a = second.a order by a1 ; -a1 a2 -1 1 -2 2 -3 3 -4 4 -prepare stmt1 from ' select first.a as a1, second.a as a2 - from t1 first, t1 second - where first.a = second.a order by a1 '; -execute stmt1 ; -a1 a2 -1 1 -2 2 -3 3 -4 4 -set @arg00='ABC'; -set @arg01='two'; -set @arg02='one'; -select first.a, @arg00, second.a FROM t1 first, t1 second -where @arg01 = first.b or first.a = second.a or second.b = @arg02 -order by second.a, first.a; -a @arg00 a -1 ABC 1 -2 ABC 1 -3 ABC 1 -4 ABC 1 -2 ABC 2 -2 ABC 3 -3 ABC 3 -2 ABC 4 -4 ABC 4 -prepare stmt1 from ' select first.a, ?, second.a FROM t1 first, t1 second - where ? = first.b or first.a = second.a or second.b = ? - order by second.a, first.a'; -execute stmt1 using @arg00, @arg01, @arg02; -a ? a -1 ABC 1 -2 ABC 1 -3 ABC 1 -4 ABC 1 -2 ABC 2 -2 ABC 3 -3 ABC 3 -2 ABC 4 -4 ABC 4 -drop table if exists t2 ; -create table t2 as select * from t1 ; -set @query1= 'SELECT * FROM t2 join t1 on (t1.a=t2.a) order by t2.a ' ; -set @query2= 'SELECT * FROM t2 natural join t1 order by t2.a ' ; -set @query3= 'SELECT * FROM t2 join t1 using(a) order by t2.a ' ; -set @query4= 'SELECT * FROM t2 left join t1 on(t1.a=t2.a) order by t2.a ' ; -set @query5= 'SELECT * FROM t2 natural left join t1 order by t2.a ' ; -set @query6= 'SELECT * FROM t2 left join t1 using(a) order by t2.a ' ; -set @query7= 'SELECT * FROM t2 right join t1 on(t1.a=t2.a) order by t2.a ' ; -set @query8= 'SELECT * FROM t2 natural right join t1 order by t2.a ' ; -set @query9= 'SELECT * FROM t2 right join t1 using(a) order by t2.a ' ; -the join statement is: -SELECT * FROM t2 right join t1 using(a) order by t2.a -prepare stmt1 from @query9 ; -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -the join statement is: -SELECT * FROM t2 natural right join t1 order by t2.a -prepare stmt1 from @query8 ; -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -the join statement is: -SELECT * FROM t2 right join t1 on(t1.a=t2.a) order by t2.a -prepare stmt1 from @query7 ; -execute stmt1 ; -a b a b -1 one 1 one -2 two 2 two -3 three 3 three -4 four 4 four -execute stmt1 ; -a b a b -1 one 1 one -2 two 2 two -3 three 3 three -4 four 4 four -execute stmt1 ; -a b a b -1 one 1 one -2 two 2 two -3 three 3 three -4 four 4 four -the join statement is: -SELECT * FROM t2 left join t1 using(a) order by t2.a -prepare stmt1 from @query6 ; -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -the join statement is: -SELECT * FROM t2 natural left join t1 order by t2.a -prepare stmt1 from @query5 ; -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -the join statement is: -SELECT * FROM t2 left join t1 on(t1.a=t2.a) order by t2.a -prepare stmt1 from @query4 ; -execute stmt1 ; -a b a b -1 one 1 one -2 two 2 two -3 three 3 three -4 four 4 four -execute stmt1 ; -a b a b -1 one 1 one -2 two 2 two -3 three 3 three -4 four 4 four -execute stmt1 ; -a b a b -1 one 1 
one -2 two 2 two -3 three 3 three -4 four 4 four -the join statement is: -SELECT * FROM t2 join t1 using(a) order by t2.a -prepare stmt1 from @query3 ; -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -execute stmt1 ; -a b b -1 one one -2 two two -3 three three -4 four four -the join statement is: -SELECT * FROM t2 natural join t1 order by t2.a -prepare stmt1 from @query2 ; -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -the join statement is: -SELECT * FROM t2 join t1 on (t1.a=t2.a) order by t2.a -prepare stmt1 from @query1 ; -execute stmt1 ; -a b a b -1 one 1 one -2 two 2 two -3 three 3 three -4 four 4 four -execute stmt1 ; -a b a b -1 one 1 one -2 two 2 two -3 three 3 three -4 four 4 four -execute stmt1 ; -a b a b -1 one 1 one -2 two 2 two -3 three 3 three -4 four 4 four -drop table t2 ; -test_sequence ------- subquery tests ------ -prepare stmt1 from ' select a, b FROM t1 outer_table where - a = (select a from t1 where b = ''two'') '; -execute stmt1 ; -a b -2 two -set @arg00='two' ; -select a, b FROM t1 outer_table where -a = (select a from t1 where b = 'two' ) and b=@arg00 ; -a b -2 two -prepare stmt1 from ' select a, b FROM t1 outer_table where - a = (select a from t1 where b = ''two'') and b=? '; -execute stmt1 using @arg00; -a b -2 two -set @arg00='two' ; -select a, b FROM t1 outer_table where -a = (select a from t1 where b = @arg00 ) and b='two' ; -a b -2 two -prepare stmt1 from ' select a, b FROM t1 outer_table where - a = (select a from t1 where b = ? ) and b=''two'' ' ; -execute stmt1 using @arg00; -a b -2 two -set @arg00=3 ; -set @arg01='three' ; -select a,b FROM t1 where (a,b) in (select 3, 'three'); -a b -3 three -select a FROM t1 where (a,b) in (select @arg00,@arg01); -a -3 -prepare stmt1 from ' select a FROM t1 where (a,b) in (select ?, ?) '; -execute stmt1 using @arg00, @arg01; -a -3 -set @arg00=1 ; -set @arg01='two' ; -set @arg02=2 ; -set @arg03='two' ; -select a, @arg00, b FROM t1 outer_table where -b=@arg01 and a = (select @arg02 from t1 where b = @arg03 ) ; -a @arg00 b -2 1 two -prepare stmt1 from ' select a, ?, b FROM t1 outer_table where - b=? and a = (select ? from t1 where b = ? ) ' ; -execute stmt1 using @arg00, @arg01, @arg02, @arg03 ; -a ? b -2 1 two -prepare stmt1 from 'select c4 FROM t9 where - c13 = (select MAX(b) from t1 where a = ?) and c22 = ? ' ; -execute stmt1 using @arg01, @arg02; -c4 -prepare stmt1 from ' select a, b FROM t1 outer_table where - a = (select a from t1 where b = outer_table.b ) order by a '; -execute stmt1 ; -a b -1 one -2 two -3 three -4 four -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; -execute stmt1 ; -ccc -1 -deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; -execute stmt1 ; -ccc -1 -deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; -execute stmt1 ; -ccc -1 -deallocate prepare stmt1 ; -set @arg00='two' ; -select a, b FROM t1 outer_table where -a = (select a from t1 where b = outer_table.b ) and b=@arg00 ; -a b -2 two -prepare stmt1 from ' select a, b FROM t1 outer_table where - a = (select a from t1 where b = outer_table.b) and b=? 
'; -execute stmt1 using @arg00; -a b -2 two -set @arg00=2 ; -select a, b FROM t1 outer_table where -a = (select a from t1 where a = @arg00 and b = outer_table.b) and b='two' ; -a b -2 two -prepare stmt1 from ' select a, b FROM t1 outer_table where - a = (select a from t1 where a = ? and b = outer_table.b) and b=''two'' ' ; -execute stmt1 using @arg00; -a b -2 two -set @arg00=2 ; -select a, b FROM t1 outer_table where -a = (select a from t1 where outer_table.a = @arg00 and a=2) and b='two' ; -a b -2 two -prepare stmt1 from ' select a, b FROM t1 outer_table where - a = (select a from t1 where outer_table.a = ? and a=2) and b=''two'' ' ; -execute stmt1 using @arg00; -a b -2 two -set @arg00=1 ; -set @arg01='two' ; -set @arg02=2 ; -set @arg03='two' ; -select a, @arg00, b FROM t1 outer_table where -b=@arg01 and a = (select @arg02 from t1 where outer_table.b = @arg03 -and outer_table.a=a ) ; -a @arg00 b -2 1 two -prepare stmt1 from ' select a, ?, b FROM t1 outer_table where - b=? and a = (select ? from t1 where outer_table.b = ? - and outer_table.a=a ) ' ; -execute stmt1 using @arg00, @arg01, @arg02, @arg03 ; -a ? b -2 1 two -set @arg00=1 ; -set @arg01=0 ; -select a, @arg00 -from ( select a - @arg00 as a from t1 where a=@arg00 ) as t2 -where a=@arg01; -a @arg00 -0 1 -prepare stmt1 from ' select a, ? - from ( select a - ? as a from t1 where a=? ) as t2 - where a=? '; -execute stmt1 using @arg00, @arg00, @arg00, @arg01 ; -a ? -0 1 -drop table if exists t2 ; -create table t2 as select * from t1; -prepare stmt1 from ' select a in (select a from t2) from t1 ' ; -execute stmt1 ; -a in (select a from t2) -1 -1 -1 -1 -drop table if exists t5, t6, t7 ; -create table t5 (a int , b int) ; -create table t6 like t5 ; -create table t7 like t5 ; -insert into t5 values (0, 100), (1, 2), (1, 3), (2, 2), (2, 7), -(2, -1), (3, 10) ; -insert into t6 values (0, 0), (1, 1), (2, 1), (3, 1), (4, 1) ; -insert into t7 values (3, 3), (2, 2), (1, 1) ; -prepare stmt1 from ' select a, (select count(distinct t5.b) as sum from t5, t6 - where t5.a=t6.a and t6.b > 0 and t5.a <= t7.b - group by t5.a order by sum limit 1) from t7 ' ; -execute stmt1 ; -a (select count(distinct t5.b) as sum from t5, t6 - where t5.a=t6.a and t6.b > 0 and t5.a <= t7.b - group by t5.a order by sum limit 1) -3 1 -2 2 -1 2 -execute stmt1 ; -a (select count(distinct t5.b) as sum from t5, t6 - where t5.a=t6.a and t6.b > 0 and t5.a <= t7.b - group by t5.a order by sum limit 1) -3 1 -2 2 -1 2 -execute stmt1 ; -a (select count(distinct t5.b) as sum from t5, t6 - where t5.a=t6.a and t6.b > 0 and t5.a <= t7.b - group by t5.a order by sum limit 1) -3 1 -2 2 -1 2 -drop table t5, t6, t7 ; -drop table if exists t2 ; -create table t2 as select * from t9; -set @stmt= ' SELECT - (SELECT SUM(c1 + c12 + 0.0) FROM t2 - where (t9.c2 - 0e-3) = t2.c2 - GROUP BY t9.c15 LIMIT 1) as scalar_s, - exists (select 1.0e+0 from t2 - where t2.c3 * 9.0000000000 = t9.c4) as exists_s, - c5 * 4 in (select c6 + 0.3e+1 from t2) as in_s, - (c7 - 4, c8 - 4) in (select c9 + 4.0, c10 + 40e-1 from t2) as in_row_s -FROM t9, -(select c25 x, c32 y from t2) tt WHERE x = c25 ' ; -prepare stmt1 from @stmt ; -execute stmt1 ; -execute stmt1 ; -set @stmt= concat('explain ',@stmt); -prepare stmt1 from @stmt ; -execute stmt1 ; -execute stmt1 ; -set @stmt= ' SELECT - (SELECT SUM(c1+c12+?) FROM t2 where (t9.c2-?)=t2.c2 - GROUP BY t9.c15 LIMIT 1) as scalar_s, - exists (select ? from t2 - where t2.c3*?=t9.c4) as exists_s, - c5*? in (select c6+? from t2) as in_s, - (c7-?, c8-?) in (select c9+?, c10+? 
from t2) as in_row_s -FROM t9, -(select c25 x, c32 y from t2) tt WHERE x =c25 ' ; -set @arg00= 0.0 ; -set @arg01= 0e-3 ; -set @arg02= 1.0e+0 ; -set @arg03= 9.0000000000 ; -set @arg04= 4 ; -set @arg05= 0.3e+1 ; -set @arg06= 4 ; -set @arg07= 4 ; -set @arg08= 4.0 ; -set @arg09= 40e-1 ; -prepare stmt1 from @stmt ; -execute stmt1 using @arg00, @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, -@arg07, @arg08, @arg09 ; -execute stmt1 using @arg00, @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, -@arg07, @arg08, @arg09 ; -set @stmt= concat('explain ',@stmt); -prepare stmt1 from @stmt ; -execute stmt1 using @arg00, @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, -@arg07, @arg08, @arg09 ; -execute stmt1 using @arg00, @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, -@arg07, @arg08, @arg09 ; -drop table t2 ; -select 1 < (select a from t1) ; -ERROR 21000: Subquery returns more than 1 row -prepare stmt1 from ' select 1 < (select a from t1) ' ; -execute stmt1 ; -ERROR 21000: Subquery returns more than 1 row -select 1 as my_col ; -my_col -1 -test_sequence ------- union tests ------ -prepare stmt1 from ' select a FROM t1 where a=1 - union distinct - select a FROM t1 where a=1 '; -execute stmt1 ; -a -1 -execute stmt1 ; -a -1 -prepare stmt1 from ' select a FROM t1 where a=1 - union all - select a FROM t1 where a=1 '; -execute stmt1 ; -a -1 -1 -prepare stmt1 from ' SELECT 1, 2 union SELECT 1 ' ; -ERROR 21000: The used SELECT statements have a different number of columns -prepare stmt1 from ' SELECT 1 union SELECT 1, 2 ' ; -ERROR 21000: The used SELECT statements have a different number of columns -prepare stmt1 from ' SELECT * from t1 union SELECT 1 ' ; -ERROR 21000: The used SELECT statements have a different number of columns -prepare stmt1 from ' SELECT 1 union SELECT * from t1 ' ; -ERROR 21000: The used SELECT statements have a different number of columns -set @arg00=1 ; -select @arg00 FROM t1 where a=1 -union distinct -select 1 FROM t1 where a=1; -@arg00 -1 -prepare stmt1 from ' select ? FROM t1 where a=1 - union distinct - select 1 FROM t1 where a=1 ' ; -execute stmt1 using @arg00; -? -1 -set @arg00=1 ; -select 1 FROM t1 where a=1 -union distinct -select @arg00 FROM t1 where a=1; -1 -1 -prepare stmt1 from ' select 1 FROM t1 where a=1 - union distinct - select ? FROM t1 where a=1 ' ; -execute stmt1 using @arg00; -1 -1 -set @arg00='a' ; -select @arg00 FROM t1 where a=1 -union distinct -select @arg00 FROM t1 where a=1; -@arg00 -a -prepare stmt1 from ' select ? FROM t1 where a=1 - union distinct - select ? FROM t1 where a=1 '; -execute stmt1 using @arg00, @arg00; -? -a -prepare stmt1 from ' select ? - union distinct - select ? '; -execute stmt1 using @arg00, @arg00; -? -a -set @arg00='a' ; -set @arg01=1 ; -set @arg02='a' ; -set @arg03=2 ; -select @arg00 FROM t1 where a=@arg01 -union distinct -select @arg02 FROM t1 where a=@arg03; -@arg00 -a -prepare stmt1 from ' select ? FROM t1 where a=? - union distinct - select ? FROM t1 where a=? ' ; -execute stmt1 using @arg00, @arg01, @arg02, @arg03; -? -a -set @arg00=1 ; -prepare stmt1 from ' select sum(a) + 200, ? from t1 -union distinct -select sum(a) + 200, 1 from t1 -group by b ' ; -execute stmt1 using @arg00; -sum(a) + 200 ? 
-210 1 -204 1 -201 1 -203 1 -202 1 -set @Oporto='Oporto' ; -set @Lisboa='Lisboa' ; -set @0=0 ; -set @1=1 ; -set @2=2 ; -set @3=3 ; -set @4=4 ; -select @Oporto,@Lisboa,@0,@1,@2,@3,@4 ; -@Oporto @Lisboa @0 @1 @2 @3 @4 -Oporto Lisboa 0 1 2 3 4 -select sum(a) + 200 as the_sum, @Oporto as the_town from t1 -group by b -union distinct -select sum(a) + 200, @Lisboa from t1 -group by b ; -the_sum the_town -204 Oporto -201 Oporto -203 Oporto -202 Oporto -204 Lisboa -201 Lisboa -203 Lisboa -202 Lisboa -prepare stmt1 from ' select sum(a) + 200 as the_sum, ? as the_town from t1 - group by b - union distinct - select sum(a) + 200, ? from t1 - group by b ' ; -execute stmt1 using @Oporto, @Lisboa; -the_sum the_town -204 Oporto -201 Oporto -203 Oporto -202 Oporto -204 Lisboa -201 Lisboa -203 Lisboa -202 Lisboa -select sum(a) + 200 as the_sum, @Oporto as the_town from t1 -where a > @1 -group by b -union distinct -select sum(a) + 200, @Lisboa from t1 -where a > @2 -group by b ; -the_sum the_town -204 Oporto -203 Oporto -202 Oporto -204 Lisboa -203 Lisboa -prepare stmt1 from ' select sum(a) + 200 as the_sum, ? as the_town from t1 - where a > ? - group by b - union distinct - select sum(a) + 200, ? from t1 - where a > ? - group by b ' ; -execute stmt1 using @Oporto, @1, @Lisboa, @2; -the_sum the_town -204 Oporto -203 Oporto -202 Oporto -204 Lisboa -203 Lisboa -select sum(a) + 200 as the_sum, @Oporto as the_town from t1 -where a > @1 -group by b -having avg(a) > @2 -union distinct -select sum(a) + 200, @Lisboa from t1 -where a > @2 -group by b -having avg(a) > @3; -the_sum the_town -204 Oporto -203 Oporto -204 Lisboa -prepare stmt1 from ' select sum(a) + 200 as the_sum, ? as the_town from t1 - where a > ? - group by b - having avg(a) > ? - union distinct - select sum(a) + 200, ? from t1 - where a > ? - group by b - having avg(a) > ? 
'; -execute stmt1 using @Oporto, @1, @2, @Lisboa, @2, @3; -the_sum the_town -204 Oporto -203 Oporto -204 Lisboa -test_sequence ------- explain select tests ------ -prepare stmt1 from ' explain select * from t9 ' ; -execute stmt1; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def id 8 3 1 N 32929 0 63 -def select_type 253 19 6 N 1 31 8 -def table 253 64 2 Y 0 31 8 -def type 253 10 3 Y 0 31 8 -def possible_keys 253 4096 0 Y 0 31 8 -def key 253 64 0 Y 0 31 8 -def key_len 253 4096 0 Y 128 31 63 -def ref 253 1024 0 Y 0 31 8 -def rows 8 10 1 Y 32928 0 63 -def Extra 253 255 0 N 1 31 8 -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t9 ALL NULL NULL NULL NULL 3 -test_sequence ------- delete tests ------ -delete from t1 ; -insert into t1 values (1,'one'); -insert into t1 values (2,'two'); -insert into t1 values (3,'three'); -insert into t1 values (4,'four'); -commit ; -delete from t9 ; -insert into t9 -set c1= 1, c2= 1, c3= 1, c4= 1, c5= 1, c6= 1, c7= 1, c8= 1, c9= 1, -c10= 1, c11= 1, c12 = 1, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=true, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='one', c32= 'monday'; -insert into t9 -set c1= 9, c2= 9, c3= 9, c4= 9, c5= 9, c6= 9, c7= 9, c8= 9, c9= 9, -c10= 9, c11= 9, c12 = 9, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=false, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='two', c32= 'tuesday'; -commit ; -prepare stmt1 from 'delete from t1 where a=2' ; -execute stmt1; -select a,b from t1 where a=2; -a b -execute stmt1; -insert into t1 values(0,NULL); -set @arg00=NULL; -prepare stmt1 from 'delete from t1 where b=?' 
; -execute stmt1 using @arg00; -select a,b from t1 where b is NULL ; -a b -0 NULL -set @arg00='one'; -execute stmt1 using @arg00; -select a,b from t1 where b=@arg00; -a b -prepare stmt1 from 'truncate table t1' ; -test_sequence ------- update tests ------ -delete from t1 ; -insert into t1 values (1,'one'); -insert into t1 values (2,'two'); -insert into t1 values (3,'three'); -insert into t1 values (4,'four'); -commit ; -delete from t9 ; -insert into t9 -set c1= 1, c2= 1, c3= 1, c4= 1, c5= 1, c6= 1, c7= 1, c8= 1, c9= 1, -c10= 1, c11= 1, c12 = 1, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=true, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='one', c32= 'monday'; -insert into t9 -set c1= 9, c2= 9, c3= 9, c4= 9, c5= 9, c6= 9, c7= 9, c8= 9, c9= 9, -c10= 9, c11= 9, c12 = 9, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=false, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='two', c32= 'tuesday'; -commit ; -prepare stmt1 from 'update t1 set b=''a=two'' where a=2' ; -execute stmt1; -select a,b from t1 where a=2; -a b -2 a=two -execute stmt1; -select a,b from t1 where a=2; -a b -2 a=two -set @arg00=NULL; -prepare stmt1 from 'update t1 set b=? where a=2' ; -execute stmt1 using @arg00; -select a,b from t1 where a=2; -a b -2 NULL -set @arg00='two'; -execute stmt1 using @arg00; -select a,b from t1 where a=2; -a b -2 two -set @arg00=2; -prepare stmt1 from 'update t1 set b=NULL where a=?' ; -execute stmt1 using @arg00; -select a,b from t1 where a=@arg00; -a b -2 NULL -update t1 set b='two' where a=@arg00; -set @arg00=2000; -execute stmt1 using @arg00; -select a,b from t1 where a=@arg00; -a b -set @arg00=2; -set @arg01=22; -prepare stmt1 from 'update t1 set a=? where a=?' ; -execute stmt1 using @arg00, @arg00; -select a,b from t1 where a=@arg00; -a b -2 two -execute stmt1 using @arg01, @arg00; -select a,b from t1 where a=@arg01; -a b -22 two -execute stmt1 using @arg00, @arg01; -select a,b from t1 where a=@arg00; -a b -2 two -set @arg00=NULL; -set @arg01=2; -execute stmt1 using @arg00, @arg01; -Warnings: -Warning 1048 Column 'a' cannot be null -select a,b from t1 order by a; -a b -0 two -1 one -3 three -4 four -set @arg00=0; -execute stmt1 using @arg01, @arg00; -select a,b from t1 order by a; -a b -1 one -2 two -3 three -4 four -set @arg00=23; -set @arg01='two'; -set @arg02=2; -set @arg03='two'; -set @arg04=2; -drop table if exists t2; -create table t2 as select a,b from t1 ; -prepare stmt1 from 'update t1 set a=? where b=? - and a in (select ? from t2 - where b = ? or a = ?)'; -execute stmt1 using @arg00, @arg01, @arg02, @arg03, @arg04 ; -affected rows: 1 -info: Rows matched: 1 Changed: 1 Warnings: 0 -select a,b from t1 where a = @arg00 ; -a b -23 two -prepare stmt1 from 'update t1 set a=? where b=? - and a not in (select ? from t2 - where b = ? 
or a = ?)'; -execute stmt1 using @arg04, @arg01, @arg02, @arg03, @arg00 ; -affected rows: 1 -info: Rows matched: 1 Changed: 1 Warnings: 0 -select a,b from t1 order by a ; -a b -1 one -2 two -3 three -4 four -drop table t2 ; -create table t2 -( -a int, b varchar(30), -primary key(a) -) engine = 'BDB' ; -insert into t2(a,b) select a, b from t1 ; -prepare stmt1 from 'update t1 set a=? where b=? - and a in (select ? from t2 - where b = ? or a = ?)'; -execute stmt1 using @arg00, @arg01, @arg02, @arg03, @arg04 ; -affected rows: 1 -info: Rows matched: 1 Changed: 1 Warnings: 0 -select a,b from t1 where a = @arg00 ; -a b -23 two -prepare stmt1 from 'update t1 set a=? where b=? - and a not in (select ? from t2 - where b = ? or a = ?)'; -execute stmt1 using @arg04, @arg01, @arg02, @arg03, @arg00 ; -affected rows: 1 -info: Rows matched: 1 Changed: 1 Warnings: 0 -select a,b from t1 order by a ; -a b -1 one -2 two -3 three -4 four -drop table t2 ; -set @arg00=1; -prepare stmt1 from 'update t1 set b=''bla'' -where a=2 -limit 1'; -execute stmt1 ; -select a,b from t1 where b = 'bla' ; -a b -2 bla -prepare stmt1 from 'update t1 set b=''bla'' where a=2 limit ?'; -execute stmt1 using @arg00; -test_sequence ------- insert tests ------ -delete from t1 ; -insert into t1 values (1,'one'); -insert into t1 values (2,'two'); -insert into t1 values (3,'three'); -insert into t1 values (4,'four'); -commit ; -delete from t9 ; -insert into t9 -set c1= 1, c2= 1, c3= 1, c4= 1, c5= 1, c6= 1, c7= 1, c8= 1, c9= 1, -c10= 1, c11= 1, c12 = 1, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=true, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='one', c32= 'monday'; -insert into t9 -set c1= 9, c2= 9, c3= 9, c4= 9, c5= 9, c6= 9, c7= 9, c8= 9, c9= 9, -c10= 9, c11= 9, c12 = 9, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=false, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='two', c32= 'tuesday'; -commit ; -prepare stmt1 from 'insert into t1 values(5, ''five'' )'; -execute stmt1; -select a,b from t1 where a = 5; -a b -5 five -set @arg00='six' ; -prepare stmt1 from 'insert into t1 values(6, ? )'; -execute stmt1 using @arg00; -select a,b from t1 where b = @arg00; -a b -6 six -execute stmt1 using @arg00; -ERROR 23000: Duplicate entry '6' for key 'PRIMARY' -set @arg00=NULL ; -prepare stmt1 from 'insert into t1 values(0, ? )'; -execute stmt1 using @arg00; -select a,b from t1 where b is NULL; -a b -0 NULL -set @arg00=8 ; -set @arg01='eight' ; -prepare stmt1 from 'insert into t1 values(?, ? 
)'; -execute stmt1 using @arg00, @arg01 ; -select a,b from t1 where b = @arg01; -a b -8 eight -set @NULL= null ; -set @arg00= 'abc' ; -execute stmt1 using @NULL, @NULL ; -ERROR 23000: Column 'a' cannot be null -execute stmt1 using @NULL, @NULL ; -ERROR 23000: Column 'a' cannot be null -execute stmt1 using @NULL, @arg00 ; -ERROR 23000: Column 'a' cannot be null -execute stmt1 using @NULL, @arg00 ; -ERROR 23000: Column 'a' cannot be null -set @arg01= 10000 + 2 ; -execute stmt1 using @arg01, @arg00 ; -set @arg01= 10000 + 1 ; -execute stmt1 using @arg01, @arg00 ; -select * from t1 where a > 10000 order by a ; -a b -10001 abc -10002 abc -delete from t1 where a > 10000 ; -set @arg01= 10000 + 2 ; -execute stmt1 using @arg01, @NULL ; -set @arg01= 10000 + 1 ; -execute stmt1 using @arg01, @NULL ; -select * from t1 where a > 10000 order by a ; -a b -10001 NULL -10002 NULL -delete from t1 where a > 10000 ; -set @arg01= 10000 + 10 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 9 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 8 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 7 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 6 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 5 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 4 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 3 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 2 ; -execute stmt1 using @arg01, @arg01 ; -set @arg01= 10000 + 1 ; -execute stmt1 using @arg01, @arg01 ; -select * from t1 where a > 10000 order by a ; -a b -10001 10001 -10002 10002 -10003 10003 -10004 10004 -10005 10005 -10006 10006 -10007 10007 -10008 10008 -10009 10009 -10010 10010 -delete from t1 where a > 10000 ; -set @arg00=81 ; -set @arg01='8-1' ; -set @arg02=82 ; -set @arg03='8-2' ; -prepare stmt1 from 'insert into t1 values(?,?),(?,?)'; -execute stmt1 using @arg00, @arg01, @arg02, @arg03 ; -select a,b from t1 where a in (@arg00,@arg02) ; -a b -81 8-1 -82 8-2 -set @arg00=9 ; -set @arg01='nine' ; -prepare stmt1 from 'insert into t1 set a=?, b=? '; -execute stmt1 using @arg00, @arg01 ; -select a,b from t1 where a = @arg00 ; -a b -9 nine -set @arg00=6 ; -set @arg01=1 ; -prepare stmt1 from 'insert into t1 set a=?, b=''sechs'' - on duplicate key update a=a + ?, b=concat(b,''modified'') '; -execute stmt1 using @arg00, @arg01; -select * from t1 order by a; -a b -0 NULL -1 one -2 two -3 three -4 four -5 five -7 sixmodified -8 eight -9 nine -81 8-1 -82 8-2 -set @arg00=81 ; -set @arg01=1 ; -execute stmt1 using @arg00, @arg01; -ERROR 23000: Duplicate entry '82' for key 'PRIMARY' -drop table if exists t2 ; -create table t2 (id int auto_increment primary key) -ENGINE= 'BDB' ; -prepare stmt1 from ' select last_insert_id() ' ; -insert into t2 values (NULL) ; -execute stmt1 ; -last_insert_id() -1 -insert into t2 values (NULL) ; -execute stmt1 ; -last_insert_id() -2 -drop table t2 ; -set @1000=1000 ; -set @x1000_2="x1000_2" ; -set @x1000_3="x1000_3" ; -set @x1000="x1000" ; -set @1100=1100 ; -set @x1100="x1100" ; -set @100=100 ; -set @updated="updated" ; -insert into t1 values(1000,'x1000_1') ; -insert into t1 values(@1000,@x1000_2),(@1000,@x1000_3) -on duplicate key update a = a + @100, b = concat(b,@updated) ; -select a,b from t1 where a >= 1000 order by a ; -a b -1000 x1000_3 -1100 x1000_1updated -delete from t1 where a >= 1000 ; -insert into t1 values(1000,'x1000_1') ; -prepare stmt1 from ' insert into t1 values(?,?),(?,?) - on duplicate key update a = a + ?, b = concat(b,?) 
'; -execute stmt1 using @1000, @x1000_2, @1000, @x1000_3, @100, @updated ; -select a,b from t1 where a >= 1000 order by a ; -a b -1000 x1000_3 -1100 x1000_1updated -delete from t1 where a >= 1000 ; -insert into t1 values(1000,'x1000_1') ; -execute stmt1 using @1000, @x1000_2, @1100, @x1000_3, @100, @updated ; -select a,b from t1 where a >= 1000 order by a ; -a b -1200 x1000_1updatedupdated -delete from t1 where a >= 1000 ; -prepare stmt1 from ' replace into t1 (a,b) select 100, ''hundred'' '; -execute stmt1; -execute stmt1; -execute stmt1; -test_sequence ------- multi table tests ------ -delete from t1 ; -delete from t9 ; -insert into t1(a,b) values (1, 'one'), (2, 'two'), (3, 'three') ; -insert into t9 (c1,c21) -values (1, 'one'), (2, 'two'), (3, 'three') ; -prepare stmt_delete from " delete t1, t9 - from t1, t9 where t1.a=t9.c1 and t1.b='updated' "; -prepare stmt_update from " update t1, t9 - set t1.b='updated', t9.c21='updated' - where t1.a=t9.c1 and t1.a=? "; -prepare stmt_select1 from " select a, b from t1 order by a" ; -prepare stmt_select2 from " select c1, c21 from t9 order by c1" ; -set @arg00= 1 ; -execute stmt_update using @arg00 ; -execute stmt_delete ; -execute stmt_select1 ; -a b -2 two -3 three -execute stmt_select2 ; -c1 c21 -2 two -3 three -set @arg00= @arg00 + 1 ; -execute stmt_update using @arg00 ; -execute stmt_delete ; -execute stmt_select1 ; -a b -3 three -execute stmt_select2 ; -c1 c21 -3 three -set @arg00= @arg00 + 1 ; -execute stmt_update using @arg00 ; -execute stmt_delete ; -execute stmt_select1 ; -a b -execute stmt_select2 ; -c1 c21 -set @arg00= @arg00 + 1 ; -delete from t1 ; -insert into t1 values (1,'one'); -insert into t1 values (2,'two'); -insert into t1 values (3,'three'); -insert into t1 values (4,'four'); -commit ; -delete from t9 ; -insert into t9 -set c1= 1, c2= 1, c3= 1, c4= 1, c5= 1, c6= 1, c7= 1, c8= 1, c9= 1, -c10= 1, c11= 1, c12 = 1, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=true, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='one', c32= 'monday'; -insert into t9 -set c1= 9, c2= 9, c3= 9, c4= 9, c5= 9, c6= 9, c7= 9, c8= 9, c9= 9, -c10= 9, c11= 9, c12 = 9, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=false, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='two', c32= 'tuesday'; -commit ; -insert into t1 values(0,NULL) ; -set @duplicate='duplicate ' ; -set @1000=1000 ; -set @5=5 ; -select a,b from t1 where a < 5 order by a ; -a b -0 NULL -1 one -2 two -3 three -4 four -insert into t1 select a + @1000, concat(@duplicate,b) from t1 -where a < @5 ; -affected rows: 5 -info: Records: 5 Duplicates: 0 Warnings: 0 -select a,b from t1 where a >= 1000 order by a ; -a b -1000 NULL -1001 duplicate one -1002 duplicate two -1003 duplicate three -1004 duplicate four -delete from t1 where a >= 1000 ; -prepare stmt1 from ' insert into t1 select a + ?, concat(?,b) from t1 -where a < ? 
' ; -execute stmt1 using @1000, @duplicate, @5; -affected rows: 5 -info: Records: 5 Duplicates: 0 Warnings: 0 -select a,b from t1 where a >= 1000 order by a ; -a b -1000 NULL -1001 duplicate one -1002 duplicate two -1003 duplicate three -1004 duplicate four -delete from t1 where a >= 1000 ; -set @1=1 ; -set @2=2 ; -set @100=100 ; -set @float=1.00; -set @five='five' ; -drop table if exists t2; -create table t2 like t1 ; -insert into t2 (b,a) -select @duplicate, sum(first.a) from t1 first, t1 second -where first.a <> @5 and second.b = first.b -and second.b <> @five -group by second.b -having sum(second.a) > @2 -union -select b, a + @100 from t1 -where (a,b) in ( select sqrt(a+@1)+CAST(@float AS signed),b -from t1); -affected rows: 3 -info: Records: 3 Duplicates: 0 Warnings: 0 -select a,b from t2 order by a ; -a b -3 duplicate -4 duplicate -103 three -delete from t2 ; -prepare stmt1 from ' insert into t2 (b,a) -select ?, sum(first.a) - from t1 first, t1 second - where first.a <> ? and second.b = first.b and second.b <> ? - group by second.b - having sum(second.a) > ? -union -select b, a + ? from t1 - where (a,b) in ( select sqrt(a+?)+CAST(? AS signed),b - from t1 ) ' ; -execute stmt1 using @duplicate, @5, @five, @2, @100, @1, @float ; -affected rows: 3 -info: Records: 3 Duplicates: 0 Warnings: 0 -select a,b from t2 order by a ; -a b -3 duplicate -4 duplicate -103 three -drop table t2; -drop table if exists t5 ; -set @arg01= 8; -set @arg02= 8.0; -set @arg03= 80.00000000000e-1; -set @arg04= 'abc' ; -set @arg05= CAST('abc' as binary) ; -set @arg06= '1991-08-05' ; -set @arg07= CAST('1991-08-05' as date); -set @arg08= '1991-08-05 01:01:01' ; -set @arg09= CAST('1991-08-05 01:01:01' as datetime) ; -set @arg10= unix_timestamp('1991-01-01 01:01:01'); -set @arg11= YEAR('1991-01-01 01:01:01'); -set @arg12= 8 ; -set @arg12= NULL ; -set @arg13= 8.0 ; -set @arg13= NULL ; -set @arg14= 'abc'; -set @arg14= NULL ; -set @arg15= CAST('abc' as binary) ; -set @arg15= NULL ; -create table t5 as select -8 as const01, @arg01 as param01, -8.0 as const02, @arg02 as param02, -80.00000000000e-1 as const03, @arg03 as param03, -'abc' as const04, @arg04 as param04, -CAST('abc' as binary) as const05, @arg05 as param05, -'1991-08-05' as const06, @arg06 as param06, -CAST('1991-08-05' as date) as const07, @arg07 as param07, -'1991-08-05 01:01:01' as const08, @arg08 as param08, -CAST('1991-08-05 01:01:01' as datetime) as const09, @arg09 as param09, -unix_timestamp('1991-01-01 01:01:01') as const10, @arg10 as param10, -YEAR('1991-01-01 01:01:01') as const11, @arg11 as param11, -NULL as const12, @arg12 as param12, -@arg13 as param13, -@arg14 as param14, -@arg15 as param15; -show create table t5 ; -Table Create Table -t5 CREATE TABLE `t5` ( - `const01` int(1) NOT NULL DEFAULT '0', - `param01` bigint(20) DEFAULT NULL, - `const02` decimal(2,1) NOT NULL DEFAULT '0.0', - `param02` decimal(65,30) DEFAULT NULL, - `const03` double NOT NULL DEFAULT '0', - `param03` double DEFAULT NULL, - `const04` varchar(3) NOT NULL DEFAULT '', - `param04` longtext, - `const05` varbinary(3) NOT NULL DEFAULT '', - `param05` longblob, - `const06` varchar(10) NOT NULL DEFAULT '', - `param06` longtext, - `const07` date DEFAULT NULL, - `param07` longblob, - `const08` varchar(19) NOT NULL DEFAULT '', - `param08` longtext, - `const09` datetime DEFAULT NULL, - `param09` longblob, - `const10` int(10) NOT NULL DEFAULT '0', - `param10` bigint(20) DEFAULT NULL, - `const11` int(4) DEFAULT NULL, - `param11` bigint(20) DEFAULT NULL, - `const12` binary(0) DEFAULT NULL, - 
`param12` bigint(20) DEFAULT NULL, - `param13` decimal(65,30) DEFAULT NULL, - `param14` longtext, - `param15` longblob -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -select * from t5 ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def test t5 t5 const01 const01 3 1 1 N 32769 0 63 -def test t5 t5 param01 param01 8 20 1 Y 32768 0 63 -def test t5 t5 const02 const02 246 4 3 N 1 1 63 -def test t5 t5 param02 param02 246 67 32 Y 0 30 63 -def test t5 t5 const03 const03 5 17 1 N 32769 31 63 -def test t5 t5 param03 param03 5 23 1 Y 32768 31 63 -def test t5 t5 const04 const04 253 3 3 N 1 0 8 -def test t5 t5 param04 param04 252 4294967295 3 Y 16 0 8 -def test t5 t5 const05 const05 253 3 3 N 129 0 63 -def test t5 t5 param05 param05 252 4294967295 3 Y 144 0 63 -def test t5 t5 const06 const06 253 10 10 N 1 0 8 -def test t5 t5 param06 param06 252 4294967295 10 Y 16 0 8 -def test t5 t5 const07 const07 10 10 10 Y 128 0 63 -def test t5 t5 param07 param07 252 4294967295 10 Y 144 0 63 -def test t5 t5 const08 const08 253 19 19 N 1 0 8 -def test t5 t5 param08 param08 252 4294967295 19 Y 16 0 8 -def test t5 t5 const09 const09 12 19 19 Y 128 0 63 -def test t5 t5 param09 param09 252 4294967295 19 Y 144 0 63 -def test t5 t5 const10 const10 3 10 9 N 32769 0 63 -def test t5 t5 param10 param10 8 20 9 Y 32768 0 63 -def test t5 t5 const11 const11 3 4 4 Y 32768 0 63 -def test t5 t5 param11 param11 8 20 4 Y 32768 0 63 -def test t5 t5 const12 const12 254 0 0 Y 128 0 63 -def test t5 t5 param12 param12 8 20 0 Y 32768 0 63 -def test t5 t5 param13 param13 246 67 0 Y 0 30 63 -def test t5 t5 param14 param14 252 4294967295 0 Y 16 0 8 -def test t5 t5 param15 param15 252 4294967295 0 Y 144 0 63 -const01 8 -param01 8 -const02 8.0 -param02 8.000000000000000000000000000000 -const03 8 -param03 8 -const04 abc -param04 abc -const05 abc -param05 abc -const06 1991-08-05 -param06 1991-08-05 -const07 1991-08-05 -param07 1991-08-05 -const08 1991-08-05 01:01:01 -param08 1991-08-05 01:01:01 -const09 1991-08-05 01:01:01 -param09 1991-08-05 01:01:01 -const10 662680861 -param10 662680861 -const11 1991 -param11 1991 -const12 NULL -param12 NULL -param13 NULL -param14 NULL -param15 NULL -drop table t5 ; -test_sequence ------- data type conversion tests ------ -delete from t1 ; -insert into t1 values (1,'one'); -insert into t1 values (2,'two'); -insert into t1 values (3,'three'); -insert into t1 values (4,'four'); -commit ; -delete from t9 ; -insert into t9 -set c1= 1, c2= 1, c3= 1, c4= 1, c5= 1, c6= 1, c7= 1, c8= 1, c9= 1, -c10= 1, c11= 1, c12 = 1, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=true, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='one', c32= 'monday'; -insert into t9 -set c1= 9, c2= 9, c3= 9, c4= 9, c5= 9, c6= 9, c7= 9, c8= 9, c9= 9, -c10= 9, c11= 9, c12 = 9, -c13= '2004-02-29', c14= '2004-02-29 11:11:11', c15= '2004-02-29 11:11:11', -c16= '11:11:11', c17= '2004', -c18= 1, c19=false, c20= 'a', c21= '123456789a', -c22= '123456789a123456789b123456789c', c23= 'tinyblob', c24= 'tinytext', -c25= 'blob', c26= 'text', c27= 'mediumblob', c28= 'mediumtext', -c29= 'longblob', c30= 'longtext', c31='two', c32= 'tuesday'; -commit ; -insert into t9 set c1= 0, c15= '1991-01-01 01:01:01' ; -select * from t9 order by c1 ; -c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 
c13 c14 c15 c16 c17 c18 c19 c20 c21 c22 c23 c24 c25 c26 c27 c28 c29 c30 c31 c32 -0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday -9 9 9 9 9 9 9 9 9 9 9.0000 9.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 0 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext two tuesday -test_sequence ------- select @parameter:= column ------ -prepare full_info from "select @arg01, @arg02, @arg03, @arg04, - @arg05, @arg06, @arg07, @arg08, - @arg09, @arg10, @arg11, @arg12, - @arg13, @arg14, @arg15, @arg16, - @arg17, @arg18, @arg19, @arg20, - @arg21, @arg22, @arg23, @arg24, - @arg25, @arg26, @arg27, @arg28, - @arg29, @arg30, @arg31, @arg32" ; -select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, -@arg05:= c5, @arg06:= c6, @arg07:= c7, @arg08:= c8, -@arg09:= c9, @arg10:= c10, @arg11:= c11, @arg12:= c12, -@arg13:= c13, @arg14:= c14, @arg15:= c15, @arg16:= c16, -@arg17:= c17, @arg18:= c18, @arg19:= c19, @arg20:= c20, -@arg21:= c21, @arg22:= c22, @arg23:= c23, @arg24:= c24, -@arg25:= c25, @arg26:= c26, @arg27:= c27, @arg28:= c28, -@arg29:= c29, @arg30:= c30, @arg31:= c31, @arg32:= c32 -from t9 where c1= 1 ; -@arg01:= c1 @arg02:= c2 @arg03:= c3 @arg04:= c4 @arg05:= c5 @arg06:= c6 @arg07:= c7 @arg08:= c8 @arg09:= c9 @arg10:= c10 @arg11:= c11 @arg12:= c12 @arg13:= c13 @arg14:= c14 @arg15:= c15 @arg16:= c16 @arg17:= c17 @arg18:= c18 @arg19:= c19 @arg20:= c20 @arg21:= c21 @arg22:= c22 @arg23:= c23 @arg24:= c24 @arg25:= c25 @arg26:= c26 @arg27:= c27 @arg28:= c28 @arg29:= c29 @arg30:= c30 @arg31:= c31 @arg32:= c32 -1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday -execute full_info ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 253 20 1 Y 128 0 63 -def @arg02 253 20 1 Y 128 0 63 -def @arg03 253 20 1 Y 128 0 63 -def @arg04 253 20 1 Y 128 0 63 -def @arg05 253 20 1 Y 128 0 63 -def @arg06 253 20 1 Y 128 0 63 -def @arg07 253 23 1 Y 128 31 63 -def @arg08 253 23 1 Y 128 31 63 -def @arg09 253 23 1 Y 128 31 63 -def @arg10 253 23 1 Y 128 31 63 -def @arg11 253 67 6 Y 128 30 63 -def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 -def @arg18 253 20 1 Y 128 0 63 -def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 -@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 
@arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 -1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday -select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, -@arg05:= c5, @arg06:= c6, @arg07:= c7, @arg08:= c8, -@arg09:= c9, @arg10:= c10, @arg11:= c11, @arg12:= c12, -@arg13:= c13, @arg14:= c14, @arg15:= c15, @arg16:= c16, -@arg17:= c17, @arg18:= c18, @arg19:= c19, @arg20:= c20, -@arg21:= c21, @arg22:= c22, @arg23:= c23, @arg24:= c24, -@arg25:= c25, @arg26:= c26, @arg27:= c27, @arg28:= c28, -@arg29:= c29, @arg30:= c30, @arg31:= c31, @arg32:= c32 -from t9 where c1= 0 ; -@arg01:= c1 @arg02:= c2 @arg03:= c3 @arg04:= c4 @arg05:= c5 @arg06:= c6 @arg07:= c7 @arg08:= c8 @arg09:= c9 @arg10:= c10 @arg11:= c11 @arg12:= c12 @arg13:= c13 @arg14:= c14 @arg15:= c15 @arg16:= c16 @arg17:= c17 @arg18:= c18 @arg19:= c19 @arg20:= c20 @arg21:= c21 @arg22:= c22 @arg23:= c23 @arg24:= c24 @arg25:= c25 @arg26:= c26 @arg27:= c27 @arg28:= c28 @arg29:= c29 @arg30:= c30 @arg31:= c31 @arg32:= c32 -0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -execute full_info ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 253 20 1 Y 128 0 63 -def @arg02 253 20 0 Y 128 0 63 -def @arg03 253 20 0 Y 128 0 63 -def @arg04 253 20 0 Y 128 0 63 -def @arg05 253 20 0 Y 128 0 63 -def @arg06 253 20 0 Y 128 0 63 -def @arg07 253 23 0 Y 128 31 63 -def @arg08 253 23 0 Y 128 31 63 -def @arg09 253 23 0 Y 128 31 63 -def @arg10 253 23 0 Y 128 31 63 -def @arg11 253 67 0 Y 128 30 63 -def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 -def @arg18 253 20 0 Y 128 0 63 -def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 -@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 -0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -prepare stmt1 from "select - @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, - @arg05:= c5, @arg06:= c6, @arg07:= c7, @arg08:= c8, - @arg09:= c9, @arg10:= c10, @arg11:= c11, @arg12:= c12, - @arg13:= c13, @arg14:= c14, @arg15:= c15, @arg16:= c16, - @arg17:= c17, @arg18:= c18, @arg19:= c19, @arg20:= c20, - @arg21:= c21, @arg22:= c22, @arg23:= c23, @arg24:= c24, - @arg25:= c25, @arg26:= c26, @arg27:= c27, @arg28:= c28, - @arg29:= c29, @arg30:= c30, @arg31:= c31, @arg32:= c32 -from t9 where c1= ?" 
; -set @my_key= 1 ; -execute stmt1 using @my_key ; -@arg01:= c1 @arg02:= c2 @arg03:= c3 @arg04:= c4 @arg05:= c5 @arg06:= c6 @arg07:= c7 @arg08:= c8 @arg09:= c9 @arg10:= c10 @arg11:= c11 @arg12:= c12 @arg13:= c13 @arg14:= c14 @arg15:= c15 @arg16:= c16 @arg17:= c17 @arg18:= c18 @arg19:= c19 @arg20:= c20 @arg21:= c21 @arg22:= c22 @arg23:= c23 @arg24:= c24 @arg25:= c25 @arg26:= c26 @arg27:= c27 @arg28:= c28 @arg29:= c29 @arg30:= c30 @arg31:= c31 @arg32:= c32 -1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday -execute full_info ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 253 20 1 Y 128 0 63 -def @arg02 253 20 1 Y 128 0 63 -def @arg03 253 20 1 Y 128 0 63 -def @arg04 253 20 1 Y 128 0 63 -def @arg05 253 20 1 Y 128 0 63 -def @arg06 253 20 1 Y 128 0 63 -def @arg07 253 23 1 Y 128 31 63 -def @arg08 253 23 1 Y 128 31 63 -def @arg09 253 23 1 Y 128 31 63 -def @arg10 253 23 1 Y 128 31 63 -def @arg11 253 67 6 Y 128 30 63 -def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 -def @arg18 253 20 1 Y 128 0 63 -def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 -@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 -1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday -set @my_key= 0 ; -execute stmt1 using @my_key ; -@arg01:= c1 @arg02:= c2 @arg03:= c3 @arg04:= c4 @arg05:= c5 @arg06:= c6 @arg07:= c7 @arg08:= c8 @arg09:= c9 @arg10:= c10 @arg11:= c11 @arg12:= c12 @arg13:= c13 @arg14:= c14 @arg15:= c15 @arg16:= c16 @arg17:= c17 @arg18:= c18 @arg19:= c19 @arg20:= c20 @arg21:= c21 @arg22:= c22 @arg23:= c23 @arg24:= c24 @arg25:= c25 @arg26:= c26 @arg27:= c27 @arg28:= c28 @arg29:= c29 @arg30:= c30 @arg31:= c31 @arg32:= c32 -0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -execute full_info ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 253 20 1 Y 128 0 63 -def @arg02 253 20 0 Y 128 0 63 -def @arg03 253 20 0 Y 128 0 63 -def @arg04 253 20 0 Y 128 0 63 -def @arg05 253 20 0 Y 128 0 63 -def @arg06 253 20 0 Y 128 0 63 -def @arg07 253 23 0 Y 128 31 63 -def @arg08 253 23 0 Y 128 31 63 -def @arg09 253 23 0 Y 128 31 63 -def @arg10 253 23 0 Y 128 31 63 -def @arg11 253 67 0 Y 128 30 63 -def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 
-def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 -def @arg18 253 20 0 Y 128 0 63 -def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 -@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 -0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -prepare stmt1 from "select ? := c1 from t9 where c1= 1" ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ':= c1 from t9 where c1= 1' at line 1 -test_sequence ------- select column, .. into @parm,.. ------ -select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, -c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, -c25, c26, c27, c28, c29, c30, c31, c32 -into @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, @arg07, @arg08, -@arg09, @arg10, @arg11, @arg12, @arg13, @arg14, @arg15, @arg16, -@arg17, @arg18, @arg19, @arg20, @arg21, @arg22, @arg23, @arg24, -@arg25, @arg26, @arg27, @arg28, @arg29, @arg30, @arg31, @arg32 -from t9 where c1= 1 ; -execute full_info ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 253 20 1 Y 128 0 63 -def @arg02 253 20 1 Y 128 0 63 -def @arg03 253 20 1 Y 128 0 63 -def @arg04 253 20 1 Y 128 0 63 -def @arg05 253 20 1 Y 128 0 63 -def @arg06 253 20 1 Y 128 0 63 -def @arg07 253 23 1 Y 128 31 63 -def @arg08 253 23 1 Y 128 31 63 -def @arg09 253 23 1 Y 128 31 63 -def @arg10 253 23 1 Y 128 31 63 -def @arg11 253 67 6 Y 128 30 63 -def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 -def @arg18 253 20 1 Y 128 0 63 -def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 -@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 -1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday -select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, -c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, -c25, c26, c27, c28, c29, c30, c31, c32 
-into @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, @arg07, @arg08, -@arg09, @arg10, @arg11, @arg12, @arg13, @arg14, @arg15, @arg16, -@arg17, @arg18, @arg19, @arg20, @arg21, @arg22, @arg23, @arg24, -@arg25, @arg26, @arg27, @arg28, @arg29, @arg30, @arg31, @arg32 -from t9 where c1= 0 ; -execute full_info ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 253 20 1 Y 128 0 63 -def @arg02 253 20 0 Y 128 0 63 -def @arg03 253 20 0 Y 128 0 63 -def @arg04 253 20 0 Y 128 0 63 -def @arg05 253 20 0 Y 128 0 63 -def @arg06 253 20 0 Y 128 0 63 -def @arg07 253 23 0 Y 128 31 63 -def @arg08 253 23 0 Y 128 31 63 -def @arg09 253 23 0 Y 128 31 63 -def @arg10 253 23 0 Y 128 31 63 -def @arg11 253 67 0 Y 128 30 63 -def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 -def @arg18 253 20 0 Y 128 0 63 -def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 -@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 -0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -prepare stmt1 from "select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, - c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, - c25, c26, c27, c28, c29, c30, c31, c32 -into @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, @arg07, @arg08, - @arg09, @arg10, @arg11, @arg12, @arg13, @arg14, @arg15, @arg16, - @arg17, @arg18, @arg19, @arg20, @arg21, @arg22, @arg23, @arg24, - @arg25, @arg26, @arg27, @arg28, @arg29, @arg30, @arg31, @arg32 -from t9 where c1= ?" 
; -set @my_key= 1 ; -execute stmt1 using @my_key ; -execute full_info ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 253 20 1 Y 128 0 63 -def @arg02 253 20 1 Y 128 0 63 -def @arg03 253 20 1 Y 128 0 63 -def @arg04 253 20 1 Y 128 0 63 -def @arg05 253 20 1 Y 128 0 63 -def @arg06 253 20 1 Y 128 0 63 -def @arg07 253 23 1 Y 128 31 63 -def @arg08 253 23 1 Y 128 31 63 -def @arg09 253 23 1 Y 128 31 63 -def @arg10 253 23 1 Y 128 31 63 -def @arg11 253 67 6 Y 128 30 63 -def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 -def @arg18 253 20 1 Y 128 0 63 -def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 -@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 -1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday -set @my_key= 0 ; -execute stmt1 using @my_key ; -execute full_info ; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 253 20 1 Y 128 0 63 -def @arg02 253 20 0 Y 128 0 63 -def @arg03 253 20 0 Y 128 0 63 -def @arg04 253 20 0 Y 128 0 63 -def @arg05 253 20 0 Y 128 0 63 -def @arg06 253 20 0 Y 128 0 63 -def @arg07 253 23 0 Y 128 31 63 -def @arg08 253 23 0 Y 128 31 63 -def @arg09 253 23 0 Y 128 31 63 -def @arg10 253 23 0 Y 128 31 63 -def @arg11 253 67 0 Y 128 30 63 -def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 -def @arg18 253 20 0 Y 128 0 63 -def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 -@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 -0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -prepare stmt1 from "select c1 into ? from t9 where c1= 1" ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '? 
from t9 where c1= 1' at line 1 -test_sequence --- insert into numeric columns -- -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 ) ; -set @arg00= 21 ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ) ; -prepare stmt1 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22 )" ; -execute stmt1 ; -set @arg00= 23; -prepare stmt2 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" ; -execute stmt2 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, -30.0, 30.0, 30.0 ) ; -set @arg00= 31.0 ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ) ; -prepare stmt1 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( 32.0, 32.0, 32.0, 32.0, 32.0, 32.0, 32.0, 32.0, - 32.0, 32.0, 32.0 )" ; -execute stmt1 ; -set @arg00= 33.0; -prepare stmt2 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" ; -execute stmt2 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( '40', '40', '40', '40', '40', '40', '40', '40', -'40', '40', '40' ) ; -set @arg00= '41' ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ) ; -prepare stmt1 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( '42', '42', '42', '42', '42', '42', '42', '42', - '42', '42', '42' )" ; -execute stmt1 ; -set @arg00= '43'; -prepare stmt2 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" ; -execute stmt2 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( CAST('50' as binary), CAST('50' as binary), -CAST('50' as binary), CAST('50' as binary), CAST('50' as binary), -CAST('50' as binary), CAST('50' as binary), CAST('50' as binary), -CAST('50' as binary), CAST('50' as binary), CAST('50' as binary) ) ; -set @arg00= CAST('51' as binary) ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ) ; -prepare stmt1 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( CAST('52' as binary), CAST('52' as binary), - CAST('52' as binary), CAST('52' as binary), CAST('52' as binary), - CAST('52' as binary), CAST('52' as binary), CAST('52' as binary), - CAST('52' as binary), CAST('52' as binary), CAST('52' as binary) )" ; -execute stmt1 ; -set @arg00= CAST('53' as binary) ; -prepare stmt2 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
)" ; -execute stmt2 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -set @arg00= 2 ; -set @arg00= NULL ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( 60, NULL, NULL, NULL, NULL, NULL, NULL, NULL, -NULL, NULL, NULL ) ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( 61, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ) ; -prepare stmt1 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( 62, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL )" ; -execute stmt1 ; -prepare stmt2 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( 63, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" ; -execute stmt2 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -set @arg00= 8.0 ; -set @arg00= NULL ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( 71, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ) ; -prepare stmt2 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( 73, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" ; -execute stmt2 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -set @arg00= 'abc' ; -set @arg00= NULL ; -insert into t9 -( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values -( 81, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ) ; -prepare stmt2 from "insert into t9 - ( c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( 83, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" ; -execute stmt2 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 -from t9 where c1 >= 20 -order by c1 ; -c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c12 -20 20 20 20 20 20 20 20 20 20 20.0000 -21 21 21 21 21 21 21 21 21 21 21.0000 -22 22 22 22 22 22 22 22 22 22 22.0000 -23 23 23 23 23 23 23 23 23 23 23.0000 -30 30 30 30 30 30 30 30 30 30 30.0000 -31 31 31 31 31 31 31 31 31 31 31.0000 -32 32 32 32 32 32 32 32 32 32 32.0000 -33 33 33 33 33 33 33 33 33 33 33.0000 -40 40 40 40 40 40 40 40 40 40 40.0000 -41 41 41 41 41 41 41 41 41 41 41.0000 -42 42 42 42 42 42 42 42 42 42 42.0000 -43 43 43 43 43 43 43 43 43 43 43.0000 -50 50 50 50 50 50 50 50 50 50 50.0000 -51 51 51 51 51 51 51 51 51 51 51.0000 -52 52 52 52 52 52 52 52 52 52 52.0000 -53 53 53 53 53 53 53 53 53 53 53.0000 -60 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -61 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -62 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -63 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -71 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -73 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -81 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -83 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -test_sequence --- select .. where numeric column = .. 
-- -set @arg00= 20; -select 'true' as found from t9 -where c1= 20 and c2= 20 and c3= 20 and c4= 20 and c5= 20 and c6= 20 and c7= 20 -and c8= 20 and c9= 20 and c10= 20 and c12= 20; -found -true -select 'true' as found from t9 -where c1= @arg00 and c2= @arg00 and c3= @arg00 and c4= @arg00 and c5= @arg00 -and c6= @arg00 and c7= @arg00 and c8= @arg00 and c9= @arg00 and c10= @arg00 -and c12= @arg00; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and c2= 20 and c3= 20 and c4= 20 and c5= 20 and c6= 20 and c7= 20 - and c8= 20 and c9= 20 and c10= 20 and c12= 20 "; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= ? and c2= ? and c3= ? and c4= ? and c5= ? - and c6= ? and c7= ? and c8= ? and c9= ? and c10= ? - and c12= ? "; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -found -true -set @arg00= 20.0; -select 'true' as found from t9 -where c1= 20.0 and c2= 20.0 and c3= 20.0 and c4= 20.0 and c5= 20.0 and c6= 20.0 -and c7= 20.0 and c8= 20.0 and c9= 20.0 and c10= 20.0 and c12= 20.0; -found -true -select 'true' as found from t9 -where c1= @arg00 and c2= @arg00 and c3= @arg00 and c4= @arg00 and c5= @arg00 -and c6= @arg00 and c7= @arg00 and c8= @arg00 and c9= @arg00 and c10= @arg00 -and c12= @arg00; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20.0 and c2= 20.0 and c3= 20.0 and c4= 20.0 and c5= 20.0 and c6= 20.0 - and c7= 20.0 and c8= 20.0 and c9= 20.0 and c10= 20.0 and c12= 20.0 "; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= ? and c2= ? and c3= ? and c4= ? and c5= ? - and c6= ? and c7= ? and c8= ? and c9= ? and c10= ? - and c12= ? "; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -found -true -select 'true' as found from t9 -where c1= '20' and c2= '20' and c3= '20' and c4= '20' and c5= '20' and c6= '20' - and c7= '20' and c8= '20' and c9= '20' and c10= '20' and c12= '20'; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= '20' and c2= '20' and c3= '20' and c4= '20' and c5= '20' and c6= '20' - and c7= '20' and c8= '20' and c9= '20' and c10= '20' and c12= '20' "; -execute stmt1 ; -found -true -set @arg00= '20'; -select 'true' as found from t9 -where c1= @arg00 and c2= @arg00 and c3= @arg00 and c4= @arg00 and c5= @arg00 -and c6= @arg00 and c7= @arg00 and c8= @arg00 and c9= @arg00 and c10= @arg00 -and c12= @arg00; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= ? and c2= ? and c3= ? and c4= ? and c5= ? - and c6= ? and c7= ? and c8= ? and c9= ? and c10= ? - and c12= ? 
"; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -found -true -select 'true' as found from t9 -where c1= CAST('20' as binary) and c2= CAST('20' as binary) and -c3= CAST('20' as binary) and c4= CAST('20' as binary) and -c5= CAST('20' as binary) and c6= CAST('20' as binary) and -c7= CAST('20' as binary) and c8= CAST('20' as binary) and -c9= CAST('20' as binary) and c10= CAST('20' as binary) and -c12= CAST('20' as binary); -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= CAST('20' as binary) and c2= CAST('20' as binary) and - c3= CAST('20' as binary) and c4= CAST('20' as binary) and - c5= CAST('20' as binary) and c6= CAST('20' as binary) and - c7= CAST('20' as binary) and c8= CAST('20' as binary) and - c9= CAST('20' as binary) and c10= CAST('20' as binary) and - c12= CAST('20' as binary) "; -execute stmt1 ; -found -true -set @arg00= CAST('20' as binary) ; -select 'true' as found from t9 -where c1= @arg00 and c2= @arg00 and c3= @arg00 and c4= @arg00 and c5= @arg00 -and c6= @arg00 and c7= @arg00 and c8= @arg00 and c9= @arg00 and c10= @arg00 -and c12= @arg00; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= ? and c2= ? and c3= ? and c4= ? and c5= ? - and c6= ? and c7= ? and c8= ? and c9= ? and c10= ? - and c12= ? "; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00 ; -found -true -delete from t9 ; -test_sequence --- some numeric overflow experiments -- -prepare my_insert from "insert into t9 - ( c21, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 ) -values - ( 'O', ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" ; -prepare my_select from "select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c12 -from t9 where c21 = 'O' "; -prepare my_delete from "delete from t9 where c21 = 'O' "; -set @arg00= 9223372036854775807 ; -execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -Warnings: -Warning 1264 Out of range value for column 'c1' at row 1 -Warning 1264 Out of range value for column 'c2' at row 1 -Warning 1264 Out of range value for column 'c3' at row 1 -Warning 1264 Out of range value for column 'c4' at row 1 -Warning 1264 Out of range value for column 'c5' at row 1 -Warning 1264 Out of range value for column 'c12' at row 1 -execute my_select ; -c1 127 -c2 32767 -c3 8388607 -c4 2147483647 -c5 2147483647 -c6 9223372036854775807 -c7 9.22337e+18 -c8 9.22337203685478e+18 -c9 9.22337203685478e+18 -c10 9.22337203685478e+18 -c12 9999.9999 -execute my_delete ; -set @arg00= '9223372036854775807' ; -execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -Warnings: -Warning 1264 Out of range value for column 'c1' at row 1 -Warning 1264 Out of range value for column 'c2' at row 1 -Warning 1264 Out of range value for column 'c3' at row 1 -Warning 1264 Out of range value for column 'c4' at row 1 -Warning 1264 Out of range value for column 'c5' at row 1 -Warning 1264 Out of range value for column 'c12' at row 1 -execute my_select ; -c1 127 -c2 32767 -c3 8388607 -c4 2147483647 -c5 2147483647 -c6 9223372036854775807 -c7 9.22337e+18 -c8 9.22337203685478e+18 -c9 9.22337203685478e+18 -c10 9.22337203685478e+18 -c12 9999.9999 -execute my_delete ; -set @arg00= -9223372036854775808 ; -execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -Warnings: -Warning 1264 Out of range value for column 
'c1' at row 1 -Warning 1264 Out of range value for column 'c2' at row 1 -Warning 1264 Out of range value for column 'c3' at row 1 -Warning 1264 Out of range value for column 'c4' at row 1 -Warning 1264 Out of range value for column 'c5' at row 1 -Warning 1264 Out of range value for column 'c12' at row 1 -execute my_select ; -c1 -128 -c2 -32768 -c3 -8388608 -c4 -2147483648 -c5 -2147483648 -c6 -9223372036854775808 -c7 -9.22337e+18 -c8 -9.22337203685478e+18 -c9 -9.22337203685478e+18 -c10 -9.22337203685478e+18 -c12 -9999.9999 -execute my_delete ; -set @arg00= '-9223372036854775808' ; -execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -Warnings: -Warning 1264 Out of range value for column 'c1' at row 1 -Warning 1264 Out of range value for column 'c2' at row 1 -Warning 1264 Out of range value for column 'c3' at row 1 -Warning 1264 Out of range value for column 'c4' at row 1 -Warning 1264 Out of range value for column 'c5' at row 1 -Warning 1264 Out of range value for column 'c12' at row 1 -execute my_select ; -c1 -128 -c2 -32768 -c3 -8388608 -c4 -2147483648 -c5 -2147483648 -c6 -9223372036854775808 -c7 -9.22337e+18 -c8 -9.22337203685478e+18 -c9 -9.22337203685478e+18 -c10 -9.22337203685478e+18 -c12 -9999.9999 -execute my_delete ; -set @arg00= 1.11111111111111111111e+50 ; -execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -Warnings: -Warning 1264 Out of range value for column 'c1' at row 1 -Warning 1264 Out of range value for column 'c2' at row 1 -Warning 1264 Out of range value for column 'c3' at row 1 -Warning 1264 Out of range value for column 'c4' at row 1 -Warning 1264 Out of range value for column 'c5' at row 1 -Warning 1264 Out of range value for column 'c6' at row 1 -Warning 1264 Out of range value for column 'c7' at row 1 -Warning 1264 Out of range value for column 'c12' at row 1 -execute my_select ; -c1 127 -c2 32767 -c3 8388607 -c4 2147483647 -c5 2147483647 -c6 9223372036854775807 -c7 3.40282e+38 -c8 1.11111111111111e+50 -c9 1.11111111111111e+50 -c10 1.11111111111111e+50 -c12 9999.9999 -execute my_delete ; -set @arg00= '1.11111111111111111111e+50' ; -execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 -Warning 1264 Out of range value for column 'c7' at row 1 -Warning 1264 Out of range value for column 'c12' at row 1 -execute my_select ; -c1 1 -c2 1 -c3 1 -c4 1 -c5 1 -c6 1 -c7 3.40282e+38 -c8 1.11111111111111e+50 -c9 1.11111111111111e+50 -c10 1.11111111111111e+50 -c12 9999.9999 -execute my_delete ; -set @arg00= -1.11111111111111111111e+50 ; -execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -Warnings: -Warning 1264 Out of range value for column 'c1' at row 1 -Warning 1264 Out of range value for column 'c2' at row 1 -Warning 1264 Out of range value for column 'c3' at row 1 -Warning 1264 Out of range value for column 'c4' at row 1 -Warning 1264 Out of range value for column 'c5' at row 1 -Warning 1264 Out of range value for column 'c6' at row 1 -Warning 1264 Out of range value for column 'c7' at row 1 -Warning 
1264 Out of range value for column 'c12' at row 1 -execute my_select ; -c1 -128 -c2 -32768 -c3 -8388608 -c4 -2147483648 -c5 -2147483648 -c6 -9223372036854775808 -c7 -3.40282e+38 -c8 -1.11111111111111e+50 -c9 -1.11111111111111e+50 -c10 -1.11111111111111e+50 -c12 -9999.9999 -execute my_delete ; -set @arg00= '-1.11111111111111111111e+50' ; -execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 -Warning 1264 Out of range value for column 'c7' at row 1 -Warning 1264 Out of range value for column 'c12' at row 1 -execute my_select ; -c1 -1 -c2 -1 -c3 -1 -c4 -1 -c5 -1 -c6 -1 -c7 -3.40282e+38 -c8 -1.11111111111111e+50 -c9 -1.11111111111111e+50 -c10 -1.11111111111111e+50 -c12 -9999.9999 -execute my_delete ; -test_sequence --- insert into string columns -- -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c20' at row 1 -select c1, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30 -from t9 where c1 >= 20 -order by c1 ; -c1 c20 c21 c22 c23 c24 c25 c26 c27 c28 c29 c30 -20 2 20 20 20 20 20 20 20 20 20 20 -21 2 21 21 21 21 21 21 21 21 21 21 -22 2 22 22 22 22 22 22 22 22 22 22 -23 2 23 23 23 23 23 23 23 23 23 23 -30 3 30 30 30 30 30 30 30 30 30 30 -31 3 31 31 31 31 31 31 31 31 31 31 -32 3 32 32 32 32 32 32 32 32 32 32 -33 3 33 33 33 33 33 33 33 33 33 33 -40 4 40 40 40 40 40 40 40 40 40 40 -41 4 41 41 41 41 41 41 41 41 41 41 -42 4 42 42 42 42 42 42 42 42 42 42 -43 4 43 43 43 43 43 43 43 43 43 43 -50 5 50.0 50.0 50.0 50.0 50.0 50.0 50.0 50.0 50.0 50.0 -51 5 51.0 51.0 51.0 51.0 51.0 51.0 51.0 51.0 51.0 51.0 -52 5 52.0 52.0 52.0 52.0 52.0 52.0 52.0 52.0 52.0 52.0 -53 5 53.0 53.0 53.0 53.0 53.0 53.0 53.0 53.0 53.0 53.0 -54 5 54 54 54.00 54.00 54.00 54.00 54.00 54.00 54.00 54.00 -55 5 55 55 55 55 55 55 55 55 55 55 -56 6 56 56 56.00 56.00 56.00 56.00 56.00 56.00 56.00 56.00 -57 6 57 57 57.00 57.00 57.00 57.00 57.00 57.00 57.00 57.00 -60 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
-61 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -62 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -63 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -71 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -73 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -81 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -83 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -test_sequence --- select .. where string column = .. -- -set @arg00= '20'; -select 'true' as found from t9 -where c1= 20 and concat(c20,substr('20',1+length(c20)))= '20' and c21= '20' and -c22= '20' and c23= '20' and c24= '20' and c25= '20' and c26= '20' and -c27= '20' and c28= '20' and c29= '20' and c30= '20' ; -found -true -select 'true' as found from t9 -where c1= 20 and concat(c20,substr(@arg00,1+length(c20)))= @arg00 and -c21= @arg00 and c22= @arg00 and c23= @arg00 and c25= @arg00 and -c26= @arg00 and c27= @arg00 and c28= @arg00 and c29= @arg00 and c30= @arg00; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and concat(c20,substr('20',1+length(c20)))= '20' and c21= '20' and - c22= '20' and c23= '20' and c24= '20' and c25= '20' and c26= '20' and - c27= '20' and c28= '20' and c29= '20' and c30= '20'" ; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and concat(c20,substr(?,1+length(c20)))= ? and - c21= ? and c22= ? and c23= ? and c25= ? and - c26= ? and c27= ? and c28= ? and c29= ? and c30= ?" ; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -found -true -set @arg00= CAST('20' as binary); -select 'true' as found from t9 -where c1= 20 and concat(c20,substr(CAST('20' as binary),1+length(c20))) -= CAST('20' as binary) and c21= CAST('20' as binary) -and c22= CAST('20' as binary) and c23= CAST('20' as binary) and -c24= CAST('20' as binary) and c25= CAST('20' as binary) and -c26= CAST('20' as binary) and c27= CAST('20' as binary) and -c28= CAST('20' as binary) and c29= CAST('20' as binary) and -c30= CAST('20' as binary) ; -found -true -select 'true' as found from t9 -where c1= 20 and concat(c20,substr(@arg00,1+length(c20))) = @arg00 and -c21= @arg00 and c22= @arg00 and c23= @arg00 and c25= @arg00 and -c26= @arg00 and c27= @arg00 and c28= @arg00 and c29= @arg00 and -c30= @arg00; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and concat(c20,substr(CAST('20' as binary),1+length(c20))) - = CAST('20' as binary) and c21= CAST('20' as binary) - and c22= CAST('20' as binary) and c23= CAST('20' as binary) and - c24= CAST('20' as binary) and c25= CAST('20' as binary) and - c26= CAST('20' as binary) and c27= CAST('20' as binary) and - c28= CAST('20' as binary) and c29= CAST('20' as binary) and - c30= CAST('20' as binary)" ; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and concat(c20,substr(?,1+length(c20))) = ? and c21= ? and - c22= ? and c23= ? and c25= ? and c26= ? and c27= ? and c28= ? and - c29= ? 
and c30= ?"; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -found -true -set @arg00= 20; -select 'true' as found from t9 -where c1= 20 and concat(c20,substr(20,1+length(c20)))= 20 and c21= 20 and -c22= 20 and c23= 20 and c24= 20 and c25= 20 and c26= 20 and -c27= 20 and c28= 20 and c29= 20 and c30= 20 ; -found -true -select 'true' as found from t9 -where c1= 20 and concat(c20,substr(@arg00,1+length(c20)))= @arg00 and -c21= @arg00 and c22= @arg00 and c23= @arg00 and c25= @arg00 and -c26= @arg00 and c27= @arg00 and c28= @arg00 and c29= @arg00 and c30= @arg00; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and concat(c20,substr(20,1+length(c20)))= 20 and c21= 20 and - c22= 20 and c23= 20 and c24= 20 and c25= 20 and c26= 20 and - c27= 20 and c28= 20 and c29= 20 and c30= 20" ; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and concat(c20,substr(?,1+length(c20)))= ? and - c21= ? and c22= ? and c23= ? and c25= ? and - c26= ? and c27= ? and c28= ? and c29= ? and c30= ?" ; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -found -true -set @arg00= 20.0; -select 'true' as found from t9 -where c1= 20 and concat(c20,substr(20.0,1+length(c20)))= 20.0 and c21= 20.0 and -c22= 20.0 and c23= 20.0 and c24= 20.0 and c25= 20.0 and c26= 20.0 and -c27= 20.0 and c28= 20.0 and c29= 20.0 and c30= 20.0 ; -found -true -select 'true' as found from t9 -where c1= 20 and concat(c20,substr(@arg00,1+length(c20)))= @arg00 and -c21= @arg00 and c22= @arg00 and c23= @arg00 and c25= @arg00 and -c26= @arg00 and c27= @arg00 and c28= @arg00 and c29= @arg00 and c30= @arg00; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and concat(c20,substr(20.0,1+length(c20)))= 20.0 and c21= 20.0 and - c22= 20.0 and c23= 20.0 and c24= 20.0 and c25= 20.0 and c26= 20.0 and - c27= 20.0 and c28= 20.0 and c29= 20.0 and c30= 20.0" ; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and concat(c20,substr(?,1+length(c20)))= ? and - c21= ? and c22= ? and c23= ? and c25= ? and - c26= ? and c27= ? and c28= ? and c29= ? and c30= ?" 
; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, -@arg00, @arg00, @arg00, @arg00, @arg00 ; -found -true -delete from t9 ; -test_sequence --- insert into date/time columns -- -Warnings: -Warning 1265 Data truncated for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c17' at row 1 -Warnings: -Warning 1264 Out of range value for column 'c13' at row 1 -Warning 1264 Out of range value for column 'c14' at row 1 -Warning 1265 Data truncated for column 'c15' at row 1 -Warning 1264 Out of range value for column 'c16' at row 1 -Warning 1264 Out of range value for column 'c17' at row 1 -Warnings: -Warning 1264 Out of range value for column 'c13' at row 1 -Warning 1264 Out of range value for column 'c14' at row 1 -Warning 1265 Data truncated for column 'c15' at row 1 -Warning 1264 Out of range value for column 'c16' at row 1 -Warning 1264 Out of range value for column 'c17' at row 1 -Warnings: -Warning 1264 Out of range value for column 'c13' at row 1 -Warning 1264 Out of range value for column 'c14' at row 1 -Warning 1265 Data truncated for column 'c15' at row 1 -Warning 1264 Out of range value for column 'c16' at row 1 -Warning 1264 Out of range value for column 'c17' at row 1 -Warnings: -Warning 1264 Out of range value for column 'c13' at row 1 -Warning 1264 Out of range value for column 'c14' at row 1 -Warning 1265 Data truncated for column 'c15' at row 1 -Warning 1264 Out of range value for column 'c16' at row 1 -Warning 1264 Out of range value for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c15' at row 1 -Warning 1264 Out of range value for column 'c16' at row 1 -Warning 1264 Out of range value for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c15' at row 1 -Warning 1264 Out of range value for column 'c16' at row 1 -Warning 1264 Out of range value for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c15' at row 1 -Warning 1264 Out of range value for column 'c16' at row 1 -Warning 1264 Out of range value for column 'c17' at row 1 -Warnings: -Warning 1265 Data truncated for column 'c15' at row 1 -Warning 1264 Out of range value for column 'c16' at row 1 -Warning 1264 Out of range value for column 'c17' at row 1 -select c1, c13, c14, c15, c16, c17 from t9 order by c1 ; -c1 c13 c14 c15 c16 c17 -20 1991-01-01 1991-01-01 01:01:01 1991-01-01 01:01:01 01:01:01 1991 -21 1991-01-01 1991-01-01 01:01:01 1991-01-01 01:01:01 01:01:01 1991 -22 1991-01-01 1991-01-01 01:01:01 1991-01-01 01:01:01 01:01:01 1991 -23 1991-01-01 1991-01-01 01:01:01 1991-01-01 01:01:01 01:01:01 1991 -30 1991-01-01 1991-01-01 01:01:01 1991-01-01 01:01:01 01:01:01 1991 -31 1991-01-01 1991-01-01 01:01:01 1991-01-01 01:01:01 01:01:01 1991 -32 1991-01-01 1991-01-01 01:01:01 1991-01-01 01:01:01 01:01:01 1991 -33 1991-01-01 1991-01-01 01:01:01 1991-01-01 01:01:01 01:01:01 1991 -40 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -41 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -42 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -43 0000-00-00 0000-00-00 00:00:00 
0000-00-00 00:00:00 838:59:59 0000 -50 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -51 2010-00-00 2010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -52 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -53 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -60 NULL NULL 1991-01-01 01:01:01 NULL NULL -61 NULL NULL 1991-01-01 01:01:01 NULL NULL -62 NULL NULL 1991-01-01 01:01:01 NULL NULL -63 NULL NULL 1991-01-01 01:01:01 NULL NULL -71 NULL NULL 1991-01-01 01:01:01 NULL NULL -73 NULL NULL 1991-01-01 01:01:01 NULL NULL -81 NULL NULL 1991-01-01 01:01:01 NULL NULL -83 NULL NULL 1991-01-01 01:01:01 NULL NULL -test_sequence --- select .. where date/time column = .. -- -set @arg00= '1991-01-01 01:01:01' ; -select 'true' as found from t9 -where c1= 20 and c13= '1991-01-01 01:01:01' and c14= '1991-01-01 01:01:01' and -c15= '1991-01-01 01:01:01' and c16= '1991-01-01 01:01:01' and -c17= '1991-01-01 01:01:01' ; -found -true -select 'true' as found from t9 -where c1= 20 and c13= @arg00 and c14= @arg00 and c15= @arg00 and c16= @arg00 -and c17= @arg00 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and c13= '1991-01-01 01:01:01' and c14= '1991-01-01 01:01:01' and - c15= '1991-01-01 01:01:01' and c16= '1991-01-01 01:01:01' and - c17= '1991-01-01 01:01:01'" ; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and c13= ? and c14= ? and c15= ? and c16= ? and c17= ?" ; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00 ; -found -true -set @arg00= CAST('1991-01-01 01:01:01' as datetime) ; -select 'true' as found from t9 -where c1= 20 and c13= CAST('1991-01-01 01:01:01' as datetime) and -c14= CAST('1991-01-01 01:01:01' as datetime) and -c15= CAST('1991-01-01 01:01:01' as datetime) and -c16= CAST('1991-01-01 01:01:01' as datetime) and -c17= CAST('1991-01-01 01:01:01' as datetime) ; -found -true -select 'true' as found from t9 -where c1= 20 and c13= @arg00 and c14= @arg00 and c15= @arg00 and c16= @arg00 -and c17= @arg00 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and c13= CAST('1991-01-01 01:01:01' as datetime) and - c14= CAST('1991-01-01 01:01:01' as datetime) and - c15= CAST('1991-01-01 01:01:01' as datetime) and - c16= CAST('1991-01-01 01:01:01' as datetime) and - c17= CAST('1991-01-01 01:01:01' as datetime)" ; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and c13= ? and c14= ? and c15= ? and c16= ? and c17= ?" ; -execute stmt1 using @arg00, @arg00, @arg00, @arg00, @arg00 ; -found -true -set @arg00= 1991 ; -select 'true' as found from t9 -where c1= 20 and c17= 1991 ; -found -true -select 'true' as found from t9 -where c1= 20 and c17= @arg00 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and c17= 1991" ; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and c17= ?" ; -execute stmt1 using @arg00 ; -found -true -set @arg00= 1.991e+3 ; -select 'true' as found from t9 -where c1= 20 and abs(c17 - 1.991e+3) < 0.01 ; -found -true -select 'true' as found from t9 -where c1= 20 and abs(c17 - @arg00) < 0.01 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and abs(c17 - 1.991e+3) < 0.01" ; -execute stmt1 ; -found -true -prepare stmt1 from "select 'true' as found from t9 -where c1= 20 and abs(c17 - ?) 
< 0.01" ; -execute stmt1 using @arg00 ; -found -true -drop table t1, t9; diff --git a/mysql-test/r/rowid_order_bdb.result b/mysql-test/r/rowid_order_bdb.result deleted file mode 100644 index bbdc6f6ff77..00000000000 --- a/mysql-test/r/rowid_order_bdb.result +++ /dev/null @@ -1,186 +0,0 @@ -drop table if exists t1, t2, t3,t4; -create table t1 ( -pk1 int not NULL, -key1 int(11), -key2 int(11), -PRIMARY KEY (pk1), -KEY key1 (key1), -KEY key2 (key2) -) engine=bdb; -insert into t1 values (-5, 1, 1), -(-100, 1, 1), -(3, 1, 1), -(0, 1, 1), -(10, 1, 1); -explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 5 Using sort_union(key1,key2); Using where -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -pk1 key1 key2 --100 1 1 --5 1 1 -0 1 1 -3 1 1 -10 1 1 -drop table t1; -create table t1 ( -pk1 int unsigned not NULL, -key1 int(11), -key2 int(11), -PRIMARY KEY (pk1), -KEY key1 (key1), -KEY key2 (key2) -) engine=bdb; -insert into t1 values (0, 1, 1), -(0xFFFFFFFF, 1, 1), -(0xFFFFFFFE, 1, 1), -(1, 1, 1), -(2, 1, 1); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -pk1 key1 key2 -0 1 1 -1 1 1 -2 1 1 -4294967294 1 1 -4294967295 1 1 -drop table t1; -create table t1 ( -pk1 char(4) not NULL, -key1 int(11), -key2 int(11), -PRIMARY KEY (pk1), -KEY key1 (key1), -KEY key2 (key2) -) engine=bdb collate latin2_general_ci; -insert into t1 values ('a1', 1, 1), -('b2', 1, 1), -('A3', 1, 1), -('B4', 1, 1); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -pk1 key1 key2 -a1 1 1 -A3 1 1 -b2 1 1 -B4 1 1 -drop table t1; -create table t1 ( -pk1 int not NULL, -pk2 char(4) not NULL collate latin1_german1_ci, -pk3 char(4) not NULL collate latin1_bin, -key1 int(11), -key2 int(11), -PRIMARY KEY (pk1,pk2,pk3), -KEY key1 (key1), -KEY key2 (key2) -) engine=bdb; -insert into t1 values -(1, 'u', 'u', 1, 1), -(1, 'u', char(0xEC), 1, 1), -(1, 'u', 'x', 1, 1); -insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1; -insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u'; -insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1; -select * from t1; -pk1 pk2 pk3 key1 key2 -1 u 1 1 -1 x 1 1 -1 1 1 -1 u u 1 1 -1 u x 1 1 -1 u 1 1 -1 x u 1 1 -1 x x 1 1 -1 x 1 1 -2 u 1 1 -2 x 1 1 -2 1 1 -2 u u 1 1 -2 u x 1 1 -2 u 1 1 -2 x u 1 1 -2 x x 1 1 -2 x 1 1 -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -pk1 pk2 pk3 key1 key2 -1 u 1 1 -1 x 1 1 -1 1 1 -1 u u 1 1 -1 u x 1 1 -1 u 1 1 -1 x u 1 1 -1 x x 1 1 -1 x 1 1 -2 u 1 1 -2 x 1 1 -2 1 1 -2 u u 1 1 -2 u x 1 1 -2 u 1 1 -2 x u 1 1 -2 x x 1 1 -2 x 1 1 -alter table t1 drop primary key; -select * from t1; -pk1 pk2 pk3 key1 key2 -1 u 1 1 -1 x 1 1 -1 1 1 -1 u u 1 1 -1 u x 1 1 -1 u 1 1 -1 x u 1 1 -1 x x 1 1 -1 x 1 1 -2 u 1 1 -2 x 1 1 -2 1 1 -2 u u 1 1 -2 u x 1 1 -2 u 1 1 -2 x u 1 1 -2 x x 1 1 -2 x 1 1 -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -pk1 pk2 pk3 key1 key2 -1 u 1 1 -1 x 1 1 -1 1 1 -1 u u 1 1 -1 u x 1 1 -1 u 1 1 -1 x u 1 1 -1 x x 1 1 -1 x 1 1 -2 u 1 1 -2 x 1 1 -2 1 1 -2 u u 1 1 -2 u x 1 1 -2 u 1 1 -2 x u 1 1 -2 x x 1 1 -2 x 1 1 -drop table t1; -create table t1 ( -pk1 varchar(8) NOT NULL default '', -pk2 varchar(4) NOT NULL default '', -key1 int(11), -key2 int(11), -primary key(pk1, pk2), -KEY key1 (key1), -KEY key2 (key2) -) engine=bdb; -insert into t1 values ('','empt',2,2), -('a','a--a',2,2), 
-('bb','b--b',2,2), -('ccc','c--c',2,2), -('dddd','d--d',2,2); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -pk1 pk2 key1 key2 - empt 2 2 -a a--a 2 2 -bb b--b 2 2 -ccc c--c 2 2 -dddd d--d 2 2 -drop table t1; diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index 80f28a3c731..7759a6f1f7f 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -692,7 +692,7 @@ drop database mysqltest; show full plugin; show warnings; Level Code Message -Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead. +Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead show plugin; show plugins; End of 5.1 tests diff --git a/mysql-test/r/sp_trans.result b/mysql-test/r/sp_trans.result index a5012673c12..bf4c478677b 100644 --- a/mysql-test/r/sp_trans.result +++ b/mysql-test/r/sp_trans.result @@ -535,7 +535,7 @@ use db_bug7787| CREATE PROCEDURE p1() SHOW INNODB STATUS; | Warnings: -Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead. +Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost| DROP DATABASE db_bug7787| drop user user_bug7787@localhost| diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result index b1c55c517ac..be31b5272d8 100644 --- a/mysql-test/r/type_timestamp.result +++ b/mysql-test/r/type_timestamp.result @@ -101,13 +101,13 @@ create table t1 (t2 timestamp(2), t4 timestamp(4), t6 timestamp(6), t8 timestamp(8), t10 timestamp(10), t12 timestamp(12), t14 timestamp(14)); Warnings: -Warning 1541 The syntax 'TIMESTAMP(2)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. -Warning 1541 The syntax 'TIMESTAMP(4)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. -Warning 1541 The syntax 'TIMESTAMP(6)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. -Warning 1541 The syntax 'TIMESTAMP(8)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. -Warning 1541 The syntax 'TIMESTAMP(10)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. -Warning 1541 The syntax 'TIMESTAMP(12)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. -Warning 1541 The syntax 'TIMESTAMP(14)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. +Warning 1541 The syntax 'TIMESTAMP(2)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead +Warning 1541 The syntax 'TIMESTAMP(4)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead +Warning 1541 The syntax 'TIMESTAMP(6)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead +Warning 1541 The syntax 'TIMESTAMP(8)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead +Warning 1541 The syntax 'TIMESTAMP(10)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead +Warning 1541 The syntax 'TIMESTAMP(12)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead +Warning 1541 The syntax 'TIMESTAMP(14)' is deprecated and will be removed in MySQL 5.2. 
Please use 'TIMESTAMP' instead insert t1 values (0,0,0,0,0,0,0), ("1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59", diff --git a/mysql-test/r/warnings.result b/mysql-test/r/warnings.result index 317b4c6bccb..26b3c1625aa 100644 --- a/mysql-test/r/warnings.result +++ b/mysql-test/r/warnings.result @@ -175,7 +175,7 @@ Warning 1266 Using storage engine MyISAM for table 't1' drop table t1; set table_type=MYISAM; Warnings: -Warning 1541 The syntax 'table_type' is deprecated and will be removed in MySQL 5.2. Please use 'storage_engine' instead. +Warning 1541 The syntax 'table_type' is deprecated and will be removed in MySQL 5.2. Please use 'storage_engine' instead create table t1 (a int); insert into t1 (a) values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); update t1 set a='abc'; diff --git a/mysql-test/t/bdb-alter-table-1.test b/mysql-test/t/bdb-alter-table-1.test deleted file mode 100644 index 9cb469a8df6..00000000000 --- a/mysql-test/t/bdb-alter-table-1.test +++ /dev/null @@ -1,18 +0,0 @@ -# -# Test of problem when shutting down mysqld at once after ALTER TABLE -# --- source include/have_bdb.inc ---disable_warnings -drop table if exists t1; ---enable_warnings - -create table t1(objid BIGINT not null, tablename varchar(64), oid BIGINT not null, test BIGINT, PRIMARY KEY (objid), UNIQUE(tablename)) engine=BDB; -insert into t1 values(1, 't1',4,9); -insert into t1 values(2, 'metatable',1,9); -insert into t1 values(3, 'metaindex',1,9 ); -select * from t1; -alter table t1 drop column test; - -# Now we do a reboot and continue with the next test - -# End of 4.1 tests diff --git a/mysql-test/t/bdb-alter-table-2-master.opt b/mysql-test/t/bdb-alter-table-2-master.opt deleted file mode 100644 index 15ad73c500f..00000000000 --- a/mysql-test/t/bdb-alter-table-2-master.opt +++ /dev/null @@ -1,2 +0,0 @@ ---skip-external-locking - diff --git a/mysql-test/t/bdb-alter-table-2.test b/mysql-test/t/bdb-alter-table-2.test deleted file mode 100644 index 15b8938a11d..00000000000 --- a/mysql-test/t/bdb-alter-table-2.test +++ /dev/null @@ -1,10 +0,0 @@ -# -# Note that this test uses tables from the previous test -# This is to test that the table t1 survives a reboot of MySQL -# The options in the -master.opt file are just there to force the reboot -# --- source include/have_bdb.inc -select * from t1; -drop table t1; - -# End of 4.1 tests diff --git a/mysql-test/t/bdb-crash.test b/mysql-test/t/bdb-crash.test deleted file mode 100644 index 75f4d04d5df..00000000000 --- a/mysql-test/t/bdb-crash.test +++ /dev/null @@ -1,51 +0,0 @@ --- source include/have_bdb.inc - -# test for bug reported by Mark Steele - ---disable_warnings -drop table if exists t1; ---enable_warnings -CREATE TABLE t1 ( - ChargeID int(10) unsigned NOT NULL auto_increment, - ServiceID int(10) unsigned DEFAULT '0' NOT NULL, - ChargeDate date DEFAULT '0000-00-00' NOT NULL, - ChargeAmount decimal(20,2) DEFAULT '0.00' NOT NULL, - FedTaxes decimal(20,2) DEFAULT '0.00' NOT NULL, - ProvTaxes decimal(20,2) DEFAULT '0.00' NOT NULL, - ChargeStatus enum('New','Auth','Unauth','Sale','Denied','Refund') -DEFAULT 'New' NOT NULL, - ChargeAuthorizationMessage text, - ChargeComment text, - ChargeTimeStamp varchar(20), - PRIMARY KEY (ChargeID), - KEY ServiceID (ServiceID), - KEY ChargeDate (ChargeDate) -) engine=BDB; - -BEGIN; -INSERT INTO t1 -VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now'); -COMMIT; - -BEGIN; -UPDATE t1 SET ChargeAuthorizationMessage = 'blablabla' WHERE 
-ChargeID = 1; -COMMIT; - -INSERT INTO t1 -VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now'); -select * from t1; -drop table t1; - -# -# Test for bug #2342 "Running ANALYZE TABLE on bdb table -# inside a transaction hangs server thread" - -create table t1 (a int) engine=bdb; - -set autocommit=0; -insert into t1 values(1); -analyze table t1; -drop table t1; - -# End of 4.1 tests diff --git a/mysql-test/t/bdb-deadlock.test b/mysql-test/t/bdb-deadlock.test deleted file mode 100644 index 88243cfc860..00000000000 --- a/mysql-test/t/bdb-deadlock.test +++ /dev/null @@ -1,59 +0,0 @@ -# This test doesn't work with the embedded version as this code -# assumes that one query is running while we are doing queries on -# a second connection. -# This would work if mysqltest run would be threaded and handle each -# connection in a separate thread. -# - --- source include/not_embedded.inc --- source include/have_bdb.inc - -connect (con1,localhost,root,,); -connect (con2,localhost,root,,); - ---disable_warnings -drop table if exists t1,t2; ---enable_warnings -connection con1; -create table t1 (id integer, x integer) engine=BDB; -create table t2 (id integer, x integer) engine=BDB; -insert into t1 values(0, 0); -insert into t2 values(0, 0); -set autocommit=0; -update t1 set x = 1 where id = 0; - -connection con2; -set autocommit=0; -update t2 set x = 1 where id = 0; - -# The following query should hang because con1 is locking the page ---send -select x from t1 where id = 0; - -connection con1; -# This should generate a deadlock as we are trying to access a locked row ---send -select x from t2 where id = 0; - -connection con2; ---error 1213 -reap; -commit; - -connection con1; -reap; -commit; - -connection con2; -select * from t1; -select * from t2; -commit; - -connection con1; -select * from t1; -select * from t2; -commit; - -drop table t1,t2; - -# End of 4.1 tests diff --git a/mysql-test/t/bdb-deadlock.tminus b/mysql-test/t/bdb-deadlock.tminus deleted file mode 100644 index 3918a8ffe9d..00000000000 --- a/mysql-test/t/bdb-deadlock.tminus +++ /dev/null @@ -1,59 +0,0 @@ -# This test doesn't work with the embedded version as this code -# assumes that one query is running while we are doing queries on -# a second connection. -# This would work if mysqltest run would be threaded and handle each -# connection in a separate thread. 
-# - -#-- source include/not_embedded.inc --- source include/have_bdb.inc - -connect (con1,localhost,root,,); -connect (con2,localhost,root,,); - ---disable_warnings -drop table if exists t1,t2; ---enable_warnings -connection con1; -create table t1 (id integer, x integer) engine=BDB; -create table t2 (id integer, x integer) engine=BDB; -insert into t1 values(0, 0); -insert into t2 values(0, 0); -set autocommit=0; -update t1 set x = 1 where id = 0; - -connection con2; -set autocommit=0; -update t2 set x = 1 where id = 0; - -# The following query should hang because con1 is locking the page ---send -select x from t1 where id = 0; - -connection con1; -# This should generate a deadlock as we are trying to access a locked row ---send -select x from t2 where id = 0; - -connection con2; ---error 1213 -reap; -commit; - -connection con1; -reap; -commit; - -connection con2; -select * from t1; -select * from t2; -commit; - -connection con1; -select * from t1; -select * from t2; -commit; - -drop table t1,t2; - -# End of 4.1 tests diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test deleted file mode 100644 index ebee341907c..00000000000 --- a/mysql-test/t/bdb.test +++ /dev/null @@ -1,1070 +0,0 @@ --- source include/have_bdb.inc - -# -# Small basic test with ignore -# - ---disable_warnings -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8; ---enable_warnings -create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) engine=bdb; - -insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'), (2, 'Erik'), (3, 'Sasha'), (3, 'Jeremy'), (4, 'Matt'); -select id, code, name from t1 order by id; - -update ignore t1 set id = 8, name = 'Sinisa' where id < 3; -select id, code, name from t1 order by id; -update ignore t1 set id = id + 10, name = 'Ralph' where id < 4; -select id, code, name from t1 order by id; - -drop table t1; - -# -# A bit bigger test -# - -CREATE TABLE t1 ( - id int(11) NOT NULL auto_increment, - parent_id int(11) DEFAULT '0' NOT NULL, - level tinyint(4) DEFAULT '0' NOT NULL, - PRIMARY KEY (id), - KEY parent_id (parent_id), - KEY level (level) -) engine=bdb; -INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1),(179,5,2); -update t1 set parent_id=parent_id+100; -select * from t1 where parent_id=102; -update t1 set id=id+1000; --- error 1062 -update t1 set id=1024 where id=1009; -select * from t1; -update ignore t1 set id=id+1; # This will change all rows -select * from t1; -update ignore t1 set id=1023 where id=1010; -select * from t1 where parent_id=102 order by parent_id,id; -# Here and below the differences in result are caused by difference in -# floating point calculations performed in BDB handler. 
---replace_result 5 X 6 X -explain select level from t1 where level=1; ---replace_result 5 X 6 X -explain select level,id from t1 where level=1; ---replace_result 5 X 6 X -explain select level,id,parent_id from t1 where level=1; -select level,id from t1 where level=1; -select level,id,parent_id from t1 where level=1; -optimize table t1; -show keys from t1; -drop table t1; - -# -# Test replace -# - -CREATE TABLE t1 ( - gesuchnr int(11) DEFAULT '0' NOT NULL, - benutzer_id int(11) DEFAULT '0' NOT NULL, - PRIMARY KEY (gesuchnr,benutzer_id) -) engine=BDB; - -replace into t1 (gesuchnr,benutzer_id) values (2,1); -replace into t1 (gesuchnr,benutzer_id) values (1,1); -replace into t1 (gesuchnr,benutzer_id) values (1,1); -select * from t1; -drop table t1; - -# test for bug in replace with secondary key -create table t1 (id int not null primary key, x int not null, key (x)) engine=bdb; -insert into t1 (id, x) values (1, 1); -replace into t1 (id, x) values (1, 2); -select * from t1; -drop table t1; - -# -# test delete using hidden_primary_key -# - -create table t1 (a int) engine=bdb; -insert into t1 values (1), (2); -optimize table t1; -delete from t1 where a = 1; -select * from t1; -check table t1; -drop table t1; - -create table t1 (a int,b varchar(20)) engine=bdb; -insert into t1 values (1,""), (2,"testing"); -delete from t1 where a = 1; -select * from t1; -create index skr on t1 (a); -insert into t1 values (3,""), (4,"testing"); -analyze table t1; -show keys from t1; -drop table t1; - -# Test of reading on secondary key with may be null - -create table t1 (a int,b varchar(20),key(a)) engine=bdb; -insert into t1 values (1,""), (2,"testing"); -select * from t1 where a = 1; -drop table t1; - -# -# Test auto_increment on sub key -# - -create table t1 (a char(10) not null, b int not null auto_increment, primary key(a,b)) engine=BDB; -insert into t1 values ("a",1),("b",2),("a",2),("c",1); -insert into t1 values ("a",NULL),("b",NULL),("c",NULL),("e",NULL); -insert into t1 (a) values ("a"),("b"),("c"),("d"); -insert into t1 (a) values ('k'),('d'); -insert into t1 (a) values ("a"); -insert into t1 values ("d",last_insert_id()); -select * from t1; -flush tables; -select count(*) from t1; -drop table t1; - -# -# Test rollback -# - -create table t1 (n int not null primary key) engine=bdb; -set autocommit=0; -insert into t1 values (4); -rollback; -select n, "after rollback" from t1; -insert into t1 values (4); -commit; -select n, "after commit" from t1; -commit; -insert into t1 values (5); --- error 1062 -insert into t1 values (4); -commit; -select n, "after commit" from t1; -set autocommit=1; -insert into t1 values (6); --- error 1062 -insert into t1 values (4); -select n from t1; -set autocommit=0; -# -# savepoints -# -begin; -savepoint `my_savepoint`; -insert into t1 values (7); -savepoint `savept2`; -insert into t1 values (3); -select n from t1; -savepoint savept3; -rollback to savepoint savept2; ---error 1305 -rollback to savepoint savept3; -rollback to savepoint savept2; -release savepoint `my_savepoint`; -select n from t1; --- error 1305 -rollback to savepoint `my_savepoint`; ---error 1305 -rollback to savepoint savept2; -insert into t1 values (8); -savepoint sv; -commit; -savepoint sv; -set autocommit=1; -# nop -rollback; -drop table t1; - -# -# Testing transactions -# - -create table t1 ( id int NOT NULL PRIMARY KEY, nom varchar(64)) engine=BDB; -begin; -insert into t1 values(1,'hamdouni'); -select id as afterbegin_id,nom as afterbegin_nom from t1; -rollback; -select id as afterrollback_id,nom as 
afterrollback_nom from t1; -set autocommit=0; -insert into t1 values(2,'mysql'); -select id as afterautocommit0_id,nom as afterautocommit0_nom from t1; -rollback; -select id as afterrollback_id,nom as afterrollback_nom from t1; -set autocommit=1; -drop table t1; - -# -# Simple not autocommit test -# - -CREATE TABLE t1 (id char(8) not null primary key, val int not null) engine=bdb; -insert into t1 values ('pippo', 12); --- error 1062 -insert into t1 values ('pippo', 12); # Gives error -delete from t1; -delete from t1 where id = 'pippo'; -select * from t1; - -insert into t1 values ('pippo', 12); -set autocommit=0; -delete from t1; -rollback; -select * from t1; -delete from t1; -commit; -select * from t1; -drop table t1; -set autocommit=1; - -# -# The following simple tests failed at some point -# - -CREATE TABLE t1 (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR(64)) ENGINE=BDB; -INSERT INTO t1 VALUES (1, 'Jochen'); -select * from t1; -drop table t1; - -CREATE TABLE t1 ( _userid VARCHAR(60) NOT NULL PRIMARY KEY) ENGINE=BDB; -set autocommit=0; -INSERT INTO t1 SET _userid='marc@anyware.co.uk'; -COMMIT; -SELECT * FROM t1; -SELECT _userid FROM t1 WHERE _userid='marc@anyware.co.uk'; -drop table t1; -set autocommit=1; - -# -# Test when reading on part of unique key -# -CREATE TABLE t1 ( - user_id int(10) DEFAULT '0' NOT NULL, - name varchar(100), - phone varchar(100), - ref_email varchar(100) DEFAULT '' NOT NULL, - detail varchar(200), - PRIMARY KEY (user_id,ref_email) -)engine=bdb; - -INSERT INTO t1 VALUES (10292,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10292,'shirish','2333604','shirish@yahoo.com','ddsds'),(10292,'sonali','323232','sonali@bolly.com','filmstar'); -select * from t1 where user_id=10292; -INSERT INTO t1 VALUES (10291,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10293,'shirish','2333604','shirish@yahoo.com','ddsds'); -select * from t1 where user_id=10292; -select * from t1 where user_id>=10292; -select * from t1 where user_id>10292; -select * from t1 where user_id<10292; -drop table t1; - -# -# Test that keys are created in right order -# - -CREATE TABLE t1 (a int not null, b int not null,c int not null, -key(a),primary key(a,b), unique(c),key(a),unique(b)); -show index from t1; -drop table t1; - -# -# Test of ALTER TABLE and BDB tables -# - -create table t1 (col1 int not null, col2 char(4) not null, primary key(col1)); -alter table t1 engine=BDB; -insert into t1 values ('1','1'),('5','2'),('2','3'),('3','4'),('4','4'); -select * from t1; -update t1 set col2='7' where col1='4'; -select * from t1; -alter table t1 add co3 int not null; -select * from t1; -update t1 set col2='9' where col1='2'; -select * from t1; -drop table t1; - -# -# INSERT INTO BDB tables -# - -create table t1 (a int not null , b int, primary key (a)) engine = BDB; -create table t2 (a int not null , b int, primary key (a)) engine = myisam; -insert into t1 VALUES (1,3) , (2,3), (3,3); -select * from t1; -insert into t2 select * from t1; -select * from t2; -delete from t1 where b = 3; -select * from t1; -insert into t1 select * from t2; -select * from t1; -select * from t2; -drop table t1,t2; - -# -# Search on unique key -# - -CREATE TABLE t1 ( - id int(11) NOT NULL auto_increment, - ggid varchar(32) binary DEFAULT '' NOT NULL, - email varchar(64) DEFAULT '' NOT NULL, - passwd varchar(32) binary DEFAULT '' NOT NULL, - PRIMARY KEY (id), - UNIQUE ggid (ggid) -) ENGINE=BDB; - -insert into t1 (ggid,passwd) values ('test1','xxx'); -insert into t1 (ggid,passwd) values ('test2','yyy'); --- error 1062 -insert 
into t1 (ggid,passwd) values ('test2','this will fail'); --- error 1062 -insert into t1 (ggid,id) values ('this will fail',1); - -select * from t1 where ggid='test1'; -select * from t1 where passwd='xxx'; -select * from t1 where id=2; - -replace into t1 (ggid,id) values ('this will work',1); -replace into t1 (ggid,passwd) values ('test2','this will work'); --- error 1062 -update t1 set id=100,ggid='test2' where id=1; -select * from t1; -select * from t1 where id=1; -select * from t1 where id=999; -drop table t1; - -# -# ORDER BY on not primary key -# - -CREATE TABLE t1 ( - user_name varchar(12), - password text, - subscribed char(1), - user_id int(11) DEFAULT '0' NOT NULL, - quota bigint(20), - weight double, - access_date date, - access_time time, - approved datetime, - dummy_primary_key int(11) NOT NULL auto_increment, - PRIMARY KEY (dummy_primary_key) -) ENGINE=BDB; -INSERT INTO t1 VALUES ('user_0','somepassword','N',0,0,0,'2000-09-07','23:06:59','2000-09-07 23:06:59',1); -INSERT INTO t1 VALUES ('user_1','somepassword','Y',1,1,1,'2000-09-07','23:06:59','2000-09-07 23:06:59',2); -INSERT INTO t1 VALUES ('user_2','somepassword','N',2,2,1.4142135623731,'2000-09-07','23:06:59','2000-09-07 23:06:59',3); -INSERT INTO t1 VALUES ('user_3','somepassword','Y',3,3,1.7320508075689,'2000-09-07','23:06:59','2000-09-07 23:06:59',4); -INSERT INTO t1 VALUES ('user_4','somepassword','N',4,4,2,'2000-09-07','23:06:59','2000-09-07 23:06:59',5); -select user_name, password , subscribed, user_id, quota, weight, access_date, access_time, approved, dummy_primary_key from t1 order by user_name; -drop table t1; - -# -# Testing of tables without primary keys -# - -CREATE TABLE t1 ( - id int(11) NOT NULL auto_increment, - parent_id int(11) DEFAULT '0' NOT NULL, - level tinyint(4) DEFAULT '0' NOT NULL, - KEY (id), - KEY parent_id (parent_id), - KEY level (level) -) engine=bdb; -INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1); -INSERT INTO t1 values (179,5,2); -update t1 set parent_id=parent_id+100; -select * from t1 where parent_id=102; -update t1 set id=id+1000; -update t1 set id=1024 where id=1009; -select * from t1; -update ignore t1 set id=id+1; # This will change all rows -select * from t1; -update ignore t1 set id=1023 where id=1010; -select * from t1 where parent_id=102; ---replace_result 5 X 6 X -explain select level from t1 where level=1; -select level,id from t1 where level=1; -select level,id,parent_id from t1 where level=1; -select level,id from t1 where level=1 order by id; -delete from t1 where level=1; -select * from t1; -drop table t1; - -# -# Test of index only reads -# -CREATE TABLE t1 ( - sca_code char(6) NOT NULL, - cat_code char(6) NOT NULL, - sca_desc varchar(50), - lan_code char(2) NOT NULL, - sca_pic varchar(100), - sca_sdesc varchar(50), - sca_sch_desc varchar(16), - PRIMARY KEY (sca_code, cat_code, lan_code), - INDEX sca_pic (sca_pic) -) engine = bdb ; - -INSERT INTO t1 ( sca_code, cat_code, sca_desc, lan_code, sca_pic, sca_sdesc, sca_sch_desc) VALUES ( 'PD', 'J', 'PENDANT', 'EN', NULL, NULL, 'PENDANT'),( 'RI', 'J', 'RING', 'EN', NULL, NULL, 'RING'),( 'QQ', 'N', 'RING', 'EN', 'not null', NULL, 'RING'); -select count(*) from t1 where sca_code = 'PD'; -select count(*) from t1 where 
sca_code <= 'PD'; -select count(*) from t1 where sca_pic is null; -alter table t1 drop index sca_pic, add index sca_pic (cat_code, sca_pic); -select count(*) from t1 where sca_code='PD' and sca_pic is null; -select count(*) from t1 where cat_code='E'; - -alter table t1 drop index sca_pic, add index (sca_pic, cat_code); -select count(*) from t1 where sca_code='PD' and sca_pic is null; -select count(*) from t1 where sca_pic >= 'n'; -select sca_pic from t1 where sca_pic is null; -update t1 set sca_pic="test" where sca_pic is null; -delete from t1 where sca_code='pd'; -drop table t1; - -# -# Test of opening table twice and timestamps -# -set @a:=now(); -CREATE TABLE t1 (a int not null, b timestamp not null, primary key (a)) engine=bdb; -insert into t1 (a) values(1),(2),(3); -select t1.a from t1 natural join t1 as t2 where t1.b >= @a order by t1.a; -select a from t1 natural join t1 as t2 where b >= @a order by a; -update t1 set a=5 where a=1; -select a from t1; -drop table t1; - -# -# Test flushing of berkeley DB logs -# -flush logs; - -# -# Test key on blob with null values -# -create table t1 (b blob, i int, key (b(100)), key (i), key (i, b(20))) engine=bdb; -insert into t1 values ('this is a blob', 1), (null, -1), (null, null),("",1),("",2),("",3); -select b from t1 where b = 'this is a blob'; -select * from t1 where b like 't%'; -select b, i from t1 where b is not null; -select * from t1 where b is null and i > 0; -select * from t1 where i is NULL; -update t1 set b='updated' where i=1; -select * from t1; -drop table t1; - -# -# Test with variable length primary key -# -create table t1 (a varchar(100) not null, primary key(a), b int not null) engine=bdb; -insert into t1 values("hello",1),("world",2); -select * from t1 order by b desc; -optimize table t1; -show keys from t1; -drop table t1; - -# -# Test of bug in create index with NULL columns -# -create table t1 (i int, j int )ENGINE=BDB; -insert into t1 values (1,2); -select * from t1 where i=1 and j=2; -create index ax1 on t1 (i,j); -select * from t1 where i=1 and j=2; -drop table t1; - -# -# Test of with CONST tables and TEXT columns -# This gave a wrong result because the row information was freed too early -# - -create table t1 -( - branch_id int auto_increment primary key, - branch_name varchar(255) not null, - branch_active int not null default 1, - - unique branch_name(branch_name), - index branch_active(branch_active) -) engine=bdb; -create table t2 -( - target_id int auto_increment primary key, - target_name varchar(255) not null, - target_active int not null default 1, - - unique target_name(target_name), - index target_active(target_active) -) engine=bdb; -create table t3 -( - platform_id int auto_increment primary key, - platform_name varchar(255) not null, - platform_active int not null default 1, - - unique platform_name(platform_name), - index platform_active(platform_active) -) engine=bdb; -create table t4 -( - product_id int auto_increment primary key, - product_name varchar(255) not null, - version_file varchar(255) not null, - product_active int not null default 1, - - unique product_name(product_name), - index product_active(product_active) -) engine=bdb; -create table t5 -( - product_file_id int auto_increment primary key, - product_id int not null, - file_name varchar(255) not null, - /* cvs module used to find the file version */ - module_name varchar(255) not null, - /* flag whether the file is still included in the product */ - file_included int not null default 1, - - unique product_file(product_id,file_name), - 
index file_included(file_included) -) engine=bdb; -create table t6 -( - file_platform_id int auto_increment primary key, - product_file_id int not null, - platform_id int not null, - branch_id int not null, - /* filename in the build system */ - build_filename varchar(255) not null, - /* default filename in the build archive */ - archive_filename varchar(255) not null, - - unique file_platform(product_file_id,platform_id,branch_id) -) engine=bdb; -create table t8 -( - archive_id int auto_increment primary key, - branch_id int not null, - target_id int not null, - platform_id int not null, - product_id int not null, - status_id int not null default 1, - - unique archive(branch_id,target_id,platform_id,product_id), - index status_id(status_id) -) engine=bdb; -create table t7 -( - build_id int auto_increment primary key, - branch_id int not null, - target_id int not null, - build_number int not null, - build_date date not null, - /* build system tag, e.g. 'rmanight-022301-1779' */ - build_tag varchar(255) not null, - /* path relative to the build archive root, e.g. 'current' */ - build_path text not null, - - unique build(branch_id,target_id,build_number) -) engine=bdb; - -insert into t1 (branch_name) -values ('RealMedia'); -insert into t1 (branch_name) -values ('RP8REV'); -insert into t1 (branch_name) -values ('SERVER_8_0_GOLD'); - -insert into t2 (target_name) -values ('rmanight'); -insert into t2 (target_name) -values ('playerall'); -insert into t2 (target_name) -values ('servproxyall'); - -insert into t3 (platform_name) -values ('linux-2.0-libc6-i386'); -insert into t3 (platform_name) -values ('win32-i386'); - -insert into t4 (product_name, version_file) -values ('realserver', 'servinst'); -insert into t4 (product_name, version_file) -values ('realproxy', 'prxyinst'); -insert into t4 (product_name, version_file) -values ('realplayer', 'playinst'); -insert into t4 (product_name, version_file) -values ('plusplayer', 'plusinst'); - -create temporary table tmp1 - select branch_id, target_id, platform_id, product_id - from t1, t2, t3, t4 ; -create temporary table tmp2 - select tmp1.branch_id, tmp1.target_id, tmp1.platform_id, tmp1.product_id - from tmp1 left join t8 - using (branch_id,target_id,platform_id,product_id) - where t8.archive_id is null ; -insert into t8 - (branch_id, target_id, platform_id, product_id, status_id) - select branch_id, target_id, platform_id, product_id, 1 - from tmp2 ; -drop table tmp1 ; -drop table tmp2 ; - -insert into t5 (product_id, file_name, module_name) -values (1, 'servinst', 'server'); - -insert into t5 (product_id, file_name, module_name) -values (2, 'prxyinst', 'server'); - -insert into t5 (product_id, file_name, module_name) -values (3, 'playinst', 'rpapp'); - -insert into t5 (product_id, file_name, module_name) -values (4, 'plusinst', 'rpapp'); - -insert into t6 -(product_file_id,platform_id,branch_id,build_filename,archive_filename) -values (1, 2, 3, 'servinst.exe', 'win32-servinst.exe'); - -insert into t6 -(product_file_id,platform_id,branch_id,build_filename,archive_filename) -values (1, 1, 3, 'v80_linux-2.0-libc6-i386_servinst.bin', 'linux2-servinst.exe'); - -insert into t6 -(product_file_id,platform_id,branch_id,build_filename,archive_filename) -values (3, 2, 2, 'playinst.exe', 'win32-playinst.exe'); - -insert into t6 -(product_file_id,platform_id,branch_id,build_filename,archive_filename) -values (4, 2, 2, 'playinst.exe', 'win32-playinst.exe'); - -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (2, 2, 
1071, 'playerall-022101-1071', '2001-02-21', 'current'); - -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (2, 2, 1072, 'playerall-022201-1072', '2001-02-22', 'current'); - -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (3, 3, 388, 'servproxyall-022201-388', '2001-02-22', 'current'); - -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (3, 3, 389, 'servproxyall-022301-389', '2001-02-23', 'current'); - -insert into t7 -(branch_id,target_id,build_number,build_tag,build_date,build_path) -values (4, 4, 100, 'foo target-010101-100', '2001-01-01', 'current'); - -update t8 -set status_id=2 -where branch_id=2 and target_id=2 and platform_id=2 and product_id=1; - -select t7.build_path -from - t1, - t7, - t2, - t3, - t4, - t5, - t6 -where - t7.branch_id = t1.branch_id and - t7.target_id = t2.target_id and - t5.product_id = t4.product_id and - t6.product_file_id = t5.product_file_id and - t6.platform_id = t3.platform_id and - t6.branch_id = t6.branch_id and - t7.build_id = 1 and - t4.product_id = 3 and - t5.file_name = 'playinst' and - t3.platform_id = 2; - -drop table t1, t2, t3, t4, t5, t6, t7, t8; - -# -# Test with blob + tinyint key -# - -CREATE TABLE t1 ( - a tinytext NOT NULL, - b tinyint(3) unsigned NOT NULL default '0', - PRIMARY KEY (a(32),b) -) ENGINE=BDB; -INSERT INTO t1 VALUES ('a',1),('a',2); -SELECT * FROM t1 WHERE a='a' AND b=2; -SELECT * FROM t1 WHERE a='a' AND b in (2); -SELECT * FROM t1 WHERE a='a' AND b in (1,2); -drop table t1; - -# -# Test min-max optimization -# - -CREATE TABLE t1 ( - a int3 unsigned NOT NULL, - b int1 unsigned NOT NULL, - UNIQUE (a, b) -) ENGINE = BDB; - -INSERT INTO t1 VALUES (1, 1); -SELECT MIN(B),MAX(b) FROM t1 WHERE t1.a = 1; -drop table t1; - -# -# Test problem with BDB and lock tables with duplicate write. 
-# - -create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=bdb; -insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); -LOCK TABLES t1 WRITE; ---error 1062 -insert into t1 values (99,1,2,'D'),(1,1,2,'D'); -select id from t1; -select id from t1; -UNLOCK TABLES; -DROP TABLE t1; - -create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=bdb; -insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); -LOCK TABLES t1 WRITE; -begin; ---error 1062 -insert into t1 values (99,1,2,'D'),(1,1,2,'D'); -select id from t1; -insert ignore into t1 values (100,1,2,'D'),(1,1,99,'D'); -commit; -select id,id3 from t1; -UNLOCK TABLES; -DROP TABLE t1; - -# -# Test with empty tables (crashed with lock error) -# - -CREATE TABLE t1 (SYAIN_NO char(5) NOT NULL default '', KINMU_DATE char(6) NOT NULL default '', PRIMARY KEY (SYAIN_NO,KINMU_DATE)) ENGINE=BerkeleyDB; -CREATE TABLE t2 ( SYAIN_NO char(5) NOT NULL default '',STR_DATE char(8) NOT NULL default '',PRIMARY KEY (SYAIN_NO,STR_DATE) ) ENGINE=BerkeleyDB; -select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO; -select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO; -DROP TABLE t1,t2; - -# -# Test problem with joining table to itself on a multi-part unique key -# - -create table t1 (a int(11) not null, b int(11) not null, unique (a,b)) engine=bdb; -insert into t1 values (1,1), (1,2); -select * from t1 where a = 1; -select t1.*, t2.* from t1, t1 t2 where t1.a = t2.a and t2.a = 1; -select * from t1 where a = 1; -drop table t1; - -# -# This caused a deadlock in BDB internal locks -# - -create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=bdb; -insert into t1 values (0,0,0,'ABCDEFGHIJ'); -create table t2 (id int NOT NULL,primary key (id)) engine=bdb; -LOCK TABLES t1 WRITE, t2 WRITE; -insert into t2 values(1); -SELECT t1.* FROM t1 WHERE id IN (1); -SELECT t1.* FROM t2 left outer join t1 on (t1.id=t2.id); -delete from t1 where id3 >= 0 and id3 <= 0; -drop table t1,t2; - -# -# Test problems with NULL -# - -CREATE TABLE t1 (i varchar(48) NOT NULL default '', p varchar(255) default NULL,s varchar(48) NOT NULL default '', PRIMARY KEY (i), UNIQUE(p,s)) ENGINE=BDB; -INSERT INTO t1 VALUES ('00000000-e6c4ddeaa6-003b8-83458387','programs/xxxxxxxx.wmv','00000000-e6c4ddeb32-003bc-83458387'); -SELECT * FROM t1 WHERE p='programs/xxxxxxxx.wmv'; -drop table t1; - -# -# Test problem which gave error 'Can't find record in 't1'' -# - -CREATE TABLE t1 ( STR_DATE varchar(8) NOT NULL default '',INFO_NOTE varchar(200) default NULL,PRIMARY KEY (STR_DATE) ) ENGINE=BerkeleyDB; -select INFO_NOTE from t1 where STR_DATE = '20010610'; -select INFO_NOTE from t1 where STR_DATE < '20010610'; -select INFO_NOTE from t1 where STR_DATE > '20010610'; -drop table t1; - -# -# Test problem with multi table delete which quickly shows up with bdb tables. 
-# - -create table t1 (a int not null, b int, primary key (a)) engine =bdb; -create table t2 (a int not null, b int, primary key (a)) engine =bdb; -insert into t1 values (2, 3),(1, 7),(10, 7); -insert into t2 values (2, 3),(1, 7),(10, 7); -select * from t1; -select * from t2; -delete t1, t2 from t1, t2 where t1.a = t2.a; -select * from t1; -select * from t2; -select * from t2; -drop table t1,t2; - -# -# The bug #971 -# - -create table t1 (x int not null, index(x)) engine=bdb; -insert into t1 values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); -select * from t1 where x <= 10 and x >= 7; -select * from t1 where x <= 10 and x >= 7 order by x; -select * from t1 where x <= 10 and x >= 7 order by x desc; -select * from t1 where x <= 8 and x >= 5 order by x desc; -select * from t1 where x < 8 and x > 5 order by x desc; -drop table t1; - -# -# Test of multi-table-updates (bug #1980). -# - -create table t1 ( c char(8) not null ) engine=bdb; -insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); -insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F'); - -alter table t1 add b char(8) not null; -alter table t1 add a char(8) not null; -alter table t1 add primary key (a,b,c); -update t1 set a=c, b=c; - -create table t2 (c char(8) not null, b char(8) not null, a char(8) not null, primary key(a,b,c)) engine=bdb; -insert into t2 select * from t1; - -delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; -drop table t1,t2; - -# -# Test index only read (Bug #2509) -# -create table t1 (a char(10), key(a), b int not null, key(b)) engine=bdb; -insert into t1 values ('a',1),('A',2); -explain select a from t1; -select a from t1; -explain select b from t1; -select b from t1; -alter table t1 modify a char(10) binary; -explain select a from t1; -select a from t1; -drop table t1; - -# -# bug#2686 - index_merge select on BerkeleyDB table with varchar PK crashes -# - -create table t1( - pk1 text not null, pk2 text not null, pk3 char(4), - key1 int, key2 int, - primary key(pk1(4), pk2(4), pk3), key(key1), key(key2) -) engine=bdb; -insert into t1 values (concat('aaa-', repeat('A', 4000)), - concat('eee-', repeat('e', 4000)), 'a++a', 1, 1); -insert into t1 values (concat('bbb-', repeat('B', 4000)), - concat('ggg-', repeat('G', 4000)), 'b++b', 1, 1); -select substring(pk1, 1, 4), substring(pk1, 4001), - substring(pk2, 1, 4), substring(pk2, 4001), pk3, key1, key2 - from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# -# bug#2688 - Wrong index_merge query results for BDB table with -# variable length primary key -# - -create table t1 ( - pk1 varchar(8) not null default '', - pk2 varchar(4) not null default '', - key1 int(11) default null, - key2 int(11) default null, - primary key (pk1,pk2), - key key1 (key1), - key key2 (key2)) engine=bdb; -insert into t1 values ('','empt',2,2), ('a','a--a',2,2), - ('bb','b--b',2,2), ('ccc','c--c',2,2), ('dddd','d--d',2,2); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - - -# -# Bug #4000: problem with active cursor. -# - -set autocommit=0; -create table t1(b varchar(30)) engine=bdb; -insert into t1 values ('one'); -commit; -select b FROM t1 outer_table where -exists (select 'two' from t1 where 'two' = outer_table.b); -drop table t1; -set autocommit=1; - -# -# Bug #4089: subselect and open cursor. 
-# - -create table t1(a int primary key, b varchar(30)) engine=bdb; -insert into t1 values (1,'one'), (2,'two'), (3,'three'), (4,'four'); -create table t2 like t1; -insert t2 select * from t1; -select a from t1 where a in (select a from t2); -delete from t2; -insert into t2 (a, b) - select a, b from t1 where (a, b) in (select a, b from t1); -select * from t2; -drop table t1, t2; - -# -# Bug #4304: TRUNCATE , wrong result -# - -create table t1 (a int, b varchar(30), primary key(a)) engine = bdb; -insert into t1 values (1,'one'); -commit; -truncate t1; -select * from t1; -drop table t1; - -# -# Check that BDB works fine with a string which is -# longer than 255 bytes for multibyte characters. -# -SET NAMES utf8; -create table t1 (a varchar(255) character set utf8) engine=bdb; -set @a:= convert(repeat(_latin1 0xFF, 255) using utf8); -insert into t1 values (@a); -select a, length(a), char_length(a) from t1; -drop table t1; -SET NAMES latin1; - -# -# Bug #5832 SELECT doesn't return records in some cases -# -CREATE TABLE t1 ( - id int unsigned NOT NULL auto_increment, - list_id smallint unsigned NOT NULL, - term TEXT NOT NULL, - PRIMARY KEY(id), - INDEX(list_id, term(4)) -) ENGINE=BDB CHARSET=utf8; -INSERT INTO t1 SET list_id = 1, term = "letterc"; -INSERT INTO t1 SET list_id = 1, term = "letterb"; -INSERT INTO t1 SET list_id = 1, term = "lettera"; -INSERT INTO t1 SET list_id = 1, term = "letterd"; -SELECT id FROM t1 WHERE (list_id = 1) AND (term = "letterc"); -SELECT id FROM t1 WHERE (list_id = 1) AND (term = "letterb"); -SELECT id FROM t1 WHERE (list_id = 1) AND (term = "lettera"); -SELECT id FROM t1 WHERE (list_id = 1) AND (term = "letterd"); -DROP TABLE t1; - -# -# Bug #15536: Crash when DELETE with subquery using BDB tables -# -create table t1 (a int, key(a)) engine=bdb; -create table t2 (b int, key(b)) engine=bdb; -insert into t1 values (1),(1),(2),(3),(4); -insert into t2 values (1),(5),(6),(7); -delete from t1 where (a in (select b from t2)); -select count(*) from t1; -# INSERT also blows up ---error 1242 -insert into t1 set a=(select b from t2); -select count(*) from t1; -# UPDATE also blows up -update t1 set a = a + 1 where (a in (select b from t2)); -select count(*) from t1; -drop table t1, t2; - ---echo End of 4.1 tests - -# -# alter temp table -# -create temporary table t1 (a int, primary key(a)) engine=bdb; -select * from t1; -alter table t1 add b int; -select * from t1; -drop table t1; - - -# -# Test varchar -# - -let $default=`select @@storage_engine`; -set storage_engine=bdb; -source include/varchar.inc; - -# -# Some errors/warnings on create -# - ---replace_result 1024 MAX_KEY_LENGTH 3072 MAX_KEY_LENGTH -create table t1 (v varchar(65530), key(v)); -drop table if exists t1; -create table t1 (v varchar(65536)); -show create table t1; -drop table t1; -create table t1 (v varchar(65530) character set utf8); -show create table t1; -drop table t1; - -# End varchar test -eval set storage_engine=$default; - -# -# Test that we can create a large key -# -create table t1 (a varchar(255) character set utf8, - b varchar(255) character set utf8, - c varchar(255) character set utf8, - d varchar(255) character set utf8, - key (a,b,c,d)) engine=bdb; -drop table t1; ---error ER_TOO_LONG_KEY -create table t1 (a varchar(255) character set utf8, - b varchar(255) character set utf8, - c varchar(255) character set utf8, - d varchar(255) character set utf8, - e varchar(255) character set utf8, - key (a,b,c,d,e)) engine=bdb; - -# -# Bug #14212: Server crash after COMMIT + ALTER TABLE -# -set 
autocommit=0; -create table t1 (a int) engine=bdb; -commit; -alter table t1 add primary key(a); -drop table t1; - - ---echo End of 5.0 tests - -# -# Bug #7955: SET TRANSACTION ISIOLATION LEVEL lives longer than next -# transaciton -# -create table t1 (a int) engine=bdb; -set session transaction isolation level repeatable read; -set transaction isolation level serializable; -begin; -select @@tx_isolation; -insert into t1 values (1); ---error ER_CANT_CHANGE_TX_ISOLATION -set transaction isolation level read committed; -rollback; -begin; -select @@tx_isolation; -insert into t1 values (1); -rollback; -drop table t1; - ---echo End of 5.1 tests diff --git a/mysql-test/t/bdb_cache-master.opt b/mysql-test/t/bdb_cache-master.opt deleted file mode 100644 index 5f0ebff98f6..00000000000 --- a/mysql-test/t/bdb_cache-master.opt +++ /dev/null @@ -1 +0,0 @@ ---set-variable=query_cache_size=1M diff --git a/mysql-test/t/bdb_cache.test b/mysql-test/t/bdb_cache.test deleted file mode 100644 index 85328920d71..00000000000 --- a/mysql-test/t/bdb_cache.test +++ /dev/null @@ -1,53 +0,0 @@ --- source include/have_bdb.inc --- source include/have_query_cache.inc - -# -# Without auto_commit. -# ---disable_warnings -drop table if exists t1, t2, t3; ---enable_warnings -flush status; -set autocommit=0; -create table t1 (a int not null) engine=bdb; -insert into t1 values (1),(2),(3); -select * from t1; -show status like "Qcache_queries_in_cache"; -drop table t1; -set autocommit=1; -create table t1 (a int not null) engine=bdb; -begin; -insert into t1 values (1),(2),(3); -select * from t1; -show status like "Qcache_queries_in_cache"; -drop table t1; -create table t1 (a int not null) engine=bdb; -create table t2 (a int not null) engine=bdb; -create table t3 (a int not null) engine=bdb; -insert into t1 values (1),(2); -insert into t2 values (1),(2); -insert into t3 values (1),(2); -select * from t1; -select * from t2; -select * from t3; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_hits"; -begin; -select * from t1; -select * from t2; -select * from t3; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_hits"; -insert into t1 values (3); -insert into t2 values (3); -insert into t1 values (4); -select * from t1; -select * from t2; -select * from t3; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_hits"; -commit; -show status like "Qcache_queries_in_cache"; -drop table if exists t1, t2, t3; - -# End of 4.1 tests diff --git a/mysql-test/t/bdb_gis.test b/mysql-test/t/bdb_gis.test deleted file mode 100644 index 88dcbb7cbe9..00000000000 --- a/mysql-test/t/bdb_gis.test +++ /dev/null @@ -1,3 +0,0 @@ --- source include/have_bdb.inc -SET storage_engine=bdb; ---source include/gis_generic.inc diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test index 77b76a14171..ccb3e7f718c 100644 --- a/mysql-test/t/ctype_utf8.test +++ b/mysql-test/t/ctype_utf8.test @@ -360,7 +360,7 @@ drop table t1; create table t1 ( c char(10) character set utf8, unique key a (c(1)) -) engine=bdb; +) engine=innodb; --enable_warnings insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f'); --error 1062 @@ -483,7 +483,7 @@ drop table t1; create table t1 ( c char(10) character set utf8 collate utf8_bin, unique key a (c(1)) -) engine=bdb; +) engine=innodb; --enable_warnings insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f'); --error 1062 @@ -558,7 +558,7 @@ drop table t1; create table t1 ( str varchar(255) character set utf8 not null, key str (str(2)) -) engine=bdb; +) 
engine=innodb; --enable_warnings INSERT INTO t1 VALUES ('str'); INSERT INTO t1 VALUES ('str2'); @@ -644,7 +644,7 @@ create table t1 ( insert into t1 values(1,'foo'),(2,'foobar'); select * from t1 where b like 'foob%'; --disable_warnings -alter table t1 engine=bdb; +alter table t1 engine=innodb; --enable_warnings select * from t1 where b like 'foob%'; drop table t1; diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index cf387f7acee..359092b43b3 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -44,3 +44,4 @@ rpl_row_basic_7ndb : BUG#21298 2006-07-27 msvensson rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson crash_commit_before : 2006-08-02 msvensson rpl_ndb_dd_advance : BUG#18679 2006-07-28 jimw (Test fails randomly) +federated_transactions : Need to be re-enabled once Patrick's merge is complete diff --git a/mysql-test/t/federated_transactions.test b/mysql-test/t/federated_transactions.test index 9f3b030f462..2fc737730bf 100644 --- a/mysql-test/t/federated_transactions.test +++ b/mysql-test/t/federated_transactions.test @@ -10,7 +10,7 @@ CREATE TABLE federated.t1 ( `id` int(20) NOT NULL, `name` varchar(32) NOT NULL default '' ) - DEFAULT CHARSET=latin1 ENGINE=InnoDB; + DEFAULT CHARSET=latin1 ENGINE=innodb; connection master; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/t/index_merge_bdb.test b/mysql-test/t/index_merge_bdb.test deleted file mode 100644 index c49e6ab3175..00000000000 --- a/mysql-test/t/index_merge_bdb.test +++ /dev/null @@ -1,52 +0,0 @@ -# -# 2-sweeps read Index_merge test -# --- source include/have_bdb.inc - ---disable_warnings -drop table if exists t1; ---enable_warnings - -create table t1 ( - pk int primary key, - key1 int, - key2 int, - filler char(200), - filler2 char(200), - index(key1), - index(key2) -) engine=bdb; - - ---disable_query_log -let $1=1000; -while ($1) -{ - eval insert into t1 values($1, $1, $1, 'filler-data','filler-data-2'); - dec $1; -} ---enable_query_log - -select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 ); - -set @maxv=1000; - -select * from t1 where - (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) - or key1=18 or key1=60; - -select * from t1 where - (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) - or key1 < 3 or key1 > @maxv-11; - -select * from t1 where - (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) - or - (key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10); - -select * from t1 where - (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) - or - (key1 < 5) or (key1 > @maxv-10); - -drop table t1; diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test index 21271517564..9cd93f2c7dd 100644 --- a/mysql-test/t/multi_update.test +++ b/mysql-test/t/multi_update.test @@ -485,7 +485,7 @@ delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; drop table t1,t2; --disable_warnings -create table t1 ( c char(8) not null ) engine=bdb; +create table t1 ( c char(8) not null ) engine=innodb; --enable_warnings insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); diff --git a/mysql-test/t/ndb_lock.test b/mysql-test/t/ndb_lock.test index b022ea550cc..474155e06f4 100644 --- a/mysql-test/t/ndb_lock.test +++ b/mysql-test/t/ndb_lock.test @@ -73,7 +73,7 @@ drop table t1; create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb; -insert into t1 values (1,'one',1), 
(2,'two',2),(3,"three",3); +insert into t1 values (1,'one',1); # PK access connection con1; @@ -82,11 +82,22 @@ select * from t1 where x = 1 for update; connection con2; begin; -select * from t1 where x = 2 for update; --error 1205 select * from t1 where x = 1 for update; rollback; +connection con1; +rollback; +insert into t1 values (2,'two',2),(3,"three",3); +begin; +select * from t1 where x = 1 for update; + +connection con2; +--error 1205 +select * from t1 where x = 1 for update; +select * from t1 where x = 2 for update; +rollback; + connection con1; commit; diff --git a/mysql-test/t/ps_6bdb.test b/mysql-test/t/ps_6bdb.test deleted file mode 100644 index 49dd7aa924b..00000000000 --- a/mysql-test/t/ps_6bdb.test +++ /dev/null @@ -1,25 +0,0 @@ -############################################### -# # -# Prepared Statements test on BDB tables # -# # -############################################### - -# -# NOTE: PLEASE SEE ps_1general.test (bottom) -# BEFORE ADDING NEW TEST CASES HERE !!! - -use test; - --- source include/have_bdb.inc -let $type= 'BDB' ; --- source include/ps_create.inc --- source include/ps_renew.inc - --- source include/ps_query.inc --- source include/ps_modify.inc --- source include/ps_modify1.inc --- source include/ps_conv.inc - -drop table t1, t9; - -# End of 4.1 tests diff --git a/mysql-test/t/rowid_order_bdb.test b/mysql-test/t/rowid_order_bdb.test deleted file mode 100644 index ef133054c35..00000000000 --- a/mysql-test/t/rowid_order_bdb.test +++ /dev/null @@ -1,108 +0,0 @@ -# -# Test for rowid ordering (and comparison) functions. -# do index_merge select for tables with PK of various types. -# ---disable_warnings -drop table if exists t1, t2, t3,t4; ---enable_warnings - --- source include/have_bdb.inc - -# Signed number as rowid -create table t1 ( - pk1 int not NULL, - key1 int(11), - key2 int(11), - PRIMARY KEY (pk1), - KEY key1 (key1), - KEY key2 (key2) -) engine=bdb; -insert into t1 values (-5, 1, 1), - (-100, 1, 1), - (3, 1, 1), - (0, 1, 1), - (10, 1, 1); -explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# Unsigned numbers as rowids -create table t1 ( - pk1 int unsigned not NULL, - key1 int(11), - key2 int(11), - PRIMARY KEY (pk1), - KEY key1 (key1), - KEY key2 (key2) -) engine=bdb; -insert into t1 values (0, 1, 1), - (0xFFFFFFFF, 1, 1), - (0xFFFFFFFE, 1, 1), - (1, 1, 1), - (2, 1, 1); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# Case-insensitive char(N) -create table t1 ( - pk1 char(4) not NULL, - key1 int(11), - key2 int(11), - PRIMARY KEY (pk1), - KEY key1 (key1), - KEY key2 (key2) -) engine=bdb collate latin2_general_ci; -insert into t1 values ('a1', 1, 1), - ('b2', 1, 1), - ('A3', 1, 1), - ('B4', 1, 1); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# Multi-part PK -create table t1 ( - pk1 int not NULL, - pk2 char(4) not NULL collate latin1_german1_ci, - pk3 char(4) not NULL collate latin1_bin, - key1 int(11), - key2 int(11), - PRIMARY KEY (pk1,pk2,pk3), - KEY key1 (key1), - KEY key2 (key2) -) engine=bdb; -insert into t1 values - (1, 'u', 'u', 1, 1), - (1, 'u', char(0xEC), 1, 1), - (1, 'u', 'x', 1, 1); -insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1; -insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u'; -insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1; -select * from t1; -select * from t1 force 
index(key1, key2) where key1 < 3 or key2 < 3; - -# Hidden PK -alter table t1 drop primary key; -select * from t1; -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# Variable-length PK -# this is also test for Bug#2688 -create table t1 ( - pk1 varchar(8) NOT NULL default '', - pk2 varchar(4) NOT NULL default '', - key1 int(11), - key2 int(11), - primary key(pk1, pk2), - KEY key1 (key1), - KEY key2 (key2) -) engine=bdb; -insert into t1 values ('','empt',2,2), - ('a','a--a',2,2), - ('bb','b--b',2,2), - ('ccc','c--c',2,2), - ('dddd','d--d',2,2); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; - -drop table t1; - diff --git a/mysys/base64.c b/mysys/base64.c index 610797dd2ce..fb51bdb3a60 100644 --- a/mysys/base64.c +++ b/mysys/base64.c @@ -42,7 +42,7 @@ base64_needed_encoded_length(int length_of_data) int base64_needed_decoded_length(int length_of_encoded_data) { - return ceil(length_of_encoded_data * 3 / 4); + return (int)ceil(length_of_encoded_data * 3 / 4); } diff --git a/mysys/my_pread.c b/mysys/my_pread.c index ac52895efe9..978366e57e5 100644 --- a/mysys/my_pread.c +++ b/mysys/my_pread.c @@ -46,7 +46,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset, before seeking to the given offset */ - error= (old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || + error= (old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || lseek(Filedes, offset, MY_SEEK_SET) == -1L; if (!error) /* Seek was successful */ @@ -121,7 +121,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset, As we cannot change the file pointer, we save the old position, before seeking to the given offset */ - error= ((old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || + error= ((old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || lseek(Filedes, offset, MY_SEEK_SET) == -1L); if (!error) /* Seek was successful */ diff --git a/netware/mysql_install_db.c b/netware/mysql_install_db.c index 65ee7873e5c..07e02c35ff3 100644 --- a/netware/mysql_install_db.c +++ b/netware/mysql_install_db.c @@ -361,7 +361,6 @@ int mysql_install_db(int argc, char *argv[]) add_arg(&al, "--bootstrap"); add_arg(&al, "--skip-grant-tables"); add_arg(&al, "--skip-innodb"); - add_arg(&al, "--skip-bdb"); // spawn mysqld err = spawn(mysqld, &al, TRUE, sql_file, out_log, err_log); diff --git a/netware/mysql_test_run.c b/netware/mysql_test_run.c index 9b02f897a60..774aa61bea4 100644 --- a/netware/mysql_test_run.c +++ b/netware/mysql_test_run.c @@ -210,7 +210,6 @@ void install_db(char *datadir) add_arg(&al, "--basedir=%s", base_dir); add_arg(&al, "--datadir=%s", datadir); add_arg(&al, "--skip-innodb"); - add_arg(&al, "--skip-bdb"); // spawn if ((err = spawn(mysqld_file, &al, TRUE, input, output, error)) != 0) diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index 4ac9cf909e9..58ac9c4e3ad 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -212,7 +212,7 @@ then fi mysqld_install_cmd_line="$mysqld $defaults $mysqld_opt --bootstrap \ --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb \ ---skip-bdb --skip-ndbcluster $args --max_allowed_packet=8M --net_buffer_length=16K" +--skip-ndbcluster $args --max_allowed_packet=8M --net_buffer_length=16K" if $scriptdir/mysql_create_system_tables $create_option $mdata $hostname $windows \ | eval "$mysqld_install_cmd_line" then diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 8efe09a45be..b4cfe041529 100644 --- 
a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -429,7 +429,7 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time, goto err; } - if (check_date(l_time, not_zero_date, flags, was_cut)) + if ((my_bool)check_date(l_time, not_zero_date, flags, was_cut)) goto err; l_time->time_type= (number_of_fields <= 3 ? diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 95073b95ad6..4d0c0cf3207 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -8,8 +8,7 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/zlib - ${CMAKE_SOURCE_DIR}/storage/bdb/build_win32 - ${CMAKE_SOURCE_DIR}/storage/bdb/dbinc) +) SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc ${CMAKE_SOURCE_DIR}/sql/message.h @@ -29,7 +28,7 @@ ADD_DEFINITIONS(-DHAVE_ROW_BASED_REPLICATION -DMYSQL_SERVER ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc discover.cc ../libmysql/errmsg.c field.cc field_conv.cc filesort.cc gstream.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc - ha_innodb.cc ha_partition.cc ha_federated.cc ha_berkeley.cc + ha_innodb.cc ha_partition.cc ha_federated.cc handler.cc hash_filo.cc hash_filo.h hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc item_create.cc item_func.cc item_geofunc.cc item_row.cc @@ -79,9 +78,6 @@ ENDIF(WITH_EXAMPLE_STORAGE_ENGINE) IF(WITH_INNOBASE_STORAGE_ENGINE) TARGET_LINK_LIBRARIES(mysqld innobase) ENDIF(WITH_INNOBASE_STORAGE_ENGINE) -IF(WITH_BERKELEY_STORAGE_ENGINE) - TARGET_LINK_LIBRARIES(mysqld bdb) -ENDIF(WITH_BERKELEY_STORAGE_ENGINE) ADD_DEPENDENCIES(mysqld GenError) diff --git a/sql/Makefile.am b/sql/Makefile.am index 31d6a327c06..5c509707f51 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -47,10 +47,10 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ item_create.h item_subselect.h item_row.h \ mysql_priv.h item_geofunc.h sql_bitmap.h \ procedure.h sql_class.h sql_lex.h sql_list.h \ - sql_manager.h sql_map.h sql_string.h unireg.h \ + sql_map.h sql_string.h unireg.h \ sql_error.h field.h handler.h mysqld_suffix.h \ ha_heap.h ha_myisam.h ha_myisammrg.h ha_partition.h \ - ha_innodb.h ha_berkeley.h ha_federated.h \ + ha_innodb.h ha_federated.h \ ha_ndbcluster.h ha_ndbcluster_binlog.h \ ha_ndbcluster_tables.h \ opt_range.h protocol.h rpl_tblmap.h \ @@ -88,7 +88,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ discover.cc time.cc opt_range.cc opt_sum.cc \ records.cc filesort.cc handler.cc \ ha_heap.cc ha_myisam.cc ha_myisammrg.cc \ - ha_partition.cc ha_innodb.cc ha_berkeley.cc \ + ha_partition.cc ha_innodb.cc \ ha_federated.cc \ ha_ndbcluster.cc ha_ndbcluster_binlog.cc \ sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \ @@ -161,9 +161,6 @@ lex_hash.h: gen_lex_hash$(EXEEXT) ./gen_lex_hash$(EXEEXT) > $@ # the following three should eventually be moved out of this directory -ha_berkeley.o: ha_berkeley.cc ha_berkeley.h - $(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $< - ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h $(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $< diff --git a/sql/field.cc b/sql/field.cc index e45e1586d89..8ac32f03049 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -6571,7 +6571,7 @@ void Field_varstring::sql_type(String &res) const } -uint Field_varstring::data_length(const char *from) +uint32 Field_varstring::data_length(const char *from) { return length_bytes == 1 ? 
(uint) (uchar) *ptr : uint2korr(ptr); } diff --git a/sql/field.h b/sql/field.h index d0568d1d297..fce3b51c04b 100644 --- a/sql/field.h +++ b/sql/field.h @@ -118,6 +118,11 @@ public: */ virtual String *val_str(String*,String *)=0; String *val_int_as_str(String *val_buffer, my_bool unsigned_flag); + /* + str_needs_quotes() returns TRUE if the value returned by val_str() needs + to be quoted when used in constructing an SQL query. + */ + virtual bool str_needs_quotes() { return FALSE; } virtual Item_result result_type () const=0; virtual Item_result cmp_type () const { return result_type(); } virtual Item_result cast_to_int_type () const { return result_type(); } @@ -417,6 +422,7 @@ public: uint32 max_length() { return field_length; } friend class create_field; my_decimal *val_decimal(my_decimal *); + virtual bool str_needs_quotes() { return TRUE; } uint is_equal(create_field *new_field); }; @@ -1120,7 +1126,7 @@ public: int key_cmp(const byte *str, uint length); uint packed_col_length(const char *to, uint length); uint max_packed_col_length(uint max_length); - uint data_length(const char *from); + uint32 data_length(const char *from); uint size_of() const { return sizeof(*this); } enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; } bool has_charset(void) const @@ -1385,6 +1391,7 @@ public: double val_real(void); longlong val_int(void); String *val_str(String*, String *); + virtual bool str_needs_quotes() { return TRUE; } my_decimal *val_decimal(my_decimal *); int cmp(const char *a, const char *b) { return cmp_binary(a, b); } diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc deleted file mode 100644 index d8159a81f90..00000000000 --- a/sql/ha_berkeley.cc +++ /dev/null @@ -1,2754 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -/* - TODO: - - Not compressed keys should use cmp_fix_length_key - - Don't automaticly pack all string keys (To do this we need to modify - CREATE TABLE so that one can use the pack_keys argument per key). - - An argument to pack_key that we don't want compression. - - DB_DBT_USERMEM should be used for fixed length tables - We will need an updated Berkeley DB version for this. - - Killing threads that has got a 'deadlock' - - SHOW TABLE STATUS should give more information about the table. - - Get a more accurate count of the number of rows - (estimate_rows_upper_bound()). - We could store the found number of rows when the table is scanned and - then increment the counter for each attempted write. - - We will need to extend the manager thread to makes checkpoints at - given intervals. - - When not using UPDATE IGNORE, don't make a sub transaction but abort - the main transaction on errors. - - Handling of drop table during autocommit=0 ? - (Should we just give an error in this case if there is a pending - transaction ?) 
- - When using ALTER TABLE IGNORE, we should not start an transaction, but do - everything wthout transactions. - - When we do rollback, we need to subtract the number of changed rows - from the updated tables. - - Testing of: - - Mark tables that participate in a transaction so that they are not - closed during the transaction. We need to test what happens if - MySQL closes a table that is updated by a not commited transaction. -*/ - - -#ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation -#endif - -#include "mysql_priv.h" - -#include -#include -#include - -#ifdef WITH_BERKELEY_STORAGE_ENGINE -#include "ha_berkeley.h" -#include "sql_manager.h" -#include - -#include - -#define HA_BERKELEY_ROWS_IN_TABLE 10000 /* to get optimization right */ -#define HA_BERKELEY_RANGE_COUNT 100 -#define HA_BERKELEY_MAX_ROWS 10000000 /* Max rows in table */ -/* extra rows for estimate_rows_upper_bound() */ -#define HA_BERKELEY_EXTRA_ROWS 100 - -/* Bits for share->status */ -#define STATUS_PRIMARY_KEY_INIT 1 -#define STATUS_ROW_COUNT_INIT 2 -#define STATUS_BDB_ANALYZE 4 - -const u_int32_t bdb_DB_TXN_NOSYNC= DB_TXN_NOSYNC; -const u_int32_t bdb_DB_RECOVER= DB_RECOVER; -const u_int32_t bdb_DB_PRIVATE= DB_PRIVATE; -const u_int32_t bdb_DB_DIRECT_DB= DB_DIRECT_DB; -const u_int32_t bdb_DB_DIRECT_LOG= DB_DIRECT_LOG; -const char *ha_berkeley_ext=".db"; -bool berkeley_shared_data=0; -u_int32_t berkeley_init_flags= DB_PRIVATE | DB_RECOVER, - berkeley_env_flags= DB_LOG_AUTOREMOVE, - berkeley_lock_type= DB_LOCK_DEFAULT; -ulong berkeley_log_buffer_size=0 , berkeley_log_file_size=0; -ulonglong berkeley_cache_size= 0; -char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; -long berkeley_lock_scan_time=0; -ulong berkeley_region_size=0, berkeley_cache_parts=1; -ulong berkeley_trans_retry=1; -ulong berkeley_max_lock; -pthread_mutex_t bdb_mutex; - -static DB_ENV *db_env; -static HASH bdb_open_tables; - -static const char berkeley_hton_name[]= "BerkeleyDB"; -static const int berkeley_hton_name_length=sizeof(berkeley_hton_name)-1; - -const char *berkeley_lock_names[] = -{ "DEFAULT", "OLDEST", "RANDOM", "YOUNGEST", "EXPIRE", "MAXLOCKS", - "MAXWRITE", "MINLOCKS", "MINWRITE", 0 }; -u_int32_t berkeley_lock_types[]= -{ DB_LOCK_DEFAULT, DB_LOCK_OLDEST, DB_LOCK_RANDOM, DB_LOCK_YOUNGEST, - DB_LOCK_EXPIRE, DB_LOCK_MAXLOCKS, DB_LOCK_MAXWRITE, DB_LOCK_MINLOCKS, - DB_LOCK_MINWRITE }; -TYPELIB berkeley_lock_typelib= {array_elements(berkeley_lock_names)-1,"", - berkeley_lock_names, NULL}; - -static void berkeley_print_error(const DB_ENV *db_env, const char *db_errpfx, - const char *buffer); -static byte* bdb_get_key(BDB_SHARE *share,uint *length, - my_bool not_used __attribute__((unused))); -static BDB_SHARE *get_share(const char *table_name, TABLE *table); -static int free_share(BDB_SHARE *share, TABLE *table, uint hidden_primary_key, - bool mutex_is_locked); -static int write_status(DB *status_block, char *buff, uint length); -static void update_status(BDB_SHARE *share, TABLE *table); - -static int berkeley_close_connection(THD *thd); -static int berkeley_commit(THD *thd, bool all); -static int berkeley_rollback(THD *thd, bool all); -static int berkeley_rollback_to_savepoint(THD* thd, void *savepoint); -static int berkeley_savepoint(THD* thd, void *savepoint); -static int berkeley_release_savepoint(THD* thd, void *savepoint); -static handler *berkeley_create_handler(TABLE_SHARE *table, - MEM_ROOT *mem_root); - -handlerton berkeley_hton; - -static handler *berkeley_create_handler(TABLE_SHARE *table, MEM_ROOT 
*mem_root) -{ - return new (mem_root) ha_berkeley(table); -} - -typedef struct st_berkeley_trx_data { - DB_TXN *all; - DB_TXN *stmt; - DB_TXN *sp_level; - uint bdb_lock_count; -} berkeley_trx_data; - -/* General functions */ - -int berkeley_init(void) -{ - DBUG_ENTER("berkeley_init"); - - berkeley_hton.state=SHOW_OPTION_YES; - berkeley_hton.db_type=DB_TYPE_BERKELEY_DB; - berkeley_hton.savepoint_offset=sizeof(DB_TXN *); - berkeley_hton.close_connection=berkeley_close_connection; - berkeley_hton.savepoint_set=berkeley_savepoint; - berkeley_hton.savepoint_rollback=berkeley_rollback_to_savepoint; - berkeley_hton.savepoint_release=berkeley_release_savepoint; - berkeley_hton.commit=berkeley_commit; - berkeley_hton.rollback=berkeley_rollback; - berkeley_hton.create=berkeley_create_handler; - berkeley_hton.panic=berkeley_end; - berkeley_hton.flush_logs=berkeley_flush_logs; - berkeley_hton.show_status=berkeley_show_status; - berkeley_hton.flags=HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME; - - if (have_berkeley_db != SHOW_OPTION_YES) - return 0; // nothing else to do - - if (!berkeley_tmpdir) - berkeley_tmpdir=mysql_tmpdir; - if (!berkeley_home) - berkeley_home=mysql_real_data_home; - DBUG_PRINT("bdb",("berkeley_home: %s",mysql_real_data_home)); - - /* - If we don't set set_lg_bsize() we will get into trouble when - trying to use many open BDB tables. - If log buffer is not set, assume that the we will need 512 byte per - open table. This is a number that we have reached by testing. - */ - if (!berkeley_log_buffer_size) - { - berkeley_log_buffer_size= max(table_cache_size*512,32*1024); - } - /* - Berkeley DB require that - berkeley_log_file_size >= berkeley_log_buffer_size*4 - */ - berkeley_log_file_size= berkeley_log_buffer_size*4; - berkeley_log_file_size= MY_ALIGN(berkeley_log_file_size,1024*1024L); - berkeley_log_file_size= max(berkeley_log_file_size, 10*1024*1024L); - - if (db_env_create(&db_env,0)) - goto error; - db_env->set_errcall(db_env,berkeley_print_error); - db_env->set_errpfx(db_env,"bdb"); - db_env->set_tmp_dir(db_env, berkeley_tmpdir); - db_env->set_data_dir(db_env, mysql_data_home); - db_env->set_flags(db_env, berkeley_env_flags, 1); - if (berkeley_logdir) - db_env->set_lg_dir(db_env, berkeley_logdir); /* purecov: tested */ - - if (opt_endinfo) - db_env->set_verbose(db_env, - DB_VERB_DEADLOCK | DB_VERB_RECOVERY, - 1); - - if (berkeley_cache_size > (uint) ~0) - db_env->set_cachesize(db_env, berkeley_cache_size / (1024*1024L*1024L), - berkeley_cache_size % (1024L*1024L*1024L), - berkeley_cache_parts); - else - db_env->set_cachesize(db_env, 0, berkeley_cache_size, berkeley_cache_parts); - - db_env->set_lg_max(db_env, berkeley_log_file_size); - db_env->set_lg_bsize(db_env, berkeley_log_buffer_size); - db_env->set_lk_detect(db_env, berkeley_lock_type); - db_env->set_lg_regionmax(db_env, berkeley_region_size); - if (berkeley_max_lock) - db_env->set_lk_max(db_env, berkeley_max_lock); - - if (db_env->open(db_env, - berkeley_home, - berkeley_init_flags | DB_INIT_LOCK | - DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | - DB_CREATE | DB_THREAD, 0666)) - { - db_env->close(db_env,0); - db_env=0; - goto error; - } - - (void) hash_init(&bdb_open_tables,system_charset_info,32,0,0, - (hash_get_key) bdb_get_key,0,0); - pthread_mutex_init(&bdb_mutex,MY_MUTEX_INIT_FAST); - DBUG_RETURN(FALSE); -error: - have_berkeley_db= SHOW_OPTION_DISABLED; // If we couldn't use handler - DBUG_RETURN(TRUE); -} - - -int berkeley_end(ha_panic_function type) -{ - int error= 0; - DBUG_ENTER("berkeley_end"); - if 
(db_env) - { - berkeley_cleanup_log_files(); - error= db_env->close(db_env,0); // Error is logged - db_env= 0; - hash_free(&bdb_open_tables); - pthread_mutex_destroy(&bdb_mutex); - } - DBUG_RETURN(error); -} - -static int berkeley_close_connection(THD *thd) -{ - my_free((gptr)thd->ha_data[berkeley_hton.slot], MYF(0)); - return 0; -} - -bool berkeley_flush_logs() -{ - int error; - bool result=0; - DBUG_ENTER("berkeley_flush_logs"); - if ((error=db_env->log_flush(db_env,0))) - { - my_error(ER_ERROR_DURING_FLUSH_LOGS,MYF(0),error); /* purecov: inspected */ - result=1; /* purecov: inspected */ - } - if ((error=db_env->txn_checkpoint(db_env,0,0,0))) - { - my_error(ER_ERROR_DURING_CHECKPOINT,MYF(0),error); /* purecov: inspected */ - result=1; /* purecov: inspected */ - } - DBUG_RETURN(result); -} - -static int berkeley_commit(THD *thd, bool all) -{ - DBUG_ENTER("berkeley_commit"); - DBUG_PRINT("trans",("ending transaction %s", all ? "all" : "stmt")); - berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; - DB_TXN **txn= all ? &trx->all : &trx->stmt; - int error= (*txn)->commit(*txn,0); - *txn=0; -#ifndef DBUG_OFF - if (error) - DBUG_PRINT("error",("error: %d",error)); -#endif - DBUG_RETURN(error); -} - -static int berkeley_rollback(THD *thd, bool all) -{ - DBUG_ENTER("berkeley_rollback"); - DBUG_PRINT("trans",("aborting transaction %s", all ? "all" : "stmt")); - berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; - DB_TXN **txn= all ? &trx->all : &trx->stmt; - int error= (*txn)->abort(*txn); - *txn=0; - DBUG_RETURN(error); -} - -static int berkeley_savepoint(THD* thd, void *savepoint) -{ - int error; - DB_TXN **save_txn= (DB_TXN**) savepoint; - DBUG_ENTER("berkeley_savepoint"); - berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; - if (!(error= db_env->txn_begin(db_env, trx->sp_level, save_txn, 0))) - { - trx->sp_level= *save_txn; - } - DBUG_RETURN(error); -} - -static int berkeley_rollback_to_savepoint(THD* thd, void *savepoint) -{ - int error; - DB_TXN *parent, **save_txn= (DB_TXN**) savepoint; - DBUG_ENTER("berkeley_rollback_to_savepoint"); - berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; - parent= (*save_txn)->parent; - if (!(error= (*save_txn)->abort(*save_txn))) - { - trx->sp_level= parent; - error= berkeley_savepoint(thd, savepoint); - } - DBUG_RETURN(error); -} - -static int berkeley_release_savepoint(THD* thd, void *savepoint) -{ - int error; - DB_TXN *parent, **save_txn= (DB_TXN**) savepoint; - DBUG_ENTER("berkeley_release_savepoint"); - berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; - parent= (*save_txn)->parent; - if (!(error= (*save_txn)->commit(*save_txn,0))) - { - trx->sp_level= parent; - *save_txn= 0; - } - DBUG_RETURN(error); -} - -static bool berkeley_show_logs(THD *thd, stat_print_fn *stat_print) -{ - char **all_logs, **free_logs, **a, **f; - int error=1; - MEM_ROOT **root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**,THR_MALLOC); - MEM_ROOT show_logs_root, *old_mem_root= *root_ptr; - DBUG_ENTER("berkeley_show_logs"); - - init_sql_alloc(&show_logs_root, BDB_LOG_ALLOC_BLOCK_SIZE, - BDB_LOG_ALLOC_BLOCK_SIZE); - *root_ptr= &show_logs_root; - all_logs= free_logs= 0; - - if ((error= db_env->log_archive(db_env, &all_logs, - DB_ARCH_ABS | DB_ARCH_LOG)) || - (error= db_env->log_archive(db_env, &free_logs, DB_ARCH_ABS))) - { - DBUG_PRINT("error", ("log_archive failed (error %d)", error)); - db_env->err(db_env, error, "log_archive: DB_ARCH_ABS"); - if 
(error== DB_NOTFOUND) - error=0; // No log files - goto err; - } - /* Error is 0 here */ - if (all_logs) - { - for (a = all_logs, f = free_logs; *a; ++a) - { - if (f && *f && strcmp(*a, *f) == 0) - { - f++; - if ((error= stat_print(thd, berkeley_hton_name, - berkeley_hton_name_length, *a, strlen(*a), - STRING_WITH_LEN(SHOW_LOG_STATUS_FREE)))) - break; - } - else - { - if ((error= stat_print(thd, berkeley_hton_name, - berkeley_hton_name_length, *a, strlen(*a), - STRING_WITH_LEN(SHOW_LOG_STATUS_INUSE)))) - break; - } - } - } -err: - if (all_logs) - free(all_logs); - if (free_logs) - free(free_logs); - free_root(&show_logs_root,MYF(0)); - *root_ptr= old_mem_root; - DBUG_RETURN(error); -} - -bool berkeley_show_status(THD *thd, stat_print_fn *stat_print, - enum ha_stat_type stat_type) -{ - switch (stat_type) { - case HA_ENGINE_LOGS: - return berkeley_show_logs(thd, stat_print); - default: - return FALSE; - } -} - -static void berkeley_print_error(const DB_ENV *db_env, const char *db_errpfx, - const char *buffer) -{ - sql_print_error("%s: %s",db_errpfx,buffer); /* purecov: tested */ -} - - -void berkeley_cleanup_log_files(void) -{ - DBUG_ENTER("berkeley_cleanup_log_files"); - char **names; - int error; - -// by HF. Sometimes it crashes. TODO - find out why -#ifndef EMBEDDED_LIBRARY - /* XXX: Probably this should be done somewhere else, and - * should be tunable by the user. */ - if ((error = db_env->txn_checkpoint(db_env, 0, 0, 0))) - my_error(ER_ERROR_DURING_CHECKPOINT, MYF(0), error); /* purecov: inspected */ -#endif - if ((error = db_env->log_archive(db_env, &names, DB_ARCH_ABS)) != 0) - { - DBUG_PRINT("error", ("log_archive failed (error %d)", error)); /* purecov: inspected */ - db_env->err(db_env, error, "log_archive: DB_ARCH_ABS"); /* purecov: inspected */ - DBUG_VOID_RETURN; /* purecov: inspected */ - } - - if (names) - { /* purecov: tested */ - char **np; /* purecov: tested */ - for (np = names; *np; ++np) /* purecov: tested */ - my_delete(*np, MYF(MY_WME)); /* purecov: tested */ - - free(names); /* purecov: tested */ - } - - DBUG_VOID_RETURN; -} - - -/***************************************************************************** -** Berkeley DB tables -*****************************************************************************/ - -ha_berkeley::ha_berkeley(TABLE_SHARE *table_arg) - :handler(&berkeley_hton, table_arg), alloc_ptr(0), rec_buff(0), file(0), - int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ | - HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | - HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | - HA_CAN_GEOMETRY | - HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX), - changed_rows(0), last_dup_key((uint) -1), version(0), using_ignore(0) -{} - - -static const char *ha_berkeley_exts[] = { - ha_berkeley_ext, - NullS -}; - -const char **ha_berkeley::bas_ext() const -{ - return ha_berkeley_exts; -} - -ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const -{ - ulong flags= (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEYREAD_ONLY - | HA_READ_RANGE); - for (uint i= all_parts ? 0 : part ; i <= part ; i++) - { - KEY_PART_INFO *key_part= table_share->key_info[idx].key_part+i; - if (key_part->field->type() == FIELD_TYPE_BLOB) - { - /* We can't use BLOBS to shortcut sorts */ - flags&= ~(HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE); - break; - } - switch (key_part->field->key_type()) { - case HA_KEYTYPE_TEXT: - case HA_KEYTYPE_VARTEXT1: - case HA_KEYTYPE_VARTEXT2: - /* - As BDB stores only one copy of equal strings, we can't use key read - on these. 
Binary collations do support key read though. - */ - if (!(key_part->field->charset()->state & MY_CS_BINSORT)) - flags&= ~HA_KEYREAD_ONLY; - break; - default: // Keep compiler happy - break; - } - } - return flags; -} - - -static int -berkeley_cmp_hidden_key(DB* file, const DBT *new_key, const DBT *saved_key) -{ - ulonglong a=uint5korr((char*) new_key->data); - ulonglong b=uint5korr((char*) saved_key->data); - return a < b ? -1 : (a > b ? 1 : 0); -} - -static int -berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) -{ - KEY *key= (new_key->app_private ? (KEY*) new_key->app_private : - (KEY*) (file->app_private)); - char *new_key_ptr= (char*) new_key->data; - char *saved_key_ptr=(char*) saved_key->data; - KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts; - uint key_length=new_key->size; - - DBUG_DUMP("key_in_index", saved_key_ptr, saved_key->size); - for (; key_part != end && (int) key_length > 0; key_part++) - { - int cmp; - uint length; - if (key_part->null_bit) - { - if (*new_key_ptr != *saved_key_ptr++) - return ((int) *new_key_ptr - (int) saved_key_ptr[-1]); - key_length--; - if (!*new_key_ptr++) - continue; - } - if ((cmp= key_part->field->pack_cmp(new_key_ptr,saved_key_ptr, - key_part->length, - key->table->insert_or_update))) - return cmp; - length= key_part->field->packed_col_length(new_key_ptr, - key_part->length); - new_key_ptr+=length; - key_length-=length; - saved_key_ptr+=key_part->field->packed_col_length(saved_key_ptr, - key_part->length); - } - return key->handler.bdb_return_if_eq; -} - - -/* The following is not yet used; Should be used for fixed length keys */ - -#ifdef NOT_YET -static int -berkeley_cmp_fix_length_key(DB *file, const DBT *new_key, const DBT *saved_key) -{ - KEY *key= (new_key->app_private ? (KEY*) new_key->app_private : - (KEY*) (file->app_private)); - char *new_key_ptr= (char*) new_key->data; - char *saved_key_ptr=(char*) saved_key->data; - KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts; - uint key_length=new_key->size; - - for (; key_part != end && (int) key_length > 0 ; key_part++) - { - int cmp; - if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr,0,0))) - return cmp; - new_key_ptr+=key_part->length; - key_length-= key_part->length; - saved_key_ptr+=key_part->length; - } - return key->handler.bdb_return_if_eq; -} -#endif - - -/* Compare key against row */ - -static bool -berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length) -{ - KEY_PART_INFO *key_part= key_info->key_part, - *end=key_part+key_info->key_parts; - - for (; key_part != end && (int) key_length > 0; key_part++) - { - int cmp; - uint length; - if (key_part->null_bit) - { - key_length--; - /* - With the current usage, the following case will always be FALSE, - because NULL keys are sorted before any other key - */ - if (*key != (table->record[0][key_part->null_offset] & - key_part->null_bit) ? 0 : 1) - return 1; - if (!*key++) // Null value - continue; - } - /* - Last argument has to be 0 as we are also using this to function to see - if a key like 'a ' matched a row with 'a' - */ - if ((cmp= key_part->field->pack_cmp(key, key_part->length, 0))) - return cmp; - length= key_part->field->packed_col_length(key,key_part->length); - key+= length; - key_length-= length; - } - return 0; // Identical keys -} - - -int ha_berkeley::open(const char *name, int mode, uint test_if_locked) -{ - char name_buff[FN_REFLEN]; - uint open_mode=(mode == O_RDONLY ? 
DB_RDONLY : 0) | DB_THREAD; - uint max_key_length; - int error; - DBUG_ENTER("ha_berkeley::open"); - - /* Open primary key */ - hidden_primary_key=0; - if ((primary_key= table_share->primary_key) >= MAX_KEY) - { // No primary key - primary_key= table_share->keys; - key_used_on_scan=MAX_KEY; - ref_length=hidden_primary_key=BDB_HIDDEN_PRIMARY_KEY_LENGTH; - } - else - key_used_on_scan=primary_key; - - /* Need some extra memory in case of packed keys */ - max_key_length= table_share->max_key_length + MAX_REF_PARTS*3; - if (!(alloc_ptr= - my_multi_malloc(MYF(MY_WME), - &key_buff, max_key_length, - &key_buff2, max_key_length, - &primary_key_buff, - (hidden_primary_key ? 0 : - table_share->key_info[table_share->primary_key].key_length), - NullS))) - DBUG_RETURN(1); /* purecov: inspected */ - if (!(rec_buff= (byte*) my_malloc((alloced_rec_buff_length= - table_share->rec_buff_length), - MYF(MY_WME)))) - { - my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - - /* Init shared structure */ - if (!(share= get_share(name,table))) - { - my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */ - my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - thr_lock_data_init(&share->lock,&lock,(void*) 0); - key_file = share->key_file; - key_type = share->key_type; - bzero((char*) ¤t_row,sizeof(current_row)); - - /* Fill in shared structure, if needed */ - pthread_mutex_lock(&share->mutex); - file= share->file; - if (!share->use_count++) - { - if ((error=db_create(&file, db_env, 0))) - { - free_share(share,table, hidden_primary_key,1); /* purecov: inspected */ - my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */ - my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ - my_errno=error; /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - share->file= file; - - file->set_bt_compare(file, - (hidden_primary_key ? berkeley_cmp_hidden_key : - berkeley_cmp_packed_key)); - if (!hidden_primary_key) - file->app_private= (void*) (table->key_info + table_share->primary_key); - if ((error= db_env->txn_begin(db_env, NULL, (DB_TXN**) &transaction, 0)) || - (error= (file->open(file, transaction, - fn_format(name_buff, name, "", ha_berkeley_ext, - MY_UNPACK_FILENAME|MY_APPEND_EXT), - "main", DB_BTREE, open_mode, 0))) || - (error= transaction->commit(transaction, 0))) - { - free_share(share, table, hidden_primary_key,1); /* purecov: inspected */ - my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */ - my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ - my_errno=error; /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - - /* Open other keys; These are part of the share structure */ - key_file[primary_key]=file; - key_type[primary_key]= hidden_primary_key ? 0 : DB_NOOVERWRITE; - - DB **ptr=key_file; - for (uint i=0, used_keys=0; i < table_share->keys ; i++, ptr++) - { - char part[7]; - if (i != primary_key) - { - if ((error=db_create(ptr, db_env, 0))) - { - close(); /* purecov: inspected */ - my_errno=error; /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - sprintf(part,"key%02d",++used_keys); - key_type[i]=table->key_info[i].flags & HA_NOSAME ? 
DB_NOOVERWRITE : 0; - (*ptr)->set_bt_compare(*ptr, berkeley_cmp_packed_key); - (*ptr)->app_private= (void*) (table->key_info+i); - if (!(table->key_info[i].flags & HA_NOSAME)) - { - DBUG_PRINT("bdb",("Setting DB_DUP for key %u", i)); - (*ptr)->set_flags(*ptr, DB_DUP); - } - if ((error= db_env->txn_begin(db_env, NULL, (DB_TXN**) &transaction, - 0)) || - (error=((*ptr)->open(*ptr, transaction, name_buff, part, DB_BTREE, - open_mode, 0))) || - (error= transaction->commit(transaction, 0))) - { - close(); /* purecov: inspected */ - my_errno=error; /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - } - } - /* Calculate pack_length of primary key */ - share->fixed_length_primary_key= 1; - if (!hidden_primary_key) - { - ref_length=0; - KEY_PART_INFO *key_part= table->key_info[primary_key].key_part; - KEY_PART_INFO *end=key_part+table->key_info[primary_key].key_parts; - for (; key_part != end ; key_part++) - ref_length+= key_part->field->max_packed_col_length(key_part->length); - share->fixed_length_primary_key= - (ref_length == table->key_info[primary_key].key_length); - share->status|= STATUS_PRIMARY_KEY_INIT; - } - share->ref_length= ref_length; - } - ref_length= share->ref_length; // If second open - pthread_mutex_unlock(&share->mutex); - - transaction=0; - cursor=0; - key_read=0; - stats.block_size=8192; // Berkeley DB block size - share->fixed_length_row= !(table_share->db_create_options & - HA_OPTION_PACK_RECORD); - - get_status(); - info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); - DBUG_RETURN(0); -} - - -int ha_berkeley::close(void) -{ - DBUG_ENTER("ha_berkeley::close"); - - my_free((char*) rec_buff,MYF(MY_ALLOW_ZERO_PTR)); - my_free(alloc_ptr,MYF(MY_ALLOW_ZERO_PTR)); - ha_berkeley::reset(); // current_row buffer - DBUG_RETURN(free_share(share,table, hidden_primary_key,0)); -} - - -/* Reallocate buffer if needed */ - -bool ha_berkeley::fix_rec_buff_for_blob(ulong length) -{ - if (! rec_buff || length > alloced_rec_buff_length) - { - byte *newptr; - if (!(newptr=(byte*) my_realloc((gptr) rec_buff, length, - MYF(MY_ALLOW_ZERO_PTR)))) - return 1; /* purecov: inspected */ - rec_buff=newptr; - alloced_rec_buff_length=length; - } - return 0; -} - - -/* Calculate max length needed for row */ - -ulong ha_berkeley::max_row_length(const byte *buf) -{ - ulong length= table_share->reclength + table_share->fields*2; - uint *ptr, *end; - for (ptr= table_share->blob_field, end=ptr + table_share->blob_fields ; - ptr != end ; - ptr++) - { - Field_blob *blob= ((Field_blob*) table->field[*ptr]); - length+= blob->get_length((char*) buf + blob->offset())+2; - } - return length; -} - - -/* - Pack a row for storage. If the row is of fixed length, just store the - row 'as is'. - If not, we will generate a packed row suitable for storage. - This will only fail if we don't have enough memory to pack the row, which; - may only happen in rows with blobs, as the default row length is - pre-allocated. 
-*/ - -int ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row) -{ - byte *ptr; - bzero((char*) row,sizeof(*row)); - if (share->fixed_length_row) - { - row->data=(void*) record; - row->size= table_share->reclength+hidden_primary_key; - if (hidden_primary_key) - { - if (new_row) - get_auto_primary_key(current_ident); - memcpy_fixed((char*) record+table_share->reclength, - (char*) current_ident, - BDB_HIDDEN_PRIMARY_KEY_LENGTH); - } - return 0; - } - if (table_share->blob_fields) - { - if (fix_rec_buff_for_blob(max_row_length(record))) - return HA_ERR_OUT_OF_MEM; /* purecov: inspected */ - } - - /* Copy null bits */ - memcpy(rec_buff, record, table_share->null_bytes); - ptr= rec_buff + table_share->null_bytes; - - for (Field **field=table->field ; *field ; field++) - ptr=(byte*) (*field)->pack((char*) ptr, - (char*) record + (*field)->offset()); - - if (hidden_primary_key) - { - if (new_row) - get_auto_primary_key(current_ident); - memcpy_fixed((char*) ptr, (char*) current_ident, - BDB_HIDDEN_PRIMARY_KEY_LENGTH); - ptr+=BDB_HIDDEN_PRIMARY_KEY_LENGTH; - } - row->data=rec_buff; - row->size= (size_t) (ptr - rec_buff); - return 0; -} - - -void ha_berkeley::unpack_row(char *record, DBT *row) -{ - if (share->fixed_length_row) - memcpy(record,(char*) row->data,table_share->reclength+hidden_primary_key); - else - { - /* Copy null bits */ - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); - const char *ptr= (const char*) row->data; - memcpy(record, ptr, table_share->null_bytes); - ptr+= table_share->null_bytes; - for (Field **field=table->field ; *field ; field++) - ptr= (*field)->unpack(record + (*field)->offset(), ptr); - dbug_tmp_restore_column_map(table->write_set, old_map); - } -} - - -/* Store the key and the primary key into the row */ - -void ha_berkeley::unpack_key(char *record, DBT *key, uint index) -{ - KEY *key_info= table->key_info+index; - KEY_PART_INFO *key_part= key_info->key_part, - *end= key_part+key_info->key_parts; - char *pos= (char*) key->data; - - for (; key_part != end; key_part++) - { - if (key_part->null_bit) - { - if (!*pos++) // Null value - { - /* - We don't need to reset the record data as we will not access it - if the null data is set - */ - - record[key_part->null_offset]|=key_part->null_bit; - continue; - } - record[key_part->null_offset]&= ~key_part->null_bit; - } - pos= (char*) key_part->field->unpack_key(record + key_part->field->offset(), - pos, key_part->length); - } -} - - -/* - Create a packed key from a row. This key will be written as such - to the index tree. - - This will never fail as the key buffer is pre-allocated. 
-*/ - -DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, - const byte *record, int key_length) -{ - bzero((char*) key,sizeof(*key)); - if (hidden_primary_key && keynr == primary_key) - { - /* We don't need to set app_private here */ - key->data=current_ident; - key->size=BDB_HIDDEN_PRIMARY_KEY_LENGTH; - return key; - } - - KEY *key_info=table->key_info+keynr; - KEY_PART_INFO *key_part=key_info->key_part; - KEY_PART_INFO *end=key_part+key_info->key_parts; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); - DBUG_ENTER("create_key"); - - key->data=buff; - key->app_private= key_info; - for (; key_part != end && key_length > 0; key_part++) - { - if (key_part->null_bit) - { - /* Store 0 if the key part is a NULL part */ - if (record[key_part->null_offset] & key_part->null_bit) - { - *buff++ =0; - key->flags|=DB_DBT_DUPOK; - continue; - } - *buff++ = 1; // Store NOT NULL marker - } - buff=key_part->field->pack_key(buff,(char*) (record + key_part->offset), - key_part->length); - key_length-=key_part->length; - } - key->size= (buff - (char*) key->data); - DBUG_DUMP("key",(char*) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); - DBUG_RETURN(key); -} - - -/* - Create a packed key from from a MySQL unpacked key (like the one that is - sent from the index_read() - - This key is to be used to read a row -*/ - -DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff, - const byte *key_ptr, uint key_length) -{ - KEY *key_info=table->key_info+keynr; - KEY_PART_INFO *key_part=key_info->key_part; - KEY_PART_INFO *end=key_part+key_info->key_parts; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); - DBUG_ENTER("bdb:pack_key"); - - bzero((char*) key,sizeof(*key)); - key->data=buff; - key->app_private= (void*) key_info; - - for (; key_part != end && (int) key_length > 0 ; key_part++) - { - uint offset=0; - if (key_part->null_bit) - { - if (!(*buff++ = (*key_ptr == 0))) // Store 0 if NULL - { - key_length-= key_part->store_length; - key_ptr+= key_part->store_length; - key->flags|=DB_DBT_DUPOK; - continue; - } - offset=1; // Data is at key_ptr+1 - } - buff=key_part->field->pack_key_from_key_image(buff,(char*) key_ptr+offset, - key_part->length); - key_ptr+=key_part->store_length; - key_length-=key_part->store_length; - } - key->size= (buff - (char*) key->data); - DBUG_DUMP("key",(char*) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); - DBUG_RETURN(key); -} - - -int ha_berkeley::write_row(byte * record) -{ - DBT row,prim_key,key; - int error; - DBUG_ENTER("write_row"); - - statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) - table->timestamp_field->set_time(); - if (table->next_number_field && record == table->record[0]) - update_auto_increment(); - if ((error=pack_row(&row, record,1))) - DBUG_RETURN(error); /* purecov: inspected */ - - table->insert_or_update= 1; // For handling of VARCHAR - if (table_share->keys + test(hidden_primary_key) == 1) - { - error=file->put(file, transaction, create_key(&prim_key, primary_key, - key_buff, record), - &row, key_type[primary_key]); - last_dup_key=primary_key; - } - else - { - DB_TXN *sub_trans = transaction; - /* Don't use sub transactions in temporary tables */ - for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) - { - key_map changed_keys(0); - if (!(error=file->put(file, sub_trans, create_key(&prim_key, primary_key, - key_buff, record), 
- &row, key_type[primary_key]))) - { - changed_keys.set_bit(primary_key); - for (uint keynr=0 ; keynr < table_share->keys ; keynr++) - { - if (keynr == primary_key) - continue; - if ((error=key_file[keynr]->put(key_file[keynr], sub_trans, - create_key(&key, keynr, key_buff2, - record), - &prim_key, key_type[keynr]))) - { - last_dup_key=keynr; - break; - } - changed_keys.set_bit(keynr); - } - } - else - last_dup_key=primary_key; - if (error) - { - /* Remove inserted row */ - DBUG_PRINT("error",("Got error %d",error)); - if (using_ignore) - { - int new_error = 0; - if (!changed_keys.is_clear_all()) - { - new_error = 0; - for (uint keynr=0; - keynr < table_share->keys+test(hidden_primary_key); - keynr++) - { - if (changed_keys.is_set(keynr)) - { - if ((new_error = remove_key(sub_trans, keynr, record, - &prim_key))) - break; /* purecov: inspected */ - } - } - } - if (new_error) - { - error=new_error; // This shouldn't happen /* purecov: inspected */ - break; /* purecov: inspected */ - } - } - } - if (error != DB_LOCK_DEADLOCK) - break; - } - } - table->insert_or_update= 0; - if (error == DB_KEYEXIST) - error=HA_ERR_FOUND_DUPP_KEY; - else if (!error) - changed_rows++; - DBUG_RETURN(error); -} - - -/* Compare if a key in a row has changed */ - -int ha_berkeley::key_cmp(uint keynr, const byte * old_row, - const byte * new_row) -{ - KEY_PART_INFO *key_part=table->key_info[keynr].key_part; - KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts; - - for (; key_part != end ; key_part++) - { - if (key_part->null_bit) - { - if ((old_row[key_part->null_offset] & key_part->null_bit) != - (new_row[key_part->null_offset] & key_part->null_bit)) - return 1; - } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) - { - - if (key_part->field->cmp_binary((char*) (old_row + key_part->offset), - (char*) (new_row + key_part->offset), - (ulong) key_part->length)) - return 1; - } - else - { - if (memcmp(old_row+key_part->offset, new_row+key_part->offset, - key_part->length)) - return 1; - } - } - return 0; -} - - -/* - Update a row from one value to another. - Clobbers key_buff2 -*/ - -int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed, - const byte * old_row, DBT *old_key, - const byte * new_row, DBT *new_key, - bool local_using_ignore) -{ - DBT row; - int error; - DBUG_ENTER("update_primary_key"); - - if (primary_key_changed) - { - // Primary key changed or we are updating a key that can have duplicates. - // Delete the old row and add a new one - if (!(error=remove_key(trans, primary_key, old_row, old_key))) - { - if (!(error=pack_row(&row, new_row, 0))) - { - if ((error=file->put(file, trans, new_key, &row, - key_type[primary_key]))) - { - // Probably a duplicated key; restore old key and row if needed - last_dup_key=primary_key; - if (local_using_ignore) - { - int new_error; - if ((new_error=pack_row(&row, old_row, 0)) || - (new_error=file->put(file, trans, old_key, &row, - key_type[primary_key]))) - error=new_error; // fatal error /* purecov: inspected */ - } - } - } - } - } - else - { - // Primary key didn't change; just update the row data - if (!(error=pack_row(&row, new_row, 0))) - error=file->put(file, trans, new_key, &row, 0); - } - DBUG_RETURN(error); -} - -/* - Restore changed keys, when a non-fatal error aborts the insert/update - of one row. 
- Clobbers keybuff2 -*/ - -int ha_berkeley::restore_keys(DB_TXN *trans, key_map *changed_keys, - uint primary_key, - const byte *old_row, DBT *old_key, - const byte *new_row, DBT *new_key) -{ - int error; - DBT tmp_key; - uint keynr; - DBUG_ENTER("restore_keys"); - - /* Restore the old primary key, and the old row, but don't ignore - duplicate key failure */ - if ((error=update_primary_key(trans, TRUE, new_row, new_key, - old_row, old_key, FALSE))) - goto err; /* purecov: inspected */ - - /* Remove the new key, and put back the old key - changed_keys is a map of all non-primary keys that need to be - rolled back. The last key set in changed_keys is the one that - triggered the duplicate key error (it wasn't inserted), so for - that one just put back the old value. */ - if (!changed_keys->is_clear_all()) - { - for (keynr=0 ; keynr < table_share->keys+test(hidden_primary_key) ; keynr++) - { - if (changed_keys->is_set(keynr)) - { - if (changed_keys->is_prefix(1) && - (error = remove_key(trans, keynr, new_row, new_key))) - break; /* purecov: inspected */ - if ((error = key_file[keynr]->put(key_file[keynr], trans, - create_key(&tmp_key, keynr, key_buff2, - old_row), - old_key, key_type[keynr]))) - break; /* purecov: inspected */ - } - } - } - -err: - DBUG_ASSERT(error != DB_KEYEXIST); - DBUG_RETURN(error); -} - - -int ha_berkeley::update_row(const byte * old_row, byte * new_row) -{ - DBT prim_key, key, old_prim_key; - int error; - DB_TXN *sub_trans; - bool primary_key_changed; - DBUG_ENTER("update_row"); - - LINT_INIT(error); - statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); - - table->insert_or_update= 1; // For handling of VARCHAR - if (hidden_primary_key) - { - primary_key_changed=0; - bzero((char*) &prim_key,sizeof(prim_key)); - prim_key.data= (void*) current_ident; - prim_key.size=BDB_HIDDEN_PRIMARY_KEY_LENGTH; - old_prim_key=prim_key; - } - else - { - create_key(&prim_key, primary_key, key_buff, new_row); - - if ((primary_key_changed=key_cmp(primary_key, old_row, new_row))) - create_key(&old_prim_key, primary_key, primary_key_buff, old_row); - else - old_prim_key=prim_key; - } - - sub_trans = transaction; - for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) - { - key_map changed_keys(0); - /* Start by updating the primary key */ - if (!(error=update_primary_key(sub_trans, primary_key_changed, - old_row, &old_prim_key, - new_row, &prim_key, - using_ignore))) - { - // Update all other keys - for (uint keynr=0 ; keynr < table_share->keys ; keynr++) - { - if (keynr == primary_key) - continue; - if (key_cmp(keynr, old_row, new_row) || primary_key_changed) - { - if ((error=remove_key(sub_trans, keynr, old_row, &old_prim_key))) - { - table->insert_or_update= 0; - DBUG_RETURN(error); // Fatal error /* purecov: inspected */ - } - changed_keys.set_bit(keynr); - if ((error=key_file[keynr]->put(key_file[keynr], sub_trans, - create_key(&key, keynr, key_buff2, - new_row), - &prim_key, key_type[keynr]))) - { - last_dup_key=keynr; - break; - } - } - } - } - if (error) - { - /* Remove inserted row */ - DBUG_PRINT("error",("Got error %d",error)); - if (using_ignore) - { - int new_error = 0; - if (!changed_keys.is_clear_all()) - new_error=restore_keys(transaction, &changed_keys, primary_key, - old_row, &old_prim_key, new_row, &prim_key); - if (new_error) - { - /* This shouldn't happen */ - error=new_error; /* purecov: inspected */ - break; /* purecov: inspected */ - } 
- } - } - if (error != DB_LOCK_DEADLOCK) - break; - } - table->insert_or_update= 0; - if (error == DB_KEYEXIST) - error=HA_ERR_FOUND_DUPP_KEY; - DBUG_RETURN(error); -} - - -/* - Delete one key - This uses key_buff2, when keynr != primary key, so it's important that - a function that calls this doesn't use this buffer for anything else. -*/ - -int ha_berkeley::remove_key(DB_TXN *trans, uint keynr, const byte *record, - DBT *prim_key) -{ - int error; - DBT key; - DBUG_ENTER("remove_key"); - DBUG_PRINT("enter",("index: %d",keynr)); - - if (keynr == active_index && cursor) - error=cursor->c_del(cursor,0); - else if (keynr == primary_key || - ((table->key_info[keynr].flags & (HA_NOSAME | HA_NULL_PART_KEY)) == - HA_NOSAME)) - { // Unique key - DBUG_ASSERT(keynr == primary_key || prim_key->data != key_buff2); - error=key_file[keynr]->del(key_file[keynr], trans, - keynr == primary_key ? - prim_key : - create_key(&key, keynr, key_buff2, record), - 0); - } - else - { - /* - To delete the not duplicated key, we need to open an cursor on the - row to find the key to be delete and delete it. - We will never come here with keynr = primary_key - */ - DBUG_ASSERT(keynr != primary_key && prim_key->data != key_buff2); - DBC *tmp_cursor; - if (!(error=key_file[keynr]->cursor(key_file[keynr], trans, - &tmp_cursor, 0))) - { - if (!(error=tmp_cursor->c_get(tmp_cursor, - create_key(&key, keynr, key_buff2, record), - prim_key, DB_GET_BOTH | DB_RMW))) - { // This shouldn't happen - error=tmp_cursor->c_del(tmp_cursor,0); - } - int result=tmp_cursor->c_close(tmp_cursor); - if (!error) - error=result; - } - } - DBUG_RETURN(error); -} - - -/* Delete all keys for new_record */ - -int ha_berkeley::remove_keys(DB_TXN *trans, const byte *record, - DBT *new_record, DBT *prim_key, key_map *keys) -{ - int result = 0; - for (uint keynr=0; - keynr < table_share->keys+test(hidden_primary_key); - keynr++) - { - if (keys->is_set(keynr)) - { - int new_error=remove_key(trans, keynr, record, prim_key); - if (new_error) - { - result=new_error; // Return last error /* purecov: inspected */ - break; // Let rollback correct things /* purecov: inspected */ - } - } - } - return result; -} - - -int ha_berkeley::delete_row(const byte * record) -{ - int error; - DBT row, prim_key; - key_map keys= table_share->keys_in_use; - DBUG_ENTER("delete_row"); - statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); - - if ((error=pack_row(&row, record, 0))) - DBUG_RETURN((error)); /* purecov: inspected */ - create_key(&prim_key, primary_key, key_buff, record); - if (hidden_primary_key) - keys.set_bit(primary_key); - - /* Subtransactions may be used in order to retry the delete in - case we get a DB_LOCK_DEADLOCK error. 
*/ - DB_TXN *sub_trans = transaction; - for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) - { - error=remove_keys(sub_trans, record, &row, &prim_key, &keys); - if (error) - { /* purecov: inspected */ - DBUG_PRINT("error",("Got error %d",error)); - break; // No retry - return error - } - if (error != DB_LOCK_DEADLOCK) - break; - } -#ifdef CANT_COUNT_DELETED_ROWS - if (!error) - changed_rows--; -#endif - DBUG_RETURN(error); -} - - -int ha_berkeley::index_init(uint keynr, bool sorted) -{ - int error; - DBUG_ENTER("ha_berkeley::index_init"); - DBUG_PRINT("enter",("table: '%s' key: %d", table_share->table_name.str, - keynr)); - - /* - Under some very rare conditions (like full joins) we may already have - an active cursor at this point - */ - if (cursor) - { - DBUG_PRINT("note",("Closing active cursor")); - cursor->c_close(cursor); - } - active_index=keynr; - if ((error=key_file[keynr]->cursor(key_file[keynr], transaction, &cursor, - table->reginfo.lock_type > - TL_WRITE_ALLOW_READ ? - 0 : 0))) - cursor=0; // Safety /* purecov: inspected */ - bzero((char*) &last_key,sizeof(last_key)); - DBUG_RETURN(error); -} - -int ha_berkeley::index_end() -{ - int error=0; - DBUG_ENTER("ha_berkely::index_end"); - if (cursor) - { - DBUG_PRINT("enter",("table: '%s'", table_share->table_name.str)); - error=cursor->c_close(cursor); - cursor=0; - } - active_index=MAX_KEY; - DBUG_RETURN(error); -} - - -/* What to do after we have read a row based on an index */ - -int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row, - DBT *found_key, bool read_next) -{ - DBUG_ENTER("ha_berkeley::read_row"); - if (error) - { - if (error == DB_NOTFOUND || error == DB_KEYEMPTY) - error=read_next ? HA_ERR_END_OF_FILE : HA_ERR_KEY_NOT_FOUND; - table->status=STATUS_NOT_FOUND; - DBUG_RETURN(error); - } - if (hidden_primary_key) - memcpy_fixed(current_ident, - (char*) row->data+row->size-BDB_HIDDEN_PRIMARY_KEY_LENGTH, - BDB_HIDDEN_PRIMARY_KEY_LENGTH); - table->status=0; - if (keynr != primary_key) - { - /* We only found the primary key. Now we have to use this to find - the row data */ - if (key_read && found_key) - { - unpack_key(buf,found_key,keynr); - if (!hidden_primary_key) - unpack_key(buf,row,primary_key); - DBUG_RETURN(0); - } - DBT key; - bzero((char*) &key,sizeof(key)); - key.data=key_buff; - key.size=row->size; - key.app_private= (void*) (table->key_info+primary_key); - memcpy(key_buff,row->data,row->size); - /* Read the data into current_row */ - current_row.flags=DB_DBT_REALLOC; - if ((error=file->get(file, transaction, &key, ¤t_row, 0))) - { - table->status=STATUS_NOT_FOUND; /* purecov: inspected */ - DBUG_RETURN(error == DB_NOTFOUND ? 
HA_ERR_CRASHED : error); /* purecov: inspected */ - } - row= ¤t_row; - } - unpack_row(buf,row); - DBUG_RETURN(0); -} - - -/* This is only used to read whole keys */ - -int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - table->in_use->status_var.ha_read_key_count++; - DBUG_ENTER("index_read_idx"); - current_row.flags=DB_DBT_REALLOC; - active_index=MAX_KEY; - DBUG_RETURN(read_row(key_file[keynr]->get(key_file[keynr], transaction, - pack_key(&last_key, keynr, key_buff, key, - key_len), - ¤t_row,0), - (char*) buf, keynr, ¤t_row, &last_key, 0)); -} - - -int ha_berkeley::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - DBT row; - int error; - KEY *key_info= &table->key_info[active_index]; - int do_prev= 0; - DBUG_ENTER("ha_berkeley::index_read"); - - table->in_use->status_var.ha_read_key_count++; - bzero((char*) &row,sizeof(row)); - if (find_flag == HA_READ_BEFORE_KEY) - { - find_flag= HA_READ_KEY_OR_NEXT; - do_prev= 1; - } - else if (find_flag == HA_READ_PREFIX_LAST_OR_PREV) - { - find_flag= HA_READ_AFTER_KEY; - do_prev= 1; - } - if (key_len == key_info->key_length && - !(table->key_info[active_index].flags & HA_END_SPACE_KEY)) - { - if (find_flag == HA_READ_AFTER_KEY) - key_info->handler.bdb_return_if_eq= 1; - error=read_row(cursor->c_get(cursor, pack_key(&last_key, - active_index, - key_buff, - key, key_len), - &row, - (find_flag == HA_READ_KEY_EXACT ? - DB_SET : DB_SET_RANGE)), - (char*) buf, active_index, &row, (DBT*) 0, 0); - key_info->handler.bdb_return_if_eq= 0; - } - else - { - /* read of partial key */ - pack_key(&last_key, active_index, key_buff, key, key_len); - /* Store for compare */ - memcpy(key_buff2, key_buff, (key_len=last_key.size)); - /* - If HA_READ_AFTER_KEY is set, return next key, else return first - matching key. - */ - key_info->handler.bdb_return_if_eq= (find_flag == HA_READ_AFTER_KEY ? 
- 1 : -1); - error=read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE), - (char*) buf, active_index, &row, (DBT*) 0, 0); - key_info->handler.bdb_return_if_eq= 0; - if (!error && find_flag == HA_READ_KEY_EXACT) - { - /* Ensure that we found a key that is equal to the current one */ - if (!error && berkeley_key_cmp(table, key_info, key_buff2, key_len)) - error=HA_ERR_KEY_NOT_FOUND; - } - } - if (do_prev) - { - bzero((char*) &row, sizeof(row)); - error= read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV), - (char*) buf, active_index, &row, &last_key, 1); - } - DBUG_RETURN(error); -} - -/* - Read last key is solved by reading the next key and then reading - the previous key -*/ - -int ha_berkeley::index_read_last(byte * buf, const byte * key, uint key_len) -{ - DBT row; - int error; - KEY *key_info= &table->key_info[active_index]; - DBUG_ENTER("ha_berkeley::index_read"); - - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - bzero((char*) &row,sizeof(row)); - - /* read of partial key */ - pack_key(&last_key, active_index, key_buff, key, key_len); - /* Store for compare */ - memcpy(key_buff2, key_buff, (key_len=last_key.size)); - key_info->handler.bdb_return_if_eq= 1; - error=read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE), - (char*) buf, active_index, &row, (DBT*) 0, 0); - key_info->handler.bdb_return_if_eq= 0; - bzero((char*) &row,sizeof(row)); - if (read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV), - (char*) buf, active_index, &row, &last_key, 1) || - berkeley_key_cmp(table, key_info, key_buff2, key_len)) - error=HA_ERR_KEY_NOT_FOUND; - DBUG_RETURN(error); -} - - -int ha_berkeley::index_next(byte * buf) -{ - DBT row; - DBUG_ENTER("index_next"); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); - bzero((char*) &row,sizeof(row)); - DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), - (char*) buf, active_index, &row, &last_key, 1)); -} - -int ha_berkeley::index_next_same(byte * buf, const byte *key, uint keylen) -{ - DBT row; - int error; - DBUG_ENTER("index_next_same"); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); - bzero((char*) &row,sizeof(row)); - if (keylen == table->key_info[active_index].key_length && - !(table->key_info[active_index].flags & HA_END_SPACE_KEY)) - error=read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT_DUP), - (char*) buf, active_index, &row, &last_key, 1); - else - { - error=read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), - (char*) buf, active_index, &row, &last_key, 1); - if (!error && ::key_cmp_if_same(table, key, active_index, keylen)) - error=HA_ERR_END_OF_FILE; - } - DBUG_RETURN(error); -} - - -int ha_berkeley::index_prev(byte * buf) -{ - DBT row; - DBUG_ENTER("index_prev"); - statistic_increment(table->in_use->status_var.ha_read_prev_count, - &LOCK_status); - bzero((char*) &row,sizeof(row)); - DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV), - (char*) buf, active_index, &row, &last_key, 1)); -} - - -int ha_berkeley::index_first(byte * buf) -{ - DBT row; - DBUG_ENTER("index_first"); - statistic_increment(table->in_use->status_var.ha_read_first_count, - &LOCK_status); - bzero((char*) &row,sizeof(row)); - DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_FIRST), - (char*) buf, active_index, &row, &last_key, 1)); -} - -int ha_berkeley::index_last(byte * buf) -{ - DBT row; - DBUG_ENTER("index_last"); - 
statistic_increment(table->in_use->status_var.ha_read_last_count, - &LOCK_status); - bzero((char*) &row,sizeof(row)); - DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_LAST), - (char*) buf, active_index, &row, &last_key, 0)); -} - -int ha_berkeley::rnd_init(bool scan) -{ - DBUG_ENTER("rnd_init"); - current_row.flags=DB_DBT_REALLOC; - DBUG_RETURN(index_init(primary_key, 0)); -} - -int ha_berkeley::rnd_end() -{ - return index_end(); -} - -int ha_berkeley::rnd_next(byte *buf) -{ - DBT row; - DBUG_ENTER("rnd_next"); - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); - bzero((char*) &row,sizeof(row)); - DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), - (char*) buf, primary_key, &row, &last_key, 1)); -} - - -DBT *ha_berkeley::get_pos(DBT *to, byte *pos) -{ - /* We don't need to set app_private here */ - bzero((char*) to,sizeof(*to)); - - to->data=pos; - if (share->fixed_length_primary_key) - to->size=ref_length; - else - { - KEY_PART_INFO *key_part=table->key_info[primary_key].key_part; - KEY_PART_INFO *end=key_part+table->key_info[primary_key].key_parts; - - for (; key_part != end ; key_part++) - pos+=key_part->field->packed_col_length((char*) pos,key_part->length); - to->size= (uint) (pos- (byte*) to->data); - } - DBUG_DUMP("key", (char*) to->data, to->size); - return to; -} - - -int ha_berkeley::rnd_pos(byte * buf, byte *pos) -{ - DBT db_pos; - - DBUG_ENTER("ha_berkeley::rnd_pos"); - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); - active_index= MAX_KEY; - DBUG_RETURN(read_row(file->get(file, transaction, - get_pos(&db_pos, pos), - ¤t_row, 0), - (char*) buf, primary_key, ¤t_row, (DBT*) 0, 0)); -} - -/* - Set a reference to the current record in (ref,ref_length). - - SYNOPSIS - ha_berkeley::position() - record The current record buffer - - DESCRIPTION - The BDB handler stores the primary key in (ref,ref_length). - There is either an explicit primary key, or an implicit (hidden) - primary key. - During open(), 'ref_length' is calculated as the maximum primary - key length. When an actual key is shorter than that, the rest of - the buffer must be cleared out. The row cannot be identified, if - garbage follows behind the end of the key. There is no length - field for the current key, so that the whole ref_length is used - for comparison. 
- - RETURN - nothing -*/ - -void ha_berkeley::position(const byte *record) -{ - DBT key; - DBUG_ENTER("ha_berkeley::position"); - if (hidden_primary_key) - { - DBUG_ASSERT(ref_length == BDB_HIDDEN_PRIMARY_KEY_LENGTH); - memcpy_fixed(ref, (char*) current_ident, BDB_HIDDEN_PRIMARY_KEY_LENGTH); - } - else - { - create_key(&key, primary_key, (char*) ref, record); - if (key.size < ref_length) - bzero(ref + key.size, ref_length - key.size); - } - DBUG_VOID_RETURN; -} - - -void ha_berkeley::info(uint flag) -{ - DBUG_ENTER("ha_berkeley::info"); - if (flag & HA_STATUS_VARIABLE) - { - // Just to get optimizations right - stats.records = share->rows + changed_rows; - stats.deleted = 0; - } - if ((flag & HA_STATUS_CONST) || version != share->version) - { - version=share->version; - for (uint i=0 ; i < table_share->keys ; i++) - { - table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= - share->rec_per_key[i]; - } - } - /* Don't return key if we got an error for the internal primary key */ - if (flag & HA_STATUS_ERRKEY && last_dup_key < table_share->keys) - errkey= last_dup_key; - DBUG_VOID_RETURN; -} - - -int ha_berkeley::extra(enum ha_extra_function operation) -{ - switch (operation) { - case HA_EXTRA_RESET_STATE: - reset(); - break; - case HA_EXTRA_KEYREAD: - key_read=1; // Query satisfied with key - break; - case HA_EXTRA_NO_KEYREAD: - key_read=0; - break; - case HA_EXTRA_IGNORE_DUP_KEY: - using_ignore=1; - break; - case HA_EXTRA_NO_IGNORE_DUP_KEY: - using_ignore=0; - break; - default: - break; - } - return 0; -} - - -int ha_berkeley::reset(void) -{ - key_read= 0; - using_ignore= 0; - if (current_row.flags & (DB_DBT_MALLOC | DB_DBT_REALLOC)) - { - current_row.flags= 0; - if (current_row.data) - { - free(current_row.data); - current_row.data= 0; - } - } - return 0; -} - - -/* - As MySQL will execute an external lock for every new table it uses - we can use this to start the transactions. - If we are in auto_commit mode we just need to start a transaction - for the statement to be able to rollback the statement. - If not, we have to start a master transaction if there doesn't exist - one from before. 
-*/ - -int ha_berkeley::external_lock(THD *thd, int lock_type) -{ - int error=0; - berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; - DBUG_ENTER("ha_berkeley::external_lock"); - if (!trx) - { - thd->ha_data[berkeley_hton.slot]= trx= (berkeley_trx_data *) - my_malloc(sizeof(*trx), MYF(MY_ZEROFILL)); - if (!trx) - DBUG_RETURN(1); - } - if (trx->all == 0) - trx->sp_level= 0; - if (lock_type != F_UNLCK) - { - if (!trx->bdb_lock_count++) - { - DBUG_ASSERT(trx->stmt == 0); - transaction=0; // Safety - /* First table lock, start transaction */ - if ((thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN | - OPTION_TABLE_LOCK)) && !trx->all) - { - /* We have to start a master transaction */ - DBUG_PRINT("trans",("starting transaction all: options: 0x%lx", - (ulong) thd->options)); - if ((error= db_env->txn_begin(db_env, NULL, &trx->all, 0))) - { - trx->bdb_lock_count--; // We didn't get the lock - DBUG_RETURN(error); - } - trx->sp_level= trx->all; - trans_register_ha(thd, TRUE, &berkeley_hton); - if (thd->in_lock_tables) - DBUG_RETURN(0); // Don't create stmt trans - } - DBUG_PRINT("trans",("starting transaction stmt")); - if ((error= db_env->txn_begin(db_env, trx->sp_level, &trx->stmt, 0))) - { - /* We leave the possible master transaction open */ - trx->bdb_lock_count--; // We didn't get the lock - DBUG_RETURN(error); - } - trans_register_ha(thd, FALSE, &berkeley_hton); - } - transaction= trx->stmt; - } - else - { - lock.type=TL_UNLOCK; // Unlocked - thread_safe_add(share->rows, changed_rows, &share->mutex); - changed_rows=0; - if (!--trx->bdb_lock_count) - { - if (trx->stmt) - { - /* - F_UNLCK is done without a transaction commit / rollback. - This happens if the thread didn't update any rows - We must in this case commit the work to keep the row locks - */ - DBUG_PRINT("trans",("commiting non-updating transaction")); - error= trx->stmt->commit(trx->stmt,0); - trx->stmt= transaction= 0; - } - } - } - DBUG_RETURN(error); -} - - -/* - When using LOCK TABLE's external_lock is only called when the actual - TABLE LOCK is done. - Under LOCK TABLES, each used tables will force a call to start_stmt. -*/ - -int ha_berkeley::start_stmt(THD *thd, thr_lock_type lock_type) -{ - int error=0; - DBUG_ENTER("ha_berkeley::start_stmt"); - berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; - DBUG_ASSERT(trx); - /* - note that trx->stmt may have been already initialized as start_stmt() - is called for *each table* not for each storage engine, - and there could be many bdb tables referenced in the query - */ - if (!trx->stmt) - { - DBUG_PRINT("trans",("starting transaction stmt")); - error= db_env->txn_begin(db_env, trx->sp_level, &trx->stmt, 0); - trans_register_ha(thd, FALSE, &berkeley_hton); - } - transaction= trx->stmt; - DBUG_RETURN(error); -} - - -/* - The idea with handler::store_lock() is the following: - - The statement decided which locks we should need for the table - for updates/deletes/inserts we get WRITE locks, for SELECT... we get - read locks. - - Before adding the lock into the table lock handler (see thr_lock.c) - mysqld calls store lock with the requested locks. Store lock can now - modify a write lock to a read lock (or some other lock), ignore the - lock (if we don't want to use MySQL table locks at all) or add locks - for many tables (like we do when we are using a MERGE handler). 
- - Berkeley DB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which - signals that we are doing WRITES, but we are still allowing other - reader's and writer's. - - When releasing locks, store_lock() are also called. In this case one - usually doesn't have to do anything. - - In some exceptional cases MySQL may send a request for a TL_IGNORE; - This means that we are requesting the same lock as last time and this - should also be ignored. (This may happen when someone does a flush - table when we have opened a part of the tables, in which case mysqld - closes and reopens the tables and tries to get the same locks at last - time). In the future we will probably try to remove this. -*/ - - -THR_LOCK_DATA **ha_berkeley::store_lock(THD *thd, THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ - if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) - { - /* If we are not doing a LOCK TABLE, then allow multiple writers */ - if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && - lock_type <= TL_WRITE) && - !thd->in_lock_tables) - lock_type = TL_WRITE_ALLOW_WRITE; - lock.type= lock_type; - } - *to++= &lock; - return to; -} - - -static int create_sub_table(const char *table_name, const char *sub_name, - DBTYPE type, int flags) -{ - int error; - DB *file; - DBUG_ENTER("create_sub_table"); - DBUG_PRINT("enter",("sub_name: %s flags: %d",sub_name, flags)); - - if (!(error=db_create(&file, db_env, 0))) - { - file->set_flags(file, flags); - error=(file->open(file, NULL, table_name, sub_name, type, - DB_THREAD | DB_CREATE, my_umask)); - if (error) - { - DBUG_PRINT("error",("Got error: %d when opening table '%s'",error, /* purecov: inspected */ - table_name)); /* purecov: inspected */ - (void) file->remove(file,table_name,NULL,0); /* purecov: inspected */ - } - else - (void) file->close(file,0); - } - else - { - DBUG_PRINT("error",("Got error: %d when creting table",error)); /* purecov: inspected */ - } - if (error) - my_errno=error; /* purecov: inspected */ - DBUG_RETURN(error); -} - - -int ha_berkeley::create(const char *name, register TABLE *form, - HA_CREATE_INFO *create_info) -{ - char name_buff[FN_REFLEN]; - char part[7]; - uint index=1; - int error; - DBUG_ENTER("ha_berkeley::create"); - - fn_format(name_buff,name,"", ha_berkeley_ext, - MY_UNPACK_FILENAME|MY_APPEND_EXT); - - /* Create the main table that will hold the real rows */ - if ((error= create_sub_table(name_buff,"main",DB_BTREE,0))) - DBUG_RETURN(error); /* purecov: inspected */ - - primary_key= form->s->primary_key; - /* Create the keys */ - for (uint i=0; i < form->s->keys; i++) - { - if (i != primary_key) - { - sprintf(part,"key%02d",index++); - if ((error= create_sub_table(name_buff, part, DB_BTREE, - (form->key_info[i].flags & HA_NOSAME) ? 0 : - DB_DUP))) - DBUG_RETURN(error); /* purecov: inspected */ - } - } - - /* Create the status block to save information from last status command */ - /* Is DB_BTREE the best option here ? 
(QUEUE can't be used in sub tables) */ - - DB *status_block; - if (!(error=(db_create(&status_block, db_env, 0)))) - { - if (!(error=(status_block->open(status_block, NULL, name_buff, - "status", DB_BTREE, DB_CREATE, 0)))) - { - char rec_buff[4+MAX_KEY*4]; - uint length= 4+ form->s->keys*4; - bzero(rec_buff, length); - error= write_status(status_block, rec_buff, length); - status_block->close(status_block,0); - } - } - DBUG_RETURN(error); -} - - - -int ha_berkeley::delete_table(const char *name) -{ - int error; - char name_buff[FN_REFLEN]; - DBUG_ENTER("delete_table"); - if ((error=db_create(&file, db_env, 0))) - my_errno=error; /* purecov: inspected */ - else - error=file->remove(file,fn_format(name_buff,name,"",ha_berkeley_ext, - MY_UNPACK_FILENAME|MY_APPEND_EXT), - NULL,0); - file=0; // Safety - DBUG_RETURN(error); -} - - -int ha_berkeley::rename_table(const char * from, const char * to) -{ - int error; - char from_buff[FN_REFLEN]; - char to_buff[FN_REFLEN]; - - if ((error= db_create(&file, db_env, 0))) - my_errno= error; - else - { - /* On should not do a file->close() after rename returns */ - error= file->rename(file, - fn_format(from_buff, from, "", - ha_berkeley_ext, - MY_UNPACK_FILENAME|MY_APPEND_EXT), - NULL, fn_format(to_buff, to, "", ha_berkeley_ext, - MY_UNPACK_FILENAME|MY_APPEND_EXT), 0); - } - return error; -} - - -/* - How many seeks it will take to read through the table - This is to be comparable to the number returned by records_in_range so - that we can decide if we should scan the table or use keys. -*/ - -double ha_berkeley::scan_time() -{ - return rows2double(stats.records/3); -} - -ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key, - key_range *end_key) -{ - DBT key; - DB_KEY_RANGE start_range, end_range; - DB *kfile=key_file[keynr]; - double start_pos,end_pos,rows; - bool error; - KEY *key_info= &table->key_info[keynr]; - DBUG_ENTER("ha_berkeley::records_in_range"); - - /* Ensure we get maximum range, even for varchar keys with different space */ - key_info->handler.bdb_return_if_eq= -1; - error= ((start_key && kfile->key_range(kfile,transaction, - pack_key(&key, keynr, key_buff, - start_key->key, - start_key->length), - &start_range,0))); - if (error) - { - key_info->handler.bdb_return_if_eq= 0; - // Better than returning an error - DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); /* purecov: inspected */ - } - key_info->handler.bdb_return_if_eq= 1; - error= (end_key && kfile->key_range(kfile,transaction, - pack_key(&key, keynr, key_buff, - end_key->key, - end_key->length), - &end_range,0)); - key_info->handler.bdb_return_if_eq= 0; - if (error) - { - // Better than returning an error - DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); /* purecov: inspected */ - } - - if (!start_key) - start_pos= 0.0; - else if (start_key->flag == HA_READ_KEY_EXACT) - start_pos=start_range.less; - else - start_pos=start_range.less+start_range.equal; - - if (!end_key) - end_pos= 1.0; - else if (end_key->flag == HA_READ_BEFORE_KEY) - end_pos=end_range.less; - else - end_pos=end_range.less+end_range.equal; - rows=(end_pos-start_pos)*stats.records; - DBUG_PRINT("exit",("rows: %g",rows)); - DBUG_RETURN((ha_rows)(rows <= 1.0 ? 
1 : rows)); -} - - -void ha_berkeley::get_auto_increment(ulonglong offset, ulonglong increment, - ulonglong nb_desired_values, - ulonglong *first_value, - ulonglong *nb_reserved_values) -{ - /* Ideally in case of real error (not "empty table") nr should be ~ULL(0) */ - ulonglong nr=1; // Default if error or new key - int error; - (void) ha_berkeley::extra(HA_EXTRA_KEYREAD); - - /* Set 'active_index' */ - ha_berkeley::index_init(table_share->next_number_index, 0); - - if (!table_share->next_number_key_offset) - { // Autoincrement at key-start - error=ha_berkeley::index_last(table->record[1]); - /* has taken read lock on page of max key so reserves to infinite */ - *nb_reserved_values= ULONGLONG_MAX; - } - else - { - /* - MySQL needs to call us for next row: assume we are inserting ("a",null) - here, we return 3, and next this statement will want to insert ("b",null): - there is no reason why ("b",3+1) would be the good row to insert: maybe it - already exists, maybe 3+1 is too large... - */ - *nb_reserved_values= 1; - DBT row,old_key; - bzero((char*) &row,sizeof(row)); - KEY *key_info= &table->key_info[active_index]; - - /* Reading next available number for a sub key */ - ha_berkeley::create_key(&last_key, active_index, - key_buff, table->record[0], - table_share->next_number_key_offset); - /* Store for compare */ - memcpy(old_key.data=key_buff2, key_buff, (old_key.size=last_key.size)); - old_key.app_private=(void*) key_info; - error=1; - { - /* Modify the compare so that we will find the next key */ - key_info->handler.bdb_return_if_eq= 1; - /* We lock the next key as the new key will probl. be on the same page */ - error=cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE | DB_RMW); - key_info->handler.bdb_return_if_eq= 0; - if (!error || error == DB_NOTFOUND) - { - /* - Now search go one step back and then we should have found the - biggest key with the given prefix - */ - error=1; - if (!cursor->c_get(cursor, &last_key, &row, DB_PREV | DB_RMW) && - !berkeley_cmp_packed_key(key_file[active_index], &old_key, - &last_key)) - { - error=0; // Found value - unpack_key((char*) table->record[1], &last_key, active_index); - } - } - } - } - if (!error) - nr= (ulonglong) - table->next_number_field->val_int_offset(table_share->rec_buff_length)+1; - ha_berkeley::index_end(); - (void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD); - *first_value= nr; -} - -void ha_berkeley::print_error(int error, myf errflag) -{ - if (error == DB_LOCK_DEADLOCK) - error=HA_ERR_LOCK_DEADLOCK; - handler::print_error(error,errflag); -} - -/**************************************************************************** - Analyzing, checking, and optimizing tables -****************************************************************************/ - -#ifdef NOT_YET -static void print_msg(THD *thd, const char *table_name, const char *op_name, - const char *msg_type, const char *fmt, ...) 
-{ - Protocol *protocol= thd->protocol; - char msgbuf[256]; - msgbuf[0] = 0; - va_list args; - va_start(args,fmt); - - my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args); - msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia - DBUG_PRINT(msg_type,("message: %s",msgbuf)); - - protocol->set_nfields(4); - protocol->prepare_for_resend(); - protocol->store(table_name); - protocol->store(op_name); - protocol->store(msg_type); - protocol->store(msgbuf); - if (protocol->write()) - thd->killed=THD::KILL_CONNECTION; -} -#endif - -int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) -{ - uint i; - DB_BTREE_STAT *stat=0; - DB_TXN_STAT *txn_stat_ptr= 0; - berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; - DBUG_ASSERT(trx); - - for (i=0 ; i < table_share->keys ; i++) - { - if (stat) - { - free(stat); - stat=0; - } - if ((key_file[i]->stat)(key_file[i], trx->all, (void*) &stat, 0)) - goto err; /* purecov: inspected */ - share->rec_per_key[i]= (stat->bt_ndata / - (stat->bt_nkeys ? stat->bt_nkeys : 1)); - } - /* A hidden primary key is not in key_file[] */ - if (hidden_primary_key) - { - if (stat) - { - free(stat); - stat=0; - } - if ((file->stat)(file, trx->all, (void*) &stat, 0)) - goto err; /* purecov: inspected */ - } - pthread_mutex_lock(&share->mutex); - share->rows=stat->bt_ndata; - share->status|=STATUS_BDB_ANALYZE; // Save status on close - share->version++; // Update stat in table - pthread_mutex_unlock(&share->mutex); - update_status(share,table); // Write status to file - if (stat) - free(stat); - return ((share->status & STATUS_BDB_ANALYZE) ? HA_ADMIN_FAILED : - HA_ADMIN_OK); - -err: - if (stat) /* purecov: inspected */ - free(stat); /* purecov: inspected */ - return HA_ADMIN_FAILED; /* purecov: inspected */ -} - -int ha_berkeley::optimize(THD* thd, HA_CHECK_OPT* check_opt) -{ - return ha_berkeley::analyze(thd,check_opt); -} - - -int ha_berkeley::check(THD* thd, HA_CHECK_OPT* check_opt) -{ - DBUG_ENTER("ha_berkeley::check"); - - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - -#ifdef NOT_YET - char name_buff[FN_REFLEN]; - int error; - DB *tmp_file; - /* - To get this to work we need to ensure that no running transaction is - using the table. We also need to create a new environment without - locking for this. - */ - - /* We must open the file again to be able to check it! */ - if ((error=db_create(&tmp_file, db_env, 0))) - { - print_msg(thd, table->real_name, "check", "error", - "Got error %d creating environment",error); - DBUG_RETURN(HA_ADMIN_FAILED); - } - - /* Compare the overall structure */ - tmp_file->set_bt_compare(tmp_file, - (hidden_primary_key ? berkeley_cmp_hidden_key : - berkeley_cmp_packed_key)); - tmp_file->app_private= (void*) (table->key_info+table->primary_key); - fn_format(name_buff,share->table_name.str,"", ha_berkeley_ext, - MY_UNPACK_FILENAME|MY_APPEND_EXT); - if ((error=tmp_file->verify(tmp_file, name_buff, NullS, (FILE*) 0, - hidden_primary_key ? 
0 : DB_NOORDERCHK))) - { - print_msg(thd, table->real_name, "check", "error", - "Got error %d checking file structure",error); - tmp_file->close(tmp_file,0); - DBUG_RETURN(HA_ADMIN_CORRUPT); - } - - /* Check each index */ - tmp_file->set_bt_compare(tmp_file, berkeley_cmp_packed_key); - for (uint index=0,i=0 ; i < table->keys ; i++) - { - char part[7]; - if (i == primary_key) - strmov(part,"main"); - else - sprintf(part,"key%02d",++index); - tmp_file->app_private= (void*) (table->key_info+i); - if ((error=tmp_file->verify(tmp_file, name_buff, part, (FILE*) 0, - DB_ORDERCHKONLY))) - { - print_msg(thd, table->real_name, "check", "error", - "Key %d was not in order (Error: %d)", - index+ test(i >= primary_key), - error); - tmp_file->close(tmp_file,0); - DBUG_RETURN(HA_ADMIN_CORRUPT); - } - } - tmp_file->close(tmp_file,0); - DBUG_RETURN(HA_ADMIN_OK); -#endif -} - -/**************************************************************************** - Handling the shared BDB_SHARE structure that is needed to provide table - locking. -****************************************************************************/ - -static byte* bdb_get_key(BDB_SHARE *share,uint *length, - my_bool not_used __attribute__((unused))) -{ - *length=share->table_name_length; - return (byte*) share->table_name; -} - -static BDB_SHARE *get_share(const char *table_name, TABLE *table) -{ - BDB_SHARE *share; - pthread_mutex_lock(&bdb_mutex); - uint length=(uint) strlen(table_name); - if (!(share=(BDB_SHARE*) hash_search(&bdb_open_tables, (byte*) table_name, - length))) - { - ulong *rec_per_key; - char *tmp_name; - DB **key_file; - u_int32_t *key_type; - uint keys= table->s->keys; - - if ((share=(BDB_SHARE *) - my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), - &share, sizeof(*share), - &rec_per_key, keys * sizeof(ha_rows), - &tmp_name, length+1, - &key_file, (keys+1) * sizeof(*key_file), - &key_type, (keys+1) * sizeof(u_int32_t), - NullS))) - { - share->rec_per_key = rec_per_key; - share->table_name = tmp_name; - share->table_name_length=length; - strmov(share->table_name, table_name); - share->key_file = key_file; - share->key_type = key_type; - if (my_hash_insert(&bdb_open_tables, (byte*) share)) - { - pthread_mutex_unlock(&bdb_mutex); /* purecov: inspected */ - my_free((gptr) share,0); /* purecov: inspected */ - return 0; /* purecov: inspected */ - } - thr_lock_init(&share->lock); - pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); - } - } - pthread_mutex_unlock(&bdb_mutex); - return share; -} - -static int free_share(BDB_SHARE *share, TABLE *table, uint hidden_primary_key, - bool mutex_is_locked) -{ - int error, result = 0; - uint keys= table->s->keys + test(hidden_primary_key); - pthread_mutex_lock(&bdb_mutex); - if (mutex_is_locked) - pthread_mutex_unlock(&share->mutex); /* purecov: inspected */ - if (!--share->use_count) - { - DB **key_file = share->key_file; - update_status(share,table); - /* this does share->file->close() implicitly */ - for (uint i=0; i < keys; i++) - { - if (key_file[i] && (error=key_file[i]->close(key_file[i],0))) - result=error; /* purecov: inspected */ - } - if (share->status_block && - (error = share->status_block->close(share->status_block,0))) - result = error; /* purecov: inspected */ - hash_delete(&bdb_open_tables, (byte*) share); - thr_lock_delete(&share->lock); - pthread_mutex_destroy(&share->mutex); - my_free((gptr) share, MYF(0)); - } - pthread_mutex_unlock(&bdb_mutex); - return result; -} - -/* - Get status information that is stored in the 'status' sub database - and the max used value for the 
hidden primary key. -*/ - -void ha_berkeley::get_status() -{ - if (!test_all_bits(share->status,(STATUS_PRIMARY_KEY_INIT | - STATUS_ROW_COUNT_INIT))) - { - pthread_mutex_lock(&share->mutex); - if (!(share->status & STATUS_PRIMARY_KEY_INIT)) - { - (void) extra(HA_EXTRA_KEYREAD); - index_init(primary_key, 0); - if (!index_last(table->record[1])) - share->auto_ident=uint5korr(current_ident); - index_end(); - (void) extra(HA_EXTRA_NO_KEYREAD); - } - if (! share->status_block) - { - char name_buff[FN_REFLEN]; - uint open_mode= (((table->db_stat & HA_READ_ONLY) ? DB_RDONLY : 0) - | DB_THREAD); - fn_format(name_buff, share->table_name, "", ha_berkeley_ext, - MY_UNPACK_FILENAME|MY_APPEND_EXT); - if (!db_create(&share->status_block, db_env, 0)) - { - if (share->status_block->open(share->status_block, NULL, name_buff, - "status", DB_BTREE, open_mode, 0)) - { - share->status_block->close(share->status_block, 0); /* purecov: inspected */ - share->status_block=0; /* purecov: inspected */ - } - } - } - if (!(share->status & STATUS_ROW_COUNT_INIT) && share->status_block) - { - share->org_rows= share->rows= - table_share->max_rows ? table_share->max_rows : HA_BERKELEY_MAX_ROWS; - if (!share->status_block->cursor(share->status_block, 0, &cursor, 0)) - { - DBT row; - char rec_buff[64]; - bzero((char*) &row,sizeof(row)); - bzero((char*) &last_key,sizeof(last_key)); - row.data=rec_buff; - row.ulen=sizeof(rec_buff); - row.flags=DB_DBT_USERMEM; - if (!cursor->c_get(cursor, &last_key, &row, DB_FIRST)) - { - uint i; - uchar *pos=(uchar*) row.data; - share->org_rows=share->rows=uint4korr(pos); pos+=4; - for (i=0 ; i < table_share->keys ; i++) - { - share->rec_per_key[i]=uint4korr(pos); - pos+=4; - } - } - cursor->c_close(cursor); - } - cursor=0; // Safety - } - share->status|= STATUS_PRIMARY_KEY_INIT | STATUS_ROW_COUNT_INIT; - pthread_mutex_unlock(&share->mutex); - } -} - - -static int write_status(DB *status_block, char *buff, uint length) -{ - DBT row,key; - int error; - const char *key_buff="status"; - - bzero((char*) &row,sizeof(row)); - bzero((char*) &key,sizeof(key)); - row.data=buff; - key.data=(void*) key_buff; - key.size=sizeof(key_buff); - row.size=length; - error=status_block->put(status_block, 0, &key, &row, 0); - return error; -} - - -static void update_status(BDB_SHARE *share, TABLE *table) -{ - DBUG_ENTER("update_status"); - if (share->rows != share->org_rows || - (share->status & STATUS_BDB_ANALYZE)) - { - pthread_mutex_lock(&share->mutex); - if (!share->status_block) - { - /* - Create sub database 'status' if it doesn't exist from before - (This '*should*' always exist for table created with MySQL) - */ - - char name_buff[FN_REFLEN]; /* purecov: inspected */ - if (db_create(&share->status_block, db_env, 0)) /* purecov: inspected */ - goto end; /* purecov: inspected */ - share->status_block->set_flags(share->status_block,0); /* purecov: inspected */ - if (share->status_block->open(share->status_block, NULL, - fn_format(name_buff,share->table_name, - "", ha_berkeley_ext, - MY_UNPACK_FILENAME|MY_APPEND_EXT), - "status", DB_BTREE, - DB_THREAD | DB_CREATE, my_umask)) /* purecov: inspected */ - goto end; /* purecov: inspected */ - } - { - char rec_buff[4+MAX_KEY*4], *pos=rec_buff; - int4store(pos,share->rows); pos+=4; - for (uint i=0 ; i < table->s->keys ; i++) - { - int4store(pos,share->rec_per_key[i]); pos+=4; - } - DBUG_PRINT("info",("updating status for %s", share->table_name)); - (void) write_status(share->status_block, rec_buff, - (uint) (pos-rec_buff)); - share->status&= ~STATUS_BDB_ANALYZE; - 
share->org_rows=share->rows; - } -end: - pthread_mutex_unlock(&share->mutex); - } - DBUG_VOID_RETURN; -} - - -/* - Return an estimated of the number of rows in the table. - Used when sorting to allocate buffers and by the optimizer. -*/ - -ha_rows ha_berkeley::estimate_rows_upper_bound() -{ - return share->rows + HA_BERKELEY_EXTRA_ROWS; -} - -int ha_berkeley::cmp_ref(const byte *ref1, const byte *ref2) -{ - if (hidden_primary_key) - return memcmp(ref1, ref2, BDB_HIDDEN_PRIMARY_KEY_LENGTH); - - int result; - Field *field; - KEY *key_info=table->key_info+table_share->primary_key; - KEY_PART_INFO *key_part=key_info->key_part; - KEY_PART_INFO *end=key_part+key_info->key_parts; - - for (; key_part != end; key_part++) - { - field= key_part->field; - result= field->pack_cmp((const char*)ref1, (const char*)ref2, - key_part->length, 0); - if (result) - return result; - ref1+= field->packed_col_length((const char*)ref1, key_part->length); - ref2+= field->packed_col_length((const char*)ref2, key_part->length); - } - - return 0; -} - - -bool ha_berkeley::check_if_incompatible_data(HA_CREATE_INFO *info, - uint table_changes) -{ - if (table_changes < IS_EQUAL_YES) - return COMPATIBLE_DATA_NO; - return COMPATIBLE_DATA_YES; -} - -struct st_mysql_storage_engine berkeley_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &berkeley_hton }; - -mysql_declare_plugin(berkeley) -{ - MYSQL_STORAGE_ENGINE_PLUGIN, - &berkeley_storage_engine, - berkeley_hton_name, - "Sleepycat Software", - "Supports transactions and page-level locking", - berkeley_init, /* Plugin Init */ - NULL, /* Plugin Deinit */ - 0x0100, /* 1.0 */ - 0 -} -mysql_declare_plugin_end; - -#endif diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h deleted file mode 100644 index 47aab1fbb68..00000000000 --- a/sql/ha_berkeley.h +++ /dev/null @@ -1,180 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
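For context on the handler being removed: the get_status()/update_status() pair above persists per-table statistics in the 'status' sub-database as one flat record, a 4-byte row count followed by one 4-byte rec_per_key value per index, written with int4store() and read back with uint4korr(). The stand-alone C++ sketch below only illustrates that record layout; the function names are illustrative and the two byte helpers merely stand in for MySQL's little-endian macros.

    #include <cstdint>
    #include <vector>

    // Little-endian helpers standing in for MySQL's int4store()/uint4korr().
    static void store4(unsigned char *p, uint32_t v)
    {
      p[0]= (unsigned char) (v & 0xff);
      p[1]= (unsigned char) ((v >> 8) & 0xff);
      p[2]= (unsigned char) ((v >> 16) & 0xff);
      p[3]= (unsigned char) ((v >> 24) & 0xff);
    }
    static uint32_t korr4(const unsigned char *p)
    {
      return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
             ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
    }

    // Pack a status record: row count first, then rec_per_key for each index,
    // the same layout update_status() stores under the "status" key.
    static std::vector<unsigned char>
    pack_status(uint32_t rows, const std::vector<uint32_t> &rec_per_key)
    {
      std::vector<unsigned char> rec(4 + 4 * rec_per_key.size());
      store4(&rec[0], rows);
      for (size_t i= 0; i < rec_per_key.size(); i++)
        store4(&rec[4 + 4 * i], rec_per_key[i]);
      return rec;
    }

    // Unpack it again, as get_status() does when a table share is initialised.
    static void unpack_status(const std::vector<unsigned char> &rec,
                              uint32_t *rows, std::vector<uint32_t> *rec_per_key)
    {
      *rows= korr4(&rec[0]);
      for (size_t i= 0; 4 + 4 * (i + 1) <= rec.size(); i++)
        rec_per_key->push_back(korr4(&rec[4 + 4 * i]));
    }

    int main()
    {
      std::vector<unsigned char> rec= pack_status(1000, {1, 42});
      uint32_t rows= 0;
      std::vector<uint32_t> rpk;
      unpack_status(rec, &rows, &rpk);
      return (rows == 1000 && rpk.size() == 2) ? 0 : 1;
    }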
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ -#endif - -/* class for the the myisam handler */ - -#include - -#define BDB_HIDDEN_PRIMARY_KEY_LENGTH 5 - -typedef struct st_berkeley_share { - ulonglong auto_ident; - ha_rows rows, org_rows; - ulong *rec_per_key; - THR_LOCK lock; - pthread_mutex_t mutex; - char *table_name; - DB *status_block, *file, **key_file; - u_int32_t *key_type; - uint table_name_length,use_count; - uint status,version; - uint ref_length; - bool fixed_length_primary_key, fixed_length_row; -} BDB_SHARE; - - -class ha_berkeley: public handler -{ - THR_LOCK_DATA lock; - DBT last_key,current_row; - gptr alloc_ptr; - byte *rec_buff; - char *key_buff, *key_buff2, *primary_key_buff; - DB *file, **key_file; - DB_TXN *transaction; - u_int32_t *key_type; - DBC *cursor; - BDB_SHARE *share; - ulong int_table_flags; - ulong alloced_rec_buff_length; - ulong changed_rows; - uint primary_key,last_dup_key, hidden_primary_key, version; - bool key_read, using_ignore; - bool fix_rec_buff_for_blob(ulong length); - byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH]; - - ulong max_row_length(const byte *buf); - int pack_row(DBT *row,const byte *record, bool new_row); - void unpack_row(char *record, DBT *row); - void unpack_key(char *record, DBT *key, uint index); - DBT *create_key(DBT *key, uint keynr, char *buff, const byte *record, - int key_length = MAX_KEY_LENGTH); - DBT *pack_key(DBT *key, uint keynr, char *buff, const byte *key_ptr, - uint key_length); - int remove_key(DB_TXN *trans, uint keynr, const byte *record, DBT *prim_key); - int remove_keys(DB_TXN *trans,const byte *record, DBT *new_record, - DBT *prim_key, key_map *keys); - int restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key, - const byte *old_row, DBT *old_key, - const byte *new_row, DBT *new_key); - int key_cmp(uint keynr, const byte * old_row, const byte * new_row); - int update_primary_key(DB_TXN *trans, bool primary_key_changed, - const byte * old_row, DBT *old_key, - const byte * new_row, DBT *prim_key, - bool local_using_ignore); - int read_row(int error, char *buf, uint keynr, DBT *row, DBT *key, bool); - DBT *get_pos(DBT *to, byte *pos); - - public: - ha_berkeley(TABLE_SHARE *table_arg); - ~ha_berkeley() {} - const char *table_type() const { return "BerkeleyDB"; } - ulong index_flags(uint idx, uint part, bool all_parts) const; - const char *index_type(uint key_number) { return "BTREE"; } - const char **bas_ext() const; - ulonglong table_flags(void) const { return int_table_flags; } - uint max_supported_keys() const { return MAX_KEY-1; } - uint extra_rec_buf_length() const { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; } - ha_rows estimate_rows_upper_bound(); - uint max_supported_key_length() const { return UINT_MAX32; } - uint max_supported_key_part_length() const { return UINT_MAX32; } - - const key_map *keys_to_use_for_scanning() { return &key_map_full; } - - int open(const char *name, int mode, uint test_if_locked); - int close(void); - double scan_time(); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_init(uint index, bool sorted); - int index_end(); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * 
buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_last(byte * buf, const byte * key, uint key_len); - int index_next(byte * buf); - int index_next_same(byte * buf, const byte *key, uint keylen); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); - int rnd_init(bool scan); - int rnd_end(); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - void position(const byte *record); - void info(uint); - int extra(enum ha_extra_function operation); - int reset(void); - int external_lock(THD *thd, int lock_type); - int start_stmt(THD *thd, thr_lock_type lock_type); - void position(byte *record); - int analyze(THD* thd,HA_CHECK_OPT* check_opt); - int optimize(THD* thd, HA_CHECK_OPT* check_opt); - int check(THD* thd, HA_CHECK_OPT* check_opt); - - ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); - int create(const char *name, register TABLE *form, - HA_CREATE_INFO *create_info); - int delete_table(const char *name); - int rename_table(const char* from, const char* to); - THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, - enum thr_lock_type lock_type); - - void get_status(); - inline void get_auto_primary_key(byte *to) - { - pthread_mutex_lock(&share->mutex); - share->auto_ident++; - int5store(to,share->auto_ident); - pthread_mutex_unlock(&share->mutex); - } - virtual void get_auto_increment(ulonglong offset, ulonglong increment, - ulonglong nb_desired_values, - ulonglong *first_value, - ulonglong *nb_reserved_values); - void print_error(int error, myf errflag); - uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; } - bool primary_key_is_clustered() { return true; } - int cmp_ref(const byte *ref1, const byte *ref2); - bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); -}; - -extern const u_int32_t bdb_DB_TXN_NOSYNC; -extern const u_int32_t bdb_DB_RECOVER; -extern const u_int32_t bdb_DB_PRIVATE; -extern const u_int32_t bdb_DB_DIRECT_DB; -extern const u_int32_t bdb_DB_DIRECT_LOG; -extern bool berkeley_shared_data; -extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type, - berkeley_lock_types[]; -extern ulong berkeley_max_lock, berkeley_log_buffer_size; -extern ulonglong berkeley_cache_size; -extern ulong berkeley_region_size, berkeley_cache_parts; -extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; -extern long berkeley_lock_scan_time; -extern TYPELIB berkeley_lock_typelib; - -int berkeley_init(void); -int berkeley_end(ha_panic_function type); -bool berkeley_flush_logs(void); -bool berkeley_show_status(THD *thd, stat_print_fn *print, enum ha_stat_type); diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index bf3c3ac1beb..8aca6362462 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -1142,7 +1142,7 @@ bool ha_federated::create_where_from_key(String *to, Field *field= key_part->field; uint store_length= key_part->store_length; uint part_length= min(store_length, length); - needs_quotes= 1; + needs_quotes= field->str_needs_quotes(); DBUG_DUMP("key, start of loop", (char *) ptr, length); if (key_part->null_bit) @@ -1663,23 +1663,22 @@ int ha_federated::write_row(byte *buf) { commas_added= TRUE; if ((*field)->is_null()) - insert_field_value_string.append(STRING_WITH_LEN(" NULL ")); + values_string.append(STRING_WITH_LEN(" NULL ")); else { + bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&insert_field_value_string); - values_string.append('\''); + if (needs_quote) + 
values_string.append('\''); insert_field_value_string.print(&values_string); - values_string.append('\''); + if (needs_quote) + values_string.append('\''); insert_field_value_string.length(0); } /* append the field name */ insert_string.append((*field)->field_name); - /* append the value */ - values_string.append(insert_field_value_string); - insert_field_value_string.length(0); - /* append commas between both fields and fieldnames */ /* unfortunately, we can't use the logic if *(fields + 1) to @@ -1884,12 +1883,15 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) update_string.append(STRING_WITH_LEN(" NULL ")); else { - my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); /* otherwise = */ + my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); + bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&field_value); - update_string.append('\''); + if (needs_quote) + update_string.append('\''); field_value.print(&update_string); - update_string.append('\''); + if (needs_quote) + update_string.append('\''); field_value.length(0); tmp_restore_column_map(table->read_set, old_map); } @@ -1903,12 +1905,15 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) where_string.append(STRING_WITH_LEN(" IS NULL ")); else { + bool needs_quote= (*field)->str_needs_quotes(); where_string.append(STRING_WITH_LEN(" = ")); (*field)->val_str(&field_value, (char*) (old_data + (*field)->offset())); - where_string.append('\''); + if (needs_quote) + where_string.append('\''); field_value.print(&where_string); - where_string.append('\''); + if (needs_quote) + where_string.append('\''); field_value.length(0); } where_string.append(STRING_WITH_LEN(" AND ")); @@ -1983,11 +1988,14 @@ int ha_federated::delete_row(const byte *buf) } else { - delete_string.append(STRING_WITH_LEN(" = ")); - cur_field->val_str(&data_string); - delete_string.append('\''); - data_string.print(&delete_string); - delete_string.append('\''); + bool needs_quote= cur_field->str_needs_quotes(); + delete_string.append(STRING_WITH_LEN(" = ")); + cur_field->val_str(&data_string); + if (needs_quote) + delete_string.append('\''); + data_string.print(&delete_string); + if (needs_quote) + delete_string.append('\''); } delete_string.append(STRING_WITH_LEN(" AND ")); } diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 9b44573673d..e8f2ec2af2b 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -256,13 +256,15 @@ int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans) } inline -int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans) +int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans, + bool force_release) { #ifdef NOT_USED int m_batch_execute= 0; if (m_batch_execute) return 0; #endif + h->release_completed_operations(trans, force_release); return h->m_ignore_no_key ? 
execute_no_commit_ignore_no_key(h,trans) : trans->execute(NdbTransaction::NoCommit, @@ -297,13 +299,15 @@ int execute_commit(THD *thd, NdbTransaction *trans) } inline -int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans) +int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans, + bool force_release) { #ifdef NOT_USED int m_batch_execute= 0; if (m_batch_execute) return 0; #endif + h->release_completed_operations(trans, force_release); return trans->execute(NdbTransaction::NoCommit, NdbTransaction::AO_IgnoreError, h->m_force_send); @@ -328,6 +332,7 @@ Thd_ndb::Thd_ndb() all= NULL; stmt= NULL; error= 0; + query_state&= NDB_QUERY_NORMAL; options= 0; (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0, (hash_get_key)thd_ndb_share_get_key, 0, 0); @@ -1696,7 +1701,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf, ERR_RETURN(trans->getNdbError()); } - if (execute_no_commit_ie(this,trans) != 0) + if (execute_no_commit_ie(this,trans,false) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1761,7 +1766,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data, } } - if (execute_no_commit(this,trans) != 0) + if (execute_no_commit(this,trans,false) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1914,7 +1919,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record) } last= trans->getLastDefinedOperation(); if (first) - res= execute_no_commit_ie(this,trans); + res= execute_no_commit_ie(this,trans,false); else { // Table has no keys @@ -1963,7 +1968,7 @@ int ha_ndbcluster::unique_index_read(const byte *key, if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit_ie(this,trans) != 0) + if (execute_no_commit_ie(this,trans,false) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -2011,7 +2016,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) */ if (m_ops_pending && m_blobs_pending) { - if (execute_no_commit(this,trans) != 0) + if (execute_no_commit(this,trans,false) != 0) DBUG_RETURN(ndb_err(trans)); m_ops_pending= 0; m_blobs_pending= FALSE; @@ -2043,7 +2048,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) { if (m_transaction_on) { - if (execute_no_commit(this,trans) != 0) + if (execute_no_commit(this,trans,false) != 0) DBUG_RETURN(-1); } else @@ -2370,7 +2375,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, ERR_RETURN(trans->getNdbError()); } - if (execute_no_commit(this,trans) != 0) + if (execute_no_commit(this,trans,false) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_RETURN(next_result(buf)); @@ -2440,7 +2445,7 @@ int ha_ndbcluster::full_table_scan(byte *buf) if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit(this,trans) != 0) + if (execute_no_commit(this,trans,false) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_PRINT("exit", ("Scan started successfully")); DBUG_RETURN(next_result(buf)); @@ -2603,7 +2608,7 @@ int ha_ndbcluster::write_row(byte *record) m_bulk_insert_not_flushed= FALSE; if (m_transaction_on) { - if (execute_no_commit(this,trans) != 0) + if (execute_no_commit(this,trans,false) != 0) { m_skip_auto_increment= TRUE; no_uncommitted_rows_execute_failure(); @@ -2840,7 +2845,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) op->setValue(no_fields, part_func_value); } // Execute update operation - if (!cursor && execute_no_commit(this,trans) != 0) { + if (!cursor && execute_no_commit(this,trans,false) != 0) { 
no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -2926,7 +2931,7 @@ int ha_ndbcluster::delete_row(const byte *record) } // Execute delete operation - if (execute_no_commit(this,trans) != 0) { + if (execute_no_commit(this,trans,false) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3392,6 +3397,26 @@ int ha_ndbcluster::close_scan() NdbScanOperation *cursor= m_active_cursor ? m_active_cursor : m_multi_cursor; + if (m_lock_tuple) + { + /* + Lock level m_lock.type either TL_WRITE_ALLOW_WRITE + (SELECT FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS (SELECT + LOCK WITH SHARE MODE) and row was not explictly unlocked + with unlock_row() call + */ + NdbOperation *op; + // Lock row + DBUG_PRINT("info", ("Keeping lock on scanned row")); + + if (!(op= cursor->lockCurrentTuple())) + { + m_lock_tuple= false; + ERR_RETURN(trans->getNdbError()); + } + m_ops_pending++; + } + m_lock_tuple= false; if (m_ops_pending) { /* @@ -3399,7 +3424,7 @@ int ha_ndbcluster::close_scan() deleteing/updating transaction before closing the scan */ DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); - if (execute_no_commit(this,trans) != 0) { + if (execute_no_commit(this,trans,false) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3793,7 +3818,7 @@ int ha_ndbcluster::end_bulk_insert() m_bulk_insert_not_flushed= FALSE; if (m_transaction_on) { - if (execute_no_commit(this, trans) != 0) + if (execute_no_commit(this, trans,false) != 0) { no_uncommitted_rows_execute_failure(); my_errno= error= ndb_err(trans); @@ -3968,6 +3993,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) ERR_RETURN(ndb->getNdbError()); thd_ndb->init_open_tables(); thd_ndb->stmt= trans; + thd_ndb->query_state&= NDB_QUERY_NORMAL; trans_register_ha(thd, FALSE, &ndbcluster_hton); } else @@ -3983,6 +4009,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) ERR_RETURN(ndb->getNdbError()); thd_ndb->init_open_tables(); thd_ndb->all= trans; + thd_ndb->query_state&= NDB_QUERY_NORMAL; trans_register_ha(thd, TRUE, &ndbcluster_hton); /* @@ -4139,6 +4166,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type) thd_ndb->stmt= trans; trans_register_ha(thd, FALSE, &ndbcluster_hton); } + thd_ndb->query_state&= NDB_QUERY_NORMAL; m_active_trans= trans; // Start of statement @@ -7557,6 +7585,30 @@ int ha_ndbcluster::write_ndb_file(const char *name) DBUG_RETURN(error); } +void +ha_ndbcluster::release_completed_operations(NdbTransaction *trans, + bool force_release) +{ + if (trans->hasBlobOperation()) + { + /* We are reading/writing BLOB fields, + releasing operation records is unsafe + */ + return; + } + if (!force_release) + { + if (get_thd_ndb(current_thd)->query_state & NDB_QUERY_MULTI_READ_RANGE) + { + /* We are batching reads and have not consumed all fetched + rows yet, releasing operation records is unsafe + */ + return; + } + } + trans->releaseCompletedOperations(); +} + int ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, KEY_MULTI_RANGE *ranges, @@ -7572,6 +7624,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, NDB_INDEX_TYPE index_type= get_index_type(active_index); ulong reclength= table_share->reclength; NdbOperation* op; + Thd_ndb *thd_ndb= get_thd_ndb(current_thd); if (uses_blob_value()) { @@ -7585,7 +7638,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, sorted, buffer)); } - + thd_ndb->query_state|= NDB_QUERY_MULTI_READ_RANGE; m_disable_multi_read= FALSE; /** @@ 
-7757,7 +7810,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, */ m_current_multi_operation= lastOp ? lastOp->next() : m_active_trans->getFirstDefinedOperation(); - if (!(res= execute_no_commit_ie(this, m_active_trans))) + if (!(res= execute_no_commit_ie(this, m_active_trans, true))) { m_multi_range_defined= multi_range_curr; multi_range_curr= ranges; diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index a9e33491d07..c48f78a7f51 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -534,6 +534,11 @@ class Ndb_cond_traverse_context Ndb_rewrite_context *rewrite_stack; }; +typedef enum ndb_query_state_bits { + NDB_QUERY_NORMAL = 0, + NDB_QUERY_MULTI_READ_RANGE = 1 +} NDB_QUERY_STATE_BITS; + /* Place holder for ha_ndbcluster thread specific data */ @@ -571,6 +576,7 @@ class Thd_ndb int error; uint32 options; List changed_tables; + uint query_state; HASH open_tables; }; @@ -849,8 +855,8 @@ private: friend int execute_commit(ha_ndbcluster*, NdbTransaction*); friend int execute_no_commit_ignore_no_key(ha_ndbcluster*, NdbTransaction*); - friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*); - friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*); + friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool); + friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*, bool); NdbTransaction *m_active_trans; NdbScanOperation *m_active_cursor; @@ -898,6 +904,8 @@ private: bool m_force_send; ha_rows m_autoincrement_prefetch; bool m_transaction_on; + void release_completed_operations(NdbTransaction*, bool); + Ndb_cond_stack *m_cond_stack; bool m_disable_multi_read; byte *m_multi_range_result_ptr; diff --git a/sql/handler.cc b/sql/handler.cc index fbccfe7fa46..d67acf69d14 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -74,7 +74,6 @@ static const LEX_STRING sys_table_aliases[]= { {(char*)STRING_WITH_LEN("INNOBASE")}, {(char*)STRING_WITH_LEN("INNODB")}, {(char*)STRING_WITH_LEN("NDB")}, {(char*)STRING_WITH_LEN("NDBCLUSTER")}, - {(char*)STRING_WITH_LEN("BDB")}, {(char*)STRING_WITH_LEN("BERKELEYDB")}, {(char*)STRING_WITH_LEN("HEAP")}, {(char*)STRING_WITH_LEN("MEMORY")}, {(char*)STRING_WITH_LEN("MERGE")}, {(char*)STRING_WITH_LEN("MRG_MYISAM")}, {NullS, 0} @@ -1508,7 +1507,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode, /* Read first row (only) from a table - This is never called for InnoDB or BDB tables, as these table types + This is never called for InnoDB tables, as these table types has the HA_STATS_RECORDS_IS_EXACT set. 
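The release_completed_operations() helper added to ha_ndbcluster above frees completed operation records from the NdbTransaction before a NoCommit execute, but only when that is safe: never while blob operations are defined on the transaction, and not while a batched multi-range read is still being consumed (signalled by the new NDB_QUERY_MULTI_READ_RANGE bit in Thd_ndb::query_state) unless the caller forces the release, as read_multi_range_first() does once the next batch has been defined. The stand-alone sketch below models only that guard logic; MockTrans and main() are hypothetical stand-ins, not part of the NDB API.

    #include <cstdio>

    enum ndb_query_state_bits { NDB_QUERY_NORMAL= 0, NDB_QUERY_MULTI_READ_RANGE= 1 };

    // Hypothetical stand-in for NdbTransaction, exposing only what the guard needs.
    struct MockTrans
    {
      bool blob_ops;                     // models trans->hasBlobOperation()
      int  completed;                    // completed operation records still held
      void releaseCompletedOperations() { completed= 0; }
    };

    // Mirrors the guard in ha_ndbcluster::release_completed_operations().
    static void release_completed_operations(MockTrans *trans, unsigned query_state,
                                             bool force_release)
    {
      if (trans->blob_ops)
        return;                          // unsafe while reading/writing BLOB fields
      if (!force_release && (query_state & NDB_QUERY_MULTI_READ_RANGE))
        return;                          // batched ranges not fully consumed yet
      trans->releaseCompletedOperations();
    }

    int main()
    {
      MockTrans t= { false, 128 };
      release_completed_operations(&t, NDB_QUERY_MULTI_READ_RANGE, false);
      printf("still held: %d\n", t.completed);   // 128: kept while the batch is live
      release_completed_operations(&t, NDB_QUERY_MULTI_READ_RANGE, true);
      printf("after force: %d\n", t.completed);  // 0: released, as read_multi_range_first forces
      return 0;
    }

The ha_federated hunks further up make a fix in a similar spirit: generated INSERT, UPDATE and DELETE statements now wrap a value in single quotes only when Field::str_needs_quotes() reports that the column's value needs quoting, instead of quoting every value unconditionally. For a hypothetical row (id=42, name='abc') the emitted statement therefore becomes INSERT INTO t (id, name) VALUES (42, 'abc') rather than VALUES ('42', 'abc').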
*/ diff --git a/sql/lex.h b/sql/lex.h index 67daf4566f8..b8d6a662754 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -82,10 +82,8 @@ static SYMBOL symbols[] = { { "AVG", SYM(AVG_SYM)}, { "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)}, { "BACKUP", SYM(BACKUP_SYM)}, - { "BDB", SYM(BERKELEY_DB_SYM)}, { "BEFORE", SYM(BEFORE_SYM)}, { "BEGIN", SYM(BEGIN_SYM)}, - { "BERKELEYDB", SYM(BERKELEY_DB_SYM)}, { "BETWEEN", SYM(BETWEEN_SYM)}, { "BIGINT", SYM(BIGINT)}, { "BINARY", SYM(BINARY)}, diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index afe2c237d3d..4910f66ff69 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1620,12 +1620,6 @@ extern handlerton innobase_hton; #else extern SHOW_COMP_OPTION have_innodb; #endif -#ifdef WITH_BERKELEY_STORAGE_ENGINE -extern handlerton berkeley_hton; -#define have_berkeley_db berkeley_hton.state -#else -extern SHOW_COMP_OPTION have_berkeley_db; -#endif #ifdef WITH_EXAMPLE_STORAGE_ENGINE extern handlerton example_hton; #define have_example_db example_hton.state diff --git a/sql/mysqld.cc b/sql/mysqld.cc index b4558dec35e..9714759e6e2 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -310,7 +310,7 @@ static bool lower_case_table_names_used= 0; static bool volatile select_thread_in_use, signal_thread_in_use; static bool volatile ready_to_exit; static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0; -static my_bool opt_bdb, opt_isam, opt_ndbcluster, opt_merge; +static my_bool opt_isam, opt_ndbcluster, opt_merge; static my_bool opt_short_log_format= 0; static uint kill_cached_threads, wake_thread; static ulong killed_threads, thread_created; @@ -332,10 +332,6 @@ static I_List thread_cache; static pthread_cond_t COND_thread_cache, COND_flush_thread_cache; -#ifdef WITH_BERKELEY_STORAGE_ENGINE -static my_bool opt_sync_bdb_logs; -#endif - /* Global variables */ bool opt_update_log, opt_bin_log; @@ -405,22 +401,6 @@ extern ulong srv_commit_concurrency; extern ulong srv_flush_log_at_trx_commit; } #endif -#ifdef WITH_BERKELEY_STORAGE_ENGINE -#ifndef HAVE_U_INT32_T -typedef unsigned int u_int32_t; -#endif -extern const u_int32_t bdb_DB_TXN_NOSYNC, bdb_DB_RECOVER, bdb_DB_PRIVATE, - bdb_DB_DIRECT_DB, bdb_DB_DIRECT_LOG; -extern bool berkeley_shared_data; -extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type, - berkeley_lock_types[]; -extern ulong berkeley_max_lock, berkeley_log_buffer_size; -extern ulonglong berkeley_cache_size; -extern ulong berkeley_region_size, berkeley_cache_parts; -extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; -extern long berkeley_lock_scan_time; -extern TYPELIB berkeley_lock_typelib; -#endif #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE const char *opt_ndbcluster_connectstring= 0; @@ -3355,11 +3335,7 @@ server."); static void create_maintenance_thread() { - if ( -#ifdef WITH_BERKELEY_STORAGE_ENGINE - (have_berkeley_db == SHOW_OPTION_YES) || -#endif - (flush_time && flush_time != ~(ulong) 0L)) + if (flush_time && flush_time != ~(ulong) 0L) { pthread_t hThread; if (pthread_create(&hThread,&connection_attrib,handle_manager,0)) @@ -4901,38 +4877,6 @@ struct my_option my_long_options[] = "Path to installation directory. All paths are usually resolved relative to this.", (gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). 
\ -Disable with --skip-bdb (will save memory).", - (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0, - 0, 0, 0}, -#ifdef WITH_BERKELEY_STORAGE_ENGINE - {"bdb-data-direct", OPT_BDB_DATA_DIRECT, - "Turn off system buffering of BDB database files to avoid double caching.", - 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home, - (gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-lock-detect", OPT_BDB_LOCK, - "Berkeley lock detect (DEFAULT, OLDEST, RANDOM or YOUNGEST, # sec).", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-log-direct", OPT_BDB_LOG_DIRECT, - "Turn off system buffering of BDB log files to avoid double caching.", - 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-logdir", OPT_BDB_LOG, "Berkeley DB log file directory.", - (gptr*) &berkeley_logdir, (gptr*) &berkeley_logdir, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-no-recover", OPT_BDB_NO_RECOVER, - "Don't try to recover Berkeley DB tables on start.", 0, 0, 0, GET_NO_ARG, - NO_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-no-sync", OPT_BDB_NOSYNC, - "This option is deprecated, use --skip-sync-bdb-logs instead", - 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-shared-data", OPT_BDB_SHARED, - "Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, - 0, 0, 0, 0, 0}, - {"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name.", - (gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, -#endif /* WITH_BERKELEY_STORAGE_ENGINE */ {"big-tables", OPT_BIG_TABLES, "Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -5747,31 +5691,6 @@ log and this option does nothing anymore.", "The number of outstanding connection requests MySQL can have. 
This comes into play when the main MySQL thread gets very many connection requests in a very short time.", (gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG, REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 }, -#ifdef WITH_BERKELEY_STORAGE_ENGINE - { "bdb_cache_parts", OPT_BDB_CACHE_PARTS, - "Number of parts to use for BDB cache.", - (gptr*) &berkeley_cache_parts, (gptr*) &berkeley_cache_parts, 0, GET_ULONG, - REQUIRED_ARG, 1, 1, 1024, 0, 1, 0}, - { "bdb_cache_size", OPT_BDB_CACHE_SIZE, - "The buffer that is allocated to cache index and rows for BDB tables.", - (gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULL, - REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (ulonglong) ~0, 0, IO_SIZE, 0}, - {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.", - (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, - REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, - {"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE, - "The buffer that is allocated to cache index and rows for BDB tables.", - (gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0, - GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ~0L, 0, 1024, 0}, - {"bdb_max_lock", OPT_BDB_MAX_LOCK, - "The maximum number of locks you can have active on a BDB table.", - (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, - REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, - {"bdb_region_size", OPT_BDB_REGION_SIZE, - "The size of the underlying logging area of the Berkeley DB environment.", - (gptr*) &berkeley_region_size, (gptr*) &berkeley_region_size, 0, GET_ULONG, - OPT_ARG, 60*1024L, 60*1024L, (long) ~0, 0, 1, 0}, -#endif /* WITH_BERKELEY_STORAGE_ENGINE */ {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE, "The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.", (gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG, @@ -6263,12 +6182,6 @@ The minimum value for this variable is 4096.", (gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG, MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD, 1, 0}, -#ifdef WITH_BERKELEY_STORAGE_ENGINE - {"sync-bdb-logs", OPT_BDB_SYNC, - "Synchronously flush Berkeley DB logs. Enabled by default", - (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, - NO_ARG, 1, 0, 0, 0, 0, 0}, -#endif /* WITH_BERKELEY_STORAGE_ENGINE */ {"sync-binlog", OPT_SYNC_BINLOG, "Synchronously flush binary log to disk after every #th event. 
" "Use 0 (default) to disable synchronous flushing.", @@ -7583,59 +7496,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), have_merge_db= SHOW_OPTION_YES; else have_merge_db= SHOW_OPTION_DISABLED; -#ifdef WITH_BERKELEY_STORAGE_ENGINE - case OPT_BDB_NOSYNC: - /* Deprecated option */ - opt_sync_bdb_logs= 0; - /* Fall through */ - case OPT_BDB_SYNC: - if (!opt_sync_bdb_logs) - berkeley_env_flags|= bdb_DB_TXN_NOSYNC; - else - berkeley_env_flags&= ~bdb_DB_TXN_NOSYNC; - break; - case OPT_BDB_LOG_DIRECT: - berkeley_env_flags|= bdb_DB_DIRECT_DB; - break; - case OPT_BDB_DATA_DIRECT: - berkeley_env_flags|= bdb_DB_DIRECT_LOG; - break; - case OPT_BDB_NO_RECOVER: - berkeley_init_flags&= ~(bdb_DB_RECOVER); - break; - case OPT_BDB_LOCK: - { - int type; - if ((type=find_type(argument, &berkeley_lock_typelib, 2)) > 0) - berkeley_lock_type=berkeley_lock_types[type-1]; - else - { - int err; - char *end; - uint length= strlen(argument); - long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err); - if (end == argument+length) - berkeley_lock_scan_time= value; - else - { - fprintf(stderr,"Unknown lock type: %s\n",argument); - exit(1); - } - } - break; - } - case OPT_BDB_SHARED: - berkeley_init_flags&= ~(bdb_DB_PRIVATE); - berkeley_shared_data= 1; - break; -#endif /* WITH_BERKELEY_STORAGE_ENGINE */ case OPT_BDB: -#ifdef WITH_BERKELEY_STORAGE_ENGINE - if (opt_bdb) - have_berkeley_db= SHOW_OPTION_YES; - else - have_berkeley_db= SHOW_OPTION_DISABLED; -#endif break; case OPT_NDBCLUSTER: #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE @@ -7868,10 +7729,6 @@ static void get_options(int argc,char **argv) #ifndef WITH_ISAM_STORAGE_ENGINE if (opt_isam) sql_print_warning("this binary does not contain ISAM storage engine"); -#endif -#ifndef WITH_BERKELEY_STORAGE_ENGINE - if (opt_bdb) - sql_print_warning("this binary does not contain BDB storage engine"); #endif if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes) && !opt_slow_log) @@ -8215,7 +8072,6 @@ void refresh_status(THD *thd) /***************************************************************************** Instantiate have_xyx for missing storage engines *****************************************************************************/ -#undef have_berkeley_db #undef have_innodb #undef have_ndbcluster #undef have_example_db @@ -8225,7 +8081,6 @@ void refresh_status(THD *thd) #undef have_partition_db #undef have_blackhole_db -SHOW_COMP_OPTION have_berkeley_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO; SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO; SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO; @@ -8235,14 +8090,6 @@ SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO; -#ifndef WITH_BERKELEY_STORAGE_ENGINE -bool berkeley_shared_data; -ulong berkeley_max_lock, berkeley_log_buffer_size; -ulonglong berkeley_cache_size; -ulong berkeley_region_size, berkeley_cache_parts; -char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; -#endif - #ifndef WITH_INNOBASE_STORAGE_ENGINE uint innobase_flush_log_at_trx_commit; ulong innobase_fast_shutdown; diff --git a/sql/set_var.cc b/sql/set_var.cc index 0be4fd72b30..7243b7cbbe1 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -59,13 +59,6 @@ #include "event_scheduler.h" -/* WITH_BERKELEY_STORAGE_ENGINE */ -extern bool berkeley_shared_data; -extern ulong berkeley_max_lock, berkeley_log_buffer_size; -extern ulonglong berkeley_cache_size; -extern ulong 
berkeley_region_size, berkeley_cache_parts; -extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; - /* WITH_INNOBASE_STORAGE_ENGINE */ extern uint innobase_flush_log_at_trx_commit; extern ulong innobase_fast_shutdown; @@ -669,7 +662,6 @@ sys_var_thd_time_zone sys_time_zone("time_zone"); /* Read only variables */ sys_var_have_variable sys_have_archive_db("have_archive", &have_archive_db); -sys_var_have_variable sys_have_berkeley_db("have_bdb", &have_berkeley_db); sys_var_have_variable sys_have_blackhole_db("have_blackhole_engine", &have_blackhole_db); sys_var_have_variable sys_have_compress("have_compress", &have_compress); @@ -760,15 +752,6 @@ SHOW_VAR init_vars[]= { {sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS}, {"back_log", (char*) &back_log, SHOW_LONG}, {sys_basedir.name, (char*) &sys_basedir, SHOW_SYS}, - {"bdb_cache_parts", (char*) &berkeley_cache_parts, SHOW_LONG}, - {"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONGLONG}, - {"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR}, - {"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG}, - {"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR}, - {"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG}, - {"bdb_region_size", (char*) &berkeley_region_size, SHOW_LONG}, - {"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL}, - {"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR}, {sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS}, {sys_binlog_format.name, (char*) &sys_binlog_format, SHOW_SYS}, {sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS}, @@ -813,7 +796,6 @@ SHOW_VAR init_vars[]= { {sys_var_general_log_path.name, (char*) &sys_var_general_log_path, SHOW_SYS}, {sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS}, {sys_have_archive_db.name, (char*) &have_archive_db, SHOW_HAVE}, - {sys_have_berkeley_db.name, (char*) &have_berkeley_db, SHOW_HAVE}, {sys_have_blackhole_db.name,(char*) &have_blackhole_db, SHOW_HAVE}, {sys_have_compress.name, (char*) &have_compress, SHOW_HAVE}, {sys_have_crypt.name, (char*) &have_crypt, SHOW_HAVE}, diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 64cf018e03b..297e4c5c374 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -5,5567 +5,5567 @@ default-language eng start-error-number 1000 ER_HASHCHK - eng "hashchk" + eng "hashchk" ER_NISAMCHK - eng "isamchk" + eng "isamchk" ER_NO - cze "NE" - dan "NEJ" - nla "NEE" - eng "NO" - est "EI" - fre "NON" - ger "Nein" - greek "" - hun "NEM" - kor "ƴϿ" - nor "NEI" - norwegian-ny "NEI" - pol "NIE" - por "NO" - rum "NU" - rus "" - serbian "NE" - slo "NIE" - ukr "" + cze "NE" + dan "NEJ" + nla "NEE" + eng "NO" + est "EI" + fre "NON" + ger "Nein" + greek "" + hun "NEM" + kor "ƴϿ" + nor "NEI" + norwegian-ny "NEI" + pol "NIE" + por "NO" + rum "NU" + rus "" + serbian "NE" + slo "NIE" + ukr "" ER_YES - cze "ANO" - dan "JA" - nla "JA" - eng "YES" - est "JAH" - fre "OUI" - ger "Ja" - greek "" - hun "IGEN" - ita "SI" - kor "" - nor "JA" - norwegian-ny "JA" - pol "TAK" - por "SIM" - rum "DA" - rus "" - serbian "DA" - slo "no" - spa "SI" - ukr "" + cze "ANO" + dan "JA" + nla "JA" + eng "YES" + est "JAH" + fre "OUI" + ger "Ja" + greek "" + hun "IGEN" + ita "SI" + kor "" + nor "JA" + norwegian-ny "JA" + pol "TAK" + por "SIM" + rum "DA" + rus "" + serbian "DA" + slo "no" + spa "SI" + ukr "" ER_CANT_CREATE_FILE - cze "Nemohu vytvo-Bit soubor '%-.64s' (chybov kd: %d)" - dan "Kan ikke oprette filen '%-.64s' (Fejlkode: 
%d)" - nla "Kan file '%-.64s' niet aanmaken (Errcode: %d)" - eng "Can't create file '%-.200s' (errno: %d)" - est "Ei suuda luua faili '%-.64s' (veakood: %d)" - fre "Ne peut crer le fichier '%-.64s' (Errcode: %d)" - ger "Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "A '%-.64s' file nem hozhato letre (hibakod: %d)" - ita "Impossibile creare il file '%-.64s' (errno: %d)" - jpn "'%-.64s' ե뤬ޤ (errno: %d)" - kor "ȭ '%-.64s' ߽ϴ. (ȣ: %d)" - nor "Kan ikke opprette fila '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje opprette fila '%-.64s' (Feilkode: %d)" - pol "Nie mona stworzy pliku '%-.64s' (Kod bdu: %d)" - por "No pode criar o arquivo '%-.64s' (erro no. %d)" - rum "Nu pot sa creez fisierul '%-.64s' (Eroare: %d)" - rus " '%-.64s' (: %d)" - serbian "Ne mogu da kreiram file '%-.64s' (errno: %d)" - slo "Nemem vytvori sbor '%-.64s' (chybov kd: %d)" - spa "No puedo crear archivo '%-.64s' (Error: %d)" - swe "Kan inte skapa filen '%-.64s' (Felkod: %d)" - ukr " '%-.64s' (: %d)" + cze "Nemohu vytvo-Bit soubor '%-.64s' (chybov kd: %d)" + dan "Kan ikke oprette filen '%-.64s' (Fejlkode: %d)" + nla "Kan file '%-.64s' niet aanmaken (Errcode: %d)" + eng "Can't create file '%-.200s' (errno: %d)" + est "Ei suuda luua faili '%-.64s' (veakood: %d)" + fre "Ne peut crer le fichier '%-.64s' (Errcode: %d)" + ger "Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "A '%-.64s' file nem hozhato letre (hibakod: %d)" + ita "Impossibile creare il file '%-.64s' (errno: %d)" + jpn "'%-.64s' ե뤬ޤ (errno: %d)" + kor "ȭ '%-.64s' ߽ϴ. (ȣ: %d)" + nor "Kan ikke opprette fila '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette fila '%-.64s' (Feilkode: %d)" + pol "Nie mona stworzy pliku '%-.64s' (Kod bdu: %d)" + por "No pode criar o arquivo '%-.64s' (erro no. %d)" + rum "Nu pot sa creez fisierul '%-.64s' (Eroare: %d)" + rus " '%-.64s' (: %d)" + serbian "Ne mogu da kreiram file '%-.64s' (errno: %d)" + slo "Nemem vytvori sbor '%-.64s' (chybov kd: %d)" + spa "No puedo crear archivo '%-.64s' (Error: %d)" + swe "Kan inte skapa filen '%-.64s' (Felkod: %d)" + ukr " '%-.64s' (: %d)" ER_CANT_CREATE_TABLE - cze "Nemohu vytvo-Bit tabulku '%-.64s' (chybov kd: %d)" - dan "Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)" - nla "Kan tabel '%-.64s' niet aanmaken (Errcode: %d)" - eng "Can't create table '%-.64s' (errno: %d)" - jps "'%-.64s' e[u܂.(errno: %d)", - est "Ei suuda luua tabelit '%-.64s' (veakood: %d)" - fre "Ne peut crer la table '%-.64s' (Errcode: %d)" - ger "Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "A '%-.64s' tabla nem hozhato letre (hibakod: %d)" - ita "Impossibile creare la tabella '%-.64s' (errno: %d)" - jpn "'%-.64s' ơ֥뤬ޤ.(errno: %d)" - kor "̺ '%-.64s' ߽ϴ. (ȣ: %d)" - nor "Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)" - pol "Nie mona stworzy tabeli '%-.64s' (Kod bdu: %d)" - por "No pode criar a tabela '%-.64s' (erro no. 
%d)" - rum "Nu pot sa creez tabla '%-.64s' (Eroare: %d)" - rus " '%-.64s' (: %d)" - serbian "Ne mogu da kreiram tabelu '%-.64s' (errno: %d)" - slo "Nemem vytvori tabuku '%-.64s' (chybov kd: %d)" - spa "No puedo crear tabla '%-.64s' (Error: %d)" - swe "Kan inte skapa tabellen '%-.64s' (Felkod: %d)" - ukr " '%-.64s' (: %d)" + cze "Nemohu vytvo-Bit tabulku '%-.64s' (chybov kd: %d)" + dan "Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)" + nla "Kan tabel '%-.64s' niet aanmaken (Errcode: %d)" + eng "Can't create table '%-.64s' (errno: %d)" + jps "'%-.64s' e[u܂.(errno: %d)", + est "Ei suuda luua tabelit '%-.64s' (veakood: %d)" + fre "Ne peut crer la table '%-.64s' (Errcode: %d)" + ger "Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "A '%-.64s' tabla nem hozhato letre (hibakod: %d)" + ita "Impossibile creare la tabella '%-.64s' (errno: %d)" + jpn "'%-.64s' ơ֥뤬ޤ.(errno: %d)" + kor "̺ '%-.64s' ߽ϴ. (ȣ: %d)" + nor "Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)" + pol "Nie mona stworzy tabeli '%-.64s' (Kod bdu: %d)" + por "No pode criar a tabela '%-.64s' (erro no. %d)" + rum "Nu pot sa creez tabla '%-.64s' (Eroare: %d)" + rus " '%-.64s' (: %d)" + serbian "Ne mogu da kreiram tabelu '%-.64s' (errno: %d)" + slo "Nemem vytvori tabuku '%-.64s' (chybov kd: %d)" + spa "No puedo crear tabla '%-.64s' (Error: %d)" + swe "Kan inte skapa tabellen '%-.64s' (Felkod: %d)" + ukr " '%-.64s' (: %d)" ER_CANT_CREATE_DB - cze "Nemohu vytvo-Bit databzi '%-.64s' (chybov kd: %d)" - dan "Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)" - nla "Kan database '%-.64s' niet aanmaken (Errcode: %d)" - eng "Can't create database '%-.64s' (errno: %d)" - jps "'%-.64s' f[^x[X܂ (errno: %d)", - est "Ei suuda luua andmebaasi '%-.64s' (veakood: %d)" - fre "Ne peut crer la base '%-.64s' (Erreur %d)" - ger "Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)" - ita "Impossibile creare il database '%-.64s' (errno: %d)" - jpn "'%-.64s' ǡ١ޤ (errno: %d)" - kor "Ÿ̽ '%-.64s' ߽ϴ.. (ȣ: %d)" - nor "Kan ikke opprette databasen '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)" - pol "Nie mona stworzy bazy danych '%-.64s' (Kod bdu: %d)" - por "No pode criar o banco de dados '%-.64s' (erro no. %d)" - rum "Nu pot sa creez baza de date '%-.64s' (Eroare: %d)" - rus " '%-.64s' (: %d)" - serbian "Ne mogu da kreiram bazu '%-.64s' (errno: %d)" - slo "Nemem vytvori databzu '%-.64s' (chybov kd: %d)" - spa "No puedo crear base de datos '%-.64s' (Error: %d)" - swe "Kan inte skapa databasen '%-.64s' (Felkod: %d)" - ukr " '%-.64s' (: %d)" + cze "Nemohu vytvo-Bit databzi '%-.64s' (chybov kd: %d)" + dan "Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)" + nla "Kan database '%-.64s' niet aanmaken (Errcode: %d)" + eng "Can't create database '%-.64s' (errno: %d)" + jps "'%-.64s' f[^x[X܂ (errno: %d)", + est "Ei suuda luua andmebaasi '%-.64s' (veakood: %d)" + fre "Ne peut crer la base '%-.64s' (Erreur %d)" + ger "Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)" + ita "Impossibile creare il database '%-.64s' (errno: %d)" + jpn "'%-.64s' ǡ١ޤ (errno: %d)" + kor "Ÿ̽ '%-.64s' ߽ϴ.. 
(ȣ: %d)" + nor "Kan ikke opprette databasen '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)" + pol "Nie mona stworzy bazy danych '%-.64s' (Kod bdu: %d)" + por "No pode criar o banco de dados '%-.64s' (erro no. %d)" + rum "Nu pot sa creez baza de date '%-.64s' (Eroare: %d)" + rus " '%-.64s' (: %d)" + serbian "Ne mogu da kreiram bazu '%-.64s' (errno: %d)" + slo "Nemem vytvori databzu '%-.64s' (chybov kd: %d)" + spa "No puedo crear base de datos '%-.64s' (Error: %d)" + swe "Kan inte skapa databasen '%-.64s' (Felkod: %d)" + ukr " '%-.64s' (: %d)" ER_DB_CREATE_EXISTS - cze "Nemohu vytvo-Bit databzi '%-.64s'; databze ji existuje" - dan "Kan ikke oprette databasen '%-.64s'; databasen eksisterer" - nla "Kan database '%-.64s' niet aanmaken; database bestaat reeds" - eng "Can't create database '%-.64s'; database exists" - jps "'%-.64s' f[^x[X܂.ɂ̃f[^x[X݂܂", - est "Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib" - fre "Ne peut crer la base '%-.64s'; elle existe dj" - ger "Kann Datenbank '%-.64s' nicht erzeugen. Datenbank existiert bereits" - greek " '%-.64s'; " - hun "Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik" - ita "Impossibile creare il database '%-.64s'; il database esiste" - jpn "'%-.64s' ǡ١ޤ.ˤΥǡ١¸ߤޤ" - kor "Ÿ̽ '%-.64s' ߽ϴ.. Ÿ̽ " - nor "Kan ikke opprette databasen '%-.64s'; databasen eksisterer" - norwegian-ny "Kan ikkje opprette databasen '%-.64s'; databasen eksisterer" - pol "Nie mona stworzy bazy danych '%-.64s'; baza danych ju istnieje" - por "No pode criar o banco de dados '%-.64s'; este banco de dados j existe" - rum "Nu pot sa creez baza de date '%-.64s'; baza de date exista deja" - rus " '%-.64s'. " - serbian "Ne mogu da kreiram bazu '%-.64s'; baza ve postoji." - slo "Nemem vytvori databzu '%-.64s'; databza existuje" - spa "No puedo crear base de datos '%-.64s'; la base de datos ya existe" - swe "Databasen '%-.64s' existerar redan" - ukr " '%-.64s'. դ" + cze "Nemohu vytvo-Bit databzi '%-.64s'; databze ji existuje" + dan "Kan ikke oprette databasen '%-.64s'; databasen eksisterer" + nla "Kan database '%-.64s' niet aanmaken; database bestaat reeds" + eng "Can't create database '%-.64s'; database exists" + jps "'%-.64s' f[^x[X܂.ɂ̃f[^x[X݂܂", + est "Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib" + fre "Ne peut crer la base '%-.64s'; elle existe dj" + ger "Kann Datenbank '%-.64s' nicht erzeugen. Datenbank existiert bereits" + greek " '%-.64s'; " + hun "Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik" + ita "Impossibile creare il database '%-.64s'; il database esiste" + jpn "'%-.64s' ǡ١ޤ.ˤΥǡ١¸ߤޤ" + kor "Ÿ̽ '%-.64s' ߽ϴ.. Ÿ̽ " + nor "Kan ikke opprette databasen '%-.64s'; databasen eksisterer" + norwegian-ny "Kan ikkje opprette databasen '%-.64s'; databasen eksisterer" + pol "Nie mona stworzy bazy danych '%-.64s'; baza danych ju istnieje" + por "No pode criar o banco de dados '%-.64s'; este banco de dados j existe" + rum "Nu pot sa creez baza de date '%-.64s'; baza de date exista deja" + rus " '%-.64s'. " + serbian "Ne mogu da kreiram bazu '%-.64s'; baza ve postoji." + slo "Nemem vytvori databzu '%-.64s'; databza existuje" + spa "No puedo crear base de datos '%-.64s'; la base de datos ya existe" + swe "Databasen '%-.64s' existerar redan" + ukr " '%-.64s'. 
դ" ER_DB_DROP_EXISTS - cze "Nemohu zru-Bit databzi '%-.64s', databze neexistuje" - dan "Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke" - nla "Kan database '%-.64s' niet verwijderen; database bestaat niet" - eng "Can't drop database '%-.64s'; database doesn't exist" - jps "'%-.64s' f[^x[Xjł܂. ̃f[^x[XȂ̂ł.", - est "Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri" - fre "Ne peut effacer la base '%-.64s'; elle n'existe pas" - ger "Kann Datenbank '%-.64s' nicht lschen; Datenbank nicht vorhanden" - greek " '%-.64s'. " - hun "A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik" - ita "Impossibile cancellare '%-.64s'; il database non esiste" - jpn "'%-.64s' ǡ١˴Ǥޤ. Υǡ١ʤΤǤ." - kor "Ÿ̽ '%-.64s' ߽ϴ. Ÿ̽ " - nor "Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke" - norwegian-ny "Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje" - pol "Nie mona usun? bazy danych '%-.64s'; baza danych nie istnieje" - por "No pode eliminar o banco de dados '%-.64s'; este banco de dados no existe" - rum "Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta" - rus " '%-.64s'. " - serbian "Ne mogu da izbriem bazu '%-.64s'; baza ne postoji." - slo "Nemem zmaza databzu '%-.64s'; databza neexistuje" - spa "No puedo eliminar base de datos '%-.64s'; la base de datos no existe" - swe "Kan inte radera databasen '%-.64s'; databasen finns inte" - ukr " '%-.64s'. դ" + cze "Nemohu zru-Bit databzi '%-.64s', databze neexistuje" + dan "Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke" + nla "Kan database '%-.64s' niet verwijderen; database bestaat niet" + eng "Can't drop database '%-.64s'; database doesn't exist" + jps "'%-.64s' f[^x[Xjł܂. ̃f[^x[XȂ̂ł.", + est "Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri" + fre "Ne peut effacer la base '%-.64s'; elle n'existe pas" + ger "Kann Datenbank '%-.64s' nicht lschen; Datenbank nicht vorhanden" + greek " '%-.64s'. " + hun "A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik" + ita "Impossibile cancellare '%-.64s'; il database non esiste" + jpn "'%-.64s' ǡ١˴Ǥޤ. Υǡ١ʤΤǤ." + kor "Ÿ̽ '%-.64s' ߽ϴ. Ÿ̽ " + nor "Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke" + norwegian-ny "Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje" + pol "Nie mona usun? bazy danych '%-.64s'; baza danych nie istnieje" + por "No pode eliminar o banco de dados '%-.64s'; este banco de dados no existe" + rum "Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta" + rus " '%-.64s'. " + serbian "Ne mogu da izbriem bazu '%-.64s'; baza ne postoji." + slo "Nemem zmaza databzu '%-.64s'; databza neexistuje" + spa "No puedo eliminar base de datos '%-.64s'; la base de datos no existe" + swe "Kan inte radera databasen '%-.64s'; databasen finns inte" + ukr " '%-.64s'. 
դ" ER_DB_DROP_DELETE - cze "Chyba p-Bi ruen databze (nemohu vymazat '%-.64s', chyba %d)" - dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)" - nla "Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)" - eng "Error dropping database (can't delete '%-.64s', errno: %d)" - jps "f[^x[XjG[ ('%-.64s' 폜ł܂, errno: %d)", - est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)" - fre "Ne peut effacer la base '%-.64s' (erreur %d)" - ger "Fehler beim Lschen der Datenbank ('%-.64s' kann nicht gelscht werden, Fehler: %d)" - greek " ( '%-.64s', : %d)" - hun "Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)" - ita "Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)" - jpn "ǡ١˴顼 ('%-.64s' Ǥޤ, errno: %d)" - kor "Ÿ̽ ('%-.64s' ϴ, ȣ: %d)" - nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)" - norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)" - pol "B?d podczas usuwania bazy danych (nie mona usun? '%-.64s', b?d %d)" - por "Erro ao eliminar banco de dados (no pode eliminar '%-.64s' - erro no. %d)" - rum "Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)" - rus " ( '%-.64s', : %d)" - serbian "Ne mogu da izbriem bazu (ne mogu da izbriem '%-.64s', errno: %d)" - slo "Chyba pri mazan databzy (nemem zmaza '%-.64s', chybov kd: %d)" - spa "Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)" - swe "Fel vid radering av databasen (Kan inte radera '%-.64s'. Felkod: %d)" - ukr " ( '%-.64s', : %d)" + cze "Chyba p-Bi ruen databze (nemohu vymazat '%-.64s', chyba %d)" + dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)" + nla "Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)" + eng "Error dropping database (can't delete '%-.64s', errno: %d)" + jps "f[^x[XjG[ ('%-.64s' 폜ł܂, errno: %d)", + est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)" + fre "Ne peut effacer la base '%-.64s' (erreur %d)" + ger "Fehler beim Lschen der Datenbank ('%-.64s' kann nicht gelscht werden, Fehler: %d)" + greek " ( '%-.64s', : %d)" + hun "Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)" + ita "Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)" + jpn "ǡ١˴顼 ('%-.64s' Ǥޤ, errno: %d)" + kor "Ÿ̽ ('%-.64s' ϴ, ȣ: %d)" + nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)" + norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)" + pol "B?d podczas usuwania bazy danych (nie mona usun? '%-.64s', b?d %d)" + por "Erro ao eliminar banco de dados (no pode eliminar '%-.64s' - erro no. %d)" + rum "Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)" + rus " ( '%-.64s', : %d)" + serbian "Ne mogu da izbriem bazu (ne mogu da izbriem '%-.64s', errno: %d)" + slo "Chyba pri mazan databzy (nemem zmaza '%-.64s', chybov kd: %d)" + spa "Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)" + swe "Fel vid radering av databasen (Kan inte radera '%-.64s'. 
Felkod: %d)" + ukr " ( '%-.64s', : %d)" ER_DB_DROP_RMDIR - cze "Chyba p-Bi ruen databze (nemohu vymazat adres '%-.64s', chyba %d)" - dan "Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)" - nla "Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)" - eng "Error dropping database (can't rmdir '%-.64s', errno: %d)" - jps "f[^x[XjG[ ('%-.64s' rmdir ł܂, errno: %d)", - est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)" - fre "Erreur en effaant la base (rmdir '%-.64s', erreur %d)" - ger "Fehler beim Lschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelscht werden, Fehler: %d)" - greek " ( '%-.64s', : %d)" - hun "Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)" - ita "Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)" - jpn "ǡ١˴顼 ('%-.64s' rmdir Ǥޤ, errno: %d)" - kor "Ÿ̽ (rmdir '%-.64s' ϴ, ȣ: %d)" - nor "Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)" - norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)" - pol "B?d podczas usuwania bazy danych (nie mona wykona rmdir '%-.64s', b?d %d)" - por "Erro ao eliminar banco de dados (no pode remover diretrio '%-.64s' - erro no. %d)" - rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)" - rus " ( '%-.64s', : %d)" - serbian "Ne mogu da izbriem bazu (ne mogu da izbriem direktorijum '%-.64s', errno: %d)" - slo "Chyba pri mazan databzy (nemem vymaza adresr '%-.64s', chybov kd: %d)" - spa "Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)" - swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)" - ukr " ( '%-.64s', : %d)" + cze "Chyba p-Bi ruen databze (nemohu vymazat adres '%-.64s', chyba %d)" + dan "Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)" + nla "Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)" + eng "Error dropping database (can't rmdir '%-.64s', errno: %d)" + jps "f[^x[XjG[ ('%-.64s' rmdir ł܂, errno: %d)", + est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)" + fre "Erreur en effaant la base (rmdir '%-.64s', erreur %d)" + ger "Fehler beim Lschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelscht werden, Fehler: %d)" + greek " ( '%-.64s', : %d)" + hun "Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)" + ita "Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)" + jpn "ǡ١˴顼 ('%-.64s' rmdir Ǥޤ, errno: %d)" + kor "Ÿ̽ (rmdir '%-.64s' ϴ, ȣ: %d)" + nor "Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)" + norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)" + pol "B?d podczas usuwania bazy danych (nie mona wykona rmdir '%-.64s', b?d %d)" + por "Erro ao eliminar banco de dados (no pode remover diretrio '%-.64s' - erro no. %d)" + rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)" + rus " ( '%-.64s', : %d)" + serbian "Ne mogu da izbriem bazu (ne mogu da izbriem direktorijum '%-.64s', errno: %d)" + slo "Chyba pri mazan databzy (nemem vymaza adresr '%-.64s', chybov kd: %d)" + spa "Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)" + swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. 
Felkod: %d)" + ukr " ( '%-.64s', : %d)" ER_CANT_DELETE_FILE - cze "Chyba p-Bi vmazu '%-.64s' (chybov kd: %d)" - dan "Fejl ved sletning af '%-.64s' (Fejlkode: %d)" - nla "Fout bij het verwijderen van '%-.64s' (Errcode: %d)" - eng "Error on delete of '%-.64s' (errno: %d)" - jps "'%-.64s' ̍폜G[ (errno: %d)", - est "Viga '%-.64s' kustutamisel (veakood: %d)" - fre "Erreur en effaant '%-.64s' (Errcode: %d)" - ger "Fehler beim Lschen von '%-.64s' (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "Torlesi hiba: '%-.64s' (hibakod: %d)" - ita "Errore durante la cancellazione di '%-.64s' (errno: %d)" - jpn "'%-.64s' κ顼 (errno: %d)" - kor "'%-.64s' (ȣ: %d)" - nor "Feil ved sletting av '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved sletting av '%-.64s' (Feilkode: %d)" - pol "B?d podczas usuwania '%-.64s' (Kod bdu: %d)" - por "Erro na remoo de '%-.64s' (erro no. %d)" - rum "Eroare incercind sa delete '%-.64s' (Eroare: %d)" - rus " '%-.64s' (: %d)" - serbian "Greka pri brisanju '%-.64s' (errno: %d)" - slo "Chyba pri mazan '%-.64s' (chybov kd: %d)" - spa "Error en el borrado de '%-.64s' (Error: %d)" - swe "Kan inte radera filen '%-.64s' (Felkod: %d)" - ukr " '%-.64s' (: %d)" + cze "Chyba p-Bi vmazu '%-.64s' (chybov kd: %d)" + dan "Fejl ved sletning af '%-.64s' (Fejlkode: %d)" + nla "Fout bij het verwijderen van '%-.64s' (Errcode: %d)" + eng "Error on delete of '%-.64s' (errno: %d)" + jps "'%-.64s' ̍폜G[ (errno: %d)", + est "Viga '%-.64s' kustutamisel (veakood: %d)" + fre "Erreur en effaant '%-.64s' (Errcode: %d)" + ger "Fehler beim Lschen von '%-.64s' (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "Torlesi hiba: '%-.64s' (hibakod: %d)" + ita "Errore durante la cancellazione di '%-.64s' (errno: %d)" + jpn "'%-.64s' κ顼 (errno: %d)" + kor "'%-.64s' (ȣ: %d)" + nor "Feil ved sletting av '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved sletting av '%-.64s' (Feilkode: %d)" + pol "B?d podczas usuwania '%-.64s' (Kod bdu: %d)" + por "Erro na remoo de '%-.64s' (erro no. %d)" + rum "Eroare incercind sa delete '%-.64s' (Eroare: %d)" + rus " '%-.64s' (: %d)" + serbian "Greka pri brisanju '%-.64s' (errno: %d)" + slo "Chyba pri mazan '%-.64s' (chybov kd: %d)" + spa "Error en el borrado de '%-.64s' (Error: %d)" + swe "Kan inte radera filen '%-.64s' (Felkod: %d)" + ukr " '%-.64s' (: %d)" ER_CANT_FIND_SYSTEM_REC - cze "Nemohu -Bst zznam v systmov tabulce" - dan "Kan ikke lse posten i systemfolderen" - nla "Kan record niet lezen in de systeem tabel" - eng "Can't read record in system table" - jps "system table ̃R[hǂގł܂ł", - est "Ei suuda lugeda kirjet ssteemsest tabelist" - fre "Ne peut lire un enregistrement de la table 'system'" - ger "Datensatz in der Systemtabelle nicht lesbar" - greek " " - hun "Nem olvashato rekord a rendszertablaban" - ita "Impossibile leggere il record dalla tabella di sistema" - jpn "system table Υ쥳ɤɤǤޤǤ" - kor "system ̺ ڵ带 ϴ." 
- nor "Kan ikke lese posten i systemkatalogen" - norwegian-ny "Kan ikkje lese posten i systemkatalogen" - pol "Nie mona odczyta rekordu z tabeli systemowej" - por "No pode ler um registro numa tabela do sistema" - rum "Nu pot sa citesc cimpurile in tabla de system (system table)" - rus " " - serbian "Ne mogu da proitam slog iz sistemske tabele" - slo "Nemem ta zznam v systmovej tabuke" - spa "No puedo leer el registro en la tabla del sistema" - swe "Hittar inte posten i systemregistret" - ukr " ϧ æ" + cze "Nemohu -Bst zznam v systmov tabulce" + dan "Kan ikke lse posten i systemfolderen" + nla "Kan record niet lezen in de systeem tabel" + eng "Can't read record in system table" + jps "system table ̃R[hǂގł܂ł", + est "Ei suuda lugeda kirjet ssteemsest tabelist" + fre "Ne peut lire un enregistrement de la table 'system'" + ger "Datensatz in der Systemtabelle nicht lesbar" + greek " " + hun "Nem olvashato rekord a rendszertablaban" + ita "Impossibile leggere il record dalla tabella di sistema" + jpn "system table Υ쥳ɤɤǤޤǤ" + kor "system ̺ ڵ带 ϴ." + nor "Kan ikke lese posten i systemkatalogen" + norwegian-ny "Kan ikkje lese posten i systemkatalogen" + pol "Nie mona odczyta rekordu z tabeli systemowej" + por "No pode ler um registro numa tabela do sistema" + rum "Nu pot sa citesc cimpurile in tabla de system (system table)" + rus " " + serbian "Ne mogu da proitam slog iz sistemske tabele" + slo "Nemem ta zznam v systmovej tabuke" + spa "No puedo leer el registro en la tabla del sistema" + swe "Hittar inte posten i systemregistret" + ukr " ϧ æ" ER_CANT_GET_STAT - cze "Nemohu z-Bskat stav '%-.64s' (chybov kd: %d)" - dan "Kan ikke lse status af '%-.64s' (Fejlkode: %d)" - nla "Kan de status niet krijgen van '%-.64s' (Errcode: %d)" - eng "Can't get status of '%-.200s' (errno: %d)" - jps "'%-.64s' ̃XeC^X܂. (errno: %d)", - est "Ei suuda lugeda '%-.64s' olekut (veakood: %d)" - fre "Ne peut obtenir le status de '%-.64s' (Errcode: %d)" - ger "Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)" - ita "Impossibile leggere lo stato di '%-.64s' (errno: %d)" - jpn "'%-.64s' Υƥޤ. (errno: %d)" - kor "'%-.64s' ¸ ߽ϴ. (ȣ: %d)" - nor "Kan ikke lese statusen til '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)" - pol "Nie mona otrzyma statusu '%-.64s' (Kod bdu: %d)" - por "No pode obter o status de '%-.64s' (erro no. %d)" - rum "Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)" - rus " '%-.64s' (: %d)" - serbian "Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)" - slo "Nemem zisti stav '%-.64s' (chybov kd: %d)" - spa "No puedo obtener el estado de '%-.64s' (Error: %d)" - swe "Kan inte lsa filinformationen (stat) frn '%-.64s' (Felkod: %d)" - ukr " '%-.64s' (: %d)" + cze "Nemohu z-Bskat stav '%-.64s' (chybov kd: %d)" + dan "Kan ikke lse status af '%-.64s' (Fejlkode: %d)" + nla "Kan de status niet krijgen van '%-.64s' (Errcode: %d)" + eng "Can't get status of '%-.200s' (errno: %d)" + jps "'%-.64s' ̃XeC^X܂. (errno: %d)", + est "Ei suuda lugeda '%-.64s' olekut (veakood: %d)" + fre "Ne peut obtenir le status de '%-.64s' (Errcode: %d)" + ger "Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)" + ita "Impossibile leggere lo stato di '%-.64s' (errno: %d)" + jpn "'%-.64s' Υƥޤ. (errno: %d)" + kor "'%-.64s' ¸ ߽ϴ. 
(ȣ: %d)" + nor "Kan ikke lese statusen til '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)" + pol "Nie mona otrzyma statusu '%-.64s' (Kod bdu: %d)" + por "No pode obter o status de '%-.64s' (erro no. %d)" + rum "Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)" + rus " '%-.64s' (: %d)" + serbian "Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)" + slo "Nemem zisti stav '%-.64s' (chybov kd: %d)" + spa "No puedo obtener el estado de '%-.64s' (Error: %d)" + swe "Kan inte lsa filinformationen (stat) frn '%-.64s' (Felkod: %d)" + ukr " '%-.64s' (: %d)" ER_CANT_GET_WD - cze "Chyba p-Bi zjiovn pracovn adres (chybov kd: %d)" - dan "Kan ikke lse aktive folder (Fejlkode: %d)" - nla "Kan de werkdirectory niet krijgen (Errcode: %d)" - eng "Can't get working directory (errno: %d)" - jps "working directory 𓾂鎖ł܂ł (errno: %d)", - est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)" - fre "Ne peut obtenir le rpertoire de travail (Errcode: %d)" - ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)" - greek " ( : %d)" - hun "A munkakonyvtar nem allapithato meg (hibakod: %d)" - ita "Impossibile leggere la directory di lavoro (errno: %d)" - jpn "working directory ǤޤǤ (errno: %d)" - kor " 丮 ã ߽ϴ. (ȣ: %d)" - nor "Kan ikke lese aktiv katalog(Feilkode: %d)" - norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %d)" - pol "Nie mona rozpozna aktualnego katalogu (Kod bdu: %d)" - por "No pode obter o diretrio corrente (erro no. %d)" - rum "Nu pot sa obtin directorul current (working directory) (Eroare: %d)" - rus " (: %d)" - serbian "Ne mogu da dobijem trenutni direktorijum (errno: %d)" - slo "Nemem zisti pracovn adresr (chybov kd: %d)" - spa "No puedo acceder al directorio (Error: %d)" - swe "Kan inte inte lsa aktivt bibliotek. (Felkod: %d)" - ukr " (: %d)" + cze "Chyba p-Bi zjiovn pracovn adres (chybov kd: %d)" + dan "Kan ikke lse aktive folder (Fejlkode: %d)" + nla "Kan de werkdirectory niet krijgen (Errcode: %d)" + eng "Can't get working directory (errno: %d)" + jps "working directory 𓾂鎖ł܂ł (errno: %d)", + est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)" + fre "Ne peut obtenir le rpertoire de travail (Errcode: %d)" + ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)" + greek " ( : %d)" + hun "A munkakonyvtar nem allapithato meg (hibakod: %d)" + ita "Impossibile leggere la directory di lavoro (errno: %d)" + jpn "working directory ǤޤǤ (errno: %d)" + kor " 丮 ã ߽ϴ. (ȣ: %d)" + nor "Kan ikke lese aktiv katalog(Feilkode: %d)" + norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %d)" + pol "Nie mona rozpozna aktualnego katalogu (Kod bdu: %d)" + por "No pode obter o diretrio corrente (erro no. %d)" + rum "Nu pot sa obtin directorul current (working directory) (Eroare: %d)" + rus " (: %d)" + serbian "Ne mogu da dobijem trenutni direktorijum (errno: %d)" + slo "Nemem zisti pracovn adresr (chybov kd: %d)" + spa "No puedo acceder al directorio (Error: %d)" + swe "Kan inte inte lsa aktivt bibliotek. (Felkod: %d)" + ukr " (: %d)" ER_CANT_LOCK - cze "Nemohu uzamknout soubor (chybov-B kd: %d)" - dan "Kan ikke lse fil (Fejlkode: %d)" - nla "Kan de file niet blokeren (Errcode: %d)" - eng "Can't lock file (errno: %d)" - jps "t@CbNł܂ (errno: %d)", - est "Ei suuda lukustada faili (veakood: %d)" - fre "Ne peut verrouiller le fichier (Errcode: %d)" - ger "Datei kann nicht gesperrt werden (Fehler: %d)" - greek " ( : %d)" - hun "A file nem zarolhato. 
(hibakod: %d)" - ita "Impossibile il locking il file (errno: %d)" - jpn "եåǤޤ (errno: %d)" - kor "ȭ (lock) ߽ϴ. (ȣ: %d)" - nor "Kan ikke lse fila (Feilkode: %d)" - norwegian-ny "Kan ikkje lse fila (Feilkode: %d)" - pol "Nie mona zablokowa pliku (Kod bdu: %d)" - por "No pode travar o arquivo (erro no. %d)" - rum "Nu pot sa lock fisierul (Eroare: %d)" - rus " (: %d)" - serbian "Ne mogu da zakljuam file (errno: %d)" - slo "Nemem zamkn sbor (chybov kd: %d)" - spa "No puedo bloquear archivo: (Error: %d)" - swe "Kan inte lsa filen. (Felkod: %d)" - ukr " (: %d)" + cze "Nemohu uzamknout soubor (chybov-B kd: %d)" + dan "Kan ikke lse fil (Fejlkode: %d)" + nla "Kan de file niet blokeren (Errcode: %d)" + eng "Can't lock file (errno: %d)" + jps "t@CbNł܂ (errno: %d)", + est "Ei suuda lukustada faili (veakood: %d)" + fre "Ne peut verrouiller le fichier (Errcode: %d)" + ger "Datei kann nicht gesperrt werden (Fehler: %d)" + greek " ( : %d)" + hun "A file nem zarolhato. (hibakod: %d)" + ita "Impossibile il locking il file (errno: %d)" + jpn "եåǤޤ (errno: %d)" + kor "ȭ (lock) ߽ϴ. (ȣ: %d)" + nor "Kan ikke lse fila (Feilkode: %d)" + norwegian-ny "Kan ikkje lse fila (Feilkode: %d)" + pol "Nie mona zablokowa pliku (Kod bdu: %d)" + por "No pode travar o arquivo (erro no. %d)" + rum "Nu pot sa lock fisierul (Eroare: %d)" + rus " (: %d)" + serbian "Ne mogu da zakljuam file (errno: %d)" + slo "Nemem zamkn sbor (chybov kd: %d)" + spa "No puedo bloquear archivo: (Error: %d)" + swe "Kan inte lsa filen. (Felkod: %d)" + ukr " (: %d)" ER_CANT_OPEN_FILE - cze "Nemohu otev-Bt soubor '%-.64s' (chybov kd: %d)" - dan "Kan ikke bne fil: '%-.64s' (Fejlkode: %d)" - nla "Kan de file '%-.64s' niet openen (Errcode: %d)" - eng "Can't open file: '%-.200s' (errno: %d)" - jps "'%-.64s' t@CJł܂ (errno: %d)", - est "Ei suuda avada faili '%-.64s' (veakood: %d)" - fre "Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)" - ger "Kann Datei '%-.64s' nicht ffnen (Fehler: %d)" - greek " : '%-.64s' ( : %d)" - hun "A '%-.64s' file nem nyithato meg (hibakod: %d)" - ita "Impossibile aprire il file: '%-.64s' (errno: %d)" - jpn "'%-.64s' ե򳫤Ǥޤ (errno: %d)" - kor "ȭ ߽ϴ.: '%-.64s' (ȣ: %d)" - nor "Kan ikke pne fila: '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje pne fila: '%-.64s' (Feilkode: %d)" - pol "Nie mona otworzy pliku: '%-.64s' (Kod bdu: %d)" - por "No pode abrir o arquivo '%-.64s' (erro no. %d)" - rum "Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)" - rus " : '%-.64s' (: %d)" - serbian "Ne mogu da otvorim file: '%-.64s' (errno: %d)" - slo "Nemem otvori sbor: '%-.64s' (chybov kd: %d)" - spa "No puedo abrir archivo: '%-.64s' (Error: %d)" - swe "Kan inte anvnda '%-.64s' (Felkod: %d)" - ukr " צ : '%-.64s' (: %d)" + cze "Nemohu otev-Bt soubor '%-.64s' (chybov kd: %d)" + dan "Kan ikke bne fil: '%-.64s' (Fejlkode: %d)" + nla "Kan de file '%-.64s' niet openen (Errcode: %d)" + eng "Can't open file: '%-.200s' (errno: %d)" + jps "'%-.64s' t@CJł܂ (errno: %d)", + est "Ei suuda avada faili '%-.64s' (veakood: %d)" + fre "Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)" + ger "Kann Datei '%-.64s' nicht ffnen (Fehler: %d)" + greek " : '%-.64s' ( : %d)" + hun "A '%-.64s' file nem nyithato meg (hibakod: %d)" + ita "Impossibile aprire il file: '%-.64s' (errno: %d)" + jpn "'%-.64s' ե򳫤Ǥޤ (errno: %d)" + kor "ȭ ߽ϴ.: '%-.64s' (ȣ: %d)" + nor "Kan ikke pne fila: '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje pne fila: '%-.64s' (Feilkode: %d)" + pol "Nie mona otworzy pliku: '%-.64s' (Kod bdu: %d)" + por "No pode abrir o arquivo '%-.64s' (erro no. 
%d)" + rum "Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)" + rus " : '%-.64s' (: %d)" + serbian "Ne mogu da otvorim file: '%-.64s' (errno: %d)" + slo "Nemem otvori sbor: '%-.64s' (chybov kd: %d)" + spa "No puedo abrir archivo: '%-.64s' (Error: %d)" + swe "Kan inte anvnda '%-.64s' (Felkod: %d)" + ukr " צ : '%-.64s' (: %d)" ER_FILE_NOT_FOUND - cze "Nemohu naj-Bt soubor '%-.64s' (chybov kd: %d)" - dan "Kan ikke finde fila: '%-.64s' (Fejlkode: %d)" - nla "Kan de file: '%-.64s' niet vinden (Errcode: %d)" - eng "Can't find file: '%-.200s' (errno: %d)" - jps "'%-.64s' t@Ct鎖ł܂.(errno: %d)", - est "Ei suuda leida faili '%-.64s' (veakood: %d)" - fre "Ne peut trouver le fichier: '%-.64s' (Errcode: %d)" - ger "Kann Datei '%-.64s' nicht finden (Fehler: %d)" - greek " : '%-.64s' ( : %d)" - hun "A(z) '%-.64s' file nem talalhato (hibakod: %d)" - ita "Impossibile trovare il file: '%-.64s' (errno: %d)" - jpn "'%-.64s' եդǤޤ.(errno: %d)" - kor "ȭ ã ߽ϴ.: '%-.64s' (ȣ: %d)" - nor "Kan ikke finne fila: '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje finne fila: '%-.64s' (Feilkode: %d)" - pol "Nie mona znale pliku: '%-.64s' (Kod bdu: %d)" - por "No pode encontrar o arquivo '%-.64s' (erro no. %d)" - rum "Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)" - rus " : '%-.64s' (: %d)" - serbian "Ne mogu da pronaem file: '%-.64s' (errno: %d)" - slo "Nemem njs sbor: '%-.64s' (chybov kd: %d)" - spa "No puedo encontrar archivo: '%-.64s' (Error: %d)" - swe "Hittar inte filen '%-.64s' (Felkod: %d)" - ukr " : '%-.64s' (: %d)" + cze "Nemohu naj-Bt soubor '%-.64s' (chybov kd: %d)" + dan "Kan ikke finde fila: '%-.64s' (Fejlkode: %d)" + nla "Kan de file: '%-.64s' niet vinden (Errcode: %d)" + eng "Can't find file: '%-.200s' (errno: %d)" + jps "'%-.64s' t@Ct鎖ł܂.(errno: %d)", + est "Ei suuda leida faili '%-.64s' (veakood: %d)" + fre "Ne peut trouver le fichier: '%-.64s' (Errcode: %d)" + ger "Kann Datei '%-.64s' nicht finden (Fehler: %d)" + greek " : '%-.64s' ( : %d)" + hun "A(z) '%-.64s' file nem talalhato (hibakod: %d)" + ita "Impossibile trovare il file: '%-.64s' (errno: %d)" + jpn "'%-.64s' եդǤޤ.(errno: %d)" + kor "ȭ ã ߽ϴ.: '%-.64s' (ȣ: %d)" + nor "Kan ikke finne fila: '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje finne fila: '%-.64s' (Feilkode: %d)" + pol "Nie mona znale pliku: '%-.64s' (Kod bdu: %d)" + por "No pode encontrar o arquivo '%-.64s' (erro no. %d)" + rum "Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)" + rus " : '%-.64s' (: %d)" + serbian "Ne mogu da pronaem file: '%-.64s' (errno: %d)" + slo "Nemem njs sbor: '%-.64s' (chybov kd: %d)" + spa "No puedo encontrar archivo: '%-.64s' (Error: %d)" + swe "Hittar inte filen '%-.64s' (Felkod: %d)" + ukr " : '%-.64s' (: %d)" ER_CANT_READ_DIR - cze "Nemohu -Bst adres '%-.64s' (chybov kd: %d)" - dan "Kan ikke lse folder '%-.64s' (Fejlkode: %d)" - nla "Kan de directory niet lezen van '%-.64s' (Errcode: %d)" - eng "Can't read dir of '%-.64s' (errno: %d)" - jps "'%-.64s' fBNgǂ߂܂.(errno: %d)", - est "Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)" - fre "Ne peut lire le rpertoire de '%-.64s' (Errcode: %d)" - ger "Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)" - ita "Impossibile leggere la directory di '%-.64s' (errno: %d)" - jpn "'%-.64s' ǥ쥯ȥ꤬ɤޤ.(errno: %d)" - kor "'%-.64s'丮 ߽ϴ. 
(ȣ: %d)" - nor "Kan ikke lese katalogen '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)" - pol "Nie mona odczyta katalogu '%-.64s' (Kod bdu: %d)" - por "No pode ler o diretrio de '%-.64s' (erro no. %d)" - rum "Nu pot sa citesc directorul '%-.64s' (Eroare: %d)" - rus " '%-.64s' (: %d)" - serbian "Ne mogu da proitam direktorijum '%-.64s' (errno: %d)" - slo "Nemem ta adresr '%-.64s' (chybov kd: %d)" - spa "No puedo leer el directorio de '%-.64s' (Error: %d)" - swe "Kan inte lsa frn bibliotek '%-.64s' (Felkod: %d)" - ukr " '%-.64s' (: %d)" + cze "Nemohu -Bst adres '%-.64s' (chybov kd: %d)" + dan "Kan ikke lse folder '%-.64s' (Fejlkode: %d)" + nla "Kan de directory niet lezen van '%-.64s' (Errcode: %d)" + eng "Can't read dir of '%-.64s' (errno: %d)" + jps "'%-.64s' fBNgǂ߂܂.(errno: %d)", + est "Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)" + fre "Ne peut lire le rpertoire de '%-.64s' (Errcode: %d)" + ger "Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)" + ita "Impossibile leggere la directory di '%-.64s' (errno: %d)" + jpn "'%-.64s' ǥ쥯ȥ꤬ɤޤ.(errno: %d)" + kor "'%-.64s'丮 ߽ϴ. (ȣ: %d)" + nor "Kan ikke lese katalogen '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)" + pol "Nie mona odczyta katalogu '%-.64s' (Kod bdu: %d)" + por "No pode ler o diretrio de '%-.64s' (erro no. %d)" + rum "Nu pot sa citesc directorul '%-.64s' (Eroare: %d)" + rus " '%-.64s' (: %d)" + serbian "Ne mogu da proitam direktorijum '%-.64s' (errno: %d)" + slo "Nemem ta adresr '%-.64s' (chybov kd: %d)" + spa "No puedo leer el directorio de '%-.64s' (Error: %d)" + swe "Kan inte lsa frn bibliotek '%-.64s' (Felkod: %d)" + ukr " '%-.64s' (: %d)" ER_CANT_SET_WD - cze "Nemohu zm-Bnit adres na '%-.64s' (chybov kd: %d)" - dan "Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)" - nla "Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)" - eng "Can't change dir to '%-.64s' (errno: %d)" - jps "'%-.64s' fBNg chdir ł܂.(errno: %d)", - est "Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)" - fre "Ne peut changer le rpertoire pour '%-.64s' (Errcode: %d)" - ger "Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)" - ita "Impossibile cambiare la directory in '%-.64s' (errno: %d)" - jpn "'%-.64s' ǥ쥯ȥ chdir Ǥޤ.(errno: %d)" - kor "'%-.64s'丮 ̵ ϴ. (ȣ: %d)" - nor "Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)" - pol "Nie mona zmieni katalogu na '%-.64s' (Kod bdu: %d)" - por "No pode mudar para o diretrio '%-.64s' (erro no. 
%d)" - rum "Nu pot sa schimb directorul '%-.64s' (Eroare: %d)" - rus " '%-.64s' (: %d)" - serbian "Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)" - slo "Nemem vojs do adresra '%-.64s' (chybov kd: %d)" - spa "No puedo cambiar al directorio de '%-.64s' (Error: %d)" - swe "Kan inte byta till '%-.64s' (Felkod: %d)" - ukr " '%-.64s' (: %d)" + cze "Nemohu zm-Bnit adres na '%-.64s' (chybov kd: %d)" + dan "Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)" + nla "Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)" + eng "Can't change dir to '%-.64s' (errno: %d)" + jps "'%-.64s' fBNg chdir ł܂.(errno: %d)", + est "Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)" + fre "Ne peut changer le rpertoire pour '%-.64s' (Errcode: %d)" + ger "Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)" + ita "Impossibile cambiare la directory in '%-.64s' (errno: %d)" + jpn "'%-.64s' ǥ쥯ȥ chdir Ǥޤ.(errno: %d)" + kor "'%-.64s'丮 ̵ ϴ. (ȣ: %d)" + nor "Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)" + pol "Nie mona zmieni katalogu na '%-.64s' (Kod bdu: %d)" + por "No pode mudar para o diretrio '%-.64s' (erro no. %d)" + rum "Nu pot sa schimb directorul '%-.64s' (Eroare: %d)" + rus " '%-.64s' (: %d)" + serbian "Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)" + slo "Nemem vojs do adresra '%-.64s' (chybov kd: %d)" + spa "No puedo cambiar al directorio de '%-.64s' (Error: %d)" + swe "Kan inte byta till '%-.64s' (Felkod: %d)" + ukr " '%-.64s' (: %d)" ER_CHECKREAD - cze "Z-Bznam byl zmnn od poslednho ten v tabulce '%-.64s'" - dan "Posten er ndret siden sidste lsning '%-.64s'" - nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'" - eng "Record has changed since last read in table '%-.64s'" - est "Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik" - fre "Enregistrement modifi depuis sa dernire lecture dans la table '%-.64s'" - ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' gendert" - greek " '%-.64s'" - hun "A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota" - ita "Il record e` cambiato dall'ultima lettura della tabella '%-.64s'" - kor "̺ '%-.64s' Record Ǿϴ." 
- nor "Posten har blitt endret siden den ble lest '%-.64s'" - norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.64s'" - pol "Rekord zosta zmieniony od ostaniego odczytania z tabeli '%-.64s'" - por "Registro alterado desde a ltima leitura da tabela '%-.64s'" - rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'" - rus " '%-.64s'" - serbian "Slog je promenjen od zadnjeg itanja tabele '%-.64s'" - slo "Zznam bol zmenen od poslednho tania v tabuke '%-.64s'" - spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'" - swe "Posten har frndrats sedan den lstes i register '%-.64s'" - ukr " ͦ æ '%-.64s'" + cze "Z-Bznam byl zmnn od poslednho ten v tabulce '%-.64s'" + dan "Posten er ndret siden sidste lsning '%-.64s'" + nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'" + eng "Record has changed since last read in table '%-.64s'" + est "Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik" + fre "Enregistrement modifi depuis sa dernire lecture dans la table '%-.64s'" + ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' gendert" + greek " '%-.64s'" + hun "A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota" + ita "Il record e` cambiato dall'ultima lettura della tabella '%-.64s'" + kor "̺ '%-.64s' Record Ǿϴ." + nor "Posten har blitt endret siden den ble lest '%-.64s'" + norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.64s'" + pol "Rekord zosta zmieniony od ostaniego odczytania z tabeli '%-.64s'" + por "Registro alterado desde a ltima leitura da tabela '%-.64s'" + rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'" + rus " '%-.64s'" + serbian "Slog je promenjen od zadnjeg itanja tabele '%-.64s'" + slo "Zznam bol zmenen od poslednho tania v tabuke '%-.64s'" + spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'" + swe "Posten har frndrats sedan den lstes i register '%-.64s'" + ukr " ͦ æ '%-.64s'" ER_DISK_FULL - cze "Disk je pln-B (%s), ekm na uvolnn njakho msta ..." - dan "Ikke mere diskplads (%s). Venter p at f frigjort plads..." - nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt..." - eng "Disk full (%s); waiting for someone to free some space..." - jps "Disk full (%s). N炷܂ł܂Ă...", - est "Ketas tis (%s). Ootame kuni tekib vaba ruumi..." - fre "Disque plein (%s). J'attend que quelqu'un libre de l'espace..." - ger "Festplatte voll (%-.64s). Warte, bis jemand Platz schafft ..." - greek " (%s). , ..." - hun "A lemez megtelt (%s)." - ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..." - jpn "Disk full (%s). ï򸺤餹ޤǤޤäƤ..." - kor "Disk full (%s). ٸ ﶧ ٸϴ..." - nor "Ikke mer diskplass (%s). Venter p f frigjort plass..." - norwegian-ny "Ikkje meir diskplass (%s). Ventar p f frigjort plass..." - pol "Dysk peny (%s). Oczekiwanie na zwolnienie miejsca..." - por "Disco cheio (%s). Aguardando algum liberar algum espao..." - rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..." - rus " . (%s). , - ..." - serbian "Disk je pun (%s). ekam nekoga da doe i oslobodi neto mesta..." - slo "Disk je pln (%s), akm na uvonenie miesta..." - spa "Disco lleno (%s). Esperando para que se libere algo de espacio..." - swe "Disken r full (%s). Vntar tills det finns ledigt utrymme..." - ukr " (%s). , צ ͦ..." + cze "Disk je pln-B (%s), ekm na uvolnn njakho msta ..." + dan "Ikke mere diskplads (%s). Venter p at f frigjort plads..." + nla "Schijf vol (%s). 
Aan het wachten totdat er ruimte vrij wordt gemaakt..." + eng "Disk full (%s); waiting for someone to free some space..." + jps "Disk full (%s). N炷܂ł܂Ă...", + est "Ketas tis (%s). Ootame kuni tekib vaba ruumi..." + fre "Disque plein (%s). J'attend que quelqu'un libre de l'espace..." + ger "Festplatte voll (%-.64s). Warte, bis jemand Platz schafft ..." + greek " (%s). , ..." + hun "A lemez megtelt (%s)." + ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..." + jpn "Disk full (%s). ï򸺤餹ޤǤޤäƤ..." + kor "Disk full (%s). ٸ ﶧ ٸϴ..." + nor "Ikke mer diskplass (%s). Venter p f frigjort plass..." + norwegian-ny "Ikkje meir diskplass (%s). Ventar p f frigjort plass..." + pol "Dysk peny (%s). Oczekiwanie na zwolnienie miejsca..." + por "Disco cheio (%s). Aguardando algum liberar algum espao..." + rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..." + rus " . (%s). , - ..." + serbian "Disk je pun (%s). ekam nekoga da doe i oslobodi neto mesta..." + slo "Disk je pln (%s), akm na uvonenie miesta..." + spa "Disco lleno (%s). Esperando para que se libere algo de espacio..." + swe "Disken r full (%s). Vntar tills det finns ledigt utrymme..." + ukr " (%s). , צ ͦ..." ER_DUP_KEY 23000 - cze "Nemohu zapsat, zdvojen-B kl v tabulce '%-.64s'" - dan "Kan ikke skrive, flere ens ngler i tabellen '%-.64s'" - nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'" - eng "Can't write; duplicate key in table '%-.64s'" - jps "table '%-.64s' key dĂď߂܂", - est "Ei saa kirjutada, korduv vti tabelis '%-.64s'" - fre "Ecriture impossible, doublon dans une cl de la table '%-.64s'" - ger "Kann nicht speichern, Grund: doppelter Schlssel in Tabelle '%-.64s'" - greek " , '%-.64s'" - hun "Irasi hiba, duplikalt kulcs a '%-.64s' tablaban." - ita "Scrittura impossibile: chiave duplicata nella tabella '%-.64s'" - jpn "table '%-.64s' key ʣƤƽ񤭤ޤ" - kor " ϴ., ̺ '%-.64s' ߺ Ű" - nor "Kan ikke skrive, flere like nkler i tabellen '%-.64s'" - norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'" - pol "Nie mona zapisa, powtrzone klucze w tabeli '%-.64s'" - por "No pode gravar. Chave duplicada na tabela '%-.64s'" - rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'" - rus " , '%-.64s'" - serbian "Ne mogu da piem poto postoji duplirani klju u tabeli '%-.64s'" - slo "Nemem zapsa, duplikt ka v tabuke '%-.64s'" - spa "No puedo escribir, clave duplicada en la tabla '%-.64s'" - swe "Kan inte skriva, dubbel sknyckel i register '%-.64s'" - ukr " , æ '%-.64s'" + cze "Nemohu zapsat, zdvojen-B kl v tabulce '%-.64s'" + dan "Kan ikke skrive, flere ens ngler i tabellen '%-.64s'" + nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'" + eng "Can't write; duplicate key in table '%-.64s'" + jps "table '%-.64s' key dĂď߂܂", + est "Ei saa kirjutada, korduv vti tabelis '%-.64s'" + fre "Ecriture impossible, doublon dans une cl de la table '%-.64s'" + ger "Kann nicht speichern, Grund: doppelter Schlssel in Tabelle '%-.64s'" + greek " , '%-.64s'" + hun "Irasi hiba, duplikalt kulcs a '%-.64s' tablaban." + ita "Scrittura impossibile: chiave duplicata nella tabella '%-.64s'" + jpn "table '%-.64s' key ʣƤƽ񤭤ޤ" + kor " ϴ., ̺ '%-.64s' ߺ Ű" + nor "Kan ikke skrive, flere like nkler i tabellen '%-.64s'" + norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'" + pol "Nie mona zapisa, powtrzone klucze w tabeli '%-.64s'" + por "No pode gravar. 
Chave duplicada na tabela '%-.64s'" + rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'" + rus " , '%-.64s'" + serbian "Ne mogu da piem poto postoji duplirani klju u tabeli '%-.64s'" + slo "Nemem zapsa, duplikt ka v tabuke '%-.64s'" + spa "No puedo escribir, clave duplicada en la tabla '%-.64s'" + swe "Kan inte skriva, dubbel sknyckel i register '%-.64s'" + ukr " , æ '%-.64s'" ER_ERROR_ON_CLOSE - cze "Chyba p-Bi zavrn '%-.64s' (chybov kd: %d)" - dan "Fejl ved lukning af '%-.64s' (Fejlkode: %d)" - nla "Fout bij het sluiten van '%-.64s' (Errcode: %d)" - eng "Error on close of '%-.64s' (errno: %d)" - est "Viga faili '%-.64s' sulgemisel (veakood: %d)" - fre "Erreur a la fermeture de '%-.64s' (Errcode: %d)" - ger "Fehler beim Schlieen von '%-.64s' (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)" - ita "Errore durante la chiusura di '%-.64s' (errno: %d)" - kor "'%-.64s'ݴ (ȣ: %d)" - nor "Feil ved lukking av '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved lukking av '%-.64s' (Feilkode: %d)" - pol "B?d podczas zamykania '%-.64s' (Kod bdu: %d)" - por "Erro ao fechar '%-.64s' (erro no. %d)" - rum "Eroare inchizind '%-.64s' (errno: %d)" - rus " '%-.64s' (: %d)" - serbian "Greka pri zatvaranju '%-.64s' (errno: %d)" - slo "Chyba pri zatvran '%-.64s' (chybov kd: %d)" - spa "Error en el cierre de '%-.64s' (Error: %d)" - swe "Fick fel vid stngning av '%-.64s' (Felkod: %d)" - ukr " '%-.64s' (: %d)" + cze "Chyba p-Bi zavrn '%-.64s' (chybov kd: %d)" + dan "Fejl ved lukning af '%-.64s' (Fejlkode: %d)" + nla "Fout bij het sluiten van '%-.64s' (Errcode: %d)" + eng "Error on close of '%-.64s' (errno: %d)" + est "Viga faili '%-.64s' sulgemisel (veakood: %d)" + fre "Erreur a la fermeture de '%-.64s' (Errcode: %d)" + ger "Fehler beim Schlieen von '%-.64s' (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)" + ita "Errore durante la chiusura di '%-.64s' (errno: %d)" + kor "'%-.64s'ݴ (ȣ: %d)" + nor "Feil ved lukking av '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved lukking av '%-.64s' (Feilkode: %d)" + pol "B?d podczas zamykania '%-.64s' (Kod bdu: %d)" + por "Erro ao fechar '%-.64s' (erro no. %d)" + rum "Eroare inchizind '%-.64s' (errno: %d)" + rus " '%-.64s' (: %d)" + serbian "Greka pri zatvaranju '%-.64s' (errno: %d)" + slo "Chyba pri zatvran '%-.64s' (chybov kd: %d)" + spa "Error en el cierre de '%-.64s' (Error: %d)" + swe "Fick fel vid stngning av '%-.64s' (Felkod: %d)" + ukr " '%-.64s' (: %d)" ER_ERROR_ON_READ - cze "Chyba p-Bi ten souboru '%-.64s' (chybov kd: %d)" - dan "Fejl ved lsning af '%-.64s' (Fejlkode: %d)" - nla "Fout bij het lezen van file '%-.64s' (Errcode: %d)" - eng "Error reading file '%-.200s' (errno: %d)" - jps "'%-.64s' t@C̓ǂݍ݃G[ (errno: %d)", - est "Viga faili '%-.64s' lugemisel (veakood: %d)" - fre "Erreur en lecture du fichier '%-.64s' (Errcode: %d)" - ger "Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "Hiba a '%-.64s'file olvasasakor. (hibakod: %d)" - ita "Errore durante la lettura del file '%-.64s' (errno: %d)" - jpn "'%-.64s' եɤ߹ߥ顼 (errno: %d)" - kor "'%-.64s'ȭ б (ȣ: %d)" - nor "Feil ved lesing av '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved lesing av '%-.64s' (Feilkode: %d)" - pol "B?d podczas odczytu pliku '%-.64s' (Kod bdu: %d)" - por "Erro ao ler arquivo '%-.64s' (erro no. 
%d)" - rum "Eroare citind fisierul '%-.64s' (errno: %d)" - rus " '%-.64s' (: %d)" - serbian "Greka pri itanju file-a '%-.64s' (errno: %d)" - slo "Chyba pri tan sboru '%-.64s' (chybov kd: %d)" - spa "Error leyendo el fichero '%-.64s' (Error: %d)" - swe "Fick fel vid lsning av '%-.64s' (Felkod %d)" - ukr " '%-.64s' (: %d)" + cze "Chyba p-Bi ten souboru '%-.64s' (chybov kd: %d)" + dan "Fejl ved lsning af '%-.64s' (Fejlkode: %d)" + nla "Fout bij het lezen van file '%-.64s' (Errcode: %d)" + eng "Error reading file '%-.200s' (errno: %d)" + jps "'%-.64s' t@C̓ǂݍ݃G[ (errno: %d)", + est "Viga faili '%-.64s' lugemisel (veakood: %d)" + fre "Erreur en lecture du fichier '%-.64s' (Errcode: %d)" + ger "Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "Hiba a '%-.64s'file olvasasakor. (hibakod: %d)" + ita "Errore durante la lettura del file '%-.64s' (errno: %d)" + jpn "'%-.64s' եɤ߹ߥ顼 (errno: %d)" + kor "'%-.64s'ȭ б (ȣ: %d)" + nor "Feil ved lesing av '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved lesing av '%-.64s' (Feilkode: %d)" + pol "B?d podczas odczytu pliku '%-.64s' (Kod bdu: %d)" + por "Erro ao ler arquivo '%-.64s' (erro no. %d)" + rum "Eroare citind fisierul '%-.64s' (errno: %d)" + rus " '%-.64s' (: %d)" + serbian "Greka pri itanju file-a '%-.64s' (errno: %d)" + slo "Chyba pri tan sboru '%-.64s' (chybov kd: %d)" + spa "Error leyendo el fichero '%-.64s' (Error: %d)" + swe "Fick fel vid lsning av '%-.64s' (Felkod %d)" + ukr " '%-.64s' (: %d)" ER_ERROR_ON_RENAME - cze "Chyba p-Bi pejmenovn '%-.64s' na '%-.64s' (chybov kd: %d)" - dan "Fejl ved omdbning af '%-.64s' til '%-.64s' (Fejlkode: %d)" - nla "Fout bij het hernoemen van '%-.64s' naar '%-.64s' (Errcode: %d)" - eng "Error on rename of '%-.64s' to '%-.64s' (errno: %d)" - jps "'%-.64s' '%-.64s' rename ł܂ (errno: %d)", - est "Viga faili '%-.64s' mbernimetamisel '%-.64s'-ks (veakood: %d)" - fre "Erreur en renommant '%-.64s' en '%-.64s' (Errcode: %d)" - ger "Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)" - greek " '%-.64s' to '%-.64s' ( : %d)" - hun "Hiba a '%-.64s' file atnevezesekor. (hibakod: %d)" - ita "Errore durante la rinominazione da '%-.64s' a '%-.64s' (errno: %d)" - jpn "'%-.64s' '%-.64s' rename Ǥޤ (errno: %d)" - kor "'%-.64s' '%-.64s' ̸ (ȣ: %d)" - nor "Feil ved omdping av '%-.64s' til '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved omdyping av '%-.64s' til '%-.64s' (Feilkode: %d)" - pol "B?d podczas zmieniania nazwy '%-.64s' na '%-.64s' (Kod bdu: %d)" - por "Erro ao renomear '%-.64s' para '%-.64s' (erro no. 
%d)" - rum "Eroare incercind sa renumesc '%-.64s' in '%-.64s' (errno: %d)" - rus " '%-.64s' '%-.64s' (: %d)" - serbian "Greka pri promeni imena '%-.64s' na '%-.64s' (errno: %d)" - slo "Chyba pri premenovvan '%-.64s' na '%-.64s' (chybov kd: %d)" - spa "Error en el renombrado de '%-.64s' a '%-.64s' (Error: %d)" - swe "Kan inte byta namn frn '%-.64s' till '%-.64s' (Felkod: %d)" - ukr " '%-.64s' '%-.64s' (: %d)" + cze "Chyba p-Bi pejmenovn '%-.64s' na '%-.64s' (chybov kd: %d)" + dan "Fejl ved omdbning af '%-.64s' til '%-.64s' (Fejlkode: %d)" + nla "Fout bij het hernoemen van '%-.64s' naar '%-.64s' (Errcode: %d)" + eng "Error on rename of '%-.64s' to '%-.64s' (errno: %d)" + jps "'%-.64s' '%-.64s' rename ł܂ (errno: %d)", + est "Viga faili '%-.64s' mbernimetamisel '%-.64s'-ks (veakood: %d)" + fre "Erreur en renommant '%-.64s' en '%-.64s' (Errcode: %d)" + ger "Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)" + greek " '%-.64s' to '%-.64s' ( : %d)" + hun "Hiba a '%-.64s' file atnevezesekor. (hibakod: %d)" + ita "Errore durante la rinominazione da '%-.64s' a '%-.64s' (errno: %d)" + jpn "'%-.64s' '%-.64s' rename Ǥޤ (errno: %d)" + kor "'%-.64s' '%-.64s' ̸ (ȣ: %d)" + nor "Feil ved omdping av '%-.64s' til '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved omdyping av '%-.64s' til '%-.64s' (Feilkode: %d)" + pol "B?d podczas zmieniania nazwy '%-.64s' na '%-.64s' (Kod bdu: %d)" + por "Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)" + rum "Eroare incercind sa renumesc '%-.64s' in '%-.64s' (errno: %d)" + rus " '%-.64s' '%-.64s' (: %d)" + serbian "Greka pri promeni imena '%-.64s' na '%-.64s' (errno: %d)" + slo "Chyba pri premenovvan '%-.64s' na '%-.64s' (chybov kd: %d)" + spa "Error en el renombrado de '%-.64s' a '%-.64s' (Error: %d)" + swe "Kan inte byta namn frn '%-.64s' till '%-.64s' (Felkod: %d)" + ukr " '%-.64s' '%-.64s' (: %d)" ER_ERROR_ON_WRITE - cze "Chyba p-Bi zpisu do souboru '%-.64s' (chybov kd: %d)" - dan "Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)" - nla "Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)" - eng "Error writing file '%-.200s' (errno: %d)" - jps "'%-.64s' t@Cł܂ (errno: %d)", - est "Viga faili '%-.64s' kirjutamisel (veakood: %d)" - fre "Erreur d'criture du fichier '%-.64s' (Errcode: %d)" - ger "Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)" - greek " '%-.64s' ( : %d)" - hun "Hiba a '%-.64s' file irasakor. (hibakod: %d)" - ita "Errore durante la scrittura del file '%-.64s' (errno: %d)" - jpn "'%-.64s' ե񤯻Ǥޤ (errno: %d)" - kor "'%-.64s'ȭ (ȣ: %d)" - nor "Feil ved skriving av fila '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved skriving av fila '%-.64s' (Feilkode: %d)" - pol "B?d podczas zapisywania pliku '%-.64s' (Kod bdu: %d)" - por "Erro ao gravar arquivo '%-.64s' (erro no. 
%d)" - rum "Eroare scriind fisierul '%-.64s' (errno: %d)" - rus " '%-.64s' (: %d)" - serbian "Greka pri upisu '%-.64s' (errno: %d)" - slo "Chyba pri zpise do sboru '%-.64s' (chybov kd: %d)" - spa "Error escribiendo el archivo '%-.64s' (Error: %d)" - swe "Fick fel vid skrivning till '%-.64s' (Felkod %d)" - ukr " '%-.64s' (: %d)" + cze "Chyba p-Bi zpisu do souboru '%-.64s' (chybov kd: %d)" + dan "Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)" + nla "Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)" + eng "Error writing file '%-.200s' (errno: %d)" + jps "'%-.64s' t@Cł܂ (errno: %d)", + est "Viga faili '%-.64s' kirjutamisel (veakood: %d)" + fre "Erreur d'criture du fichier '%-.64s' (Errcode: %d)" + ger "Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)" + greek " '%-.64s' ( : %d)" + hun "Hiba a '%-.64s' file irasakor. (hibakod: %d)" + ita "Errore durante la scrittura del file '%-.64s' (errno: %d)" + jpn "'%-.64s' ե񤯻Ǥޤ (errno: %d)" + kor "'%-.64s'ȭ (ȣ: %d)" + nor "Feil ved skriving av fila '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved skriving av fila '%-.64s' (Feilkode: %d)" + pol "B?d podczas zapisywania pliku '%-.64s' (Kod bdu: %d)" + por "Erro ao gravar arquivo '%-.64s' (erro no. %d)" + rum "Eroare scriind fisierul '%-.64s' (errno: %d)" + rus " '%-.64s' (: %d)" + serbian "Greka pri upisu '%-.64s' (errno: %d)" + slo "Chyba pri zpise do sboru '%-.64s' (chybov kd: %d)" + spa "Error escribiendo el archivo '%-.64s' (Error: %d)" + swe "Fick fel vid skrivning till '%-.64s' (Felkod %d)" + ukr " '%-.64s' (: %d)" ER_FILE_USED - cze "'%-.64s' je zam-Ben proti zmnm" - dan "'%-.64s' er lst mod opdateringer" - nla "'%-.64s' is geblokeerd tegen veranderingen" - eng "'%-.64s' is locked against change" - jps "'%-.64s' ̓bNĂ܂", - est "'%-.64s' on lukustatud muudatuste vastu" - fre "'%-.64s' est verrouill contre les modifications" - ger "'%-.64s' ist fr nderungen gesperrt" - greek "'%-.64s' " - hun "'%-.64s' a valtoztatas ellen zarolva" - ita "'%-.64s' e` soggetto a lock contro i cambiamenti" - jpn "'%-.64s' ϥåƤޤ" - kor "'%-.64s' ϴ." - nor "'%-.64s' er lst mot oppdateringer" - norwegian-ny "'%-.64s' er lst mot oppdateringar" - pol "'%-.64s' jest zablokowany na wypadek zmian" - por "'%-.64s' est com travamento contra alteraes" - rum "'%-.64s' este blocat pentry schimbari (loccked against change)" - rus "'%-.64s' " - serbian "'%-.64s' je zakljuan za upis" - slo "'%-.64s' je zamknut proti zmenm" - spa "'%-.64s' esta bloqueado contra cambios" - swe "'%-.64s' r lst mot anvndning" - ukr "'%-.64s' ͦ" + cze "'%-.64s' je zam-Ben proti zmnm" + dan "'%-.64s' er lst mod opdateringer" + nla "'%-.64s' is geblokeerd tegen veranderingen" + eng "'%-.64s' is locked against change" + jps "'%-.64s' ̓bNĂ܂", + est "'%-.64s' on lukustatud muudatuste vastu" + fre "'%-.64s' est verrouill contre les modifications" + ger "'%-.64s' ist fr nderungen gesperrt" + greek "'%-.64s' " + hun "'%-.64s' a valtoztatas ellen zarolva" + ita "'%-.64s' e` soggetto a lock contro i cambiamenti" + jpn "'%-.64s' ϥåƤޤ" + kor "'%-.64s' ϴ." 
+ nor "'%-.64s' er lst mot oppdateringer" + norwegian-ny "'%-.64s' er lst mot oppdateringar" + pol "'%-.64s' jest zablokowany na wypadek zmian" + por "'%-.64s' est com travamento contra alteraes" + rum "'%-.64s' este blocat pentry schimbari (loccked against change)" + rus "'%-.64s' " + serbian "'%-.64s' je zakljuan za upis" + slo "'%-.64s' je zamknut proti zmenm" + spa "'%-.64s' esta bloqueado contra cambios" + swe "'%-.64s' r lst mot anvndning" + ukr "'%-.64s' ͦ" ER_FILSORT_ABORT - cze "T-Bdn perueno" - dan "Sortering afbrudt" - nla "Sorteren afgebroken" - eng "Sort aborted" - jps "Sort f", - est "Sorteerimine katkestatud" - fre "Tri alphabtique abandonn" - ger "Sortiervorgang abgebrochen" - greek " " - hun "Sikertelen rendezes" - ita "Operazione di ordinamento abbandonata" - jpn "Sort " - kor "Ʈ ߴܵǾϴ." - nor "Sortering avbrutt" - norwegian-ny "Sortering avbrote" - pol "Sortowanie przerwane" - por "Ordenao abortada" - rum "Sortare intrerupta" - rus " " - serbian "Sortiranje je prekinuto" - slo "Triedenie preruen" - spa "Ordeancion cancelada" - swe "Sorteringen avbruten" - ukr " " + cze "T-Bdn perueno" + dan "Sortering afbrudt" + nla "Sorteren afgebroken" + eng "Sort aborted" + jps "Sort f", + est "Sorteerimine katkestatud" + fre "Tri alphabtique abandonn" + ger "Sortiervorgang abgebrochen" + greek " " + hun "Sikertelen rendezes" + ita "Operazione di ordinamento abbandonata" + jpn "Sort " + kor "Ʈ ߴܵǾϴ." + nor "Sortering avbrutt" + norwegian-ny "Sortering avbrote" + pol "Sortowanie przerwane" + por "Ordenao abortada" + rum "Sortare intrerupta" + rus " " + serbian "Sortiranje je prekinuto" + slo "Triedenie preruen" + spa "Ordeancion cancelada" + swe "Sorteringen avbruten" + ukr " " ER_FORM_NOT_FOUND - cze "Pohled '%-.64s' pro '%-.64s' neexistuje" - dan "View '%-.64s' eksisterer ikke for '%-.64s'" - nla "View '%-.64s' bestaat niet voor '%-.64s'" - eng "View '%-.64s' doesn't exist for '%-.64s'" - jps "View '%-.64s' '%-.64s' ɒ`Ă܂", - est "Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks" - fre "La vue (View) '%-.64s' n'existe pas pour '%-.64s'" - ger "View '%-.64s' existiert fr '%-.64s' nicht" - greek " View '%-.64s' '%-.64s'" - hun "A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz" - ita "La view '%-.64s' non esiste per '%-.64s'" - jpn "View '%-.64s' '%-.64s' Ƥޤ" - kor " '%-.64s' '%-.64s' ϴ." - nor "View '%-.64s' eksisterer ikke for '%-.64s'" - norwegian-ny "View '%-.64s' eksisterar ikkje for '%-.64s'" - pol "Widok '%-.64s' nie istnieje dla '%-.64s'" - por "Viso '%-.64s' no existe para '%-.64s'" - rum "View '%-.64s' nu exista pentru '%-.64s'" - rus " '%-.64s' '%-.64s'" - serbian "View '%-.64s' ne postoji za '%-.64s'" - slo "Pohad '%-.64s' neexistuje pre '%-.64s'" - spa "La vista '%-.64s' no existe para '%-.64s'" - swe "Formulr '%-.64s' finns inte i '%-.64s'" - ukr " '%-.64s' դ '%-.64s'" + cze "Pohled '%-.64s' pro '%-.64s' neexistuje" + dan "View '%-.64s' eksisterer ikke for '%-.64s'" + nla "View '%-.64s' bestaat niet voor '%-.64s'" + eng "View '%-.64s' doesn't exist for '%-.64s'" + jps "View '%-.64s' '%-.64s' ɒ`Ă܂", + est "Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks" + fre "La vue (View) '%-.64s' n'existe pas pour '%-.64s'" + ger "View '%-.64s' existiert fr '%-.64s' nicht" + greek " View '%-.64s' '%-.64s'" + hun "A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz" + ita "La view '%-.64s' non esiste per '%-.64s'" + jpn "View '%-.64s' '%-.64s' Ƥޤ" + kor " '%-.64s' '%-.64s' ϴ." 
+ nor "View '%-.64s' eksisterer ikke for '%-.64s'" + norwegian-ny "View '%-.64s' eksisterar ikkje for '%-.64s'" + pol "Widok '%-.64s' nie istnieje dla '%-.64s'" + por "Viso '%-.64s' no existe para '%-.64s'" + rum "View '%-.64s' nu exista pentru '%-.64s'" + rus " '%-.64s' '%-.64s'" + serbian "View '%-.64s' ne postoji za '%-.64s'" + slo "Pohad '%-.64s' neexistuje pre '%-.64s'" + spa "La vista '%-.64s' no existe para '%-.64s'" + swe "Formulr '%-.64s' finns inte i '%-.64s'" + ukr " '%-.64s' դ '%-.64s'" ER_GET_ERRNO - cze "Obsluha tabulky vr-Btila chybu %d" - dan "Modtog fejl %d fra tabel hndteringen" - nla "Fout %d van tabel handler" - eng "Got error %d from storage engine" - est "Tabeli handler tagastas vea %d" - fre "Reu l'erreur %d du handler de la table" - ger "Fehler %d (Speicher-Engine)" - greek " %d (table handler)" - hun "%d hibajelzes a tablakezelotol" - ita "Rilevato l'errore %d dal gestore delle tabelle" - jpn "Got error %d from table handler" - kor "̺ handler %d ߻ Ͽϴ." - nor "Mottok feil %d fra tabell hndterer" - norwegian-ny "Mottok feil %d fra tabell handterar" - pol "Otrzymano b?d %d z obsugi tabeli" - por "Obteve erro %d no manipulador de tabelas" - rum "Eroarea %d obtinuta din handlerul tabelei" - rus " %d " - serbian "Handler tabela je vratio greku %d" - slo "Obsluha tabuky vrtila chybu %d" - spa "Error %d desde el manejador de la tabla" - swe "Fick felkod %d frn databashanteraren" - ukr " %d צ æ" + cze "Obsluha tabulky vr-Btila chybu %d" + dan "Modtog fejl %d fra tabel hndteringen" + nla "Fout %d van tabel handler" + eng "Got error %d from storage engine" + est "Tabeli handler tagastas vea %d" + fre "Reu l'erreur %d du handler de la table" + ger "Fehler %d (Speicher-Engine)" + greek " %d (table handler)" + hun "%d hibajelzes a tablakezelotol" + ita "Rilevato l'errore %d dal gestore delle tabelle" + jpn "Got error %d from table handler" + kor "̺ handler %d ߻ Ͽϴ." + nor "Mottok feil %d fra tabell hndterer" + norwegian-ny "Mottok feil %d fra tabell handterar" + pol "Otrzymano b?d %d z obsugi tabeli" + por "Obteve erro %d no manipulador de tabelas" + rum "Eroarea %d obtinuta din handlerul tabelei" + rus " %d " + serbian "Handler tabela je vratio greku %d" + slo "Obsluha tabuky vrtila chybu %d" + spa "Error %d desde el manejador de la tabla" + swe "Fick felkod %d frn databashanteraren" + ukr " %d צ æ" ER_ILLEGAL_HA - cze "Obsluha tabulky '%-.64s' nem-B tento parametr" - dan "Denne mulighed eksisterer ikke for tabeltypen '%-.64s'" - nla "Tabel handler voor '%-.64s' heeft deze optie niet" - eng "Table storage engine for '%-.64s' doesn't have this option" - est "Tabeli '%-.64s' handler ei toeta antud operatsiooni" - fre "Le handler de la table '%-.64s' n'a pas cette option" - ger "Diese Option gibt es nicht (Speicher-Engine fr '%-.64s')" - greek " (table handler) '%-.64s' " - hun "A(z) '%-.64s' tablakezelonek nincs ilyen opcioja" - ita "Il gestore delle tabelle per '%-.64s' non ha questa opzione" - jpn "Table handler for '%-.64s' doesn't have this option" - kor "'%-.64s' ̺ handler ̷ ɼ ϴ." 
- nor "Tabell hndtereren for '%-.64s' har ikke denne muligheten" - norwegian-ny "Tabell hndteraren for '%-.64s' har ikkje denne moglegheita" - pol "Obsuga tabeli '%-.64s' nie posiada tej opcji" - por "Manipulador de tabela para '%-.64s' no tem esta opo" - rum "Handlerul tabelei pentru '%-.64s' nu are aceasta optiune" - rus " '%-.64s' " - serbian "Handler tabela za '%-.64s' nema ovu opciju" - slo "Obsluha tabuky '%-.64s' nem tento parameter" - spa "El manejador de la tabla de '%-.64s' no tiene esta opcion" - swe "Registrets databas har inte denna facilitet" - ukr " æ '%-.64s' æ Ԧ" + cze "Obsluha tabulky '%-.64s' nem-B tento parametr" + dan "Denne mulighed eksisterer ikke for tabeltypen '%-.64s'" + nla "Tabel handler voor '%-.64s' heeft deze optie niet" + eng "Table storage engine for '%-.64s' doesn't have this option" + est "Tabeli '%-.64s' handler ei toeta antud operatsiooni" + fre "Le handler de la table '%-.64s' n'a pas cette option" + ger "Diese Option gibt es nicht (Speicher-Engine fr '%-.64s')" + greek " (table handler) '%-.64s' " + hun "A(z) '%-.64s' tablakezelonek nincs ilyen opcioja" + ita "Il gestore delle tabelle per '%-.64s' non ha questa opzione" + jpn "Table handler for '%-.64s' doesn't have this option" + kor "'%-.64s' ̺ handler ̷ ɼ ϴ." + nor "Tabell hndtereren for '%-.64s' har ikke denne muligheten" + norwegian-ny "Tabell hndteraren for '%-.64s' har ikkje denne moglegheita" + pol "Obsuga tabeli '%-.64s' nie posiada tej opcji" + por "Manipulador de tabela para '%-.64s' no tem esta opo" + rum "Handlerul tabelei pentru '%-.64s' nu are aceasta optiune" + rus " '%-.64s' " + serbian "Handler tabela za '%-.64s' nema ovu opciju" + slo "Obsluha tabuky '%-.64s' nem tento parameter" + spa "El manejador de la tabla de '%-.64s' no tiene esta opcion" + swe "Registrets databas har inte denna facilitet" + ukr " æ '%-.64s' æ Ԧ" ER_KEY_NOT_FOUND - cze "Nemohu naj-Bt zznam v '%-.64s'" - dan "Kan ikke finde posten i '%-.64s'" - nla "Kan record niet vinden in '%-.64s'" - eng "Can't find record in '%-.64s'" - jps "'%-.64s'̂ȂɃR[ht܂", - est "Ei suuda leida kirjet '%-.64s'-s" - fre "Ne peut trouver l'enregistrement dans '%-.64s'" - ger "Kann Datensatz in '%-.64s' nicht finden" - greek " '%-.64s'" - hun "Nem talalhato a rekord '%-.64s'-ben" - ita "Impossibile trovare il record in '%-.64s'" - jpn "'%-.64s'Τʤ˥쥳ɤդޤ" - kor "'%-.64s' ڵ带 ã ϴ." - nor "Kan ikke finne posten i '%-.64s'" - norwegian-ny "Kan ikkje finne posten i '%-.64s'" - pol "Nie mona znale rekordu w '%-.64s'" - por "No pode encontrar registro em '%-.64s'" - rum "Nu pot sa gasesc recordul in '%-.64s'" - rus " '%-.64s'" - serbian "Ne mogu da pronaem slog u '%-.64s'" - slo "Nemem njs zznam v '%-.64s'" - spa "No puedo encontrar el registro en '%-.64s'" - swe "Hittar inte posten" - ukr " '%-.64s'" + cze "Nemohu naj-Bt zznam v '%-.64s'" + dan "Kan ikke finde posten i '%-.64s'" + nla "Kan record niet vinden in '%-.64s'" + eng "Can't find record in '%-.64s'" + jps "'%-.64s'̂ȂɃR[ht܂", + est "Ei suuda leida kirjet '%-.64s'-s" + fre "Ne peut trouver l'enregistrement dans '%-.64s'" + ger "Kann Datensatz in '%-.64s' nicht finden" + greek " '%-.64s'" + hun "Nem talalhato a rekord '%-.64s'-ben" + ita "Impossibile trovare il record in '%-.64s'" + jpn "'%-.64s'Τʤ˥쥳ɤդޤ" + kor "'%-.64s' ڵ带 ã ϴ." 
+ nor "Kan ikke finne posten i '%-.64s'" + norwegian-ny "Kan ikkje finne posten i '%-.64s'" + pol "Nie mona znale rekordu w '%-.64s'" + por "No pode encontrar registro em '%-.64s'" + rum "Nu pot sa gasesc recordul in '%-.64s'" + rus " '%-.64s'" + serbian "Ne mogu da pronaem slog u '%-.64s'" + slo "Nemem njs zznam v '%-.64s'" + spa "No puedo encontrar el registro en '%-.64s'" + swe "Hittar inte posten" + ukr " '%-.64s'" ER_NOT_FORM_FILE - cze "Nespr-Bvn informace v souboru '%-.64s'" - dan "Forkert indhold i: '%-.64s'" - nla "Verkeerde info in file: '%-.64s'" - eng "Incorrect information in file: '%-.200s'" - jps "t@C '%-.64s' info ԈĂ悤ł", - est "Vigane informatsioon failis '%-.64s'" - fre "Information erronne dans le fichier: '%-.64s'" - ger "Falsche Information in Datei '%-.64s'" - greek " : '%-.64s'" - hun "Ervenytelen info a file-ban: '%-.64s'" - ita "Informazione errata nel file: '%-.64s'" - jpn "ե '%-.64s' info ְäƤ褦Ǥ" - kor "ȭ Ȯ : '%-.64s'" - nor "Feil informasjon i filen: '%-.64s'" - norwegian-ny "Feil informasjon i fila: '%-.64s'" - pol "Niewa?ciwa informacja w pliku: '%-.64s'" - por "Informao incorreta no arquivo '%-.64s'" - rum "Informatie incorecta in fisierul: '%-.64s'" - rus " '%-.64s'" - serbian "Pogrena informacija u file-u: '%-.64s'" - slo "Nesprvna informcia v sbore: '%-.64s'" - spa "Informacion erronea en el archivo: '%-.64s'" - swe "Felaktig fil: '%-.64s'" - ukr " æ ̦: '%-.64s'" + cze "Nespr-Bvn informace v souboru '%-.64s'" + dan "Forkert indhold i: '%-.64s'" + nla "Verkeerde info in file: '%-.64s'" + eng "Incorrect information in file: '%-.200s'" + jps "t@C '%-.64s' info ԈĂ悤ł", + est "Vigane informatsioon failis '%-.64s'" + fre "Information erronne dans le fichier: '%-.64s'" + ger "Falsche Information in Datei '%-.64s'" + greek " : '%-.64s'" + hun "Ervenytelen info a file-ban: '%-.64s'" + ita "Informazione errata nel file: '%-.64s'" + jpn "ե '%-.64s' info ְäƤ褦Ǥ" + kor "ȭ Ȯ : '%-.64s'" + nor "Feil informasjon i filen: '%-.64s'" + norwegian-ny "Feil informasjon i fila: '%-.64s'" + pol "Niewa?ciwa informacja w pliku: '%-.64s'" + por "Informao incorreta no arquivo '%-.64s'" + rum "Informatie incorecta in fisierul: '%-.64s'" + rus " '%-.64s'" + serbian "Pogrena informacija u file-u: '%-.64s'" + slo "Nesprvna informcia v sbore: '%-.64s'" + spa "Informacion erronea en el archivo: '%-.64s'" + swe "Felaktig fil: '%-.64s'" + ukr " æ ̦: '%-.64s'" ER_NOT_KEYFILE - cze "Nespr-Bvn kl pro tabulku '%-.64s'; pokuste se ho opravit" - dan "Fejl i indeksfilen til tabellen '%-.64s'; prv at reparere den" - nla "Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren" - eng "Incorrect key file for table '%-.200s'; try to repair it" - jps "'%-.64s' e[u key file ԈĂ悤ł. CĂ", - est "Tabeli '%-.64s' vtmefail on vigane; proovi seda parandada" - fre "Index corrompu dans la table: '%-.64s'; essayez de le rparer" - ger "Fehlerhafte Index-Datei fr Tabelle '%-.64s'; versuche zu reparieren" - greek " (key file) : '%-.64s'; , !" - hun "Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!" - ita "File chiave errato per la tabella : '%-.64s'; prova a riparalo" - jpn "'%-.64s' ơ֥ key file ְäƤ褦Ǥ. 򤷤Ƥ" - kor "'%-.64s' ̺ Ȯ Ű . Ͻÿ!" 
- nor "Tabellen '%-.64s' har feil i nkkelfilen; forsk reparer den" - norwegian-ny "Tabellen '%-.64s' har feil i nykkelfila; prv reparere den" - pol "Niewa?ciwy plik kluczy dla tabeli: '%-.64s'; sprbuj go naprawi" - por "Arquivo de ndice incorreto para tabela '%-.64s'; tente repar-lo" - rum "Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari" - rus " : '%-.64s'. " - serbian "Pogrean key file za tabelu: '%-.64s'; probajte da ga ispravite" - slo "Nesprvny k pre tabuku '%-.64s'; pokste sa ho opravi" - spa "Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo" - swe "Fatalt fel vid hantering av register '%-.64s'; kr en reparation" - ukr " æ: '%-.64s'; צ" + cze "Nespr-Bvn kl pro tabulku '%-.64s'; pokuste se ho opravit" + dan "Fejl i indeksfilen til tabellen '%-.64s'; prv at reparere den" + nla "Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren" + eng "Incorrect key file for table '%-.200s'; try to repair it" + jps "'%-.64s' e[u key file ԈĂ悤ł. CĂ", + est "Tabeli '%-.64s' vtmefail on vigane; proovi seda parandada" + fre "Index corrompu dans la table: '%-.64s'; essayez de le rparer" + ger "Fehlerhafte Index-Datei fr Tabelle '%-.64s'; versuche zu reparieren" + greek " (key file) : '%-.64s'; , !" + hun "Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!" + ita "File chiave errato per la tabella : '%-.64s'; prova a riparalo" + jpn "'%-.64s' ơ֥ key file ְäƤ褦Ǥ. 򤷤Ƥ" + kor "'%-.64s' ̺ Ȯ Ű . Ͻÿ!" + nor "Tabellen '%-.64s' har feil i nkkelfilen; forsk reparer den" + norwegian-ny "Tabellen '%-.64s' har feil i nykkelfila; prv reparere den" + pol "Niewa?ciwy plik kluczy dla tabeli: '%-.64s'; sprbuj go naprawi" + por "Arquivo de ndice incorreto para tabela '%-.64s'; tente repar-lo" + rum "Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari" + rus " : '%-.64s'. " + serbian "Pogrean key file za tabelu: '%-.64s'; probajte da ga ispravite" + slo "Nesprvny k pre tabuku '%-.64s'; pokste sa ho opravi" + spa "Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo" + swe "Fatalt fel vid hantering av register '%-.64s'; kr en reparation" + ukr " æ: '%-.64s'; צ" ER_OLD_KEYFILE - cze "Star-B klov soubor pro '%-.64s'; opravte ho." - dan "Gammel indeksfil for tabellen '%-.64s'; reparer den" - nla "Oude zoeksleutel file voor tabel '%-.64s'; repareer het!" - eng "Old key file for table '%-.64s'; repair it!" - jps "'%-.64s' e[u͌Â` key file ̂悤ł; CĂ", - est "Tabeli '%-.64s' vtmefail on aegunud; paranda see!" - fre "Vieux fichier d'index pour la table '%-.64s'; rparez le!" - ger "Alte Index-Datei fr Tabelle '%-.64s'. Bitte reparieren" - greek " (key file) '%-.64s'; , !" - hun "Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!" - ita "File chiave vecchio per la tabella '%-.64s'; riparalo!" - jpn "'%-.64s' ơ֥ϸŤ key file Τ褦Ǥ; 򤷤Ƥ" - kor "'%-.64s' ̺ Ű . Ͻÿ!" - nor "Gammel nkkelfil for tabellen '%-.64s'; reparer den!" - norwegian-ny "Gammel nykkelfil for tabellen '%-.64s'; reparer den!" - pol "Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!" - por "Arquivo de ndice desatualizado para tabela '%-.64s'; repare-o!" - rum "Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!" - rus " '%-.64s'; !" - serbian "Zastareo key file za tabelu '%-.64s'; ispravite ga" - slo "Star kov sbor pre '%-.64s'; opravte ho!" - spa "Clave de archivo antigua para la tabla '%-.64s'; reparelo!" - swe "Gammal nyckelfil '%-.64s'; reparera registret" - ukr " æ '%-.64s'; צ !" + cze "Star-B klov soubor pro '%-.64s'; opravte ho." 
+ dan "Gammel indeksfil for tabellen '%-.64s'; reparer den" + nla "Oude zoeksleutel file voor tabel '%-.64s'; repareer het!" + eng "Old key file for table '%-.64s'; repair it!" + jps "'%-.64s' e[u͌Â` key file ̂悤ł; CĂ", + est "Tabeli '%-.64s' vtmefail on aegunud; paranda see!" + fre "Vieux fichier d'index pour la table '%-.64s'; rparez le!" + ger "Alte Index-Datei fr Tabelle '%-.64s'. Bitte reparieren" + greek " (key file) '%-.64s'; , !" + hun "Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!" + ita "File chiave vecchio per la tabella '%-.64s'; riparalo!" + jpn "'%-.64s' ơ֥ϸŤ key file Τ褦Ǥ; 򤷤Ƥ" + kor "'%-.64s' ̺ Ű . Ͻÿ!" + nor "Gammel nkkelfil for tabellen '%-.64s'; reparer den!" + norwegian-ny "Gammel nykkelfil for tabellen '%-.64s'; reparer den!" + pol "Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!" + por "Arquivo de ndice desatualizado para tabela '%-.64s'; repare-o!" + rum "Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!" + rus " '%-.64s'; !" + serbian "Zastareo key file za tabelu '%-.64s'; ispravite ga" + slo "Star kov sbor pre '%-.64s'; opravte ho!" + spa "Clave de archivo antigua para la tabla '%-.64s'; reparelo!" + swe "Gammal nyckelfil '%-.64s'; reparera registret" + ukr " æ '%-.64s'; צ !" ER_OPEN_AS_READONLY - cze "'%-.64s' je jen pro -Bten" - dan "'%-.64s' er skrivebeskyttet" - nla "'%-.64s' is alleen leesbaar" - eng "Table '%-.64s' is read only" - jps "'%-.64s' ͓ǂݍݐpł", - est "Tabel '%-.64s' on ainult lugemiseks" - fre "'%-.64s' est en lecture seulement" - ger "Tabelle '%-.64s' ist nur lesbar" - greek "'%-.64s' " - hun "'%-.64s' irasvedett" - ita "'%-.64s' e` di sola lettura" - jpn "'%-.64s' ɤ߹ѤǤ" - kor "̺ '%-.64s' б Դϴ." - nor "'%-.64s' er skrivebeskyttet" - norwegian-ny "'%-.64s' er skrivetryggja" - pol "'%-.64s' jest tylko do odczytu" - por "Tabela '%-.64s' somente para leitura" - rum "Tabela '%-.64s' e read-only" - rus " '%-.64s' " - serbian "Tabelu '%-.64s' je dozvoljeno samo itati" - slo "'%-.64s' is ta only" - spa "'%-.64s' es de solo lectura" - swe "'%-.64s' r skyddad mot frndring" - ukr " '%-.64s' Ԧ " + cze "'%-.64s' je jen pro -Bten" + dan "'%-.64s' er skrivebeskyttet" + nla "'%-.64s' is alleen leesbaar" + eng "Table '%-.64s' is read only" + jps "'%-.64s' ͓ǂݍݐpł", + est "Tabel '%-.64s' on ainult lugemiseks" + fre "'%-.64s' est en lecture seulement" + ger "Tabelle '%-.64s' ist nur lesbar" + greek "'%-.64s' " + hun "'%-.64s' irasvedett" + ita "'%-.64s' e` di sola lettura" + jpn "'%-.64s' ɤ߹ѤǤ" + kor "̺ '%-.64s' б Դϴ." + nor "'%-.64s' er skrivebeskyttet" + norwegian-ny "'%-.64s' er skrivetryggja" + pol "'%-.64s' jest tylko do odczytu" + por "Tabela '%-.64s' somente para leitura" + rum "Tabela '%-.64s' e read-only" + rus " '%-.64s' " + serbian "Tabelu '%-.64s' je dozvoljeno samo itati" + slo "'%-.64s' is ta only" + spa "'%-.64s' es de solo lectura" + swe "'%-.64s' r skyddad mot frndring" + ukr " '%-.64s' Ԧ " ER_OUTOFMEMORY HY001 S1001 - cze "M-Blo pamti. Pestartujte daemona a zkuste znovu (je poteba %d byt)" - dan "Ikke mere hukommelse. Genstart serveren og prv igen (mangler %d bytes)" - nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)" - eng "Out of memory; restart server and try again (needed %d bytes)" - jps "Out of memory. f[X^[gĂ݂Ă (%d bytes Kv)", - est "Mlu sai otsa. Proovi MySQL uuesti kivitada (puudu ji %d baiti)" - fre "Manque de mmoire. Redmarrez le dmon et r-essayez (%d octets ncessaires)" - ger "Kein Speicher vorhanden (%d Bytes bentigt). Bitte Server neu starten" - greek " . 
, (demon) ( %d bytes)" - hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)" - ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)" - jpn "Out of memory. ǡꥹȤƤߤƤ (%d bytes ɬ)" - kor "Out of memory. ٽ Ͻÿ (needed %d bytes)" - nor "Ikke mer minne. Star p nytt tjenesten og prv igjen (trengte %d byter)" - norwegian-ny "Ikkje meir minne. Start p nytt tenesten og prv igjen (trengte %d bytar)" - pol "Zbyt mao pamici. Uruchom ponownie demona i sprbuj ponownie (potrzeba %d bajtw)" - por "Sem memria. Reinicie o programa e tente novamente (necessita de %d bytes)" - rum "Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)" - rus " . ( %d )" - serbian "Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)" - slo "Mlo pamti. Retartujte daemona a skste znova (je potrebnch %d bytov)" - spa "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)" - swe "Ovntat slut p minnet, starta om programmet och frsk p nytt (Behvde %d bytes)" - ukr " 'Ԧ. (Ҧ %d Ԧ)" + cze "M-Blo pamti. Pestartujte daemona a zkuste znovu (je poteba %d byt)" + dan "Ikke mere hukommelse. Genstart serveren og prv igen (mangler %d bytes)" + nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)" + eng "Out of memory; restart server and try again (needed %d bytes)" + jps "Out of memory. f[X^[gĂ݂Ă (%d bytes Kv)", + est "Mlu sai otsa. Proovi MySQL uuesti kivitada (puudu ji %d baiti)" + fre "Manque de mmoire. Redmarrez le dmon et r-essayez (%d octets ncessaires)" + ger "Kein Speicher vorhanden (%d Bytes bentigt). Bitte Server neu starten" + greek " . , (demon) ( %d bytes)" + hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)" + ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)" + jpn "Out of memory. ǡꥹȤƤߤƤ (%d bytes ɬ)" + kor "Out of memory. ٽ Ͻÿ (needed %d bytes)" + nor "Ikke mer minne. Star p nytt tjenesten og prv igjen (trengte %d byter)" + norwegian-ny "Ikkje meir minne. Start p nytt tenesten og prv igjen (trengte %d bytar)" + pol "Zbyt mao pamici. Uruchom ponownie demona i sprbuj ponownie (potrzeba %d bajtw)" + por "Sem memria. Reinicie o programa e tente novamente (necessita de %d bytes)" + rum "Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)" + rus " . ( %d )" + serbian "Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)" + slo "Mlo pamti. Retartujte daemona a skste znova (je potrebnch %d bytov)" + spa "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)" + swe "Ovntat slut p minnet, starta om programmet och frsk p nytt (Behvde %d bytes)" + ukr " 'Ԧ. (Ҧ %d Ԧ)" ER_OUT_OF_SORTMEMORY HY001 S1001 - cze "M-Blo pamti pro tdn. Zvyte velikost tdcho bufferu" - dan "Ikke mere sorteringshukommelse. g sorteringshukommelse (sort buffer size) for serveren" - nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size" - eng "Out of sort memory; increase server sort buffer size" - jps "Out of sort memory. sort buffer size Ȃ悤ł.", - est "Mlu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit" - fre "Manque de mmoire pour le tri. Augmentez-la." - ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhht werden" - greek " . sort buffer size (demon)" - hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet" - ita "Memoria per gli ordinamenti esaurita. 
Incrementare il 'sort_buffer' al demone" - jpn "Out of sort memory. sort buffer size ­ʤ褦Ǥ." - kor "Out of sort memory. daemon sort buffer ũ⸦ Ű" - nor "Ikke mer sorteringsminne. k sorteringsminnet (sort buffer size) for tjenesten" - norwegian-ny "Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten" - pol "Zbyt mao pamici dla sortowania. Zwiksz wielko? bufora demona dla sortowania" - por "Sem memria para ordenao. Aumente tamanho do 'buffer' de ordenao" - rum "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)" - rus " . " - serbian "Nema memorije za sortiranje. Poveajte veliinu sort buffer-a MySQL server-u" - slo "Mlo pamti pre triedenie, zvte vekos triediaceho bufferu" - spa "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion" - swe "Sorteringsbufferten rcker inte till. Kontrollera startparametrarna" - ukr " 'Ԧ . ¦ ͦ " + cze "M-Blo pamti pro tdn. Zvyte velikost tdcho bufferu" + dan "Ikke mere sorteringshukommelse. g sorteringshukommelse (sort buffer size) for serveren" + nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size" + eng "Out of sort memory; increase server sort buffer size" + jps "Out of sort memory. sort buffer size Ȃ悤ł.", + est "Mlu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit" + fre "Manque de mmoire pour le tri. Augmentez-la." + ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhht werden" + greek " . sort buffer size (demon)" + hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet" + ita "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone" + jpn "Out of sort memory. sort buffer size ­ʤ褦Ǥ." + kor "Out of sort memory. daemon sort buffer ũ⸦ Ű" + nor "Ikke mer sorteringsminne. k sorteringsminnet (sort buffer size) for tjenesten" + norwegian-ny "Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten" + pol "Zbyt mao pamici dla sortowania. Zwiksz wielko? bufora demona dla sortowania" + por "Sem memria para ordenao. Aumente tamanho do 'buffer' de ordenao" + rum "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)" + rus " . " + serbian "Nema memorije za sortiranje. Poveajte veliinu sort buffer-a MySQL server-u" + slo "Mlo pamti pre triedenie, zvte vekos triediaceho bufferu" + spa "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion" + swe "Sorteringsbufferten rcker inte till. Kontrollera startparametrarna" + ukr " 'Ԧ . ¦ ͦ " ER_UNEXPECTED_EOF - cze "Neo-Bekvan konec souboru pi ten '%-.64s' (chybov kd: %d)" - dan "Uventet afslutning p fil (eof) ved lsning af filen '%-.64s' (Fejlkode: %d)" - nla "Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)" - eng "Unexpected EOF found when reading file '%-.64s' (errno: %d)" - jps "'%-.64s' t@Cǂݍݒ EOF \ʏŌ܂. (errno: %d)", - est "Ootamatu faililpumrgend faili '%-.64s' lugemisel (veakood: %d)" - fre "Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)" - ger "Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)" - greek " , '%-.64s' ( : %d)" - hun "Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)" - ita "Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)" - jpn "'%-.64s' եɤ߹ EOF ͽ̽Ǹޤ. 
(errno: %d)" - kor "'%-.64s' ȭ д ߸ eof ߰ (ȣ: %d)" - nor "Uventet slutt p fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)" - norwegian-ny "Uventa slutt p fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)" - pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod bdu: %d)" - por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)" - rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)" - rus " '%-.64s' (: %d)" - serbian "Neoekivani kraj pri itanju file-a '%-.64s' (errno: %d)" - slo "Neoakvan koniec sboru pri tan '%-.64s' (chybov kd: %d)" - spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)" - swe "Ovntat filslut vid lsning frn '%-.64s' (Felkod: %d)" - ukr " ˦ '%-.64s' (: %d)" + cze "Neo-Bekvan konec souboru pi ten '%-.64s' (chybov kd: %d)" + dan "Uventet afslutning p fil (eof) ved lsning af filen '%-.64s' (Fejlkode: %d)" + nla "Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)" + eng "Unexpected EOF found when reading file '%-.64s' (errno: %d)" + jps "'%-.64s' t@Cǂݍݒ EOF \ʏŌ܂. (errno: %d)", + est "Ootamatu faililpumrgend faili '%-.64s' lugemisel (veakood: %d)" + fre "Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)" + ger "Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)" + greek " , '%-.64s' ( : %d)" + hun "Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)" + ita "Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)" + jpn "'%-.64s' եɤ߹ EOF ͽ̽Ǹޤ. (errno: %d)" + kor "'%-.64s' ȭ д ߸ eof ߰ (ȣ: %d)" + nor "Uventet slutt p fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)" + norwegian-ny "Uventa slutt p fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)" + pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod bdu: %d)" + por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)" + rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)" + rus " '%-.64s' (: %d)" + serbian "Neoekivani kraj pri itanju file-a '%-.64s' (errno: %d)" + slo "Neoakvan koniec sboru pri tan '%-.64s' (chybov kd: %d)" + spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)" + swe "Ovntat filslut vid lsning frn '%-.64s' (Felkod: %d)" + ukr " ˦ '%-.64s' (: %d)" ER_CON_COUNT_ERROR 08004 - cze "P-Bli mnoho spojen" - dan "For mange forbindelser (connections)" - nla "Te veel verbindingen" - eng "Too many connections" - jps "ڑ܂", - est "Liiga palju samaaegseid hendusi" - fre "Trop de connections" - ger "Zu viele Verbindungen" - greek " ..." - hun "Tul sok kapcsolat" - ita "Troppe connessioni" - jpn "³¿ޤ" - kor "ʹ ... max_connection Űÿ..." - nor "For mange tilkoblinger (connections)" - norwegian-ny "For mange tilkoplingar (connections)" - pol "Zbyt wiele po?cze" - por "Excesso de conexes" - rum "Prea multe conectiuni" - rus " " - serbian "Previe konekcija" - slo "Prli mnoho spojen" - spa "Demasiadas conexiones" - swe "Fr mnga anslutningar" - ukr " '" + cze "P-Bli mnoho spojen" + dan "For mange forbindelser (connections)" + nla "Te veel verbindingen" + eng "Too many connections" + jps "ڑ܂", + est "Liiga palju samaaegseid hendusi" + fre "Trop de connections" + ger "Zu viele Verbindungen" + greek " ..." + hun "Tul sok kapcsolat" + ita "Troppe connessioni" + jpn "³¿ޤ" + kor "ʹ ... max_connection Űÿ..." 
+ nor "For mange tilkoblinger (connections)" + norwegian-ny "For mange tilkoplingar (connections)" + pol "Zbyt wiele po?cze" + por "Excesso de conexes" + rum "Prea multe conectiuni" + rus " " + serbian "Previe konekcija" + slo "Prli mnoho spojen" + spa "Demasiadas conexiones" + swe "Fr mnga anslutningar" + ukr " '" ER_OUT_OF_RESOURCES - cze "M-Blo prostoru/pamti pro thread" - dan "Udget for trde/hukommelse" - nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen" - eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space" - jps "Out of memory; mysqld ̑̃vZX[SĎgĂ邩mFĂ. [g؂ĂȂꍇA'ulimit' ݒ肵 mysqld ̃[gpEʂ𑽂邩Aswap space 𑝂₵Ă݂Ă", - est "Mlu sai otsa. Vimalik, et aitab swap-i lisamine vi ksu 'ulimit' abil MySQL-le rohkema mlu kasutamise lubamine" - fre "Manque de 'threads'/mmoire" - ger "Kein Speicher mehr vorhanden. Prfen Sie, ob mysqld oder ein anderer Prozess den gesamten Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafr sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten" - greek " (Out of thread space/memory)" - hun "Elfogyott a thread-memoria" - ita "Fine dello spazio/memoria per i thread" - jpn "Out of memory; mysqld ¾Υץ꡼ƻȤäƤ뤫ǧƤ. ꡼ȤڤäƤʤ硢'ulimit' ꤷ mysqld Υ꡼Ѹ³̤¿뤫swap space 䤷ƤߤƤ" - kor "Out of memory; mysqld Ǵٸ μ 밡 ޸𸮸 äũϽÿ. ׷ ʴٸ ulimit ̿Ͽ ޸𸮸 ֵ ϰų ̽ Űÿ" - nor "Tomt for trd plass/minne" - norwegian-ny "Tomt for trd plass/minne" - pol "Zbyt mao miejsca/pamici dla w?tku" - por "Sem memria. Verifique se o mysqld ou algum outro processo est usando toda memria disponvel. Se no, voc pode ter que usar 'ulimit' para permitir ao mysqld usar mais memria ou voc pode adicionar mais rea de 'swap'" - rum "Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)" - rus " ; , mysqld - . , ulimit, mysqld , " - serbian "Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi vie memorije ili probajte da dodate vie swap memorije)" - slo "Mlo miesta-pamti pre vlkno" - spa "Memoria/espacio de tranpaso insuficiente" - swe "Fick slut p minnet. Kontrollera om mysqld eller ngon annan process anvnder allt tillgngligt minne. Om inte, frsk anvnda 'ulimit' eller allokera mera swap" - ukr " 'Ԧ; צ mysqld ˦ ۦ '. Φ, 'ulimit', mysqld ¦ 'Ԧ ¦ ͦ Ц " + cze "M-Blo prostoru/pamti pro thread" + dan "Udget for trde/hukommelse" + nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen" + eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space" + jps "Out of memory; mysqld ̑̃vZX[SĎgĂ邩mFĂ. [g؂ĂȂꍇA'ulimit' ݒ肵 mysqld ̃[gpEʂ𑽂邩Aswap space 𑝂₵Ă݂Ă", + est "Mlu sai otsa. 
Vimalik, et aitab swap-i lisamine vi ksu 'ulimit' abil MySQL-le rohkema mlu kasutamise lubamine" + fre "Manque de 'threads'/mmoire" + ger "Kein Speicher mehr vorhanden. Prfen Sie, ob mysqld oder ein anderer Prozess den gesamten Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafr sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten" + greek " (Out of thread space/memory)" + hun "Elfogyott a thread-memoria" + ita "Fine dello spazio/memoria per i thread" + jpn "Out of memory; mysqld ¾Υץ꡼ƻȤäƤ뤫ǧƤ. ꡼ȤڤäƤʤ硢'ulimit' ꤷ mysqld Υ꡼Ѹ³̤¿뤫swap space 䤷ƤߤƤ" + kor "Out of memory; mysqld Ǵٸ μ 밡 ޸𸮸 äũϽÿ. ׷ ʴٸ ulimit ̿Ͽ ޸𸮸 ֵ ϰų ̽ Űÿ" + nor "Tomt for trd plass/minne" + norwegian-ny "Tomt for trd plass/minne" + pol "Zbyt mao miejsca/pamici dla w?tku" + por "Sem memria. Verifique se o mysqld ou algum outro processo est usando toda memria disponvel. Se no, voc pode ter que usar 'ulimit' para permitir ao mysqld usar mais memria ou voc pode adicionar mais rea de 'swap'" + rum "Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)" + rus " ; , mysqld - . , ulimit, mysqld , " + serbian "Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi vie memorije ili probajte da dodate vie swap memorije)" + slo "Mlo miesta-pamti pre vlkno" + spa "Memoria/espacio de tranpaso insuficiente" + swe "Fick slut p minnet. Kontrollera om mysqld eller ngon annan process anvnder allt tillgngligt minne. Om inte, frsk anvnda 'ulimit' eller allokera mera swap" + ukr " 'Ԧ; צ mysqld ˦ ۦ '. Φ, 'ulimit', mysqld ¦ 'Ԧ ¦ ͦ Ц " ER_BAD_HOST_ERROR 08S01 - cze "Nemohu zjistit jm-Bno stroje pro Vai adresu" - dan "Kan ikke f vrtsnavn for din adresse" - nla "Kan de hostname niet krijgen van uw adres" - eng "Can't get hostname for your address" - jps " address hostname ܂.", - est "Ei suuda lahendada IP aadressi masina nimeks" - fre "Ne peut obtenir de hostname pour votre adresse" - ger "Kann Hostnamen fr diese Adresse nicht erhalten" - greek " hostname address " - hun "A gepnev nem allapithato meg a cimbol" - ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)" - jpn " address hostname ޤ." - kor " ǻ ȣƮ̸ ϴ." 
- nor "Kan ikke f tak i vertsnavn for din adresse" - norwegian-ny "Kan ikkje f tak i vertsnavn for di adresse" - pol "Nie mona otrzyma nazwy hosta dla twojego adresu" - por "No pode obter nome do 'host' para seu endereo" - rum "Nu pot sa obtin hostname-ul adresei tale" - rus " " - serbian "Ne mogu da dobijem ime host-a za vau IP adresu" - slo "Nemem zisti meno hostitea pre vau adresu" - spa "No puedo obtener el nombre de maquina de tu direccion" - swe "Kan inte hitta 'hostname' fr din adress" - ukr " ' ϧ " + cze "Nemohu zjistit jm-Bno stroje pro Vai adresu" + dan "Kan ikke f vrtsnavn for din adresse" + nla "Kan de hostname niet krijgen van uw adres" + eng "Can't get hostname for your address" + jps " address hostname ܂.", + est "Ei suuda lahendada IP aadressi masina nimeks" + fre "Ne peut obtenir de hostname pour votre adresse" + ger "Kann Hostnamen fr diese Adresse nicht erhalten" + greek " hostname address " + hun "A gepnev nem allapithato meg a cimbol" + ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)" + jpn " address hostname ޤ." + kor " ǻ ȣƮ̸ ϴ." + nor "Kan ikke f tak i vertsnavn for din adresse" + norwegian-ny "Kan ikkje f tak i vertsnavn for di adresse" + pol "Nie mona otrzyma nazwy hosta dla twojego adresu" + por "No pode obter nome do 'host' para seu endereo" + rum "Nu pot sa obtin hostname-ul adresei tale" + rus " " + serbian "Ne mogu da dobijem ime host-a za vau IP adresu" + slo "Nemem zisti meno hostitea pre vau adresu" + spa "No puedo obtener el nombre de maquina de tu direccion" + swe "Kan inte hitta 'hostname' fr din adress" + ukr " ' ϧ " ER_HANDSHAKE_ERROR 08S01 - cze "Chyba p-Bi ustavovn spojen" - dan "Forkert hndtryk (handshake)" - nla "Verkeerde handshake" - eng "Bad handshake" - est "Vr handshake" - fre "Mauvais 'handshake'" - ger "Ungltiger Handshake" - greek " (handshake) " - hun "A kapcsolatfelvetel nem sikerult (Bad handshake)" - ita "Negoziazione impossibile" - nor "Feil hndtrykk (handshake)" - norwegian-ny "Feil handtrykk (handshake)" - pol "Zy uchwyt(handshake)" - por "Negociao de acesso falhou" - rum "Prost inceput de conectie (bad handshake)" - rus " " - serbian "Lo poetak komunikacije (handshake)" - slo "Chyba pri nadvzovan spojenia" - spa "Protocolo erroneo" - swe "Fel vid initiering av kommunikationen med klienten" - ukr "צ '" + cze "Chyba p-Bi ustavovn spojen" + dan "Forkert hndtryk (handshake)" + nla "Verkeerde handshake" + eng "Bad handshake" + est "Vr handshake" + fre "Mauvais 'handshake'" + ger "Ungltiger Handshake" + greek " (handshake) " + hun "A kapcsolatfelvetel nem sikerult (Bad handshake)" + ita "Negoziazione impossibile" + nor "Feil hndtrykk (handshake)" + norwegian-ny "Feil handtrykk (handshake)" + pol "Zy uchwyt(handshake)" + por "Negociao de acesso falhou" + rum "Prost inceput de conectie (bad handshake)" + rus " " + serbian "Lo poetak komunikacije (handshake)" + slo "Chyba pri nadvzovan spojenia" + spa "Protocolo erroneo" + swe "Fel vid initiering av kommunikationen med klienten" + ukr "צ '" ER_DBACCESS_DENIED_ERROR 42000 - cze "P-Bstup pro uivatele '%-.32s'@'%-.64s' k databzi '%-.64s' nen povolen" - dan "Adgang ngtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'" - nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'" - eng "Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'" - jps "[U[ '%-.32s'@'%-.64s' '%-.64s' f[^x[Xւ̃ANZXۂ܂", - est "Ligips keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'" - fre "Accs refus pour l'utilisateur: '%-.32s'@'@%-.64s'. 
Base '%-.64s'" - ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung fr Datenbank '%-.64s'" - greek " : '%-.32s'@'%-.64s' '%-.64s'" - hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz." - ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'" - jpn "桼 '%-.32s'@'%-.64s' '%-.64s' ǡ١ؤΥݤޤ" - kor "'%-.32s'@'%-.64s' ڴ '%-.64s' Ÿ̽ ź Ǿϴ." - nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet" - norwegian-ny "Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta" - por "Acesso negado para o usurio '%-.32s'@'%-.64s' ao banco de dados '%-.64s'" - rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'" - rus " '%-.32s'@'%-.64s' '%-.64s' " - serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'" - slo "Zakzan prstup pre uvatea: '%-.32s'@'%-.64s' k databzi '%-.64s'" - spa "Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'" - swe "Anvndare '%-.32s'@'%-.64s' r ej berttigad att anvnda databasen %-.64s" - ukr " : '%-.32s'@'%-.64s' '%-.64s'" + cze "P-Bstup pro uivatele '%-.32s'@'%-.64s' k databzi '%-.64s' nen povolen" + dan "Adgang ngtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'" + nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'" + eng "Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'" + jps "[U[ '%-.32s'@'%-.64s' '%-.64s' f[^x[Xւ̃ANZXۂ܂", + est "Ligips keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'" + fre "Accs refus pour l'utilisateur: '%-.32s'@'@%-.64s'. Base '%-.64s'" + ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung fr Datenbank '%-.64s'" + greek " : '%-.32s'@'%-.64s' '%-.64s'" + hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz." + ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'" + jpn "桼 '%-.32s'@'%-.64s' '%-.64s' ǡ١ؤΥݤޤ" + kor "'%-.32s'@'%-.64s' ڴ '%-.64s' Ÿ̽ ź Ǿϴ." + nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet" + norwegian-ny "Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta" + por "Acesso negado para o usurio '%-.32s'@'%-.64s' ao banco de dados '%-.64s'" + rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'" + rus " '%-.32s'@'%-.64s' '%-.64s' " + serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'" + slo "Zakzan prstup pre uvatea: '%-.32s'@'%-.64s' k databzi '%-.64s'" + spa "Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'" + swe "Anvndare '%-.32s'@'%-.64s' r ej berttigad att anvnda databasen %-.64s" + ukr " : '%-.32s'@'%-.64s' '%-.64s'" ER_ACCESS_DENIED_ERROR 28000 - cze "P-Bstup pro uivatele '%-.32s'@'%-.64s' (s heslem %s)" - dan "Adgang ngtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)" - nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)" - eng "Access denied for user '%-.32s'@'%-.64s' (using password: %s)" - jps "[U[ '%-.32s'@'%-.64s' ۂ܂.uUsing password: %s)", - est "Ligips keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)" - fre "Accs refus pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)" - ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)" - greek " : '%-.32s'@'%-.64s' ( password: %s)" - hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. 
(Hasznalja a jelszot: %s)" - ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)" - jpn "桼 '%-.32s'@'%-.64s' ݤޤ.uUsing password: %s)" - kor "'%-.32s'@'%-.64s' ڴ ź Ǿϴ. (using password: %s)" - nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)" - norwegian-ny "Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)" - por "Acesso negado para o usurio '%-.32s'@'%-.64s' (senha usada: %s)" - rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)" - rus " '%-.32s'@'%-.64s' ( : %s)" - serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')" - slo "Zakzan prstup pre uvatea: '%-.32s'@'%-.64s' (pouitie hesla: %s)" - spa "Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)" - swe "Anvndare '%-.32s'@'%-.64s' r ej berttigad att logga in (Anvnder lsen: %s)" - ukr " : '%-.32s'@'%-.64s' ( : %s)" + cze "P-Bstup pro uivatele '%-.32s'@'%-.64s' (s heslem %s)" + dan "Adgang ngtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)" + nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)" + eng "Access denied for user '%-.32s'@'%-.64s' (using password: %s)" + jps "[U[ '%-.32s'@'%-.64s' ۂ܂.uUsing password: %s)", + est "Ligips keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)" + fre "Accs refus pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)" + ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)" + greek " : '%-.32s'@'%-.64s' ( password: %s)" + hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)" + ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)" + jpn "桼 '%-.32s'@'%-.64s' ݤޤ.uUsing password: %s)" + kor "'%-.32s'@'%-.64s' ڴ ź Ǿϴ. (using password: %s)" + nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)" + norwegian-ny "Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)" + por "Acesso negado para o usurio '%-.32s'@'%-.64s' (senha usada: %s)" + rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)" + rus " '%-.32s'@'%-.64s' ( : %s)" + serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')" + slo "Zakzan prstup pre uvatea: '%-.32s'@'%-.64s' (pouitie hesla: %s)" + spa "Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)" + swe "Anvndare '%-.32s'@'%-.64s' r ej berttigad att logga in (Anvnder lsen: %s)" + ukr " : '%-.32s'@'%-.64s' ( : %s)" ER_NO_DB_ERROR 3D000 - cze "Nebyla vybr-Bna dn databze" - dan "Ingen database valgt" - nla "Geen database geselecteerd" - eng "No database selected" - jps "f[^x[XIĂ܂.", - est "Andmebaasi ei ole valitud" - fre "Aucune base n'a t slectionne" - ger "Keine Datenbank ausgewhlt" - greek " " - hun "Nincs kivalasztott adatbazis" - ita "Nessun database selezionato" - jpn "ǡ١򤵤Ƥޤ." - kor "õ Ÿ̽ ϴ." 
- nor "Ingen database valgt" - norwegian-ny "Ingen database vald" - pol "Nie wybrano adnej bazy danych" - por "Nenhum banco de dados foi selecionado" - rum "Nici o baza de data nu a fost selectata inca" - rus " " - serbian "Ni jedna baza nije selektovana" - slo "Nebola vybran databza" - spa "Base de datos no seleccionada" - swe "Ingen databas i anvndning" - ukr " " + cze "Nebyla vybr-Bna dn databze" + dan "Ingen database valgt" + nla "Geen database geselecteerd" + eng "No database selected" + jps "f[^x[XIĂ܂.", + est "Andmebaasi ei ole valitud" + fre "Aucune base n'a t slectionne" + ger "Keine Datenbank ausgewhlt" + greek " " + hun "Nincs kivalasztott adatbazis" + ita "Nessun database selezionato" + jpn "ǡ١򤵤Ƥޤ." + kor "õ Ÿ̽ ϴ." + nor "Ingen database valgt" + norwegian-ny "Ingen database vald" + pol "Nie wybrano adnej bazy danych" + por "Nenhum banco de dados foi selecionado" + rum "Nici o baza de data nu a fost selectata inca" + rus " " + serbian "Ni jedna baza nije selektovana" + slo "Nebola vybran databza" + spa "Base de datos no seleccionada" + swe "Ingen databas i anvndning" + ukr " " ER_UNKNOWN_COM_ERROR 08S01 - cze "Nezn-Bm pkaz" - dan "Ukendt kommando" - nla "Onbekend commando" - eng "Unknown command" - jps "̃R}h͉H", - est "Tundmatu ksk" - fre "Commande inconnue" - ger "Unbekannter Befehl" - greek " " - hun "Ervenytelen parancs" - ita "Comando sconosciuto" - jpn "Υޥɤϲ" - kor "ɾ 𸣰ھ..." - nor "Ukjent kommando" - norwegian-ny "Ukjent kommando" - pol "Nieznana komenda" - por "Comando desconhecido" - rum "Comanda invalida" - rus " " - serbian "Nepoznata komanda" - slo "Neznmy prkaz" - spa "Comando desconocido" - swe "Oknt commando" - ukr "צ " + cze "Nezn-Bm pkaz" + dan "Ukendt kommando" + nla "Onbekend commando" + eng "Unknown command" + jps "̃R}h͉H", + est "Tundmatu ksk" + fre "Commande inconnue" + ger "Unbekannter Befehl" + greek " " + hun "Ervenytelen parancs" + ita "Comando sconosciuto" + jpn "Υޥɤϲ" + kor "ɾ 𸣰ھ..." + nor "Ukjent kommando" + norwegian-ny "Ukjent kommando" + pol "Nieznana komenda" + por "Comando desconhecido" + rum "Comanda invalida" + rus " " + serbian "Nepoznata komanda" + slo "Neznmy prkaz" + spa "Comando desconocido" + swe "Oknt commando" + ukr "צ " ER_BAD_NULL_ERROR 23000 - cze "Sloupec '%-.64s' nem-Be bt null" - dan "Kolonne '%-.64s' kan ikke vre NULL" - nla "Kolom '%-.64s' kan niet null zijn" - eng "Column '%-.64s' cannot be null" - jps "Column '%-.64s' null ɂ͂łȂ̂ł", - est "Tulp '%-.64s' ei saa omada nullvrtust" - fre "Le champ '%-.64s' ne peut tre vide (null)" - ger "Feld '%-.64s' darf nicht NULL sein" - greek " '%-.64s' (null)" - hun "A(z) '%-.64s' oszlop erteke nem lehet nulla" - ita "La colonna '%-.64s' non puo` essere nulla" - jpn "Column '%-.64s' null ˤϤǤʤΤǤ" - kor "Į '%-.64s' (Null) Ǹ ȵ˴ϴ. 
" - nor "Kolonne '%-.64s' kan ikke vere null" - norwegian-ny "Kolonne '%-.64s' kan ikkje vere null" - pol "Kolumna '%-.64s' nie moe by null" - por "Coluna '%-.64s' no pode ser vazia" - rum "Coloana '%-.64s' nu poate sa fie null" - rus " '%-.64s' NULL" - serbian "Kolona '%-.64s' ne moe biti NULL" - slo "Pole '%-.64s' neme by null" - spa "La columna '%-.64s' no puede ser nula" - swe "Kolumn '%-.64s' fr inte vara NULL" - ukr " '%-.64s' " + cze "Sloupec '%-.64s' nem-Be bt null" + dan "Kolonne '%-.64s' kan ikke vre NULL" + nla "Kolom '%-.64s' kan niet null zijn" + eng "Column '%-.64s' cannot be null" + jps "Column '%-.64s' null ɂ͂łȂ̂ł", + est "Tulp '%-.64s' ei saa omada nullvrtust" + fre "Le champ '%-.64s' ne peut tre vide (null)" + ger "Feld '%-.64s' darf nicht NULL sein" + greek " '%-.64s' (null)" + hun "A(z) '%-.64s' oszlop erteke nem lehet nulla" + ita "La colonna '%-.64s' non puo` essere nulla" + jpn "Column '%-.64s' null ˤϤǤʤΤǤ" + kor "Į '%-.64s' (Null) Ǹ ȵ˴ϴ. " + nor "Kolonne '%-.64s' kan ikke vere null" + norwegian-ny "Kolonne '%-.64s' kan ikkje vere null" + pol "Kolumna '%-.64s' nie moe by null" + por "Coluna '%-.64s' no pode ser vazia" + rum "Coloana '%-.64s' nu poate sa fie null" + rus " '%-.64s' NULL" + serbian "Kolona '%-.64s' ne moe biti NULL" + slo "Pole '%-.64s' neme by null" + spa "La columna '%-.64s' no puede ser nula" + swe "Kolumn '%-.64s' fr inte vara NULL" + ukr " '%-.64s' " ER_BAD_DB_ERROR 42000 - cze "Nezn-Bm databze '%-.64s'" - dan "Ukendt database '%-.64s'" - nla "Onbekende database '%-.64s'" - eng "Unknown database '%-.64s'" - jps "'%-.64s' Ȃăf[^x[X͒m܂.", - est "Tundmatu andmebaas '%-.64s'" - fre "Base '%-.64s' inconnue" - ger "Unbekannte Datenbank '%-.64s'" - greek " '%-.64s'" - hun "Ervenytelen adatbazis: '%-.64s'" - ita "Database '%-.64s' sconosciuto" - jpn "'%-.64s' ʤƥǡ١Τޤ." - kor "Ÿ̽ '%-.64s' ˼ " - nor "Ukjent database '%-.64s'" - norwegian-ny "Ukjent database '%-.64s'" - pol "Nieznana baza danych '%-.64s'" - por "Banco de dados '%-.64s' desconhecido" - rum "Baza de data invalida '%-.64s'" - rus " '%-.64s'" - serbian "Nepoznata baza '%-.64s'" - slo "Neznma databza '%-.64s'" - spa "Base de datos desconocida '%-.64s'" - swe "Oknd databas: '%-.64s'" - ukr "צ '%-.64s'" + cze "Nezn-Bm databze '%-.64s'" + dan "Ukendt database '%-.64s'" + nla "Onbekende database '%-.64s'" + eng "Unknown database '%-.64s'" + jps "'%-.64s' Ȃăf[^x[X͒m܂.", + est "Tundmatu andmebaas '%-.64s'" + fre "Base '%-.64s' inconnue" + ger "Unbekannte Datenbank '%-.64s'" + greek " '%-.64s'" + hun "Ervenytelen adatbazis: '%-.64s'" + ita "Database '%-.64s' sconosciuto" + jpn "'%-.64s' ʤƥǡ١Τޤ." 
+ kor "Ÿ̽ '%-.64s' ˼ " + nor "Ukjent database '%-.64s'" + norwegian-ny "Ukjent database '%-.64s'" + pol "Nieznana baza danych '%-.64s'" + por "Banco de dados '%-.64s' desconhecido" + rum "Baza de data invalida '%-.64s'" + rus " '%-.64s'" + serbian "Nepoznata baza '%-.64s'" + slo "Neznma databza '%-.64s'" + spa "Base de datos desconocida '%-.64s'" + swe "Oknd databas: '%-.64s'" + ukr "צ '%-.64s'" ER_TABLE_EXISTS_ERROR 42S01 - cze "Tabulka '%-.64s' ji-B existuje" - dan "Tabellen '%-.64s' findes allerede" - nla "Tabel '%-.64s' bestaat al" - eng "Table '%-.64s' already exists" - jps "Table '%-.64s' ͊ɂ܂", - est "Tabel '%-.64s' juba eksisteerib" - fre "La table '%-.64s' existe dj" - ger "Tabelle '%-.64s' bereits vorhanden" - greek " '%-.64s' " - hun "A(z) '%-.64s' tabla mar letezik" - ita "La tabella '%-.64s' esiste gia`" - jpn "Table '%-.64s' ϴˤޤ" - kor "̺ '%-.64s' ̹ " - nor "Tabellen '%-.64s' eksisterer allerede" - norwegian-ny "Tabellen '%-.64s' eksisterar allereide" - pol "Tabela '%-.64s' ju istnieje" - por "Tabela '%-.64s' j existe" - rum "Tabela '%-.64s' exista deja" - rus " '%-.64s' " - serbian "Tabela '%-.64s' ve postoji" - slo "Tabuka '%-.64s' u existuje" - spa "La tabla '%-.64s' ya existe" - swe "Tabellen '%-.64s' finns redan" - ukr " '%-.64s' դ" + cze "Tabulka '%-.64s' ji-B existuje" + dan "Tabellen '%-.64s' findes allerede" + nla "Tabel '%-.64s' bestaat al" + eng "Table '%-.64s' already exists" + jps "Table '%-.64s' ͊ɂ܂", + est "Tabel '%-.64s' juba eksisteerib" + fre "La table '%-.64s' existe dj" + ger "Tabelle '%-.64s' bereits vorhanden" + greek " '%-.64s' " + hun "A(z) '%-.64s' tabla mar letezik" + ita "La tabella '%-.64s' esiste gia`" + jpn "Table '%-.64s' ϴˤޤ" + kor "̺ '%-.64s' ̹ " + nor "Tabellen '%-.64s' eksisterer allerede" + norwegian-ny "Tabellen '%-.64s' eksisterar allereide" + pol "Tabela '%-.64s' ju istnieje" + por "Tabela '%-.64s' j existe" + rum "Tabela '%-.64s' exista deja" + rus " '%-.64s' " + serbian "Tabela '%-.64s' ve postoji" + slo "Tabuka '%-.64s' u existuje" + spa "La tabla '%-.64s' ya existe" + swe "Tabellen '%-.64s' finns redan" + ukr " '%-.64s' դ" ER_BAD_TABLE_ERROR 42S02 - cze "Nezn-Bm tabulka '%-.100s'" - dan "Ukendt tabel '%-.100s'" - nla "Onbekende tabel '%-.100s'" - eng "Unknown table '%-.100s'" - jps "table '%-.100s' ͂܂.", - est "Tundmatu tabel '%-.100s'" - fre "Table '%-.100s' inconnue" - ger "Unbekannte Tabelle '%-.100s'" - greek " '%-.100s'" - hun "Ervenytelen tabla: '%-.100s'" - ita "Tabella '%-.100s' sconosciuta" - jpn "table '%-.100s' Ϥޤ." - kor "̺ '%-.100s' ˼ " - nor "Ukjent tabell '%-.100s'" - norwegian-ny "Ukjent tabell '%-.100s'" - pol "Nieznana tabela '%-.100s'" - por "Tabela '%-.100s' desconhecida" - rum "Tabela '%-.100s' este invalida" - rus " '%-.100s'" - serbian "Nepoznata tabela '%-.100s'" - slo "Neznma tabuka '%-.100s'" - spa "Tabla '%-.100s' desconocida" - swe "Oknd tabell '%-.100s'" - ukr "צ '%-.100s'" + cze "Nezn-Bm tabulka '%-.100s'" + dan "Ukendt tabel '%-.100s'" + nla "Onbekende tabel '%-.100s'" + eng "Unknown table '%-.100s'" + jps "table '%-.100s' ͂܂.", + est "Tundmatu tabel '%-.100s'" + fre "Table '%-.100s' inconnue" + ger "Unbekannte Tabelle '%-.100s'" + greek " '%-.100s'" + hun "Ervenytelen tabla: '%-.100s'" + ita "Tabella '%-.100s' sconosciuta" + jpn "table '%-.100s' Ϥޤ." 
+ kor "̺ '%-.100s' ˼ " + nor "Ukjent tabell '%-.100s'" + norwegian-ny "Ukjent tabell '%-.100s'" + pol "Nieznana tabela '%-.100s'" + por "Tabela '%-.100s' desconhecida" + rum "Tabela '%-.100s' este invalida" + rus " '%-.100s'" + serbian "Nepoznata tabela '%-.100s'" + slo "Neznma tabuka '%-.100s'" + spa "Tabla '%-.100s' desconocida" + swe "Oknd tabell '%-.100s'" + ukr "צ '%-.100s'" ER_NON_UNIQ_ERROR 23000 - cze "Sloupec '%-.64s' v %s nen-B zcela jasn" - dan "Felt: '%-.64s' i tabel %s er ikke entydigt" - nla "Kolom: '%-.64s' in %s is niet eenduidig" - eng "Column '%-.64s' in %-.64s is ambiguous" - est "Vli '%-.64s' %-.64s-s ei ole hene" - fre "Champ: '%-.64s' dans %s est ambigu" - ger "Feld '%-.64s' in %-.64s ist nicht eindeutig" - greek " : '%-.64s' %-.64s " - hun "A(z) '%-.64s' oszlop %-.64s-ben ketertelmu" - ita "Colonna: '%-.64s' di %-.64s e` ambigua" - jpn "Column: '%-.64s' in %-.64s is ambiguous" - kor "Į: '%-.64s' in '%-.64s' ȣ" - nor "Felt: '%-.64s' i tabell %s er ikke entydig" - norwegian-ny "Kolonne: '%-.64s' i tabell %s er ikkje eintydig" - pol "Kolumna: '%-.64s' w %s jest dwuznaczna" - por "Coluna '%-.64s' em '%-.64s' ambgua" - rum "Coloana: '%-.64s' in %-.64s este ambigua" - rus " '%-.64s' %-.64s " - serbian "Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu" - slo "Pole: '%-.64s' v %-.64s je nejasn" - spa "La columna: '%-.64s' en %s es ambigua" - swe "Kolumn '%-.64s' i %s r inte unik" - ukr " '%-.64s' %-.64s " + cze "Sloupec '%-.64s' v %s nen-B zcela jasn" + dan "Felt: '%-.64s' i tabel %s er ikke entydigt" + nla "Kolom: '%-.64s' in %s is niet eenduidig" + eng "Column '%-.64s' in %-.64s is ambiguous" + est "Vli '%-.64s' %-.64s-s ei ole hene" + fre "Champ: '%-.64s' dans %s est ambigu" + ger "Feld '%-.64s' in %-.64s ist nicht eindeutig" + greek " : '%-.64s' %-.64s " + hun "A(z) '%-.64s' oszlop %-.64s-ben ketertelmu" + ita "Colonna: '%-.64s' di %-.64s e` ambigua" + jpn "Column: '%-.64s' in %-.64s is ambiguous" + kor "Į: '%-.64s' in '%-.64s' ȣ" + nor "Felt: '%-.64s' i tabell %s er ikke entydig" + norwegian-ny "Kolonne: '%-.64s' i tabell %s er ikkje eintydig" + pol "Kolumna: '%-.64s' w %s jest dwuznaczna" + por "Coluna '%-.64s' em '%-.64s' ambgua" + rum "Coloana: '%-.64s' in %-.64s este ambigua" + rus " '%-.64s' %-.64s " + serbian "Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu" + slo "Pole: '%-.64s' v %-.64s je nejasn" + spa "La columna: '%-.64s' en %s es ambigua" + swe "Kolumn '%-.64s' i %s r inte unik" + ukr " '%-.64s' %-.64s " ER_SERVER_SHUTDOWN 08S01 - cze "Prob-Bh ukonovn prce serveru" - dan "Database nedlukning er i gang" - nla "Bezig met het stoppen van de server" - eng "Server shutdown in progress" - jps "Server shutdown ...", - est "Serveri seiskamine kib" - fre "Arrt du serveur en cours" - ger "Der Server wird heruntergefahren" - greek " (server shutdown)" - hun "A szerver leallitasa folyamatban" - ita "Shutdown del server in corso" - jpn "Server shutdown ..." - kor "Server ˴ٿ Դϴ." 
- nor "Database nedkobling er i gang" - norwegian-ny "Tenar nedkopling er i gang" - pol "Trwa koczenie dziaania serwera" - por "'Shutdown' do servidor em andamento" - rum "Terminarea serverului este in desfasurare" - rus " " - serbian "Gaenje servera je u toku" - slo "Prebieha ukonovanie prce servera" - spa "Desconexion de servidor en proceso" - swe "Servern gr nu ned" - ukr "դ " + cze "Prob-Bh ukonovn prce serveru" + dan "Database nedlukning er i gang" + nla "Bezig met het stoppen van de server" + eng "Server shutdown in progress" + jps "Server shutdown ...", + est "Serveri seiskamine kib" + fre "Arrt du serveur en cours" + ger "Der Server wird heruntergefahren" + greek " (server shutdown)" + hun "A szerver leallitasa folyamatban" + ita "Shutdown del server in corso" + jpn "Server shutdown ..." + kor "Server ˴ٿ Դϴ." + nor "Database nedkobling er i gang" + norwegian-ny "Tenar nedkopling er i gang" + pol "Trwa koczenie dziaania serwera" + por "'Shutdown' do servidor em andamento" + rum "Terminarea serverului este in desfasurare" + rus " " + serbian "Gaenje servera je u toku" + slo "Prebieha ukonovanie prce servera" + spa "Desconexion de servidor en proceso" + swe "Servern gr nu ned" + ukr "դ " ER_BAD_FIELD_ERROR 42S22 S0022 - cze "Nezn-Bm sloupec '%-.64s' v %s" - dan "Ukendt kolonne '%-.64s' i tabel %s" - nla "Onbekende kolom '%-.64s' in %s" - eng "Unknown column '%-.64s' in '%-.64s'" - jps "'%-.64s' column '%-.64s' ɂ͂܂.", - est "Tundmatu tulp '%-.64s' '%-.64s'-s" - fre "Champ '%-.64s' inconnu dans %s" - ger "Unbekanntes Tabellenfeld '%-.64s' in %-.64s" - greek " '%-.64s' '%-.64s'" - hun "A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben" - ita "Colonna sconosciuta '%-.64s' in '%-.64s'" - jpn "'%-.64s' column '%-.64s' ˤϤޤ." - kor "Unknown Į '%-.64s' in '%-.64s'" - nor "Ukjent kolonne '%-.64s' i tabell %s" - norwegian-ny "Ukjent felt '%-.64s' i tabell %s" - pol "Nieznana kolumna '%-.64s' w %s" - por "Coluna '%-.64s' desconhecida em '%-.64s'" - rum "Coloana invalida '%-.64s' in '%-.64s'" - rus " '%-.64s' '%-.64s'" - serbian "Nepoznata kolona '%-.64s' u '%-.64s'" - slo "Neznme pole '%-.64s' v '%-.64s'" - spa "La columna '%-.64s' en %s es desconocida" - swe "Oknd kolumn '%-.64s' i %s" - ukr "צ '%-.64s' '%-.64s'" + cze "Nezn-Bm sloupec '%-.64s' v %s" + dan "Ukendt kolonne '%-.64s' i tabel %s" + nla "Onbekende kolom '%-.64s' in %s" + eng "Unknown column '%-.64s' in '%-.64s'" + jps "'%-.64s' column '%-.64s' ɂ͂܂.", + est "Tundmatu tulp '%-.64s' '%-.64s'-s" + fre "Champ '%-.64s' inconnu dans %s" + ger "Unbekanntes Tabellenfeld '%-.64s' in %-.64s" + greek " '%-.64s' '%-.64s'" + hun "A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben" + ita "Colonna sconosciuta '%-.64s' in '%-.64s'" + jpn "'%-.64s' column '%-.64s' ˤϤޤ." 
+ kor "Unknown Į '%-.64s' in '%-.64s'" + nor "Ukjent kolonne '%-.64s' i tabell %s" + norwegian-ny "Ukjent felt '%-.64s' i tabell %s" + pol "Nieznana kolumna '%-.64s' w %s" + por "Coluna '%-.64s' desconhecida em '%-.64s'" + rum "Coloana invalida '%-.64s' in '%-.64s'" + rus " '%-.64s' '%-.64s'" + serbian "Nepoznata kolona '%-.64s' u '%-.64s'" + slo "Neznme pole '%-.64s' v '%-.64s'" + spa "La columna '%-.64s' en %s es desconocida" + swe "Oknd kolumn '%-.64s' i %s" + ukr "צ '%-.64s' '%-.64s'" ER_WRONG_FIELD_WITH_GROUP 42000 S1009 - cze "Pou-Bit '%-.64s' nebylo v group by" - dan "Brugte '%-.64s' som ikke var i group by" - nla "Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt" - eng "'%-.64s' isn't in GROUP BY" - jps "'%-.64s' isn't in GROUP BY", - est "'%-.64s' puudub GROUP BY klauslis" - fre "'%-.64s' n'est pas dans 'group by'" - ger "'%-.64s' ist nicht in GROUP BY vorhanden" - greek " '%-.64s' group by" - hun "Used '%-.64s' with wasn't in group by" - ita "Usato '%-.64s' che non e` nel GROUP BY" - kor "'%-.64s' GROUP BYӿ " - nor "Brukte '%-.64s' som ikke var i group by" - norwegian-ny "Brukte '%-.64s' som ikkje var i group by" - pol "Uyto '%-.64s' bez umieszczenia w group by" - por "'%-.64s' no est em 'GROUP BY'" - rum "'%-.64s' nu exista in clauza GROUP BY" - rus "'%-.64s' GROUP BY" - serbian "Entitet '%-.64s' nije naveden u komandi 'GROUP BY'" - slo "Pouit '%-.64s' nebolo v 'group by'" - spa "Usado '%-.64s' el cual no esta group by" - swe "'%-.64s' finns inte i GROUP BY" - ukr "'%-.64s' GROUP BY" + cze "Pou-Bit '%-.64s' nebylo v group by" + dan "Brugte '%-.64s' som ikke var i group by" + nla "Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt" + eng "'%-.64s' isn't in GROUP BY" + jps "'%-.64s' isn't in GROUP BY", + est "'%-.64s' puudub GROUP BY klauslis" + fre "'%-.64s' n'est pas dans 'group by'" + ger "'%-.64s' ist nicht in GROUP BY vorhanden" + greek " '%-.64s' group by" + hun "Used '%-.64s' with wasn't in group by" + ita "Usato '%-.64s' che non e` nel GROUP BY" + kor "'%-.64s' GROUP BYӿ " + nor "Brukte '%-.64s' som ikke var i group by" + norwegian-ny "Brukte '%-.64s' som ikkje var i group by" + pol "Uyto '%-.64s' bez umieszczenia w group by" + por "'%-.64s' no est em 'GROUP BY'" + rum "'%-.64s' nu exista in clauza GROUP BY" + rus "'%-.64s' GROUP BY" + serbian "Entitet '%-.64s' nije naveden u komandi 'GROUP BY'" + slo "Pouit '%-.64s' nebolo v 'group by'" + spa "Usado '%-.64s' el cual no esta group by" + swe "'%-.64s' finns inte i GROUP BY" + ukr "'%-.64s' GROUP BY" ER_WRONG_GROUP_FIELD 42000 S1009 - cze "Nemohu pou-Bt group na '%-.64s'" - dan "Kan ikke gruppere p '%-.64s'" - nla "Kan '%-.64s' niet groeperen" - eng "Can't group on '%-.64s'" - est "Ei saa grupeerida '%-.64s' jrgi" - fre "Ne peut regrouper '%-.64s'" - ger "Gruppierung ber '%-.64s' nicht mglich" - greek " (group on) '%-.64s'" - hun "A group nem hasznalhato: '%-.64s'" - ita "Impossibile raggruppare per '%-.64s'" - kor "'%-.64s' ׷ " - nor "Kan ikke gruppere p '%-.64s'" - norwegian-ny "Kan ikkje gruppere p '%-.64s'" - pol "Nie mona grupowa po '%-.64s'" - por "No pode agrupar em '%-.64s'" - rum "Nu pot sa grupez pe (group on) '%-.64s'" - rus " '%-.64s'" - serbian "Ne mogu da grupiem po '%-.64s'" - slo "Nemem poui 'group' na '%-.64s'" - spa "No puedo agrupar por '%-.64s'" - swe "Kan inte anvnda GROUP BY med '%-.64s'" - ukr " '%-.64s'" + cze "Nemohu pou-Bt group na '%-.64s'" + dan "Kan ikke gruppere p '%-.64s'" + nla "Kan '%-.64s' niet groeperen" + eng "Can't group on '%-.64s'" + est "Ei saa grupeerida '%-.64s' 
jrgi" + fre "Ne peut regrouper '%-.64s'" + ger "Gruppierung ber '%-.64s' nicht mglich" + greek " (group on) '%-.64s'" + hun "A group nem hasznalhato: '%-.64s'" + ita "Impossibile raggruppare per '%-.64s'" + kor "'%-.64s' ׷ " + nor "Kan ikke gruppere p '%-.64s'" + norwegian-ny "Kan ikkje gruppere p '%-.64s'" + pol "Nie mona grupowa po '%-.64s'" + por "No pode agrupar em '%-.64s'" + rum "Nu pot sa grupez pe (group on) '%-.64s'" + rus " '%-.64s'" + serbian "Ne mogu da grupiem po '%-.64s'" + slo "Nemem poui 'group' na '%-.64s'" + spa "No puedo agrupar por '%-.64s'" + swe "Kan inte anvnda GROUP BY med '%-.64s'" + ukr " '%-.64s'" ER_WRONG_SUM_SELECT 42000 S1009 - cze "P-Bkaz obsahuje zrove funkci sum a sloupce" - dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk" - nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht" - eng "Statement has sum functions and columns in same statement" - est "Lauses on korraga nii tulbad kui summeerimisfunktsioonid" - fre "Vous demandez la fonction sum() et des champs dans la mme commande" - ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt" - greek " sum functions columns " - ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY" - kor "Statement sum ̰ Į statementԴϴ." - nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk" - norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk" - pol "Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu" - por "Clusula contm funes de soma e colunas juntas" - rum "Comanda are functii suma si coloane in aceeasi comanda" - rus " , GROUP BY. ?" - serbian "Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme" - slo "Prkaz obsahuje zrove funkciu 'sum' a poa" - spa "El estamento tiene funciones de suma y columnas en el mismo estamento" - swe "Kommandot har bde sum functions och enkla funktioner" - ukr " ڦ Цަ æ æ" + cze "P-Bkaz obsahuje zrove funkci sum a sloupce" + dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk" + nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht" + eng "Statement has sum functions and columns in same statement" + est "Lauses on korraga nii tulbad kui summeerimisfunktsioonid" + fre "Vous demandez la fonction sum() et des champs dans la mme commande" + ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt" + greek " sum functions columns " + ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY" + kor "Statement sum ̰ Į statementԴϴ." + nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk" + norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk" + pol "Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu" + por "Clusula contm funes de soma e colunas juntas" + rum "Comanda are functii suma si coloane in aceeasi comanda" + rus " , GROUP BY. ?" 
+ serbian "Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme" + slo "Prkaz obsahuje zrove funkciu 'sum' a poa" + spa "El estamento tiene funciones de suma y columnas en el mismo estamento" + swe "Kommandot har bde sum functions och enkla funktioner" + ukr " ڦ Цަ æ æ" ER_WRONG_VALUE_COUNT 21S01 - cze "Po-Bet sloupc neodpovd zadan hodnot" - dan "Kolonne tller stemmer ikke med antallet af vrdier" - nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes" - eng "Column count doesn't match value count" - est "Tulpade arv erineb vrtuste arvust" - ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte" - greek " Column count value count" - hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel" - ita "Il numero delle colonne non e` uguale al numero dei valori" - kor "Į īƮ īƮ ġ ʽϴ." - nor "Felt telling stemmer verdi telling" - norwegian-ny "Kolonne telling stemmer verdi telling" - pol "Liczba kolumn nie odpowiada liczbie warto?ci" - por "Contagem de colunas no confere com a contagem de valores" - rum "Numarul de coloane nu este acelasi cu numarul valoarei" - rus " " - serbian "Broj kolona ne odgovara broju vrednosti" - slo "Poet pol nezodpoved zadanej hodnote" - spa "La columna con count no tiene valores para contar" - swe "Antalet kolumner motsvarar inte antalet vrden" - ukr "˦ æ Ц ˦˦ " + cze "Po-Bet sloupc neodpovd zadan hodnot" + dan "Kolonne tller stemmer ikke med antallet af vrdier" + nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes" + eng "Column count doesn't match value count" + est "Tulpade arv erineb vrtuste arvust" + ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte" + greek " Column count value count" + hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel" + ita "Il numero delle colonne non e` uguale al numero dei valori" + kor "Į īƮ īƮ ġ ʽϴ." + nor "Felt telling stemmer verdi telling" + norwegian-ny "Kolonne telling stemmer verdi telling" + pol "Liczba kolumn nie odpowiada liczbie warto?ci" + por "Contagem de colunas no confere com a contagem de valores" + rum "Numarul de coloane nu este acelasi cu numarul valoarei" + rus " " + serbian "Broj kolona ne odgovara broju vrednosti" + slo "Poet pol nezodpoved zadanej hodnote" + spa "La columna con count no tiene valores para contar" + swe "Antalet kolumner motsvarar inte antalet vrden" + ukr "˦ æ Ц ˦˦ " ER_TOO_LONG_IDENT 42000 S1009 - cze "Jm-Bno identifiktoru '%-.64s' je pli dlouh" - dan "Navnet '%-.64s' er for langt" - nla "Naam voor herkenning '%-.64s' is te lang" - eng "Identifier name '%-.100s' is too long" - jps "Identifier name '%-.100s' ͒܂", - est "Identifikaatori '%-.100s' nimi on liiga pikk" - fre "Le nom de l'identificateur '%-.64s' est trop long" - ger "Name des Bezeichners '%-.100s' ist zu lang" - greek " identifier name '%-.100s' " - hun "A(z) '%-.100s' azonositonev tul hosszu." - ita "Il nome dell'identificatore '%-.100s' e` troppo lungo" - jpn "Identifier name '%-.100s' Ĺޤ" - kor "Identifier '%-.100s' ʹ 決." 
- nor "Identifikator '%-.64s' er for lang" - norwegian-ny "Identifikator '%-.64s' er for lang" - pol "Nazwa identyfikatora '%-.64s' jest zbyt duga" - por "Nome identificador '%-.100s' longo demais" - rum "Numele indentificatorului '%-.100s' este prea lung" - rus " '%-.100s'" - serbian "Ime '%-.100s' je predugako" - slo "Meno identifiktora '%-.100s' je prli dlh" - spa "El nombre del identificador '%-.64s' es demasiado grande" - swe "Kolumnnamn '%-.64s' r fr lngt" - ukr "' Ʀ '%-.100s' " + cze "Jm-Bno identifiktoru '%-.64s' je pli dlouh" + dan "Navnet '%-.64s' er for langt" + nla "Naam voor herkenning '%-.64s' is te lang" + eng "Identifier name '%-.100s' is too long" + jps "Identifier name '%-.100s' ͒܂", + est "Identifikaatori '%-.100s' nimi on liiga pikk" + fre "Le nom de l'identificateur '%-.64s' est trop long" + ger "Name des Bezeichners '%-.100s' ist zu lang" + greek " identifier name '%-.100s' " + hun "A(z) '%-.100s' azonositonev tul hosszu." + ita "Il nome dell'identificatore '%-.100s' e` troppo lungo" + jpn "Identifier name '%-.100s' Ĺޤ" + kor "Identifier '%-.100s' ʹ 決." + nor "Identifikator '%-.64s' er for lang" + norwegian-ny "Identifikator '%-.64s' er for lang" + pol "Nazwa identyfikatora '%-.64s' jest zbyt duga" + por "Nome identificador '%-.100s' longo demais" + rum "Numele indentificatorului '%-.100s' este prea lung" + rus " '%-.100s'" + serbian "Ime '%-.100s' je predugako" + slo "Meno identifiktora '%-.100s' je prli dlh" + spa "El nombre del identificador '%-.64s' es demasiado grande" + swe "Kolumnnamn '%-.64s' r fr lngt" + ukr "' Ʀ '%-.100s' " ER_DUP_FIELDNAME 42S21 S1009 - cze "Zdvojen-B jmno sloupce '%-.64s'" - dan "Feltnavnet '%-.64s' findes allerede" - nla "Dubbele kolom naam '%-.64s'" - eng "Duplicate column name '%-.64s'" - jps "'%-.64s' Ƃ column ͏dĂ܂", - est "Kattuv tulba nimi '%-.64s'" - fre "Nom du champ '%-.64s' dj utilis" - ger "Doppelter Spaltenname: '%-.64s'" - greek " column name '%-.64s'" - hun "Duplikalt oszlopazonosito: '%-.64s'" - ita "Nome colonna duplicato '%-.64s'" - jpn "'%-.64s' Ȥ column ̾ϽʣƤޤ" - kor "ߺ Į ̸: '%-.64s'" - nor "Feltnavnet '%-.64s' eksisterte fra fr" - norwegian-ny "Feltnamnet '%-.64s' eksisterte fr fr" - pol "Powtrzona nazwa kolumny '%-.64s'" - por "Nome da coluna '%-.64s' duplicado" - rum "Numele coloanei '%-.64s' e duplicat" - rus " '%-.64s'" - serbian "Duplirano ime kolone '%-.64s'" - slo "Opakovan meno poa '%-.64s'" - spa "Nombre de columna duplicado '%-.64s'" - swe "Kolumnnamn '%-.64s finns flera gnger" - ukr " ' '%-.64s'" + cze "Zdvojen-B jmno sloupce '%-.64s'" + dan "Feltnavnet '%-.64s' findes allerede" + nla "Dubbele kolom naam '%-.64s'" + eng "Duplicate column name '%-.64s'" + jps "'%-.64s' Ƃ column ͏dĂ܂", + est "Kattuv tulba nimi '%-.64s'" + fre "Nom du champ '%-.64s' dj utilis" + ger "Doppelter Spaltenname: '%-.64s'" + greek " column name '%-.64s'" + hun "Duplikalt oszlopazonosito: '%-.64s'" + ita "Nome colonna duplicato '%-.64s'" + jpn "'%-.64s' Ȥ column ̾ϽʣƤޤ" + kor "ߺ Į ̸: '%-.64s'" + nor "Feltnavnet '%-.64s' eksisterte fra fr" + norwegian-ny "Feltnamnet '%-.64s' eksisterte fr fr" + pol "Powtrzona nazwa kolumny '%-.64s'" + por "Nome da coluna '%-.64s' duplicado" + rum "Numele coloanei '%-.64s' e duplicat" + rus " '%-.64s'" + serbian "Duplirano ime kolone '%-.64s'" + slo "Opakovan meno poa '%-.64s'" + spa "Nombre de columna duplicado '%-.64s'" + swe "Kolumnnamn '%-.64s finns flera gnger" + ukr " ' '%-.64s'" ER_DUP_KEYNAME 42000 S1009 - cze "Zdvojen-B jmno kle '%-.64s'" - dan "Indeksnavnet '%-.64s' findes allerede" - nla "Dubbele 
zoeksleutel naam '%-.64s'" - eng "Duplicate key name '%-.64s'" - jps "'%-.64s' Ƃ key ̖O͏dĂ܂", - est "Kattuv vtme nimi '%-.64s'" - fre "Nom de clef '%-.64s' dj utilis" - ger "Doppelter Name fr Schlssel vorhanden: '%-.64s'" - greek " key name '%-.64s'" - hun "Duplikalt kulcsazonosito: '%-.64s'" - ita "Nome chiave duplicato '%-.64s'" - jpn "'%-.64s' Ȥ key ̾ϽʣƤޤ" - kor "ߺ Ű ̸ : '%-.64s'" - nor "Nkkelnavnet '%-.64s' eksisterte fra fr" - norwegian-ny "Nkkelnamnet '%-.64s' eksisterte fr fr" - pol "Powtrzony nazwa klucza '%-.64s'" - por "Nome da chave '%-.64s' duplicado" - rum "Numele cheiei '%-.64s' e duplicat" - rus " '%-.64s'" - serbian "Duplirano ime kljua '%-.64s'" - slo "Opakovan meno ka '%-.64s'" - spa "Nombre de clave duplicado '%-.64s'" - swe "Nyckelnamn '%-.64s' finns flera gnger" - ukr " ' '%-.64s'" + cze "Zdvojen-B jmno kle '%-.64s'" + dan "Indeksnavnet '%-.64s' findes allerede" + nla "Dubbele zoeksleutel naam '%-.64s'" + eng "Duplicate key name '%-.64s'" + jps "'%-.64s' Ƃ key ̖O͏dĂ܂", + est "Kattuv vtme nimi '%-.64s'" + fre "Nom de clef '%-.64s' dj utilis" + ger "Doppelter Name fr Schlssel vorhanden: '%-.64s'" + greek " key name '%-.64s'" + hun "Duplikalt kulcsazonosito: '%-.64s'" + ita "Nome chiave duplicato '%-.64s'" + jpn "'%-.64s' Ȥ key ̾ϽʣƤޤ" + kor "ߺ Ű ̸ : '%-.64s'" + nor "Nkkelnavnet '%-.64s' eksisterte fra fr" + norwegian-ny "Nkkelnamnet '%-.64s' eksisterte fr fr" + pol "Powtrzony nazwa klucza '%-.64s'" + por "Nome da chave '%-.64s' duplicado" + rum "Numele cheiei '%-.64s' e duplicat" + rus " '%-.64s'" + serbian "Duplirano ime kljua '%-.64s'" + slo "Opakovan meno ka '%-.64s'" + spa "Nombre de clave duplicado '%-.64s'" + swe "Nyckelnamn '%-.64s' finns flera gnger" + ukr " ' '%-.64s'" ER_DUP_ENTRY 23000 S1009 - cze "Zvojen-B kl '%-.64s' (slo kle '%-.64s')" - dan "Ens vrdier '%-.64s' for indeks '%-.64s'" - nla "Dubbele ingang '%-.64s' voor zoeksleutel '%-.64s'" - eng "Duplicate entry '%-.64s' for key '%-.64s'" - jps "'%-.64s' key '%-.64s' ɂďdĂ܂", - est "Kattuv vrtus '%-.64s' vtmele '%-.64s'" - fre "Duplicata du champ '%-.64s' pour la clef '%-.64s'" - ger "Doppelter Eintrag '%-.64s' fr Schlssel '%-.64s'" - greek " '%-.64s' '%-.64s'" - hun "Duplikalt bejegyzes '%-.64s' a '%-.64s' kulcs szerint." - ita "Valore duplicato '%-.64s' per la chiave '%-.64s'" - jpn "'%-.64s' key '%-.64s' ˤƽʣƤޤ" - kor "ߺ Է '%-.64s': key '%-.64s'" - nor "Like verdier '%-.64s' for nkkel '%-.64s'" - norwegian-ny "Like verdiar '%-.64s' for nykkel '%-.64s'" - pol "Powtrzone wyst?pienie '%-.64s' dla klucza '%-.64s'" - por "Entrada '%-.64s' duplicada para a chave '%-.64s'" - rum "Cimpul '%-.64s' e duplicat pentru cheia '%-.64s'" - rus " '%-.64s' '%-.64s'" - serbian "Dupliran unos '%-.64s' za klju '%-.64s'" - slo "Opakovan k '%-.64s' (slo ka '%-.64s')" - spa "Entrada duplicada '%-.64s' para la clave '%-.64s'" - swe "Dubbel nyckel '%-.64s' fr nyckel '%-.64s'" - ukr " '%-.64s' '%-.64s'" + cze "Zvojen-B kl '%-.64s' (slo kle '%-.64s')" + dan "Ens vrdier '%-.64s' for indeks '%-.64s'" + nla "Dubbele ingang '%-.64s' voor zoeksleutel '%-.64s'" + eng "Duplicate entry '%-.64s' for key '%-.64s'" + jps "'%-.64s' key '%-.64s' ɂďdĂ܂", + est "Kattuv vrtus '%-.64s' vtmele '%-.64s'" + fre "Duplicata du champ '%-.64s' pour la clef '%-.64s'" + ger "Doppelter Eintrag '%-.64s' fr Schlssel '%-.64s'" + greek " '%-.64s' '%-.64s'" + hun "Duplikalt bejegyzes '%-.64s' a '%-.64s' kulcs szerint." 
+ ita "Valore duplicato '%-.64s' per la chiave '%-.64s'" + jpn "'%-.64s' key '%-.64s' ˤƽʣƤޤ" + kor "ߺ Է '%-.64s': key '%-.64s'" + nor "Like verdier '%-.64s' for nkkel '%-.64s'" + norwegian-ny "Like verdiar '%-.64s' for nykkel '%-.64s'" + pol "Powtrzone wyst?pienie '%-.64s' dla klucza '%-.64s'" + por "Entrada '%-.64s' duplicada para a chave '%-.64s'" + rum "Cimpul '%-.64s' e duplicat pentru cheia '%-.64s'" + rus " '%-.64s' '%-.64s'" + serbian "Dupliran unos '%-.64s' za klju '%-.64s'" + slo "Opakovan k '%-.64s' (slo ka '%-.64s')" + spa "Entrada duplicada '%-.64s' para la clave '%-.64s'" + swe "Dubbel nyckel '%-.64s' fr nyckel '%-.64s'" + ukr " '%-.64s' '%-.64s'" ER_WRONG_FIELD_SPEC 42000 S1009 - cze "Chybn-B specifikace sloupce '%-.64s'" - dan "Forkert kolonnespecifikaton for felt '%-.64s'" - nla "Verkeerde kolom specificatie voor kolom '%-.64s'" - eng "Incorrect column specifier for column '%-.64s'" - est "Vigane tulba kirjeldus tulbale '%-.64s'" - fre "Mauvais paramtre de champ pour le champ '%-.64s'" - ger "Falsche Spezifikation fr Feld '%-.64s'" - greek " column specifier '%-.64s'" - hun "Rossz oszlopazonosito: '%-.64s'" - ita "Specifica errata per la colonna '%-.64s'" - kor "Į '%-.64s' Ȯ Į " - nor "Feil kolonne spesifikator for felt '%-.64s'" - norwegian-ny "Feil kolonne spesifikator for kolonne '%-.64s'" - pol "Bdna specyfikacja kolumny dla kolumny '%-.64s'" - por "Especificador de coluna incorreto para a coluna '%-.64s'" - rum "Specificandul coloanei '%-.64s' este incorect" - rus " '%-.64s'" - serbian "Pogrean naziv kolone za kolonu '%-.64s'" - slo "Chyba v pecifikcii poa '%-.64s'" - spa "Especificador de columna erroneo para la columna '%-.64s'" - swe "Felaktigt kolumntyp fr kolumn '%-.64s'" - ukr "צ Ʀ '%-.64s'" + cze "Chybn-B specifikace sloupce '%-.64s'" + dan "Forkert kolonnespecifikaton for felt '%-.64s'" + nla "Verkeerde kolom specificatie voor kolom '%-.64s'" + eng "Incorrect column specifier for column '%-.64s'" + est "Vigane tulba kirjeldus tulbale '%-.64s'" + fre "Mauvais paramtre de champ pour le champ '%-.64s'" + ger "Falsche Spezifikation fr Feld '%-.64s'" + greek " column specifier '%-.64s'" + hun "Rossz oszlopazonosito: '%-.64s'" + ita "Specifica errata per la colonna '%-.64s'" + kor "Į '%-.64s' Ȯ Į " + nor "Feil kolonne spesifikator for felt '%-.64s'" + norwegian-ny "Feil kolonne spesifikator for kolonne '%-.64s'" + pol "Bdna specyfikacja kolumny dla kolumny '%-.64s'" + por "Especificador de coluna incorreto para a coluna '%-.64s'" + rum "Specificandul coloanei '%-.64s' este incorect" + rus " '%-.64s'" + serbian "Pogrean naziv kolone za kolonu '%-.64s'" + slo "Chyba v pecifikcii poa '%-.64s'" + spa "Especificador de columna erroneo para la columna '%-.64s'" + swe "Felaktigt kolumntyp fr kolumn '%-.64s'" + ukr "צ Ʀ '%-.64s'" ER_PARSE_ERROR 42000 - cze "%s bl-Bzko '%-.64s' na dku %d" - dan "%s nr '%-.64s' p linje %d" - nla "%s bij '%-.64s' in regel %d" - eng "%s near '%-.80s' at line %d" - jps "%s : '%-.80s' t : %d s", - est "%s '%-.80s' ligidal real %d" - fre "%s prs de '%-.64s' la ligne %d" - ger "%s bei '%-.80s' in Zeile %d" - greek "%s '%-.80s' %d" - hun "A %s a '%-.80s'-hez kozeli a %d sorban" - ita "%s vicino a '%-.80s' linea %d" - jpn "%s : '%-.80s' ն : %d " - kor "'%-.64s' ϴ. 
('%-.80s' ɾ %d)" - nor "%s nr '%-.64s' p linje %d" - norwegian-ny "%s attmed '%-.64s' p line %d" - pol "%s obok '%-.64s' w linii %d" - por "%s prximo a '%-.80s' na linha %d" - rum "%s linga '%-.80s' pe linia %d" - rus "%s '%-.80s' %d" - serbian "'%s' u iskazu '%-.80s' na liniji %d" - slo "%s blzko '%-.80s' na riadku %d" - spa "%s cerca '%-.64s' en la linea %d" - swe "%s nra '%-.64s' p rad %d" - ukr "%s ¦ '%-.80s' æ %d" + cze "%s bl-Bzko '%-.64s' na dku %d" + dan "%s nr '%-.64s' p linje %d" + nla "%s bij '%-.64s' in regel %d" + eng "%s near '%-.80s' at line %d" + jps "%s : '%-.80s' t : %d s", + est "%s '%-.80s' ligidal real %d" + fre "%s prs de '%-.64s' la ligne %d" + ger "%s bei '%-.80s' in Zeile %d" + greek "%s '%-.80s' %d" + hun "A %s a '%-.80s'-hez kozeli a %d sorban" + ita "%s vicino a '%-.80s' linea %d" + jpn "%s : '%-.80s' ն : %d " + kor "'%-.64s' ϴ. ('%-.80s' ɾ %d)" + nor "%s nr '%-.64s' p linje %d" + norwegian-ny "%s attmed '%-.64s' p line %d" + pol "%s obok '%-.64s' w linii %d" + por "%s prximo a '%-.80s' na linha %d" + rum "%s linga '%-.80s' pe linia %d" + rus "%s '%-.80s' %d" + serbian "'%s' u iskazu '%-.80s' na liniji %d" + slo "%s blzko '%-.80s' na riadku %d" + spa "%s cerca '%-.64s' en la linea %d" + swe "%s nra '%-.64s' p rad %d" + ukr "%s ¦ '%-.80s' æ %d" ER_EMPTY_QUERY 42000 - cze "V-Bsledek dotazu je przdn" - dan "Foresprgsel var tom" - nla "Query was leeg" - eng "Query was empty" - jps "Query ł.", - est "Thi pring" - fre "Query est vide" - ger "Leere Abfrage" - greek " (query) " - hun "Ures lekerdezes." - ita "La query e` vuota" - jpn "Query Ǥ." - kor " ϴ." - nor "Foresprsel var tom" - norwegian-ny "Frespurnad var tom" - pol "Zapytanie byo puste" - por "Consulta (query) estava vazia" - rum "Query-ul a fost gol" - rus " " - serbian "Upit je bio prazan" - slo "Vsledok poiadavky bol przdny" - spa "La query estaba vacia" - swe "Frgan var tom" - ukr " " + cze "V-Bsledek dotazu je przdn" + dan "Foresprgsel var tom" + nla "Query was leeg" + eng "Query was empty" + jps "Query ł.", + est "Thi pring" + fre "Query est vide" + ger "Leere Abfrage" + greek " (query) " + hun "Ures lekerdezes." + ita "La query e` vuota" + jpn "Query Ǥ." + kor " ϴ." + nor "Foresprsel var tom" + norwegian-ny "Frespurnad var tom" + pol "Zapytanie byo puste" + por "Consulta (query) estava vazia" + rum "Query-ul a fost gol" + rus " " + serbian "Upit je bio prazan" + slo "Vsledok poiadavky bol przdny" + spa "La query estaba vacia" + swe "Frgan var tom" + ukr " " ER_NONUNIQ_TABLE 42000 S1009 - cze "Nejednozna-Bn tabulka/alias: '%-.64s'" - dan "Tabellen/aliaset: '%-.64s' er ikke unikt" - nla "Niet unieke waarde tabel/alias: '%-.64s'" - eng "Not unique table/alias: '%-.64s'" - jps "'%-.64s' ͈ӂ table/alias ł͂܂", - est "Ei ole unikaalne tabel/alias '%-.64s'" - fre "Table/alias: '%-.64s' non unique" - ger "Tabellenname/Alias '%-.64s' nicht eindeutig" - greek " unique table/alias: '%-.64s'" - hun "Nem egyedi tabla/alias: '%-.64s'" - ita "Tabella/alias non unico: '%-.64s'" - jpn "'%-.64s' ϰդ table/alias ̾ǤϤޤ" - kor "Unique ̺/alias: '%-.64s'" - nor "Ikke unikt tabell/alias: '%-.64s'" - norwegian-ny "Ikkje unikt tabell/alias: '%-.64s'" - pol "Tabela/alias nie s? 
unikalne: '%-.64s'" - por "Tabela/alias '%-.64s' no nica" - rum "Tabela/alias: '%-.64s' nu este unic" - rus " / '%-.64s'" - serbian "Tabela ili alias nisu bili jedinstveni: '%-.64s'" - slo "Nie jednoznan tabuka/alias: '%-.64s'" - spa "Tabla/alias: '%-.64s' es no unica" - swe "Icke unikt tabell/alias: '%-.64s'" - ukr "Φ /Φ: '%-.64s'" + cze "Nejednozna-Bn tabulka/alias: '%-.64s'" + dan "Tabellen/aliaset: '%-.64s' er ikke unikt" + nla "Niet unieke waarde tabel/alias: '%-.64s'" + eng "Not unique table/alias: '%-.64s'" + jps "'%-.64s' ͈ӂ table/alias ł͂܂", + est "Ei ole unikaalne tabel/alias '%-.64s'" + fre "Table/alias: '%-.64s' non unique" + ger "Tabellenname/Alias '%-.64s' nicht eindeutig" + greek " unique table/alias: '%-.64s'" + hun "Nem egyedi tabla/alias: '%-.64s'" + ita "Tabella/alias non unico: '%-.64s'" + jpn "'%-.64s' ϰդ table/alias ̾ǤϤޤ" + kor "Unique ̺/alias: '%-.64s'" + nor "Ikke unikt tabell/alias: '%-.64s'" + norwegian-ny "Ikkje unikt tabell/alias: '%-.64s'" + pol "Tabela/alias nie s? unikalne: '%-.64s'" + por "Tabela/alias '%-.64s' no nica" + rum "Tabela/alias: '%-.64s' nu este unic" + rus " / '%-.64s'" + serbian "Tabela ili alias nisu bili jedinstveni: '%-.64s'" + slo "Nie jednoznan tabuka/alias: '%-.64s'" + spa "Tabla/alias: '%-.64s' es no unica" + swe "Icke unikt tabell/alias: '%-.64s'" + ukr "Φ /Φ: '%-.64s'" ER_INVALID_DEFAULT 42000 S1009 - cze "Chybn-B defaultn hodnota pro '%-.64s'" - dan "Ugyldig standardvrdi for '%-.64s'" - nla "Foutieve standaard waarde voor '%-.64s'" - eng "Invalid default value for '%-.64s'" - est "Vigane vaikevrtus '%-.64s' jaoks" - fre "Valeur par dfaut invalide pour '%-.64s'" - ger "Fehlerhafter Vorgabewert (DEFAULT) fr '%-.64s'" - greek " (default value) '%-.64s'" - hun "Ervenytelen ertek: '%-.64s'" - ita "Valore di default non valido per '%-.64s'" - kor "'%-.64s' ȿ Ʈ ϼ̽ϴ." - nor "Ugyldig standardverdi for '%-.64s'" - norwegian-ny "Ugyldig standardverdi for '%-.64s'" - pol "Niewa?ciwa warto? domy?lna dla '%-.64s'" - por "Valor padro (default) invlido para '%-.64s'" - rum "Valoarea de default este invalida pentru '%-.64s'" - rus " '%-.64s'" - serbian "Loa default vrednost za '%-.64s'" - slo "Chybn implicitn hodnota pre '%-.64s'" - spa "Valor por defecto invalido para '%-.64s'" - swe "Ogiltigt DEFAULT vrde fr '%-.64s'" - ukr "צ '%-.64s'" + cze "Chybn-B defaultn hodnota pro '%-.64s'" + dan "Ugyldig standardvrdi for '%-.64s'" + nla "Foutieve standaard waarde voor '%-.64s'" + eng "Invalid default value for '%-.64s'" + est "Vigane vaikevrtus '%-.64s' jaoks" + fre "Valeur par dfaut invalide pour '%-.64s'" + ger "Fehlerhafter Vorgabewert (DEFAULT) fr '%-.64s'" + greek " (default value) '%-.64s'" + hun "Ervenytelen ertek: '%-.64s'" + ita "Valore di default non valido per '%-.64s'" + kor "'%-.64s' ȿ Ʈ ϼ̽ϴ." + nor "Ugyldig standardverdi for '%-.64s'" + norwegian-ny "Ugyldig standardverdi for '%-.64s'" + pol "Niewa?ciwa warto? 
domy?lna dla '%-.64s'" + por "Valor padro (default) invlido para '%-.64s'" + rum "Valoarea de default este invalida pentru '%-.64s'" + rus " '%-.64s'" + serbian "Loa default vrednost za '%-.64s'" + slo "Chybn implicitn hodnota pre '%-.64s'" + spa "Valor por defecto invalido para '%-.64s'" + swe "Ogiltigt DEFAULT vrde fr '%-.64s'" + ukr "צ '%-.64s'" ER_MULTIPLE_PRI_KEY 42000 S1009 - cze "Definov-Bno vce primrnch kl" - dan "Flere primrngler specificeret" - nla "Meerdere primaire zoeksleutels gedefinieerd" - eng "Multiple primary key defined" - jps " primary key `܂", - est "Mitut primaarset vtit ei saa olla" - fre "Plusieurs clefs primaires dfinies" - ger "Mehrere Primrschlssel (PRIMARY KEY) definiert" - greek " primary key " - hun "Tobbszoros elsodleges kulcs definialas." - ita "Definite piu` chiave primarie" - jpn "ʣ primary key ޤ" - kor "Multiple primary key ǵǾ ֽ" - nor "Fleire primrnkle spesifisert" - norwegian-ny "Fleire primrnyklar spesifisert" - pol "Zdefiniowano wiele kluczy podstawowych" - por "Definida mais de uma chave primria" - rum "Chei primare definite de mai multe ori" - rus " " - serbian "Definisani viestruki primarni kljuevi" - slo "Zadefinovanch viac primrnych kov" - spa "Multiples claves primarias definidas" - swe "Flera PRIMARY KEY anvnda" - ukr " " + cze "Definov-Bno vce primrnch kl" + dan "Flere primrngler specificeret" + nla "Meerdere primaire zoeksleutels gedefinieerd" + eng "Multiple primary key defined" + jps " primary key `܂", + est "Mitut primaarset vtit ei saa olla" + fre "Plusieurs clefs primaires dfinies" + ger "Mehrere Primrschlssel (PRIMARY KEY) definiert" + greek " primary key " + hun "Tobbszoros elsodleges kulcs definialas." + ita "Definite piu` chiave primarie" + jpn "ʣ primary key ޤ" + kor "Multiple primary key ǵǾ ֽ" + nor "Fleire primrnkle spesifisert" + norwegian-ny "Fleire primrnyklar spesifisert" + pol "Zdefiniowano wiele kluczy podstawowych" + por "Definida mais de uma chave primria" + rum "Chei primare definite de mai multe ori" + rus " " + serbian "Definisani viestruki primarni kljuevi" + slo "Zadefinovanch viac primrnych kov" + spa "Multiples claves primarias definidas" + swe "Flera PRIMARY KEY anvnda" + ukr " " ER_TOO_MANY_KEYS 42000 S1009 - cze "Zad-Bno pli mnoho kl, je povoleno nejvce %d kl" - dan "For mange ngler specificeret. Kun %d ngler m bruges" - nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan" - eng "Too many keys specified; max %d keys allowed" - jps "key ̎w肪܂. key ͍ő %d ܂łł", - est "Liiga palju vtmeid. Maksimaalselt vib olla %d vtit" - fre "Trop de clefs sont dfinies. Maximum de %d clefs allou" - ger "Zu viele Schlssel definiert. Maximal %d Schlssel erlaubt" - greek " key . %d " - hun "Tul sok kulcs. Maximum %d kulcs engedelyezett." - ita "Troppe chiavi. Sono ammesse max %d chiavi" - jpn "key λ꤬¿ޤ. key Ϻ %d ޤǤǤ" - kor "ʹ Ű ǵǾ ϴ.. ִ %d Ű " - nor "For mange nkler spesifisert. Maks %d nkler tillatt" - norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt" - pol "Okre?lono zbyt wiele kluczy. Dostpnych jest maksymalnie %d kluczy" - por "Especificadas chaves demais. O mximo permitido so %d chaves" - rum "Prea multe chei. Numarul de chei maxim este %d" - rus " . %d " - serbian "Navedeno je previe kljueva. Maksimum %d kljueva je dozvoljeno" - slo "Zadanch rli vea kov. Najviac %d kov je povolench" - spa "Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas" - swe "Fr mnga nycklar anvnda. Man fr ha hgst %d nycklar" - ukr " ަ . 
¦ %d ަ" + cze "Zad-Bno pli mnoho kl, je povoleno nejvce %d kl" + dan "For mange ngler specificeret. Kun %d ngler m bruges" + nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan" + eng "Too many keys specified; max %d keys allowed" + jps "key ̎w肪܂. key ͍ő %d ܂łł", + est "Liiga palju vtmeid. Maksimaalselt vib olla %d vtit" + fre "Trop de clefs sont dfinies. Maximum de %d clefs allou" + ger "Zu viele Schlssel definiert. Maximal %d Schlssel erlaubt" + greek " key . %d " + hun "Tul sok kulcs. Maximum %d kulcs engedelyezett." + ita "Troppe chiavi. Sono ammesse max %d chiavi" + jpn "key λ꤬¿ޤ. key Ϻ %d ޤǤǤ" + kor "ʹ Ű ǵǾ ϴ.. ִ %d Ű " + nor "For mange nkler spesifisert. Maks %d nkler tillatt" + norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt" + pol "Okre?lono zbyt wiele kluczy. Dostpnych jest maksymalnie %d kluczy" + por "Especificadas chaves demais. O mximo permitido so %d chaves" + rum "Prea multe chei. Numarul de chei maxim este %d" + rus " . %d " + serbian "Navedeno je previe kljueva. Maksimum %d kljueva je dozvoljeno" + slo "Zadanch rli vea kov. Najviac %d kov je povolench" + spa "Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas" + swe "Fr mnga nycklar anvnda. Man fr ha hgst %d nycklar" + ukr " ަ . ¦ %d ަ" ER_TOO_MANY_KEY_PARTS 42000 S1009 - cze "Zad-Bno pli mnoho st kl, je povoleno nejvce %d st" - dan "For mange ngledele specificeret. Kun %d dele m bruges" - nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan" - eng "Too many key parts specified; max %d parts allowed" - est "Vti koosneb liiga paljudest osadest. Maksimaalselt vib olla %d osa" - fre "Trop de parties specifies dans la clef. Maximum de %d parties" - ger "Zu viele Teilschlssel definiert. Maximal %d Teilschlssel erlaubt" - greek " key parts . %d " - hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett" - ita "Troppe parti di chiave specificate. Sono ammesse max %d parti" - kor "ʹ Ű κ(parts) ǵǾ ϴ.. ִ %d κ " - nor "For mange nkkeldeler spesifisert. Maks %d deler tillatt" - norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt" - pol "Okre?lono zbyt wiele cz?ci klucza. Dostpnych jest maksymalnie %d cz?ci" - por "Especificadas partes de chave demais. O mximo permitido so %d partes" - rum "Prea multe chei. Numarul de chei maxim este %d" - rus " . %d " - serbian "Navedeno je previe delova kljua. Maksimum %d delova je dozvoljeno" - slo "Zadanch rli vea ast kov. Je povolench najviac %d ast" - spa "Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas" - swe "Fr mnga nyckeldelar anvnda. Man fr ha hgst %d nyckeldelar" - ukr " . ¦ %d " + cze "Zad-Bno pli mnoho st kl, je povoleno nejvce %d st" + dan "For mange ngledele specificeret. Kun %d dele m bruges" + nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan" + eng "Too many key parts specified; max %d parts allowed" + est "Vti koosneb liiga paljudest osadest. Maksimaalselt vib olla %d osa" + fre "Trop de parties specifies dans la clef. Maximum de %d parties" + ger "Zu viele Teilschlssel definiert. Maximal %d Teilschlssel erlaubt" + greek " key parts . %d " + hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett" + ita "Troppe parti di chiave specificate. Sono ammesse max %d parti" + kor "ʹ Ű κ(parts) ǵǾ ϴ.. ִ %d κ " + nor "For mange nkkeldeler spesifisert. Maks %d deler tillatt" + norwegian-ny "For mange nykkeldelar spesifisert. 
Maks %d delar tillatt" + pol "Okre?lono zbyt wiele cz?ci klucza. Dostpnych jest maksymalnie %d cz?ci" + por "Especificadas partes de chave demais. O mximo permitido so %d partes" + rum "Prea multe chei. Numarul de chei maxim este %d" + rus " . %d " + serbian "Navedeno je previe delova kljua. Maksimum %d delova je dozvoljeno" + slo "Zadanch rli vea ast kov. Je povolench najviac %d ast" + spa "Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas" + swe "Fr mnga nyckeldelar anvnda. Man fr ha hgst %d nyckeldelar" + ukr " . ¦ %d " ER_TOO_LONG_KEY 42000 S1009 - cze "Zadan-B kl byl pli dlouh, nejvt dlka kle je %d" - dan "Specificeret ngle var for lang. Maksimal nglelngde er %d" - nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d" - eng "Specified key was too long; max key length is %d bytes" - jps "key ܂. key ͍̒ő %d ł", - est "Vti on liiga pikk. Maksimaalne vtmepikkus on %d" - fre "La cl est trop longue. Longueur maximale: %d" - ger "Schlssel ist zu lang. Die maximale Schlssellnge betrgt %d" - greek " . %d" - hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d" - ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d" - jpn "key Ĺޤ. key ĹϺ %d Ǥ" - kor "ǵ Ű ʹ ϴ. ִ Ű ̴ %dԴϴ." - nor "Spesifisert nkkel var for lang. Maks nkkellengde er is %d" - norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d" - pol "Zdefinowany klucz jest zbyt dugi. Maksymaln? dugo?ci? klucza jest %d" - por "Chave especificada longa demais. O comprimento de chave mximo permitido %d" - rum "Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d" - rus " . %d " - serbian "Navedeni klju je predug. Maksimalna duina kljua je %d" - slo "Zadan k je prli dlh, najvia dka ka je %d" - spa "Declaracion de clave demasiado larga. La maxima longitud de clave es %d" - swe "Fr lng nyckel. Hgsta tilltna nyckellngd r %d" - ukr " . ¦ %d Ԧ" + cze "Zadan-B kl byl pli dlouh, nejvt dlka kle je %d" + dan "Specificeret ngle var for lang. Maksimal nglelngde er %d" + nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d" + eng "Specified key was too long; max key length is %d bytes" + jps "key ܂. key ͍̒ő %d ł", + est "Vti on liiga pikk. Maksimaalne vtmepikkus on %d" + fre "La cl est trop longue. Longueur maximale: %d" + ger "Schlssel ist zu lang. Die maximale Schlssellnge betrgt %d" + greek " . %d" + hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d" + ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d" + jpn "key Ĺޤ. key ĹϺ %d Ǥ" + kor "ǵ Ű ʹ ϴ. ִ Ű ̴ %dԴϴ." + nor "Spesifisert nkkel var for lang. Maks nkkellengde er is %d" + norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d" + pol "Zdefinowany klucz jest zbyt dugi. Maksymaln? dugo?ci? klucza jest %d" + por "Chave especificada longa demais. O comprimento de chave mximo permitido %d" + rum "Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d" + rus " . %d " + serbian "Navedeni klju je predug. Maksimalna duina kljua je %d" + slo "Zadan k je prli dlh, najvia dka ka je %d" + spa "Declaracion de clave demasiado larga. La maxima longitud de clave es %d" + swe "Fr lng nyckel. Hgsta tilltna nyckellngd r %d" + ukr " . 
¦ %d Ԧ" ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 - cze "Kl-Bov sloupec '%-.64s' v tabulce neexistuje" - dan "Nglefeltet '%-.64s' eksisterer ikke i tabellen" - nla "Zoeksleutel kolom '%-.64s' bestaat niet in tabel" - eng "Key column '%-.64s' doesn't exist in table" - jps "Key column '%-.64s' e[uɂ܂.", - est "Vtme tulp '%-.64s' puudub tabelis" - fre "La cl '%-.64s' n'existe pas dans la table" - ger "In der Tabelle gibt es kein Schlsselfeld '%-.64s'" - greek " '%-.64s' " - hun "A(z) '%-.64s'kulcsoszlop nem letezik a tablaban" - ita "La colonna chiave '%-.64s' non esiste nella tabella" - jpn "Key column '%-.64s' ơ֥ˤޤ." - kor "Key Į '%-.64s' ̺ ʽϴ." - nor "Nkkel felt '%-.64s' eksiterer ikke i tabellen" - norwegian-ny "Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen" - pol "Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli" - por "Coluna chave '%-.64s' no existe na tabela" - rum "Coloana cheie '%-.64s' nu exista in tabela" - rus " '%-.64s' " - serbian "Kljuna kolona '%-.64s' ne postoji u tabeli" - slo "Kov stpec '%-.64s' v tabuke neexistuje" - spa "La columna clave '%-.64s' no existe en la tabla" - swe "Nyckelkolumn '%-.64s' finns inte" - ukr " '%-.64s' դ æ" + cze "Kl-Bov sloupec '%-.64s' v tabulce neexistuje" + dan "Nglefeltet '%-.64s' eksisterer ikke i tabellen" + nla "Zoeksleutel kolom '%-.64s' bestaat niet in tabel" + eng "Key column '%-.64s' doesn't exist in table" + jps "Key column '%-.64s' e[uɂ܂.", + est "Vtme tulp '%-.64s' puudub tabelis" + fre "La cl '%-.64s' n'existe pas dans la table" + ger "In der Tabelle gibt es kein Schlsselfeld '%-.64s'" + greek " '%-.64s' " + hun "A(z) '%-.64s'kulcsoszlop nem letezik a tablaban" + ita "La colonna chiave '%-.64s' non esiste nella tabella" + jpn "Key column '%-.64s' ơ֥ˤޤ." + kor "Key Į '%-.64s' ̺ ʽϴ." + nor "Nkkel felt '%-.64s' eksiterer ikke i tabellen" + norwegian-ny "Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen" + pol "Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli" + por "Coluna chave '%-.64s' no existe na tabela" + rum "Coloana cheie '%-.64s' nu exista in tabela" + rus " '%-.64s' " + serbian "Kljuna kolona '%-.64s' ne postoji u tabeli" + slo "Kov stpec '%-.64s' v tabuke neexistuje" + spa "La columna clave '%-.64s' no existe en la tabla" + swe "Nyckelkolumn '%-.64s' finns inte" + ukr " '%-.64s' դ æ" ER_BLOB_USED_AS_KEY 42000 S1009 - cze "Blob sloupec '%-.64s' nem-Be bt pouit jako kl" - dan "BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks" - nla "BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie" - eng "BLOB column '%-.64s' can't be used in key specification with the used table type" - est "BLOB-tpi tulpa '%-.64s' ei saa kasutada vtmena" - fre "Champ BLOB '%-.64s' ne peut tre utilis dans une cl" - ger "BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlssel verwendet werden" - greek " Blob '%-.64s' (key specification)" - hun "Blob objektum '%-.64s' nem hasznalhato kulcskent" - ita "La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave" - kor "BLOB Į '%-.64s' Ű ǿ ϴ." 
- nor "Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nkler" - norwegian-ny "Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar" - pol "Kolumna typu Blob '%-.64s' nie moe by uyta w specyfikacji klucza" - por "Coluna BLOB '%-.64s' no pode ser utilizada na especificao de chave para o tipo de tabela usado" - rum "Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit" - rus " BLOB '%-.64s' " - serbian "BLOB kolona '%-.64s' ne moe biti upotrebljena za navoenje kljua sa tipom tabele koji se trenutno koristi" - slo "Blob pole '%-.64s' neme by pouit ako k" - spa "La columna Blob '%-.64s' no puede ser usada en una declaracion de clave" - swe "En BLOB '%-.64s' kan inte vara nyckel med den anvnda tabelltypen" - ukr "BLOB '%-.64s' Φ Ц æ" + cze "Blob sloupec '%-.64s' nem-Be bt pouit jako kl" + dan "BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks" + nla "BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie" + eng "BLOB column '%-.64s' can't be used in key specification with the used table type" + est "BLOB-tpi tulpa '%-.64s' ei saa kasutada vtmena" + fre "Champ BLOB '%-.64s' ne peut tre utilis dans une cl" + ger "BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlssel verwendet werden" + greek " Blob '%-.64s' (key specification)" + hun "Blob objektum '%-.64s' nem hasznalhato kulcskent" + ita "La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave" + kor "BLOB Į '%-.64s' Ű ǿ ϴ." + nor "Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nkler" + norwegian-ny "Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar" + pol "Kolumna typu Blob '%-.64s' nie moe by uyta w specyfikacji klucza" + por "Coluna BLOB '%-.64s' no pode ser utilizada na especificao de chave para o tipo de tabela usado" + rum "Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit" + rus " BLOB '%-.64s' " + serbian "BLOB kolona '%-.64s' ne moe biti upotrebljena za navoenje kljua sa tipom tabele koji se trenutno koristi" + slo "Blob pole '%-.64s' neme by pouit ako k" + spa "La columna Blob '%-.64s' no puede ser usada en una declaracion de clave" + swe "En BLOB '%-.64s' kan inte vara nyckel med den anvnda tabelltypen" + ukr "BLOB '%-.64s' Φ Ц æ" ER_TOO_BIG_FIELDLENGTH 42000 S1009 - cze "P-Bli velk dlka sloupce '%-.64s' (nejvce %d). Pouijte BLOB" - dan "For stor feltlngde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet" - nla "Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB" - eng "Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead" - jps "column '%-.64s' ,mۂ column ̑傫܂. (ő %d ܂). BLOB ɎgpĂ.", - est "Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB vljatpi" - fre "Champ '%-.64s' trop long (max = %d). Utilisez un BLOB" - ger "Feldlnge fr Feld '%-.64s' zu gro (maximal %d). BLOB- oder TEXT-Spaltentyp verwenden!" - greek " '%-.64s' (max = %d). BLOB" - hun "A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb." - ita "La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB." - jpn "column '%-.64s' ,ݤ column 礭¿ޤ. ( %d ޤ). BLOB 򤫤˻ѤƤ." - kor "Į '%-.64s' Į ̰ ʹ ϴ (ִ = %d). ſ BLOB ϼ." - nor "For stor nkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor" - norwegian-ny "For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor" - pol "Zbyt dua dugo? kolumny '%-.64s' (maks. = %d). 
W zamian uyj typu BLOB" - por "Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar" - rum "Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine" - rus " '%-.64s' ( = %d). BLOB TEXT " - serbian "Previe podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje" - slo "Prli vek dka pre pole '%-.64s' (maximum = %d). Pouite BLOB" - spa "Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar" - swe "Fr stor kolumnlngd angiven fr '%-.64s' (max= %d). Anvnd en BLOB instllet" - ukr " '%-.64s' (max = %d). BLOB" + cze "P-Bli velk dlka sloupce '%-.64s' (nejvce %d). Pouijte BLOB" + dan "For stor feltlngde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet" + nla "Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB" + eng "Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead" + jps "column '%-.64s' ,mۂ column ̑傫܂. (ő %d ܂). BLOB ɎgpĂ.", + est "Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB vljatpi" + fre "Champ '%-.64s' trop long (max = %d). Utilisez un BLOB" + ger "Feldlnge fr Feld '%-.64s' zu gro (maximal %d). BLOB- oder TEXT-Spaltentyp verwenden!" + greek " '%-.64s' (max = %d). BLOB" + hun "A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb." + ita "La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB." + jpn "column '%-.64s' ,ݤ column 礭¿ޤ. ( %d ޤ). BLOB 򤫤˻ѤƤ." + kor "Į '%-.64s' Į ̰ ʹ ϴ (ִ = %d). ſ BLOB ϼ." + nor "For stor nkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor" + norwegian-ny "For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor" + pol "Zbyt dua dugo? kolumny '%-.64s' (maks. = %d). W zamian uyj typu BLOB" + por "Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar" + rum "Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine" + rus " '%-.64s' ( = %d). BLOB TEXT " + serbian "Previe podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje" + slo "Prli vek dka pre pole '%-.64s' (maximum = %d). Pouite BLOB" + spa "Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar" + swe "Fr stor kolumnlngd angiven fr '%-.64s' (max= %d). Anvnd en BLOB instllet" + ukr " '%-.64s' (max = %d). BLOB" ER_WRONG_AUTO_KEY 42000 S1009 - cze "M-Bete mt pouze jedno AUTO pole a to mus bt definovno jako kl" - dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal vre indekseret" - nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd." - eng "Incorrect table definition; there can be only one auto column and it must be defined as a key" - jps "e[u̒`Ⴂ܂; there can be only one auto column and it must be defined as a key", - est "Vigane tabelikirjeldus; Tabelis tohib olla ks auto_increment tpi tulp ning see peab olema defineeritud vtmena" - fre "Un seul champ automatique est permis et il doit tre index" - ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlssel definiert werden" - greek " auto field key" - hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni." - ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave" - jpn "ơ֥㤤ޤ; there can be only one auto column and it must be defined as a key" - kor "Ȯ ̺ ; ̺ ϳ auto Į ϰ Ű ǵǾ մϴ." - nor "Bare ett auto felt kan vre definert som nkkel." 
- norwegian-ny "Bare eitt auto felt kan vre definert som nkkel."
- pol "W tabeli moe by tylko jedno pole auto i musi ono by zdefiniowane jako klucz"
- por "Definio incorreta de tabela. Somente permitido um nico campo auto-incrementado e ele tem que ser definido como chave"
- rum "Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie"
- rus " : , "
- serbian "Pogrena definicija tabele; U tabeli moe postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljua"
- slo "Mete ma iba jedno AUTO pole a to mus by definovan ako k"
- spa "Puede ser solamente un campo automatico y este debe ser definido como una clave"
- swe "Det fr finnas endast ett AUTO_INCREMENT-flt och detta mste vara en nyckel"
- ukr "צ æ; , "
+ cze "M-Bete mt pouze jedno AUTO pole a to mus bt definovno jako kl"
+ dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal vre indekseret"
+ nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd."
+ eng "Incorrect table definition; there can be only one auto column and it must be defined as a key"
+ jps "e[u̒`Ⴂ܂; there can be only one auto column and it must be defined as a key",
+ est "Vigane tabelikirjeldus; Tabelis tohib olla ks auto_increment tpi tulp ning see peab olema defineeritud vtmena"
+ fre "Un seul champ automatique est permis et il doit tre index"
+ ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlssel definiert werden"
+ greek " auto field key"
+ hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni."
+ ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave"
+ jpn "ơ֥㤤ޤ; there can be only one auto column and it must be defined as a key"
+ kor "Ȯ ̺ ; ̺ ϳ auto Į ϰ Ű ǵǾ մϴ."
+ nor "Bare ett auto felt kan vre definert som nkkel."
+ norwegian-ny "Bare eitt auto felt kan vre definert som nkkel."
+ pol "W tabeli moe by tylko jedno pole auto i musi ono by zdefiniowane jako klucz"
+ por "Definio incorreta de tabela. Somente permitido um nico campo auto-incrementado e ele tem que ser definido como chave"
+ rum "Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie"
+ rus " : , "
+ serbian "Pogrena definicija tabele; U tabeli moe postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljua"
+ slo "Mete ma iba jedno AUTO pole a to mus by definovan ako k"
+ spa "Puede ser solamente un campo automatico y este debe ser definido como una clave"
+ swe "Det fr finnas endast ett AUTO_INCREMENT-flt och detta mste vara en nyckel"
+ ukr "צ æ; , "
ER_READY
- cze "%s: p-Bipraven na spojen"
- dan "%s: klar til tilslutninger"
- nla "%s: klaar voor verbindingen"
- eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d"
- jps "%s: ",
- est "%s: ootab hendusi"
- fre "%s: Prt pour des connections"
- ger "%-.64s: Bereit fr Verbindungen.\nVersion: '%2' Socket: '%s' Port: %d"
- greek "%s: "
- hun "%s: kapcsolatra kesz"
- ita "%s: Pronto per le connessioni\n"
- jpn "%s: λ"
- kor "%s: غԴϴ"
- nor "%s: klar for tilkoblinger"
- norwegian-ny "%s: klar for tilkoblingar"
- pol "%s: gotowe do po?czenia"
- por "%s: Pronto para conexes"
- rum "%s: sint gata pentru conectii"
- rus "%s: .\n: '%s' : '%s' : %d %s"
- serbian "%s: Spreman za konekcije\n"
- slo "%s: pripraven na spojenie"
- spa "%s: preparado para conexiones"
- swe "%s: klar att ta emot klienter"
- ukr "%s: '!"
+ cze "%s: p-Bipraven na spojen" + dan "%s: klar til tilslutninger" + nla "%s: klaar voor verbindingen" + eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d" + jps "%s: ", + est "%s: ootab hendusi" + fre "%s: Prt pour des connections" + ger "%-.64s: Bereit fr Verbindungen.\nVersion: '%2' Socket: '%s' Port: %d" + greek "%s: " + hun "%s: kapcsolatra kesz" + ita "%s: Pronto per le connessioni\n" + jpn "%s: λ" + kor "%s: غԴϴ" + nor "%s: klar for tilkoblinger" + norwegian-ny "%s: klar for tilkoblingar" + pol "%s: gotowe do po?czenia" + por "%s: Pronto para conexes" + rum "%s: sint gata pentru conectii" + rus "%s: .\n: '%s' : '%s' : %d %s" + serbian "%s: Spreman za konekcije\n" + slo "%s: pripraven na spojenie" + spa "%s: preparado para conexiones" + swe "%s: klar att ta emot klienter" + ukr "%s: '!" ER_NORMAL_SHUTDOWN - cze "%s: norm-Bln ukonen\n" - dan "%s: Normal nedlukning\n" - nla "%s: Normaal afgesloten \n" - eng "%s: Normal shutdown\n" - est "%s: MySQL lpetas\n" - fre "%s: Arrt normal du serveur\n" - ger "%-.64s: Normal heruntergefahren\n" - greek "%s: shutdown\n" - hun "%s: Normal leallitas\n" - ita "%s: Shutdown normale\n" - kor "%s: shutdown\n" - nor "%s: Normal avslutning\n" - norwegian-ny "%s: Normal nedkopling\n" - pol "%s: Standardowe zakoczenie dziaania\n" - por "%s: 'Shutdown' normal\n" - rum "%s: Terminare normala\n" - rus "%s: \n" - serbian "%s: Normalno gaenje\n" - slo "%s: normlne ukonenie\n" - spa "%s: Apagado normal\n" - swe "%s: Normal avslutning\n" - ukr "%s: \n" + cze "%s: norm-Bln ukonen\n" + dan "%s: Normal nedlukning\n" + nla "%s: Normaal afgesloten \n" + eng "%s: Normal shutdown\n" + est "%s: MySQL lpetas\n" + fre "%s: Arrt normal du serveur\n" + ger "%-.64s: Normal heruntergefahren\n" + greek "%s: shutdown\n" + hun "%s: Normal leallitas\n" + ita "%s: Shutdown normale\n" + kor "%s: shutdown\n" + nor "%s: Normal avslutning\n" + norwegian-ny "%s: Normal nedkopling\n" + pol "%s: Standardowe zakoczenie dziaania\n" + por "%s: 'Shutdown' normal\n" + rum "%s: Terminare normala\n" + rus "%s: \n" + serbian "%s: Normalno gaenje\n" + slo "%s: normlne ukonenie\n" + spa "%s: Apagado normal\n" + swe "%s: Normal avslutning\n" + ukr "%s: \n" ER_GOT_SIGNAL - cze "%s: p-Bijat signal %d, konm\n" - dan "%s: Fangede signal %d. Afslutter!!\n" - nla "%s: Signaal %d. Systeem breekt af!\n" - eng "%s: Got signal %d. Aborting!\n" - jps "%s: Got signal %d. f!\n", - est "%s: sain signaali %d. Lpetan!\n" - fre "%s: Reu le signal %d. Abandonne!\n" - ger "%-.64s: Signal %d erhalten. Abbruch!\n" - greek "%s: %d. !\n" - hun "%s: %d jelzes. Megszakitva!\n" - ita "%s: Ricevuto segnale %d. Interruzione!\n" - jpn "%s: Got signal %d. !\n" - kor "%s: %d ȣ . !\n" - nor "%s: Oppdaget signal %d. Avslutter!\n" - norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n" - pol "%s: Otrzymano sygna %d. Koczenie dziaania!\n" - por "%s: Obteve sinal %d. Abortando!\n" - rum "%s: Semnal %d obtinut. Aborting!\n" - rus "%s: %d. !\n" - serbian "%s: Dobio signal %d. Prekidam!\n" - slo "%s: prijat signl %d, ukonenie (Abort)!\n" - spa "%s: Recibiendo signal %d. Abortando!\n" - swe "%s: Fick signal %d. Avslutar!\n" - ukr "%s: %d. !\n" + cze "%s: p-Bijat signal %d, konm\n" + dan "%s: Fangede signal %d. Afslutter!!\n" + nla "%s: Signaal %d. Systeem breekt af!\n" + eng "%s: Got signal %d. Aborting!\n" + jps "%s: Got signal %d. f!\n", + est "%s: sain signaali %d. Lpetan!\n" + fre "%s: Reu le signal %d. Abandonne!\n" + ger "%-.64s: Signal %d erhalten. Abbruch!\n" + greek "%s: %d. !\n" + hun "%s: %d jelzes. 
Megszakitva!\n" + ita "%s: Ricevuto segnale %d. Interruzione!\n" + jpn "%s: Got signal %d. !\n" + kor "%s: %d ȣ . !\n" + nor "%s: Oppdaget signal %d. Avslutter!\n" + norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n" + pol "%s: Otrzymano sygna %d. Koczenie dziaania!\n" + por "%s: Obteve sinal %d. Abortando!\n" + rum "%s: Semnal %d obtinut. Aborting!\n" + rus "%s: %d. !\n" + serbian "%s: Dobio signal %d. Prekidam!\n" + slo "%s: prijat signl %d, ukonenie (Abort)!\n" + spa "%s: Recibiendo signal %d. Abortando!\n" + swe "%s: Fick signal %d. Avslutar!\n" + ukr "%s: %d. !\n" ER_SHUTDOWN_COMPLETE - cze "%s: ukon-Ben prce hotovo\n" - dan "%s: Server lukket\n" - nla "%s: Afsluiten afgerond\n" - eng "%s: Shutdown complete\n" - jps "%s: Shutdown \n", - est "%s: Lpp\n" - fre "%s: Arrt du serveur termin\n" - ger "%-.64s: Herunterfahren beendet\n" - greek "%s: Shutdown \n" - hun "%s: A leallitas kesz\n" - ita "%s: Shutdown completato\n" - jpn "%s: Shutdown λ\n" - kor "%s: Shutdown Ϸ!\n" - nor "%s: Avslutning komplett\n" - norwegian-ny "%s: Nedkopling komplett\n" - pol "%s: Zakoczenie dziaania wykonane\n" - por "%s: 'Shutdown' completo\n" - rum "%s: Terminare completa\n" - rus "%s: \n" - serbian "%s: Gaenje zavreno\n" - slo "%s: prca ukonen\n" - spa "%s: Apagado completado\n" - swe "%s: Avslutning klar\n" - ukr "%s: \n" + cze "%s: ukon-Ben prce hotovo\n" + dan "%s: Server lukket\n" + nla "%s: Afsluiten afgerond\n" + eng "%s: Shutdown complete\n" + jps "%s: Shutdown \n", + est "%s: Lpp\n" + fre "%s: Arrt du serveur termin\n" + ger "%-.64s: Herunterfahren beendet\n" + greek "%s: Shutdown \n" + hun "%s: A leallitas kesz\n" + ita "%s: Shutdown completato\n" + jpn "%s: Shutdown λ\n" + kor "%s: Shutdown Ϸ!\n" + nor "%s: Avslutning komplett\n" + norwegian-ny "%s: Nedkopling komplett\n" + pol "%s: Zakoczenie dziaania wykonane\n" + por "%s: 'Shutdown' completo\n" + rum "%s: Terminare completa\n" + rus "%s: \n" + serbian "%s: Gaenje zavreno\n" + slo "%s: prca ukonen\n" + spa "%s: Apagado completado\n" + swe "%s: Avslutning klar\n" + ukr "%s: \n" ER_FORCING_CLOSE 08S01 - cze "%s: n-Bsiln uzaven threadu %ld uivatele '%-.64s'\n" - dan "%s: Forceret nedlukning af trd: %ld bruger: '%-.64s'\n" - nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.64s'\n" - eng "%s: Forcing close of thread %ld user: '%-.32s'\n" - jps "%s: Xbh %ld I user: '%-.64s'\n", - est "%s: Sulgen juga lime %ld kasutaja: '%-.32s'\n" - fre "%s: Arrt forc de la tche (thread) %ld utilisateur: '%-.64s'\n" - ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n" - greek "%s: thread %ld user: '%-.64s'\n" - hun "%s: A(z) %ld thread kenyszeritett zarasa. 
Felhasznalo: '%-.64s'\n" - ita "%s: Forzata la chiusura del thread %ld utente: '%-.64s'\n" - jpn "%s: å %ld λ user: '%-.64s'\n" - kor "%s: thread %ld user: '%-.64s'\n" - nor "%s: Ptvinget avslutning av trd %ld bruker: '%-.64s'\n" - norwegian-ny "%s: Ptvinga avslutning av trd %ld brukar: '%-.64s'\n" - pol "%s: Wymuszenie zamknicia w?tku %ld uytkownik: '%-.64s'\n" - por "%s: Forando finalizao da 'thread' %ld - usurio '%-.32s'\n" - rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n" - rus "%s: %ld : '%-.32s'\n" - serbian "%s: Usiljeno gaenje thread-a %ld koji pripada korisniku: '%-.32s'\n" - slo "%s: nsiln ukonenie vlkna %ld uvatea '%-.64s'\n" - spa "%s: Forzando a cerrar el thread %ld usuario: '%-.64s'\n" - swe "%s: Stnger av trd %ld; anvndare: '%-.64s'\n" - ukr "%s: Ǧ %ld : '%-.32s'\n" + cze "%s: n-Bsiln uzaven threadu %ld uivatele '%-.64s'\n" + dan "%s: Forceret nedlukning af trd: %ld bruger: '%-.64s'\n" + nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.64s'\n" + eng "%s: Forcing close of thread %ld user: '%-.32s'\n" + jps "%s: Xbh %ld I user: '%-.64s'\n", + est "%s: Sulgen juga lime %ld kasutaja: '%-.32s'\n" + fre "%s: Arrt forc de la tche (thread) %ld utilisateur: '%-.64s'\n" + ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n" + greek "%s: thread %ld user: '%-.64s'\n" + hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.64s'\n" + ita "%s: Forzata la chiusura del thread %ld utente: '%-.64s'\n" + jpn "%s: å %ld λ user: '%-.64s'\n" + kor "%s: thread %ld user: '%-.64s'\n" + nor "%s: Ptvinget avslutning av trd %ld bruker: '%-.64s'\n" + norwegian-ny "%s: Ptvinga avslutning av trd %ld brukar: '%-.64s'\n" + pol "%s: Wymuszenie zamknicia w?tku %ld uytkownik: '%-.64s'\n" + por "%s: Forando finalizao da 'thread' %ld - usurio '%-.32s'\n" + rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n" + rus "%s: %ld : '%-.32s'\n" + serbian "%s: Usiljeno gaenje thread-a %ld koji pripada korisniku: '%-.32s'\n" + slo "%s: nsiln ukonenie vlkna %ld uvatea '%-.64s'\n" + spa "%s: Forzando a cerrar el thread %ld usuario: '%-.64s'\n" + swe "%s: Stnger av trd %ld; anvndare: '%-.64s'\n" + ukr "%s: Ǧ %ld : '%-.32s'\n" ER_IPSOCK_ERROR 08S01 - cze "Nemohu vytvo-Bit IP socket" - dan "Kan ikke oprette IP socket" - nla "Kan IP-socket niet openen" - eng "Can't create IP socket" - jps "IP socket ܂", - est "Ei suuda luua IP socketit" - fre "Ne peut crer la connection IP (socket)" - ger "Kann IP-Socket nicht erzeugen" - greek " IP socket" - hun "Az IP socket nem hozhato letre" - ita "Impossibile creare il socket IP" - jpn "IP socket ޤ" - kor "IP ߽ϴ." - nor "Kan ikke opprette IP socket" - norwegian-ny "Kan ikkje opprette IP socket" - pol "Nie mona stworzy socket'u IP" - por "No pode criar o soquete IP" - rum "Nu pot crea IP socket" - rus " IP-" - serbian "Ne mogu da kreiram IP socket" - slo "Nemem vytvori IP socket" - spa "No puedo crear IP socket" - swe "Kan inte skapa IP-socket" - ukr " IP '" + cze "Nemohu vytvo-Bit IP socket" + dan "Kan ikke oprette IP socket" + nla "Kan IP-socket niet openen" + eng "Can't create IP socket" + jps "IP socket ܂", + est "Ei suuda luua IP socketit" + fre "Ne peut crer la connection IP (socket)" + ger "Kann IP-Socket nicht erzeugen" + greek " IP socket" + hun "Az IP socket nem hozhato letre" + ita "Impossibile creare il socket IP" + jpn "IP socket ޤ" + kor "IP ߽ϴ." 
+ nor "Kan ikke opprette IP socket" + norwegian-ny "Kan ikkje opprette IP socket" + pol "Nie mona stworzy socket'u IP" + por "No pode criar o soquete IP" + rum "Nu pot crea IP socket" + rus " IP-" + serbian "Ne mogu da kreiram IP socket" + slo "Nemem vytvori IP socket" + spa "No puedo crear IP socket" + swe "Kan inte skapa IP-socket" + ukr " IP '" ER_NO_SUCH_INDEX 42S12 S1009 - cze "Tabulka '%-.64s' nem-B index odpovdajc CREATE INDEX. Vytvote tabulku znovu" - dan "Tabellen '%-.64s' har ikke den ngle, som blev brugt i CREATE INDEX. Genopret tabellen" - nla "Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw" - eng "Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table" - jps "Table '%-.64s' ͂̂悤 index Ă܂(CREATE INDEX sɎw肳Ă܂). e[u蒼Ă", - est "Tabelil '%-.64s' puuduvad vtmed. Loo tabel uuesti" - fre "La table '%-.64s' n'a pas d'index comme celle utilise dans CREATE INDEX. Recrez la table" - ger "Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen" - greek " '%-.64s' (index) CREATE INDEX. , " - hun "A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat" - ita "La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella" - jpn "Table '%-.64s' ϤΤ褦 index äƤޤ(CREATE INDEX ¹Ի˻ꤵƤޤ). ơ֥ľƤ" - kor "̺ '%-.64s' ε ʾҽϴ. alter ̺ ̿Ͽ ̺ ϼ..." - nor "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen" - norwegian-ny "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen p nytt" - pol "Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwrz tabel" - por "Tabela '%-.64s' no possui um ndice como o usado em CREATE INDEX. Recrie a tabela" - rum "Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela" - rus " '%-.64s' , CREATE INDEX. " - serbian "Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo" - slo "Tabuka '%-.64s' nem index zodpovedajci CREATE INDEX. Vytvorte tabulku znova" - spa "La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla" - swe "Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen" - ukr " '%-.64s' , Ц CREATE INDEX. Ҧ " + cze "Tabulka '%-.64s' nem-B index odpovdajc CREATE INDEX. Vytvote tabulku znovu" + dan "Tabellen '%-.64s' har ikke den ngle, som blev brugt i CREATE INDEX. Genopret tabellen" + nla "Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw" + eng "Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table" + jps "Table '%-.64s' ͂̂悤 index Ă܂(CREATE INDEX sɎw肳Ă܂). e[u蒼Ă", + est "Tabelil '%-.64s' puuduvad vtmed. Loo tabel uuesti" + fre "La table '%-.64s' n'a pas d'index comme celle utilise dans CREATE INDEX. Recrez la table" + ger "Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen" + greek " '%-.64s' (index) CREATE INDEX. , " + hun "A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat" + ita "La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella" + jpn "Table '%-.64s' ϤΤ褦 index äƤޤ(CREATE INDEX ¹Ի˻ꤵƤޤ). ơ֥ľƤ" + kor "̺ '%-.64s' ε ʾҽϴ. alter ̺ ̿Ͽ ̺ ϼ..." + nor "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. 
Gjenopprett tabellen" + norwegian-ny "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen p nytt" + pol "Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwrz tabel" + por "Tabela '%-.64s' no possui um ndice como o usado em CREATE INDEX. Recrie a tabela" + rum "Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela" + rus " '%-.64s' , CREATE INDEX. " + serbian "Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo" + slo "Tabuka '%-.64s' nem index zodpovedajci CREATE INDEX. Vytvorte tabulku znova" + spa "La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla" + swe "Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen" + ukr " '%-.64s' , Ц CREATE INDEX. Ҧ " ER_WRONG_FIELD_TERMINATORS 42000 S1009 - cze "Argument separ-Btoru poloek nebyl oekvn. Pette si manul" - dan "Felt adskiller er ikke som forventet, se dokumentationen" - nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding" - eng "Field separator argument is not what is expected; check the manual" - est "Vljade eraldaja erineb oodatust. Tutvu kasutajajuhendiga" - fre "Sparateur de champs inconnu. Vrifiez dans le manuel" - ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen" - greek " . manual" - hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!" - ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale" - kor "ʵ μ ʽϴ. ޴ ã ." - nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen" - norwegian-ny "Felt skiljer argumenta er ikkje som venta, sj dokumentasjonen" - pol "Nie oczekiwano separatora. Sprawd podrcznik" - por "Argumento separador de campos no o esperado. Cheque o manual" - rum "Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul" - rus " - , . " - serbian "Argument separatora polja nije ono to se oekivalo. Proverite uputstvo MySQL server-a" - slo "Argument oddeova pol nezodpoved poiadavkm. Skontrolujte v manuli" - spa "Los separadores de argumentos del campo no son los especificados. Comprueba el manual" - swe "Fltseparatorerna r vad som frvntades. Kontrollera mot manualen" - ukr " Ħ ̦. æ" + cze "Argument separ-Btoru poloek nebyl oekvn. Pette si manul" + dan "Felt adskiller er ikke som forventet, se dokumentationen" + nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding" + eng "Field separator argument is not what is expected; check the manual" + est "Vljade eraldaja erineb oodatust. Tutvu kasutajajuhendiga" + fre "Sparateur de champs inconnu. Vrifiez dans le manuel" + ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen" + greek " . manual" + hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!" + ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale" + kor "ʵ μ ʽϴ. ޴ ã ." + nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen" + norwegian-ny "Felt skiljer argumenta er ikkje som venta, sj dokumentasjonen" + pol "Nie oczekiwano separatora. Sprawd podrcznik" + por "Argumento separador de campos no o esperado. Cheque o manual" + rum "Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul" + rus " - , . 
" + serbian "Argument separatora polja nije ono to se oekivalo. Proverite uputstvo MySQL server-a" + slo "Argument oddeova pol nezodpoved poiadavkm. Skontrolujte v manuli" + spa "Los separadores de argumentos del campo no son los especificados. Comprueba el manual" + swe "Fltseparatorerna r vad som frvntades. Kontrollera mot manualen" + ukr " Ħ ̦. æ" ER_BLOBS_AND_NO_TERMINATED 42000 S1009 - cze "Nen-B mon pout pevn rowlength s BLOBem. Pouijte 'fields terminated by'." - dan "Man kan ikke bruge faste feltlngder med BLOB. Brug i stedet 'fields terminated by'." - nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'." - eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'" - est "BLOB-tpi vljade olemasolul ei saa kasutada fikseeritud vljapikkust. Vajalik 'fields terminated by' mrang." - fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'." - ger "Eine feste Zeilenlnge kann fr BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden" - greek " fixed rowlength BLOBs. 'fields terminated by'." - hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ." - ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'." - jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'." - kor "BLOBδ lowlength ϴ. 'fields terminated by' ϼ." - nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." - norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." - pol "Nie mona uy staej dugo?ci wiersza z polami typu BLOB. Uyj 'fields terminated by'." - por "Voc no pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado." - rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'." - rus " BLOB , 'fields terminated by'" - serbian "Ne moete koristiti fiksnu veliinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju." - slo "Nie je mon poui fixn dku s BLOBom. Pouite 'fields terminated by'." - spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '." - swe "Man kan inte anvnda fast radlngd med blobs. Anvnd 'fields terminated by'" - ukr " BLOB. 'fields terminated by'" + cze "Nen-B mon pout pevn rowlength s BLOBem. Pouijte 'fields terminated by'." + dan "Man kan ikke bruge faste feltlngder med BLOB. Brug i stedet 'fields terminated by'." + nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'." + eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'" + est "BLOB-tpi vljade olemasolul ei saa kasutada fikseeritud vljapikkust. Vajalik 'fields terminated by' mrang." + fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'." + ger "Eine feste Zeilenlnge kann fr BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden" + greek " fixed rowlength BLOBs. 'fields terminated by'." + hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ." + ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'." + jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'." + kor "BLOBδ lowlength ϴ. 'fields terminated by' ϼ." 
+ nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." + norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." + pol "Nie mona uy staej dugo?ci wiersza z polami typu BLOB. Uyj 'fields terminated by'." + por "Voc no pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado." + rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'." + rus " BLOB , 'fields terminated by'" + serbian "Ne moete koristiti fiksnu veliinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju." + slo "Nie je mon poui fixn dku s BLOBom. Pouite 'fields terminated by'." + spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '." + swe "Man kan inte anvnda fast radlngd med blobs. Anvnd 'fields terminated by'" + ukr " BLOB. 'fields terminated by'" ER_TEXTFILE_NOT_READABLE - cze "Soubor '%-.64s' mus-B bt v adresi databze nebo iteln pro vechny" - dan "Filen '%-.64s' skal vre i database-folderen og kunne lses af alle" - nla "Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn." - eng "The file '%-.128s' must be in the database directory or be readable by all" - jps "t@C '%-.64s' databse directory ɂ邩SẴ[U[ǂ߂悤ɋ‚ĂȂ΂Ȃ܂.", - est "Fail '%-.64s' peab asuma andmebaasi kataloogis vi olema kigile loetav" - fre "Le fichier '%-.64s' doit tre dans le rpertoire de la base et lisible par tous" - ger "Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden oder lesbar fr alle sein" - greek " '%-.64s' database directory " - hun "A(z) '%-.64s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak" - ita "Il file '%-.64s' deve essere nella directory del database e deve essere leggibile da tutti" - jpn "ե '%-.64s' databse directory ˤ뤫ƤΥ桼ɤ褦˵ĤƤʤФʤޤ." - kor "'%-.64s' ȭϴ Ÿ̽ 丮 ϰų ο б Ͽ մϴ." - nor "Filen '%-.64s' m vre i database-katalogen for vre lesbar for alle" - norwegian-ny "Filen '%-.64s' m vre i database-katalogen for vre lesbar for alle" - pol "Plik '%-.64s' musi znajdowa sie w katalogu bazy danych lub mie prawa czytania przez wszystkich" - por "Arquivo '%-.64s' tem que estar no diretrio do banco de dados ou ter leitura possvel para todos" - rum "Fisierul '%-.64s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)" - rus " '%-.64s' , , " - serbian "File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajua prava pristupa" - slo "Sbor '%-.64s' mus by v adresri databzy, alebo itaten pre vetkch" - spa "El archivo '%-.64s' debe estar en el directorio de la base de datos o ser de lectura por todos" - swe "Textfilen '%.64s' mste finnas i databasbiblioteket eller vara lsbar fr alla" - ukr " '%-.64s' æ Ӧ" + cze "Soubor '%-.64s' mus-B bt v adresi databze nebo iteln pro vechny" + dan "Filen '%-.64s' skal vre i database-folderen og kunne lses af alle" + nla "Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn." 
+ eng "The file '%-.128s' must be in the database directory or be readable by all"
+ jps "t@C '%-.64s' databse directory ɂ邩SẴ[U[ǂ߂悤ɋ‚ĂȂ΂Ȃ܂.",
+ est "Fail '%-.64s' peab asuma andmebaasi kataloogis vi olema kigile loetav"
+ fre "Le fichier '%-.64s' doit tre dans le rpertoire de la base et lisible par tous"
+ ger "Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden oder lesbar fr alle sein"
+ greek " '%-.64s' database directory "
+ hun "A(z) '%-.64s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak"
+ ita "Il file '%-.64s' deve essere nella directory del database e deve essere leggibile da tutti"
+ jpn "ե '%-.64s' databse directory ˤ뤫ƤΥ桼ɤ褦˵ĤƤʤФʤޤ."
+ kor "'%-.64s' ȭϴ Ÿ̽ 丮 ϰų ο б Ͽ մϴ."
+ nor "Filen '%-.64s' m vre i database-katalogen for vre lesbar for alle"
+ norwegian-ny "Filen '%-.64s' m vre i database-katalogen for vre lesbar for alle"
+ pol "Plik '%-.64s' musi znajdowa sie w katalogu bazy danych lub mie prawa czytania przez wszystkich"
+ por "Arquivo '%-.64s' tem que estar no diretrio do banco de dados ou ter leitura possvel para todos"
+ rum "Fisierul '%-.64s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)"
+ rus " '%-.64s' , , "
+ serbian "File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajua prava pristupa"
+ slo "Sbor '%-.64s' mus by v adresri databzy, alebo itaten pre vetkch"
+ spa "El archivo '%-.64s' debe estar en el directorio de la base de datos o ser de lectura por todos"
+ swe "Textfilen '%.64s' mste finnas i databasbiblioteket eller vara lsbar fr alla"
+ ukr " '%-.64s' æ Ӧ"
ER_FILE_EXISTS_ERROR
- cze "Soubor '%-.64s' ji-B existuje"
- dan "Filen '%-.64s' eksisterer allerede"
- nla "Het bestand '%-.64s' bestaat reeds"
- eng "File '%-.200s' already exists"
- jps "File '%-.64s' ͊ɑ݂܂",
- est "Fail '%-.80s' juba eksisteerib"
- fre "Le fichier '%-.64s' existe dj"
- ger "Datei '%-.80s' bereits vorhanden"
- greek " '%-.64s' "
- hun "A '%-.64s' file mar letezik."
- ita "Il file '%-.64s' esiste gia`"
- jpn "File '%-.64s' ϴ¸ߤޤ"
- kor "'%-.64s' ȭ ̹ մϴ."
- nor "Filen '%-.64s' eksisterte allerede"
- norwegian-ny "Filen '%-.64s' eksisterte allereide"
- pol "Plik '%-.64s' ju istnieje"
- por "Arquivo '%-.80s' j existe"
- rum "Fisierul '%-.80s' exista deja"
- rus " '%-.80s' "
- serbian "File '%-.80s' ve postoji"
- slo "Sbor '%-.64s' u existuje"
- spa "El archivo '%-.64s' ya existe"
- swe "Filen '%-.64s' existerar redan"
- ukr " '%-.80s' դ"
+ cze "Soubor '%-.64s' ji-B existuje"
+ dan "Filen '%-.64s' eksisterer allerede"
+ nla "Het bestand '%-.64s' bestaat reeds"
+ eng "File '%-.200s' already exists"
+ jps "File '%-.64s' ͊ɑ݂܂",
+ est "Fail '%-.80s' juba eksisteerib"
+ fre "Le fichier '%-.64s' existe dj"
+ ger "Datei '%-.80s' bereits vorhanden"
+ greek " '%-.64s' "
+ hun "A '%-.64s' file mar letezik."
+ ita "Il file '%-.64s' esiste gia`"
+ jpn "File '%-.64s' ϴ¸ߤޤ"
+ kor "'%-.64s' ȭ ̹ մϴ."
+ nor "Filen '%-.64s' eksisterte allerede" + norwegian-ny "Filen '%-.64s' eksisterte allereide" + pol "Plik '%-.64s' ju istnieje" + por "Arquivo '%-.80s' j existe" + rum "Fisierul '%-.80s' exista deja" + rus " '%-.80s' " + serbian "File '%-.80s' ve postoji" + slo "Sbor '%-.64s' u existuje" + spa "El archivo '%-.64s' ya existe" + swe "Filen '%-.64s' existerar redan" + ukr " '%-.80s' դ" ER_LOAD_INFO - cze "Z-Bznam: %ld Vymazno: %ld Peskoeno: %ld Varovn: %ld" - dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld" - nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld" - eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld" - jps "R[h: %ld 폜: %ld Skipped: %ld Warnings: %ld", - est "Kirjeid: %ld Kustutatud: %ld Vahele jetud: %ld Hoiatusi: %ld" - fre "Enregistrements: %ld Effacs: %ld Non traits: %ld Avertissements: %ld" - ger "Datenstze: %ld Gelscht: %ld Ausgelassen: %ld Warnungen: %ld" - greek ": %ld : %ld : %ld : %ld" - hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld" - ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld" - jpn "쥳ɿ: %ld : %ld Skipped: %ld Warnings: %ld" - kor "ڵ: %ld : %ld ŵ: %ld : %ld" - nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld" - norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld tvaringar: %ld" - pol "Recordw: %ld Usunitych: %ld Pominitych: %ld Ostrzee: %ld" - por "Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld" - rum "Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld" - rus ": %ld : %ld : %ld : %ld" - serbian "Slogova: %ld Izbrisano: %ld Preskoeno: %ld Upozorenja: %ld" - slo "Zznamov: %ld Zmazanch: %ld Preskoench: %ld Varovania: %ld" - spa "Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld" - swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld" - ukr "Ӧ: %ld : %ld : %ld : %ld" + cze "Z-Bznam: %ld Vymazno: %ld Peskoeno: %ld Varovn: %ld" + dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld" + nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld" + eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld" + jps "R[h: %ld 폜: %ld Skipped: %ld Warnings: %ld", + est "Kirjeid: %ld Kustutatud: %ld Vahele jetud: %ld Hoiatusi: %ld" + fre "Enregistrements: %ld Effacs: %ld Non traits: %ld Avertissements: %ld" + ger "Datenstze: %ld Gelscht: %ld Ausgelassen: %ld Warnungen: %ld" + greek ": %ld : %ld : %ld : %ld" + hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld" + ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld" + jpn "쥳ɿ: %ld : %ld Skipped: %ld Warnings: %ld" + kor "ڵ: %ld : %ld ŵ: %ld : %ld" + nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld" + norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld tvaringar: %ld" + pol "Recordw: %ld Usunitych: %ld Pominitych: %ld Ostrzee: %ld" + por "Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld" + rum "Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld" + rus ": %ld : %ld : %ld : %ld" + serbian "Slogova: %ld Izbrisano: %ld Preskoeno: %ld Upozorenja: %ld" + slo "Zznamov: %ld Zmazanch: %ld Preskoench: %ld Varovania: %ld" + spa "Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld" + swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld" + ukr "Ӧ: %ld : %ld : %ld : %ld" ER_ALTER_INFO - cze "Z-Bznam: %ld Zdvojench: %ld" - dan "Poster: %ld Ens: %ld" - nla "Records: %ld Dubbel: %ld" - eng "Records: %ld Duplicates: %ld" - jps "R[h: %ld d: %ld", - est "Kirjeid: %ld Kattuvaid: %ld" - fre 
"Enregistrements: %ld Doublons: %ld" - ger "Datenstze: %ld Duplikate: %ld" - greek ": %ld : %ld" - hun "Rekordok: %ld Duplikalva: %ld" - ita "Records: %ld Duplicati: %ld" - jpn "쥳ɿ: %ld ʣ: %ld" - kor "ڵ: %ld ߺ: %ld" - nor "Poster: %ld Like: %ld" - norwegian-ny "Poster: %ld Like: %ld" - pol "Rekordw: %ld Duplikatw: %ld" - por "Registros: %ld - Duplicados: %ld" - rum "Recorduri: %ld Duplicate: %ld" - rus ": %ld : %ld" - serbian "Slogova: %ld Duplikata: %ld" - slo "Zznamov: %ld Opakovanch: %ld" - spa "Registros: %ld Duplicados: %ld" - swe "Rader: %ld Dubletter: %ld" - ukr "Ӧ: %ld ̦Ԧ: %ld" + cze "Z-Bznam: %ld Zdvojench: %ld" + dan "Poster: %ld Ens: %ld" + nla "Records: %ld Dubbel: %ld" + eng "Records: %ld Duplicates: %ld" + jps "R[h: %ld d: %ld", + est "Kirjeid: %ld Kattuvaid: %ld" + fre "Enregistrements: %ld Doublons: %ld" + ger "Datenstze: %ld Duplikate: %ld" + greek ": %ld : %ld" + hun "Rekordok: %ld Duplikalva: %ld" + ita "Records: %ld Duplicati: %ld" + jpn "쥳ɿ: %ld ʣ: %ld" + kor "ڵ: %ld ߺ: %ld" + nor "Poster: %ld Like: %ld" + norwegian-ny "Poster: %ld Like: %ld" + pol "Rekordw: %ld Duplikatw: %ld" + por "Registros: %ld - Duplicados: %ld" + rum "Recorduri: %ld Duplicate: %ld" + rus ": %ld : %ld" + serbian "Slogova: %ld Duplikata: %ld" + slo "Zznamov: %ld Opakovanch: %ld" + spa "Registros: %ld Duplicados: %ld" + swe "Rader: %ld Dubletter: %ld" + ukr "Ӧ: %ld ̦Ԧ: %ld" ER_WRONG_SUB_KEY - cze "Chybn-B podst kle -- nen to etzec nebo je del ne dlka sti kle" - dan "Forkert indeksdel. Den anvendte ngledel er ikke en streng eller lngden er strre end nglelngden" - nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel" - eng "Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys" - est "Vigane vtme osa. Kasutatud vtmeosa ei ole string tpi, mratud pikkus on pikem kui vtmeosa vi tabelihandler ei toeta seda tpi vtmeid" - fre "Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dpasse celle dfinie dans la clef" - ger "Falscher Unterteilschlssel. Der verwendete Schlsselteil ist entweder kein String, die verwendete Lnge ist lnger als der Teilschlssel oder die Speicher-Engine untersttzt keine Unterteilschlssel" - greek " sub part key. key part string " - hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz" - ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave." - jpn "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part" - kor "Ȯ Ʈ Ű. Ű Ʈ Ʈ ƴϰų Ű Ʈ ̰ ʹ ϴ." - nor "Feil delnkkel. Den brukte delnkkelen er ikke en streng eller den oppgitte lengde er lengre enn nkkel lengden" - norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden" - pol "Bdna podcz? klucza. Uyta cz? klucza nie jest acuchem lub uyta dugo? jest wiksza ni cz? klucza" - por "Sub parte da chave incorreta. A parte da chave usada no uma 'string' ou o comprimento usado maior que parte da chave ou o manipulador de tabelas no suporta sub chaves nicas" - rum "Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii" - rus " . , , , " - serbian "Pogrean pod-klju dela kljua. 
Upotrebljeni deo kljua nije string, upotrebljena duina je vea od dela kljua ili handler tabela ne podrava jedinstvene pod-kljueve" - slo "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part" - spa "Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave" - swe "Felaktig delnyckel. Nyckeldelen r inte en strng eller den angivna lngden r lngre n kolumnlngden" - ukr "צ . , ڦ æ Цդ Φ " + cze "Chybn-B podst kle -- nen to etzec nebo je del ne dlka sti kle" + dan "Forkert indeksdel. Den anvendte ngledel er ikke en streng eller lngden er strre end nglelngden" + nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel" + eng "Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys" + est "Vigane vtme osa. Kasutatud vtmeosa ei ole string tpi, mratud pikkus on pikem kui vtmeosa vi tabelihandler ei toeta seda tpi vtmeid" + fre "Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dpasse celle dfinie dans la clef" + ger "Falscher Unterteilschlssel. Der verwendete Schlsselteil ist entweder kein String, die verwendete Lnge ist lnger als der Teilschlssel oder die Speicher-Engine untersttzt keine Unterteilschlssel" + greek " sub part key. key part string " + hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz" + ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave." + jpn "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part" + kor "Ȯ Ʈ Ű. Ű Ʈ Ʈ ƴϰų Ű Ʈ ̰ ʹ ϴ." + nor "Feil delnkkel. Den brukte delnkkelen er ikke en streng eller den oppgitte lengde er lengre enn nkkel lengden" + norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden" + pol "Bdna podcz? klucza. Uyta cz? klucza nie jest acuchem lub uyta dugo? jest wiksza ni cz? klucza" + por "Sub parte da chave incorreta. A parte da chave usada no uma 'string' ou o comprimento usado maior que parte da chave ou o manipulador de tabelas no suporta sub chaves nicas" + rum "Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii" + rus " . , , , " + serbian "Pogrean pod-klju dela kljua. Upotrebljeni deo kljua nije string, upotrebljena duina je vea od dela kljua ili handler tabela ne podrava jedinstvene pod-kljueve" + slo "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part" + spa "Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave" + swe "Felaktig delnyckel. Nyckeldelen r inte en strng eller den angivna lngden r lngre n kolumnlngden" + ukr "צ . , ڦ æ Цդ Φ " ER_CANT_REMOVE_ALL_FIELDS 42000 - cze "Nen-B mon vymazat vechny poloky s ALTER TABLE. Pouijte DROP TABLE" - dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet." - nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!" - eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead" - jps "ALTER TABLE őSĂ column ͍폜ł܂. 
DROP TABLE gpĂ", - est "ALTER TABLE kasutades ei saa kustutada kiki tulpasid. Kustuta tabel DROP TABLE abil" - fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE" - ger "Mit ALTER TABLE knnen nicht alle Felder auf einmal gelscht werden. Dafr DROP TABLE verwenden" - greek " ALTER TABLE. DROP TABLE" - hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette" - ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE" - jpn "ALTER TABLE Ƥ column ϺǤޤ. DROP TABLE ѤƤ" - kor "ALTER TABLE δ Į ϴ. DROP TABLE ̿ϼ." - nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden." - norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor." - pol "Nie mona usun? wszystkich pl wykorzystuj?c ALTER TABLE. W zamian uyj DROP TABLE" - por "Voc no pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar" - rum "Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb" - rus " ALTER TABLE. DROP TABLE" - serbian "Ne moete da izbriete sve kolone pomou komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako elite to da uradite" - slo "One nemem zmaza all fields with ALTER TABLE; use DROP TABLE instead" - spa "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo" - swe "Man kan inte radera alla flt med ALTER TABLE. Anvnd DROP TABLE istllet" - ukr " Ӧ æ ALTER TABLE. DROP TABLE" + cze "Nen-B mon vymazat vechny poloky s ALTER TABLE. Pouijte DROP TABLE" + dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet." + nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!" + eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead" + jps "ALTER TABLE őSĂ column ͍폜ł܂. DROP TABLE gpĂ", + est "ALTER TABLE kasutades ei saa kustutada kiki tulpasid. Kustuta tabel DROP TABLE abil" + fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE" + ger "Mit ALTER TABLE knnen nicht alle Felder auf einmal gelscht werden. Dafr DROP TABLE verwenden" + greek " ALTER TABLE. DROP TABLE" + hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette" + ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE" + jpn "ALTER TABLE Ƥ column ϺǤޤ. DROP TABLE ѤƤ" + kor "ALTER TABLE δ Į ϴ. DROP TABLE ̿ϼ." + nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden." + norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor." + pol "Nie mona usun? wszystkich pl wykorzystuj?c ALTER TABLE. W zamian uyj DROP TABLE" + por "Voc no pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar" + rum "Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb" + rus " ALTER TABLE. DROP TABLE" + serbian "Ne moete da izbriete sve kolone pomou komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako elite to da uradite" + slo "One nemem zmaza all fields with ALTER TABLE; use DROP TABLE instead" + spa "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo" + swe "Man kan inte radera alla flt med ALTER TABLE. Anvnd DROP TABLE istllet" + ukr " Ӧ æ ALTER TABLE. DROP TABLE" ER_CANT_DROP_FIELD_OR_KEY 42000 - cze "Nemohu zru-Bit '%-.64s' (provst DROP). Zkontrolujte, zda neexistuj zznamy/kle" - dan "Kan ikke udfre DROP '%-.64s'. Undersg om feltet/nglen eksisterer." 
- nla "Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat." - eng "Can't DROP '%-.64s'; check that column/key exists" - jps "'%-.64s' jł܂ł; check that column/key exists", - est "Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/vti eksisteerib" - fre "Ne peut effacer (DROP) '%-.64s'. Vrifiez s'il existe" - ger "Kann '%-.64s' nicht lschen. Existiert die Spalte oder der Schlssel?" - greek " (DROP) '%-.64s'. / " - hun "A DROP '%-.64s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e" - ita "Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista" - jpn "'%-.64s' ˴ǤޤǤ; check that column/key exists" - kor "'%-.64s' DROP ϴ. Į̳ Ű ϴ äũϼ." - nor "Kan ikke DROP '%-.64s'. Undersk om felt/nkkel eksisterer." - norwegian-ny "Kan ikkje DROP '%-.64s'. Undersk om felt/nkkel eksisterar." - pol "Nie mona wykona operacji DROP '%-.64s'. Sprawd, czy to pole/klucz istnieje" - por "No se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe" - rum "Nu pot sa DROP '%-.64s'. Verifica daca coloana/cheia exista" - rus " (DROP) '%-.64s'. / " - serbian "Ne mogu da izvrim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno klju) postoji" - slo "Nemem zrui (DROP) '%-.64s'. Skontrolujte, i neexistuj zznamy/ke" - spa "No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe" - swe "Kan inte ta bort '%-.64s'. Kontrollera att fltet/nyckel finns" - ukr " DROP '%-.64s'. צ, / դ" + cze "Nemohu zru-Bit '%-.64s' (provst DROP). Zkontrolujte, zda neexistuj zznamy/kle" + dan "Kan ikke udfre DROP '%-.64s'. Undersg om feltet/nglen eksisterer." + nla "Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat." + eng "Can't DROP '%-.64s'; check that column/key exists" + jps "'%-.64s' jł܂ł; check that column/key exists", + est "Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/vti eksisteerib" + fre "Ne peut effacer (DROP) '%-.64s'. Vrifiez s'il existe" + ger "Kann '%-.64s' nicht lschen. Existiert die Spalte oder der Schlssel?" + greek " (DROP) '%-.64s'. / " + hun "A DROP '%-.64s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e" + ita "Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista" + jpn "'%-.64s' ˴ǤޤǤ; check that column/key exists" + kor "'%-.64s' DROP ϴ. Į̳ Ű ϴ äũϼ." + nor "Kan ikke DROP '%-.64s'. Undersk om felt/nkkel eksisterer." + norwegian-ny "Kan ikkje DROP '%-.64s'. Undersk om felt/nkkel eksisterar." + pol "Nie mona wykona operacji DROP '%-.64s'. Sprawd, czy to pole/klucz istnieje" + por "No se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe" + rum "Nu pot sa DROP '%-.64s'. Verifica daca coloana/cheia exista" + rus " (DROP) '%-.64s'. / " + serbian "Ne mogu da izvrim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno klju) postoji" + slo "Nemem zrui (DROP) '%-.64s'. Skontrolujte, i neexistuj zznamy/ke" + spa "No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe" + swe "Kan inte ta bort '%-.64s'. Kontrollera att fltet/nyckel finns" + ukr " DROP '%-.64s'. 
צ, / դ" ER_INSERT_INFO - cze "Z-Bznam: %ld Zdvojench: %ld Varovn: %ld" - dan "Poster: %ld Ens: %ld Advarsler: %ld" - nla "Records: %ld Dubbel: %ld Waarschuwing: %ld" - eng "Records: %ld Duplicates: %ld Warnings: %ld" - jps "R[h: %ld d: %ld Warnings: %ld", - est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld" - fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld" - ger "Datenstze: %ld Duplikate: %ld Warnungen: %ld" - greek ": %ld : %ld : %ld" - hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld" - ita "Records: %ld Duplicati: %ld Avvertimenti: %ld" - jpn "쥳ɿ: %ld ʣ: %ld Warnings: %ld" - kor "ڵ: %ld ߺ: %ld : %ld" - nor "Poster: %ld Like: %ld Advarsler: %ld" - norwegian-ny "Postar: %ld Like: %ld tvaringar: %ld" - pol "Rekordw: %ld Duplikatw: %ld Ostrzee: %ld" - por "Registros: %ld - Duplicados: %ld - Avisos: %ld" - rum "Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld" - rus ": %ld : %ld : %ld" - serbian "Slogova: %ld Duplikata: %ld Upozorenja: %ld" - slo "Zznamov: %ld Opakovanch: %ld Varovania: %ld" - spa "Registros: %ld Duplicados: %ld Peligros: %ld" - swe "Rader: %ld Dubletter: %ld Varningar: %ld" - ukr "Ӧ: %ld ̦Ԧ: %ld : %ld" + cze "Z-Bznam: %ld Zdvojench: %ld Varovn: %ld" + dan "Poster: %ld Ens: %ld Advarsler: %ld" + nla "Records: %ld Dubbel: %ld Waarschuwing: %ld" + eng "Records: %ld Duplicates: %ld Warnings: %ld" + jps "R[h: %ld d: %ld Warnings: %ld", + est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld" + fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld" + ger "Datenstze: %ld Duplikate: %ld Warnungen: %ld" + greek ": %ld : %ld : %ld" + hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld" + ita "Records: %ld Duplicati: %ld Avvertimenti: %ld" + jpn "쥳ɿ: %ld ʣ: %ld Warnings: %ld" + kor "ڵ: %ld ߺ: %ld : %ld" + nor "Poster: %ld Like: %ld Advarsler: %ld" + norwegian-ny "Postar: %ld Like: %ld tvaringar: %ld" + pol "Rekordw: %ld Duplikatw: %ld Ostrzee: %ld" + por "Registros: %ld - Duplicados: %ld - Avisos: %ld" + rum "Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld" + rus ": %ld : %ld : %ld" + serbian "Slogova: %ld Duplikata: %ld Upozorenja: %ld" + slo "Zznamov: %ld Opakovanch: %ld Varovania: %ld" + spa "Registros: %ld Duplicados: %ld Peligros: %ld" + swe "Rader: %ld Dubletter: %ld Varningar: %ld" + ukr "Ӧ: %ld ̦Ԧ: %ld : %ld" ER_UPDATE_TABLE_USED - eng "You can't specify target table '%-.64s' for update in FROM clause" - ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulssig." - rus " '%-.64s' FROM " - swe "INSERT-table '%-.64s' fr inte finnas i FROM tabell-listan" - ukr " '%-.64s' ͦ ̦ FROM" + eng "You can't specify target table '%-.64s' for update in FROM clause" + ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulssig." 
+ rus " '%-.64s' FROM " + swe "INSERT-table '%-.64s' fr inte finnas i FROM tabell-listan" + ukr " '%-.64s' ͦ ̦ FROM" ER_NO_SUCH_THREAD - cze "Nezn-Bm identifikace threadu: %lu" - dan "Ukendt trd id: %lu" - nla "Onbekend thread id: %lu" - eng "Unknown thread id: %lu" - jps "thread id: %lu ͂܂", - est "Tundmatu lim: %lu" - fre "Numro de tche inconnu: %lu" - ger "Unbekannte Thread-ID: %lu" - greek " thread id: %lu" - hun "Ervenytelen szal (thread) id: %lu" - ita "Thread id: %lu sconosciuto" - jpn "thread id: %lu Ϥޤ" - kor "˼ id: %lu" - nor "Ukjent trd id: %lu" - norwegian-ny "Ukjent trd id: %lu" - pol "Nieznany identyfikator w?tku: %lu" - por "'Id' de 'thread' %lu desconhecido" - rum "Id-ul: %lu thread-ului este necunoscut" - rus " : %lu" - serbian "Nepoznat thread identifikator: %lu" - slo "Neznma identifikcia vlkna: %lu" - spa "Identificador del thread: %lu desconocido" - swe "Finns ingen trd med id %lu" - ukr "צ Ʀ Ǧ: %lu" + cze "Nezn-Bm identifikace threadu: %lu" + dan "Ukendt trd id: %lu" + nla "Onbekend thread id: %lu" + eng "Unknown thread id: %lu" + jps "thread id: %lu ͂܂", + est "Tundmatu lim: %lu" + fre "Numro de tche inconnu: %lu" + ger "Unbekannte Thread-ID: %lu" + greek " thread id: %lu" + hun "Ervenytelen szal (thread) id: %lu" + ita "Thread id: %lu sconosciuto" + jpn "thread id: %lu Ϥޤ" + kor "˼ id: %lu" + nor "Ukjent trd id: %lu" + norwegian-ny "Ukjent trd id: %lu" + pol "Nieznany identyfikator w?tku: %lu" + por "'Id' de 'thread' %lu desconhecido" + rum "Id-ul: %lu thread-ului este necunoscut" + rus " : %lu" + serbian "Nepoznat thread identifikator: %lu" + slo "Neznma identifikcia vlkna: %lu" + spa "Identificador del thread: %lu desconocido" + swe "Finns ingen trd med id %lu" + ukr "צ Ʀ Ǧ: %lu" ER_KILL_DENIED_ERROR - cze "Nejste vlastn-Bkem threadu %lu" - dan "Du er ikke ejer af trden %lu" - nla "U bent geen bezitter van thread %lu" - eng "You are not owner of thread %lu" - jps "thread %lu ̃I[i[ł͂܂", - est "Ei ole lime %lu omanik" - fre "Vous n'tes pas propritaire de la tche no: %lu" - ger "Sie sind nicht Eigentmer von Thread %lu" - greek " owner thread %lu" - hun "A %lu thread-nek mas a tulajdonosa" - ita "Utente non proprietario del thread %lu" - jpn "thread %lu ΥʡǤϤޤ" - kor "(Thread) %lu ڰ ƴմϴ." - nor "Du er ikke eier av trden %lu" - norwegian-ny "Du er ikkje eigar av trd %lu" - pol "Nie jeste? wa?cicielem w?tku %lu" - por "Voc no proprietrio da 'thread' %lu" - rum "Nu sinteti proprietarul threadului %lu" - rus " %lu" - serbian "Vi niste vlasnik thread-a %lu" - slo "Nie ste vlastnkom vlkna %lu" - spa "Tu no eres el propietario del thread%lu" - swe "Du r inte gare till trd %lu" - ukr " Ǧ %lu" + cze "Nejste vlastn-Bkem threadu %lu" + dan "Du er ikke ejer af trden %lu" + nla "U bent geen bezitter van thread %lu" + eng "You are not owner of thread %lu" + jps "thread %lu ̃I[i[ł͂܂", + est "Ei ole lime %lu omanik" + fre "Vous n'tes pas propritaire de la tche no: %lu" + ger "Sie sind nicht Eigentmer von Thread %lu" + greek " owner thread %lu" + hun "A %lu thread-nek mas a tulajdonosa" + ita "Utente non proprietario del thread %lu" + jpn "thread %lu ΥʡǤϤޤ" + kor "(Thread) %lu ڰ ƴմϴ." + nor "Du er ikke eier av trden %lu" + norwegian-ny "Du er ikkje eigar av trd %lu" + pol "Nie jeste? 
wa?cicielem w?tku %lu" + por "Voc no proprietrio da 'thread' %lu" + rum "Nu sinteti proprietarul threadului %lu" + rus " %lu" + serbian "Vi niste vlasnik thread-a %lu" + slo "Nie ste vlastnkom vlkna %lu" + spa "Tu no eres el propietario del thread%lu" + swe "Du r inte gare till trd %lu" + ukr " Ǧ %lu" ER_NO_TABLES_USED - cze "Nejsou pou-Bity dn tabulky" - dan "Ingen tabeller i brug" - nla "Geen tabellen gebruikt." - eng "No tables used" - est "htegi tabelit pole kasutusel" - fre "Aucune table utilise" - ger "Keine Tabellen verwendet" - greek " " - hun "Nincs hasznalt tabla" - ita "Nessuna tabella usata" - kor " ̺ ʾҽϴ." - nor "Ingen tabeller i bruk" - norwegian-ny "Ingen tabellar i bruk" - pol "Nie ma adej uytej tabeli" - por "Nenhuma tabela usada" - rum "Nici o tabela folosita" - rus " " - serbian "Nema upotrebljenih tabela" - slo "Nie je pouit iadna tabuka" - spa "No ha tablas usadas" - swe "Inga tabeller angivna" - ukr " " + cze "Nejsou pou-Bity dn tabulky" + dan "Ingen tabeller i brug" + nla "Geen tabellen gebruikt." + eng "No tables used" + est "htegi tabelit pole kasutusel" + fre "Aucune table utilise" + ger "Keine Tabellen verwendet" + greek " " + hun "Nincs hasznalt tabla" + ita "Nessuna tabella usata" + kor " ̺ ʾҽϴ." + nor "Ingen tabeller i bruk" + norwegian-ny "Ingen tabellar i bruk" + pol "Nie ma adej uytej tabeli" + por "Nenhuma tabela usada" + rum "Nici o tabela folosita" + rus " " + serbian "Nema upotrebljenih tabela" + slo "Nie je pouit iadna tabuka" + spa "No ha tablas usadas" + swe "Inga tabeller angivna" + ukr " " ER_TOO_BIG_SET - cze "P-Bli mnoho etzc pro sloupec %s a SET" - dan "For mange tekststrenge til specifikationen af SET i kolonne %-.64s" - nla "Teveel strings voor kolom %s en SET" - eng "Too many strings for column %-.64s and SET" - est "Liiga palju string tulbale %-.64s tbile SET" - fre "Trop de chanes dans la colonne %s avec SET" - ger "Zu viele Strings fr Feld %-.64s und SET angegeben" - greek " strings %-.64s SET" - hun "Tul sok karakter: %-.64s es SET" - ita "Troppe stringhe per la colonna %-.64s e la SET" - kor "Į %-.64s SET Ʈ ʹ ϴ." - nor "For mange tekststrenger kolonne %s og SET" - norwegian-ny "For mange tekststrengar felt %s og SET" - pol "Zbyt wiele acuchw dla kolumny %s i polecenia SET" - por "'Strings' demais para coluna '%-.64s' e SET" - rum "Prea multe siruri pentru coloana %-.64s si SET" - rus " %-.64s SET" - serbian "Previe string-ova za kolonu '%-.64s' i komandu 'SET'" - slo "Prli mnoho reazcov pre pole %-.64s a SET" - spa "Muchas strings para columna %s y SET" - swe "Fr mnga alternativ till kolumn %s fr SET" - ukr " %-.64s SET" + cze "P-Bli mnoho etzc pro sloupec %s a SET" + dan "For mange tekststrenge til specifikationen af SET i kolonne %-.64s" + nla "Teveel strings voor kolom %s en SET" + eng "Too many strings for column %-.64s and SET" + est "Liiga palju string tulbale %-.64s tbile SET" + fre "Trop de chanes dans la colonne %s avec SET" + ger "Zu viele Strings fr Feld %-.64s und SET angegeben" + greek " strings %-.64s SET" + hun "Tul sok karakter: %-.64s es SET" + ita "Troppe stringhe per la colonna %-.64s e la SET" + kor "Į %-.64s SET Ʈ ʹ ϴ." 
+ nor "For mange tekststrenger kolonne %s og SET" + norwegian-ny "For mange tekststrengar felt %s og SET" + pol "Zbyt wiele acuchw dla kolumny %s i polecenia SET" + por "'Strings' demais para coluna '%-.64s' e SET" + rum "Prea multe siruri pentru coloana %-.64s si SET" + rus " %-.64s SET" + serbian "Previe string-ova za kolonu '%-.64s' i komandu 'SET'" + slo "Prli mnoho reazcov pre pole %-.64s a SET" + spa "Muchas strings para columna %s y SET" + swe "Fr mnga alternativ till kolumn %s fr SET" + ukr " %-.64s SET" ER_NO_UNIQUE_LOGFILE - cze "Nemohu vytvo-Bit jednoznan jmno logovacho souboru %s.(1-999)\n" - dan "Kan ikke lave unikt log-filnavn %s.(1-999)\n" - nla "Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n" - eng "Can't generate a unique log-filename %-.200s.(1-999)\n" - est "Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n" - fre "Ne peut gnrer un unique nom de journal %s.(1-999)\n" - ger "Kann keinen eindeutigen Dateinamen fr die Logdatei %-.64s(1-999) erzeugen\n" - greek " unique log-filename %-.64s.(1-999)\n" - hun "Egyedi log-filenev nem generalhato: %-.64s.(1-999)\n" - ita "Impossibile generare un nome del file log unico %-.64s.(1-999)\n" - kor "Unique αȭ '%-.64s' ϴ.(1-999)\n" - nor "Kan ikke lage unikt loggfilnavn %s.(1-999)\n" - norwegian-ny "Kan ikkje lage unikt loggfilnavn %s.(1-999)\n" - pol "Nie mona stworzy unikalnej nazwy pliku z logiem %s.(1-999)\n" - por "No pode gerar um nome de arquivo de 'log' nico '%-.64s'.(1-999)\n" - rum "Nu pot sa generez un nume de log unic %-.64s.(1-999)\n" - rus " %-.64s.(1-999)\n" - serbian "Ne mogu da generiem jedinstveno ime log-file-a: '%-.64s.(1-999)'\n" - slo "Nemem vytvori uniktne meno log-sboru %-.64s.(1-999)\n" - spa "No puede crear un unico archivo log %s.(1-999)\n" - swe "Kan inte generera ett unikt filnamn %s.(1-999)\n" - ukr " Φ ' log- %-.64s.(1-999)\n" + cze "Nemohu vytvo-Bit jednoznan jmno logovacho souboru %s.(1-999)\n" + dan "Kan ikke lave unikt log-filnavn %s.(1-999)\n" + nla "Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n" + eng "Can't generate a unique log-filename %-.200s.(1-999)\n" + est "Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n" + fre "Ne peut gnrer un unique nom de journal %s.(1-999)\n" + ger "Kann keinen eindeutigen Dateinamen fr die Logdatei %-.64s(1-999) erzeugen\n" + greek " unique log-filename %-.64s.(1-999)\n" + hun "Egyedi log-filenev nem generalhato: %-.64s.(1-999)\n" + ita "Impossibile generare un nome del file log unico %-.64s.(1-999)\n" + kor "Unique αȭ '%-.64s' ϴ.(1-999)\n" + nor "Kan ikke lage unikt loggfilnavn %s.(1-999)\n" + norwegian-ny "Kan ikkje lage unikt loggfilnavn %s.(1-999)\n" + pol "Nie mona stworzy unikalnej nazwy pliku z logiem %s.(1-999)\n" + por "No pode gerar um nome de arquivo de 'log' nico '%-.64s'.(1-999)\n" + rum "Nu pot sa generez un nume de log unic %-.64s.(1-999)\n" + rus " %-.64s.(1-999)\n" + serbian "Ne mogu da generiem jedinstveno ime log-file-a: '%-.64s.(1-999)'\n" + slo "Nemem vytvori uniktne meno log-sboru %-.64s.(1-999)\n" + spa "No puede crear un unico archivo log %s.(1-999)\n" + swe "Kan inte generera ett unikt filnamn %s.(1-999)\n" + ukr " Φ ' log- %-.64s.(1-999)\n" ER_TABLE_NOT_LOCKED_FOR_WRITE - cze "Tabulka '%-.64s' byla zam-Bena s READ a neme bt zmnna" - dan "Tabellen '%-.64s' var lst med READ ls og kan ikke opdateres" - nla "Tabel '%-.64s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen." 
- eng "Table '%-.64s' was locked with a READ lock and can't be updated" - jps "Table '%-.64s' READ lock ɂȂĂāAXV͂ł܂", - est "Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav" - fre "Table '%-.64s' verrouille lecture (READ): modification impossible" - ger "Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden" - greek " '%-.64s' READ lock " - hun "A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni" - ita "La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata" - jpn "Table '%-.64s' READ lock ˤʤäƤơϤǤޤ" - kor "̺ '%-.64s' READ ־ ϴ." - nor "Tabellen '%-.64s' var lst med READ ls og kan ikke oppdateres" - norwegian-ny "Tabellen '%-.64s' var lst med READ ls og kan ikkje oppdaterast" - pol "Tabela '%-.64s' zostaa zablokowana przez READ i nie moe zosta zaktualizowana" - por "Tabela '%-.64s' foi travada com trava de leitura e no pode ser atualizada" - rum "Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata" - rus " '%-.64s' READ lock " - serbian "Tabela '%-.64s' je zakljuana READ lock-om; iz nje se moe samo itati ali u nju se ne moe pisati" - slo "Tabuka '%-.64s' bola zamknut s READ a neme by zmenen" - spa "Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada" - swe "Tabell '%-.64s' kan inte uppdateras emedan den r lst fr lsning" - ukr " '%-.64s' Ԧ , " + cze "Tabulka '%-.64s' byla zam-Bena s READ a neme bt zmnna" + dan "Tabellen '%-.64s' var lst med READ ls og kan ikke opdateres" + nla "Tabel '%-.64s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen." + eng "Table '%-.64s' was locked with a READ lock and can't be updated" + jps "Table '%-.64s' READ lock ɂȂĂāAXV͂ł܂", + est "Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav" + fre "Table '%-.64s' verrouille lecture (READ): modification impossible" + ger "Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden" + greek " '%-.64s' READ lock " + hun "A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni" + ita "La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata" + jpn "Table '%-.64s' READ lock ˤʤäƤơϤǤޤ" + kor "̺ '%-.64s' READ ־ ϴ." 
+ nor "Tabellen '%-.64s' var lst med READ ls og kan ikke oppdateres" + norwegian-ny "Tabellen '%-.64s' var lst med READ ls og kan ikkje oppdaterast" + pol "Tabela '%-.64s' zostaa zablokowana przez READ i nie moe zosta zaktualizowana" + por "Tabela '%-.64s' foi travada com trava de leitura e no pode ser atualizada" + rum "Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata" + rus " '%-.64s' READ lock " + serbian "Tabela '%-.64s' je zakljuana READ lock-om; iz nje se moe samo itati ali u nju se ne moe pisati" + slo "Tabuka '%-.64s' bola zamknut s READ a neme by zmenen" + spa "Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada" + swe "Tabell '%-.64s' kan inte uppdateras emedan den r lst fr lsning" + ukr " '%-.64s' Ԧ , " ER_TABLE_NOT_LOCKED - cze "Tabulka '%-.64s' nebyla zam-Bena s LOCK TABLES" - dan "Tabellen '%-.64s' var ikke lst med LOCK TABLES" - nla "Tabel '%-.64s' was niet gelocked met LOCK TABLES" - eng "Table '%-.64s' was not locked with LOCK TABLES" - jps "Table '%-.64s' LOCK TABLES ɂăbNĂ܂", - est "Tabel '%-.64s' ei ole lukustatud ksuga LOCK TABLES" - fre "Table '%-.64s' non verrouille: utilisez LOCK TABLES" - ger "Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt" - greek " '%-.64s' LOCK TABLES" - hun "A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel" - ita "Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES" - jpn "Table '%-.64s' LOCK TABLES ˤäƥåƤޤ" - kor "̺ '%-.64s' LOCK TABLES ʾҽϴ." - nor "Tabellen '%-.64s' var ikke lst med LOCK TABLES" - norwegian-ny "Tabellen '%-.64s' var ikkje lst med LOCK TABLES" - pol "Tabela '%-.64s' nie zostaa zablokowana poleceniem LOCK TABLES" - por "Tabela '%-.64s' no foi travada com LOCK TABLES" - rum "Tabela '%-.64s' nu a fost locked cu LOCK TABLES" - rus " '%-.64s' LOCK TABLES" - serbian "Tabela '%-.64s' nije bila zakljuana komandom 'LOCK TABLES'" - slo "Tabuka '%-.64s' nebola zamknut s LOCK TABLES" - spa "Tabla '%-.64s' no fue trabada con LOCK TABLES" - swe "Tabell '%-.64s' r inte lst med LOCK TABLES" - ukr " '%-.64s' LOCK TABLES" + cze "Tabulka '%-.64s' nebyla zam-Bena s LOCK TABLES" + dan "Tabellen '%-.64s' var ikke lst med LOCK TABLES" + nla "Tabel '%-.64s' was niet gelocked met LOCK TABLES" + eng "Table '%-.64s' was not locked with LOCK TABLES" + jps "Table '%-.64s' LOCK TABLES ɂăbNĂ܂", + est "Tabel '%-.64s' ei ole lukustatud ksuga LOCK TABLES" + fre "Table '%-.64s' non verrouille: utilisez LOCK TABLES" + ger "Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt" + greek " '%-.64s' LOCK TABLES" + hun "A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel" + ita "Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES" + jpn "Table '%-.64s' LOCK TABLES ˤäƥåƤޤ" + kor "̺ '%-.64s' LOCK TABLES ʾҽϴ." 
+ nor "Tabellen '%-.64s' var ikke lst med LOCK TABLES" + norwegian-ny "Tabellen '%-.64s' var ikkje lst med LOCK TABLES" + pol "Tabela '%-.64s' nie zostaa zablokowana poleceniem LOCK TABLES" + por "Tabela '%-.64s' no foi travada com LOCK TABLES" + rum "Tabela '%-.64s' nu a fost locked cu LOCK TABLES" + rus " '%-.64s' LOCK TABLES" + serbian "Tabela '%-.64s' nije bila zakljuana komandom 'LOCK TABLES'" + slo "Tabuka '%-.64s' nebola zamknut s LOCK TABLES" + spa "Tabla '%-.64s' no fue trabada con LOCK TABLES" + swe "Tabell '%-.64s' r inte lst med LOCK TABLES" + ukr " '%-.64s' LOCK TABLES" ER_BLOB_CANT_HAVE_DEFAULT 42000 - cze "Blob polo-Bka '%-.64s' neme mt defaultn hodnotu" - dan "BLOB feltet '%-.64s' kan ikke have en standard vrdi" - nla "Blob veld '%-.64s' can geen standaardwaarde bevatten" - eng "BLOB/TEXT column '%-.64s' can't have a default value" - est "BLOB-tpi tulp '%-.64s' ei saa omada vaikevrtust" - fre "BLOB '%-.64s' ne peut avoir de valeur par dfaut" - ger "BLOB/TEXT-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben" - greek " Blob '%-.64s' (default value)" - hun "A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke" - ita "Il campo BLOB '%-.64s' non puo` avere un valore di default" - jpn "BLOB column '%-.64s' can't have a default value" - kor "BLOB Į '%-.64s' Ʈ ϴ." - nor "Blob feltet '%-.64s' kan ikke ha en standard verdi" - norwegian-ny "Blob feltet '%-.64s' kan ikkje ha ein standard verdi" - pol "Pole typu blob '%-.64s' nie moe mie domy?lnej warto?ci" - por "Coluna BLOB '%-.64s' no pode ter um valor padro (default)" - rum "Coloana BLOB '%-.64s' nu poate avea o valoare default" - rus " BLOB '%-.64s'" - serbian "BLOB kolona '%-.64s' ne moe imati default vrednost" - slo "Pole BLOB '%-.64s' neme ma implicitn hodnotu" - spa "Campo Blob '%-.64s' no puede tener valores patron" - swe "BLOB flt '%-.64s' kan inte ha ett DEFAULT-vrde" - ukr " BLOB '%-.64s' " + cze "Blob polo-Bka '%-.64s' neme mt defaultn hodnotu" + dan "BLOB feltet '%-.64s' kan ikke have en standard vrdi" + nla "Blob veld '%-.64s' can geen standaardwaarde bevatten" + eng "BLOB/TEXT column '%-.64s' can't have a default value" + est "BLOB-tpi tulp '%-.64s' ei saa omada vaikevrtust" + fre "BLOB '%-.64s' ne peut avoir de valeur par dfaut" + ger "BLOB/TEXT-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben" + greek " Blob '%-.64s' (default value)" + hun "A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke" + ita "Il campo BLOB '%-.64s' non puo` avere un valore di default" + jpn "BLOB column '%-.64s' can't have a default value" + kor "BLOB Į '%-.64s' Ʈ ϴ." 
+ nor "Blob feltet '%-.64s' kan ikke ha en standard verdi" + norwegian-ny "Blob feltet '%-.64s' kan ikkje ha ein standard verdi" + pol "Pole typu blob '%-.64s' nie moe mie domy?lnej warto?ci" + por "Coluna BLOB '%-.64s' no pode ter um valor padro (default)" + rum "Coloana BLOB '%-.64s' nu poate avea o valoare default" + rus " BLOB '%-.64s'" + serbian "BLOB kolona '%-.64s' ne moe imati default vrednost" + slo "Pole BLOB '%-.64s' neme ma implicitn hodnotu" + spa "Campo Blob '%-.64s' no puede tener valores patron" + swe "BLOB flt '%-.64s' kan inte ha ett DEFAULT-vrde" + ukr " BLOB '%-.64s' " ER_WRONG_DB_NAME 42000 - cze "Nep-Bpustn jmno databze '%-.64s'" - dan "Ugyldigt database navn '%-.64s'" - nla "Databasenaam '%-.64s' is niet getoegestaan" - eng "Incorrect database name '%-.100s'" - jps "w肵 database '%-.100s' ԈĂ܂", - est "Vigane andmebaasi nimi '%-.100s'" - fre "Nom de base de donne illgal: '%-.64s'" - ger "Unerlaubter Datenbankname '%-.100s'" - greek " '%-.100s'" - hun "Hibas adatbazisnev: '%-.100s'" - ita "Nome database errato '%-.100s'" - jpn "ꤷ database ̾ '%-.100s' ְäƤޤ" - kor "'%-.100s' Ÿ̽ ̸ Ȯմϴ." - nor "Ugyldig database navn '%-.64s'" - norwegian-ny "Ugyldig database namn '%-.64s'" - pol "Niedozwolona nazwa bazy danych '%-.64s'" - por "Nome de banco de dados '%-.100s' incorreto" - rum "Numele bazei de date este incorect '%-.100s'" - rus " '%-.100s'" - serbian "Pogreno ime baze '%-.100s'" - slo "Neprpustn meno databzy '%-.100s'" - spa "Nombre de base de datos ilegal '%-.64s'" - swe "Felaktigt databasnamn '%-.64s'" - ukr "צ ' '%-.100s'" + cze "Nep-Bpustn jmno databze '%-.64s'" + dan "Ugyldigt database navn '%-.64s'" + nla "Databasenaam '%-.64s' is niet getoegestaan" + eng "Incorrect database name '%-.100s'" + jps "w肵 database '%-.100s' ԈĂ܂", + est "Vigane andmebaasi nimi '%-.100s'" + fre "Nom de base de donne illgal: '%-.64s'" + ger "Unerlaubter Datenbankname '%-.100s'" + greek " '%-.100s'" + hun "Hibas adatbazisnev: '%-.100s'" + ita "Nome database errato '%-.100s'" + jpn "ꤷ database ̾ '%-.100s' ְäƤޤ" + kor "'%-.100s' Ÿ̽ ̸ Ȯմϴ." + nor "Ugyldig database navn '%-.64s'" + norwegian-ny "Ugyldig database namn '%-.64s'" + pol "Niedozwolona nazwa bazy danych '%-.64s'" + por "Nome de banco de dados '%-.100s' incorreto" + rum "Numele bazei de date este incorect '%-.100s'" + rus " '%-.100s'" + serbian "Pogreno ime baze '%-.100s'" + slo "Neprpustn meno databzy '%-.100s'" + spa "Nombre de base de datos ilegal '%-.64s'" + swe "Felaktigt databasnamn '%-.64s'" + ukr "צ ' '%-.100s'" ER_WRONG_TABLE_NAME 42000 - cze "Nep-Bpustn jmno tabulky '%-.64s'" - dan "Ugyldigt tabel navn '%-.64s'" - nla "Niet toegestane tabelnaam '%-.64s'" - eng "Incorrect table name '%-.100s'" - jps "w肵 table '%-.100s' ͂܂Ă܂", - est "Vigane tabeli nimi '%-.100s'" - fre "Nom de table illgal: '%-.64s'" - ger "Unerlaubter Tabellenname '%-.100s'" - greek " '%-.100s'" - hun "Hibas tablanev: '%-.100s'" - ita "Nome tabella errato '%-.100s'" - jpn "ꤷ table ̾ '%-.100s' ϤޤäƤޤ" - kor "'%-.100s' ̺ ̸ Ȯմϴ." - nor "Ugyldig tabell navn '%-.64s'" - norwegian-ny "Ugyldig tabell namn '%-.64s'" - pol "Niedozwolona nazwa tabeli '%-.64s'..." 
- por "Nome de tabela '%-.100s' incorreto" - rum "Numele tabelei este incorect '%-.100s'" - rus " '%-.100s'" - serbian "Pogreno ime tabele '%-.100s'" - slo "Neprpustn meno tabuky '%-.100s'" - spa "Nombre de tabla ilegal '%-.64s'" - swe "Felaktigt tabellnamn '%-.64s'" - ukr "צ ' æ '%-.100s'" + cze "Nep-Bpustn jmno tabulky '%-.64s'" + dan "Ugyldigt tabel navn '%-.64s'" + nla "Niet toegestane tabelnaam '%-.64s'" + eng "Incorrect table name '%-.100s'" + jps "w肵 table '%-.100s' ͂܂Ă܂", + est "Vigane tabeli nimi '%-.100s'" + fre "Nom de table illgal: '%-.64s'" + ger "Unerlaubter Tabellenname '%-.100s'" + greek " '%-.100s'" + hun "Hibas tablanev: '%-.100s'" + ita "Nome tabella errato '%-.100s'" + jpn "ꤷ table ̾ '%-.100s' ϤޤäƤޤ" + kor "'%-.100s' ̺ ̸ Ȯմϴ." + nor "Ugyldig tabell navn '%-.64s'" + norwegian-ny "Ugyldig tabell namn '%-.64s'" + pol "Niedozwolona nazwa tabeli '%-.64s'..." + por "Nome de tabela '%-.100s' incorreto" + rum "Numele tabelei este incorect '%-.100s'" + rus " '%-.100s'" + serbian "Pogreno ime tabele '%-.100s'" + slo "Neprpustn meno tabuky '%-.100s'" + spa "Nombre de tabla ilegal '%-.64s'" + swe "Felaktigt tabellnamn '%-.64s'" + ukr "צ ' æ '%-.100s'" ER_TOO_BIG_SELECT 42000 - cze "Zadan-B SELECT by prochzel pli mnoho zznam a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v podku, pouijte SET SQL_BIG_SELECTS=1" - dan "SELECT ville undersge for mange poster og ville sandsynligvis tage meget lang tid. Undersg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt" - nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is." - eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay" - est "SELECT lause peab lbi vaatama suure hulga kirjeid ja vtaks tenoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada ksku SET SQL_BIG_SELECTS=1" - fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vrifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien" - ger "Die Ausfhrung des SELECT wrde zu viele Datenstze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel berprfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden" - greek " SELECT . WHERE SET SQL_BIG_SELECTS=1 SELECT " - hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay" - ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto." - kor "SELECT ɿ ʹ ڵ带 ã ð ҿ˴ϴ. WHERE ϰų, SELECT okǸ SET SQL_BIG_SELECTS=1 ɼ ϼ." - nor "SELECT ville underske for mange poster og ville sannsynligvis ta veldig lang tid. Undersk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" - norwegian-ny "SELECT ville underskje for mange postar og ville sannsynligvis ta veldig lang tid. Undersk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" - pol "Operacja SELECT bdzie dotyczya zbyt wielu rekordw i prawdopodobnie zajmie bardzo duo czasu. Sprawd warunek WHERE i uyj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna" - por "O SELECT examinaria registros demais e provavelmente levaria muito tempo. 
Cheque sua clusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto" - rum "SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay" - rus " SELECT , , . WHERE, , , SET SQL_BIG_SELECTS=1" - serbian "Komanda 'SELECT' e ispitati previe slogova i potroiti previe vremena. Proverite va 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako elite ba ovakvu komandu" - slo "Zadan poiadavka SELECT by prechdzala prli mnoho zznamov a trvala by prli dlho. Skontrolujte tvar WHERE a ak je v poriadku, pouite SET SQL_BIG_SELECTS=1" - spa "El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto" - swe "Den angivna frgan skulle lsa mer n MAX_JOIN_SIZE rader. Kontrollera din WHERE och anvnd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins" - ukr " SELECT Ҧ Ӧ, , , . צ WHERE SET SQL_BIG_SELECTS=1, SELECT צ" + cze "Zadan-B SELECT by prochzel pli mnoho zznam a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v podku, pouijte SET SQL_BIG_SELECTS=1" + dan "SELECT ville undersge for mange poster og ville sandsynligvis tage meget lang tid. Undersg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt" + nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is." + eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay" + est "SELECT lause peab lbi vaatama suure hulga kirjeid ja vtaks tenoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada ksku SET SQL_BIG_SELECTS=1" + fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vrifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien" + ger "Die Ausfhrung des SELECT wrde zu viele Datenstze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel berprfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden" + greek " SELECT . WHERE SET SQL_BIG_SELECTS=1 SELECT " + hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay" + ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto." + kor "SELECT ɿ ʹ ڵ带 ã ð ҿ˴ϴ. WHERE ϰų, SELECT okǸ SET SQL_BIG_SELECTS=1 ɼ ϼ." + nor "SELECT ville underske for mange poster og ville sannsynligvis ta veldig lang tid. Undersk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" + norwegian-ny "SELECT ville underskje for mange postar og ville sannsynligvis ta veldig lang tid. Undersk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" + pol "Operacja SELECT bdzie dotyczya zbyt wielu rekordw i prawdopodobnie zajmie bardzo duo czasu. Sprawd warunek WHERE i uyj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna" + por "O SELECT examinaria registros demais e provavelmente levaria muito tempo. 
Cheque sua clusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto" + rum "SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay" + rus " SELECT , , . WHERE, , , SET SQL_BIG_SELECTS=1" + serbian "Komanda 'SELECT' e ispitati previe slogova i potroiti previe vremena. Proverite va 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako elite ba ovakvu komandu" + slo "Zadan poiadavka SELECT by prechdzala prli mnoho zznamov a trvala by prli dlho. Skontrolujte tvar WHERE a ak je v poriadku, pouite SET SQL_BIG_SELECTS=1" + spa "El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto" + swe "Den angivna frgan skulle lsa mer n MAX_JOIN_SIZE rader. Kontrollera din WHERE och anvnd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins" + ukr " SELECT Ҧ Ӧ, , , . צ WHERE SET SQL_BIG_SELECTS=1, SELECT צ" ER_UNKNOWN_ERROR - cze "Nezn-Bm chyba" - dan "Ukendt fejl" - nla "Onbekende Fout" - eng "Unknown error" - est "Tundmatu viga" - fre "Erreur inconnue" - ger "Unbekannter Fehler" - greek " " - hun "Ismeretlen hiba" - ita "Errore sconosciuto" - kor "˼ Դϴ." - nor "Ukjent feil" - norwegian-ny "Ukjend feil" - por "Erro desconhecido" - rum "Eroare unknown" - rus " " - serbian "Nepoznata greka" - slo "Neznm chyba" - spa "Error desconocido" - swe "Oidentifierat fel" - ukr "צ " + cze "Nezn-Bm chyba" + dan "Ukendt fejl" + nla "Onbekende Fout" + eng "Unknown error" + est "Tundmatu viga" + fre "Erreur inconnue" + ger "Unbekannter Fehler" + greek " " + hun "Ismeretlen hiba" + ita "Errore sconosciuto" + kor "˼ Դϴ." + nor "Ukjent feil" + norwegian-ny "Ukjend feil" + por "Erro desconhecido" + rum "Eroare unknown" + rus " " + serbian "Nepoznata greka" + slo "Neznm chyba" + spa "Error desconocido" + swe "Oidentifierat fel" + ukr "צ " ER_UNKNOWN_PROCEDURE 42000 - cze "Nezn-Bm procedura %s" - dan "Ukendt procedure %s" - nla "Onbekende procedure %s" - eng "Unknown procedure '%-.64s'" - est "Tundmatu protseduur '%-.64s'" - fre "Procdure %s inconnue" - ger "Unbekannte Prozedur '%-.64s'" - greek " '%-.64s'" - hun "Ismeretlen eljaras: '%-.64s'" - ita "Procedura '%-.64s' sconosciuta" - kor "˼ ๮ : '%-.64s'" - nor "Ukjent prosedyre %s" - norwegian-ny "Ukjend prosedyre %s" - pol "Unkown procedure %s" - por "'Procedure' '%-.64s' desconhecida" - rum "Procedura unknown '%-.64s'" - rus " '%-.64s'" - serbian "Nepoznata procedura '%-.64s'" - slo "Neznm procedra '%-.64s'" - spa "Procedimiento desconocido %s" - swe "Oknd procedur: %s" - ukr "צ '%-.64s'" + cze "Nezn-Bm procedura %s" + dan "Ukendt procedure %s" + nla "Onbekende procedure %s" + eng "Unknown procedure '%-.64s'" + est "Tundmatu protseduur '%-.64s'" + fre "Procdure %s inconnue" + ger "Unbekannte Prozedur '%-.64s'" + greek " '%-.64s'" + hun "Ismeretlen eljaras: '%-.64s'" + ita "Procedura '%-.64s' sconosciuta" + kor "˼ ๮ : '%-.64s'" + nor "Ukjent prosedyre %s" + norwegian-ny "Ukjend prosedyre %s" + pol "Unkown procedure %s" + por "'Procedure' '%-.64s' desconhecida" + rum "Procedura unknown '%-.64s'" + rus " '%-.64s'" + serbian "Nepoznata procedura '%-.64s'" + slo "Neznm procedra '%-.64s'" + spa "Procedimiento desconocido %s" + swe "Oknd procedur: %s" + ukr "צ '%-.64s'" ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 - cze "Chybn-B poet parametr procedury %s" - dan "Forkert antal parametre til proceduren %s" - nla "Foutief aantal parameters doorgegeven 
aan procedure %s" - eng "Incorrect parameter count to procedure '%-.64s'" - est "Vale parameetrite hulk protseduurile '%-.64s'" - fre "Mauvais nombre de paramtres pour la procedure %s" - ger "Falsche Parameterzahl fr Prozedur '%-.64s'" - greek " '%-.64s'" - hun "Rossz parameter a(z) '%-.64s'eljaras szamitasanal" - ita "Numero di parametri errato per la procedura '%-.64s'" - kor "'%-.64s' ๮ Ȯ Ķ" - nor "Feil parameter antall til prosedyren %s" - norwegian-ny "Feil parameter tal til prosedyra %s" - pol "Incorrect parameter count to procedure %s" - por "Nmero de parmetros incorreto para a 'procedure' '%-.64s'" - rum "Procedura '%-.64s' are un numar incorect de parametri" - rus " '%-.64s'" - serbian "Pogrean broj parametara za proceduru '%-.64s'" - slo "Chybn poet parametrov procedry '%-.64s'" - spa "Equivocado parametro count para procedimiento %s" - swe "Felaktigt antal parametrar till procedur %s" - ukr " ˦˦ Ҧ '%-.64s'" + cze "Chybn-B poet parametr procedury %s" + dan "Forkert antal parametre til proceduren %s" + nla "Foutief aantal parameters doorgegeven aan procedure %s" + eng "Incorrect parameter count to procedure '%-.64s'" + est "Vale parameetrite hulk protseduurile '%-.64s'" + fre "Mauvais nombre de paramtres pour la procedure %s" + ger "Falsche Parameterzahl fr Prozedur '%-.64s'" + greek " '%-.64s'" + hun "Rossz parameter a(z) '%-.64s'eljaras szamitasanal" + ita "Numero di parametri errato per la procedura '%-.64s'" + kor "'%-.64s' ๮ Ȯ Ķ" + nor "Feil parameter antall til prosedyren %s" + norwegian-ny "Feil parameter tal til prosedyra %s" + pol "Incorrect parameter count to procedure %s" + por "Nmero de parmetros incorreto para a 'procedure' '%-.64s'" + rum "Procedura '%-.64s' are un numar incorect de parametri" + rus " '%-.64s'" + serbian "Pogrean broj parametara za proceduru '%-.64s'" + slo "Chybn poet parametrov procedry '%-.64s'" + spa "Equivocado parametro count para procedimiento %s" + swe "Felaktigt antal parametrar till procedur %s" + ukr " ˦˦ Ҧ '%-.64s'" ER_WRONG_PARAMETERS_TO_PROCEDURE - cze "Chybn-B parametry procedury %s" - dan "Forkert(e) parametre til proceduren %s" - nla "Foutieve parameters voor procedure %s" - eng "Incorrect parameters to procedure '%-.64s'" - est "Vigased parameetrid protseduurile '%-.64s'" - fre "Paramtre erron pour la procedure %s" - ger "Falsche Parameter fr Prozedur '%-.64s'" - greek " '%-.64s'" - hun "Rossz parameter a(z) '%-.64s' eljarasban" - ita "Parametri errati per la procedura '%-.64s'" - kor "'%-.64s' ๮ Ȯ Ķ" - nor "Feil parametre til prosedyren %s" - norwegian-ny "Feil parameter til prosedyra %s" - pol "Incorrect parameters to procedure %s" - por "Parmetros incorretos para a 'procedure' '%-.64s'" - rum "Procedura '%-.64s' are parametrii incorecti" - rus " '%-.64s'" - serbian "Pogreni parametri prosleeni proceduri '%-.64s'" - slo "Chybn parametre procedry '%-.64s'" - spa "Equivocados parametros para procedimiento %s" - swe "Felaktiga parametrar till procedur %s" - ukr " '%-.64s'" + cze "Chybn-B parametry procedury %s" + dan "Forkert(e) parametre til proceduren %s" + nla "Foutieve parameters voor procedure %s" + eng "Incorrect parameters to procedure '%-.64s'" + est "Vigased parameetrid protseduurile '%-.64s'" + fre "Paramtre erron pour la procedure %s" + ger "Falsche Parameter fr Prozedur '%-.64s'" + greek " '%-.64s'" + hun "Rossz parameter a(z) '%-.64s' eljarasban" + ita "Parametri errati per la procedura '%-.64s'" + kor "'%-.64s' ๮ Ȯ Ķ" + nor "Feil parametre til prosedyren %s" + norwegian-ny "Feil parameter til prosedyra %s" + pol 
"Incorrect parameters to procedure %s" + por "Parmetros incorretos para a 'procedure' '%-.64s'" + rum "Procedura '%-.64s' are parametrii incorecti" + rus " '%-.64s'" + serbian "Pogreni parametri prosleeni proceduri '%-.64s'" + slo "Chybn parametre procedry '%-.64s'" + spa "Equivocados parametros para procedimiento %s" + swe "Felaktiga parametrar till procedur %s" + ukr " '%-.64s'" ER_UNKNOWN_TABLE 42S02 - cze "Nezn-Bm tabulka '%-.64s' v %s" - dan "Ukendt tabel '%-.64s' i %s" - nla "Onbekende tabel '%-.64s' in %s" - eng "Unknown table '%-.64s' in %-.32s" - est "Tundmatu tabel '%-.64s' %-.32s-s" - fre "Table inconnue '%-.64s' dans %s" - ger "Unbekannte Tabelle '%-.64s' in '%-.64s'" - greek " '%-.64s' %s" - hun "Ismeretlen tabla: '%-.64s' %s-ban" - ita "Tabella '%-.64s' sconosciuta in %s" - jpn "Unknown table '%-.64s' in %s" - kor "˼ ̺ '%-.64s' (Ÿ̽ %s)" - nor "Ukjent tabell '%-.64s' i %s" - norwegian-ny "Ukjend tabell '%-.64s' i %s" - pol "Unknown table '%-.64s' in %s" - por "Tabela '%-.64s' desconhecida em '%-.32s'" - rum "Tabla '%-.64s' invalida in %-.32s" - rus " '%-.64s' %-.32s" - serbian "Nepoznata tabela '%-.64s' u '%-.32s'" - slo "Neznma tabuka '%-.64s' v %s" - spa "Tabla desconocida '%-.64s' in %s" - swe "Oknd tabell '%-.64s' i '%-.64s'" - ukr "צ '%-.64s' %-.32s" + cze "Nezn-Bm tabulka '%-.64s' v %s" + dan "Ukendt tabel '%-.64s' i %s" + nla "Onbekende tabel '%-.64s' in %s" + eng "Unknown table '%-.64s' in %-.32s" + est "Tundmatu tabel '%-.64s' %-.32s-s" + fre "Table inconnue '%-.64s' dans %s" + ger "Unbekannte Tabelle '%-.64s' in '%-.64s'" + greek " '%-.64s' %s" + hun "Ismeretlen tabla: '%-.64s' %s-ban" + ita "Tabella '%-.64s' sconosciuta in %s" + jpn "Unknown table '%-.64s' in %s" + kor "˼ ̺ '%-.64s' (Ÿ̽ %s)" + nor "Ukjent tabell '%-.64s' i %s" + norwegian-ny "Ukjend tabell '%-.64s' i %s" + pol "Unknown table '%-.64s' in %s" + por "Tabela '%-.64s' desconhecida em '%-.32s'" + rum "Tabla '%-.64s' invalida in %-.32s" + rus " '%-.64s' %-.32s" + serbian "Nepoznata tabela '%-.64s' u '%-.32s'" + slo "Neznma tabuka '%-.64s' v %s" + spa "Tabla desconocida '%-.64s' in %s" + swe "Oknd tabell '%-.64s' i '%-.64s'" + ukr "צ '%-.64s' %-.32s" ER_FIELD_SPECIFIED_TWICE 42000 - cze "Polo-Bka '%-.64s' je zadna dvakrt" - dan "Feltet '%-.64s' er anvendt to gange" - nla "Veld '%-.64s' is dubbel gespecificeerd" - eng "Column '%-.64s' specified twice" - est "Tulp '%-.64s' on mratletud topelt" - fre "Champ '%-.64s' spcifi deux fois" - ger "Feld '%-.64s' wurde zweimal angegeben" - greek " '%-.64s' " - hun "A(z) '%-.64s' mezot ketszer definialta" - ita "Campo '%-.64s' specificato 2 volte" - kor "Į '%-.64s' ι ǵǾ ϴ." - nor "Feltet '%-.64s' er spesifisert to ganger" - norwegian-ny "Feltet '%-.64s' er spesifisert to gangar" - pol "Field '%-.64s' specified twice" - por "Coluna '%-.64s' especificada duas vezes" - rum "Coloana '%-.64s' specificata de doua ori" - rus " '%-.64s' " - serbian "Kolona '%-.64s' je navedena dva puta" - slo "Pole '%-.64s' je zadan dvakrt" - spa "Campo '%-.64s' especificado dos veces" - swe "Flt '%-.64s' r redan anvnt" - ukr " '%-.64s' צަ" + cze "Polo-Bka '%-.64s' je zadna dvakrt" + dan "Feltet '%-.64s' er anvendt to gange" + nla "Veld '%-.64s' is dubbel gespecificeerd" + eng "Column '%-.64s' specified twice" + est "Tulp '%-.64s' on mratletud topelt" + fre "Champ '%-.64s' spcifi deux fois" + ger "Feld '%-.64s' wurde zweimal angegeben" + greek " '%-.64s' " + hun "A(z) '%-.64s' mezot ketszer definialta" + ita "Campo '%-.64s' specificato 2 volte" + kor "Į '%-.64s' ι ǵǾ ϴ." 
+ nor "Feltet '%-.64s' er spesifisert to ganger" + norwegian-ny "Feltet '%-.64s' er spesifisert to gangar" + pol "Field '%-.64s' specified twice" + por "Coluna '%-.64s' especificada duas vezes" + rum "Coloana '%-.64s' specificata de doua ori" + rus " '%-.64s' " + serbian "Kolona '%-.64s' je navedena dva puta" + slo "Pole '%-.64s' je zadan dvakrt" + spa "Campo '%-.64s' especificado dos veces" + swe "Flt '%-.64s' r redan anvnt" + ukr " '%-.64s' צަ" ER_INVALID_GROUP_FUNC_USE - cze "Nespr-Bvn pouit funkce group" - dan "Forkert brug af grupperings-funktion" - nla "Ongeldig gebruik van GROUP-functie" - eng "Invalid use of group function" - est "Vigane grupeerimisfunktsiooni kasutus" - fre "Utilisation invalide de la clause GROUP" - ger "Falsche Verwendung einer Gruppierungsfunktion" - greek " group function" - hun "A group funkcio ervenytelen hasznalata" - ita "Uso non valido di una funzione di raggruppamento" - kor "߸ ׷ Լ Ͽϴ." - por "Uso invlido de funo de agrupamento (GROUP)" - rum "Folosire incorecta a functiei group" - rus " " - serbian "Pogrena upotreba 'GROUP' funkcije" - slo "Nesprvne pouitie funkcie GROUP" - spa "Invalido uso de funcin en grupo" - swe "Felaktig anvndning av SQL grupp function" - ukr " æ " + cze "Nespr-Bvn pouit funkce group" + dan "Forkert brug af grupperings-funktion" + nla "Ongeldig gebruik van GROUP-functie" + eng "Invalid use of group function" + est "Vigane grupeerimisfunktsiooni kasutus" + fre "Utilisation invalide de la clause GROUP" + ger "Falsche Verwendung einer Gruppierungsfunktion" + greek " group function" + hun "A group funkcio ervenytelen hasznalata" + ita "Uso non valido di una funzione di raggruppamento" + kor "߸ ׷ Լ Ͽϴ." + por "Uso invlido de funo de agrupamento (GROUP)" + rum "Folosire incorecta a functiei group" + rus " " + serbian "Pogrena upotreba 'GROUP' funkcije" + slo "Nesprvne pouitie funkcie GROUP" + spa "Invalido uso de funcin en grupo" + swe "Felaktig anvndning av SQL grupp function" + ukr " æ " ER_UNSUPPORTED_EXTENSION 42000 - cze "Tabulka '%-.64s' pou-Bv rozen, kter v tto verzi MySQL nen" - dan "Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version" - nla "Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt." - eng "Table '%-.64s' uses an extension that doesn't exist in this MySQL version" - est "Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis" - fre "Table '%-.64s' : utilise une extension invalide pour cette version de MySQL" - ger "Tabelle '%-.64s' verwendet eine Erweiterung, die in dieser MySQL-Version nicht verfgbar ist" - greek " '%-.64s' extension MySQL" - hun "A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban." - ita "La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL" - kor "̺ '%-.64s' Ȯ ̿ MySQL ʽϴ." 
- nor "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" - norwegian-ny "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" - pol "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" - por "Tabela '%-.64s' usa uma extenso que no existe nesta verso do MySQL" - rum "Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL" - rus " '%-.64s' , MySQL" - serbian "Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a" - slo "Tabuka '%-.64s' pouva rozrenie, ktor v tejto verzii MySQL nie je" - spa "Tabla '%-.64s' usa una extensin que no existe en esta MySQL versin" - swe "Tabell '%-.64s' har en extension som inte finns i denna version av MySQL" - ukr " '%-.64s' դ , դ æ Ӧ MySQL" + cze "Tabulka '%-.64s' pou-Bv rozen, kter v tto verzi MySQL nen" + dan "Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version" + nla "Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt." + eng "Table '%-.64s' uses an extension that doesn't exist in this MySQL version" + est "Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis" + fre "Table '%-.64s' : utilise une extension invalide pour cette version de MySQL" + ger "Tabelle '%-.64s' verwendet eine Erweiterung, die in dieser MySQL-Version nicht verfgbar ist" + greek " '%-.64s' extension MySQL" + hun "A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban." + ita "La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL" + kor "̺ '%-.64s' Ȯ ̿ MySQL ʽϴ." + nor "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + norwegian-ny "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + pol "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + por "Tabela '%-.64s' usa uma extenso que no existe nesta verso do MySQL" + rum "Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL" + rus " '%-.64s' , MySQL" + serbian "Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a" + slo "Tabuka '%-.64s' pouva rozrenie, ktor v tejto verzii MySQL nie je" + spa "Tabla '%-.64s' usa una extensin que no existe en esta MySQL versin" + swe "Tabell '%-.64s' har en extension som inte finns i denna version av MySQL" + ukr " '%-.64s' դ , դ æ Ӧ MySQL" ER_TABLE_MUST_HAVE_COLUMNS 42000 - cze "Tabulka mus-B mt alespo jeden sloupec" - dan "En tabel skal have mindst een kolonne" - nla "Een tabel moet minstens 1 kolom bevatten" - eng "A table must have at least 1 column" - jps "e[u͍Œ 1 ‚ column Kvł", - est "Tabelis peab olema vhemalt ks tulp" - fre "Une table doit comporter au moins une colonne" - ger "Eine Tabelle muss mindestens eine Spalte besitzen" - greek " " - hun "A tablanak legalabb egy oszlopot tartalmazni kell" - ita "Una tabella deve avere almeno 1 colonna" - jpn "ơ֥Ϻ 1 Ĥ column ɬפǤ" - kor "ϳ ̺  ϳ Į Ͽ մϴ." 
- por "Uma tabela tem que ter pelo menos uma (1) coluna" - rum "O tabela trebuie sa aiba cel putin o coloana" - rus " " - serbian "Tabela mora imati najmanje jednu kolonu" - slo "Tabuka mus ma aspo 1 pole" - spa "Una tabla debe tener al menos 1 columna" - swe "Tabeller mste ha minst 1 kolumn" - ukr " " + cze "Tabulka mus-B mt alespo jeden sloupec" + dan "En tabel skal have mindst een kolonne" + nla "Een tabel moet minstens 1 kolom bevatten" + eng "A table must have at least 1 column" + jps "e[u͍Œ 1 ‚ column Kvł", + est "Tabelis peab olema vhemalt ks tulp" + fre "Une table doit comporter au moins une colonne" + ger "Eine Tabelle muss mindestens eine Spalte besitzen" + greek " " + hun "A tablanak legalabb egy oszlopot tartalmazni kell" + ita "Una tabella deve avere almeno 1 colonna" + jpn "ơ֥Ϻ 1 Ĥ column ɬפǤ" + kor "ϳ ̺  ϳ Į Ͽ մϴ." + por "Uma tabela tem que ter pelo menos uma (1) coluna" + rum "O tabela trebuie sa aiba cel putin o coloana" + rus " " + serbian "Tabela mora imati najmanje jednu kolonu" + slo "Tabuka mus ma aspo 1 pole" + spa "Una tabla debe tener al menos 1 columna" + swe "Tabeller mste ha minst 1 kolumn" + ukr " " ER_RECORD_FILE_FULL - cze "Tabulka '%-.64s' je pln-B" - dan "Tabellen '%-.64s' er fuld" - nla "De tabel '%-.64s' is vol" - eng "The table '%-.64s' is full" - jps "table '%-.64s' ͂ςł", - est "Tabel '%-.64s' on tis" - fre "La table '%-.64s' est pleine" - ger "Tabelle '%-.64s' ist voll" - greek " '%-.64s' " - hun "A '%-.64s' tabla megtelt" - ita "La tabella '%-.64s' e` piena" - jpn "table '%-.64s' ϤäѤǤ" - kor "̺ '%-.64s' fullϴ. " - por "Tabela '%-.64s' est cheia" - rum "Tabela '%-.64s' e plina" - rus " '%-.64s' " - serbian "Tabela '%-.64s' je popunjena do kraja" - slo "Tabuka '%-.64s' je pln" - spa "La tabla '%-.64s' est llena" - swe "Tabellen '%-.64s' r full" - ukr " '%-.64s' " + cze "Tabulka '%-.64s' je pln-B" + dan "Tabellen '%-.64s' er fuld" + nla "De tabel '%-.64s' is vol" + eng "The table '%-.64s' is full" + jps "table '%-.64s' ͂ςł", + est "Tabel '%-.64s' on tis" + fre "La table '%-.64s' est pleine" + ger "Tabelle '%-.64s' ist voll" + greek " '%-.64s' " + hun "A '%-.64s' tabla megtelt" + ita "La tabella '%-.64s' e` piena" + jpn "table '%-.64s' ϤäѤǤ" + kor "̺ '%-.64s' fullϴ. 
" + por "Tabela '%-.64s' est cheia" + rum "Tabela '%-.64s' e plina" + rus " '%-.64s' " + serbian "Tabela '%-.64s' je popunjena do kraja" + slo "Tabuka '%-.64s' je pln" + spa "La tabla '%-.64s' est llena" + swe "Tabellen '%-.64s' r full" + ukr " '%-.64s' " ER_UNKNOWN_CHARACTER_SET 42000 - cze "Nezn-Bm znakov sada: '%-.64s'" - dan "Ukendt tegnst: '%-.64s'" - nla "Onbekende character set: '%-.64s'" - eng "Unknown character set: '%-.64s'" - jps "character set '%-.64s' ̓T|[gĂ܂", - est "Vigane kooditabel '%-.64s'" - fre "Jeu de caractres inconnu: '%-.64s'" - ger "Unbekannter Zeichensatz: '%-.64s'" - greek " character set: '%-.64s'" - hun "Ervenytelen karakterkeszlet: '%-.64s'" - ita "Set di caratteri '%-.64s' sconosciuto" - jpn "character set '%-.64s' ϥݡȤƤޤ" - kor "˼ Set: '%-.64s'" - por "Conjunto de caracteres '%-.64s' desconhecido" - rum "Set de caractere invalid: '%-.64s'" - rus " '%-.64s'" - serbian "Nepoznati karakter-set: '%-.64s'" - slo "Neznma znakov sada: '%-.64s'" - spa "Juego de caracteres desconocido: '%-.64s'" - swe "Oknd teckenuppsttning: '%-.64s'" - ukr "צ : '%-.64s'" + cze "Nezn-Bm znakov sada: '%-.64s'" + dan "Ukendt tegnst: '%-.64s'" + nla "Onbekende character set: '%-.64s'" + eng "Unknown character set: '%-.64s'" + jps "character set '%-.64s' ̓T|[gĂ܂", + est "Vigane kooditabel '%-.64s'" + fre "Jeu de caractres inconnu: '%-.64s'" + ger "Unbekannter Zeichensatz: '%-.64s'" + greek " character set: '%-.64s'" + hun "Ervenytelen karakterkeszlet: '%-.64s'" + ita "Set di caratteri '%-.64s' sconosciuto" + jpn "character set '%-.64s' ϥݡȤƤޤ" + kor "˼ Set: '%-.64s'" + por "Conjunto de caracteres '%-.64s' desconhecido" + rum "Set de caractere invalid: '%-.64s'" + rus " '%-.64s'" + serbian "Nepoznati karakter-set: '%-.64s'" + slo "Neznma znakov sada: '%-.64s'" + spa "Juego de caracteres desconocido: '%-.64s'" + swe "Oknd teckenuppsttning: '%-.64s'" + ukr "צ : '%-.64s'" ER_TOO_MANY_TABLES - cze "P-Bli mnoho tabulek, MySQL jich me mt v joinu jen %d" - dan "For mange tabeller. MySQL kan kun bruge %d tabeller i et join" - nla "Teveel tabellen. MySQL kan slechts %d tabellen in een join bevatten" - eng "Too many tables; MySQL can only use %d tables in a join" - jps "e[u܂; MySQL can only use %d tables in a join", - est "Liiga palju tabeleid. MySQL suudab JOINiga hendada kuni %d tabelit" - fre "Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN" - ger "Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden" - greek " . MySQL %d join" - hun "Tul sok tabla. A MySQL csak %d tablat tud kezelni osszefuzeskor" - ita "Troppe tabelle. MySQL puo` usare solo %d tabelle in una join" - jpn "ơ֥뤬¿ޤ; MySQL can only use %d tables in a join" - kor "ʹ ̺ JoinǾϴ. MySQL JOIN %d ̺ ֽϴ." - por "Tabelas demais. O MySQL pode usar somente %d tabelas em uma juno (JOIN)" - rum "Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join" - rus " . MySQL %d " - serbian "Previe tabela. MySQL moe upotrebiti maksimum %d tabela pri 'JOIN' operaciji" - slo "Prli mnoho tabuliek. MySQL me poui len %d v JOIN-e" - spa "Muchas tablas. MySQL solamente puede usar %d tablas en un join" - swe "Fr mnga tabeller. MySQL can ha hgst %d tabeller i en och samma join" - ukr " . MySQL %d 'Φ" + cze "P-Bli mnoho tabulek, MySQL jich me mt v joinu jen %d" + dan "For mange tabeller. MySQL kan kun bruge %d tabeller i et join" + nla "Teveel tabellen. 
MySQL kan slechts %d tabellen in een join bevatten" + eng "Too many tables; MySQL can only use %d tables in a join" + jps "e[u܂; MySQL can only use %d tables in a join", + est "Liiga palju tabeleid. MySQL suudab JOINiga hendada kuni %d tabelit" + fre "Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN" + ger "Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden" + greek " . MySQL %d join" + hun "Tul sok tabla. A MySQL csak %d tablat tud kezelni osszefuzeskor" + ita "Troppe tabelle. MySQL puo` usare solo %d tabelle in una join" + jpn "ơ֥뤬¿ޤ; MySQL can only use %d tables in a join" + kor "ʹ ̺ JoinǾϴ. MySQL JOIN %d ̺ ֽϴ." + por "Tabelas demais. O MySQL pode usar somente %d tabelas em uma juno (JOIN)" + rum "Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join" + rus " . MySQL %d " + serbian "Previe tabela. MySQL moe upotrebiti maksimum %d tabela pri 'JOIN' operaciji" + slo "Prli mnoho tabuliek. MySQL me poui len %d v JOIN-e" + spa "Muchas tablas. MySQL solamente puede usar %d tablas en un join" + swe "Fr mnga tabeller. MySQL can ha hgst %d tabeller i en och samma join" + ukr " . MySQL %d 'Φ" ER_TOO_MANY_FIELDS - cze "P-Bli mnoho poloek" - dan "For mange felter" - nla "Te veel velden" - eng "Too many columns" - jps "column ܂", - est "Liiga palju tulpasid" - fre "Trop de champs" - ger "Zu viele Felder" - greek " " - hun "Tul sok mezo" - ita "Troppi campi" - jpn "column ¿ޤ" - kor "Į ʹ ϴ." - por "Colunas demais" - rum "Prea multe coloane" - rus " " - serbian "Previe kolona" - slo "Prli mnoho pol" - spa "Muchos campos" - swe "Fr mnga flt" - ukr " æ" + cze "P-Bli mnoho poloek" + dan "For mange felter" + nla "Te veel velden" + eng "Too many columns" + jps "column ܂", + est "Liiga palju tulpasid" + fre "Trop de champs" + ger "Zu viele Felder" + greek " " + hun "Tul sok mezo" + ita "Troppi campi" + jpn "column ¿ޤ" + kor "Į ʹ ϴ." + por "Colunas demais" + rum "Prea multe coloane" + rus " " + serbian "Previe kolona" + slo "Prli mnoho pol" + spa "Muchos campos" + swe "Fr mnga flt" + ukr " æ" ER_TOO_BIG_ROWSIZE 42000 - cze "-Bdek je pli velk. Maximln velikost dku, nepotaje poloky blob, je %d. Muste zmnit nkter poloky na blob" - dan "For store poster. Max post strrelse, uden BLOB's, er %d. Du m lave nogle felter til BLOB's" - nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %d. U dient sommige velden in blobs te veranderen." - eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs" - jps "row size 傫܂. BLOB ܂܂Ȃꍇ row size ̍ő %d ł. ‚ field BLOB ɕςĂ.", - est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tpi vlju on %d. Muuda mned vljad BLOB-tpi vljadeks" - fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %d. Changez le type de quelques colonnes en BLOB" - ger "Zeilenlnge zu gro. Die maximale Zeilenlnge fr den verwendeten Tabellentyp (ohne BLOB-Felder) betrgt %ld. Einige Felder mssen in BLOB oder TEXT umgewandelt werden" - greek " . , blobs, %d. blobs" - hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %d. Nehany mezot meg kell valtoztatnia" - ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %d. Devi cambiare alcuni campi in BLOB" - jpn "row size 礭ޤ. BLOB ޤޤʤ row size κ %d Ǥ. Ĥ field BLOB ѤƤ." - kor "ʹ ū row Դϴ. BLOB ʰ ִ row %dԴϴ. 󸶰 ʵ BLOB ٲټž ڱ.." - por "Tamanho de linha grande demais. 
O mximo tamanho de linha, no contando BLOBs, %d. Voc tem que mudar alguns campos para BLOBs" - rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %d. Trebuie sa schimbati unele cimpuri in BLOB-uri" - rus " . , BLOB, - %d. , BLOB" - serbian "Prevelik slog. Maksimalna veliina sloga, ne raunajui BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB" - slo "Riadok je prli vek. Maximlna vekos riadku, okrem 'BLOB', je %d. Muste zmeni niektor poloky na BLOB" - spa "Tamao de lnea muy grande. Mximo tamao de lnea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob" - swe "Fr stor total radlngd. Den hgst tilltna radlngden, frutom BLOBs, r %d. ndra ngra av dina flt till BLOB" - ukr " . ¦ , BLOB, %d. Ҧ ˦ æ BLOB" + cze "-Bdek je pli velk. Maximln velikost dku, nepotaje poloky blob, je %d. Muste zmnit nkter poloky na blob" + dan "For store poster. Max post strrelse, uden BLOB's, er %d. Du m lave nogle felter til BLOB's" + nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %d. U dient sommige velden in blobs te veranderen." + eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs" + jps "row size 傫܂. BLOB ܂܂Ȃꍇ row size ̍ő %d ł. ‚ field BLOB ɕςĂ.", + est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tpi vlju on %d. Muuda mned vljad BLOB-tpi vljadeks" + fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %d. Changez le type de quelques colonnes en BLOB" + ger "Zeilenlnge zu gro. Die maximale Zeilenlnge fr den verwendeten Tabellentyp (ohne BLOB-Felder) betrgt %ld. Einige Felder mssen in BLOB oder TEXT umgewandelt werden" + greek " . , blobs, %d. blobs" + hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %d. Nehany mezot meg kell valtoztatnia" + ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %d. Devi cambiare alcuni campi in BLOB" + jpn "row size 礭ޤ. BLOB ޤޤʤ row size κ %d Ǥ. Ĥ field BLOB ѤƤ." + kor "ʹ ū row Դϴ. BLOB ʰ ִ row %dԴϴ. 󸶰 ʵ BLOB ٲټž ڱ.." + por "Tamanho de linha grande demais. O mximo tamanho de linha, no contando BLOBs, %d. Voc tem que mudar alguns campos para BLOBs" + rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %d. Trebuie sa schimbati unele cimpuri in BLOB-uri" + rus " . , BLOB, - %d. , BLOB" + serbian "Prevelik slog. Maksimalna veliina sloga, ne raunajui BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB" + slo "Riadok je prli vek. Maximlna vekos riadku, okrem 'BLOB', je %d. Muste zmeni niektor poloky na BLOB" + spa "Tamao de lnea muy grande. Mximo tamao de lnea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob" + swe "Fr stor total radlngd. Den hgst tilltna radlngden, frutom BLOBs, r %d. ndra ngra av dina flt till BLOB" + ukr " . ¦ , BLOB, %d. Ҧ ˦ æ BLOB" ER_STACK_OVERRUN - cze "P-Beteen zsobnku threadu: pouito %ld z %ld. Pouijte 'mysqld -O thread_stack=#' k zadn vtho zsobnku" - dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en strre stak om ndvendigt" - nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)." - eng "Thread stack overrun: Used: %ld of a %ld stack. 
Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed" - jps "Thread stack overrun: Used: %ld of a %ld stack. X^bN𑽂̈Ƃ肽ꍇA'mysqld -O thread_stack=#' Ǝw肵Ă", - fre "Dbordement de la pile des tches (Thread stack). Utilises: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur" - ger "Thread-Stack-berlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenden, um bei Bedarf einen greren Stack anzulegen" - greek "Stack overrun thread: Used: %ld of a %ld stack. 'mysqld -O thread_stack=#' stack " - hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz" - ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande." - jpn "Thread stack overrun: Used: %ld of a %ld stack. åΰ¿Ȥꤿ硢'mysqld -O thread_stack=#' ȻꤷƤ" - kor " ƽϴ. : %ld : %ld. ʿ ū Ҷ 'mysqld -O thread_stack=#' ϼ" - por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessrio" - rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare" - rus " : : %ld %ld . 'mysqld -O thread_stack=#' , " - serbian "Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete vei stack ako je potrebno" - slo "Preteenie zsobnku vlkna: pouit: %ld z %ld. Pouite 'mysqld -O thread_stack=#' k zadaniu vieho zsobnka" - spa "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario" - swe "Trdstacken tog slut: Har anvnt %ld av %ld bytes. Anvnd 'mysqld -O thread_stack=#' ifall du behver en strre stack" - ukr " Ǧ : : %ld %ld. 'mysqld -O thread_stack=#' ¦ , Ȧ" + cze "P-Beteen zsobnku threadu: pouito %ld z %ld. Pouijte 'mysqld -O thread_stack=#' k zadn vtho zsobnku" + dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en strre stak om ndvendigt" + nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)." + eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed" + jps "Thread stack overrun: Used: %ld of a %ld stack. X^bN𑽂̈Ƃ肽ꍇA'mysqld -O thread_stack=#' Ǝw肵Ă", + fre "Dbordement de la pile des tches (Thread stack). Utilises: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur" + ger "Thread-Stack-berlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenden, um bei Bedarf einen greren Stack anzulegen" + greek "Stack overrun thread: Used: %ld of a %ld stack. 'mysqld -O thread_stack=#' stack " + hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz" + ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande." + jpn "Thread stack overrun: Used: %ld of a %ld stack. åΰ¿Ȥꤿ硢'mysqld -O thread_stack=#' ȻꤷƤ" + kor " ƽϴ. : %ld : %ld. ʿ ū Ҷ 'mysqld -O thread_stack=#' ϼ" + por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. 
Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessrio" + rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare" + rus " : : %ld %ld . 'mysqld -O thread_stack=#' , " + serbian "Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete vei stack ako je potrebno" + slo "Preteenie zsobnku vlkna: pouit: %ld z %ld. Pouite 'mysqld -O thread_stack=#' k zadaniu vieho zsobnka" + spa "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario" + swe "Trdstacken tog slut: Har anvnt %ld av %ld bytes. Anvnd 'mysqld -O thread_stack=#' ifall du behver en strre stack" + ukr " Ǧ : : %ld %ld. 'mysqld -O thread_stack=#' ¦ , Ȧ" ER_WRONG_OUTER_JOIN 42000 - cze "V OUTER JOIN byl nalezen k-Bov odkaz. Provte ON podmnky" - dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions" - nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions" - eng "Cross dependency found in OUTER JOIN; examine your ON conditions" - est "Ristsltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi" - fre "Dpendance croise dans une clause OUTER JOIN. Vrifiez la condition ON" - ger "OUTER JOIN enthlt fehlerhafte Abhngigkeiten. In ON verwendete Bedingungen berprfen" - greek "Cross dependency OUTER JOIN. ON" - hun "Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket" - ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON" - por "Dependncia cruzada encontrada em juno externa (OUTER JOIN); examine as condies utilizadas nas clusulas 'ON'" - rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON" - rus " OUTER JOIN . ON" - serbian "Unakrsna zavisnost pronaena u komandi 'OUTER JOIN'. Istraite vae 'ON' uslove" - slo "V OUTER JOIN bol njden krov odkaz. Skontrolujte podmienky ON" - spa "Dependencia cruzada encontrada en OUTER JOIN. Examine su condicin ON" - swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket" - ukr " Φ OUTER JOIN. צ ON" + cze "V OUTER JOIN byl nalezen k-Bov odkaz. Provte ON podmnky" + dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions" + nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions" + eng "Cross dependency found in OUTER JOIN; examine your ON conditions" + est "Ristsltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi" + fre "Dpendance croise dans une clause OUTER JOIN. Vrifiez la condition ON" + ger "OUTER JOIN enthlt fehlerhafte Abhngigkeiten. In ON verwendete Bedingungen berprfen" + greek "Cross dependency OUTER JOIN. ON" + hun "Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket" + ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON" + por "Dependncia cruzada encontrada em juno externa (OUTER JOIN); examine as condies utilizadas nas clusulas 'ON'" + rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON" + rus " OUTER JOIN . ON" + serbian "Unakrsna zavisnost pronaena u komandi 'OUTER JOIN'. Istraite vae 'ON' uslove" + slo "V OUTER JOIN bol njden krov odkaz. Skontrolujte podmienky ON" + spa "Dependencia cruzada encontrada en OUTER JOIN. Examine su condicin ON" + swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket" + ukr " Φ OUTER JOIN. 
צ ON" ER_NULL_COLUMN_IN_INDEX 42000 - eng "Table handler doesn't support NULL in given index. Please change column '%-.64s' to be NOT NULL or use another handler" - swe "Tabell hanteraren kan inte indexera NULL kolumner fr den givna index typen. ndra '%-.64s' till NOT NULL eller anvnd en annan hanterare" + eng "Table handler doesn't support NULL in given index. Please change column '%-.64s' to be NOT NULL or use another handler" + swe "Tabell hanteraren kan inte indexera NULL kolumner fr den givna index typen. ndra '%-.64s' till NOT NULL eller anvnd en annan hanterare" ER_CANT_FIND_UDF - cze "Nemohu na-Bst funkci '%-.64s'" - dan "Kan ikke lse funktionen '%-.64s'" - nla "Kan functie '%-.64s' niet laden" - eng "Can't load function '%-.64s'" - jps "function '%-.64s' [hł܂", - est "Ei suuda avada funktsiooni '%-.64s'" - fre "Imposible de charger la fonction '%-.64s'" - ger "Kann Funktion '%-.64s' nicht laden" - greek " load '%-.64s'" - hun "A(z) '%-.64s' fuggveny nem toltheto be" - ita "Impossibile caricare la funzione '%-.64s'" - jpn "function '%-.64s' ɤǤޤ" - kor "'%-.64s' Լ ε ߽ϴ." - por "No pode carregar a funo '%-.64s'" - rum "Nu pot incarca functia '%-.64s'" - rus " '%-.64s'" - serbian "Ne mogu da uitam funkciju '%-.64s'" - slo "Nemem nata funkciu '%-.64s'" - spa "No puedo cargar funcin '%-.64s'" - swe "Kan inte ladda funktionen '%-.64s'" - ukr " æ '%-.64s'" + cze "Nemohu na-Bst funkci '%-.64s'" + dan "Kan ikke lse funktionen '%-.64s'" + nla "Kan functie '%-.64s' niet laden" + eng "Can't load function '%-.64s'" + jps "function '%-.64s' [hł܂", + est "Ei suuda avada funktsiooni '%-.64s'" + fre "Imposible de charger la fonction '%-.64s'" + ger "Kann Funktion '%-.64s' nicht laden" + greek " load '%-.64s'" + hun "A(z) '%-.64s' fuggveny nem toltheto be" + ita "Impossibile caricare la funzione '%-.64s'" + jpn "function '%-.64s' ɤǤޤ" + kor "'%-.64s' Լ ε ߽ϴ." 
+ por "No pode carregar a funo '%-.64s'" + rum "Nu pot incarca functia '%-.64s'" + rus " '%-.64s'" + serbian "Ne mogu da uitam funkciju '%-.64s'" + slo "Nemem nata funkciu '%-.64s'" + spa "No puedo cargar funcin '%-.64s'" + swe "Kan inte ladda funktionen '%-.64s'" + ukr " æ '%-.64s'" ER_CANT_INITIALIZE_UDF - cze "Nemohu inicializovat funkci '%-.64s'; %-.80s" - dan "Kan ikke starte funktionen '%-.64s'; %-.80s" - nla "Kan functie '%-.64s' niet initialiseren; %-.80s" - eng "Can't initialize function '%-.64s'; %-.80s" - jps "function '%-.64s' ł܂; %-.80s", - est "Ei suuda algvrtustada funktsiooni '%-.64s'; %-.80s" - fre "Impossible d'initialiser la fonction '%-.64s'; %-.80s" - ger "Kann Funktion '%-.64s' nicht initialisieren: %-.80s" - greek " '%-.64s'; %-.80s" - hun "A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s" - ita "Impossibile inizializzare la funzione '%-.64s'; %-.80s" - jpn "function '%-.64s' Ǥޤ; %-.80s" - kor "'%-.64s' Լ ʱȭ ߽ϴ.; %-.80s" - por "No pode inicializar a funo '%-.64s' - '%-.80s'" - rum "Nu pot initializa functia '%-.64s'; %-.80s" - rus " '%-.64s'; %-.80s" - serbian "Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s" - slo "Nemem inicializova funkciu '%-.64s'; %-.80s" - spa "No puedo inicializar funcin '%-.64s'; %-.80s" - swe "Kan inte initialisera funktionen '%-.64s'; '%-.80s'" - ukr " Φæ̦ æ '%-.64s'; %-.80s" + cze "Nemohu inicializovat funkci '%-.64s'; %-.80s" + dan "Kan ikke starte funktionen '%-.64s'; %-.80s" + nla "Kan functie '%-.64s' niet initialiseren; %-.80s" + eng "Can't initialize function '%-.64s'; %-.80s" + jps "function '%-.64s' ł܂; %-.80s", + est "Ei suuda algvrtustada funktsiooni '%-.64s'; %-.80s" + fre "Impossible d'initialiser la fonction '%-.64s'; %-.80s" + ger "Kann Funktion '%-.64s' nicht initialisieren: %-.80s" + greek " '%-.64s'; %-.80s" + hun "A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s" + ita "Impossibile inizializzare la funzione '%-.64s'; %-.80s" + jpn "function '%-.64s' Ǥޤ; %-.80s" + kor "'%-.64s' Լ ʱȭ ߽ϴ.; %-.80s" + por "No pode inicializar a funo '%-.64s' - '%-.80s'" + rum "Nu pot initializa functia '%-.64s'; %-.80s" + rus " '%-.64s'; %-.80s" + serbian "Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s" + slo "Nemem inicializova funkciu '%-.64s'; %-.80s" + spa "No puedo inicializar funcin '%-.64s'; %-.80s" + swe "Kan inte initialisera funktionen '%-.64s'; '%-.80s'" + ukr " Φæ̦ æ '%-.64s'; %-.80s" ER_UDF_NO_PATHS - cze "Pro sd-Blenou knihovnu nejsou povoleny cesty" - dan "Angivelse af sti ikke tilladt for delt bibliotek" - nla "Geen pad toegestaan voor shared library" - eng "No paths allowed for shared library" - jps "shared library ւ̃pXʂĂ܂", - est "Teegi nimes ei tohi olla kataloogi" - fre "Chemin interdit pour les bibliothques partages" - ger "Keine Pfade gestattet fr Shared Library" - greek " paths shared library" - hun "Nincs ut a megosztott konyvtarakhoz (shared library)" - ita "Non sono ammessi path per le librerie condivisa" - jpn "shared library ؤΥѥ̤äƤޤ" - kor " ̹ н ǵǾ ʽϴ." 
- por "No h caminhos (paths) permitidos para biblioteca compartilhada" - rum "Nici un paths nu e permis pentru o librarie shared" - rus " " - serbian "Ne postoje dozvoljene putanje do share-ovane biblioteke" - slo "Neprpustn iadne cesty k zdieanej kninici" - spa "No pasos permitidos para librarias conjugadas" - swe "Man fr inte ange skvg fr dynamiska bibliotek" - ukr " Ԧ Ħ ¦̦" + cze "Pro sd-Blenou knihovnu nejsou povoleny cesty" + dan "Angivelse af sti ikke tilladt for delt bibliotek" + nla "Geen pad toegestaan voor shared library" + eng "No paths allowed for shared library" + jps "shared library ւ̃pXʂĂ܂", + est "Teegi nimes ei tohi olla kataloogi" + fre "Chemin interdit pour les bibliothques partages" + ger "Keine Pfade gestattet fr Shared Library" + greek " paths shared library" + hun "Nincs ut a megosztott konyvtarakhoz (shared library)" + ita "Non sono ammessi path per le librerie condivisa" + jpn "shared library ؤΥѥ̤äƤޤ" + kor " ̹ н ǵǾ ʽϴ." + por "No h caminhos (paths) permitidos para biblioteca compartilhada" + rum "Nici un paths nu e permis pentru o librarie shared" + rus " " + serbian "Ne postoje dozvoljene putanje do share-ovane biblioteke" + slo "Neprpustn iadne cesty k zdieanej kninici" + spa "No pasos permitidos para librarias conjugadas" + swe "Man fr inte ange skvg fr dynamiska bibliotek" + ukr " Ԧ Ħ ¦̦" ER_UDF_EXISTS - cze "Funkce '%-.64s' ji-B existuje" - dan "Funktionen '%-.64s' findes allerede" - nla "Functie '%-.64s' bestaat reeds" - eng "Function '%-.64s' already exists" - jps "Function '%-.64s' ͊ɒ`Ă܂", - est "Funktsioon '%-.64s' juba eksisteerib" - fre "La fonction '%-.64s' existe dj" - ger "Funktion '%-.64s' existiert schon" - greek " '%-.64s' " - hun "A '%-.64s' fuggveny mar letezik" - ita "La funzione '%-.64s' esiste gia`" - jpn "Function '%-.64s' ϴƤޤ" - kor "'%-.64s' Լ ̹ մϴ." - por "Funo '%-.64s' j existe" - rum "Functia '%-.64s' exista deja" - rus " '%-.64s' " - serbian "Funkcija '%-.64s' ve postoji" - slo "Funkcia '%-.64s' u existuje" - spa "Funcin '%-.64s' ya existe" - swe "Funktionen '%-.64s' finns redan" - ukr "æ '%-.64s' դ" + cze "Funkce '%-.64s' ji-B existuje" + dan "Funktionen '%-.64s' findes allerede" + nla "Functie '%-.64s' bestaat reeds" + eng "Function '%-.64s' already exists" + jps "Function '%-.64s' ͊ɒ`Ă܂", + est "Funktsioon '%-.64s' juba eksisteerib" + fre "La fonction '%-.64s' existe dj" + ger "Funktion '%-.64s' existiert schon" + greek " '%-.64s' " + hun "A '%-.64s' fuggveny mar letezik" + ita "La funzione '%-.64s' esiste gia`" + jpn "Function '%-.64s' ϴƤޤ" + kor "'%-.64s' Լ ̹ մϴ." 
+ por "Funo '%-.64s' j existe" + rum "Functia '%-.64s' exista deja" + rus " '%-.64s' " + serbian "Funkcija '%-.64s' ve postoji" + slo "Funkcia '%-.64s' u existuje" + spa "Funcin '%-.64s' ya existe" + swe "Funktionen '%-.64s' finns redan" + ukr "æ '%-.64s' դ" ER_CANT_OPEN_LIBRARY - cze "Nemohu otev-Bt sdlenou knihovnu '%-.64s' (errno: %d %-.128s)" - dan "Kan ikke bne delt bibliotek '%-.64s' (errno: %d %-.128s)" - nla "Kan shared library '%-.64s' niet openen (Errcode: %d %-.128s)" - eng "Can't open shared library '%-.64s' (errno: %d %-.128s)" - jps "shared library '%-.64s' Jł܂ (errno: %d %-.128s)", - est "Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.128s)" - fre "Impossible d'ouvrir la bibliothque partage '%-.64s' (errno: %d %-.128s)" - ger "Kann Shared Library '%-.64s' nicht ffnen (Fehler: %d %-.128s)" - greek " shared library '%-.64s' ( : %d %-.128s)" - hun "A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)" - ita "Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %-.128s)" - jpn "shared library '%-.64s' 򳫤Ǥޤ (errno: %d %-.128s)" - kor "'%-.64s' ̹ ϴ.(ȣ: %d %-.128s)" - nor "Can't open shared library '%-.64s' (errno: %d %-.128s)" - norwegian-ny "Can't open shared library '%-.64s' (errno: %d %-.128s)" - pol "Can't open shared library '%-.64s' (errno: %d %-.128s)" - por "No pode abrir biblioteca compartilhada '%-.64s' (erro no. %d '%-.128s')" - rum "Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.128s)" - rus " '%-.64s' (: %d %-.128s)" - serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.128s)" - slo "Nemem otvori zdiean kninicu '%-.64s' (chybov kd: %d %-.128s)" - spa "No puedo abrir libraria conjugada '%-.64s' (errno: %d %-.128s)" - swe "Kan inte ppna det dynamiska biblioteket '%-.64s' (Felkod: %d %-.128s)" - ukr " צ Ħ ¦̦ '%-.64s' (: %d %-.128s)" + cze "Nemohu otev-Bt sdlenou knihovnu '%-.64s' (errno: %d %-.128s)" + dan "Kan ikke bne delt bibliotek '%-.64s' (errno: %d %-.128s)" + nla "Kan shared library '%-.64s' niet openen (Errcode: %d %-.128s)" + eng "Can't open shared library '%-.64s' (errno: %d %-.128s)" + jps "shared library '%-.64s' Jł܂ (errno: %d %-.128s)", + est "Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.128s)" + fre "Impossible d'ouvrir la bibliothque partage '%-.64s' (errno: %d %-.128s)" + ger "Kann Shared Library '%-.64s' nicht ffnen (Fehler: %d %-.128s)" + greek " shared library '%-.64s' ( : %d %-.128s)" + hun "A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)" + ita "Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %-.128s)" + jpn "shared library '%-.64s' 򳫤Ǥޤ (errno: %d %-.128s)" + kor "'%-.64s' ̹ ϴ.(ȣ: %d %-.128s)" + nor "Can't open shared library '%-.64s' (errno: %d %-.128s)" + norwegian-ny "Can't open shared library '%-.64s' (errno: %d %-.128s)" + pol "Can't open shared library '%-.64s' (errno: %d %-.128s)" + por "No pode abrir biblioteca compartilhada '%-.64s' (erro no. 
%d '%-.128s')" + rum "Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.128s)" + rus " '%-.64s' (: %d %-.128s)" + serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.128s)" + slo "Nemem otvori zdiean kninicu '%-.64s' (chybov kd: %d %-.128s)" + spa "No puedo abrir libraria conjugada '%-.64s' (errno: %d %-.128s)" + swe "Kan inte ppna det dynamiska biblioteket '%-.64s' (Felkod: %d %-.128s)" + ukr " צ Ħ ¦̦ '%-.64s' (: %d %-.128s)" ER_CANT_FIND_DL_ENTRY - cze "Nemohu naj-Bt funkci '%-.128s' v knihovn" - dan "Kan ikke finde funktionen '%-.128s' i bibliotek" - nla "Kan functie '%-.128s' niet in library vinden" - eng "Can't find symbol '%-.128s' in library" - jps "function '%-.128s' Cu[Ɍt鎖ł܂", - est "Ei leia funktsiooni '%-.128s' antud teegis" - fre "Impossible de trouver la fonction '%-.128s' dans la bibliothque" - ger "Kann Funktion '%-.128s' in der Library nicht finden" - greek " '%-.128s' " - hun "A(z) '%-.128s' fuggveny nem talalhato a konyvtarban" - ita "Impossibile trovare la funzione '%-.128s' nella libreria" - jpn "function '%-.128s' 饤֥꡼˸դǤޤ" - kor "̹ '%-.128s' Լ ã ϴ." - por "No pode encontrar a funo '%-.128s' na biblioteca" - rum "Nu pot gasi functia '%-.128s' in libraria" - rus " '%-.128s' " - serbian "Ne mogu da pronadjem funkciju '%-.128s' u biblioteci" - slo "Nemem njs funkciu '%-.128s' v kninici" - spa "No puedo encontrar funcin '%-.128s' en libraria" - swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket" - ukr " æ '%-.128s' ¦̦æ" + cze "Nemohu naj-Bt funkci '%-.128s' v knihovn" + dan "Kan ikke finde funktionen '%-.128s' i bibliotek" + nla "Kan functie '%-.128s' niet in library vinden" + eng "Can't find symbol '%-.128s' in library" + jps "function '%-.128s' Cu[Ɍt鎖ł܂", + est "Ei leia funktsiooni '%-.128s' antud teegis" + fre "Impossible de trouver la fonction '%-.128s' dans la bibliothque" + ger "Kann Funktion '%-.128s' in der Library nicht finden" + greek " '%-.128s' " + hun "A(z) '%-.128s' fuggveny nem talalhato a konyvtarban" + ita "Impossibile trovare la funzione '%-.128s' nella libreria" + jpn "function '%-.128s' 饤֥꡼˸դǤޤ" + kor "̹ '%-.128s' Լ ã ϴ." + por "No pode encontrar a funo '%-.128s' na biblioteca" + rum "Nu pot gasi functia '%-.128s' in libraria" + rus " '%-.128s' " + serbian "Ne mogu da pronadjem funkciju '%-.128s' u biblioteci" + slo "Nemem njs funkciu '%-.128s' v kninici" + spa "No puedo encontrar funcin '%-.128s' en libraria" + swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket" + ukr " æ '%-.128s' ¦̦æ" ER_FUNCTION_NOT_DEFINED - cze "Funkce '%-.64s' nen-B definovna" - dan "Funktionen '%-.64s' er ikke defineret" - nla "Functie '%-.64s' is niet gedefinieerd" - eng "Function '%-.64s' is not defined" - jps "Function '%-.64s' ͒`Ă܂", - est "Funktsioon '%-.64s' ei ole defineeritud" - fre "La fonction '%-.64s' n'est pas dfinie" - ger "Funktion '%-.64s' ist nicht definiert" - greek " '%-.64s' " - hun "A '%-.64s' fuggveny nem definialt" - ita "La funzione '%-.64s' non e` definita" - jpn "Function '%-.64s' Ƥޤ" - kor "'%-.64s' Լ ǵǾ ʽϴ." 
- por "Funo '%-.64s' no est definida" - rum "Functia '%-.64s' nu e definita" - rus " '%-.64s' " - serbian "Funkcija '%-.64s' nije definisana" - slo "Funkcia '%-.64s' nie je definovan" - spa "Funcin '%-.64s' no est definida" - swe "Funktionen '%-.64s' r inte definierad" - ukr "æ '%-.64s' " + cze "Funkce '%-.64s' nen-B definovna" + dan "Funktionen '%-.64s' er ikke defineret" + nla "Functie '%-.64s' is niet gedefinieerd" + eng "Function '%-.64s' is not defined" + jps "Function '%-.64s' ͒`Ă܂", + est "Funktsioon '%-.64s' ei ole defineeritud" + fre "La fonction '%-.64s' n'est pas dfinie" + ger "Funktion '%-.64s' ist nicht definiert" + greek " '%-.64s' " + hun "A '%-.64s' fuggveny nem definialt" + ita "La funzione '%-.64s' non e` definita" + jpn "Function '%-.64s' Ƥޤ" + kor "'%-.64s' Լ ǵǾ ʽϴ." + por "Funo '%-.64s' no est definida" + rum "Functia '%-.64s' nu e definita" + rus " '%-.64s' " + serbian "Funkcija '%-.64s' nije definisana" + slo "Funkcia '%-.64s' nie je definovan" + spa "Funcin '%-.64s' no est definida" + swe "Funktionen '%-.64s' r inte definierad" + ukr "æ '%-.64s' " ER_HOST_IS_BLOCKED - cze "Stroj '%-.64s' je zablokov-Bn kvli mnoha chybm pi pipojovn. Odblokujete pouitm 'mysqladmin flush-hosts'" - dan "Vrten er blokeret p grund af mange fejlforesprgsler. Ls op med 'mysqladmin flush-hosts'" - nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'" - eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'" - jps "Host '%-.64s' many connection error ̂߁Aۂ܂. 'mysqladmin flush-hosts' ʼnĂ", - est "Masin '%-.64s' on blokeeritud hulgaliste hendusvigade tttu. Blokeeringu saab thistada 'mysqladmin flush-hosts' ksuga" - fre "L'hte '%-.64s' est bloqu cause d'un trop grand nombre d'erreur de connection. Dbloquer le par 'mysqladmin flush-hosts'" - ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'" - greek " . 'mysqladmin flush-hosts'" - hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot" - ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'" - jpn "Host '%-.64s' many connection error Τᡢݤޤ. 'mysqladmin flush-hosts' DzƤ" - kor "ʹ Ͽ ȣƮ '%-.64s' Ǿϴ. 'mysqladmin flush-hosts' ̿Ͽ ϼ" - por "'Host' '%-.64s' est bloqueado devido a muitos erros de conexo. Desbloqueie com 'mysqladmin flush-hosts'" - rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'" - rus " '%-.64s' - . 'mysqladmin flush-hosts'" - serbian "Host '%-.64s' je blokiran zbog previe greaka u konekciji. Moete ga odblokirati pomou komande 'mysqladmin flush-hosts'" - spa "Servidor '%-.64s' est bloqueado por muchos errores de conexin. Desbloquear con 'mysqladmin flush-hosts'" - swe "Denna dator, '%-.64s', r blockerad pga mnga felaktig paket. Gr 'mysqladmin flush-hosts' fr att ta bort alla blockeringarna" - ukr " '%-.64s' ϧ ˦Ԧ '. 'mysqladmin flush-hosts'" + cze "Stroj '%-.64s' je zablokov-Bn kvli mnoha chybm pi pipojovn. Odblokujete pouitm 'mysqladmin flush-hosts'" + dan "Vrten er blokeret p grund af mange fejlforesprgsler. Ls op med 'mysqladmin flush-hosts'" + nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. 
Deblokkeer met 'mysqladmin flush-hosts'" + eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'" + jps "Host '%-.64s' many connection error ̂߁Aۂ܂. 'mysqladmin flush-hosts' ʼnĂ", + est "Masin '%-.64s' on blokeeritud hulgaliste hendusvigade tttu. Blokeeringu saab thistada 'mysqladmin flush-hosts' ksuga" + fre "L'hte '%-.64s' est bloqu cause d'un trop grand nombre d'erreur de connection. Dbloquer le par 'mysqladmin flush-hosts'" + ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'" + greek " . 'mysqladmin flush-hosts'" + hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot" + ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'" + jpn "Host '%-.64s' many connection error Τᡢݤޤ. 'mysqladmin flush-hosts' DzƤ" + kor "ʹ Ͽ ȣƮ '%-.64s' Ǿϴ. 'mysqladmin flush-hosts' ̿Ͽ ϼ" + por "'Host' '%-.64s' est bloqueado devido a muitos erros de conexo. Desbloqueie com 'mysqladmin flush-hosts'" + rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'" + rus " '%-.64s' - . 'mysqladmin flush-hosts'" + serbian "Host '%-.64s' je blokiran zbog previe greaka u konekciji. Moete ga odblokirati pomou komande 'mysqladmin flush-hosts'" + spa "Servidor '%-.64s' est bloqueado por muchos errores de conexin. Desbloquear con 'mysqladmin flush-hosts'" + swe "Denna dator, '%-.64s', r blockerad pga mnga felaktig paket. Gr 'mysqladmin flush-hosts' fr att ta bort alla blockeringarna" + ukr " '%-.64s' ϧ ˦Ԧ '. 'mysqladmin flush-hosts'" ER_HOST_NOT_PRIVILEGED - cze "Stroj '%-.64s' nem-B povoleno se k tomuto MySQL serveru pipojit" - dan "Vrten '%-.64s' kan ikke tilkoble denne MySQL-server" - nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server" - eng "Host '%-.64s' is not allowed to connect to this MySQL server" - jps "Host '%-.64s' MySQL server ɐڑ‚Ă܂", - est "Masinal '%-.64s' puudub ligips sellele MySQL serverile" - fre "Le hte '%-.64s' n'est pas authoris se connecter ce serveur MySQL" - ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden" - greek " MySQL server" - hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez" - ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL" - jpn "Host '%-.64s' MySQL server ³ĤƤޤ" - kor "'%-.64s' ȣƮ MySQL 㰡 ߽ϴ." 
- por "'Host' '%-.64s' no tem permisso para se conectar com este servidor MySQL" - rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL" - rus " '%-.64s' MySQL" - serbian "Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server" - spa "Servidor '%-.64s' no est permitido para conectar con este servidor MySQL" - swe "Denna dator, '%-.64s', har inte privileger att anvnda denna MySQL server" - ukr " '%-.64s' ' MySQL" + cze "Stroj '%-.64s' nem-B povoleno se k tomuto MySQL serveru pipojit" + dan "Vrten '%-.64s' kan ikke tilkoble denne MySQL-server" + nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server" + eng "Host '%-.64s' is not allowed to connect to this MySQL server" + jps "Host '%-.64s' MySQL server ɐڑ‚Ă܂", + est "Masinal '%-.64s' puudub ligips sellele MySQL serverile" + fre "Le hte '%-.64s' n'est pas authoris se connecter ce serveur MySQL" + ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden" + greek " MySQL server" + hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez" + ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL" + jpn "Host '%-.64s' MySQL server ³ĤƤޤ" + kor "'%-.64s' ȣƮ MySQL 㰡 ߽ϴ." + por "'Host' '%-.64s' no tem permisso para se conectar com este servidor MySQL" + rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL" + rus " '%-.64s' MySQL" + serbian "Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server" + spa "Servidor '%-.64s' no est permitido para conectar con este servidor MySQL" + swe "Denna dator, '%-.64s', har inte privileger att anvnda denna MySQL server" + ukr " '%-.64s' ' MySQL" ER_PASSWORD_ANONYMOUS_USER 42000 - cze "Pou-Bvte MySQL jako anonymn uivatel a anonymn uivatel nemaj povoleno mnit hesla" - dan "Du bruger MySQL som anonym bruger. Anonyme brugere m ikke ndre adgangskoder" - nla "U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen" - eng "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords" - jps "MySQL anonymous users ŎgpĂԂł́ApX[h̕ύX͂ł܂", - est "Te kasutate MySQL-i anonmse kasutajana, kelledel pole parooli muutmise igust" - fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autoriss changer les mots de passe" - ger "Sie benutzen MySQL als anonymer Benutzer und drfen daher keine Passwrter ndern" - greek " MySQL anonymous user passwords " - hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas" - ita "Impossibile cambiare la password usando MySQL come utente anonimo" - jpn "MySQL anonymous users ǻѤƤ֤ǤϡѥɤѹϤǤޤ" - kor " MySQL ͸ ڷ ϼ̽ϴ.͸ ڴ ȣ ϴ." - por "Voc est usando o MySQL como usurio annimo e usurios annimos no tm permisso para mudar senhas" - rum "Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele" - rus " MySQL , " - serbian "Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke" - spa "Tu ests usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves" - swe "Du anvnder MySQL som en anonym anvndare och som sdan fr du inte ndra ditt lsenord" - ukr " դ MySQL Φ , ͦ ̦" + cze "Pou-Bvte MySQL jako anonymn uivatel a anonymn uivatel nemaj povoleno mnit hesla" + dan "Du bruger MySQL som anonym bruger. 
Anonyme brugere m ikke ndre adgangskoder" + nla "U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen" + eng "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords" + jps "MySQL anonymous users ŎgpĂԂł́ApX[h̕ύX͂ł܂", + est "Te kasutate MySQL-i anonmse kasutajana, kelledel pole parooli muutmise igust" + fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autoriss changer les mots de passe" + ger "Sie benutzen MySQL als anonymer Benutzer und drfen daher keine Passwrter ndern" + greek " MySQL anonymous user passwords " + hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas" + ita "Impossibile cambiare la password usando MySQL come utente anonimo" + jpn "MySQL anonymous users ǻѤƤ֤ǤϡѥɤѹϤǤޤ" + kor " MySQL ͸ ڷ ϼ̽ϴ.͸ ڴ ȣ ϴ." + por "Voc est usando o MySQL como usurio annimo e usurios annimos no tm permisso para mudar senhas" + rum "Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele" + rus " MySQL , " + serbian "Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke" + spa "Tu ests usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves" + swe "Du anvnder MySQL som en anonym anvndare och som sdan fr du inte ndra ditt lsenord" + ukr " դ MySQL Φ , ͦ ̦" ER_PASSWORD_NOT_ALLOWED 42000 - cze "Na zm-Bnu hesel ostatnm muste mt prvo provst update tabulek v databzi mysql" - dan "Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ndre andres adgangskoder" - nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen" - eng "You must have privileges to update tables in the mysql database to be able to change passwords for others" - jps "̃[U[̃pX[hύX邽߂ɂ, mysql f[^x[Xɑ΂ update ̋‚Ȃ΂Ȃ܂.", - est "Teiste paroolide muutmiseks on nutav tabelite muutmisigus 'mysql' andmebaasis" - fre "Vous devez avoir le privilge update sur les tables de la base de donne mysql pour pouvoir changer les mots de passe des autres" - ger "Sie bentigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwrter anderer Benutzer ndern zu knnen" - greek " (update) mysql passwords " - hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz" - ita "E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti" - jpn "¾Υ桼Υѥɤѹ뤿ˤ, mysql ǡ١Ф update εĤʤФʤޤ." - kor " ٸڵ ȣ ֵ Ÿ̽ մϴ." 
- por "Voc deve ter privilgios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros" - rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora" - rus " , mysql" - serbian "Morate imati privilegije da moete da update-ujete odreene tabele ako elite da menjate lozinke za druge korisnike" - spa "Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros" - swe "Fr att ndra lsenord fr andra mste du ha rttigheter att uppdatera mysql-databasen" - ukr " Φ ڦ mysql, צ ͦ " + cze "Na zm-Bnu hesel ostatnm muste mt prvo provst update tabulek v databzi mysql" + dan "Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ndre andres adgangskoder" + nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen" + eng "You must have privileges to update tables in the mysql database to be able to change passwords for others" + jps "̃[U[̃pX[hύX邽߂ɂ, mysql f[^x[Xɑ΂ update ̋‚Ȃ΂Ȃ܂.", + est "Teiste paroolide muutmiseks on nutav tabelite muutmisigus 'mysql' andmebaasis" + fre "Vous devez avoir le privilge update sur les tables de la base de donne mysql pour pouvoir changer les mots de passe des autres" + ger "Sie bentigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwrter anderer Benutzer ndern zu knnen" + greek " (update) mysql passwords " + hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz" + ita "E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti" + jpn "¾Υ桼Υѥɤѹ뤿ˤ, mysql ǡ١Ф update εĤʤФʤޤ." + kor " ٸڵ ȣ ֵ Ÿ̽ մϴ." + por "Voc deve ter privilgios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros" + rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora" + rus " , mysql" + serbian "Morate imati privilegije da moete da update-ujete odreene tabele ako elite da menjate lozinke za druge korisnike" + spa "Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros" + swe "Fr att ndra lsenord fr andra mste du ha rttigheter att uppdatera mysql-databasen" + ukr " Φ ڦ mysql, צ ͦ " ER_PASSWORD_NO_MATCH 42000 - cze "V tabulce user nen-B dn odpovdajc dek" - dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen" - nla "Kan geen enkele passende rij vinden in de gebruikers tabel" - eng "Can't find any matching row in the user table" - est "Ei leia vastavat kirjet kasutajate tabelis" - fre "Impossible de trouver un enregistrement correspondant dans la table user" - ger "Kann keinen passenden Datensatz in Tabelle 'user' finden" - greek " " - hun "Nincs megegyezo sor a user tablaban" - ita "Impossibile trovare la riga corrispondente nella tabella user" - kor " ̺ ġϴ ã ϴ." 
- por "No pode encontrar nenhuma linha que combine na tabela usurio (user table)" - rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului" - rus " " - serbian "Ne mogu da pronaem odgovarajui slog u 'user' tabeli" - spa "No puedo encontrar una lnea correponsdiente en la tabla user" - swe "Hittade inte anvndaren i 'user'-tabellen" - ukr " צצ Ӧ æ " + cze "V tabulce user nen-B dn odpovdajc dek" + dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen" + nla "Kan geen enkele passende rij vinden in de gebruikers tabel" + eng "Can't find any matching row in the user table" + est "Ei leia vastavat kirjet kasutajate tabelis" + fre "Impossible de trouver un enregistrement correspondant dans la table user" + ger "Kann keinen passenden Datensatz in Tabelle 'user' finden" + greek " " + hun "Nincs megegyezo sor a user tablaban" + ita "Impossibile trovare la riga corrispondente nella tabella user" + kor " ̺ ġϴ ã ϴ." + por "No pode encontrar nenhuma linha que combine na tabela usurio (user table)" + rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului" + rus " " + serbian "Ne mogu da pronaem odgovarajui slog u 'user' tabeli" + spa "No puedo encontrar una lnea correponsdiente en la tabla user" + swe "Hittade inte anvndaren i 'user'-tabellen" + ukr " צצ Ӧ æ " ER_UPDATE_INFO - cze "Nalezen-Bch dk: %ld Zmnno: %ld Varovn: %ld" - dan "Poster fundet: %ld ndret: %ld Advarsler: %ld" - nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld" - eng "Rows matched: %ld Changed: %ld Warnings: %ld" - jps "v(Rows matched): %ld ύX: %ld Warnings: %ld", - est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld" - fre "Enregistrements correspondants: %ld Modifis: %ld Warnings: %ld" - ger "Datenstze gefunden: %ld Gendert: %ld Warnungen: %ld" - hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld" - ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld" - jpn "׿(Rows matched): %ld ѹ: %ld Warnings: %ld" - kor "ġϴ Rows : %ld : %ld : %ld" - por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld" - rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld" - rus " : %ld : %ld : %ld" - serbian "Odgovarajuih slogova: %ld Promenjeno: %ld Upozorenja: %ld" - spa "Lneas correspondientes: %ld Cambiadas: %ld Avisos: %ld" - swe "Rader: %ld Uppdaterade: %ld Varningar: %ld" - ukr "Ӧ צצ: %ld ͦ: %ld : %ld" + cze "Nalezen-Bch dk: %ld Zmnno: %ld Varovn: %ld" + dan "Poster fundet: %ld ndret: %ld Advarsler: %ld" + nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld" + eng "Rows matched: %ld Changed: %ld Warnings: %ld" + jps "v(Rows matched): %ld ύX: %ld Warnings: %ld", + est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld" + fre "Enregistrements correspondants: %ld Modifis: %ld Warnings: %ld" + ger "Datenstze gefunden: %ld Gendert: %ld Warnungen: %ld" + hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld" + ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld" + jpn "׿(Rows matched): %ld ѹ: %ld Warnings: %ld" + kor "ġϴ Rows : %ld : %ld : %ld" + por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld" + rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld" + rus " : %ld : %ld : %ld" + serbian "Odgovarajuih slogova: %ld Promenjeno: %ld Upozorenja: %ld" + spa "Lneas correspondientes: %ld Cambiadas: %ld Avisos: %ld" + swe "Rader: %ld Uppdaterade: %ld Varningar: %ld" + ukr "Ӧ צצ: %ld ͦ: %ld : %ld" ER_CANT_CREATE_THREAD - cze "Nemohu vytvo-Bit nov thread (errno %d). 
Pokud je jet njak voln pam, podvejte se do manulu na st o chybch specifickch pro jednotliv operan systmy" - dan "Kan ikke danne en ny trd (fejl nr. %d). Hvis computeren ikke er lbet tr for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhngig fejl" - nla "Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout" - eng "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug" - jps "VKɃXbh܂ł (errno %d). őgpƒ[zĂȂ̂ɃG[ĂȂ, }jA̒ 'possible OS-dependent bug' ƂTĂ݂Ă.", - est "Ei suuda luua uut lime (veakood %d). Kui mlu ei ole otsas, on tenoliselt tegemist operatsioonissteemispetsiifilise veaga" - fre "Impossible de crer une nouvelle tche (errno %d). S'il reste de la mmoire libre, consultez le manual pour trouver un ventuel bug dpendant de l'OS" - ger "Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfgbar sein, bitte im Handbuch wegen mglicher Fehler im Betriebssystem nachschlagen" - hun "Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet" - ita "Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO" - jpn "˥åɤޤǤ (errno %d). ⤷ѵĥ꡼ۤƤʤΤ˥顼ȯƤʤ, ޥ˥奢椫 'possible OS-dependent bug' ȤʸõƤߤƤ." - kor "ο 带 ϴ.(ȣ %d). ޸𸮰 ִٸ OS-dependent ޴ κ ãƺÿ." - nor "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" - norwegian-ny "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" - pol "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" - por "No pode criar uma nova 'thread' (erro no. %d). Se voc no estiver sem memria disponvel, voc pode consultar o manual sobre um possvel 'bug' dependente do sistema operacional" - rum "Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare" - rus " ( %d). , , " - serbian "Ne mogu da kreiram novi thread (errno %d). Ako imate jo slobodne memorije, trebali biste da pogledate u priruniku da li je ovo specifina greka vaeg operativnog sistema" - spa "No puedo crear un nuevo thread (errno %d). Si tu est con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO" - swe "Kan inte skapa en ny trd (errno %d)" - ukr " Ǧ ( %d). ', æ ϧ - " + cze "Nemohu vytvo-Bit nov thread (errno %d). Pokud je jet njak voln pam, podvejte se do manulu na st o chybch specifickch pro jednotliv operan systmy" + dan "Kan ikke danne en ny trd (fejl nr. %d). Hvis computeren ikke er lbet tr for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhngig fejl" + nla "Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout" + eng "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug" + jps "VKɃXbh܂ł (errno %d). 
őgpƒ[zĂȂ̂ɃG[ĂȂ, }jA̒ 'possible OS-dependent bug' ƂTĂ݂Ă.", + est "Ei suuda luua uut lime (veakood %d). Kui mlu ei ole otsas, on tenoliselt tegemist operatsioonissteemispetsiifilise veaga" + fre "Impossible de crer une nouvelle tche (errno %d). S'il reste de la mmoire libre, consultez le manual pour trouver un ventuel bug dpendant de l'OS" + ger "Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfgbar sein, bitte im Handbuch wegen mglicher Fehler im Betriebssystem nachschlagen" + hun "Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet" + ita "Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO" + jpn "˥åɤޤǤ (errno %d). ⤷ѵĥ꡼ۤƤʤΤ˥顼ȯƤʤ, ޥ˥奢椫 'possible OS-dependent bug' ȤʸõƤߤƤ." + kor "ο 带 ϴ.(ȣ %d). ޸𸮰 ִٸ OS-dependent ޴ κ ãƺÿ." + nor "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + norwegian-ny "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + pol "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + por "No pode criar uma nova 'thread' (erro no. %d). Se voc no estiver sem memria disponvel, voc pode consultar o manual sobre um possvel 'bug' dependente do sistema operacional" + rum "Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare" + rus " ( %d). , , " + serbian "Ne mogu da kreiram novi thread (errno %d). Ako imate jo slobodne memorije, trebali biste da pogledate u priruniku da li je ovo specifina greka vaeg operativnog sistema" + spa "No puedo crear un nuevo thread (errno %d). Si tu est con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO" + swe "Kan inte skapa en ny trd (errno %d)" + ukr " Ǧ ( %d). ', æ ϧ - " ER_WRONG_VALUE_COUNT_ON_ROW 21S01 - cze "Po-Bet sloupc neodpovd potu hodnot na dku %ld" - dan "Kolonne antallet stemmer ikke overens med antallet af vrdier i post %ld" - nla "Kolom aantal komt niet overeen met waarde aantal in rij %ld" - eng "Column count doesn't match value count at row %ld" - est "Tulpade hulk erineb vrtuste hulgast real %ld" - ger "Anzahl der Felder stimmt nicht mit der Anzahl der Werte in Zeile %ld berein" - hun "Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel" - ita "Il numero delle colonne non corrisponde al conteggio alla riga %ld" - kor "Row %ld Į īƮ value īͿ ġ ʽϴ." 
- por "Contagem de colunas no confere com a contagem de valores na linha %ld" - rum "Numarul de coloane nu corespunde cu numarul de valori la linia %ld" - rus " %ld" - serbian "Broj kolona ne odgovara broju vrednosti u slogu %ld" - spa "El nmero de columnas no corresponde al nmero en la lnea %ld" - swe "Antalet kolumner motsvarar inte antalet vrden p rad: %ld" - ukr "˦ æ Ц ˦˦ æ %ld" + cze "Po-Bet sloupc neodpovd potu hodnot na dku %ld" + dan "Kolonne antallet stemmer ikke overens med antallet af vrdier i post %ld" + nla "Kolom aantal komt niet overeen met waarde aantal in rij %ld" + eng "Column count doesn't match value count at row %ld" + est "Tulpade hulk erineb vrtuste hulgast real %ld" + ger "Anzahl der Felder stimmt nicht mit der Anzahl der Werte in Zeile %ld berein" + hun "Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel" + ita "Il numero delle colonne non corrisponde al conteggio alla riga %ld" + kor "Row %ld Į īƮ value īͿ ġ ʽϴ." + por "Contagem de colunas no confere com a contagem de valores na linha %ld" + rum "Numarul de coloane nu corespunde cu numarul de valori la linia %ld" + rus " %ld" + serbian "Broj kolona ne odgovara broju vrednosti u slogu %ld" + spa "El nmero de columnas no corresponde al nmero en la lnea %ld" + swe "Antalet kolumner motsvarar inte antalet vrden p rad: %ld" + ukr "˦ æ Ц ˦˦ æ %ld" ER_CANT_REOPEN_TABLE - cze "Nemohu znovuotev-Bt tabulku: '%-.64s" - dan "Kan ikke genbne tabel '%-.64s" - nla "Kan tabel niet opnieuw openen: '%-.64s" - eng "Can't reopen table: '%-.64s'" - est "Ei suuda taasavada tabelit '%-.64s'" - fre "Impossible de rouvrir la table: '%-.64s" - ger "Kann Tabelle'%-.64s' nicht erneut ffnen" - hun "Nem lehet ujra-megnyitni a tablat: '%-.64s" - ita "Impossibile riaprire la tabella: '%-.64s'" - kor "̺ ٽ : '%-.64s" - nor "Can't reopen table: '%-.64s" - norwegian-ny "Can't reopen table: '%-.64s" - pol "Can't reopen table: '%-.64s" - por "No pode reabrir a tabela '%-.64s" - rum "Nu pot redeschide tabela: '%-.64s'" - rus " '%-.64s'" - serbian "Ne mogu da ponovo otvorim tabelu '%-.64s'" - slo "Can't reopen table: '%-.64s" - spa "No puedo reabrir tabla: '%-.64s" - swe "Kunde inte stnga och ppna tabell '%-.64s" - ukr " צ : '%-.64s'" + cze "Nemohu znovuotev-Bt tabulku: '%-.64s" + dan "Kan ikke genbne tabel '%-.64s" + nla "Kan tabel niet opnieuw openen: '%-.64s" + eng "Can't reopen table: '%-.64s'" + est "Ei suuda taasavada tabelit '%-.64s'" + fre "Impossible de rouvrir la table: '%-.64s" + ger "Kann Tabelle'%-.64s' nicht erneut ffnen" + hun "Nem lehet ujra-megnyitni a tablat: '%-.64s" + ita "Impossibile riaprire la tabella: '%-.64s'" + kor "̺ ٽ : '%-.64s" + nor "Can't reopen table: '%-.64s" + norwegian-ny "Can't reopen table: '%-.64s" + pol "Can't reopen table: '%-.64s" + por "No pode reabrir a tabela '%-.64s" + rum "Nu pot redeschide tabela: '%-.64s'" + rus " '%-.64s'" + serbian "Ne mogu da ponovo otvorim tabelu '%-.64s'" + slo "Can't reopen table: '%-.64s" + spa "No puedo reabrir tabla: '%-.64s" + swe "Kunde inte stnga och ppna tabell '%-.64s" + ukr " צ : '%-.64s'" ER_INVALID_USE_OF_NULL 22004 - cze "Neplatn-B uit hodnoty NULL" - dan "Forkert brug af nulvrdi (NULL)" - nla "Foutief gebruik van de NULL waarde" - eng "Invalid use of NULL value" - jps "NULL l̎gp@sK؂ł", - est "NULL vrtuse vrkasutus" - fre "Utilisation incorrecte de la valeur NULL" - ger "Unerlaubte Verwendung eines NULL-Werts" - hun "A NULL ervenytelen hasznalata" - ita "Uso scorretto del valore NULL" - jpn "NULL ͤλˡŬڤǤ" - kor "NULL ߸ ϼ̱..." 
- por "Uso invlido do valor NULL" - rum "Folosirea unei value NULL e invalida" - rus " NULL" - serbian "Pogrena upotreba vrednosti NULL" - spa "Invalido uso de valor NULL" - swe "Felaktig anvnding av NULL" - ukr " NULL" + cze "Neplatn-B uit hodnoty NULL" + dan "Forkert brug af nulvrdi (NULL)" + nla "Foutief gebruik van de NULL waarde" + eng "Invalid use of NULL value" + jps "NULL l̎gp@sK؂ł", + est "NULL vrtuse vrkasutus" + fre "Utilisation incorrecte de la valeur NULL" + ger "Unerlaubte Verwendung eines NULL-Werts" + hun "A NULL ervenytelen hasznalata" + ita "Uso scorretto del valore NULL" + jpn "NULL ͤλˡŬڤǤ" + kor "NULL ߸ ϼ̱..." + por "Uso invlido do valor NULL" + rum "Folosirea unei value NULL e invalida" + rus " NULL" + serbian "Pogrena upotreba vrednosti NULL" + spa "Invalido uso de valor NULL" + swe "Felaktig anvnding av NULL" + ukr " NULL" ER_REGEXP_ERROR 42000 - cze "Regul-Brn vraz vrtil chybu '%-.64s'" - dan "Fik fejl '%-.64s' fra regexp" - nla "Fout '%-.64s' ontvangen van regexp" - eng "Got error '%-.64s' from regexp" - est "regexp tagastas vea '%-.64s'" - fre "Erreur '%-.64s' provenant de regexp" - ger "regexp lieferte Fehler '%-.64s'" - hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)" - ita "Errore '%-.64s' da regexp" - kor "regexp '%-.64s' ϴ." - por "Obteve erro '%-.64s' em regexp" - rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)" - rus " '%-.64s' " - serbian "Funkcija regexp je vratila greku '%-.64s'" - spa "Obtenido error '%-.64s' de regexp" - swe "Fick fel '%-.64s' frn REGEXP" - ukr " '%-.64s' צ " + cze "Regul-Brn vraz vrtil chybu '%-.64s'" + dan "Fik fejl '%-.64s' fra regexp" + nla "Fout '%-.64s' ontvangen van regexp" + eng "Got error '%-.64s' from regexp" + est "regexp tagastas vea '%-.64s'" + fre "Erreur '%-.64s' provenant de regexp" + ger "regexp lieferte Fehler '%-.64s'" + hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)" + ita "Errore '%-.64s' da regexp" + kor "regexp '%-.64s' ϴ." + por "Obteve erro '%-.64s' em regexp" + rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)" + rus " '%-.64s' " + serbian "Funkcija regexp je vratila greku '%-.64s'" + spa "Obtenido error '%-.64s' de regexp" + swe "Fick fel '%-.64s' frn REGEXP" + ukr " '%-.64s' צ " ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000 - cze "Pokud nen-B dn GROUP BY klauzule, nen dovoleno souasn pouit GROUP poloek (MIN(),MAX(),COUNT()...) s ne GROUP polokami" - dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prdikat" - nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is" - eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause" - est "GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud" - fre "Mlanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY" - ger "Das Vermischen von GROUP-Feldern (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Feldern ist nicht zulssig, wenn keine GROUP-BY-Klausel vorhanden ist" - hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul" - ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY" - kor "Mixing of GROUP Įs (MIN(),MAX(),COUNT(),...) 
with no GROUP Įs is illegal if there is no GROUP BY clause" - por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas no agrupadas ilegal, se no existir uma clusula de agrupamento (clusula GROUP BY)" - rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY" - rus " (GROUP) (MIN(),MAX(),COUNT(),...) , GROUP BY" - serbian "Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrena ako ne postoji 'GROUP BY' iskaz" - spa "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY" - swe "Man fr ha bde GROUP-kolumner (MIN(),MAX(),COUNT()...) och flt i en frga om man inte har en GROUP BY-del" - ukr "ͦ GROUP æ (MIN(),MAX(),COUNT()...) GROUP , GROUP BY" + cze "Pokud nen-B dn GROUP BY klauzule, nen dovoleno souasn pouit GROUP poloek (MIN(),MAX(),COUNT()...) s ne GROUP polokami" + dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prdikat" + nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is" + eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause" + est "GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud" + fre "Mlanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY" + ger "Das Vermischen von GROUP-Feldern (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Feldern ist nicht zulssig, wenn keine GROUP-BY-Klausel vorhanden ist" + hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul" + ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY" + kor "Mixing of GROUP Įs (MIN(),MAX(),COUNT(),...) with no GROUP Įs is illegal if there is no GROUP BY clause" + por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas no agrupadas ilegal, se no existir uma clusula de agrupamento (clusula GROUP BY)" + rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY" + rus " (GROUP) (MIN(),MAX(),COUNT(),...) , GROUP BY" + serbian "Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrena ako ne postoji 'GROUP BY' iskaz" + spa "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY" + swe "Man fr ha bde GROUP-kolumner (MIN(),MAX(),COUNT()...) och flt i en frga om man inte har en GROUP BY-del" + ukr "ͦ GROUP æ (MIN(),MAX(),COUNT()...) 
GROUP , GROUP BY" ER_NONEXISTING_GRANT 42000 - cze "Neexistuje odpov-Bdajc grant pro uivatele '%-.32s' na stroji '%-.64s'" - dan "Denne tilladelse findes ikke for brugeren '%-.32s' p vrt '%-.64s'" - nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'" - eng "There is no such grant defined for user '%-.32s' on host '%-.64s'" - jps "[U[ '%-.32s' (zXg '%-.64s' ̃[U[) ͋‚Ă܂", - est "Sellist igust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'" - fre "Un tel droit n'est pas dfini pour l'utilisateur '%-.32s' sur l'hte '%-.64s'" - ger "Fr Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung" - hun "A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on" - ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'" - jpn "桼 '%-.32s' (ۥ '%-.64s' Υ桼) ϵĤƤޤ" - kor " '%-.32s' (ȣƮ '%-.64s') Ͽ ǵ ׷ ϴ." - por "No existe tal permisso (grant) definida para o usurio '%-.32s' no 'host' '%-.64s'" - rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'" - rus " '%-.32s' '%-.64s'" - serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'" - spa "No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'" - swe "Det finns inget privilegium definierat fr anvndare '%-.32s' p '%-.64s'" - ukr " '%-.32s' '%-.64s'" + cze "Neexistuje odpov-Bdajc grant pro uivatele '%-.32s' na stroji '%-.64s'" + dan "Denne tilladelse findes ikke for brugeren '%-.32s' p vrt '%-.64s'" + nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'" + eng "There is no such grant defined for user '%-.32s' on host '%-.64s'" + jps "[U[ '%-.32s' (zXg '%-.64s' ̃[U[) ͋‚Ă܂", + est "Sellist igust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'" + fre "Un tel droit n'est pas dfini pour l'utilisateur '%-.32s' sur l'hte '%-.64s'" + ger "Fr Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung" + hun "A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on" + ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'" + jpn "桼 '%-.32s' (ۥ '%-.64s' Υ桼) ϵĤƤޤ" + kor " '%-.32s' (ȣƮ '%-.64s') Ͽ ǵ ׷ ϴ." 
+ por "No existe tal permisso (grant) definida para o usurio '%-.32s' no 'host' '%-.64s'" + rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'" + rus " '%-.32s' '%-.64s'" + serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'" + spa "No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'" + swe "Det finns inget privilegium definierat fr anvndare '%-.32s' p '%-.64s'" + ukr " '%-.32s' '%-.64s'" ER_TABLEACCESS_DENIED_ERROR 42000 - cze "%-.16s p-Bkaz nepstupn pro uivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'" - dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'" - nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'" - eng "%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'" - jps "R}h %-.16s [U[ '%-.32s'@'%-.64s' ,e[u '%-.64s' ɑ΂ċ‚Ă܂", - est "%-.16s ksk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'" - fre "La commande '%-.16s' est interdite l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'" - ger "%-.16s Befehl nicht erlaubt fr Benutzer '%-.32s'@'%-.64s' auf Tabelle '%-.64s'" - hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban" - ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'" - jpn "ޥ %-.16s 桼 '%-.32s'@'%-.64s' ,ơ֥ '%-.64s' ФƵĤƤޤ" - kor "'%-.16s' ڿ źεǾϴ. : '%-.32s'@'%-.64s' for ̺ '%-.64s'" - por "Comando '%-.16s' negado para o usurio '%-.32s'@'%-.64s' na tabela '%-.64s'" - rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'" - rus " %-.16s '%-.32s'@'%-.64s' '%-.64s'" - serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'" - spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'" - swe "%-.16s ej tilltet fr '%-.32s'@'%-.64s' fr tabell '%-.64s'" - ukr "%-.16s : '%-.32s'@'%-.64s' æ '%-.64s'" + cze "%-.16s p-Bkaz nepstupn pro uivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'" + dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'" + nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'" + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'" + jps "R}h %-.16s [U[ '%-.32s'@'%-.64s' ,e[u '%-.64s' ɑ΂ċ‚Ă܂", + est "%-.16s ksk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'" + fre "La commande '%-.16s' est interdite l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'" + ger "%-.16s Befehl nicht erlaubt fr Benutzer '%-.32s'@'%-.64s' auf Tabelle '%-.64s'" + hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban" + ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'" + jpn "ޥ %-.16s 桼 '%-.32s'@'%-.64s' ,ơ֥ '%-.64s' ФƵĤƤޤ" + kor "'%-.16s' ڿ źεǾϴ. 
: '%-.32s'@'%-.64s' for ̺ '%-.64s'" + por "Comando '%-.16s' negado para o usurio '%-.32s'@'%-.64s' na tabela '%-.64s'" + rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'" + rus " %-.16s '%-.32s'@'%-.64s' '%-.64s'" + serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'" + spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'" + swe "%-.16s ej tilltet fr '%-.32s'@'%-.64s' fr tabell '%-.64s'" + ukr "%-.16s : '%-.32s'@'%-.64s' æ '%-.64s'" ER_COLUMNACCESS_DENIED_ERROR 42000 - cze "%-.16s p-Bkaz nepstupn pro uivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'" - dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'" - nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'" - eng "%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'" - jps "R}h %-.16s [U[ '%-.32s'@'%-.64s'\n J '%-.64s' e[u '%-.64s' ɑ΂ċ‚Ă܂", - est "%-.16s ksk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'" - fre "La commande '%-.16s' est interdite l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'" - ger "%-.16s Befehl nicht erlaubt fr Benutzer '%-.32s'@'%-.64s' und Feld '%-.64s' in Tabelle '%-.64s'" - hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban" - ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'" - jpn "ޥ %-.16s 桼 '%-.32s'@'%-.64s'\n '%-.64s' ơ֥ '%-.64s' ФƵĤƤޤ" - kor "'%-.16s' ڿ źεǾϴ. : '%-.32s'@'%-.64s' for Į '%-.64s' in ̺ '%-.64s'" - por "Comando '%-.16s' negado para o usurio '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'" - rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'" - rus " %-.16s '%-.32s'@'%-.64s' '%-.64s' '%-.64s'" - serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'" - spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'" - swe "%-.16s ej tilltet fr '%-.32s'@'%-.64s' fr kolumn '%-.64s' i tabell '%-.64s'" - ukr "%-.16s : '%-.32s'@'%-.64s' '%-.64s' æ '%-.64s'" + cze "%-.16s p-Bkaz nepstupn pro uivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'" + dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'" + nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'" + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'" + jps "R}h %-.16s [U[ '%-.32s'@'%-.64s'\n J '%-.64s' e[u '%-.64s' ɑ΂ċ‚Ă܂", + est "%-.16s ksk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'" + fre "La commande '%-.16s' est interdite l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'" + ger "%-.16s Befehl nicht erlaubt fr Benutzer '%-.32s'@'%-.64s' und Feld '%-.64s' in Tabelle '%-.64s'" + hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban" + ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'" + jpn "ޥ %-.16s 桼 '%-.32s'@'%-.64s'\n '%-.64s' ơ֥ '%-.64s' ФƵĤƤޤ" + kor "'%-.16s' ڿ źεǾϴ. 
: '%-.32s'@'%-.64s' for Į '%-.64s' in ̺ '%-.64s'" + por "Comando '%-.16s' negado para o usurio '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'" + rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'" + rus " %-.16s '%-.32s'@'%-.64s' '%-.64s' '%-.64s'" + serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'" + spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'" + swe "%-.16s ej tilltet fr '%-.32s'@'%-.64s' fr kolumn '%-.64s' i tabell '%-.64s'" + ukr "%-.16s : '%-.32s'@'%-.64s' '%-.64s' æ '%-.64s'" ER_ILLEGAL_GRANT_FOR_TABLE 42000 - cze "Neplatn-B pkaz GRANT/REVOKE. Prosm, pette si v manulu, jak privilegia je mon pout." - dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres." - nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden." - eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used" - est "Vigane GRANT/REVOKE ksk. Tutvu kasutajajuhendiga" - fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel." - ger "Unzulssiger GRANT- oder REVOKE-Befehl. Verfgbare Berechtigungen sind im Handbuch aufgefhrt" - greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used." - hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek" - ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati." - jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." - kor "߸ GRANT/REVOKE .  Ǹ Ǿ ִ ޴ ÿ." - nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." - norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." - pol "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." - por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilgios podem ser usados." - rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite." - rus " GRANT REVOKE. , , " - serbian "Pogrena 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priruniku koje vrednosti mogu biti upotrebljene." - slo "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." - spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados." - swe "Felaktigt GRANT-privilegium anvnt" - ukr " GRANT/REVOKE ; æ , ˦ " + cze "Neplatn-B pkaz GRANT/REVOKE. Prosm, pette si v manulu, jak privilegia je mon pout." + dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres." + nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden." + eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used" + est "Vigane GRANT/REVOKE ksk. Tutvu kasutajajuhendiga" + fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel." + ger "Unzulssiger GRANT- oder REVOKE-Befehl. Verfgbare Berechtigungen sind im Handbuch aufgefhrt" + greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used." + hun "Ervenytelen GRANT/REVOKE parancs. 
Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek" + ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati." + jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + kor "߸ GRANT/REVOKE .  Ǹ Ǿ ִ ޴ ÿ." + nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + pol "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilgios podem ser usados." + rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite." + rus " GRANT REVOKE. , , " + serbian "Pogrena 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priruniku koje vrednosti mogu biti upotrebljene." + slo "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados." + swe "Felaktigt GRANT-privilegium anvnt" + ukr " GRANT/REVOKE ; æ , ˦ " ER_GRANT_WRONG_HOST_OR_USER 42000 - cze "Argument p-Bkazu GRANT uivatel nebo stroj je pli dlouh" - dan "Vrts- eller brugernavn for langt til GRANT" - nla "De host of gebruiker parameter voor GRANT is te lang" - eng "The host or user argument to GRANT is too long" - est "Masina vi kasutaja nimi GRANT lauses on liiga pikk" - fre "L'hte ou l'utilisateur donn en argument GRANT est trop long" - ger "Das Host- oder User-Argument fr GRANT ist zu lang" - hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban" - ita "L'argomento host o utente per la GRANT e` troppo lungo" - kor "(GRANT) Ͽ ڳ ȣƮ ʹ ϴ." - por "Argumento de 'host' ou de usurio para o GRANT longo demais" - rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung" - rus " / GRANT" - serbian "Argument 'host' ili 'korisnik' prosleen komandi 'GRANT' je predugaak" - spa "El argumento para servidor o usuario para GRANT es demasiado grande" - swe "Felaktigt maskinnamn eller anvndarnamn anvnt med GRANT" - ukr " host user GRANT " + cze "Argument p-Bkazu GRANT uivatel nebo stroj je pli dlouh" + dan "Vrts- eller brugernavn for langt til GRANT" + nla "De host of gebruiker parameter voor GRANT is te lang" + eng "The host or user argument to GRANT is too long" + est "Masina vi kasutaja nimi GRANT lauses on liiga pikk" + fre "L'hte ou l'utilisateur donn en argument GRANT est trop long" + ger "Das Host- oder User-Argument fr GRANT ist zu lang" + hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban" + ita "L'argomento host o utente per la GRANT e` troppo lungo" + kor "(GRANT) Ͽ ڳ ȣƮ ʹ ϴ." 
+ por "Argumento de 'host' ou de usurio para o GRANT longo demais" + rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung" + rus " / GRANT" + serbian "Argument 'host' ili 'korisnik' prosleen komandi 'GRANT' je predugaak" + spa "El argumento para servidor o usuario para GRANT es demasiado grande" + swe "Felaktigt maskinnamn eller anvndarnamn anvnt med GRANT" + ukr " host user GRANT " ER_NO_SUCH_TABLE 42S02 - cze "Tabulka '%-.64s.%s' neexistuje" - dan "Tabellen '%-.64s.%-.64s' eksisterer ikke" - nla "Tabel '%-.64s.%s' bestaat niet" - eng "Table '%-.64s.%-.64s' doesn't exist" - est "Tabelit '%-.64s.%-.64s' ei eksisteeri" - fre "La table '%-.64s.%s' n'existe pas" - ger "Tabelle '%-.64s.%-.64s' existiert nicht" - hun "A '%-.64s.%s' tabla nem letezik" - ita "La tabella '%-.64s.%s' non esiste" - jpn "Table '%-.64s.%s' doesn't exist" - kor "̺ '%-.64s.%s' ʽϴ." - nor "Table '%-.64s.%s' doesn't exist" - norwegian-ny "Table '%-.64s.%s' doesn't exist" - pol "Table '%-.64s.%s' doesn't exist" - por "Tabela '%-.64s.%-.64s' no existe" - rum "Tabela '%-.64s.%-.64s' nu exista" - rus " '%-.64s.%-.64s' " - serbian "Tabela '%-.64s.%-.64s' ne postoji" - slo "Table '%-.64s.%s' doesn't exist" - spa "Tabla '%-.64s.%s' no existe" - swe "Det finns ingen tabell som heter '%-.64s.%s'" - ukr " '%-.64s.%-.64s' դ" + cze "Tabulka '%-.64s.%s' neexistuje" + dan "Tabellen '%-.64s.%-.64s' eksisterer ikke" + nla "Tabel '%-.64s.%s' bestaat niet" + eng "Table '%-.64s.%-.64s' doesn't exist" + est "Tabelit '%-.64s.%-.64s' ei eksisteeri" + fre "La table '%-.64s.%s' n'existe pas" + ger "Tabelle '%-.64s.%-.64s' existiert nicht" + hun "A '%-.64s.%s' tabla nem letezik" + ita "La tabella '%-.64s.%s' non esiste" + jpn "Table '%-.64s.%s' doesn't exist" + kor "̺ '%-.64s.%s' ʽϴ." + nor "Table '%-.64s.%s' doesn't exist" + norwegian-ny "Table '%-.64s.%s' doesn't exist" + pol "Table '%-.64s.%s' doesn't exist" + por "Tabela '%-.64s.%-.64s' no existe" + rum "Tabela '%-.64s.%-.64s' nu exista" + rus " '%-.64s.%-.64s' " + serbian "Tabela '%-.64s.%-.64s' ne postoji" + slo "Table '%-.64s.%s' doesn't exist" + spa "Tabla '%-.64s.%s' no existe" + swe "Det finns ingen tabell som heter '%-.64s.%s'" + ukr " '%-.64s.%-.64s' դ" ER_NONEXISTING_TABLE_GRANT 42000 - cze "Neexistuje odpov-Bdajc grant pro uivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'" - dan "Denne tilladelse eksisterer ikke for brugeren '%-.32s' p vrt '%-.64s' for tabellen '%-.64s'" - nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'" - eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'" - est "Sellist igust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'" - fre "Un tel droit n'est pas dfini pour l'utilisateur '%-.32s' sur l'hte '%-.64s' sur la table '%-.64s'" - ger "Eine solche Berechtigung ist fr User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s' nicht definiert" - hun "A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett" - ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'" - kor " '%-.32s'(ȣƮ '%-.64s') ̺ '%-.64s' ϱ Ͽ ǵ ϴ. 
" - por "No existe tal permisso (grant) definido para o usurio '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'" - rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'" - rus " '%-.32s' '%-.64s' '%-.64s'" - serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'" - spa "No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'" - swe "Det finns inget privilegium definierat fr anvndare '%-.32s' p '%-.64s' fr tabell '%-.64s'" - ukr " '%-.32s' '%-.64s' æ '%-.64s'" + cze "Neexistuje odpov-Bdajc grant pro uivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'" + dan "Denne tilladelse eksisterer ikke for brugeren '%-.32s' p vrt '%-.64s' for tabellen '%-.64s'" + nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'" + eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'" + est "Sellist igust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'" + fre "Un tel droit n'est pas dfini pour l'utilisateur '%-.32s' sur l'hte '%-.64s' sur la table '%-.64s'" + ger "Eine solche Berechtigung ist fr User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s' nicht definiert" + hun "A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett" + ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'" + kor " '%-.32s'(ȣƮ '%-.64s') ̺ '%-.64s' ϱ Ͽ ǵ ϴ. " + por "No existe tal permisso (grant) definido para o usurio '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'" + rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'" + rus " '%-.32s' '%-.64s' '%-.64s'" + serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'" + spa "No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'" + swe "Det finns inget privilegium definierat fr anvndare '%-.32s' p '%-.64s' fr tabell '%-.64s'" + ukr " '%-.32s' '%-.64s' æ '%-.64s'" ER_NOT_ALLOWED_COMMAND 42000 - cze "Pou-Bit pkaz nen v tto verzi MySQL povolen" - dan "Den brugte kommando er ikke tilladt med denne udgave af MySQL" - nla "Het used commando is niet toegestaan in deze MySQL versie" - eng "The used command is not allowed with this MySQL version" - est "Antud ksk ei ole lubatud kesolevas MySQL versioonis" - fre "Cette commande n'existe pas dans cette version de MySQL" - ger "Der verwendete Befehl ist in dieser MySQL-Version nicht zulssig" - hun "A hasznalt parancs nem engedelyezett ebben a MySQL verzioban" - ita "Il comando utilizzato non e` supportato in questa versione di MySQL" - kor " MySQL ̿ ʽϴ." 
- por "Comando usado no permitido para esta verso do MySQL" - rum "Comanda folosita nu este permisa pentru aceasta versiune de MySQL" - rus " MySQL" - serbian "Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera" - spa "El comando usado no es permitido con esta versin de MySQL" - swe "Du kan inte anvnda detta kommando med denna MySQL version" - ukr " æ Ӧ MySQL" + cze "Pou-Bit pkaz nen v tto verzi MySQL povolen" + dan "Den brugte kommando er ikke tilladt med denne udgave af MySQL" + nla "Het used commando is niet toegestaan in deze MySQL versie" + eng "The used command is not allowed with this MySQL version" + est "Antud ksk ei ole lubatud kesolevas MySQL versioonis" + fre "Cette commande n'existe pas dans cette version de MySQL" + ger "Der verwendete Befehl ist in dieser MySQL-Version nicht zulssig" + hun "A hasznalt parancs nem engedelyezett ebben a MySQL verzioban" + ita "Il comando utilizzato non e` supportato in questa versione di MySQL" + kor " MySQL ̿ ʽϴ." + por "Comando usado no permitido para esta verso do MySQL" + rum "Comanda folosita nu este permisa pentru aceasta versiune de MySQL" + rus " MySQL" + serbian "Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera" + spa "El comando usado no es permitido con esta versin de MySQL" + swe "Du kan inte anvnda detta kommando med denna MySQL version" + ukr " æ Ӧ MySQL" ER_SYNTAX_ERROR 42000 - cze "Va-Be syntaxe je njak divn" - dan "Der er en fejl i SQL syntaksen" - nla "Er is iets fout in de gebruikte syntax" - eng "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use" - est "Viga SQL sntaksis" - fre "Erreur de syntaxe" - ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen" - greek "You have an error in your SQL syntax" - hun "Szintaktikai hiba" - ita "Errore di sintassi nella query SQL" - jpn "Something is wrong in your syntax" - kor "SQL ֽϴ." - nor "Something is wrong in your syntax" - norwegian-ny "Something is wrong in your syntax" - pol "Something is wrong in your syntax" - por "Voc tem um erro de sintaxe no seu SQL" - rum "Aveti o eroare in sintaxa RSQL" - rus " . MySQL " - serbian "Imate greku u vaoj SQL sintaksi" - slo "Something is wrong in your syntax" - spa "Algo est equivocado en su sintax" - swe "Du har ngot fel i din syntax" - ukr " Ӧ SQL" + cze "Va-Be syntaxe je njak divn" + dan "Der er en fejl i SQL syntaksen" + nla "Er is iets fout in de gebruikte syntax" + eng "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use" + est "Viga SQL sntaksis" + fre "Erreur de syntaxe" + ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen" + greek "You have an error in your SQL syntax" + hun "Szintaktikai hiba" + ita "Errore di sintassi nella query SQL" + jpn "Something is wrong in your syntax" + kor "SQL ֽϴ." + nor "Something is wrong in your syntax" + norwegian-ny "Something is wrong in your syntax" + pol "Something is wrong in your syntax" + por "Voc tem um erro de sintaxe no seu SQL" + rum "Aveti o eroare in sintaxa RSQL" + rus " . 
MySQL " + serbian "Imate greku u vaoj SQL sintaksi" + slo "Something is wrong in your syntax" + spa "Algo est equivocado en su sintax" + swe "Du har ngot fel i din syntax" + ukr " Ӧ SQL" ER_DELAYED_CANT_CHANGE_LOCK - cze "Zpo-Bdn insert threadu nebyl schopen zskat poadovan zmek pro tabulku %-.64s" - dan "Forsinket indsttelse trden (delayed insert thread) kunne ikke opn ls p tabellen %-.64s" - nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s" - eng "Delayed insert thread couldn't get requested lock for table %-.64s" - est "INSERT DELAYED lim ei suutnud saada soovitud lukku tabelile %-.64s" - fre "La tche 'delayed insert' n'a pas pu obtenir le verrou dmand sur la table %-.64s" - ger "Verzgerter (DELAYED) Einfge-Thread konnte die angeforderte Sperre fr Tabelle '%-.64s' nicht erhalten" - hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz" - ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s" - kor " insert 尡 ̺ %-.64s 䱸 ŷ ó ϴ." - por "'Thread' de insero retardada (atrasada) pois no conseguiu obter a trava solicitada para tabela '%-.64s'" - rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s" - rus ", (delayed insert), %-.64s" - serbian "Prolongirani 'INSERT' thread nije mogao da dobije traeno zakljuavanje tabele '%-.64s'" - spa "Thread de insercin retarda no pudiendo bloquear para la tabla %-.64s" - swe "DELAYED INSERT-trden kunde inte lsa tabell '%-.64s'" - ukr " INSERT DELAYED æ %-.64s" + cze "Zpo-Bdn insert threadu nebyl schopen zskat poadovan zmek pro tabulku %-.64s" + dan "Forsinket indsttelse trden (delayed insert thread) kunne ikke opn ls p tabellen %-.64s" + nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s" + eng "Delayed insert thread couldn't get requested lock for table %-.64s" + est "INSERT DELAYED lim ei suutnud saada soovitud lukku tabelile %-.64s" + fre "La tche 'delayed insert' n'a pas pu obtenir le verrou dmand sur la table %-.64s" + ger "Verzgerter (DELAYED) Einfge-Thread konnte die angeforderte Sperre fr Tabelle '%-.64s' nicht erhalten" + hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz" + ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s" + kor " insert 尡 ̺ %-.64s 䱸 ŷ ó ϴ." + por "'Thread' de insero retardada (atrasada) pois no conseguiu obter a trava solicitada para tabela '%-.64s'" + rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s" + rus ", (delayed insert), %-.64s" + serbian "Prolongirani 'INSERT' thread nije mogao da dobije traeno zakljuavanje tabele '%-.64s'" + spa "Thread de insercin retarda no pudiendo bloquear para la tabla %-.64s" + swe "DELAYED INSERT-trden kunde inte lsa tabell '%-.64s'" + ukr " INSERT DELAYED æ %-.64s" ER_TOO_MANY_DELAYED_THREADS - cze "P-Bli mnoho zpodnch thread" - dan "For mange slettede trde (threads) i brug" - nla "Te veel 'delayed' threads in gebruik" - eng "Too many delayed threads in use" - est "Liiga palju DELAYED limesid kasutusel" - fre "Trop de tche 'delayed' en cours" - ger "Zu viele verzgerte (DELAYED) Threads in Verwendung" - hun "Tul sok kesletetett thread (delayed)" - ita "Troppi threads ritardati in uso" - kor "ʹ 带 ϰ ֽϴ." 
- por "Excesso de 'threads' retardadas (atrasadas) em uso" - rum "Prea multe threaduri aminate care sint in uz" - rus " , (delayed insert)" - serbian "Previe prolongiranih thread-ova je u upotrebi" - spa "Muchos threads retardados en uso" - swe "Det finns redan 'max_delayed_threads' trdar i anvnding" - ukr " Ǧ դ" + cze "P-Bli mnoho zpodnch thread" + dan "For mange slettede trde (threads) i brug" + nla "Te veel 'delayed' threads in gebruik" + eng "Too many delayed threads in use" + est "Liiga palju DELAYED limesid kasutusel" + fre "Trop de tche 'delayed' en cours" + ger "Zu viele verzgerte (DELAYED) Threads in Verwendung" + hun "Tul sok kesletetett thread (delayed)" + ita "Troppi threads ritardati in uso" + kor "ʹ 带 ϰ ֽϴ." + por "Excesso de 'threads' retardadas (atrasadas) em uso" + rum "Prea multe threaduri aminate care sint in uz" + rus " , (delayed insert)" + serbian "Previe prolongiranih thread-ova je u upotrebi" + spa "Muchos threads retardados en uso" + swe "Det finns redan 'max_delayed_threads' trdar i anvnding" + ukr " Ǧ դ" ER_ABORTING_CONNECTION 08S01 - cze "Zru-Beno spojen %ld do databze: '%-.64s' uivatel: '%-.64s' (%s)" - dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.64s' (%-.64s)" - nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.64s' (%s)" - eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" - est "hendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)" - fre "Connection %ld avorte vers la bd: '%-.64s' utilisateur: '%-.64s' (%s)" - ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.64s' (%-.64s)" - hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.64s' (%s)" - ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.64s' (%s)" - jpn "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - kor "Ÿ̽ %ld ߴܵ : '%-.64s' : '%-.64s' (%s)" - nor "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - pol "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - por "Conexo %ld abortou para o banco de dados '%-.64s' - usurio '%-.32s' (%-.64s)" - rum "Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)" - rus " %ld '%-.64s' '%-.32s' (%-.64s)" - serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)" - slo "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - spa "Conexin abortada %ld para db: '%-.64s' usuario: '%-.64s' (%s)" - swe "Avbrt lnken fr trd %ld till db '%-.64s', anvndare '%-.64s' (%s)" - ukr " ' %ld : '%-.64s' : '%-.32s' (%-.64s)" + cze "Zru-Beno spojen %ld do databze: '%-.64s' uivatel: '%-.64s' (%s)" + dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.64s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.64s' (%s)" + eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + est "hendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)" + fre "Connection %ld avorte vers la bd: '%-.64s' utilisateur: '%-.64s' (%s)" + ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. 
Benutzer: '%-.64s' (%-.64s)" + hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.64s' (%s)" + ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.64s' (%s)" + jpn "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + kor "Ÿ̽ %ld ߴܵ : '%-.64s' : '%-.64s' (%s)" + nor "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + pol "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + por "Conexo %ld abortou para o banco de dados '%-.64s' - usurio '%-.32s' (%-.64s)" + rum "Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)" + rus " %ld '%-.64s' '%-.32s' (%-.64s)" + serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)" + slo "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + spa "Conexin abortada %ld para db: '%-.64s' usuario: '%-.64s' (%s)" + swe "Avbrt lnken fr trd %ld till db '%-.64s', anvndare '%-.64s' (%s)" + ukr " ' %ld : '%-.64s' : '%-.32s' (%-.64s)" ER_NET_PACKET_TOO_LARGE 08S01 - cze "Zji-Btn pchoz packet del ne 'max_allowed_packet'" - dan "Modtog en datapakke som var strre end 'max_allowed_packet'" - nla "Groter pakket ontvangen dan 'max_allowed_packet'" - eng "Got a packet bigger than 'max_allowed_packet' bytes" - est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga" - fre "Paquet plus grand que 'max_allowed_packet' reu" - ger "Empfangenes Paket ist grer als 'max_allowed_packet' Bytes" - hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'" - ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'" - kor "'max_allowed_packet' ū Ŷ ޾ҽϴ." - por "Obteve um pacote maior do que a taxa mxima de pacotes definida (max_allowed_packet)" - rum "Un packet mai mare decit 'max_allowed_packet' a fost primit" - rus " , 'max_allowed_packet'" - serbian "Primio sam mreni paket vei od definisane vrednosti 'max_allowed_packet'" - spa "Obtenido un paquete mayor que 'max_allowed_packet'" - swe "Kommunkationspaketet r strre n 'max_allowed_packet'" - ukr " ¦ Φ max_allowed_packet" + cze "Zji-Btn pchoz packet del ne 'max_allowed_packet'" + dan "Modtog en datapakke som var strre end 'max_allowed_packet'" + nla "Groter pakket ontvangen dan 'max_allowed_packet'" + eng "Got a packet bigger than 'max_allowed_packet' bytes" + est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga" + fre "Paquet plus grand que 'max_allowed_packet' reu" + ger "Empfangenes Paket ist grer als 'max_allowed_packet' Bytes" + hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'" + ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'" + kor "'max_allowed_packet' ū Ŷ ޾ҽϴ." 
+ por "Obteve um pacote maior do que a taxa mxima de pacotes definida (max_allowed_packet)" + rum "Un packet mai mare decit 'max_allowed_packet' a fost primit" + rus " , 'max_allowed_packet'" + serbian "Primio sam mreni paket vei od definisane vrednosti 'max_allowed_packet'" + spa "Obtenido un paquete mayor que 'max_allowed_packet'" + swe "Kommunkationspaketet r strre n 'max_allowed_packet'" + ukr " ¦ Φ max_allowed_packet" ER_NET_READ_ERROR_FROM_PIPE 08S01 - cze "Zji-Btna chyba pi ten z roury spojen" - dan "Fik lsefejl fra forbindelse (connection pipe)" - nla "Kreeg leesfout van de verbindings pipe" - eng "Got a read error from the connection pipe" - est "Viga hendustoru lugemisel" - fre "Erreur de lecture reue du pipe de connection" - ger "Lese-Fehler bei einer Verbindungs-Pipe" - hun "Olvasasi hiba a kapcsolat soran" - ita "Rilevato un errore di lettura dalla pipe di connessione" - kor " κ ߻Ͽϴ." - por "Obteve um erro de leitura no 'pipe' da conexo" - rum "Eroare la citire din cauza lui 'connection pipe'" - rus " (connection pipe)" - serbian "Greka pri itanju podataka sa pipe-a" - spa "Obtenido un error de lectura de la conexin pipe" - swe "Fick lsfel frn klienten vid lsning frn 'PIPE'" - ukr " Φæ " + cze "Zji-Btna chyba pi ten z roury spojen" + dan "Fik lsefejl fra forbindelse (connection pipe)" + nla "Kreeg leesfout van de verbindings pipe" + eng "Got a read error from the connection pipe" + est "Viga hendustoru lugemisel" + fre "Erreur de lecture reue du pipe de connection" + ger "Lese-Fehler bei einer Verbindungs-Pipe" + hun "Olvasasi hiba a kapcsolat soran" + ita "Rilevato un errore di lettura dalla pipe di connessione" + kor " κ ߻Ͽϴ." + por "Obteve um erro de leitura no 'pipe' da conexo" + rum "Eroare la citire din cauza lui 'connection pipe'" + rus " (connection pipe)" + serbian "Greka pri itanju podataka sa pipe-a" + spa "Obtenido un error de lectura de la conexin pipe" + swe "Fick lsfel frn klienten vid lsning frn 'PIPE'" + ukr " Φæ " ER_NET_FCNTL_ERROR 08S01 - cze "Zji-Btna chyba fcntl()" - dan "Fik fejlmeddelelse fra fcntl()" - nla "Kreeg fout van fcntl()" - eng "Got an error from fcntl()" - est "fcntl() tagastas vea" - fre "Erreur reue de fcntl() " - ger "fcntl() lieferte einen Fehler" - hun "Hiba a fcntl() fuggvenyben" - ita "Rilevato un errore da fcntl()" - kor "fcntl() Լκ ߻Ͽϴ." - por "Obteve um erro em fcntl()" - rum "Eroare obtinuta de la fcntl()" - rus " fcntl()" - serbian "Greka pri izvravanju funkcije fcntl()" - spa "Obtenido un error de fcntl()" - swe "Fick fatalt fel frn 'fcntl()'" - ukr " צ fcntl()" + cze "Zji-Btna chyba fcntl()" + dan "Fik fejlmeddelelse fra fcntl()" + nla "Kreeg fout van fcntl()" + eng "Got an error from fcntl()" + est "fcntl() tagastas vea" + fre "Erreur reue de fcntl() " + ger "fcntl() lieferte einen Fehler" + hun "Hiba a fcntl() fuggvenyben" + ita "Rilevato un errore da fcntl()" + kor "fcntl() Լκ ߻Ͽϴ." 
+ por "Obteve um erro em fcntl()" + rum "Eroare obtinuta de la fcntl()" + rus " fcntl()" + serbian "Greka pri izvravanju funkcije fcntl()" + spa "Obtenido un error de fcntl()" + swe "Fick fatalt fel frn 'fcntl()'" + ukr " צ fcntl()" ER_NET_PACKETS_OUT_OF_ORDER 08S01 - cze "P-Bchoz packety v chybnm poad" - dan "Modtog ikke datapakker i korrekt rkkeflge" - nla "Pakketten in verkeerde volgorde ontvangen" - eng "Got packets out of order" - est "Paketid saabusid vales jrjekorras" - fre "Paquets reus dans le dsordre" - ger "Pakete nicht in der richtigen Reihenfolge empfangen" - hun "Helytelen sorrendben erkezett adatcsomagok" - ita "Ricevuti pacchetti non in ordine" - kor " ʴ Ŷ ޾ҽϴ." - por "Obteve pacotes fora de ordem" - rum "Packets care nu sint ordonati au fost gasiti" - rus " " - serbian "Primio sam mrene pakete van reda" - spa "Obtenido paquetes desordenados" - swe "Kommunikationspaketen kom i fel ordning" - ukr " " + cze "P-Bchoz packety v chybnm poad" + dan "Modtog ikke datapakker i korrekt rkkeflge" + nla "Pakketten in verkeerde volgorde ontvangen" + eng "Got packets out of order" + est "Paketid saabusid vales jrjekorras" + fre "Paquets reus dans le dsordre" + ger "Pakete nicht in der richtigen Reihenfolge empfangen" + hun "Helytelen sorrendben erkezett adatcsomagok" + ita "Ricevuti pacchetti non in ordine" + kor " ʴ Ŷ ޾ҽϴ." + por "Obteve pacotes fora de ordem" + rum "Packets care nu sint ordonati au fost gasiti" + rus " " + serbian "Primio sam mrene pakete van reda" + spa "Obtenido paquetes desordenados" + swe "Kommunikationspaketen kom i fel ordning" + ukr " " ER_NET_UNCOMPRESS_ERROR 08S01 - cze "Nemohu rozkomprimovat komunika-Bn packet" - dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)" - nla "Communicatiepakket kon niet worden gedecomprimeerd" - eng "Couldn't uncompress communication packet" - est "Viga andmepaketi lahtipakkimisel" - fre "Impossible de dcompresser le paquet reu" - ger "Kommunikationspaket lsst sich nicht entpacken" - hun "A kommunikacios adatcsomagok nem tomorithetok ki" - ita "Impossibile scompattare i pacchetti di comunicazione" - kor " Ŷ ϴ." - por "No conseguiu descomprimir pacote de comunicao" - rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)" - rus " , " - serbian "Ne mogu da dekompresujem mrene pakete" - spa "No puedo descomprimir paquetes de comunicacin" - swe "Kunde inte packa up kommunikationspaketet" - ukr " Φæ " + cze "Nemohu rozkomprimovat komunika-Bn packet" + dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)" + nla "Communicatiepakket kon niet worden gedecomprimeerd" + eng "Couldn't uncompress communication packet" + est "Viga andmepaketi lahtipakkimisel" + fre "Impossible de dcompresser le paquet reu" + ger "Kommunikationspaket lsst sich nicht entpacken" + hun "A kommunikacios adatcsomagok nem tomorithetok ki" + ita "Impossibile scompattare i pacchetti di comunicazione" + kor " Ŷ ϴ." 
+ por "No conseguiu descomprimir pacote de comunicao" + rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)" + rus " , " + serbian "Ne mogu da dekompresujem mrene pakete" + spa "No puedo descomprimir paquetes de comunicacin" + swe "Kunde inte packa up kommunikationspaketet" + ukr " Φæ " ER_NET_READ_ERROR 08S01 - cze "Zji-Btna chyba pi ten komunikanho packetu" - dan "Fik fejlmeddelelse ved lsning af kommunikations-pakker (communication packets)" - nla "Fout bij het lezen van communicatiepakketten" - eng "Got an error reading communication packets" - est "Viga andmepaketi lugemisel" - fre "Erreur de lecture des paquets reus" - ger "Fehler beim Lesen eines Kommunikationspakets" - hun "HIba a kommunikacios adatcsomagok olvasasa soran" - ita "Rilevato un errore ricevendo i pacchetti di comunicazione" - kor " Ŷ д ߻Ͽϴ." - por "Obteve um erro na leitura de pacotes de comunicao" - rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)" - rus " " - serbian "Greka pri primanju mrenih paketa" - spa "Obtenido un error leyendo paquetes de comunicacin" - swe "Fick ett fel vid lsning frn klienten" - ukr " Φæ Ԧ" + cze "Zji-Btna chyba pi ten komunikanho packetu" + dan "Fik fejlmeddelelse ved lsning af kommunikations-pakker (communication packets)" + nla "Fout bij het lezen van communicatiepakketten" + eng "Got an error reading communication packets" + est "Viga andmepaketi lugemisel" + fre "Erreur de lecture des paquets reus" + ger "Fehler beim Lesen eines Kommunikationspakets" + hun "HIba a kommunikacios adatcsomagok olvasasa soran" + ita "Rilevato un errore ricevendo i pacchetti di comunicazione" + kor " Ŷ д ߻Ͽϴ." + por "Obteve um erro na leitura de pacotes de comunicao" + rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)" + rus " " + serbian "Greka pri primanju mrenih paketa" + spa "Obtenido un error leyendo paquetes de comunicacin" + swe "Fick ett fel vid lsning frn klienten" + ukr " Φæ Ԧ" ER_NET_READ_INTERRUPTED 08S01 - cze "Zji-Btn timeout pi ten komunikanho packetu" - dan "Timeout-fejl ved lsning af kommunukations-pakker (communication packets)" - nla "Timeout bij het lezen van communicatiepakketten" - eng "Got timeout reading communication packets" - est "Kontrollaja letamine andmepakettide lugemisel" - fre "Timeout en lecture des paquets reus" - ger "Zeitberschreitung beim Lesen eines Kommunikationspakets" - hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran" - ita "Rilevato un timeout ricevendo i pacchetti di comunicazione" - kor " Ŷ д timeout ߻Ͽϴ." - por "Obteve expirao de tempo (timeout) na leitura de pacotes de comunicao" - rum "Timeout obtinut citind pachetele de comunicatie (communication packets)" - rus " " - serbian "Vremenski limit za itanje mrenih paketa je istekao" - spa "Obtenido timeout leyendo paquetes de comunicacin" - swe "Fick 'timeout' vid lsning frn klienten" - ukr " Φæ Ԧ" + cze "Zji-Btn timeout pi ten komunikanho packetu" + dan "Timeout-fejl ved lsning af kommunukations-pakker (communication packets)" + nla "Timeout bij het lezen van communicatiepakketten" + eng "Got timeout reading communication packets" + est "Kontrollaja letamine andmepakettide lugemisel" + fre "Timeout en lecture des paquets reus" + ger "Zeitberschreitung beim Lesen eines Kommunikationspakets" + hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran" + ita "Rilevato un timeout ricevendo i pacchetti di comunicazione" + kor " Ŷ д timeout ߻Ͽϴ." 
+ por "Obteve expirao de tempo (timeout) na leitura de pacotes de comunicao" + rum "Timeout obtinut citind pachetele de comunicatie (communication packets)" + rus " " + serbian "Vremenski limit za itanje mrenih paketa je istekao" + spa "Obtenido timeout leyendo paquetes de comunicacin" + swe "Fick 'timeout' vid lsning frn klienten" + ukr " Φæ Ԧ" ER_NET_ERROR_ON_WRITE 08S01 - cze "Zji-Btna chyba pi zpisu komunikanho packetu" - dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)" - nla "Fout bij het schrijven van communicatiepakketten" - eng "Got an error writing communication packets" - est "Viga andmepaketi kirjutamisel" - fre "Erreur d'criture des paquets envoys" - ger "Fehler beim Schreiben eines Kommunikationspakets" - hun "Hiba a kommunikacios csomagok irasa soran" - ita "Rilevato un errore inviando i pacchetti di comunicazione" - kor " Ŷ ϴ ߻Ͽϴ." - por "Obteve um erro na escrita de pacotes de comunicao" - rum "Eroare in scrierea pachetelor de comunicatie (communication packets)" - rus " " - serbian "Greka pri slanju mrenih paketa" - spa "Obtenido un error de escribiendo paquetes de comunicacin" - swe "Fick ett fel vid skrivning till klienten" - ukr " Φæ Ԧ" + cze "Zji-Btna chyba pi zpisu komunikanho packetu" + dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)" + nla "Fout bij het schrijven van communicatiepakketten" + eng "Got an error writing communication packets" + est "Viga andmepaketi kirjutamisel" + fre "Erreur d'criture des paquets envoys" + ger "Fehler beim Schreiben eines Kommunikationspakets" + hun "Hiba a kommunikacios csomagok irasa soran" + ita "Rilevato un errore inviando i pacchetti di comunicazione" + kor " Ŷ ϴ ߻Ͽϴ." + por "Obteve um erro na escrita de pacotes de comunicao" + rum "Eroare in scrierea pachetelor de comunicatie (communication packets)" + rus " " + serbian "Greka pri slanju mrenih paketa" + spa "Obtenido un error de escribiendo paquetes de comunicacin" + swe "Fick ett fel vid skrivning till klienten" + ukr " Φæ Ԧ" ER_NET_WRITE_INTERRUPTED 08S01 - cze "Zji-Btn timeout pi zpisu komunikanho packetu" - dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)" - nla "Timeout bij het schrijven van communicatiepakketten" - eng "Got timeout writing communication packets" - est "Kontrollaja letamine andmepakettide kirjutamisel" - fre "Timeout d'criture des paquets envoys" - ger "Zeitberschreitung beim Schreiben eines Kommunikationspakets" - hun "Idotullepes a kommunikacios csomagok irasa soran" - ita "Rilevato un timeout inviando i pacchetti di comunicazione" - kor " ϴ timeout ߻Ͽϴ." 
- por "Obteve expirao de tempo ('timeout') na escrita de pacotes de comunicao" - rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)" - rus " " - serbian "Vremenski limit za slanje mrenih paketa je istekao" - spa "Obtenido timeout escribiendo paquetes de comunicacin" - swe "Fick 'timeout' vid skrivning till klienten" - ukr " Φæ Ԧ" + cze "Zji-Btn timeout pi zpisu komunikanho packetu" + dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)" + nla "Timeout bij het schrijven van communicatiepakketten" + eng "Got timeout writing communication packets" + est "Kontrollaja letamine andmepakettide kirjutamisel" + fre "Timeout d'criture des paquets envoys" + ger "Zeitberschreitung beim Schreiben eines Kommunikationspakets" + hun "Idotullepes a kommunikacios csomagok irasa soran" + ita "Rilevato un timeout inviando i pacchetti di comunicazione" + kor " ϴ timeout ߻Ͽϴ." + por "Obteve expirao de tempo ('timeout') na escrita de pacotes de comunicao" + rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)" + rus " " + serbian "Vremenski limit za slanje mrenih paketa je istekao" + spa "Obtenido timeout escribiendo paquetes de comunicacin" + swe "Fick 'timeout' vid skrivning till klienten" + ukr " Φæ Ԧ" ER_TOO_LONG_STRING 42000 - cze "V-Bsledn etzec je del ne 'max_allowed_packet'" - dan "Strengen med resultater er strre end 'max_allowed_packet'" - nla "Resultaat string is langer dan 'max_allowed_packet'" - eng "Result string is longer than 'max_allowed_packet' bytes" - est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga" - fre "La chane rsultat est plus grande que 'max_allowed_packet'" - ger "Ergebnis-String ist lnger als 'max_allowed_packet' Bytes" - hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'" - ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'" - por "'String' resultante mais longa do que 'max_allowed_packet'" - rum "Sirul rezultat este mai lung decit 'max_allowed_packet'" - rus " , 'max_allowed_packet'" - serbian "Rezultujui string je dui nego to to dozvoljava parametar servera 'max_allowed_packet'" - spa "La string resultante es mayor que max_allowed_packet" - swe "Resultatstrngen r lngre n max_allowed_packet" - ukr " Φ max_allowed_packet" + cze "V-Bsledn etzec je del ne 'max_allowed_packet'" + dan "Strengen med resultater er strre end 'max_allowed_packet'" + nla "Resultaat string is langer dan 'max_allowed_packet'" + eng "Result string is longer than 'max_allowed_packet' bytes" + est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga" + fre "La chane rsultat est plus grande que 'max_allowed_packet'" + ger "Ergebnis-String ist lnger als 'max_allowed_packet' Bytes" + hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'" + ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'" + por "'String' resultante mais longa do que 'max_allowed_packet'" + rum "Sirul rezultat este mai lung decit 'max_allowed_packet'" + rus " , 'max_allowed_packet'" + serbian "Rezultujui string je dui nego to to dozvoljava parametar servera 'max_allowed_packet'" + spa "La string resultante es mayor que max_allowed_packet" + swe "Resultatstrngen r lngre n max_allowed_packet" + ukr " Φ max_allowed_packet" ER_TABLE_CANT_HANDLE_BLOB 42000 - cze "Typ pou-Bit tabulky nepodporuje BLOB/TEXT sloupce" - dan "Denne tabeltype understtter ikke brug af BLOB og TEXT kolonner" - nla "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen" - eng "The 
used table type doesn't support BLOB/TEXT columns" - est "Valitud tabelitp ei toeta BLOB/TEXT tpi vlju" - fre "Ce type de table ne supporte pas les colonnes BLOB/TEXT" - ger "Der verwendete Tabellentyp untersttzt keine BLOB- und TEXT-Felder" - hun "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket" - ita "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT" - por "Tipo de tabela usado no permite colunas BLOB/TEXT" - rum "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT" - rus " BLOB/TEXT" - serbian "Iskoriteni tip tabele ne podrava kolone tipa 'BLOB' odnosno 'TEXT'" - spa "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT" - swe "Den anvnda tabelltypen kan inte hantera BLOB/TEXT-kolumner" - ukr " æ Цդ BLOB/TEXT æ" + cze "Typ pou-Bit tabulky nepodporuje BLOB/TEXT sloupce" + dan "Denne tabeltype understtter ikke brug af BLOB og TEXT kolonner" + nla "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen" + eng "The used table type doesn't support BLOB/TEXT columns" + est "Valitud tabelitp ei toeta BLOB/TEXT tpi vlju" + fre "Ce type de table ne supporte pas les colonnes BLOB/TEXT" + ger "Der verwendete Tabellentyp untersttzt keine BLOB- und TEXT-Felder" + hun "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket" + ita "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT" + por "Tipo de tabela usado no permite colunas BLOB/TEXT" + rum "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT" + rus " BLOB/TEXT" + serbian "Iskoriteni tip tabele ne podrava kolone tipa 'BLOB' odnosno 'TEXT'" + spa "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT" + swe "Den anvnda tabelltypen kan inte hantera BLOB/TEXT-kolumner" + ukr " æ Цդ BLOB/TEXT æ" ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 - cze "Typ pou-Bit tabulky nepodporuje AUTO_INCREMENT sloupce" - dan "Denne tabeltype understtter ikke brug af AUTO_INCREMENT kolonner" - nla "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen" - eng "The used table type doesn't support AUTO_INCREMENT columns" - est "Valitud tabelitp ei toeta AUTO_INCREMENT tpi vlju" - fre "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT" - ger "Der verwendete Tabellentyp untersttzt keine AUTO_INCREMENT-Felder" - hun "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket" - ita "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT" - por "Tipo de tabela usado no permite colunas AUTO_INCREMENT" - rum "Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT" - rus " " - serbian "Iskoriteni tip tabele ne podrava kolone tipa 'AUTO_INCREMENT'" - spa "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT" - swe "Den anvnda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner" - ukr " æ Цդ AUTO_INCREMENT æ" + cze "Typ pou-Bit tabulky nepodporuje AUTO_INCREMENT sloupce" + dan "Denne tabeltype understtter ikke brug af AUTO_INCREMENT kolonner" + nla "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen" + eng "The used table type doesn't support AUTO_INCREMENT columns" + est "Valitud tabelitp ei toeta AUTO_INCREMENT tpi vlju" + fre "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT" + ger "Der verwendete Tabellentyp untersttzt keine AUTO_INCREMENT-Felder" + hun "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket" + ita "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT" + por "Tipo de tabela usado no permite colunas AUTO_INCREMENT" + rum "Tipul de tabela folosit 
nu suporta coloane de tip AUTO_INCREMENT" + rus " " + serbian "Iskoriteni tip tabele ne podrava kolone tipa 'AUTO_INCREMENT'" + spa "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT" + swe "Den anvnda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner" + ukr " æ Цդ AUTO_INCREMENT æ" ER_DELAYED_INSERT_TABLE_LOCKED - cze "INSERT DELAYED nen-B mono s tabulkou '%-.64s' pout, protoe je zamen pomoc LOCK TABLES" - dan "INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er lst med LOCK TABLES" - nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES" - eng "INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES" - est "INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES ksuga" - fre "INSERT DELAYED ne peut tre utilis avec la table '%-.64s', car elle est verroue avec LOCK TABLES" - ger "INSERT DELAYED kann fr Tabelle '%-.64s' nicht verwendet werden, da sie mit LOCK TABLES gesperrt ist" - greek "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - hun "Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)" - ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'" - jpn "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - kor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - nor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - norwegian-ny "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - pol "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - por "INSERT DELAYED no pode ser usado com a tabela '%-.64s', porque ela est travada com LOCK TABLES" - rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES" - rus " INSERT DELAYED '%-.64s', LOCK TABLES" - serbian "Komanda 'INSERT DELAYED' ne moe biti iskoritena u tabeli '%-.64s', zbog toga to je zakljuana komandom 'LOCK TABLES'" - slo "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - spa "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES" - swe "INSERT DELAYED kan inte anvndas med tabell '%-.64s', emedan den r lst med LOCK TABLES" - ukr "INSERT DELAYED '%-.64s', LOCK TABLES" + cze "INSERT DELAYED nen-B mono s tabulkou '%-.64s' pout, protoe je zamen pomoc LOCK TABLES" + dan "INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er lst med LOCK TABLES" + nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES" + eng "INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES" + est "INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES ksuga" + fre "INSERT DELAYED ne peut tre utilis avec la table '%-.64s', car elle est verroue avec LOCK TABLES" + ger "INSERT DELAYED kann fr Tabelle '%-.64s' nicht verwendet werden, da sie mit LOCK TABLES gesperrt ist" + greek "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + hun "Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)" + ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato 
con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'" + jpn "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + kor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + nor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + norwegian-ny "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + pol "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + por "INSERT DELAYED no pode ser usado com a tabela '%-.64s', porque ela est travada com LOCK TABLES" + rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES" + rus " INSERT DELAYED '%-.64s', LOCK TABLES" + serbian "Komanda 'INSERT DELAYED' ne moe biti iskoritena u tabeli '%-.64s', zbog toga to je zakljuana komandom 'LOCK TABLES'" + slo "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + spa "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES" + swe "INSERT DELAYED kan inte anvndas med tabell '%-.64s', emedan den r lst med LOCK TABLES" + ukr "INSERT DELAYED '%-.64s', LOCK TABLES" ER_WRONG_COLUMN_NAME 42000 - cze "Nespr-Bvn jmno sloupce '%-.100s'" - dan "Forkert kolonnenavn '%-.100s'" - nla "Incorrecte kolom naam '%-.100s'" - eng "Incorrect column name '%-.100s'" - est "Vigane tulba nimi '%-.100s'" - fre "Nom de colonne '%-.100s' incorrect" - ger "Falscher Spaltenname '%-.100s'" - hun "Ervenytelen mezonev: '%-.100s'" - ita "Nome colonna '%-.100s' non corretto" - por "Nome de coluna '%-.100s' incorreto" - rum "Nume increct de coloana '%-.100s'" - rus " '%-.100s'" - serbian "Pogreno ime kolone '%-.100s'" - spa "Incorrecto nombre de columna '%-.100s'" - swe "Felaktigt kolumnnamn '%-.100s'" - ukr "צ ' '%-.100s'" + cze "Nespr-Bvn jmno sloupce '%-.100s'" + dan "Forkert kolonnenavn '%-.100s'" + nla "Incorrecte kolom naam '%-.100s'" + eng "Incorrect column name '%-.100s'" + est "Vigane tulba nimi '%-.100s'" + fre "Nom de colonne '%-.100s' incorrect" + ger "Falscher Spaltenname '%-.100s'" + hun "Ervenytelen mezonev: '%-.100s'" + ita "Nome colonna '%-.100s' non corretto" + por "Nome de coluna '%-.100s' incorreto" + rum "Nume increct de coloana '%-.100s'" + rus " '%-.100s'" + serbian "Pogreno ime kolone '%-.100s'" + spa "Incorrecto nombre de columna '%-.100s'" + swe "Felaktigt kolumnnamn '%-.100s'" + ukr "צ ' '%-.100s'" ER_WRONG_KEY_COLUMN 42000 - cze "Handler pou-Bit tabulky neum indexovat sloupce '%-.64s'" - dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.64s'" - nla "De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren" - eng "The used storage engine can't index column '%-.64s'" - est "Tabelihandler ei oska indekseerida tulpa '%-.64s'" - fre "Le handler de la table ne peut index la colonne '%-.64s'" - ger "Die verwendete Speicher-Engine kann die Spalte '%-.64s' nicht indizieren" - greek "The used table handler can't index column '%-.64s'" - hun "A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni" - ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'" - jpn "The used table handler can't index column '%-.64s'" - kor "The used table handler can't index column '%-.64s'" - nor "The used table handler can't index column '%-.64s'" - norwegian-ny "The used table handler can't index column '%-.64s'" - pol "The used table handler can't index column '%-.64s'" - por "O manipulador de 
tabela usado no pode indexar a coluna '%-.64s'" - rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'" - rus " '%-.64s'" - serbian "Handler tabele ne moe da indeksira kolonu '%-.64s'" - slo "The used table handler can't index column '%-.64s'" - spa "El manipulador de tabla usado no puede indexar columna '%-.64s'" - swe "Den anvnda tabelltypen kan inte indexera kolumn '%-.64s'" - ukr " ڦ æ '%-.64s'" + cze "Handler pou-Bit tabulky neum indexovat sloupce '%-.64s'" + dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.64s'" + nla "De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren" + eng "The used storage engine can't index column '%-.64s'" + est "Tabelihandler ei oska indekseerida tulpa '%-.64s'" + fre "Le handler de la table ne peut index la colonne '%-.64s'" + ger "Die verwendete Speicher-Engine kann die Spalte '%-.64s' nicht indizieren" + greek "The used table handler can't index column '%-.64s'" + hun "A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni" + ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'" + jpn "The used table handler can't index column '%-.64s'" + kor "The used table handler can't index column '%-.64s'" + nor "The used table handler can't index column '%-.64s'" + norwegian-ny "The used table handler can't index column '%-.64s'" + pol "The used table handler can't index column '%-.64s'" + por "O manipulador de tabela usado no pode indexar a coluna '%-.64s'" + rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'" + rus " '%-.64s'" + serbian "Handler tabele ne moe da indeksira kolonu '%-.64s'" + slo "The used table handler can't index column '%-.64s'" + spa "El manipulador de tabla usado no puede indexar columna '%-.64s'" + swe "Den anvnda tabelltypen kan inte indexera kolumn '%-.64s'" + ukr " ڦ æ '%-.64s'" ER_WRONG_MRG_TABLE - cze "V-Bechny tabulky v MERGE tabulce nejsou definovny stejn" - dan "Tabellerne i MERGE er ikke defineret ens" - nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities" - eng "All tables in the MERGE table are not identically defined" - est "Kik tabelid MERGE tabeli mratluses ei ole identsed" - fre "Toutes les tables de la table de type MERGE n'ont pas la mme dfinition" - ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert" - hun "A MERGE tablaban talalhato tablak definicioja nem azonos" - ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica" - jpn "All tables in the MERGE table are not defined identically" - kor "All tables in the MERGE table are not defined identically" - nor "All tables in the MERGE table are not defined identically" - norwegian-ny "All tables in the MERGE table are not defined identically" - pol "All tables in the MERGE table are not defined identically" - por "Todas as tabelas contidas na tabela fundida (MERGE) no esto definidas identicamente" - rum "Toate tabelele din tabela MERGE nu sint definite identic" - rus " MERGE " - serbian "Tabele iskoritene u 'MERGE' tabeli nisu definisane na isti nain" - slo "All tables in the MERGE table are not defined identically" - spa "Todas las tablas en la MERGE tabla no estan definidas identicamente" - swe "Tabellerna i MERGE-tabellen r inte identiskt definierade" - ukr "æ MERGE TABLE Ҧ " + cze "V-Bechny tabulky v MERGE tabulce nejsou definovny stejn" + dan "Tabellerne i MERGE er ikke defineret ens" + nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities" + eng "All tables in the MERGE table are not identically defined" + est "Kik tabelid 
MERGE tabeli mratluses ei ole identsed" + fre "Toutes les tables de la table de type MERGE n'ont pas la mme dfinition" + ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert" + hun "A MERGE tablaban talalhato tablak definicioja nem azonos" + ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica" + jpn "All tables in the MERGE table are not defined identically" + kor "All tables in the MERGE table are not defined identically" + nor "All tables in the MERGE table are not defined identically" + norwegian-ny "All tables in the MERGE table are not defined identically" + pol "All tables in the MERGE table are not defined identically" + por "Todas as tabelas contidas na tabela fundida (MERGE) no esto definidas identicamente" + rum "Toate tabelele din tabela MERGE nu sint definite identic" + rus " MERGE " + serbian "Tabele iskoritene u 'MERGE' tabeli nisu definisane na isti nain" + slo "All tables in the MERGE table are not defined identically" + spa "Todas las tablas en la MERGE tabla no estan definidas identicamente" + swe "Tabellerna i MERGE-tabellen r inte identiskt definierade" + ukr "æ MERGE TABLE Ҧ " ER_DUP_UNIQUE 23000 - cze "Kv-Bli unique constraintu nemozu zapsat do tabulky '%-.64s'" - dan "Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler" - nla "Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking" - eng "Can't write, because of unique constraint, to table '%-.64s'" - est "Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub hesuse kitsendust" - fre "criture impossible cause d'un index UNIQUE sur la table '%-.64s'" - ger "Schreiben in Tabelle '%-.64s' nicht mglich wegen einer Eindeutigkeitsbeschrnkung (unique constraint)" - hun "A '%-.64s' nem irhato, az egyedi mezok miatt" - ita "Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`" - por "No pode gravar, devido restrio UNIQUE, na tabela '%-.64s'" - rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'" - rus " '%-.64s' - " - serbian "Zbog provere jedinstvenosti ne mogu da upiem podatke u tabelu '%-.64s'" - spa "No puedo escribir, debido al nico constraint, para tabla '%-.64s'" - swe "Kan inte skriva till tabell '%-.64s'; UNIQUE-test" - ukr " æ '%-.64s', ΦԦ" + cze "Kv-Bli unique constraintu nemozu zapsat do tabulky '%-.64s'" + dan "Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler" + nla "Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking" + eng "Can't write, because of unique constraint, to table '%-.64s'" + est "Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub hesuse kitsendust" + fre "criture impossible cause d'un index UNIQUE sur la table '%-.64s'" + ger "Schreiben in Tabelle '%-.64s' nicht mglich wegen einer Eindeutigkeitsbeschrnkung (unique constraint)" + hun "A '%-.64s' nem irhato, az egyedi mezok miatt" + ita "Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`" + por "No pode gravar, devido restrio UNIQUE, na tabela '%-.64s'" + rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'" + rus " '%-.64s' - " + serbian "Zbog provere jedinstvenosti ne mogu da upiem podatke u tabelu '%-.64s'" + spa "No puedo escribir, debido al nico constraint, para tabla '%-.64s'" + swe "Kan inte skriva till tabell '%-.64s'; UNIQUE-test" + ukr " æ '%-.64s', ΦԦ" ER_BLOB_KEY_WITHOUT_LENGTH 42000 - cze "BLOB sloupec '%-.64s' je pou-Bit ve specifikaci kle bez dlky" - dan "BLOB kolonnen 
'%-.64s' brugt i nglespecifikation uden nglelngde" - nla "BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte" - eng "BLOB/TEXT column '%-.64s' used in key specification without a key length" - est "BLOB-tpi tulp '%-.64s' on kasutusel vtmes ilma pikkust mratlemata" - fre "La colonne '%-.64s' de type BLOB est utilise dans une dfinition d'index sans longueur d'index" - ger "BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlsseldefinition ohne Schlssellngenangabe verwendet" - greek "BLOB column '%-.64s' used in key specification without a key length" - hun "BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul" - ita "La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza" - jpn "BLOB column '%-.64s' used in key specification without a key length" - kor "BLOB column '%-.64s' used in key specification without a key length" - nor "BLOB column '%-.64s' used in key specification without a key length" - norwegian-ny "BLOB column '%-.64s' used in key specification without a key length" - pol "BLOB column '%-.64s' used in key specification without a key length" - por "Coluna BLOB '%-.64s' usada na especificao de chave sem o comprimento da chave" - rum "Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita" - rus " BLOB '%-.64s' " - serbian "BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljua bez navoenja duine kljua" - slo "BLOB column '%-.64s' used in key specification without a key length" - spa "Columna BLOB column '%-.64s' usada en especificacin de clave sin tamao de la clave" - swe "Du har inte angett ngon nyckellngd fr BLOB '%-.64s'" - ukr " BLOB '%-.64s' Φ " + cze "BLOB sloupec '%-.64s' je pou-Bit ve specifikaci kle bez dlky" + dan "BLOB kolonnen '%-.64s' brugt i nglespecifikation uden nglelngde" + nla "BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte" + eng "BLOB/TEXT column '%-.64s' used in key specification without a key length" + est "BLOB-tpi tulp '%-.64s' on kasutusel vtmes ilma pikkust mratlemata" + fre "La colonne '%-.64s' de type BLOB est utilise dans une dfinition d'index sans longueur d'index" + ger "BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlsseldefinition ohne Schlssellngenangabe verwendet" + greek "BLOB column '%-.64s' used in key specification without a key length" + hun "BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul" + ita "La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza" + jpn "BLOB column '%-.64s' used in key specification without a key length" + kor "BLOB column '%-.64s' used in key specification without a key length" + nor "BLOB column '%-.64s' used in key specification without a key length" + norwegian-ny "BLOB column '%-.64s' used in key specification without a key length" + pol "BLOB column '%-.64s' used in key specification without a key length" + por "Coluna BLOB '%-.64s' usada na especificao de chave sem o comprimento da chave" + rum "Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita" + rus " BLOB '%-.64s' " + serbian "BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljua bez navoenja duine kljua" + slo "BLOB column '%-.64s' used in key specification without a key length" + spa "Columna BLOB column '%-.64s' usada en especificacin de clave sin tamao de la clave" + swe "Du har inte angett ngon nyckellngd fr BLOB '%-.64s'" + ukr " BLOB '%-.64s' Φ " 
ER_PRIMARY_CANT_HAVE_NULL 42000 - cze "V-Bechny sti primrnho kle musej bt NOT NULL; pokud potebujete NULL, pouijte UNIQUE" - dan "Alle dele af en PRIMARY KEY skal vre NOT NULL; Hvis du skal bruge NULL i nglen, brug UNIQUE istedet" - nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken" - eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead" - est "Kik PRIMARY KEY peavad olema mratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tpi vtit" - fre "Toutes les parties d'un index PRIMARY KEY doivent tre NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE" - ger "Alle Teile eines PRIMARY KEY mssen als NOT NULL definiert sein. Wenn NULL in einem Schlssel bentigt wird, muss ein UNIQUE-Schlssel verwendet werden" - hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot" - ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE" - por "Todas as partes de uma chave primria devem ser no-nulas. Se voc precisou usar um valor nulo (NULL) em uma chave, use a clusula UNIQUE em seu lugar" - rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb" - rus " (PRIMARY KEY) NOT NULL; NULL , UNIQUE" - serbian "Svi delovi primarnog kljua moraju biti razliiti od NULL; Ako Vam ipak treba NULL vrednost u kljuu, upotrebite 'UNIQUE'" - spa "Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE" - swe "Alla delar av en PRIMARY KEY mste vara NOT NULL; Om du vill ha en nyckel med NULL, anvnd UNIQUE istllet" - ukr "Ӧ PRIMARY KEY Φ NOT NULL; դ NULL ަ, UNIQUE" + cze "V-Bechny sti primrnho kle musej bt NOT NULL; pokud potebujete NULL, pouijte UNIQUE" + dan "Alle dele af en PRIMARY KEY skal vre NOT NULL; Hvis du skal bruge NULL i nglen, brug UNIQUE istedet" + nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken" + eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead" + est "Kik PRIMARY KEY peavad olema mratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tpi vtit" + fre "Toutes les parties d'un index PRIMARY KEY doivent tre NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE" + ger "Alle Teile eines PRIMARY KEY mssen als NOT NULL definiert sein. Wenn NULL in einem Schlssel bentigt wird, muss ein UNIQUE-Schlssel verwendet werden" + hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot" + ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE" + por "Todas as partes de uma chave primria devem ser no-nulas. 
Se voc precisou usar um valor nulo (NULL) em uma chave, use a clusula UNIQUE em seu lugar" + rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb" + rus " (PRIMARY KEY) NOT NULL; NULL , UNIQUE" + serbian "Svi delovi primarnog kljua moraju biti razliiti od NULL; Ako Vam ipak treba NULL vrednost u kljuu, upotrebite 'UNIQUE'" + spa "Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE" + swe "Alla delar av en PRIMARY KEY mste vara NOT NULL; Om du vill ha en nyckel med NULL, anvnd UNIQUE istllet" + ukr "Ӧ PRIMARY KEY Φ NOT NULL; դ NULL ަ, UNIQUE" ER_TOO_MANY_ROWS 42000 - cze "V-Bsledek obsahuje vce ne jeden dek" - dan "Resultatet bestod af mere end een rkke" - nla "Resultaat bevatte meer dan een rij" - eng "Result consisted of more than one row" - est "Tulemis oli rohkem kui ks kirje" - fre "Le rsultat contient plus d'un enregistrement" - ger "Ergebnis besteht aus mehr als einer Zeile" - hun "Az eredmeny tobb, mint egy sort tartalmaz" - ita "Il risultato consiste di piu` di una riga" - por "O resultado consistiu em mais do que uma linha" - rum "Resultatul constista din mai multe linii" - rus " " - serbian "Rezultat je sainjen od vie slogova" - spa "Resultado compuesto de mas que una lnea" - swe "Resultet bestod av mera n en rad" - ukr " ¦ Φ Φ æ" + cze "V-Bsledek obsahuje vce ne jeden dek" + dan "Resultatet bestod af mere end een rkke" + nla "Resultaat bevatte meer dan een rij" + eng "Result consisted of more than one row" + est "Tulemis oli rohkem kui ks kirje" + fre "Le rsultat contient plus d'un enregistrement" + ger "Ergebnis besteht aus mehr als einer Zeile" + hun "Az eredmeny tobb, mint egy sort tartalmaz" + ita "Il risultato consiste di piu` di una riga" + por "O resultado consistiu em mais do que uma linha" + rum "Resultatul constista din mai multe linii" + rus " " + serbian "Rezultat je sainjen od vie slogova" + spa "Resultado compuesto de mas que una lnea" + swe "Resultet bestod av mera n en rad" + ukr " ¦ Φ Φ æ" ER_REQUIRES_PRIMARY_KEY 42000 - cze "Tento typ tabulky vy-Baduje primrn kl" - dan "Denne tabeltype krver en primrngle" - nla "Dit tabel type heeft een primaire zoeksleutel nodig" - eng "This table type requires a primary key" - est "Antud tabelitp nuab primaarset vtit" - fre "Ce type de table ncessite une cl primaire (PRIMARY KEY)" - ger "Dieser Tabellentyp bentigt einen Primrschlssel (PRIMARY KEY)" - hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo" - ita "Questo tipo di tabella richiede una chiave primaria" - por "Este tipo de tabela requer uma chave primria" - rum "Aceast tip de tabela are nevoie de o cheie primara" - rus " " - serbian "Ovaj tip tabele zahteva da imate definisan primarni klju" - spa "Este tipo de tabla necesita de una primary key" - swe "Denna tabelltyp krver en PRIMARY KEY" - ukr " æ դ " + cze "Tento typ tabulky vy-Baduje primrn kl" + dan "Denne tabeltype krver en primrngle" + nla "Dit tabel type heeft een primaire zoeksleutel nodig" + eng "This table type requires a primary key" + est "Antud tabelitp nuab primaarset vtit" + fre "Ce type de table ncessite une cl primaire (PRIMARY KEY)" + ger "Dieser Tabellentyp bentigt einen Primrschlssel (PRIMARY KEY)" + hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo" + ita "Questo tipo di tabella richiede una chiave primaria" + por "Este tipo de tabela requer uma chave primria" + rum "Aceast tip de tabela are nevoie de o cheie primara" + rus " " + 
serbian "Ovaj tip tabele zahteva da imate definisan primarni klju" + spa "Este tipo de tabla necesita de una primary key" + swe "Denna tabelltyp krver en PRIMARY KEY" + ukr " æ դ " ER_NO_RAID_COMPILED - cze "Tato verze MySQL nen-B zkompilovna s podporou RAID" - dan "Denne udgave af MySQL er ikke oversat med understttelse af RAID" - nla "Deze versie van MySQL is niet gecompileerd met RAID ondersteuning" - eng "This version of MySQL is not compiled with RAID support" - est "Antud MySQL versioon on kompileeritud ilma RAID toeta" - fre "Cette version de MySQL n'est pas compile avec le support RAID" - ger "Diese MySQL-Version ist nicht mit RAID-Untersttzung kompiliert" - hun "Ezen leforditott MySQL verzio nem tartalmaz RAID support-ot" - ita "Questa versione di MYSQL non e` compilata con il supporto RAID" - por "Esta verso do MySQL no foi compilada com suporte a RAID" - rum "Aceasta versiune de MySQL, nu a fost compilata cu suport pentru RAID" - rus " MySQL RAID" - serbian "Ova verzija MySQL servera nije kompajlirana sa podrkom za RAID ureaje" - spa "Esta versin de MySQL no es compilada con soporte RAID" - swe "Denna version av MySQL r inte kompilerad med RAID" - ukr " Ӧ MySQL Ц Ц RAID" + cze "Tato verze MySQL nen-B zkompilovna s podporou RAID" + dan "Denne udgave af MySQL er ikke oversat med understttelse af RAID" + nla "Deze versie van MySQL is niet gecompileerd met RAID ondersteuning" + eng "This version of MySQL is not compiled with RAID support" + est "Antud MySQL versioon on kompileeritud ilma RAID toeta" + fre "Cette version de MySQL n'est pas compile avec le support RAID" + ger "Diese MySQL-Version ist nicht mit RAID-Untersttzung kompiliert" + hun "Ezen leforditott MySQL verzio nem tartalmaz RAID support-ot" + ita "Questa versione di MYSQL non e` compilata con il supporto RAID" + por "Esta verso do MySQL no foi compilada com suporte a RAID" + rum "Aceasta versiune de MySQL, nu a fost compilata cu suport pentru RAID" + rus " MySQL RAID" + serbian "Ova verzija MySQL servera nije kompajlirana sa podrkom za RAID ureaje" + spa "Esta versin de MySQL no es compilada con soporte RAID" + swe "Denna version av MySQL r inte kompilerad med RAID" + ukr " Ӧ MySQL Ц Ц RAID" ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE - cze "Update tabulky bez WHERE s kl-Bem nen v mdu bezpench update dovoleno" - dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsgte at opdatere en tabel uden en WHERE klausul, der gr brug af et KEY felt" - nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom" - eng "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column" - est "Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita" - fre "Vous tes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index" - ger "MySQL luft im sicheren Aktualisierungsmodus (safe update mode). 
Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel ein KEY-Feld anzugeben" - hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column" - ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave" - por "Voc est usando modo de atualizao seguro e tentou atualizar uma tabela sem uma clusula WHERE que use uma coluna chave" - rus " (safe update mode) WHERE" - serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljua" - spa "Tu ests usando modo de actualizacin segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna" - swe "Du anvnder 'sker uppdateringsmod' och frskte uppdatera en tabell utan en WHERE-sats som anvnder sig av en nyckel" - ukr " ͦ WHERE, դ KEY " + cze "Update tabulky bez WHERE s kl-Bem nen v mdu bezpench update dovoleno" + dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsgte at opdatere en tabel uden en WHERE klausul, der gr brug af et KEY felt" + nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom" + eng "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column" + est "Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita" + fre "Vous tes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index" + ger "MySQL luft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel ein KEY-Feld anzugeben" + hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column" + ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave" + por "Voc est usando modo de atualizao seguro e tentou atualizar uma tabela sem uma clusula WHERE que use uma coluna chave" + rus " (safe update mode) WHERE" + serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljua" + spa "Tu ests usando modo de actualizacin segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna" + swe "Du anvnder 'sker uppdateringsmod' och frskte uppdatera en tabell utan en WHERE-sats som anvnder sig av en nyckel" + ukr " ͦ WHERE, դ KEY " ER_KEY_DOES_NOT_EXITS 42000 S1009 - cze "Kl-B '%-.64s' v tabulce '%-.64s' neexistuje" - dan "Nglen '%-.64s' eksisterer ikke i tabellen '%-.64s'" - nla "Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'" - eng "Key '%-.64s' doesn't exist in table '%-.64s'" - est "Vti '%-.64s' ei eksisteeri tabelis '%-.64s'" - fre "L'index '%-.64s' n'existe pas sur la table '%-.64s'" - ger "Schlssel '%-.64s' existiert in der Tabelle '%-.64s' nicht" - hun "A '%-.64s' kulcs nem letezik a '%-.64s' tablaban" - ita "La chiave '%-.64s' non esiste nella tabella '%-.64s'" - por "Chave '%-.64s' no existe na tabela '%-.64s'" - rus " '%-.64s' '%-.64s'" - serbian "Klju '%-.64s' ne postoji u tabeli '%-.64s'" - spa "Clave '%-.64s' no existe en la tabla '%-.64s'" - swe "Nyckel '%-.64s' finns inte in tabell '%-.64s'" - ukr " '%-.64s' դ æ '%-.64s'" + cze "Kl-B '%-.64s' v tabulce '%-.64s' neexistuje" + dan "Nglen '%-.64s' eksisterer ikke i tabellen '%-.64s'" + nla "Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'" + eng "Key '%-.64s' doesn't exist in table '%-.64s'" + est "Vti '%-.64s' ei eksisteeri tabelis '%-.64s'" + fre "L'index '%-.64s' n'existe pas sur la table '%-.64s'" 
+ ger "Schlssel '%-.64s' existiert in der Tabelle '%-.64s' nicht" + hun "A '%-.64s' kulcs nem letezik a '%-.64s' tablaban" + ita "La chiave '%-.64s' non esiste nella tabella '%-.64s'" + por "Chave '%-.64s' no existe na tabela '%-.64s'" + rus " '%-.64s' '%-.64s'" + serbian "Klju '%-.64s' ne postoji u tabeli '%-.64s'" + spa "Clave '%-.64s' no existe en la tabla '%-.64s'" + swe "Nyckel '%-.64s' finns inte in tabell '%-.64s'" + ukr " '%-.64s' դ æ '%-.64s'" ER_CHECK_NO_SUCH_TABLE 42000 - cze "Nemohu otev-Bt tabulku" - dan "Kan ikke bne tabellen" - nla "Kan tabel niet openen" - eng "Can't open table" - est "Ei suuda avada tabelit" - fre "Impossible d'ouvrir la table" - ger "Kann Tabelle nicht ffnen" - hun "Nem tudom megnyitni a tablat" - ita "Impossibile aprire la tabella" - por "No pode abrir a tabela" - rus " " - serbian "Ne mogu da otvorim tabelu" - spa "No puedo abrir tabla" - swe "Kan inte ppna tabellen" - ukr " צ " + cze "Nemohu otev-Bt tabulku" + dan "Kan ikke bne tabellen" + nla "Kan tabel niet openen" + eng "Can't open table" + est "Ei suuda avada tabelit" + fre "Impossible d'ouvrir la table" + ger "Kann Tabelle nicht ffnen" + hun "Nem tudom megnyitni a tablat" + ita "Impossibile aprire la tabella" + por "No pode abrir a tabela" + rus " " + serbian "Ne mogu da otvorim tabelu" + spa "No puedo abrir tabla" + swe "Kan inte ppna tabellen" + ukr " צ " ER_CHECK_NOT_IMPLEMENTED 42000 - cze "Handler tabulky nepodporuje %s" - dan "Denne tabeltype understtter ikke %s" - nla "De 'handler' voor de tabel ondersteund geen %s" - eng "The storage engine for the table doesn't support %s" - est "Antud tabelitp ei toeta %s kske" - fre "Ce type de table ne supporte pas les %s" - ger "Die Speicher-Engine fr diese Tabelle untersttzt kein %s" - greek "The handler for the table doesn't support %s" - hun "A tabla kezeloje (handler) nem tamogatja az %s" - ita "Il gestore per la tabella non supporta il %s" - jpn "The handler for the table doesn't support %s" - kor "The handler for the table doesn't support %s" - nor "The handler for the table doesn't support %s" - norwegian-ny "The handler for the table doesn't support %s" - pol "The handler for the table doesn't support %s" - por "O manipulador de tabela no suporta %s" - rum "The handler for the table doesn't support %s" - rus " : %s" - serbian "Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande" - slo "The handler for the table doesn't support %s" - spa "El manipulador de la tabla no permite soporte para %s" - swe "Tabellhanteraren fr denna tabell kan inte gra %s" - ukr "ڦ æ Ц %s" + cze "Handler tabulky nepodporuje %s" + dan "Denne tabeltype understtter ikke %s" + nla "De 'handler' voor de tabel ondersteund geen %s" + eng "The storage engine for the table doesn't support %s" + est "Antud tabelitp ei toeta %s kske" + fre "Ce type de table ne supporte pas les %s" + ger "Die Speicher-Engine fr diese Tabelle untersttzt kein %s" + greek "The handler for the table doesn't support %s" + hun "A tabla kezeloje (handler) nem tamogatja az %s" + ita "Il gestore per la tabella non supporta il %s" + jpn "The handler for the table doesn't support %s" + kor "The handler for the table doesn't support %s" + nor "The handler for the table doesn't support %s" + norwegian-ny "The handler for the table doesn't support %s" + pol "The handler for the table doesn't support %s" + por "O manipulador de tabela no suporta %s" + rum "The handler for the table doesn't support %s" + rus " : %s" + serbian "Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande" 
+ slo "The handler for the table doesn't support %s" + spa "El manipulador de la tabla no permite soporte para %s" + swe "Tabellhanteraren fr denna tabell kan inte gra %s" + ukr "ڦ æ Ц %s" ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000 - cze "Proveden-B tohoto pkazu nen v transakci dovoleno" - dan "Du m ikke bruge denne kommando i en transaktion" - nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie" - eng "You are not allowed to execute this command in a transaction" - est "Seda ksku ei saa kasutada transaktsiooni sees" - fre "Vous n'tes pas autoris excute cette commande dans une transaction" - ger "Sie drfen diesen Befehl nicht in einer Transaktion ausfhren" - hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban" - ita "Non puoi eseguire questo comando in una transazione" - por "No lhe permitido executar este comando em uma transao" - rus " " - serbian "Nije Vam dozvoljeno da izvrite ovu komandu u transakciji" - spa "No tienes el permiso para ejecutar este comando en una transicin" - swe "Du fr inte utfra detta kommando i en transaktion" - ukr " æ" + cze "Proveden-B tohoto pkazu nen v transakci dovoleno" + dan "Du m ikke bruge denne kommando i en transaktion" + nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie" + eng "You are not allowed to execute this command in a transaction" + est "Seda ksku ei saa kasutada transaktsiooni sees" + fre "Vous n'tes pas autoris excute cette commande dans une transaction" + ger "Sie drfen diesen Befehl nicht in einer Transaktion ausfhren" + hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban" + ita "Non puoi eseguire questo comando in una transazione" + por "No lhe permitido executar este comando em uma transao" + rus " " + serbian "Nije Vam dozvoljeno da izvrite ovu komandu u transakciji" + spa "No tienes el permiso para ejecutar este comando en una transicin" + swe "Du fr inte utfra detta kommando i en transaktion" + ukr " æ" ER_ERROR_DURING_COMMIT - cze "Chyba %d p-Bi COMMIT" - dan "Modtog fejl %d mens kommandoen COMMIT blev udfrt" - nla "Kreeg fout %d tijdens COMMIT" - eng "Got error %d during COMMIT" - est "Viga %d ksu COMMIT titmisel" - fre "Erreur %d lors du COMMIT" - ger "Fehler %d beim COMMIT" - hun "%d hiba a COMMIT vegrehajtasa soran" - ita "Rilevato l'errore %d durante il COMMIT" - por "Obteve erro %d durante COMMIT" - rus " %d COMMIT" - serbian "Greka %d za vreme izvravanja komande 'COMMIT'" - spa "Obtenido error %d durante COMMIT" - swe "Fick fel %d vid COMMIT" - ukr " %d Ц COMMIT" + cze "Chyba %d p-Bi COMMIT" + dan "Modtog fejl %d mens kommandoen COMMIT blev udfrt" + nla "Kreeg fout %d tijdens COMMIT" + eng "Got error %d during COMMIT" + est "Viga %d ksu COMMIT titmisel" + fre "Erreur %d lors du COMMIT" + ger "Fehler %d beim COMMIT" + hun "%d hiba a COMMIT vegrehajtasa soran" + ita "Rilevato l'errore %d durante il COMMIT" + por "Obteve erro %d durante COMMIT" + rus " %d COMMIT" + serbian "Greka %d za vreme izvravanja komande 'COMMIT'" + spa "Obtenido error %d durante COMMIT" + swe "Fick fel %d vid COMMIT" + ukr " %d Ц COMMIT" ER_ERROR_DURING_ROLLBACK - cze "Chyba %d p-Bi ROLLBACK" - dan "Modtog fejl %d mens kommandoen ROLLBACK blev udfrt" - nla "Kreeg fout %d tijdens ROLLBACK" - eng "Got error %d during ROLLBACK" - est "Viga %d ksu ROLLBACK titmisel" - fre "Erreur %d lors du ROLLBACK" - ger "Fehler %d beim ROLLBACK" - hun "%d hiba a ROLLBACK vegrehajtasa soran" - ita "Rilevato l'errore %d durante il ROLLBACK" - por "Obteve erro %d durante 
ROLLBACK" - rus " %d ROLLBACK" - serbian "Greka %d za vreme izvravanja komande 'ROLLBACK'" - spa "Obtenido error %d durante ROLLBACK" - swe "Fick fel %d vid ROLLBACK" - ukr " %d Ц ROLLBACK" + cze "Chyba %d p-Bi ROLLBACK" + dan "Modtog fejl %d mens kommandoen ROLLBACK blev udfrt" + nla "Kreeg fout %d tijdens ROLLBACK" + eng "Got error %d during ROLLBACK" + est "Viga %d ksu ROLLBACK titmisel" + fre "Erreur %d lors du ROLLBACK" + ger "Fehler %d beim ROLLBACK" + hun "%d hiba a ROLLBACK vegrehajtasa soran" + ita "Rilevato l'errore %d durante il ROLLBACK" + por "Obteve erro %d durante ROLLBACK" + rus " %d ROLLBACK" + serbian "Greka %d za vreme izvravanja komande 'ROLLBACK'" + spa "Obtenido error %d durante ROLLBACK" + swe "Fick fel %d vid ROLLBACK" + ukr " %d Ц ROLLBACK" ER_ERROR_DURING_FLUSH_LOGS - cze "Chyba %d p-Bi FLUSH_LOGS" - dan "Modtog fejl %d mens kommandoen FLUSH_LOGS blev udfrt" - nla "Kreeg fout %d tijdens FLUSH_LOGS" - eng "Got error %d during FLUSH_LOGS" - est "Viga %d ksu FLUSH_LOGS titmisel" - fre "Erreur %d lors du FLUSH_LOGS" - ger "Fehler %d bei FLUSH_LOGS" - hun "%d hiba a FLUSH_LOGS vegrehajtasa soran" - ita "Rilevato l'errore %d durante il FLUSH_LOGS" - por "Obteve erro %d durante FLUSH_LOGS" - rus " %d FLUSH_LOGS" - serbian "Greka %d za vreme izvravanja komande 'FLUSH_LOGS'" - spa "Obtenido error %d durante FLUSH_LOGS" - swe "Fick fel %d vid FLUSH_LOGS" - ukr " %d Ц FLUSH_LOGS" + cze "Chyba %d p-Bi FLUSH_LOGS" + dan "Modtog fejl %d mens kommandoen FLUSH_LOGS blev udfrt" + nla "Kreeg fout %d tijdens FLUSH_LOGS" + eng "Got error %d during FLUSH_LOGS" + est "Viga %d ksu FLUSH_LOGS titmisel" + fre "Erreur %d lors du FLUSH_LOGS" + ger "Fehler %d bei FLUSH_LOGS" + hun "%d hiba a FLUSH_LOGS vegrehajtasa soran" + ita "Rilevato l'errore %d durante il FLUSH_LOGS" + por "Obteve erro %d durante FLUSH_LOGS" + rus " %d FLUSH_LOGS" + serbian "Greka %d za vreme izvravanja komande 'FLUSH_LOGS'" + spa "Obtenido error %d durante FLUSH_LOGS" + swe "Fick fel %d vid FLUSH_LOGS" + ukr " %d Ц FLUSH_LOGS" ER_ERROR_DURING_CHECKPOINT - cze "Chyba %d p-Bi CHECKPOINT" - dan "Modtog fejl %d mens kommandoen CHECKPOINT blev udfrt" - nla "Kreeg fout %d tijdens CHECKPOINT" - eng "Got error %d during CHECKPOINT" - est "Viga %d ksu CHECKPOINT titmisel" - fre "Erreur %d lors du CHECKPOINT" - ger "Fehler %d bei CHECKPOINT" - hun "%d hiba a CHECKPOINT vegrehajtasa soran" - ita "Rilevato l'errore %d durante il CHECKPOINT" - por "Obteve erro %d durante CHECKPOINT" - rus " %d CHECKPOINT" - serbian "Greka %d za vreme izvravanja komande 'CHECKPOINT'" - spa "Obtenido error %d durante CHECKPOINT" - swe "Fick fel %d vid CHECKPOINT" - ukr " %d Ц CHECKPOINT" + cze "Chyba %d p-Bi CHECKPOINT" + dan "Modtog fejl %d mens kommandoen CHECKPOINT blev udfrt" + nla "Kreeg fout %d tijdens CHECKPOINT" + eng "Got error %d during CHECKPOINT" + est "Viga %d ksu CHECKPOINT titmisel" + fre "Erreur %d lors du CHECKPOINT" + ger "Fehler %d bei CHECKPOINT" + hun "%d hiba a CHECKPOINT vegrehajtasa soran" + ita "Rilevato l'errore %d durante il CHECKPOINT" + por "Obteve erro %d durante CHECKPOINT" + rus " %d CHECKPOINT" + serbian "Greka %d za vreme izvravanja komande 'CHECKPOINT'" + spa "Obtenido error %d durante CHECKPOINT" + swe "Fick fel %d vid CHECKPOINT" + ukr " %d Ц CHECKPOINT" ER_NEW_ABORTING_CONNECTION 08S01 - cze "Spojen-B %ld do databze: '%-.64s' uivatel: '%-.32s' stroj: '%-.64s' (%-.64s) bylo perueno" - dan "Afbrd forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vrt: '%-.64s' (%-.64s)" - nla "Afgebroken verbinding %ld naar 
db: '%-.64s' gebruiker: '%-.32s' host: '%-.64s' (%-.64s)" - eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: '%-.64s' (%-.64s)" - est "hendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: '%-.64s' (%-.64s)" - fre "Connection %ld avorte vers la bd: '%-.64s' utilisateur: '%-.32s' hte: '%-.64s' (%-.64s)" - ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: '%-.64s' (%-.64s)" - ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)" - por "Conexo %ld abortada para banco de dados '%-.64s' - usurio '%-.32s' - 'host' '%-.64s' ('%-.64s')" - rus " %ld '%-.64s' '%-.32s' '%-.64s' (%-.64s)" - serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: '%-.64s' (%-.64s)" - spa "Abortada conexin %ld para db: '%-.64s' usuario: '%-.32s' servidor: '%-.64s' (%-.64s)" - swe "Avbrt lnken fr trd %ld till db '%-.64s', anvndare '%-.32s', host '%-.64s' (%-.64s)" - ukr " ' %ld : '%-.64s' : '%-.32s' : '%-.64s' (%-.64s)" + cze "Spojen-B %ld do databze: '%-.64s' uivatel: '%-.32s' stroj: '%-.64s' (%-.64s) bylo perueno" + dan "Afbrd forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vrt: '%-.64s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: '%-.64s' (%-.64s)" + eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: '%-.64s' (%-.64s)" + est "hendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: '%-.64s' (%-.64s)" + fre "Connection %ld avorte vers la bd: '%-.64s' utilisateur: '%-.32s' hte: '%-.64s' (%-.64s)" + ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: '%-.64s' (%-.64s)" + ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)" + por "Conexo %ld abortada para banco de dados '%-.64s' - usurio '%-.32s' - 'host' '%-.64s' ('%-.64s')" + rus " %ld '%-.64s' '%-.32s' '%-.64s' (%-.64s)" + serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: '%-.64s' (%-.64s)" + spa "Abortada conexin %ld para db: '%-.64s' usuario: '%-.32s' servidor: '%-.64s' (%-.64s)" + swe "Avbrt lnken fr trd %ld till db '%-.64s', anvndare '%-.32s', host '%-.64s' (%-.64s)" + ukr " ' %ld : '%-.64s' : '%-.32s' : '%-.64s' (%-.64s)" ER_DUMP_NOT_IMPLEMENTED - cze "Handler tabulky nepodporuje bin-Brn dump" - dan "Denne tabeltype unsersttter ikke binrt tabeldump" - nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump" - eng "The storage engine for the table does not support binary table dump" - fre "Ce type de table ne supporte pas les copies binaires" - ger "Die Speicher-Engine fr die Tabelle untersttzt keinen binren Tabellen-Dump" - ita "Il gestore per la tabella non supporta il dump binario" - jpn "The handler for the table does not support binary table dump" - por "O manipulador de tabela no suporta 'dump' binrio de tabela" - rum "The handler for the table does not support binary table dump" - rus " (dump)" - serbian "Handler tabele ne podrava binarni dump tabele" - spa "El manipulador de tabla no soporta dump para tabla binaria" - swe "Tabellhanteraren klarar inte en binr kopiering av tabellen" - ukr " æ Цդ ¦ æ" + cze "Handler tabulky nepodporuje bin-Brn dump" + dan "Denne tabeltype unsersttter ikke binrt tabeldump" + nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump" + eng "The storage engine for the table does not support binary table dump" + fre "Ce type de table ne supporte pas les copies binaires" + ger "Die 
Speicher-Engine fr die Tabelle untersttzt keinen binren Tabellen-Dump" + ita "Il gestore per la tabella non supporta il dump binario" + jpn "The handler for the table does not support binary table dump" + por "O manipulador de tabela no suporta 'dump' binrio de tabela" + rum "The handler for the table does not support binary table dump" + rus " (dump)" + serbian "Handler tabele ne podrava binarni dump tabele" + spa "El manipulador de tabla no soporta dump para tabla binaria" + swe "Tabellhanteraren klarar inte en binr kopiering av tabellen" + ukr " æ Цդ ¦ æ" ER_FLUSH_MASTER_BINLOG_CLOSED - eng "Binlog closed, cannot RESET MASTER" - ger "Binlog geschlossen. Kann RESET MASTER nicht ausfhren" - por "Binlog fechado. No pode fazer RESET MASTER" - rus " , RESET MASTER" - serbian "Binarni log file zatvoren, ne mogu da izvrim komandu 'RESET MASTER'" - ukr "̦æ , RESET MASTER" + eng "Binlog closed, cannot RESET MASTER" + ger "Binlog geschlossen. Kann RESET MASTER nicht ausfhren" + por "Binlog fechado. No pode fazer RESET MASTER" + rus " , RESET MASTER" + serbian "Binarni log file zatvoren, ne mogu da izvrim komandu 'RESET MASTER'" + ukr "̦æ , RESET MASTER" ER_INDEX_REBUILD - cze "P-Bebudovn indexu dumpnut tabulky '%-.64s' nebylo spn" - dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'" - nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'" - eng "Failed rebuilding the index of dumped table '%-.64s'" - fre "La reconstruction de l'index de la table copie '%-.64s' a chou" - ger "Neuerstellung des Index der Dump-Tabelle '%-.64s' fehlgeschlagen" - greek "Failed rebuilding the index of dumped table '%-.64s'" - hun "Failed rebuilding the index of dumped table '%-.64s'" - ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'" - por "Falhou na reconstruo do ndice da tabela 'dumped' '%-.64s'" - rus " '%-.64s'" - serbian "Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela" - spa "Falla reconstruyendo el indice de la tabla dumped '%-.64s'" - ukr " צ ϧ æ '%-.64s'" + cze "P-Bebudovn indexu dumpnut tabulky '%-.64s' nebylo spn" + dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'" + nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'" + eng "Failed rebuilding the index of dumped table '%-.64s'" + fre "La reconstruction de l'index de la table copie '%-.64s' a chou" + ger "Neuerstellung des Index der Dump-Tabelle '%-.64s' fehlgeschlagen" + greek "Failed rebuilding the index of dumped table '%-.64s'" + hun "Failed rebuilding the index of dumped table '%-.64s'" + ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'" + por "Falhou na reconstruo do ndice da tabela 'dumped' '%-.64s'" + rus " '%-.64s'" + serbian "Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela" + spa "Falla reconstruyendo el indice de la tabla dumped '%-.64s'" + ukr " צ ϧ æ '%-.64s'" ER_MASTER - cze "Chyba masteru: '%-.64s'" - dan "Fejl fra master: '%-.64s'" - nla "Fout van master: '%-.64s'" - eng "Error from master: '%-.64s'" - fre "Erreur reue du matre: '%-.64s'" - ger "Fehler vom Master: '%-.64s'" - ita "Errore dal master: '%-.64s" - por "Erro no 'master' '%-.64s'" - rus " : '%-.64s'" - serbian "Greka iz glavnog servera '%-.64s' u klasteru" - spa "Error del master: '%-.64s'" - swe "Fick en master: '%-.64s'" - ukr " צ : '%-.64s'" + cze "Chyba masteru: '%-.64s'" + dan "Fejl fra master: '%-.64s'" + nla "Fout van master: '%-.64s'" + eng "Error from master: '%-.64s'" + fre "Erreur reue du matre: '%-.64s'" + ger "Fehler vom Master: '%-.64s'" + ita 
"Errore dal master: '%-.64s" + por "Erro no 'master' '%-.64s'" + rus " : '%-.64s'" + serbian "Greka iz glavnog servera '%-.64s' u klasteru" + spa "Error del master: '%-.64s'" + swe "Fick en master: '%-.64s'" + ukr " צ : '%-.64s'" ER_MASTER_NET_READ 08S01 - cze "S-Bov chyba pi ten z masteru" - dan "Netvrksfejl ved lsning fra master" - nla "Net fout tijdens lezen van master" - eng "Net error reading from master" - fre "Erreur de lecture rseau reue du matre" - ger "Netzfehler beim Lesen vom Master" - ita "Errore di rete durante la ricezione dal master" - por "Erro de rede lendo do 'master'" - rus " " - serbian "Greka u primanju mrenih paketa sa glavnog servera u klasteru" - spa "Error de red leyendo del master" - swe "Fick ntverksfel vid lsning frn master" - ukr " צ " + cze "S-Bov chyba pi ten z masteru" + dan "Netvrksfejl ved lsning fra master" + nla "Net fout tijdens lezen van master" + eng "Net error reading from master" + fre "Erreur de lecture rseau reue du matre" + ger "Netzfehler beim Lesen vom Master" + ita "Errore di rete durante la ricezione dal master" + por "Erro de rede lendo do 'master'" + rus " " + serbian "Greka u primanju mrenih paketa sa glavnog servera u klasteru" + spa "Error de red leyendo del master" + swe "Fick ntverksfel vid lsning frn master" + ukr " צ " ER_MASTER_NET_WRITE 08S01 - cze "S-Bov chyba pi zpisu na master" - dan "Netvrksfejl ved skrivning til master" - nla "Net fout tijdens schrijven naar master" - eng "Net error writing to master" - fre "Erreur d'criture rseau reue du matre" - ger "Netzfehler beim Schreiben zum Master" - ita "Errore di rete durante l'invio al master" - por "Erro de rede gravando no 'master'" - rus " " - serbian "Greka u slanju mrenih paketa na glavni server u klasteru" - spa "Error de red escribiendo para el master" - swe "Fick ntverksfel vid skrivning till master" - ukr " " + cze "S-Bov chyba pi zpisu na master" + dan "Netvrksfejl ved skrivning til master" + nla "Net fout tijdens schrijven naar master" + eng "Net error writing to master" + fre "Erreur d'criture rseau reue du matre" + ger "Netzfehler beim Schreiben zum Master" + ita "Errore di rete durante l'invio al master" + por "Erro de rede gravando no 'master'" + rus " " + serbian "Greka u slanju mrenih paketa na glavni server u klasteru" + spa "Error de red escribiendo para el master" + swe "Fick ntverksfel vid skrivning till master" + ukr " " ER_FT_MATCHING_KEY_NOT_FOUND - cze "-Bdn sloupec nem vytvoen fulltextov index" - dan "Kan ikke finde en FULLTEXT ngle som svarer til kolonne listen" - nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst" - eng "Can't find FULLTEXT index matching the column list" - est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega" - fre "Impossible de trouver un index FULLTEXT correspondant cette liste de colonnes" - ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht" - ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne" - por "No pode encontrar um ndice para o texto todo que combine com a lista de colunas" - rus " (FULLTEXT) , " - serbian "Ne mogu da pronaem 'FULLTEXT' indeks koli odgovara listi kolona" - spa "No puedo encontrar ndice FULLTEXT correspondiendo a la lista de columnas" - swe "Hittar inte ett FULLTEXT-index i kolumnlistan" - ukr " FULLTEXT , צצ ̦ æ" + cze "-Bdn sloupec nem vytvoen fulltextov index" + dan "Kan ikke finde en FULLTEXT ngle som svarer til kolonne listen" + nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst" + eng "Can't find FULLTEXT 
index matching the column list" + est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega" + fre "Impossible de trouver un index FULLTEXT correspondant cette liste de colonnes" + ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht" + ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne" + por "No pode encontrar um ndice para o texto todo que combine com a lista de colunas" + rus " (FULLTEXT) , " + serbian "Ne mogu da pronaem 'FULLTEXT' indeks koli odgovara listi kolona" + spa "No puedo encontrar ndice FULLTEXT correspondiendo a la lista de columnas" + swe "Hittar inte ett FULLTEXT-index i kolumnlistan" + ukr " FULLTEXT , צצ ̦ æ" ER_LOCK_OR_ACTIVE_TRANSACTION - cze "Nemohu prov-Bst zadan pkaz, protoe existuj aktivn zamen tabulky nebo aktivn transakce" - dan "Kan ikke udfre den givne kommando fordi der findes aktive, lste tabeller eller fordi der udfres en transaktion" - nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie" - eng "Can't execute the given command because you have active locked tables or an active transaction" - est "Ei suuda tita antud ksku kuna on aktiivseid lukke vi kimasolev transaktsioon" - fre "Impossible d'excuter la commande car vous avez des tables verrouilles ou une transaction active" - ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausfhren" - ita "Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto" - por "No pode executar o comando dado porque voc tem tabelas ativas travadas ou uma transao ativa" - rus " , " - serbian "Ne mogu da izvrim datu komandu zbog toga to su tabele zakljuane ili je transakcija u toku" - spa "No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transicin activa" - swe "Kan inte utfra kommandot emedan du har en lst tabell eller an aktiv transaktion" - ukr " , դ æ" + cze "Nemohu prov-Bst zadan pkaz, protoe existuj aktivn zamen tabulky nebo aktivn transakce" + dan "Kan ikke udfre den givne kommando fordi der findes aktive, lste tabeller eller fordi der udfres en transaktion" + nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie" + eng "Can't execute the given command because you have active locked tables or an active transaction" + est "Ei suuda tita antud ksku kuna on aktiivseid lukke vi kimasolev transaktsioon" + fre "Impossible d'excuter la commande car vous avez des tables verrouilles ou une transaction active" + ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausfhren" + ita "Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto" + por "No pode executar o comando dado porque voc tem tabelas ativas travadas ou uma transao ativa" + rus " , " + serbian "Ne mogu da izvrim datu komandu zbog toga to su tabele zakljuane ili je transakcija u toku" + spa "No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transicin activa" + swe "Kan inte utfra kommandot emedan du har en lst tabell eller an aktiv transaktion" + ukr " , դ æ" ER_UNKNOWN_SYSTEM_VARIABLE - cze "Nezn-Bm systmov promnn '%-.64s'" - dan "Ukendt systemvariabel '%-.64s'" - nla "Onbekende systeem variabele '%-.64s'" - eng "Unknown system variable '%-.64s'" - est "Tundmatu ssteemne muutuja '%-.64s'" - fre "Variable systme '%-.64s' inconnue" - ger "Unbekannte Systemvariable '%-.64s'" - ita "Variabile 
di sistema '%-.64s' sconosciuta" - por "Varivel de sistema '%-.64s' desconhecida" - rus " '%-.64s'" - serbian "Nepoznata sistemska promenljiva '%-.64s'" - spa "Desconocida variable de sistema '%-.64s'" - swe "Oknd systemvariabel: '%-.64s'" - ukr "צ ͦ '%-.64s'" + cze "Nezn-Bm systmov promnn '%-.64s'" + dan "Ukendt systemvariabel '%-.64s'" + nla "Onbekende systeem variabele '%-.64s'" + eng "Unknown system variable '%-.64s'" + est "Tundmatu ssteemne muutuja '%-.64s'" + fre "Variable systme '%-.64s' inconnue" + ger "Unbekannte Systemvariable '%-.64s'" + ita "Variabile di sistema '%-.64s' sconosciuta" + por "Varivel de sistema '%-.64s' desconhecida" + rus " '%-.64s'" + serbian "Nepoznata sistemska promenljiva '%-.64s'" + spa "Desconocida variable de sistema '%-.64s'" + swe "Oknd systemvariabel: '%-.64s'" + ukr "צ ͦ '%-.64s'" ER_CRASHED_ON_USAGE - cze "Tabulka '%-.64s' je ozna-Bena jako poruen a mla by bt opravena" - dan "Tabellen '%-.64s' er markeret med fejl og br repareres" - nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd" - eng "Table '%-.64s' is marked as crashed and should be repaired" - est "Tabel '%-.64s' on mrgitud vigaseks ja tuleb parandada" - fre "La table '%-.64s' est marque 'crashed' et devrait tre rpare" - ger "Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden" - ita "La tabella '%-.64s' e` segnalata come corrotta e deve essere riparata" - por "Tabela '%-.64s' est marcada como danificada e deve ser reparada" - rus " '%-.64s' " - serbian "Tabela '%-.64s' je markirana kao oteena i trebala bi biti popravljena" - spa "Tabla '%-.64s' est marcada como crashed y debe ser reparada" - swe "Tabell '%-.64s' r trasig och br repareras med REPAIR TABLE" - ukr " '%-.64s' ڦ Ҧ צ" + cze "Tabulka '%-.64s' je ozna-Bena jako poruen a mla by bt opravena" + dan "Tabellen '%-.64s' er markeret med fejl og br repareres" + nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd" + eng "Table '%-.64s' is marked as crashed and should be repaired" + est "Tabel '%-.64s' on mrgitud vigaseks ja tuleb parandada" + fre "La table '%-.64s' est marque 'crashed' et devrait tre rpare" + ger "Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden" + ita "La tabella '%-.64s' e` segnalata come corrotta e deve essere riparata" + por "Tabela '%-.64s' est marcada como danificada e deve ser reparada" + rus " '%-.64s' " + serbian "Tabela '%-.64s' je markirana kao oteena i trebala bi biti popravljena" + spa "Tabla '%-.64s' est marcada como crashed y debe ser reparada" + swe "Tabell '%-.64s' r trasig och br repareras med REPAIR TABLE" + ukr " '%-.64s' ڦ Ҧ צ" ER_CRASHED_ON_REPAIR - cze "Tabulka '%-.64s' je ozna-Bena jako poruen a posledn (automatick?) oprava se nezdaila" - dan "Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede" - nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte" - eng "Table '%-.64s' is marked as crashed and last (automatic?) repair failed" - est "Tabel '%-.64s' on mrgitud vigaseks ja viimane (automaatne?) parandus ebannestus" - fre "La table '%-.64s' est marque 'crashed' et le dernier 'repair' a chou" - ger "Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl" - ita "La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita" - por "Tabela '%-.64s' est marcada como danificada e a ltima reparao (automtica?) falhou" - rus " '%-.64s' (?) 
" - serbian "Tabela '%-.64s' je markirana kao oteena, a zadnja (automatska?) popravka je bila neuspela" - spa "Tabla '%-.64s' est marcada como crashed y la ltima reparacin (automactica?) fall" - swe "Tabell '%-.64s' r trasig och senast (automatiska?) reparation misslyckades" - ukr " '%-.64s' ڦ Τ (?) צ " + cze "Tabulka '%-.64s' je ozna-Bena jako poruen a posledn (automatick?) oprava se nezdaila" + dan "Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede" + nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte" + eng "Table '%-.64s' is marked as crashed and last (automatic?) repair failed" + est "Tabel '%-.64s' on mrgitud vigaseks ja viimane (automaatne?) parandus ebannestus" + fre "La table '%-.64s' est marque 'crashed' et le dernier 'repair' a chou" + ger "Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl" + ita "La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita" + por "Tabela '%-.64s' est marcada como danificada e a ltima reparao (automtica?) falhou" + rus " '%-.64s' (?) " + serbian "Tabela '%-.64s' je markirana kao oteena, a zadnja (automatska?) popravka je bila neuspela" + spa "Tabla '%-.64s' est marcada como crashed y la ltima reparacin (automactica?) fall" + swe "Tabell '%-.64s' r trasig och senast (automatiska?) reparation misslyckades" + ukr " '%-.64s' ڦ Τ (?) צ " ER_WARNING_NOT_COMPLETE_ROLLBACK - dan "Advarsel: Visse data i tabeller der ikke understtter transaktioner kunne ikke tilbagestilles" - nla "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen" - eng "Some non-transactional changed tables couldn't be rolled back" - est "Hoiatus: mnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida" - fre "Attention: certaines tables ne supportant pas les transactions ont t changes et elles ne pourront pas tre restitues" - ger "nderungen an einigen nicht transaktionalen Tabellen konnten nicht zurckgerollt werden" - ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)" - por "Aviso: Algumas tabelas no-transacionais alteradas no puderam ser reconstitudas (rolled back)" - rus ": " - serbian "Upozorenje: Neke izmenjene tabele ne podravaju komandu 'ROLLBACK'" - spa "Aviso: Algunas tablas no transancionales no pueden tener rolled back" - swe "Warning: Ngra icke transaktionella tabeller kunde inte terstllas vid ROLLBACK" - ukr ": ˦ æΦ ͦ " + dan "Advarsel: Visse data i tabeller der ikke understtter transaktioner kunne ikke tilbagestilles" + nla "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen" + eng "Some non-transactional changed tables couldn't be rolled back" + est "Hoiatus: mnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida" + fre "Attention: certaines tables ne supportant pas les transactions ont t changes et elles ne pourront pas tre restitues" + ger "nderungen an einigen nicht transaktionalen Tabellen konnten nicht zurckgerollt werden" + ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)" + por "Aviso: Algumas tabelas no-transacionais alteradas no puderam ser reconstitudas (rolled back)" + rus ": " + serbian "Upozorenje: Neke izmenjene tabele ne podravaju komandu 'ROLLBACK'" + spa "Aviso: Algunas tablas no transancionales no pueden tener rolled back" + swe 
"Warning: Ngra icke transaktionella tabeller kunde inte terstllas vid ROLLBACK" + ukr ": ˦ æΦ ͦ " ER_TRANS_CACHE_FULL - dan "Fler-udtryks transaktion krvede mere plads en 'max_binlog_cache_size' bytes. Forhj vrdien af denne variabel og prv igen" - nla "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. Verhoog deze mysqld variabele en probeer opnieuw" - eng "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again" - est "Mitme lausendiga transaktsioon nudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. Suurenda muutuja vrtust ja proovi uuesti" - fre "Cette transaction commandes multiples ncessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et ressayez" - ger "Transaktionen, die aus mehreren Befehlen bestehen, bentigten mehr als 'max_binlog_cache_size' Bytes an Speicher. Btte vergrssern Sie diese Server-Variable versuchen Sie es noch einmal" - ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare" - por "Transaes multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta varivel do mysqld e tente novamente" - rus ", , 'max_binlog_cache_size' . mysqld " - spa "Multipla transicin necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo" - swe "Transaktionen krvde mera n 'max_binlog_cache_size' minne. ka denna mysqld-variabel och frsk p nytt" - ukr "æ ¦ Φ 'max_binlog_cache_size' Ԧ Ҧ. ¦ ͦ mysqld " + dan "Fler-udtryks transaktion krvede mere plads en 'max_binlog_cache_size' bytes. Forhj vrdien af denne variabel og prv igen" + nla "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. Verhoog deze mysqld variabele en probeer opnieuw" + eng "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again" + est "Mitme lausendiga transaktsioon nudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. Suurenda muutuja vrtust ja proovi uuesti" + fre "Cette transaction commandes multiples ncessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et ressayez" + ger "Transaktionen, die aus mehreren Befehlen bestehen, bentigten mehr als 'max_binlog_cache_size' Bytes an Speicher. Btte vergrssern Sie diese Server-Variable versuchen Sie es noch einmal" + ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare" + por "Transaes multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta varivel do mysqld e tente novamente" + rus ", , 'max_binlog_cache_size' . mysqld " + spa "Multipla transicin necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo" + swe "Transaktionen krvde mera n 'max_binlog_cache_size' minne. ka denna mysqld-variabel och frsk p nytt" + ukr "æ ¦ Φ 'max_binlog_cache_size' Ԧ Ҧ. 
¦ ͦ mysqld " ER_SLAVE_MUST_STOP - dan "Denne handling kunne ikke udfres med krende slave, brug frst kommandoen STOP SLAVE" - nla "Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE" - eng "This operation cannot be performed with a running slave; run STOP SLAVE first" - fre "Cette opration ne peut tre ralise avec un esclave actif, faites STOP SLAVE d'abord" - ger "Diese Operation kann bei einem aktiven Slave nicht durchgefhrt werden. Bitte zuerst STOP SLAVE ausfhren" - ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE" - por "Esta operao no pode ser realizada com um 'slave' em execuo. Execute STOP SLAVE primeiro" - rus " . STOP SLAVE" - serbian "Ova operacija ne moe biti izvrena dok je aktivan podreeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podreeni server." - spa "Esta operacin no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE" - swe "Denna operation kan inte gras under replikering; Gr STOP SLAVE frst" - ukr "æ Ц, STOP SLAVE" + dan "Denne handling kunne ikke udfres med krende slave, brug frst kommandoen STOP SLAVE" + nla "Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE" + eng "This operation cannot be performed with a running slave; run STOP SLAVE first" + fre "Cette opration ne peut tre ralise avec un esclave actif, faites STOP SLAVE d'abord" + ger "Diese Operation kann bei einem aktiven Slave nicht durchgefhrt werden. Bitte zuerst STOP SLAVE ausfhren" + ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE" + por "Esta operao no pode ser realizada com um 'slave' em execuo. Execute STOP SLAVE primeiro" + rus " . STOP SLAVE" + serbian "Ova operacija ne moe biti izvrena dok je aktivan podreeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podreeni server." + spa "Esta operacin no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE" + swe "Denna operation kan inte gras under replikering; Gr STOP SLAVE frst" + ukr "æ Ц, STOP SLAVE" ER_SLAVE_NOT_RUNNING - dan "Denne handling krver en krende slave. Konfigurer en slave og brug kommandoen START SLAVE" - nla "Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE" - eng "This operation requires a running slave; configure slave and do START SLAVE" - fre "Cette opration ncessite un esclave actif, configurez les esclaves et faites START SLAVE" - ger "Diese Operation bentigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren" - ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE" - por "Esta operao requer um 'slave' em execuo. Configure o 'slave' e execute START SLAVE" - rus " . START SLAVE" - serbian "Ova operacija zahteva da je aktivan podreeni server. Konfiguriite prvo podreeni server i onda izvrite komandu 'START SLAVE'" - spa "Esta operacin necesita el esclavo funcionando, configure esclavo y haga el START SLAVE" - swe "Denna operation kan endast gras under replikering; Konfigurera slaven och gr START SLAVE" - ukr "æ Ц, Ʀ Ц START SLAVE" + dan "Denne handling krver en krende slave. 
Konfigurer en slave og brug kommandoen START SLAVE" + nla "Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE" + eng "This operation requires a running slave; configure slave and do START SLAVE" + fre "Cette opration ncessite un esclave actif, configurez les esclaves et faites START SLAVE" + ger "Diese Operation bentigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren" + ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE" + por "Esta operao requer um 'slave' em execuo. Configure o 'slave' e execute START SLAVE" + rus " . START SLAVE" + serbian "Ova operacija zahteva da je aktivan podreeni server. Konfiguriite prvo podreeni server i onda izvrite komandu 'START SLAVE'" + spa "Esta operacin necesita el esclavo funcionando, configure esclavo y haga el START SLAVE" + swe "Denna operation kan endast gras under replikering; Konfigurera slaven och gr START SLAVE" + ukr "æ Ц, Ʀ Ц START SLAVE" ER_BAD_SLAVE - dan "Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO" - nla "De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO" - eng "The server is not configured as slave; fix in config file or with CHANGE MASTER TO" - fre "Le server n'est pas configur comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO" - ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben" - ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO" - por "O servidor no est configurado como 'slave'. Acerte o arquivo de configurao ou use CHANGE MASTER TO" - rus " . CHANGE MASTER TO" - serbian "Server nije konfigurisan kao podreeni server, ispravite konfiguracioni file ili na njemu izvrite komandu 'CHANGE MASTER TO'" - spa "El servidor no est configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO" - swe "Servern r inte konfigurerade som en replikationsslav. ndra konfigurationsfilen eller gr CHANGE MASTER TO" - ukr " Ʀ Ц, ̦ Ʀæ CHANGE MASTER TO" + dan "Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO" + nla "De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO" + eng "The server is not configured as slave; fix in config file or with CHANGE MASTER TO" + fre "Le server n'est pas configur comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO" + ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben" + ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO" + por "O servidor no est configurado como 'slave'. Acerte o arquivo de configurao ou use CHANGE MASTER TO" + rus " . CHANGE MASTER TO" + serbian "Server nije konfigurisan kao podreeni server, ispravite konfiguracioni file ili na njemu izvrite komandu 'CHANGE MASTER TO'" + spa "El servidor no est configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO" + swe "Servern r inte konfigurerade som en replikationsslav. 
ndra konfigurationsfilen eller gr CHANGE MASTER TO" + ukr " Ʀ Ц, ̦ Ʀæ CHANGE MASTER TO" ER_MASTER_INFO - eng "Could not initialize master info structure; more error messages can be found in the MySQL error log" - fre "Impossible d'initialiser les structures d'information de matre, vous trouverez des messages d'erreur supplmentaires dans le journal des erreurs de MySQL" - ger "Konnte Master-Info-Struktur nicht initialisieren. Weitere Fehlermeldungen knnen im MySQL-Error-Log eingesehen werden" - serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'" - swe "Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil fr mera information" + eng "Could not initialize master info structure; more error messages can be found in the MySQL error log" + fre "Impossible d'initialiser les structures d'information de matre, vous trouverez des messages d'erreur supplmentaires dans le journal des erreurs de MySQL" + ger "Konnte Master-Info-Struktur nicht initialisieren. Weitere Fehlermeldungen knnen im MySQL-Error-Log eingesehen werden" + serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'" + swe "Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil fr mera information" ER_SLAVE_THREAD - dan "Kunne ikke danne en slave-trd; check systemressourcerne" - nla "Kon slave thread niet aanmaken, controleer systeem resources" - eng "Could not create slave thread; check system resources" - fre "Impossible de crer une tche esclave, vrifiez les ressources systme" - ger "Konnte Slave-Thread nicht starten. Bitte System-Ressourcen berprfen" - ita "Impossibile creare il thread 'slave', controllare le risorse di sistema" - por "No conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema" - rus " . " - serbian "Nisam mogao da startujem thread za podreeni server, proverite sistemske resurse" - spa "No puedo crear el thread esclavo, verifique recursos del sistema" - swe "Kunde inte starta en trd fr replikering" - ukr " Ц Ǧ, צ Φ " + dan "Kunne ikke danne en slave-trd; check systemressourcerne" + nla "Kon slave thread niet aanmaken, controleer systeem resources" + eng "Could not create slave thread; check system resources" + fre "Impossible de crer une tche esclave, vrifiez les ressources systme" + ger "Konnte Slave-Thread nicht starten. Bitte System-Ressourcen berprfen" + ita "Impossibile creare il thread 'slave', controllare le risorse di sistema" + por "No conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema" + rus " . 
" + serbian "Nisam mogao da startujem thread za podreeni server, proverite sistemske resurse" + spa "No puedo crear el thread esclavo, verifique recursos del sistema" + swe "Kunde inte starta en trd fr replikering" + ukr " Ц Ǧ, צ Φ " ER_TOO_MANY_USER_CONNECTIONS 42000 - dan "Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser" - nla "Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen" - eng "User %-.64s already has more than 'max_user_connections' active connections" - est "Kasutajal %-.64s on juba rohkem hendusi kui lubatud 'max_user_connections' muutujaga" - fre "L'utilisateur %-.64s possde dj plus de 'max_user_connections' connections actives" - ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen" - ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive" - por "Usurio '%-.64s' j possui mais que o valor mximo de conexes (max_user_connections) ativas" - rus " %-.64s 'max_user_connections' " - serbian "Korisnik %-.64s ve ima vie aktivnih konekcija nego to je to odreeno 'max_user_connections' promenljivom" - spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas" - swe "Anvndare '%-.64s' har redan 'max_user_connections' aktiva inloggningar" - ukr " %-.64s ¦ Φ 'max_user_connections' '" + dan "Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser" + nla "Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen" + eng "User %-.64s already has more than 'max_user_connections' active connections" + est "Kasutajal %-.64s on juba rohkem hendusi kui lubatud 'max_user_connections' muutujaga" + fre "L'utilisateur %-.64s possde dj plus de 'max_user_connections' connections actives" + ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen" + ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive" + por "Usurio '%-.64s' j possui mais que o valor mximo de conexes (max_user_connections) ativas" + rus " %-.64s 'max_user_connections' " + serbian "Korisnik %-.64s ve ima vie aktivnih konekcija nego to je to odreeno 'max_user_connections' promenljivom" + spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas" + swe "Anvndare '%-.64s' har redan 'max_user_connections' aktiva inloggningar" + ukr " %-.64s ¦ Φ 'max_user_connections' '" ER_SET_CONSTANTS_ONLY - dan "Du m kun bruge konstantudtryk med SET" - nla "U mag alleen constante expressies gebruiken bij SET" - eng "You may only use constant expressions with SET" - est "Ainult konstantsed suurused on lubatud SET klauslis" - fre "Seules les expressions constantes sont autorises avec SET" - ger "Bei SET drfen nur konstante Ausdrcke verwendet werden" - ita "Si possono usare solo espressioni costanti con SET" - por "Voc pode usar apenas expresses constantes com SET" - rus " SET " - serbian "Moete upotrebiti samo konstantan iskaz sa komandom 'SET'" - spa "Tu solo debes usar expresiones constantes con SET" - swe "Man kan endast anvnda konstantuttryck med SET" - ukr " ڦ SET" + dan "Du m kun bruge konstantudtryk med SET" + nla "U mag alleen constante expressies gebruiken bij SET" + eng "You may only use constant expressions with SET" + est "Ainult konstantsed suurused on lubatud SET klauslis" + fre "Seules les expressions constantes sont autorises avec SET" + ger "Bei SET drfen nur konstante Ausdrcke verwendet werden" + ita "Si possono usare solo espressioni costanti con SET" + por "Voc pode usar apenas expresses 
constantes com SET" + rus " SET " + serbian "Moete upotrebiti samo konstantan iskaz sa komandom 'SET'" + spa "Tu solo debes usar expresiones constantes con SET" + swe "Man kan endast anvnda konstantuttryck med SET" + ukr " ڦ SET" ER_LOCK_WAIT_TIMEOUT - dan "Lock wait timeout overskredet" - nla "Lock wacht tijd overschreden" - eng "Lock wait timeout exceeded; try restarting transaction" - est "Kontrollaeg letatud luku jrel ootamisel; Proovi transaktsiooni otsast alata" - fre "Timeout sur l'obtention du verrou" - ger "Beim Warten auf eine Sperre wurde die zulssige Wartezeit berschritten. Bitte versuchen Sie, die Transaktion neu zu starten" - ita "E' scaduto il timeout per l'attesa del lock" - por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transao." - rus " ; " - serbian "Vremenski limit za zakljuavanje tabele je istekao; Probajte da ponovo startujete transakciju" - spa "Tiempo de bloqueo de espera excedido" - swe "Fick inte ett ls i tid ; Frsk att starta om transaktionen" - ukr " ަ " + dan "Lock wait timeout overskredet" + nla "Lock wacht tijd overschreden" + eng "Lock wait timeout exceeded; try restarting transaction" + est "Kontrollaeg letatud luku jrel ootamisel; Proovi transaktsiooni otsast alata" + fre "Timeout sur l'obtention du verrou" + ger "Beim Warten auf eine Sperre wurde die zulssige Wartezeit berschritten. Bitte versuchen Sie, die Transaktion neu zu starten" + ita "E' scaduto il timeout per l'attesa del lock" + por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transao." + rus " ; " + serbian "Vremenski limit za zakljuavanje tabele je istekao; Probajte da ponovo startujete transakciju" + spa "Tiempo de bloqueo de espera excedido" + swe "Fick inte ett ls i tid ; Frsk att starta om transaktionen" + ukr " ަ " ER_LOCK_TABLE_FULL - dan "Det totale antal lse overstiger strrelsen p lse-tabellen" - nla "Het totale aantal locks overschrijdt de lock tabel grootte" - eng "The total number of locks exceeds the lock table size" - est "Lukkude koguarv letab lukutabeli suuruse" - fre "Le nombre total de verrou dpasse la taille de la table des verrous" - ger "Die Gesamtzahl der Sperren berschreitet die Gre der Sperrtabelle" - ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock" - por "O nmero total de travamentos excede o tamanho da tabela de travamentos" - rus " " - serbian "Broj totalnih zakljuavanja tabele premauje veliinu tabele zakljuavanja" - spa "El nmero total de bloqueos excede el tamao de bloqueo de la tabla" - swe "Antal ls verskrider antalet reserverade ls" - ukr " ˦˦ ͦ æ" + dan "Det totale antal lse overstiger strrelsen p lse-tabellen" + nla "Het totale aantal locks overschrijdt de lock tabel grootte" + eng "The total number of locks exceeds the lock table size" + est "Lukkude koguarv letab lukutabeli suuruse" + fre "Le nombre total de verrou dpasse la taille de la table des verrous" + ger "Die Gesamtzahl der Sperren berschreitet die Gre der Sperrtabelle" + ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock" + por "O nmero total de travamentos excede o tamanho da tabela de travamentos" + rus " " + serbian "Broj totalnih zakljuavanja tabele premauje veliinu tabele zakljuavanja" + spa "El nmero total de bloqueos excede el tamao de bloqueo de la tabla" + swe "Antal ls verskrider antalet reserverade ls" + ukr " ˦˦ ͦ æ" ER_READ_ONLY_TRANSACTION 25000 - dan "Update ls kan ikke opns under en READ UNCOMMITTED transaktion" - nla "Update locks kunnen niet worden verkregen tijdens een READ 
UNCOMMITTED transactie" - eng "Update locks cannot be acquired during a READ UNCOMMITTED transaction" - est "Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni kigus" - fre "Un verrou en update ne peut tre acquit pendant une transaction READ UNCOMMITTED" - ger "Whrend einer READ-UNCOMMITTED-Transaktion knnen keine UPDATE-Sperren angefordert werden" - ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'" - por "Travamentos de atualizao no podem ser obtidos durante uma transao de tipo READ UNCOMMITTED" - rus " ( READ UNCOMMITTED) " - serbian "Zakljuavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija" - spa "Bloqueos de actualizacin no pueden ser adqueridos durante una transicin READ UNCOMMITTED" - swe "Updateringsls kan inte gras nr man anvnder READ UNCOMMITTED" - ukr " ڦ æ READ UNCOMMITTED" + dan "Update ls kan ikke opns under en READ UNCOMMITTED transaktion" + nla "Update locks kunnen niet worden verkregen tijdens een READ UNCOMMITTED transactie" + eng "Update locks cannot be acquired during a READ UNCOMMITTED transaction" + est "Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni kigus" + fre "Un verrou en update ne peut tre acquit pendant une transaction READ UNCOMMITTED" + ger "Whrend einer READ-UNCOMMITTED-Transaktion knnen keine UPDATE-Sperren angefordert werden" + ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'" + por "Travamentos de atualizao no podem ser obtidos durante uma transao de tipo READ UNCOMMITTED" + rus " ( READ UNCOMMITTED) " + serbian "Zakljuavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija" + spa "Bloqueos de actualizacin no pueden ser adqueridos durante una transicin READ UNCOMMITTED" + swe "Updateringsls kan inte gras nr man anvnder READ UNCOMMITTED" + ukr " ڦ æ READ UNCOMMITTED" ER_DROP_DB_WITH_READ_LOCK - dan "DROP DATABASE er ikke tilladt mens en trd holder p globalt read lock" - nla "DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit" - eng "DROP DATABASE not allowed while thread is holding global read lock" - est "DROP DATABASE ei ole lubatud kui lim omab globaalset READ lukku" - fre "DROP DATABASE n'est pas autorise pendant qu'une tche possde un verrou global en lecture" - ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hlt" - ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura" - por "DROP DATABASE no permitido enquanto uma 'thread' est mantendo um travamento global de leitura" - rus " DROP DATABASE, " - serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuava itanje podataka" - spa "DROP DATABASE no permitido mientras un thread est ejerciendo un bloqueo de lectura global" - swe "DROP DATABASE r inte tilltet nr man har ett globalt lsls" - ukr "DROP DATABASE Ǧ Ц " + dan "DROP DATABASE er ikke tilladt mens en trd holder p globalt read lock" + nla "DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit" + eng "DROP DATABASE not allowed while thread is holding global read lock" + est "DROP DATABASE ei ole lubatud kui lim omab globaalset READ lukku" + fre "DROP DATABASE n'est pas autorise pendant qu'une tche possde un verrou global en lecture" + ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hlt" + ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura" + por "DROP DATABASE no permitido enquanto 
uma 'thread' est mantendo um travamento global de leitura" + rus " DROP DATABASE, " + serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuava itanje podataka" + spa "DROP DATABASE no permitido mientras un thread est ejerciendo un bloqueo de lectura global" + swe "DROP DATABASE r inte tilltet nr man har ett globalt lsls" + ukr "DROP DATABASE Ǧ Ц " ER_CREATE_DB_WITH_READ_LOCK - dan "CREATE DATABASE er ikke tilladt mens en trd holder p globalt read lock" - nla "CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit" - eng "CREATE DATABASE not allowed while thread is holding global read lock" - est "CREATE DATABASE ei ole lubatud kui lim omab globaalset READ lukku" - fre "CREATE DATABASE n'est pas autorise pendant qu'une tche possde un verrou global en lecture" - ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hlt" - ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura" - por "CREATE DATABASE no permitido enquanto uma 'thread' est mantendo um travamento global de leitura" - rus " CREATE DATABASE, " - serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuava itanje podataka" - spa "CREATE DATABASE no permitido mientras un thread est ejerciendo un bloqueo de lectura global" - swe "CREATE DATABASE r inte tilltet nr man har ett globalt lsls" - ukr "CREATE DATABASE Ǧ Ц " + dan "CREATE DATABASE er ikke tilladt mens en trd holder p globalt read lock" + nla "CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit" + eng "CREATE DATABASE not allowed while thread is holding global read lock" + est "CREATE DATABASE ei ole lubatud kui lim omab globaalset READ lukku" + fre "CREATE DATABASE n'est pas autorise pendant qu'une tche possde un verrou global en lecture" + ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hlt" + ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura" + por "CREATE DATABASE no permitido enquanto uma 'thread' est mantendo um travamento global de leitura" + rus " CREATE DATABASE, " + serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuava itanje podataka" + spa "CREATE DATABASE no permitido mientras un thread est ejerciendo un bloqueo de lectura global" + swe "CREATE DATABASE r inte tilltet nr man har ett globalt lsls" + ukr "CREATE DATABASE Ǧ Ц " ER_WRONG_ARGUMENTS - nla "Foutieve parameters voor %s" - eng "Incorrect arguments to %s" - est "Vigased parameetrid %s-le" - fre "Mauvais arguments %s" - ger "Falsche Argumente fr %s" - ita "Argomenti errati a %s" - por "Argumentos errados para %s" - rus " %s" - serbian "Pogreni argumenti prosleeni na %s" - spa "Argumentos errados para %s" - swe "Felaktiga argument till %s" - ukr " %s" + nla "Foutieve parameters voor %s" + eng "Incorrect arguments to %s" + est "Vigased parameetrid %s-le" + fre "Mauvais arguments %s" + ger "Falsche Argumente fr %s" + ita "Argomenti errati a %s" + por "Argumentos errados para %s" + rus " %s" + serbian "Pogreni argumenti prosleeni na %s" + spa "Argumentos errados para %s" + swe "Felaktiga argument till %s" + ukr " %s" ER_NO_PERMISSION_TO_CREATE_USER 42000 - nla "'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren" - eng "'%-.32s'@'%-.64s' is not allowed to create new users" - est "Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid" - fre "'%-.32s'@'%-.64s' n'est pas autoris crer de nouveaux utilisateurs" - ger "'%-.32s'@'%-.64s' ist nicht berechtigt, neue 
Benutzer hinzuzufgen" - ita "A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti" - por "No permitido a '%-.32s'@'%-.64s' criar novos usurios" - rus "'%-.32s'@'%-.64s' " - serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike" - spa "'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios" - swe "'%-.32s'@'%-.64s' har inte rttighet att skapa nya anvndare" - ukr " '%-.32s'@'%-.64s' ަ" + nla "'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren" + eng "'%-.32s'@'%-.64s' is not allowed to create new users" + est "Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid" + fre "'%-.32s'@'%-.64s' n'est pas autoris crer de nouveaux utilisateurs" + ger "'%-.32s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufgen" + ita "A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti" + por "No permitido a '%-.32s'@'%-.64s' criar novos usurios" + rus "'%-.32s'@'%-.64s' " + serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike" + spa "'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios" + swe "'%-.32s'@'%-.64s' har inte rttighet att skapa nya anvndare" + ukr " '%-.32s'@'%-.64s' ަ" ER_UNION_TABLES_IN_DIFFERENT_DIR - nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren" - eng "Incorrect table definition; all MERGE tables must be in the same database" - est "Vigane tabelimratlus; kik MERGE tabeli liikmed peavad asuma samas andmebaasis" - fre "Dfinition de table incorrecte; toutes les tables MERGE doivent tre dans la mme base de donne" - ger "Falsche Tabellendefinition. Alle MERGE-Tabellen mssen sich in derselben Datenbank befinden" - ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database" - por "Definio incorreta da tabela. Todas as tabelas contidas na juno devem estar no mesmo banco de dados." - rus " ; MERGE " - serbian "Pogrena definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka" - spa "Incorrecta definicin de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos" - swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell mste vara i samma databas" + nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren" + eng "Incorrect table definition; all MERGE tables must be in the same database" + est "Vigane tabelimratlus; kik MERGE tabeli liikmed peavad asuma samas andmebaasis" + fre "Dfinition de table incorrecte; toutes les tables MERGE doivent tre dans la mme base de donne" + ger "Falsche Tabellendefinition. Alle MERGE-Tabellen mssen sich in derselben Datenbank befinden" + ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database" + por "Definio incorreta da tabela. Todas as tabelas contidas na juno devem estar no mesmo banco de dados." 
+ rus " ; MERGE " + serbian "Pogrena definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka" + spa "Incorrecta definicin de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos" + swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell mste vara i samma databas" ER_LOCK_DEADLOCK 40001 - nla "Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie" - eng "Deadlock found when trying to get lock; try restarting transaction" - est "Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast" - fre "Deadlock dcouvert en essayant d'obtenir les verrous : essayez de redmarrer la transaction" - ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten" - ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione" - por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transao." - rus " ; " - serbian "Unakrsno zakljuavanje pronaeno kada sam pokuao da dobijem pravo na zakljuavanje; Probajte da restartujete transakciju" - spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transicin" - swe "Fick 'DEADLOCK' vid lsfrsk av block/rad. Frsk att starta om transaktionen" + nla "Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie" + eng "Deadlock found when trying to get lock; try restarting transaction" + est "Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast" + fre "Deadlock dcouvert en essayant d'obtenir les verrous : essayez de redmarrer la transaction" + ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten" + ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione" + por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transao." + rus " ; " + serbian "Unakrsno zakljuavanje pronaeno kada sam pokuao da dobijem pravo na zakljuavanje; Probajte da restartujete transakciju" + spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transicin" + swe "Fick 'DEADLOCK' vid lsfrsk av block/rad. 
Frsk att starta om transaktionen" ER_TABLE_CANT_HANDLE_FT - nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen" - eng "The used table type doesn't support FULLTEXT indexes" - est "Antud tabelitp ei toeta FULLTEXT indekseid" - fre "Le type de table utilis ne supporte pas les index FULLTEXT" - ger "Der verwendete Tabellentyp untersttzt keine FULLTEXT-Indizes" - ita "La tabella usata non supporta gli indici FULLTEXT" - por "O tipo de tabela utilizado no suporta ndices de texto completo (fulltext indexes)" - rus " " - serbian "Upotrebljeni tip tabele ne podrava 'FULLTEXT' indekse" - spa "El tipo de tabla usada no soporta ndices FULLTEXT" - swe "Tabelltypen har inte hantering av FULLTEXT-index" - ukr " æ Цդ FULLTEXT Ӧ" + nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen" + eng "The used table type doesn't support FULLTEXT indexes" + est "Antud tabelitp ei toeta FULLTEXT indekseid" + fre "Le type de table utilis ne supporte pas les index FULLTEXT" + ger "Der verwendete Tabellentyp untersttzt keine FULLTEXT-Indizes" + ita "La tabella usata non supporta gli indici FULLTEXT" + por "O tipo de tabela utilizado no suporta ndices de texto completo (fulltext indexes)" + rus " " + serbian "Upotrebljeni tip tabele ne podrava 'FULLTEXT' indekse" + spa "El tipo de tabla usada no soporta ndices FULLTEXT" + swe "Tabelltypen har inte hantering av FULLTEXT-index" + ukr " æ Цդ FULLTEXT Ӧ" ER_CANNOT_ADD_FOREIGN - nla "Kan foreign key beperking niet toevoegen" - eng "Cannot add foreign key constraint" - fre "Impossible d'ajouter des contraintes d'index externe" - ger "Fremdschlssel-Beschrnkung kann nicht hinzugefgt werden" - ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)" - por "No pode acrescentar uma restrio de chave estrangeira" - rus " " - serbian "Ne mogu da dodam proveru spoljnog kljua" - spa "No puede adicionar clave extranjera constraint" - swe "Kan inte lgga till 'FOREIGN KEY constraint'" + nla "Kan foreign key beperking niet toevoegen" + eng "Cannot add foreign key constraint" + fre "Impossible d'ajouter des contraintes d'index externe" + ger "Fremdschlssel-Beschrnkung kann nicht hinzugefgt werden" + ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)" + por "No pode acrescentar uma restrio de chave estrangeira" + rus " " + serbian "Ne mogu da dodam proveru spoljnog kljua" + spa "No puede adicionar clave extranjera constraint" + swe "Kan inte lgga till 'FOREIGN KEY constraint'" ER_NO_REFERENCED_ROW 23000 - nla "Kan onderliggende rij niet toevoegen: foreign key beperking gefaald" - eng "Cannot add or update a child row: a foreign key constraint fails" - fre "Impossible d'ajouter un enregistrement fils : une constrainte externe l'empche" - ger "Hinzufgen oder Aktualisieren eines Kind-Datensatzes schlug aufgrund einer Fremdschlssel-Beschrnkung fehl" - greek "Cannot add a child row: a foreign key constraint fails" - hun "Cannot add a child row: a foreign key constraint fails" - ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto" - norwegian-ny "Cannot add a child row: a foreign key constraint fails" - por "No pode acrescentar uma linha filha: uma restrio de chave estrangeira falhou" - rus " : " - spa "No puede adicionar una lnea hijo: falla de clave extranjera constraint" - swe "FOREIGN KEY-konflikt: Kan inte skriva barn" + nla "Kan onderliggende rij niet toevoegen: foreign key beperking gefaald" + eng "Cannot add or update a child row: a foreign key 
constraint fails" + fre "Impossible d'ajouter un enregistrement fils : une constrainte externe l'empche" + ger "Hinzufgen oder Aktualisieren eines Kind-Datensatzes schlug aufgrund einer Fremdschlssel-Beschrnkung fehl" + greek "Cannot add a child row: a foreign key constraint fails" + hun "Cannot add a child row: a foreign key constraint fails" + ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto" + norwegian-ny "Cannot add a child row: a foreign key constraint fails" + por "No pode acrescentar uma linha filha: uma restrio de chave estrangeira falhou" + rus " : " + spa "No puede adicionar una lnea hijo: falla de clave extranjera constraint" + swe "FOREIGN KEY-konflikt: Kan inte skriva barn" ER_ROW_IS_REFERENCED 23000 - eng "Cannot delete or update a parent row: a foreign key constraint fails" - fre "Impossible de supprimer un enregistrement pre : une constrainte externe l'empche" - ger "Lschen oder Aktualisieren eines Eltern-Datensatzes schlug aufgrund einer Fremdschlssel-Beschrnkung fehl" - greek "Cannot delete a parent row: a foreign key constraint fails" - hun "Cannot delete a parent row: a foreign key constraint fails" - ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto" - por "No pode apagar uma linha pai: uma restrio de chave estrangeira falhou" - rus " : " - serbian "Ne mogu da izbriem roditeljski slog: provera spoljnog kljua je neuspela" - spa "No puede deletar una lnea padre: falla de clave extranjera constraint" - swe "FOREIGN KEY-konflikt: Kan inte radera fader" + eng "Cannot delete or update a parent row: a foreign key constraint fails" + fre "Impossible de supprimer un enregistrement pre : une constrainte externe l'empche" + ger "Lschen oder Aktualisieren eines Eltern-Datensatzes schlug aufgrund einer Fremdschlssel-Beschrnkung fehl" + greek "Cannot delete a parent row: a foreign key constraint fails" + hun "Cannot delete a parent row: a foreign key constraint fails" + ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto" + por "No pode apagar uma linha pai: uma restrio de chave estrangeira falhou" + rus " : " + serbian "Ne mogu da izbriem roditeljski slog: provera spoljnog kljua je neuspela" + spa "No puede deletar una lnea padre: falla de clave extranjera constraint" + swe "FOREIGN KEY-konflikt: Kan inte radera fader" ER_CONNECT_TO_MASTER 08S01 - nla "Fout bij opbouwen verbinding naar master: %-.128s" - eng "Error connecting to master: %-.128s" - ger "Fehler bei der Verbindung zum Master: %-.128s" - ita "Errore durante la connessione al master: %-.128s" - por "Erro conectando com o master: %-.128s" - rus " : %-.128s" - spa "Error de coneccion a master: %-.128s" - swe "Fick fel vid anslutning till master: %-.128s" + nla "Fout bij opbouwen verbinding naar master: %-.128s" + eng "Error connecting to master: %-.128s" + ger "Fehler bei der Verbindung zum Master: %-.128s" + ita "Errore durante la connessione al master: %-.128s" + por "Erro conectando com o master: %-.128s" + rus " : %-.128s" + spa "Error de coneccion a master: %-.128s" + swe "Fick fel vid anslutning till master: %-.128s" ER_QUERY_ON_MASTER - nla "Fout bij uitvoeren query op master: %-.128s" - eng "Error running query on master: %-.128s" - ger "Beim Ausfhren einer Abfrage auf dem Master trat ein Fehler auf: %-.128s" - ita "Errore eseguendo una query sul master: %-.128s" - por "Erro rodando consulta no master: %-.128s" - rus " : %-.128s" - spa "Error executando el query en master: %-.128s" - swe 
"Fick fel vid utfrande av command p mastern: %-.128s" + nla "Fout bij uitvoeren query op master: %-.128s" + eng "Error running query on master: %-.128s" + ger "Beim Ausfhren einer Abfrage auf dem Master trat ein Fehler auf: %-.128s" + ita "Errore eseguendo una query sul master: %-.128s" + por "Erro rodando consulta no master: %-.128s" + rus " : %-.128s" + spa "Error executando el query en master: %-.128s" + swe "Fick fel vid utfrande av command p mastern: %-.128s" ER_ERROR_WHEN_EXECUTING_COMMAND - nla "Fout tijdens uitvoeren van commando %s: %-.128s" - eng "Error when executing command %s: %-.128s" - est "Viga ksu %s titmisel: %-.128s" - ger "Fehler beim Ausfhren des Befehls %s: %-.128s" - ita "Errore durante l'esecuzione del comando %s: %-.128s" - por "Erro quando executando comando %s: %-.128s" - rus " %s: %-.128s" - serbian "Greka pri izvravanju komande %s: %-.128s" - spa "Error de %s: %-.128s" - swe "Fick fel vid utfrande av %s: %-.128s" + nla "Fout tijdens uitvoeren van commando %s: %-.128s" + eng "Error when executing command %s: %-.128s" + est "Viga ksu %s titmisel: %-.128s" + ger "Fehler beim Ausfhren des Befehls %s: %-.128s" + ita "Errore durante l'esecuzione del comando %s: %-.128s" + por "Erro quando executando comando %s: %-.128s" + rus " %s: %-.128s" + serbian "Greka pri izvravanju komande %s: %-.128s" + spa "Error de %s: %-.128s" + swe "Fick fel vid utfrande av %s: %-.128s" ER_WRONG_USAGE - nla "Foutief gebruik van %s en %s" - eng "Incorrect usage of %s and %s" - est "Vigane %s ja %s kasutus" - ger "Falsche Verwendung von %s und %s" - ita "Uso errato di %s e %s" - por "Uso errado de %s e %s" - rus " %s %s" - serbian "Pogrena upotreba %s i %s" - spa "Equivocado uso de %s y %s" - swe "Felaktig anvnding av %s and %s" - ukr "Wrong usage of %s and %s" + nla "Foutief gebruik van %s en %s" + eng "Incorrect usage of %s and %s" + est "Vigane %s ja %s kasutus" + ger "Falsche Verwendung von %s und %s" + ita "Uso errato di %s e %s" + por "Uso errado de %s e %s" + rus " %s %s" + serbian "Pogrena upotreba %s i %s" + spa "Equivocado uso de %s y %s" + swe "Felaktig anvnding av %s and %s" + ukr "Wrong usage of %s and %s" ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000 - nla "De gebruikte SELECT commando's hebben een verschillend aantal kolommen" - eng "The used SELECT statements have a different number of columns" - est "Tulpade arv kasutatud SELECT lausetes ei kattu" - ger "Die verwendeten SELECT-Befehle liefern unterschiedliche Anzahlen von Feldern zurck" - ita "La SELECT utilizzata ha un numero di colonne differente" - por "Os comandos SELECT usados tm diferente nmero de colunas" - rus " (SELECT) " - serbian "Upotrebljene 'SELECT' komande adresiraju razliit broj kolona" - spa "El comando SELECT usado tiene diferente nmero de columnas" - swe "SELECT-kommandona har olika antal kolumner" + nla "De gebruikte SELECT commando's hebben een verschillend aantal kolommen" + eng "The used SELECT statements have a different number of columns" + est "Tulpade arv kasutatud SELECT lausetes ei kattu" + ger "Die verwendeten SELECT-Befehle liefern unterschiedliche Anzahlen von Feldern zurck" + ita "La SELECT utilizzata ha un numero di colonne differente" + por "Os comandos SELECT usados tm diferente nmero de colunas" + rus " (SELECT) " + serbian "Upotrebljene 'SELECT' komande adresiraju razliit broj kolona" + spa "El comando SELECT usado tiene diferente nmero de columnas" + swe "SELECT-kommandona har olika antal kolumner" ER_CANT_UPDATE_WITH_READLOCK - nla "Kan de query niet uitvoeren vanwege een conflicterende 
read lock" - eng "Can't execute the query because you have a conflicting read lock" - est "Ei suuda tita pringut konfliktse luku tttu" - ger "Augrund eines READ-LOCK-Konflikts kann die Abfrage nicht ausgefhrt werden" - ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura" - por "No posso executar a consulta porque voc tem um conflito de travamento de leitura" - rus " , " - serbian "Ne mogu da izvrim upit zbog toga to imate zakljuavanja itanja podataka u konfliktu" - spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura" - swe "Kan inte utfra kommandot emedan du har ett READ-ls" + nla "Kan de query niet uitvoeren vanwege een conflicterende read lock" + eng "Can't execute the query because you have a conflicting read lock" + est "Ei suuda tita pringut konfliktse luku tttu" + ger "Augrund eines READ-LOCK-Konflikts kann die Abfrage nicht ausgefhrt werden" + ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura" + por "No posso executar a consulta porque voc tem um conflito de travamento de leitura" + rus " , " + serbian "Ne mogu da izvrim upit zbog toga to imate zakljuavanja itanja podataka u konfliktu" + spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura" + swe "Kan inte utfra kommandot emedan du har ett READ-ls" ER_MIXING_NOT_ALLOWED - nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld." - eng "Mixing of transactional and non-transactional tables is disabled" - est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud" - ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsuntersttzung ist deaktiviert" - ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali" - por "Mistura de tabelas transacional e no-transacional est desabilitada" - rus " " - serbian "Meanje tabela koje podravaju transakcije i onih koje ne podravaju transakcije je iskljueno" - spa "Mezla de transancional y no-transancional tablas est deshabilitada" - swe "Blandning av transaktionella och icke-transaktionella tabeller r inaktiverat" + nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld." 
+ eng "Mixing of transactional and non-transactional tables is disabled" + est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud" + ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsuntersttzung ist deaktiviert" + ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali" + por "Mistura de tabelas transacional e no-transacional est desabilitada" + rus " " + serbian "Meanje tabela koje podravaju transakcije i onih koje ne podravaju transakcije je iskljueno" + spa "Mezla de transancional y no-transancional tablas est deshabilitada" + swe "Blandning av transaktionella och icke-transaktionella tabeller r inaktiverat" ER_DUP_ARGUMENT - nla "Optie '%s' tweemaal gebruikt in opdracht" - eng "Option '%s' used twice in statement" - est "Mrangut '%s' on lauses kasutatud topelt" - ger "Option '%s' wird im Befehl zweimal verwendet" - ita "L'opzione '%s' e' stata usata due volte nel comando" - por "Opo '%s' usada duas vezes no comando" - rus " '%s' " - spa "Opcin '%s' usada dos veces en el comando" - swe "Option '%s' anvndes tv gnger" + nla "Optie '%s' tweemaal gebruikt in opdracht" + eng "Option '%s' used twice in statement" + est "Mrangut '%s' on lauses kasutatud topelt" + ger "Option '%s' wird im Befehl zweimal verwendet" + ita "L'opzione '%s' e' stata usata due volte nel comando" + por "Opo '%s' usada duas vezes no comando" + rus " '%s' " + spa "Opcin '%s' usada dos veces en el comando" + swe "Option '%s' anvndes tv gnger" ER_USER_LIMIT_REACHED 42000 - nla "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)" - eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)" - ger "Benutzer '%-.64s' hat die Ressourcenbeschrnkung '%s' berschritten (aktueller Wert: %ld)" - ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)" - por "Usurio '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)" - rus " '%-.64s' '%s' ( : %ld)" - spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)" - swe "Anvndare '%-.64s' har verskridit '%s' (nuvarande vrde: %ld)" + nla "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)" + eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)" + ger "Benutzer '%-.64s' hat die Ressourcenbeschrnkung '%s' berschritten (aktueller Wert: %ld)" + ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)" + por "Usurio '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)" + rus " '%-.64s' '%s' ( : %ld)" + spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)" + swe "Anvndare '%-.64s' har verskridit '%s' (nuvarande vrde: %ld)" ER_SPECIFIC_ACCESS_DENIED_ERROR 42000 - nla "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie" - eng "Access denied; you need the %-.128s privilege for this operation" - ger "Kein Zugriff. Hierfr wird die Berechtigung %-.128s bentigt" - ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione" - por "Acesso negado. Voc precisa o privilgio %-.128s para essa operao" - rus " . %-.128s " - spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operacin" - swe "Du har inte privlegiet '%-.128s' som behvs fr denna operation" - ukr "Access denied. You need the %-.128s privilege for this operation" + nla "Toegang geweigerd. 
U moet het %-.128s privilege hebben voor deze operatie" + eng "Access denied; you need the %-.128s privilege for this operation" + ger "Kein Zugriff. Hierfr wird die Berechtigung %-.128s bentigt" + ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione" + por "Acesso negado. Voc precisa o privilgio %-.128s para essa operao" + rus " . %-.128s " + spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operacin" + swe "Du har inte privlegiet '%-.128s' som behvs fr denna operation" + ukr "Access denied. You need the %-.128s privilege for this operation" ER_LOCAL_VARIABLE - nla "Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL" - eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL" - ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verndert werden" - ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL" - por "Varivel '%-.64s' uma SESSION varivel e no pode ser usada com SET GLOBAL" - rus " '%-.64s' (SESSION) SET GLOBAL" - spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL" - swe "Variabel '%-.64s' r en SESSION variabel och kan inte ndrad med SET GLOBAL" + nla "Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL" + eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL" + ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verndert werden" + ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL" + por "Varivel '%-.64s' uma SESSION varivel e no pode ser usada com SET GLOBAL" + rus " '%-.64s' (SESSION) SET GLOBAL" + spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL" + swe "Variabel '%-.64s' r en SESSION variabel och kan inte ndrad med SET GLOBAL" ER_GLOBAL_VARIABLE - nla "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL" - eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL" - ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verndert werden" - ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL" - por "Varivel '%-.64s' uma GLOBAL varivel e deve ser configurada com SET GLOBAL" - rus " '%-.64s' (GLOBAL) , SET GLOBAL" - spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL" - swe "Variabel '%-.64s' r en GLOBAL variabel och br sttas med SET GLOBAL" + nla "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL" + eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL" + ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verndert werden" + ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL" + por "Varivel '%-.64s' uma GLOBAL varivel e deve ser configurada com SET GLOBAL" + rus " '%-.64s' (GLOBAL) , SET GLOBAL" + spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL" + swe "Variabel '%-.64s' r en GLOBAL variabel och br sttas med SET GLOBAL" ER_NO_DEFAULT 42000 - nla "Variabele '%-.64s' heeft geen standaard waarde" - eng "Variable '%-.64s' doesn't have a default value" - ger "Variable '%-.64s' hat keinen Vorgabewert" - ita "La variabile '%-.64s' non ha un valore di default" - por "Varivel '%-.64s' no tem um valor padro" - rus " '%-.64s' " - spa 
"Variable '%-.64s' no tiene un valor patrn" - swe "Variabel '%-.64s' har inte ett DEFAULT-vrde" + nla "Variabele '%-.64s' heeft geen standaard waarde" + eng "Variable '%-.64s' doesn't have a default value" + ger "Variable '%-.64s' hat keinen Vorgabewert" + ita "La variabile '%-.64s' non ha un valore di default" + por "Varivel '%-.64s' no tem um valor padro" + rus " '%-.64s' " + spa "Variable '%-.64s' no tiene un valor patrn" + swe "Variabel '%-.64s' har inte ett DEFAULT-vrde" ER_WRONG_VALUE_FOR_VAR 42000 - nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'" - eng "Variable '%-.64s' can't be set to the value of '%-.64s'" - ger "Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden" - ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'" - por "Varivel '%-.64s' no pode ser configurada para o valor de '%-.64s'" - rus " '%-.64s' '%-.64s'" - spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'" - swe "Variabel '%-.64s' kan inte sttas till '%-.64s'" + nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'" + eng "Variable '%-.64s' can't be set to the value of '%-.64s'" + ger "Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden" + ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'" + por "Varivel '%-.64s' no pode ser configurada para o valor de '%-.64s'" + rus " '%-.64s' '%-.64s'" + spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'" + swe "Variabel '%-.64s' kan inte sttas till '%-.64s'" ER_WRONG_TYPE_FOR_VAR 42000 - nla "Foutief argumenttype voor variabele '%-.64s'" - eng "Incorrect argument type to variable '%-.64s'" - ger "Falscher Argumenttyp fr Variable '%-.64s'" - ita "Tipo di valore errato per la variabile '%-.64s'" - por "Tipo errado de argumento para varivel '%-.64s'" - rus " '%-.64s'" - spa "Tipo de argumento equivocado para variable '%-.64s'" - swe "Fel typ av argument till variabel '%-.64s'" + nla "Foutief argumenttype voor variabele '%-.64s'" + eng "Incorrect argument type to variable '%-.64s'" + ger "Falscher Argumenttyp fr Variable '%-.64s'" + ita "Tipo di valore errato per la variabile '%-.64s'" + por "Tipo errado de argumento para varivel '%-.64s'" + rus " '%-.64s'" + spa "Tipo de argumento equivocado para variable '%-.64s'" + swe "Fel typ av argument till variabel '%-.64s'" ER_VAR_CANT_BE_READ - nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen" - eng "Variable '%-.64s' can only be set, not read" - ger "Variable '%-.64s' kann nur verndert, nicht gelesen werden" - ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto" - por "Varivel '%-.64s' somente pode ser configurada, no lida" - rus " '%-.64s' , " - spa "Variable '%-.64s' solamente puede ser configurada, no leda" - swe "Variabeln '%-.64s' kan endast sttas, inte lsas" + nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen" + eng "Variable '%-.64s' can only be set, not read" + ger "Variable '%-.64s' kann nur verndert, nicht gelesen werden" + ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto" + por "Varivel '%-.64s' somente pode ser configurada, no lida" + rus " '%-.64s' , " + spa "Variable '%-.64s' solamente puede ser configurada, no leda" + swe "Variabeln '%-.64s' kan endast sttas, inte lsas" ER_CANT_USE_OPTION_HERE 42000 - nla "Foutieve toepassing/plaatsing van '%s'" - eng "Incorrect usage/placement of '%s'" - ger "Falsche Verwendung oder Platzierung von '%s'" 
- ita "Uso/posizione di '%s' sbagliato" - por "Errado uso/colocao de '%s'" - rus " '%s'" - spa "Equivocado uso/colocacin de '%s'" - swe "Fel anvnding/placering av '%s'" + nla "Foutieve toepassing/plaatsing van '%s'" + eng "Incorrect usage/placement of '%s'" + ger "Falsche Verwendung oder Platzierung von '%s'" + ita "Uso/posizione di '%s' sbagliato" + por "Errado uso/colocao de '%s'" + rus " '%s'" + spa "Equivocado uso/colocacin de '%s'" + swe "Fel anvnding/placering av '%s'" ER_NOT_SUPPORTED_YET 42000 - nla "Deze versie van MySQL ondersteunt nog geen '%s'" - eng "This version of MySQL doesn't yet support '%s'" - ger "Diese MySQL-Version untersttzt '%s' nicht" - ita "Questa versione di MySQL non supporta ancora '%s'" - por "Esta verso de MySQL no suporta ainda '%s'" - rus " MySQL '%s'" - spa "Esta versin de MySQL no soporta todavia '%s'" - swe "Denna version av MySQL kan nnu inte utfra '%s'" + nla "Deze versie van MySQL ondersteunt nog geen '%s'" + eng "This version of MySQL doesn't yet support '%s'" + ger "Diese MySQL-Version untersttzt '%s' nicht" + ita "Questa versione di MySQL non supporta ancora '%s'" + por "Esta verso de MySQL no suporta ainda '%s'" + rus " MySQL '%s'" + spa "Esta versin de MySQL no soporta todavia '%s'" + swe "Denna version av MySQL kan nnu inte utfra '%s'" ER_MASTER_FATAL_ERROR_READING_BINLOG - nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log" - eng "Got fatal error %d: '%-.128s' from master when reading data from binary log" - ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binren Logs" - ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario" - por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log" - rus " %d: '%-.128s' " - spa "Recibi fatal error %d: '%-.128s' del master cuando leyendo datos del binary log" - swe "Fick fatalt fel %d: '%-.128s' frn master vid lsning av binrloggen" + nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log" + eng "Got fatal error %d: '%-.128s' from master when reading data from binary log" + ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binren Logs" + ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario" + por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log" + rus " %d: '%-.128s' " + spa "Recibi fatal error %d: '%-.128s' del master cuando leyendo datos del binary log" + swe "Fick fatalt fel %d: '%-.128s' frn master vid lsning av binrloggen" ER_SLAVE_IGNORED_TABLE - eng "Slave SQL thread ignored the query because of replicate-*-table rules" - ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert" - por "Slave SQL thread ignorado a consulta devido s normas de replicao-*-tabela" - spa "Slave SQL thread ignorado el query debido a las reglas de replicacin-*-tabla" - swe "Slav SQL trden ignorerade frgan pga en replicate-*-table regel" + eng "Slave SQL thread ignored the query because of replicate-*-table rules" + ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert" + por "Slave SQL thread ignorado a consulta devido s normas de replicao-*-tabela" + spa "Slave SQL thread ignorado el query debido a las reglas de replicacin-*-tabla" + swe "Slav SQL trden ignorerade frgan pga en replicate-*-table regel" ER_INCORRECT_GLOBAL_LOCAL_VAR - eng "Variable '%-.64s' is a %s variable" - serbian "Incorrect foreign key definition for '%-.64s': %s" - ger "Variable '%-.64s' ist eine %s-Variable" - spa 
"Variable '%-.64s' es una %s variable" - swe "Variabel '%-.64s' r av typ %s" + eng "Variable '%-.64s' is a %s variable" + serbian "Incorrect foreign key definition for '%-.64s': %s" + ger "Variable '%-.64s' ist eine %s-Variable" + spa "Variable '%-.64s' es una %s variable" + swe "Variabel '%-.64s' r av typ %s" ER_WRONG_FK_DEF 42000 - eng "Incorrect foreign key definition for '%-.64s': %s" - ger "Falsche Fremdschlssel-Definition fr '%-.64s': %s" - por "Definio errada da chave estrangeira para '%-.64s': %s" - spa "Equivocada definicin de llave extranjera para '%-.64s': %s" - swe "Felaktig FOREIGN KEY-definition fr '%-.64s': %s" + eng "Incorrect foreign key definition for '%-.64s': %s" + ger "Falsche Fremdschlssel-Definition fr '%-.64s': %s" + por "Definio errada da chave estrangeira para '%-.64s': %s" + spa "Equivocada definicin de llave extranjera para '%-.64s': %s" + swe "Felaktig FOREIGN KEY-definition fr '%-.64s': %s" ER_KEY_REF_DO_NOT_MATCH_TABLE_REF - eng "Key reference and table reference don't match" - ger "Schlssel- und Tabellenverweis passen nicht zusammen" - por "Referncia da chave e referncia da tabela no coincidem" - spa "Referencia de llave y referencia de tabla no coinciden" - swe "Nyckelreferensen och tabellreferensen stmmer inte verens" + eng "Key reference and table reference don't match" + ger "Schlssel- und Tabellenverweis passen nicht zusammen" + por "Referncia da chave e referncia da tabela no coincidem" + spa "Referencia de llave y referencia de tabla no coinciden" + swe "Nyckelreferensen och tabellreferensen stmmer inte verens" ER_OPERAND_COLUMNS 21000 - eng "Operand should contain %d column(s)" - ger "Operand sollte %d Spalte(n) enthalten" - rus " %d " - spa "Operando debe tener %d columna(s)" - ukr " %d æ" + eng "Operand should contain %d column(s)" + ger "Operand sollte %d Spalte(n) enthalten" + rus " %d " + spa "Operando debe tener %d columna(s)" + ukr " %d æ" ER_SUBQUERY_NO_1_ROW 21000 - eng "Subquery returns more than 1 row" - ger "Unterabfrage lieferte mehr als einen Datensatz zurck" - por "Subconsulta retorna mais que 1 registro" - rus " " - spa "Subconsulta retorna mas que 1 lnea" - swe "Subquery returnerade mer n 1 rad" - ukr " ¦ i 1 " + eng "Subquery returns more than 1 row" + ger "Unterabfrage lieferte mehr als einen Datensatz zurck" + por "Subconsulta retorna mais que 1 registro" + rus " " + spa "Subconsulta retorna mas que 1 lnea" + swe "Subquery returnerade mer n 1 rad" + ukr " ¦ i 1 " ER_UNKNOWN_STMT_HANDLER - dan "Unknown prepared statement handler (%.*s) given to %s" - eng "Unknown prepared statement handler (%.*s) given to %s" - ger "Unbekannter Prepared-Statement-Handler (%.*s) fr %s angegeben" - por "Desconhecido manipulador de declarao preparado (%.*s) determinado para %s" - spa "Desconocido preparado comando handler (%.*s) dado para %s" - swe "Oknd PREPARED STATEMENT id (%.*s) var given till %s" - ukr "Unknown prepared statement handler (%.*s) given to %s" + dan "Unknown prepared statement handler (%.*s) given to %s" + eng "Unknown prepared statement handler (%.*s) given to %s" + ger "Unbekannter Prepared-Statement-Handler (%.*s) fr %s angegeben" + por "Desconhecido manipulador de declarao preparado (%.*s) determinado para %s" + spa "Desconocido preparado comando handler (%.*s) dado para %s" + swe "Oknd PREPARED STATEMENT id (%.*s) var given till %s" + ukr "Unknown prepared statement handler (%.*s) given to %s" ER_CORRUPT_HELP_DB - eng "Help database is corrupt or does not exist" - ger "Die Hilfe-Datenbank ist beschdigt oder existiert nicht" - 
por "Banco de dado de ajuda corrupto ou no existente" - spa "Base de datos Help est corrupto o no existe" - swe "Hjlpdatabasen finns inte eller r skadad" + eng "Help database is corrupt or does not exist" + ger "Die Hilfe-Datenbank ist beschdigt oder existiert nicht" + por "Banco de dado de ajuda corrupto ou no existente" + spa "Base de datos Help est corrupto o no existe" + swe "Hjlpdatabasen finns inte eller r skadad" ER_CYCLIC_REFERENCE - eng "Cyclic reference on subqueries" - ger "Zyklischer Verweis in Unterabfragen" - por "Referncia cclica em subconsultas" - rus " " - spa "Cclica referencia en subconsultas" - swe "Cyklisk referens i subqueries" - ukr "̦ Ц" + eng "Cyclic reference on subqueries" + ger "Zyklischer Verweis in Unterabfragen" + por "Referncia cclica em subconsultas" + rus " " + spa "Cclica referencia en subconsultas" + swe "Cyklisk referens i subqueries" + ukr "̦ Ц" ER_AUTO_CONVERT - eng "Converting column '%s' from %s to %s" - ger "Feld '%s' wird von %s nach %s umgewandelt" - por "Convertendo coluna '%s' de %s para %s" - rus " '%s' %s %s" - spa "Convirtiendo columna '%s' de %s para %s" - swe "Konvertar kolumn '%s' frn %s till %s" - ukr " '%s' %s %s" + eng "Converting column '%s' from %s to %s" + ger "Feld '%s' wird von %s nach %s umgewandelt" + por "Convertendo coluna '%s' de %s para %s" + rus " '%s' %s %s" + spa "Convirtiendo columna '%s' de %s para %s" + swe "Konvertar kolumn '%s' frn %s till %s" + ukr " '%s' %s %s" ER_ILLEGAL_REFERENCE 42S22 - eng "Reference '%-.64s' not supported (%s)" - ger "Verweis '%-.64s' wird nicht untersttzt (%s)" - por "Referncia '%-.64s' no suportada (%s)" - rus " '%-.64s' (%s)" - spa "Referencia '%-.64s' no soportada (%s)" - swe "Referens '%-.64s' stds inte (%s)" - ukr " '%-.64s' i (%s)" + eng "Reference '%-.64s' not supported (%s)" + ger "Verweis '%-.64s' wird nicht untersttzt (%s)" + por "Referncia '%-.64s' no suportada (%s)" + rus " '%-.64s' (%s)" + spa "Referencia '%-.64s' no soportada (%s)" + swe "Referens '%-.64s' stds inte (%s)" + ukr " '%-.64s' i (%s)" ER_DERIVED_MUST_HAVE_ALIAS 42000 - eng "Every derived table must have its own alias" - ger "Fr jede abgeleitete Tabelle muss ein eigener Alias angegeben werden" - por "Cada tabela derivada deve ter seu prprio alias" - spa "Cada tabla derivada debe tener su propio alias" - swe "Varje 'derived table' mste ha sitt eget alias" + eng "Every derived table must have its own alias" + ger "Fr jede abgeleitete Tabelle muss ein eigener Alias angegeben werden" + por "Cada tabela derivada deve ter seu prprio alias" + spa "Cada tabla derivada debe tener su propio alias" + swe "Varje 'derived table' mste ha sitt eget alias" ER_SELECT_REDUCED 01000 - eng "Select %u was reduced during optimization" - ger "Select %u wurde whrend der Optimierung reduziert" - por "Select %u foi reduzido durante otimizao" - rus "Select %u " - spa "Select %u fu reducido durante optimizacin" - swe "Select %u reducerades vid optimiering" - ukr "Select %u was iii" + eng "Select %u was reduced during optimization" + ger "Select %u wurde whrend der Optimierung reduziert" + por "Select %u foi reduzido durante otimizao" + rus "Select %u " + spa "Select %u fu reducido durante optimizacin" + swe "Select %u reducerades vid optimiering" + ukr "Select %u was iii" ER_TABLENAME_NOT_ALLOWED_HERE 42000 - eng "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s" - ger "Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden" - por "Tabela '%-.64s' de um dos SELECTs no pode ser 
usada em %-.32s" - spa "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s" - swe "Tabell '%-.64s' frn en SELECT kan inte anvndas i %-.32s" + eng "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s" + ger "Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden" + por "Tabela '%-.64s' de um dos SELECTs no pode ser usada em %-.32s" + spa "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s" + swe "Tabell '%-.64s' frn en SELECT kan inte anvndas i %-.32s" ER_NOT_SUPPORTED_AUTH_MODE 08004 - eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client" - ger "Client untersttzt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client" - por "Cliente no suporta o protocolo de autenticao exigido pelo servidor; considere a atualizao do cliente MySQL" - spa "Cliente no soporta protocolo de autenticacin solicitado por el servidor; considere actualizar el cliente MySQL" - swe "Klienten stder inte autentiseringsprotokollet som begrts av servern; vervg uppgradering av klientprogrammet." + eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client" + ger "Client untersttzt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client" + por "Cliente no suporta o protocolo de autenticao exigido pelo servidor; considere a atualizao do cliente MySQL" + spa "Cliente no soporta protocolo de autenticacin solicitado por el servidor; considere actualizar el cliente MySQL" + swe "Klienten stder inte autentiseringsprotokollet som begrts av servern; vervg uppgradering av klientprogrammet." ER_SPATIAL_CANT_HAVE_NULL 42000 - eng "All parts of a SPATIAL index must be NOT NULL" - ger "Alle Teile eines SPATIAL-Index mssen als NOT NULL deklariert sein" - por "Todas as partes de uma SPATIAL index devem ser NOT NULL" - spa "Todas las partes de una SPATIAL index deben ser NOT NULL" - swe "Alla delar av en SPATIAL index mste vara NOT NULL" + eng "All parts of a SPATIAL index must be NOT NULL" + ger "Alle Teile eines SPATIAL-Index mssen als NOT NULL deklariert sein" + por "Todas as partes de uma SPATIAL index devem ser NOT NULL" + spa "Todas las partes de una SPATIAL index deben ser NOT NULL" + swe "Alla delar av en SPATIAL index mste vara NOT NULL" ER_COLLATION_CHARSET_MISMATCH 42000 - eng "COLLATION '%s' is not valid for CHARACTER SET '%s'" - ger "COLLATION '%s' ist fr CHARACTER SET '%s' ungltig" - por "COLLATION '%s' no vlida para CHARACTER SET '%s'" - spa "COLLATION '%s' no es vlido para CHARACTER SET '%s'" - swe "COLLATION '%s' r inte tilltet fr CHARACTER SET '%s'" + eng "COLLATION '%s' is not valid for CHARACTER SET '%s'" + ger "COLLATION '%s' ist fr CHARACTER SET '%s' ungltig" + por "COLLATION '%s' no vlida para CHARACTER SET '%s'" + spa "COLLATION '%s' no es vlido para CHARACTER SET '%s'" + swe "COLLATION '%s' r inte tilltet fr CHARACTER SET '%s'" ER_SLAVE_WAS_RUNNING - eng "Slave is already running" - ger "Slave luft bereits" - por "O slave j est rodando" - spa "Slave ya est funcionando" - swe "Slaven har redan startat" + eng "Slave is already running" + ger "Slave luft bereits" + por "O slave j est rodando" + spa "Slave ya est funcionando" + swe "Slaven har redan startat" ER_SLAVE_WAS_NOT_RUNNING - eng "Slave already has been stopped" - ger "Slave wurde bereits angehalten" - por "O slave j est parado" - spa "Slave ya fu parado" - swe "Slaven har redan 
stoppat" + eng "Slave already has been stopped" + ger "Slave wurde bereits angehalten" + por "O slave j est parado" + spa "Slave ya fu parado" + swe "Slaven har redan stoppat" ER_TOO_BIG_FOR_UNCOMPRESS - eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)" - ger "Unkomprimierte Daten sind zu gro. Die maximale Gre betrgt %d (wahrscheinlich wurde die Lnge der unkomprimierten Daten beschdigt)" - por "Tamanho muito grande dos dados des comprimidos. O mximo tamanho %d. (provavelmente, o comprimento dos dados descomprimidos est corrupto)" - spa "Tamao demasiado grande para datos descomprimidos. El mximo tamao es %d. (probablemente, extensin de datos descomprimidos fu corrompida)" + eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)" + ger "Unkomprimierte Daten sind zu gro. Die maximale Gre betrgt %d (wahrscheinlich wurde die Lnge der unkomprimierten Daten beschdigt)" + por "Tamanho muito grande dos dados des comprimidos. O mximo tamanho %d. (provavelmente, o comprimento dos dados descomprimidos est corrupto)" + spa "Tamao demasiado grande para datos descomprimidos. El mximo tamao es %d. (probablemente, extensin de datos descomprimidos fu corrompida)" ER_ZLIB_Z_MEM_ERROR - eng "ZLIB: Not enough memory" - ger "ZLIB: Nicht genug Speicher" - por "ZLIB: No suficiente memria disponvel" - spa "Z_MEM_ERROR: No suficiente memoria para zlib" + eng "ZLIB: Not enough memory" + ger "ZLIB: Nicht genug Speicher" + por "ZLIB: No suficiente memria disponvel" + spa "Z_MEM_ERROR: No suficiente memoria para zlib" ER_ZLIB_Z_BUF_ERROR - eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)" - ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Lnge der unkomprimierten Daten beschdigt)" - por "ZLIB: No suficiente espao no buffer emissor (provavelmente, o comprimento dos dados descomprimidos est corrupto)" - spa "Z_BUF_ERROR: No suficiente espacio en el bfer de salida para zlib (probablemente, extensin de datos descomprimidos fu corrompida)" + eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)" + ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Lnge der unkomprimierten Daten beschdigt)" + por "ZLIB: No suficiente espao no buffer emissor (provavelmente, o comprimento dos dados descomprimidos est corrupto)" + spa "Z_BUF_ERROR: No suficiente espacio en el bfer de salida para zlib (probablemente, extensin de datos descomprimidos fu corrompida)" ER_ZLIB_Z_DATA_ERROR - eng "ZLIB: Input data corrupted" - ger "ZLIB: Eingabedaten beschdigt" - por "ZLIB: Dados de entrada est corrupto" - spa "ZLIB: Dato de entrada fu corrompido para zlib" + eng "ZLIB: Input data corrupted" + ger "ZLIB: Eingabedaten beschdigt" + por "ZLIB: Dados de entrada est corrupto" + spa "ZLIB: Dato de entrada fu corrompido para zlib" ER_CUT_VALUE_GROUP_CONCAT - eng "%d line(s) were cut by GROUP_CONCAT()" - ger "%d Zeile(n) durch GROUP_CONCAT() abgeschnitten" - por "%d linha(s) foram cortada(s) por GROUP_CONCAT()" - spa "%d lnea(s) fue(fueron) cortadas por group_concat()" - swe "%d rad(er) kapades av group_concat()" - ukr "%d line(s) was(were) cut by group_concat()" + eng "%d line(s) were cut by GROUP_CONCAT()" + ger "%d Zeile(n) durch GROUP_CONCAT() abgeschnitten" + por "%d linha(s) foram cortada(s) por GROUP_CONCAT()" + spa "%d lnea(s) fue(fueron) cortadas por 
group_concat()" + swe "%d rad(er) kapades av group_concat()" + ukr "%d line(s) was(were) cut by group_concat()" ER_WARN_TOO_FEW_RECORDS 01000 - eng "Row %ld doesn't contain data for all columns" - ger "Zeile %ld enthlt nicht fr alle Felder Daten" - por "Conta de registro menor que a conta de coluna na linha %ld" - spa "Lnea %ld no contiene datos para todas las columnas" + eng "Row %ld doesn't contain data for all columns" + ger "Zeile %ld enthlt nicht fr alle Felder Daten" + por "Conta de registro menor que a conta de coluna na linha %ld" + spa "Lnea %ld no contiene datos para todas las columnas" ER_WARN_TOO_MANY_RECORDS 01000 - eng "Row %ld was truncated; it contained more data than there were input columns" - ger "Zeile %ld gekrzt, die Zeile enthielt mehr Daten, als es Eingabefelder gibt" - por "Conta de registro maior que a conta de coluna na linha %ld" - spa "Lnea %ld fu truncada; La misma contine mas datos que las que existen en las columnas de entrada" + eng "Row %ld was truncated; it contained more data than there were input columns" + ger "Zeile %ld gekrzt, die Zeile enthielt mehr Daten, als es Eingabefelder gibt" + por "Conta de registro maior que a conta de coluna na linha %ld" + spa "Lnea %ld fu truncada; La misma contine mas datos que las que existen en las columnas de entrada" ER_WARN_NULL_TO_NOTNULL 22004 - eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld" - ger "Feld auf Vorgabewert gesetzt, da NULL fr NOT-NULL-Feld '%s' in Zeile %ld angegeben" - por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld" - spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la lnea %ld" + eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld" + ger "Feld auf Vorgabewert gesetzt, da NULL fr NOT-NULL-Feld '%s' in Zeile %ld angegeben" + por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld" + spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la lnea %ld" ER_WARN_DATA_OUT_OF_RANGE 22003 - eng "Out of range value for column '%s' at row %ld" + eng "Out of range value for column '%s' at row %ld" WARN_DATA_TRUNCATED 01000 - eng "Data truncated for column '%s' at row %ld" - ger "Daten abgeschnitten fr Feld '%s' in Zeile %ld" - por "Dado truncado para coluna '%s' na linha %ld" - spa "Datos truncados para columna '%s' en la lnea %ld" + eng "Data truncated for column '%s' at row %ld" + ger "Daten abgeschnitten fr Feld '%s' in Zeile %ld" + por "Dado truncado para coluna '%s' na linha %ld" + spa "Datos truncados para columna '%s' en la lnea %ld" ER_WARN_USING_OTHER_HANDLER - eng "Using storage engine %s for table '%s'" - ger "Fr Tabelle '%s' wird Speicher-Engine %s benutzt" - por "Usando engine de armazenamento %s para tabela '%s'" - spa "Usando motor de almacenamiento %s para tabla '%s'" - swe "Anvnder handler %s fr tabell '%s'" + eng "Using storage engine %s for table '%s'" + ger "Fr Tabelle '%s' wird Speicher-Engine %s benutzt" + por "Usando engine de armazenamento %s para tabela '%s'" + spa "Usando motor de almacenamiento %s para tabla '%s'" + swe "Anvnder handler %s fr tabell '%s'" ER_CANT_AGGREGATE_2COLLATIONS - eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'" - ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) fr Operation '%s'" - por "Combinao ilegal de collations (%s,%s) e (%s,%s) para operao '%s'" - spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operacin '%s'" + eng "Illegal mix of collations (%s,%s) and (%s,%s) 
for operation '%s'" + ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) fr Operation '%s'" + por "Combinao ilegal de collations (%s,%s) e (%s,%s) para operao '%s'" + spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operacin '%s'" ER_DROP_USER - eng "Cannot drop one or more of the requested users" - ger "Kann einen oder mehrere der angegebenen Benutzer nicht lschen" + eng "Cannot drop one or more of the requested users" + ger "Kann einen oder mehrere der angegebenen Benutzer nicht lschen" ER_REVOKE_GRANTS - eng "Can't revoke all privileges for one or more of the requested users" - ger "Kann nicht alle Berechtigungen widerrufen, die fr einen oder mehrere Benutzer gewhrt wurden" - por "No pode revocar todos os privilgios, grant para um ou mais dos usurios pedidos" - spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados" + eng "Can't revoke all privileges for one or more of the requested users" + ger "Kann nicht alle Berechtigungen widerrufen, die fr einen oder mehrere Benutzer gewhrt wurden" + por "No pode revocar todos os privilgios, grant para um ou mais dos usurios pedidos" + spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados" ER_CANT_AGGREGATE_3COLLATIONS - eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'" - ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) fr Operation '%s'" - por "Ilegal combinao de collations (%s,%s), (%s,%s), (%s,%s) para operao '%s'" - spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operacin '%s'" + eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'" + ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) fr Operation '%s'" + por "Ilegal combinao de collations (%s,%s), (%s,%s), (%s,%s) para operao '%s'" + spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operacin '%s'" ER_CANT_AGGREGATE_NCOLLATIONS - eng "Illegal mix of collations for operation '%s'" - ger "Unerlaubte Mischung von Sortierreihenfolgen fr Operation '%s'" - por "Ilegal combinao de collations para operao '%s'" - spa "Ilegal mezcla de collations para operacin '%s'" + eng "Illegal mix of collations for operation '%s'" + ger "Unerlaubte Mischung von Sortierreihenfolgen fr Operation '%s'" + por "Ilegal combinao de collations para operao '%s'" + spa "Ilegal mezcla de collations para operacin '%s'" ER_VARIABLE_IS_NOT_STRUCT - eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)" - ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)" - por "Varivel '%-.64s' no uma varivel componente (No pode ser usada como XXXX.varivel_nome)" - spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)" + eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)" + ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)" + por "Varivel '%-.64s' no uma varivel componente (No pode ser usada como XXXX.varivel_nome)" + spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)" ER_UNKNOWN_COLLATION - eng "Unknown collation: '%-.64s'" - ger "Unbekannte Sortierreihenfolge: '%-.64s'" - por "Collation desconhecida: '%-.64s'" - spa "Collation desconocida: '%-.64s'" + eng "Unknown collation: '%-.64s'" + ger "Unbekannte Sortierreihenfolge: 
'%-.64s'" + por "Collation desconhecida: '%-.64s'" + spa "Collation desconocida: '%-.64s'" ER_SLAVE_IGNORED_SSL_PARAMS - eng "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started" - ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Untersttzung kompiliert wurde. Sie knnen aber spter verwendet werden, wenn ein MySQL-Slave mit SSL gestartet wird" - por "SSL parmetros em CHANGE MASTER so ignorados porque este escravo MySQL foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado." - spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado" + eng "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started" + ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Untersttzung kompiliert wurde. Sie knnen aber spter verwendet werden, wenn ein MySQL-Slave mit SSL gestartet wird" + por "SSL parmetros em CHANGE MASTER so ignorados porque este escravo MySQL foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado." + spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado" ER_SERVER_IS_IN_SECURE_AUTH_MODE - eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format" - ger "Server luft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ndern" - por "Servidor est rodando em --secure-auth modo, porm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato" - rus " --secure-auth ( ), '%s'@'%s' Σ ; " - spa "Servidor est rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato" + eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format" + ger "Server luft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ndern" + por "Servidor est rodando em --secure-auth modo, porm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato" + rus " --secure-auth ( ), '%s'@'%s' Σ ; " + spa "Servidor est rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato" ER_WARN_FIELD_RESOLVED - eng "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d" - ger "Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. 
%d aufgelst" - por "Campo ou referncia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d" - rus " '%-.64s%s%-.64s%s%-.64s' SELECT #%d SELECT #%d" - spa "Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d" - ukr " '%-.64s%s%-.64s%s%-.64s' SELECT #%d SELECT #%d" + eng "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d" + ger "Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelst" + por "Campo ou referncia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d" + rus " '%-.64s%s%-.64s%s%-.64s' SELECT #%d SELECT #%d" + spa "Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d" + ukr " '%-.64s%s%-.64s%s%-.64s' SELECT #%d SELECT #%d" ER_BAD_SLAVE_UNTIL_COND - eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL" - ger "Falscher Parameter oder falsche Kombination von Parametern fr START SLAVE UNTIL" - por "Parmetro ou combinao de parmetros errado para START SLAVE UNTIL" - spa "Parametro equivocado o combinacin de parametros para START SLAVE UNTIL" + eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL" + ger "Falscher Parameter oder falsche Kombination von Parametern fr START SLAVE UNTIL" + por "Parmetro ou combinao de parmetros errado para START SLAVE UNTIL" + spa "Parametro equivocado o combinacin de parametros para START SLAVE UNTIL" ER_MISSING_SKIP_SLAVE - eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart" - ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-fr-Schritt-Replikation ausgefhrt wird. Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet" - por " recomendado para rodar com --skip-slave-start quando fazendo replicao passo-por-passo com START SLAVE UNTIL, de outra forma voc no est seguro em caso de inesperada reinicialio do mysqld escravo" - spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicacin step-by-step con START SLAVE UNTIL, a menos que usted no est seguro en caso de inesperada reinicializacin del mysqld slave" + eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart" + ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-fr-Schritt-Replikation ausgefhrt wird. Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet" + por " recomendado para rodar com --skip-slave-start quando fazendo replicao passo-por-passo com START SLAVE UNTIL, de outra forma voc no est seguro em caso de inesperada reinicialio do mysqld escravo" + spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicacin step-by-step con START SLAVE UNTIL, a menos que usted no est seguro en caso de inesperada reinicializacin del mysqld slave" ER_UNTIL_COND_IGNORED - eng "SQL thread is not to be started so UNTIL options are ignored" - ger "SQL-Thread soll nicht gestartet werden. 
Daher werden UNTIL-Optionen ignoriert" - por "Thread SQL no pode ser inicializado tal que opes UNTIL so ignoradas" - spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas" + eng "SQL thread is not to be started so UNTIL options are ignored" + ger "SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert" + por "Thread SQL no pode ser inicializado tal que opes UNTIL so ignoradas" + spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas" ER_WRONG_NAME_FOR_INDEX 42000 - eng "Incorrect index name '%-.100s'" - ger "Falscher Indexname '%-.100s'" - por "Incorreto nome de ndice '%-.100s'" - spa "Nombre de ndice incorrecto '%-.100s'" - swe "Felaktigt index namn '%-.100s'" + eng "Incorrect index name '%-.100s'" + ger "Falscher Indexname '%-.100s'" + por "Incorreto nome de ndice '%-.100s'" + spa "Nombre de ndice incorrecto '%-.100s'" + swe "Felaktigt index namn '%-.100s'" ER_WRONG_NAME_FOR_CATALOG 42000 - eng "Incorrect catalog name '%-.100s'" - ger "Falscher Katalogname '%-.100s'" - por "Incorreto nome de catlogo '%-.100s'" - spa "Nombre de catalog incorrecto '%-.100s'" - swe "Felaktigt katalog namn '%-.100s'" + eng "Incorrect catalog name '%-.100s'" + ger "Falscher Katalogname '%-.100s'" + por "Incorreto nome de catlogo '%-.100s'" + spa "Nombre de catalog incorrecto '%-.100s'" + swe "Felaktigt katalog namn '%-.100s'" ER_WARN_QC_RESIZE - eng "Query cache failed to set size %lu; new query cache size is %lu" - ger "nderung der Query-Cache-Gre auf %lu fehlgeschlagen; neue Query-Cache-Gre ist %lu" - por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache %lu" - rus " %lu, - %lu" - spa "Query cache fallada para configurar tamao %lu, nuevo tamao de query cache es %lu" - swe "Storleken av "Query cache" kunde inte sttas till %lu, ny storlek r %lu" - ukr " Ԧ ͦ %lu, ͦ Ԧ - %lu" + eng "Query cache failed to set size %lu; new query cache size is %lu" + ger "nderung der Query-Cache-Gre auf %lu fehlgeschlagen; neue Query-Cache-Gre ist %lu" + por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache %lu" + rus " %lu, - %lu" + spa "Query cache fallada para configurar tamao %lu, nuevo tamao de query cache es %lu" + swe "Storleken av "Query cache" kunde inte sttas till %lu, ny storlek r %lu" + ukr " Ԧ ͦ %lu, ͦ Ԧ - %lu" ER_BAD_FT_COLUMN - eng "Column '%-.64s' cannot be part of FULLTEXT index" - ger "Feld '%-.64s' kann nicht Teil eines FULLTEXT-Index sein" - por "Coluna '%-.64s' no pode ser parte de ndice FULLTEXT" - spa "Columna '%-.64s' no puede ser parte de FULLTEXT index" - swe "Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index" + eng "Column '%-.64s' cannot be part of FULLTEXT index" + ger "Feld '%-.64s' kann nicht Teil eines FULLTEXT-Index sein" + por "Coluna '%-.64s' no pode ser parte de ndice FULLTEXT" + spa "Columna '%-.64s' no puede ser parte de FULLTEXT index" + swe "Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index" ER_UNKNOWN_KEY_CACHE - eng "Unknown key cache '%-.100s'" - ger "Unbekannter Schlssel-Cache '%-.100s'" - por "Key cache desconhecida '%-.100s'" - spa "Desconocida key cache '%-.100s'" - swe "Oknd nyckel cache '%-.100s'" + eng "Unknown key cache '%-.100s'" + ger "Unbekannter Schlssel-Cache '%-.100s'" + por "Key cache desconhecida '%-.100s'" + spa "Desconocida key cache '%-.100s'" + swe "Oknd nyckel cache '%-.100s'" ER_WARN_HOSTNAME_WONT_WORK - eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work" - ger "MySQL wurde mit 
--skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe mglich ist" - por "MySQL foi inicializado em modo --skip-name-resolve. Voc necesita reincializ-lo sem esta opo para este grant funcionar" - spa "MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opcin para este derecho funcionar" + eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work" + ger "MySQL wurde mit --skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe mglich ist" + por "MySQL foi inicializado em modo --skip-name-resolve. Voc necesita reincializ-lo sem esta opo para este grant funcionar" + spa "MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opcin para este derecho funcionar" ER_UNKNOWN_STORAGE_ENGINE 42000 - eng "Unknown table engine '%s'" - ger "Unbekannte Speicher-Engine '%s'" - por "Motor de tabela desconhecido '%s'" - spa "Desconocido motor de tabla '%s'" + eng "Unknown table engine '%s'" + ger "Unbekannte Speicher-Engine '%s'" + por "Motor de tabela desconhecido '%s'" + spa "Desconocido motor de tabla '%s'" ER_UNUSED_1 - eng "'%s' is deprecated; use '%s' instead" - ger "'%s' ist veraltet. Bitte benutzen Sie '%s'" - por "'%s' desatualizado. Use '%s' em seu lugar" - spa "'%s' est desaprobado, use '%s' en su lugar" + eng "'%s' is deprecated; use '%s' instead" + ger "'%s' ist veraltet. Bitte benutzen Sie '%s'" + por "'%s' desatualizado. Use '%s' em seu lugar" + spa "'%s' est desaprobado, use '%s' en su lugar" ER_NON_UPDATABLE_TABLE - eng "The target table %-.100s of the %s is not updatable" - ger "Die Zieltabelle %-.100s von %s ist nicht aktualisierbar" - por "A tabela destino %-.100s do %s no atualizvel" - rus " %-.100s %s " - spa "La tabla destino %-.100s del %s no es actualizable" - swe "Tabell %-.100s anvnd med '%s' r inte uppdateringsbar" - ukr " %-.100s %s " + eng "The target table %-.100s of the %s is not updatable" + ger "Die Zieltabelle %-.100s von %s ist nicht aktualisierbar" + por "A tabela destino %-.100s do %s no atualizvel" + rus " %-.100s %s " + spa "La tabla destino %-.100s del %s no es actualizable" + swe "Tabell %-.100s anvnd med '%s' r inte uppdateringsbar" + ukr " %-.100s %s " ER_FEATURE_DISABLED - eng "The '%s' feature is disabled; you need MySQL built with '%s' to have it working" - ger "Das Feature '%s' ist ausgeschaltet, Sie mssen MySQL mit '%s' bersetzen, damit es verfgbar ist" - por "O recurso '%s' foi desativado; voc necessita MySQL construdo com '%s' para ter isto funcionando" - spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando" - swe "'%s' r inte aktiverad; Fr att aktivera detta mste du bygga om MySQL med '%s' definerad" + eng "The '%s' feature is disabled; you need MySQL built with '%s' to have it working" + ger "Das Feature '%s' ist ausgeschaltet, Sie mssen MySQL mit '%s' bersetzen, damit es verfgbar ist" + por "O recurso '%s' foi desativado; voc necessita MySQL construdo com '%s' para ter isto funcionando" + spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando" + swe "'%s' r inte aktiverad; Fr att aktivera detta mste du bygga om MySQL med '%s' definerad" ER_OPTION_PREVENTS_STATEMENT - eng "The MySQL server is running with the %s option so it cannot execute this statement" - ger "Der MySQL-Server luft mit der Option %s und kann diese Anweisung deswegen 
nicht ausfhren" - por "O servidor MySQL est rodando com a opo %s razo pela qual no pode executar esse commando" - spa "El servidor MySQL est rodando con la opcin %s tal que no puede ejecutar este comando" - swe "MySQL r startad med %s. Pga av detta kan du inte anvnda detta kommando" + eng "The MySQL server is running with the %s option so it cannot execute this statement" + ger "Der MySQL-Server luft mit der Option %s und kann diese Anweisung deswegen nicht ausfhren" + por "O servidor MySQL est rodando com a opo %s razo pela qual no pode executar esse commando" + spa "El servidor MySQL est rodando con la opcin %s tal que no puede ejecutar este comando" + swe "MySQL r startad med %s. Pga av detta kan du inte anvnda detta kommando" ER_DUPLICATED_VALUE_IN_TYPE - eng "Column '%-.100s' has duplicated value '%-.64s' in %s" - ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s" - por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" - spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s" + eng "Column '%-.100s' has duplicated value '%-.64s' in %s" + ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s" + por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" + spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s" ER_TRUNCATED_WRONG_VALUE 22007 - eng "Truncated incorrect %-.32s value: '%-.128s'" - ger "Falscher %-.32s-Wert gekrzt: '%-.128s'" - por "Truncado errado %-.32s valor: '%-.128s'" - spa "Equivocado truncado %-.32s valor: '%-.128s'" + eng "Truncated incorrect %-.32s value: '%-.128s'" + ger "Falscher %-.32s-Wert gekrzt: '%-.128s'" + por "Truncado errado %-.32s valor: '%-.128s'" + spa "Equivocado truncado %-.32s valor: '%-.128s'" ER_TOO_MUCH_AUTO_TIMESTAMP_COLS - eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" - ger "Fehlerhafte Tabellendefinition. Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben" - por "Incorreta definio de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE clusula" - spa "Incorrecta definicin de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE clusula" + eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" + ger "Fehlerhafte Tabellendefinition. 
Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben" + por "Incorreta definio de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE clusula" + spa "Incorrecta definicin de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE clusula" ER_INVALID_ON_UPDATE - eng "Invalid ON UPDATE clause for '%-.64s' column" - ger "Ungltige ON-UPDATE-Klausel fr Spalte '%-.64s'" - por "Invlida clusula ON UPDATE para campo '%-.64s'" - spa "Invlido ON UPDATE clusula para campo '%-.64s'" + eng "Invalid ON UPDATE clause for '%-.64s' column" + ger "Ungltige ON-UPDATE-Klausel fr Spalte '%-.64s'" + por "Invlida clusula ON UPDATE para campo '%-.64s'" + spa "Invlido ON UPDATE clusula para campo '%-.64s'" ER_UNSUPPORTED_PS - eng "This command is not supported in the prepared statement protocol yet" - ger "Dieser Befehl wird im Protokoll fr vorbereitete Anweisungen noch nicht untersttzt" + eng "This command is not supported in the prepared statement protocol yet" + ger "Dieser Befehl wird im Protokoll fr vorbereitete Anweisungen noch nicht untersttzt" ER_GET_ERRMSG - dan "Modtog fejl %d '%-.100s' fra %s" - eng "Got error %d '%-.100s' from %s" - ger "Fehler %d '%-.100s' von %s" - nor "Mottok feil %d '%-.100s' fa %s" - norwegian-ny "Mottok feil %d '%-.100s' fra %s" + dan "Modtog fejl %d '%-.100s' fra %s" + eng "Got error %d '%-.100s' from %s" + ger "Fehler %d '%-.100s' von %s" + nor "Mottok feil %d '%-.100s' fa %s" + norwegian-ny "Mottok feil %d '%-.100s' fra %s" ER_GET_TEMPORARY_ERRMSG - dan "Modtog temporary fejl %d '%-.100s' fra %s" - eng "Got temporary error %d '%-.100s' from %s" - ger "Temporrer Fehler %d '%-.100s' von %s" - nor "Mottok temporary feil %d '%-.100s' fra %s" - norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s" + dan "Modtog temporary fejl %d '%-.100s' fra %s" + eng "Got temporary error %d '%-.100s' from %s" + ger "Temporrer Fehler %d '%-.100s' von %s" + nor "Mottok temporary feil %d '%-.100s' fra %s" + norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s" ER_UNKNOWN_TIME_ZONE - eng "Unknown or incorrect time zone: '%-.64s'" - ger "Unbekannte oder falsche Zeitzone: '%-.64s'" + eng "Unknown or incorrect time zone: '%-.64s'" + ger "Unbekannte oder falsche Zeitzone: '%-.64s'" ER_WARN_INVALID_TIMESTAMP - eng "Invalid TIMESTAMP value in column '%s' at row %ld" - ger "Ungltiger TIMESTAMP-Wert in Feld '%s', Zeile %ld" + eng "Invalid TIMESTAMP value in column '%s' at row %ld" + ger "Ungltiger TIMESTAMP-Wert in Feld '%s', Zeile %ld" ER_INVALID_CHARACTER_STRING - eng "Invalid %s character string: '%.64s'" - ger "Ungltiger %s-Zeichen-String: '%.64s'" + eng "Invalid %s character string: '%.64s'" + ger "Ungltiger %s-Zeichen-String: '%.64s'" ER_WARN_ALLOWED_PACKET_OVERFLOWED - eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated" - ger "Ergebnis von %s() war grer als max_allowed_packet (%ld) Bytes und wurde deshalb gekrzt" + eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated" + ger "Ergebnis von %s() war grer als max_allowed_packet (%ld) Bytes und wurde deshalb gekrzt" ER_CONFLICTING_DECLARATIONS - eng "Conflicting declarations: '%s%s' and '%s%s'" - ger "Widersprchliche Deklarationen: '%s%s' und '%s%s'" + eng "Conflicting declarations: '%s%s' and '%s%s'" + ger "Widersprchliche Deklarationen: '%s%s' und '%s%s'" ER_SP_NO_RECURSIVE_CREATE 2F003 - eng "Can't create a %s from within another stored routine" - ger "Kann kein %s 
innerhalb einer anderen gespeicherten Routine erzeugen" + eng "Can't create a %s from within another stored routine" + ger "Kann kein %s innerhalb einer anderen gespeicherten Routine erzeugen" ER_SP_ALREADY_EXISTS 42000 - eng "%s %s already exists" - ger "%s %s existiert bereits" + eng "%s %s already exists" + ger "%s %s existiert bereits" ER_SP_DOES_NOT_EXIST 42000 - eng "%s %s does not exist" - ger "%s %s existiert nicht" + eng "%s %s does not exist" + ger "%s %s existiert nicht" ER_SP_DROP_FAILED - eng "Failed to DROP %s %s" - ger "DROP %s %s ist fehlgeschlagen" + eng "Failed to DROP %s %s" + ger "DROP %s %s ist fehlgeschlagen" ER_SP_STORE_FAILED - eng "Failed to CREATE %s %s" - ger "CREATE %s %s ist fehlgeschlagen" + eng "Failed to CREATE %s %s" + ger "CREATE %s %s ist fehlgeschlagen" ER_SP_LILABEL_MISMATCH 42000 - eng "%s with no matching label: %s" - ger "%s ohne passende Marke: %s" + eng "%s with no matching label: %s" + ger "%s ohne passende Marke: %s" ER_SP_LABEL_REDEFINE 42000 - eng "Redefining label %s" - ger "Neudefinition der Marke %s" + eng "Redefining label %s" + ger "Neudefinition der Marke %s" ER_SP_LABEL_MISMATCH 42000 - eng "End-label %s without match" - ger "Ende-Marke %s ohne zugehrigen Anfang" + eng "End-label %s without match" + ger "Ende-Marke %s ohne zugehrigen Anfang" ER_SP_UNINIT_VAR 01000 - eng "Referring to uninitialized variable %s" - ger "Zugriff auf nichtinitialisierte Variable %s" + eng "Referring to uninitialized variable %s" + ger "Zugriff auf nichtinitialisierte Variable %s" ER_SP_BADSELECT 0A000 - eng "PROCEDURE %s can't return a result set in the given context" - ger "PROCEDURE %s kann im gegebenen Kontext keine Ergebnismenge zurckgeben" + eng "PROCEDURE %s can't return a result set in the given context" + ger "PROCEDURE %s kann im gegebenen Kontext keine Ergebnismenge zurckgeben" ER_SP_BADRETURN 42000 - eng "RETURN is only allowed in a FUNCTION" - ger "RETURN ist nur innerhalb einer FUNCTION erlaubt" + eng "RETURN is only allowed in a FUNCTION" + ger "RETURN ist nur innerhalb einer FUNCTION erlaubt" ER_SP_BADSTATEMENT 0A000 - eng "%s is not allowed in stored procedures" - ger "%s ist in gespeicherten Prozeduren nicht erlaubt" + eng "%s is not allowed in stored procedures" + ger "%s ist in gespeicherten Prozeduren nicht erlaubt" ER_UPDATE_LOG_DEPRECATED_IGNORED 42000 - eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" - ger "Das Update-Log ist veraltet und wurde durch das Binr-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert" + eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" + ger "Das Update-Log ist veraltet und wurde durch das Binr-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert" ER_UPDATE_LOG_DEPRECATED_TRANSLATED 42000 - eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" - ger "Das Update-Log ist veraltet und wurde durch das Binr-Log ersetzt. SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN bersetzt" + eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" + ger "Das Update-Log ist veraltet und wurde durch das Binr-Log ersetzt. 
SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN übersetzt" ER_QUERY_INTERRUPTED 70100 - eng "Query execution was interrupted" - ger "Ausführung der Abfrage wurde unterbrochen" + eng "Query execution was interrupted" + ger "Ausführung der Abfrage wurde unterbrochen" ER_SP_WRONG_NO_OF_ARGS 42000 - eng "Incorrect number of arguments for %s %s; expected %u, got %u" - ger "Falsche Anzahl von Argumenten für %s %s; erwarte %u, erhalte %u" + eng "Incorrect number of arguments for %s %s; expected %u, got %u" + ger "Falsche Anzahl von Argumenten für %s %s; erwarte %u, erhalte %u" ER_SP_COND_MISMATCH 42000 - eng "Undefined CONDITION: %s" - ger "Undefinierte CONDITION: %s" + eng "Undefined CONDITION: %s" + ger "Undefinierte CONDITION: %s" ER_SP_NORETURN 42000 - eng "No RETURN found in FUNCTION %s" - ger "Kein RETURN in FUNCTION %s gefunden" + eng "No RETURN found in FUNCTION %s" + ger "Kein RETURN in FUNCTION %s gefunden" ER_SP_NORETURNEND 2F005 - eng "FUNCTION %s ended without RETURN" - ger "FUNCTION %s endete ohne RETURN" + eng "FUNCTION %s ended without RETURN" + ger "FUNCTION %s endete ohne RETURN" ER_SP_BAD_CURSOR_QUERY 42000 - eng "Cursor statement must be a SELECT" - ger "Cursor-Anweisung muss ein SELECT sein" + eng "Cursor statement must be a SELECT" + ger "Cursor-Anweisung muss ein SELECT sein" ER_SP_BAD_CURSOR_SELECT 42000 - eng "Cursor SELECT must not have INTO" - ger "Cursor-SELECT darf kein INTO haben" + eng "Cursor SELECT must not have INTO" + ger "Cursor-SELECT darf kein INTO haben" ER_SP_CURSOR_MISMATCH 42000 - eng "Undefined CURSOR: %s" - ger "Undefinierter CURSOR: %s" + eng "Undefined CURSOR: %s" + ger "Undefinierter CURSOR: %s" ER_SP_CURSOR_ALREADY_OPEN 24000 - eng "Cursor is already open" - ger "Cursor ist schon geöffnet" + eng "Cursor is already open" + ger "Cursor ist schon geöffnet" ER_SP_CURSOR_NOT_OPEN 24000 - eng "Cursor is not open" - ger "Cursor ist nicht geöffnet" + eng "Cursor is not open" + ger "Cursor ist nicht geöffnet" ER_SP_UNDECLARED_VAR 42000 - eng "Undeclared variable: %s" - ger "Nicht deklarierte Variable: %s" + eng "Undeclared variable: %s" + ger "Nicht deklarierte Variable: %s" ER_SP_WRONG_NO_OF_FETCH_ARGS - eng "Incorrect number of FETCH variables" - ger "Falsche Anzahl von FETCH-Variablen" + eng "Incorrect number of FETCH variables" + ger "Falsche Anzahl von FETCH-Variablen" ER_SP_FETCH_NO_DATA 02000 - eng "No data - zero rows fetched, selected, or processed" - ger "Keine Daten - null Zeilen geholt (fetch), ausgewählt oder verarbeitet" + eng "No data - zero rows fetched, selected, or processed" + ger "Keine Daten - null Zeilen geholt (fetch), ausgewählt oder verarbeitet" ER_SP_DUP_PARAM 42000 - eng "Duplicate parameter: %s" - ger "Doppelter Parameter: %s" + eng "Duplicate parameter: %s" + ger "Doppelter Parameter: %s" ER_SP_DUP_VAR 42000 - eng "Duplicate variable: %s" - ger "Doppelte Variable: %s" + eng "Duplicate variable: %s" + ger "Doppelte Variable: %s" ER_SP_DUP_COND 42000 - eng "Duplicate condition: %s" - ger "Doppelte Bedingung: %s" + eng "Duplicate condition: %s" + ger "Doppelte Bedingung: %s" ER_SP_DUP_CURS 42000 - eng "Duplicate cursor: %s" - ger "Doppelter Cursor: %s" + eng "Duplicate cursor: %s" + ger "Doppelter Cursor: %s" ER_SP_CANT_ALTER - eng "Failed to ALTER %s %s" - ger "ALTER %s %s fehlgeschlagen" + eng "Failed to ALTER %s %s" + ger "ALTER %s %s fehlgeschlagen" ER_SP_SUBSELECT_NYI 0A000 - eng "Subquery value not supported" - ger "Subquery-Wert wird nicht unterstützt" + eng "Subquery value not supported" + ger "Subquery-Wert wird nicht unterstützt"
ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG 0A000 eng "%s is not allowed in stored function or trigger" - ger "%s ist in gespeicherten Funktionen und in Triggern nicht erlaubt" + ger "%s ist in gespeicherten Funktionen und in Triggern nicht erlaubt" ER_SP_VARCOND_AFTER_CURSHNDLR 42000 - eng "Variable or condition declaration after cursor or handler declaration" - ger "Deklaration einer Variablen oder einer Bedingung nach der Deklaration eines Cursors oder eines Handlers" + eng "Variable or condition declaration after cursor or handler declaration" + ger "Deklaration einer Variablen oder einer Bedingung nach der Deklaration eines Cursors oder eines Handlers" ER_SP_CURSOR_AFTER_HANDLER 42000 - eng "Cursor declaration after handler declaration" - ger "Deklaration eines Cursors nach der Deklaration eines Handlers" + eng "Cursor declaration after handler declaration" + ger "Deklaration eines Cursors nach der Deklaration eines Handlers" ER_SP_CASE_NOT_FOUND 20000 - eng "Case not found for CASE statement" - ger "Fall fr CASE-Anweisung nicht gefunden" + eng "Case not found for CASE statement" + ger "Fall fr CASE-Anweisung nicht gefunden" ER_FPARSER_TOO_BIG_FILE - eng "Configuration file '%-.64s' is too big" - ger "Konfigurationsdatei '%-.64s' ist zu gro" - rus " '%-.64s'" - ukr " Ʀæ '%-.64s'" + eng "Configuration file '%-.64s' is too big" + ger "Konfigurationsdatei '%-.64s' ist zu gro" + rus " '%-.64s'" + ukr " Ʀæ '%-.64s'" ER_FPARSER_BAD_HEADER - eng "Malformed file type header in file '%-.64s'" - ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.64s'" - rus " '%-.64s'" - ukr "צ ̦ '%-.64s'" + eng "Malformed file type header in file '%-.64s'" + ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.64s'" + rus " '%-.64s'" + ukr "צ ̦ '%-.64s'" ER_FPARSER_EOF_IN_COMMENT - eng "Unexpected end of file while parsing comment '%-.200s'" - ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.64s'" - rus " '%-.64s'" - ukr "Ħ ˦ Ҧ '%-.64s'" + eng "Unexpected end of file while parsing comment '%-.200s'" + ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.64s'" + rus " '%-.64s'" + ukr "Ħ ˦ Ҧ '%-.64s'" ER_FPARSER_ERROR_IN_PARAMETER - eng "Error while parsing parameter '%-.64s' (line: '%-.64s')" - ger "Fehler beim Parsen des Parameters '%-.64s' (Zeile: '%-.64s')" - rus " '%-.64s' (: '%-.64s')" - ukr " ЦΦ '%-.64s' (: '%-.64s')" + eng "Error while parsing parameter '%-.64s' (line: '%-.64s')" + ger "Fehler beim Parsen des Parameters '%-.64s' (Zeile: '%-.64s')" + rus " '%-.64s' (: '%-.64s')" + ukr " ЦΦ '%-.64s' (: '%-.64s')" ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER - eng "Unexpected end of file while skipping unknown parameter '%-.64s'" - ger "Unerwartetes Dateiende beim berspringen des unbekannten Parameters '%-.64s'" - rus " '%-.64s'" - ukr "Ħ ˦ ¦ צ '%-.64s'" + eng "Unexpected end of file while skipping unknown parameter '%-.64s'" + ger "Unerwartetes Dateiende beim berspringen des unbekannten Parameters '%-.64s'" + rus " '%-.64s'" + ukr "Ħ ˦ ¦ צ '%-.64s'" ER_VIEW_NO_EXPLAIN - eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" - ger "EXPLAIN/SHOW kann nicht verlangt werden. Rechte fr zugrunde liegende Tabelle fehlen" - rus "EXPLAIN/SHOW ; " - ukr "EXPLAIN/SHOW צ; æ " + eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" + ger "EXPLAIN/SHOW kann nicht verlangt werden. 
Rechte fr zugrunde liegende Tabelle fehlen" + rus "EXPLAIN/SHOW ; " + ukr "EXPLAIN/SHOW צ; æ " ER_FRM_UNKNOWN_TYPE - eng "File '%-.64s' has unknown type '%-.64s' in its header" - ger "Datei '%-.64s' hat unbekannten Typ '%-.64s' im Header" - rus " '%-.64s' '%-.64s' " - ukr " '%-.64s' צ '%-.64s' " + eng "File '%-.64s' has unknown type '%-.64s' in its header" + ger "Datei '%-.64s' hat unbekannten Typ '%-.64s' im Header" + rus " '%-.64s' '%-.64s' " + ukr " '%-.64s' צ '%-.64s' " ER_WRONG_OBJECT - eng "'%-.64s.%-.64s' is not %s" - ger "'%-.64s.%-.64s' ist nicht %s" - rus "'%-.64s.%-.64s' - %s" - ukr "'%-.64s.%-.64s' %s" + eng "'%-.64s.%-.64s' is not %s" + ger "'%-.64s.%-.64s' ist nicht %s" + rus "'%-.64s.%-.64s' - %s" + ukr "'%-.64s.%-.64s' %s" ER_NONUPDATEABLE_COLUMN - eng "Column '%-.64s' is not updatable" - ger "Feld '%-.64s' ist nicht aktualisierbar" - rus " '%-.64s' " - ukr " '%-.64s' " + eng "Column '%-.64s' is not updatable" + ger "Feld '%-.64s' ist nicht aktualisierbar" + rus " '%-.64s' " + ukr " '%-.64s' " ER_VIEW_SELECT_DERIVED - eng "View's SELECT contains a subquery in the FROM clause" - ger "SELECT der View enthlt eine Subquery in der FROM-Klausel" - rus "View SELECT FROM" - ukr "View SELECT Ц æ FROM" + eng "View's SELECT contains a subquery in the FROM clause" + ger "SELECT der View enthlt eine Subquery in der FROM-Klausel" + rus "View SELECT FROM" + ukr "View SELECT Ц æ FROM" ER_VIEW_SELECT_CLAUSE - eng "View's SELECT contains a '%s' clause" - ger "SELECT der View enthlt eine '%s'-Klausel" - rus "View SELECT '%s'" - ukr "View SELECT æ '%s'" + eng "View's SELECT contains a '%s' clause" + ger "SELECT der View enthlt eine '%s'-Klausel" + rus "View SELECT '%s'" + ukr "View SELECT æ '%s'" ER_VIEW_SELECT_VARIABLE - eng "View's SELECT contains a variable or parameter" - ger "SELECT der View enthlt eine Variable oder einen Parameter" - rus "View SELECT " - ukr "View SELECT " + eng "View's SELECT contains a variable or parameter" + ger "SELECT der View enthlt eine Variable oder einen Parameter" + rus "View SELECT " + ukr "View SELECT " ER_VIEW_SELECT_TMPTABLE - eng "View's SELECT refers to a temporary table '%-.64s'" - ger "SELECT der View verweist auf eine temporre Tabelle '%-.64s'" - rus "View SELECT '%-.64s'" - ukr "View SELECT դ '%-.64s'" + eng "View's SELECT refers to a temporary table '%-.64s'" + ger "SELECT der View verweist auf eine temporre Tabelle '%-.64s'" + rus "View SELECT '%-.64s'" + ukr "View SELECT դ '%-.64s'" ER_VIEW_WRONG_LIST - eng "View's SELECT and view's field list have different column counts" - ger "SELECT- und Feldliste der Views haben unterschiedliche Anzahlen von Spalten" - rus "View SELECT view " - ukr "View SELECT ̦ æ view Ҧ ˦˦ æ" + eng "View's SELECT and view's field list have different column counts" + ger "SELECT- und Feldliste der Views haben unterschiedliche Anzahlen von Spalten" + rus "View SELECT view " + ukr "View SELECT ̦ æ view Ҧ ˦˦ æ" ER_WARN_VIEW_MERGE - eng "View merge algorithm can't be used here for now (assumed undefined algorithm)" - ger "View-Merge-Algorithmus kann hier momentan nicht verwendet werden (undefinierter Algorithmus wird angenommen)" - rus " view ( )" - ukr " view ( )" + eng "View merge algorithm can't be used here for now (assumed undefined algorithm)" + ger "View-Merge-Algorithmus kann hier momentan nicht verwendet werden (undefinierter Algorithmus wird angenommen)" + rus " view ( )" + ukr " view ( )" ER_WARN_VIEW_WITHOUT_KEY - eng "View being updated does not have complete key of underlying table in it" - ger "Die aktualisierte 
View enthlt nicht den vollstndigen Schlssel der zugrunde liegenden Tabelle" - rus " view () ()" - ukr "View, , ͦ æ(), Ҧ " + eng "View being updated does not have complete key of underlying table in it" + ger "Die aktualisierte View enthlt nicht den vollstndigen Schlssel der zugrunde liegenden Tabelle" + rus " view () ()" + ukr "View, , ͦ æ(), Ҧ " ER_VIEW_INVALID - eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them" + eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them" ER_SP_NO_DROP_SP - eng "Can't drop or alter a %s from within another stored routine" - ger "Kann eine %s nicht von innerhalb einer anderen gespeicherten Routine lschen oder ndern" + eng "Can't drop or alter a %s from within another stored routine" + ger "Kann eine %s nicht von innerhalb einer anderen gespeicherten Routine lschen oder ndern" ER_SP_GOTO_IN_HNDLR - eng "GOTO is not allowed in a stored procedure handler" - ger "GOTO ist im Handler einer gespeicherten Prozedur nicht erlaubt" + eng "GOTO is not allowed in a stored procedure handler" + ger "GOTO ist im Handler einer gespeicherten Prozedur nicht erlaubt" ER_TRG_ALREADY_EXISTS - eng "Trigger already exists" - ger "Trigger existiert bereits" + eng "Trigger already exists" + ger "Trigger existiert bereits" ER_TRG_DOES_NOT_EXIST - eng "Trigger does not exist" - ger "Trigger existiert nicht" + eng "Trigger does not exist" + ger "Trigger existiert nicht" ER_TRG_ON_VIEW_OR_TEMP_TABLE - eng "Trigger's '%-.64s' is view or temporary table" - ger "'%-.64s' des Triggers ist View oder temporre Tabelle" + eng "Trigger's '%-.64s' is view or temporary table" + ger "'%-.64s' des Triggers ist View oder temporre Tabelle" ER_TRG_CANT_CHANGE_ROW - eng "Updating of %s row is not allowed in %strigger" - ger "Aktualisieren einer %s-Zeile ist in einem %-Trigger nicht erlaubt" + eng "Updating of %s row is not allowed in %strigger" + ger "Aktualisieren einer %s-Zeile ist in einem %-Trigger nicht erlaubt" ER_TRG_NO_SUCH_ROW_IN_TRG - eng "There is no %s row in %s trigger" - ger "Es gibt keine %s-Zeile im %s-Trigger" + eng "There is no %s row in %s trigger" + ger "Es gibt keine %s-Zeile im %s-Trigger" ER_NO_DEFAULT_FOR_FIELD - eng "Field '%-.64s' doesn't have a default value" - ger "Feld '%-.64s' hat keinen Vorgabewert" + eng "Field '%-.64s' doesn't have a default value" + ger "Feld '%-.64s' hat keinen Vorgabewert" ER_DIVISION_BY_ZERO 22012 - eng "Division by 0" - ger "Division durch 0" + eng "Division by 0" + ger "Division durch 0" ER_TRUNCATED_WRONG_VALUE_FOR_FIELD - eng "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld" - ger "Falscher %-.32s-Wert: '%-.128s' fr Feld '%.64s' in Zeile %ld" + eng "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld" + ger "Falscher %-.32s-Wert: '%-.128s' fr Feld '%.64s' in Zeile %ld" ER_ILLEGAL_VALUE_FOR_TYPE 22007 - eng "Illegal %s '%-.64s' value found during parsing" - ger "Nicht zulssiger %s-Wert '%-.64s' beim Parsen gefunden" + eng "Illegal %s '%-.64s' value found during parsing" + ger "Nicht zulssiger %s-Wert '%-.64s' beim Parsen gefunden" ER_VIEW_NONUPD_CHECK - eng "CHECK OPTION on non-updatable view '%-.64s.%-.64s'" - ger "CHECK OPTION auf nicht-aktualisierbarem View '%-.64s.%-.64s'" - rus "CHECK OPTION VIEW '%-.64s.%-.64s'" - ukr "CHECK OPTION VIEW '%-.64s.%-.64s' " + eng "CHECK OPTION on non-updatable view '%-.64s.%-.64s'" + ger "CHECK OPTION auf 
nicht-aktualisierbarem View '%-.64s.%-.64s'" + rus "CHECK OPTION VIEW '%-.64s.%-.64s'" + ukr "CHECK OPTION VIEW '%-.64s.%-.64s' " ER_VIEW_CHECK_FAILED - eng "CHECK OPTION failed '%-.64s.%-.64s'" - ger "CHECK OPTION fehlgeschlagen: '%-.64s.%-.64s'" - rus " CHECK OPTION VIEW '%-.64s.%-.64s' " - ukr "צ CHECK OPTION VIEW '%-.64s.%-.64s' " + eng "CHECK OPTION failed '%-.64s.%-.64s'" + ger "CHECK OPTION fehlgeschlagen: '%-.64s.%-.64s'" + rus " CHECK OPTION VIEW '%-.64s.%-.64s' " + ukr "צ CHECK OPTION VIEW '%-.64s.%-.64s' " ER_PROCACCESS_DENIED_ERROR 42000 - eng "%-.16s command denied to user '%-.32s'@'%-.64s' for routine '%-.64s'" - ger "Befehl %-.16s nicht zulssig fr Benutzer '%-.32s'@'%-.64s' in Routine '%-.64s'" + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for routine '%-.64s'" + ger "Befehl %-.16s nicht zulssig fr Benutzer '%-.32s'@'%-.64s' in Routine '%-.64s'" ER_RELAY_LOG_FAIL - eng "Failed purging old relay logs: %s" - ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s" + eng "Failed purging old relay logs: %s" + ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s" ER_PASSWD_LENGTH - eng "Password hash should be a %d-digit hexadecimal number" - ger "Passwort-Hash sollte eine Hexdaezimalzahl mit %d Stellen sein" + eng "Password hash should be a %d-digit hexadecimal number" + ger "Passwort-Hash sollte eine Hexdaezimalzahl mit %d Stellen sein" ER_UNKNOWN_TARGET_BINLOG - eng "Target log not found in binlog index" - ger "Ziel-Log im Binlog-Index nicht gefunden" + eng "Target log not found in binlog index" + ger "Ziel-Log im Binlog-Index nicht gefunden" ER_IO_ERR_LOG_INDEX_READ - eng "I/O error reading log index file" - ger "Fehler beim Lesen der Log-Index-Datei" + eng "I/O error reading log index file" + ger "Fehler beim Lesen der Log-Index-Datei" ER_BINLOG_PURGE_PROHIBITED - eng "Server configuration does not permit binlog purge" - ger "Server-Konfiguration erlaubt keine Binlog-Bereinigung" + eng "Server configuration does not permit binlog purge" + ger "Server-Konfiguration erlaubt keine Binlog-Bereinigung" ER_FSEEK_FAIL - eng "Failed on fseek()" - ger "fseek() fehlgeschlagen" + eng "Failed on fseek()" + ger "fseek() fehlgeschlagen" ER_BINLOG_PURGE_FATAL_ERR - eng "Fatal error during log purge" - ger "Schwerwiegender Fehler bei der Log-Bereinigung" + eng "Fatal error during log purge" + ger "Schwerwiegender Fehler bei der Log-Bereinigung" ER_LOG_IN_USE - eng "A purgeable log is in use, will not purge" - ger "Ein zu bereinigendes Log wird gerade benutzt, daher keine Bereinigung" + eng "A purgeable log is in use, will not purge" + ger "Ein zu bereinigendes Log wird gerade benutzt, daher keine Bereinigung" ER_LOG_PURGE_UNKNOWN_ERR - eng "Unknown error during log purge" - ger "Unbekannter Fehler bei Log-Bereinigung" + eng "Unknown error during log purge" + ger "Unbekannter Fehler bei Log-Bereinigung" ER_RELAY_LOG_INIT - eng "Failed initializing relay log position: %s" - ger "Initialisierung der Relais-Log-Position fehlgeschlagen: %s" + eng "Failed initializing relay log position: %s" + ger "Initialisierung der Relais-Log-Position fehlgeschlagen: %s" ER_NO_BINARY_LOGGING - eng "You are not using binary logging" - ger "Sie verwenden keine Binrlogs" + eng "You are not using binary logging" + ger "Sie verwenden keine Binrlogs" ER_RESERVED_SYNTAX - eng "The '%-.64s' syntax is reserved for purposes internal to the MySQL server" - ger "Die Schreibweise '%-.64s' ist fr interne Zwecke des MySQL-Servers reserviert" + eng "The '%-.64s' syntax is reserved for purposes internal to the MySQL 
server" + ger "Die Schreibweise '%-.64s' ist fr interne Zwecke des MySQL-Servers reserviert" ER_WSAS_FAILED - eng "WSAStartup Failed" - ger "WSAStartup fehlgeschlagen" + eng "WSAStartup Failed" + ger "WSAStartup fehlgeschlagen" ER_DIFF_GROUPS_PROC - eng "Can't handle procedures with different groups yet" - ger "Kann Prozeduren mit unterschiedlichen Gruppen noch nicht verarbeiten" + eng "Can't handle procedures with different groups yet" + ger "Kann Prozeduren mit unterschiedlichen Gruppen noch nicht verarbeiten" ER_NO_GROUP_FOR_PROC - eng "Select must have a group with this procedure" - ger "SELECT muss bei dieser Prozedur ein GROUP BY haben" + eng "Select must have a group with this procedure" + ger "SELECT muss bei dieser Prozedur ein GROUP BY haben" ER_ORDER_WITH_PROC - eng "Can't use ORDER clause with this procedure" - ger "Kann bei dieser Prozedur keine ORDER-BY-Klausel verwenden" + eng "Can't use ORDER clause with this procedure" + ger "Kann bei dieser Prozedur keine ORDER-BY-Klausel verwenden" ER_LOGGING_PROHIBIT_CHANGING_OF - eng "Binary logging and replication forbid changing the global server %s" - ger "Binrlogs und Replikation verhindern Wechsel des globalen Servers %s" + eng "Binary logging and replication forbid changing the global server %s" + ger "Binrlogs und Replikation verhindern Wechsel des globalen Servers %s" ER_NO_FILE_MAPPING - eng "Can't map file: %-.200s, errno: %d" - ger "Kann Datei nicht abbilden: %-.64s, Fehler: %d" + eng "Can't map file: %-.200s, errno: %d" + ger "Kann Datei nicht abbilden: %-.64s, Fehler: %d" ER_WRONG_MAGIC - eng "Wrong magic in %-.64s" - ger "Falsche magische Zahlen in %-.64s" + eng "Wrong magic in %-.64s" + ger "Falsche magische Zahlen in %-.64s" ER_PS_MANY_PARAM - eng "Prepared statement contains too many placeholders" - ger "Vorbereitete Anweisung enthlt zu viele Platzhalter" + eng "Prepared statement contains too many placeholders" + ger "Vorbereitete Anweisung enthlt zu viele Platzhalter" ER_KEY_PART_0 - eng "Key part '%-.64s' length cannot be 0" - ger "Lnge des Schlsselteils '%-.64s' kann nicht 0 sein" + eng "Key part '%-.64s' length cannot be 0" + ger "Lnge des Schlsselteils '%-.64s' kann nicht 0 sein" ER_VIEW_CHECKSUM - eng "View text checksum failed" - ger "View-Text-Prfsumme fehlgeschlagen" - rus " VIEW " - ukr "צ ϧ VIEW " + eng "View text checksum failed" + ger "View-Text-Prfsumme fehlgeschlagen" + rus " VIEW " + ukr "צ ϧ VIEW " ER_VIEW_MULTIUPDATE - eng "Can not modify more than one base table through a join view '%-.64s.%-.64s'" - ger "Kann nicht mehr als eine Basistabelle ber Join-View '%-.64s.%-.64s' ndern" - rus " VIEW '%-.64s.%-.64s'" - ukr " ¦ VIEW '%-.64s.%-.64s', ͦԦ ˦ " + eng "Can not modify more than one base table through a join view '%-.64s.%-.64s'" + ger "Kann nicht mehr als eine Basistabelle ber Join-View '%-.64s.%-.64s' ndern" + rus " VIEW '%-.64s.%-.64s'" + ukr " ¦ VIEW '%-.64s.%-.64s', ͦԦ ˦ " ER_VIEW_NO_INSERT_FIELD_LIST - eng "Can not insert into join view '%-.64s.%-.64s' without fields list" - ger "Kann nicht ohne Feldliste in Join-View '%-.64s.%-.64s' einfgen" - rus " VIEW '%-.64s.%-.64s' " - ukr " VIEW '%-.64s.%-.64s', ͦ ˦ , æ" + eng "Can not insert into join view '%-.64s.%-.64s' without fields list" + ger "Kann nicht ohne Feldliste in Join-View '%-.64s.%-.64s' einfgen" + rus " VIEW '%-.64s.%-.64s' " + ukr " VIEW '%-.64s.%-.64s', ͦ ˦ , æ" ER_VIEW_DELETE_MERGE_VIEW - eng "Can not delete from join view '%-.64s.%-.64s'" - ger "Kann nicht aus Join-View '%-.64s.%-.64s' lschen" - rus " VIEW '%-.64s.%-.64s'" - ukr " 
VIEW '%-.64s.%-.64s', ͦ ˦ " + eng "Can not delete from join view '%-.64s.%-.64s'" + ger "Kann nicht aus Join-View '%-.64s.%-.64s' lschen" + rus " VIEW '%-.64s.%-.64s'" + ukr " VIEW '%-.64s.%-.64s', ͦ ˦ " ER_CANNOT_USER - eng "Operation %s failed for %.256s" - ger "Operation %s schlug fehl fr %.256s" - norwegian-ny "Operation %s failed for '%.256s'" + eng "Operation %s failed for %.256s" + ger "Operation %s schlug fehl fr %.256s" + norwegian-ny "Operation %s failed for '%.256s'" ER_XAER_NOTA XAE04 eng "XAER_NOTA: Unknown XID" - ger "XAER_NOTA: Unbekannte XID" + ger "XAER_NOTA: Unbekannte XID" ER_XAER_INVAL XAE05 eng "XAER_INVAL: Invalid arguments (or unsupported command)" - ger "XAER_INVAL: Ungltige Argumente (oder nicht untersttzter Befehl)" + ger "XAER_INVAL: Ungltige Argumente (oder nicht untersttzter Befehl)" ER_XAER_RMFAIL XAE07 eng "XAER_RMFAIL: The command cannot be executed when global transaction is in the %.64s state" ger "XAER_RMFAIL: DEr Befehl kann nicht ausgefhrt werden, wenn die globale Transaktion im Zustand %.64s ist" rus "XAER_RMFAIL: '%.64s'" ER_XAER_OUTSIDE XAE09 eng "XAER_OUTSIDE: Some work is done outside global transaction" - ger "XAER_OUTSIDE: Einige Arbeiten werden auerhalb der globalen Transaktion verrichtet" + ger "XAER_OUTSIDE: Einige Arbeiten werden auerhalb der globalen Transaktion verrichtet" ER_XAER_RMERR XAE03 eng "XAER_RMERR: Fatal error occurred in the transaction branch - check your data for consistency" - ger "XAER_RMERR: Schwerwiegender Fehler im Transaktionszweig - prfen Sie Ihre Daten auf Konsistenz" + ger "XAER_RMERR: Schwerwiegender Fehler im Transaktionszweig - prfen Sie Ihre Daten auf Konsistenz" ER_XA_RBROLLBACK XA100 eng "XA_RBROLLBACK: Transaction branch was rolled back" - ger "XA_RBROLLBACK: Transaktionszweig wurde zurckgerollt" + ger "XA_RBROLLBACK: Transaktionszweig wurde zurckgerollt" ER_NONEXISTING_PROC_GRANT 42000 - eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on routine '%-.64s'" - ger "Es gibt diese Berechtigung fr Benutzer '%-.32s' auf Host '%-.64s' fr Routine '%-.64s' nicht" + eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on routine '%-.64s'" + ger "Es gibt diese Berechtigung fr Benutzer '%-.32s' auf Host '%-.64s' fr Routine '%-.64s' nicht" ER_PROC_AUTO_GRANT_FAIL - eng "Failed to grant EXECUTE and ALTER ROUTINE privileges" - ger "Gewhrung von EXECUTE- und ALTER-ROUTINE-Rechten fehlgeschlagen" + eng "Failed to grant EXECUTE and ALTER ROUTINE privileges" + ger "Gewhrung von EXECUTE- und ALTER-ROUTINE-Rechten fehlgeschlagen" ER_PROC_AUTO_REVOKE_FAIL - eng "Failed to revoke all privileges to dropped routine" - ger "Rcknahme aller Rechte fr die gelschte Routine fehlgeschlagen" + eng "Failed to revoke all privileges to dropped routine" + ger "Rcknahme aller Rechte fr die gelschte Routine fehlgeschlagen" ER_DATA_TOO_LONG 22001 - eng "Data too long for column '%s' at row %ld" - ger "Daten zu lang fr Feld '%s' in Zeile %ld" + eng "Data too long for column '%s' at row %ld" + ger "Daten zu lang fr Feld '%s' in Zeile %ld" ER_SP_BAD_SQLSTATE 42000 - eng "Bad SQLSTATE: '%s'" - ger "Ungltiger SQLSTATE: '%s'" + eng "Bad SQLSTATE: '%s'" + ger "Ungltiger SQLSTATE: '%s'" ER_STARTUP - eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s" - ger "%s: bereit fr Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d %s" + eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s" + ger "%s: bereit fr Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d %s" 
ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR eng "Can't load value from file with fixed size rows to variable" - ger "Kann Wert aus Datei mit Zeilen fester Gre nicht in Variable laden" + ger "Kann Wert aus Datei mit Zeilen fester Gre nicht in Variable laden" ER_CANT_CREATE_USER_WITH_GRANT 42000 - eng "You are not allowed to create a user with GRANT" - ger "Sie drfen keinen Benutzer mit GRANT anlegen" + eng "You are not allowed to create a user with GRANT" + ger "Sie drfen keinen Benutzer mit GRANT anlegen" ER_WRONG_VALUE_FOR_TYPE - eng "Incorrect %-.32s value: '%-.128s' for function %-.32s" - ger "Falscher %-.32s-Wert: '%-.128s' fr Funktion %-.32s" + eng "Incorrect %-.32s value: '%-.128s' for function %-.32s" + ger "Falscher %-.32s-Wert: '%-.128s' fr Funktion %-.32s" ER_TABLE_DEF_CHANGED - eng "Table definition has changed, please retry transaction" - ger "Tabellendefinition wurde gendert, bitte starten Sie die Transaktion neu" + eng "Table definition has changed, please retry transaction" + ger "Tabellendefinition wurde gendert, bitte starten Sie die Transaktion neu" ER_SP_DUP_HANDLER 42000 - eng "Duplicate handler declared in the same block" - ger "Doppelter Handler im selben Block deklariert" + eng "Duplicate handler declared in the same block" + ger "Doppelter Handler im selben Block deklariert" ER_SP_NOT_VAR_ARG 42000 - eng "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger" - ger "OUT- oder INOUT-Argument %d fr Routine %s ist keine Variable" + eng "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger" + ger "OUT- oder INOUT-Argument %d fr Routine %s ist keine Variable" ER_SP_NO_RETSET 0A000 - eng "Not allowed to return a result set from a %s" - ger "Rckgabe einer Ergebnismenge aus einer %s ist nicht erlaubt" + eng "Not allowed to return a result set from a %s" + ger "Rckgabe einer Ergebnismenge aus einer %s ist nicht erlaubt" ER_CANT_CREATE_GEOMETRY_OBJECT 22003 - eng "Cannot get geometry object from data you send to the GEOMETRY field" - ger "Kann kein Geometrieobjekt aus den Daten machen, die Sie dem GEOMETRY-Feld bergeben haben" + eng "Cannot get geometry object from data you send to the GEOMETRY field" + ger "Kann kein Geometrieobjekt aus den Daten machen, die Sie dem GEOMETRY-Feld bergeben haben" ER_FAILED_ROUTINE_BREAK_BINLOG - eng "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes" - ger "Eine Routine, die weder NO SQL noch READS SQL DATA in der Deklaration hat, schlug fehl und Binrlogging ist aktiv. Wenn Nicht-Transaktions-Tabellen aktualisiert wurden, enthlt das Binrlog ihre nderungen nicht" + eng "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes" + ger "Eine Routine, die weder NO SQL noch READS SQL DATA in der Deklaration hat, schlug fehl und Binrlogging ist aktiv. 
Wenn Nicht-Transaktions-Tabellen aktualisiert wurden, enthlt das Binrlog ihre nderungen nicht" ER_BINLOG_UNSAFE_ROUTINE - eng "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)" - ger "Diese Routine hat weder DETERMINISTIC, NO SQL noch READS SQL DATA in der Deklaration und Binrlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)" + eng "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)" + ger "Diese Routine hat weder DETERMINISTIC, NO SQL noch READS SQL DATA in der Deklaration und Binrlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)" ER_BINLOG_CREATE_ROUTINE_NEED_SUPER - eng "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)" - ger "Sie haben keine SUPER-Berechtigung und Binrlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)" + eng "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)" + ger "Sie haben keine SUPER-Berechtigung und Binrlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)" ER_EXEC_STMT_WITH_OPEN_CURSOR - eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it." - ger "Sie knnen keine vorbereitete Anweisung ausfhren, die mit einem geffneten Cursor verknpft ist. Setzen Sie die Anweisung zurck, um sie neu auszufhren" + eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it." + ger "Sie knnen keine vorbereitete Anweisung ausfhren, die mit einem geffneten Cursor verknpft ist. Setzen Sie die Anweisung zurck, um sie neu auszufhren" ER_STMT_HAS_NO_OPEN_CURSOR - eng "The statement (%lu) has no open cursor." - ger "Die Anweisung (%lu) hat keinen geffneten Cursor" + eng "The statement (%lu) has no open cursor." + ger "Die Anweisung (%lu) hat keinen geffneten Cursor" ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG eng "Explicit or implicit commit is not allowed in stored function or trigger." - ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt" + ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt" ER_NO_DEFAULT_FOR_VIEW_FIELD eng "Field of view '%-.64s.%-.64s' underlying table doesn't have a default value" - ger "Ein Feld der dem View '%-.64s.%-.64s' zugrundeliegenden Tabelle hat keinen Vorgabewert" + ger "Ein Feld der dem View '%-.64s.%-.64s' zugrundeliegenden Tabelle hat keinen Vorgabewert" ER_SP_NO_RECURSION eng "Recursive stored functions and triggers are not allowed." - ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt" + ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt" ER_TOO_BIG_SCALE 42000 S1009 eng "Too big scale %d specified for column '%-.64s'. Maximum is %d." - ger "Zu groer Skalierungsfaktor %d fr Feld '%-.64s' angegeben. 
Maximum ist %d" + ger "Zu groer Skalierungsfaktor %d fr Feld '%-.64s' angegeben. Maximum ist %d" ER_TOO_BIG_PRECISION 42000 S1009 eng "Too big precision %d specified for column '%-.64s'. Maximum is %d." - ger "Zu groe Genauigkeit %d fr Feld '%-.64s' angegeben. Maximum ist %d" + ger "Zu groe Genauigkeit %d fr Feld '%-.64s' angegeben. Maximum ist %d" ER_M_BIGGER_THAN_D 42000 S1009 eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.64s')." - ger "Fr FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.64s')" + ger "Fr FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.64s')" ER_WRONG_LOCK_OF_SYSTEM_TABLE eng "You can't combine write-locking of system '%-.64s.%-.64s' table with other tables" - ger "Sie knnen Schreibsperren auf der Systemtabelle '%-.64s.%-.64s' nicht mit anderen Tabellen kombinieren" + ger "Sie knnen Schreibsperren auf der Systemtabelle '%-.64s.%-.64s' nicht mit anderen Tabellen kombinieren" ER_CONNECT_TO_FOREIGN_DATA_SOURCE eng "Unable to connect to foreign data source: %.64s" - ger "Kann nicht mit Fremddatenquelle verbinden: %.64s" + ger "Kann nicht mit Fremddatenquelle verbinden: %.64s" ER_QUERY_ON_FOREIGN_DATA_SOURCE eng "There was a problem processing the query on the foreign data source. Data source error: %-.64" - ger "Bei der Verarbeitung der Abfrage ist in der Fremddatenquelle ein Problem aufgetreten. Datenquellenfehlermeldung: %-.64s" + ger "Bei der Verarbeitung der Abfrage ist in der Fremddatenquelle ein Problem aufgetreten. Datenquellenfehlermeldung: %-.64s" ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST eng "The foreign data source you are trying to reference does not exist. Data source error: %-.64s" - ger "Die Fremddatenquelle, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s" + ger "Die Fremddatenquelle, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s" ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE eng "Can't create federated table. The data source connection string '%-.64s' is not in the correct format" ger "Kann fderierte Tabelle nicht erzeugen. Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format" ER_FOREIGN_DATA_STRING_INVALID eng "The data source connection string '%-.64s' is not in the correct format" - ger "Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format" + ger "Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format" ER_CANT_CREATE_FEDERATED_TABLE - eng "Can't create federated table. Foreign data src error: %-.64s" - ger "Kann fderierte Tabelle nicht erzeugen. Fremddatenquellenfehlermeldung: %-.64s" + eng "Can't create federated table. Foreign data src error: %-.64s" + ger "Kann fderierte Tabelle nicht erzeugen. Fremddatenquellenfehlermeldung: %-.64s" ER_TRG_IN_WRONG_SCHEMA - eng "Trigger in wrong schema" - ger "Trigger im falschen Schema" + eng "Trigger in wrong schema" + ger "Trigger im falschen Schema" ER_STACK_OVERRUN_NEED_MORE - eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld -O thread_stack=#' to specify a bigger stack." - ger "Thread-Stack-berlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes bentigt. Verwenden Sie 'mysqld -O thread_stack=#', um einen greren Stack anzugeben" + eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld -O thread_stack=#' to specify a bigger stack." + ger "Thread-Stack-berlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes bentigt. 
Verwenden Sie 'mysqld -O thread_stack=#', um einen greren Stack anzugeben" ER_TOO_LONG_BODY 42000 S1009 - eng "Routine body for '%-.100s' is too long" - ger "Routinen-Body fr '%-.100s' ist zu lang" + eng "Routine body for '%-.100s' is too long" + ger "Routinen-Body fr '%-.100s' ist zu lang" ER_WARN_CANT_DROP_DEFAULT_KEYCACHE - eng "Cannot drop default keycache" + eng "Cannot drop default keycache" ger "Der vorgabemige Schlssel-Cache kann nicht gelscht werden" ER_TOO_BIG_DISPLAYWIDTH 42000 S1009 - eng "Display width out of range for column '%-.64s' (max = %d)" - ger "Anzeigebreite auerhalb des zulssigen Bereichs fr Spalte '%-.64s' (Maximum: %d)" + eng "Display width out of range for column '%-.64s' (max = %d)" + ger "Anzeigebreite auerhalb des zulssigen Bereichs fr Spalte '%-.64s' (Maximum: %d)" ER_XAER_DUPID XAE08 eng "XAER_DUPID: The XID already exists" - ger "XAER_DUPID: Die XID existiert bereits" + ger "XAER_DUPID: Die XID existiert bereits" ER_DATETIME_FUNCTION_OVERFLOW 22008 eng "Datetime function: %-.32s field overflow" - ger "Datetime-Funktion: %-.32s Feldberlauf" + ger "Datetime-Funktion: %-.32s Feldberlauf" ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG eng "Can't update table '%-.64s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger." - ger "Kann Tabelle '%-.64s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief" + ger "Kann Tabelle '%-.64s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief" ER_VIEW_PREVENT_UPDATE eng "The definition of table '%-.64s' prevents operation %.64s on table '%-.64s'." - ger "Die Definition der Tabelle '%-.64s' verhindert die Operation %.64s auf Tabelle '%-.64s'" + ger "Die Definition der Tabelle '%-.64s' verhindert die Operation %.64s auf Tabelle '%-.64s'" ER_PS_NO_RECURSION eng "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner" - ger "Die vorbereitete Anweisung enthlt einen Aufruf einer gespeicherten Routine, die auf eben dieselbe Anweisung verweist. Es ist nicht erlaubt, eine vorbereitete Anweisung in solch rekursiver Weise auszufhren" + ger "Die vorbereitete Anweisung enthlt einen Aufruf einer gespeicherten Routine, die auf eben dieselbe Anweisung verweist. Es ist nicht erlaubt, eine vorbereitete Anweisung in solch rekursiver Weise auszufhren" ER_SP_CANT_SET_AUTOCOMMIT - eng "Not allowed to set autocommit from a stored function or trigger" + eng "Not allowed to set autocommit from a stored function or trigger" ger "Es ist nicht erlaubt, innerhalb einer gespeicherten Funktion oder eines Triggers AUTOCOMMIT zu setzen" ER_MALFORMED_DEFINER - eng "Definer is not fully qualified" - ger "Definierer des View ist nicht vollstndig spezifiziert" + eng "Definer is not fully qualified" + ger "Definierer des View ist nicht vollstndig spezifiziert" ER_VIEW_FRM_NO_USER eng "View '%-.64s'.'%-.64s' has no definer information (old table format). Current user is used as definer. Please recreate the view!" - ger "View '%-.64s'.'%-.64s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu" + ger "View '%-.64s'.'%-.64s' hat keine Definierer-Information (altes Tabellenformat). 
Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu" ER_VIEW_OTHER_USER - eng "You need the SUPER privilege for creation view with '%-.64s'@'%-.64s' definer" - ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.64s'@'%-.64s' zu erzeugen" + eng "You need the SUPER privilege for creation view with '%-.64s'@'%-.64s' definer" + ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.64s'@'%-.64s' zu erzeugen" ER_NO_SUCH_USER eng "There is no '%-.64s'@'%-.64s' registered" - ger "'%-.64s'@'%-.64s' ist nicht registriert" + ger "'%-.64s'@'%-.64s' ist nicht registriert" ER_FORBID_SCHEMA_CHANGE - eng "Changing schema from '%-.64s' to '%-.64s' is not allowed." - ger "Wechsel des Schemas von '%-.64s' auf '%-.64s' ist nicht erlaubt" + eng "Changing schema from '%-.64s' to '%-.64s' is not allowed." + ger "Wechsel des Schemas von '%-.64s' auf '%-.64s' ist nicht erlaubt" ER_ROW_IS_REFERENCED_2 23000 - eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)" - ger "Kann Eltern-Zeile nicht lschen oder aktualisieren: eine Fremdschlsselbedingung schlgt fehl (%.192s)" + eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)" + ger "Kann Eltern-Zeile nicht lschen oder aktualisieren: eine Fremdschlsselbedingung schlgt fehl (%.192s)" ER_NO_REFERENCED_ROW_2 23000 - eng "Cannot add or update a child row: a foreign key constraint fails (%.192s)" - ger "Kann Kind-Zeile nicht hinzufgen oder aktualisieren: eine Fremdschlsselbedingung schlgt fehl (%.192s)" + eng "Cannot add or update a child row: a foreign key constraint fails (%.192s)" + ger "Kann Kind-Zeile nicht hinzufgen oder aktualisieren: eine Fremdschlsselbedingung schlgt fehl (%.192s)" ER_SP_BAD_VAR_SHADOW 42000 eng "Variable '%-.64s' must be quoted with `...`, or renamed" ger "Variable '%-.64s' muss mit `...` geschtzt oder aber umbenannt werden" @@ -5579,270 +5579,375 @@ ER_SP_RECURSION_LIMIT eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.64s" ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde fr Routine %.64s berschritten" ER_SP_PROC_TABLE_CORRUPT - eng "Failed to load routine %-.64s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)" + eng "Failed to load routine %-.64s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)" + ger "Routine %-64s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschdigt, oder enthlt fehlerhaften Daten (interner Code: %d)" ER_SP_WRONG_NAME 42000 - eng "Incorrect routine name '%-.64s'" + eng "Incorrect routine name '%-.64s'" + ger "Ungltiger Routinenname '%-.64s'" ER_TABLE_NEEDS_UPGRADE - eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" to fix it!" + eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" to fix it!" + ger "Tabellenaktualisierung erforderlich. Bitte zum Reparieren \"REPAIR TABLE `%-.32s`\" eingeben!" 
ER_SP_NO_AGGREGATE 42000 - eng "AGGREGATE is not supported for stored functions" + eng "AGGREGATE is not supported for stored functions" + ger "AGGREGATE wird bei gespeicherten Funktionen nicht untersttzt" ER_MAX_PREPARED_STMT_COUNT_REACHED 42000 eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)" + ger "Kann nicht mehr Anweisungen als max_prepared_stmt_count erzeugen (aktueller Wert: %lu)" ER_VIEW_RECURSIVE eng "`%-.64s`.`%-.64s` contains view recursion" + ger "`%-.64s`.`%-.64s` enthlt View-Rekursion" ER_NON_GROUPING_FIELD_USED 42000 - eng "non-grouping field '%-.64s' is used in %-.64s clause" + eng "non-grouping field '%-.64s' is used in %-.64s clause" + ger "In der %-.64s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet" ER_TABLE_CANT_HANDLE_SPKEYS eng "The used table type doesn't support SPATIAL indexes" + ger "Der verwendete Tabellentyp untersttzt keine SPATIAL-Indizes" ER_ILLEGAL_HA_CREATE_OPTION eng "Table storage engine '%-.64s' does not support the create option '%.64s'" + ger "Speicher-Engine '%-.64s' der Tabelle untersttzt die Option '%.64s' nicht" ER_PARTITION_REQUIRES_VALUES_ERROR eng "%-.64s PARTITIONING requires definition of VALUES %-.64s for each partition" + ger "%-.64s-PARTITIONierung erfordert Definition von VALUES %-.64s fr jede Partition" swe "%-.64s PARTITIONering krver definition av VALUES %-.64s fr varje partition" ER_PARTITION_WRONG_VALUES_ERROR eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition" + ger "Nur %-.64s-PARTITIONierung kann VALUES %-.64s in der Partitionsdefinition verwenden" swe "Endast %-.64s partitionering kan anvnda VALUES %-.64s i definition av partitionen" ER_PARTITION_MAXVALUE_ERROR eng "MAXVALUE can only be used in last partition definition" + ger "MAXVALUE kann nur fr die Definition der letzten Partition verwendet werden" swe "MAXVALUE kan bara anvndas i definitionen av den sista partitionen" ER_PARTITION_SUBPARTITION_ERROR eng "Subpartitions can only be hash partitions and by key" + ger "Unterpartitionen drfen nur HASH- oder KEY-Partitionen sein" swe "Subpartitioner kan bara vara hash och key partitioner" ER_PARTITION_SUBPART_MIX_ERROR eng "Must define subpartitions on all partitions if on one partition" + ger "Unterpartitionen knnen nur Hash- oder Key-Partitionen sein" swe "Subpartitioner måste definieras på alla partitioner om på en" - ER_PARTITION_WRONG_NO_PART_ERROR eng "Wrong number of partitions defined, mismatch with previous setting" + ger "Falsche Anzahl von Partitionen definiert, stimmt nicht mit vorherigen Einstellungen berein" swe "Antal partitioner definierade och antal partitioner r inte lika" ER_PARTITION_WRONG_NO_SUBPART_ERROR eng "Wrong number of subpartitions defined, mismatch with previous setting" + ger "Falsche Anzahl von Unterpartitionen definiert, stimmt nicht mit vorherigen Einstellungen berein" swe "Antal subpartitioner definierade och antal subpartitioner r inte lika" ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR eng "Constant/Random expression in (sub)partitioning function is not allowed" + ger "Konstante oder Random-Ausdrcke in (Unter-)Partitionsfunktionen sind nicht erlaubt" swe "Konstanta uttryck eller slumpmssiga uttryck r inte tilltna (sub)partitioneringsfunktioner" ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR eng "Expression in RANGE/LIST VALUES must be constant" + ger "Ausdrcke in RANGE/LIST VALUES mssen konstant sein" swe "Uttryck i RANGE/LIST VALUES mste vara ett konstant uttryck" ER_FIELD_NOT_FOUND_PART_ERROR eng "Field in list of fields for 
partition function not found in table" + ger "Felder in der Feldliste der Partitionierungsfunktion wurden in der Tabelle nicht gefunden" swe "Flt i listan av flt fr partitionering med key inte funnen i tabellen" ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR eng "List of fields is only allowed in KEY partitions" + ger "Eine Feldliste ist nur in KEY-Partitionen erlaubt" swe "En lista av flt r endast tilltet fr KEY partitioner" ER_INCONSISTENT_PARTITION_INFO_ERROR eng "The partition info in the frm file is not consistent with what can be written into the frm file" + ger "Die Partitionierungsinformationen in der frm-Datei stimmen nicht mit dem berein, was in die frm-Datei geschrieben werden kann" swe "Partitioneringsinformationen i frm-filen r inte konsistent med vad som kan skrivas i frm-filen" ER_PARTITION_FUNC_NOT_ALLOWED_ERROR eng "The %-.64s function returns the wrong type" + ger "Die %-.64s-Funktion gibt einen falschen Typ zurck" swe "%-.64s-funktionen returnerar felaktig typ" ER_PARTITIONS_MUST_BE_DEFINED_ERROR eng "For %-.64s partitions each partition must be defined" + ger "Fr %-.64s-Partitionen muss jede Partition definiert sein" swe "Fr %-.64s partitionering s mste varje partition definieras" ER_RANGE_NOT_INCREASING_ERROR eng "VALUES LESS THAN value must be strictly increasing for each partition" + ger "Werte in VALUES LESS THAN mssen fr jede Partition strikt aufsteigend sein" swe "Vrden i VALUES LESS THAN mste vara strikt vxande fr varje partition" ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR eng "VALUES value must be of same type as partition function" + ger "VALUES-Werte mssen vom selben Typ wie die Partitionierungsfunktion sein" swe "Vrden i VALUES mste vara av samma typ som partitioneringsfunktionen" ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR eng "Multiple definition of same constant in list partitioning" + ger "Mehrfachdefinition derselben Konstante bei Listen-Partitionierung" swe "Multipel definition av samma konstant i list partitionering" ER_PARTITION_ENTRY_ERROR eng "Partitioning can not be used stand-alone in query" + ger "Partitionierung kann in einer Abfrage nicht alleinstehend benutzt werden" swe "Partitioneringssyntax kan inte anvndas p egen hand i en SQL-frga" ER_MIX_HANDLER_ERROR eng "The mix of handlers in the partitions is not allowed in this version of MySQL" + ger "Das Vermischen von Handlern in Partitionen ist in dieser Version von MySQL nicht erlaubt" swe "Denna mix av lagringsmotorer r inte tillten i denna version av MySQL" ER_PARTITION_NOT_DEFINED_ERROR eng "For the partitioned engine it is necessary to define all %-.64s" + ger "Fr die partitionierte Engine mssen alle %-.64s definiert sein" swe "Fr partitioneringsmotorn s r det ndvndigt att definiera alla %-.64s" ER_TOO_MANY_PARTITIONS_ERROR eng "Too many partitions (including subpartitions) were defined" + ger "Es wurden zu vielen Partitionen (einschlielich Unterpartitionen) definiert" swe "Fr mnga partitioner (inkluderande subpartitioner) definierades" ER_SUBPARTITION_ERROR eng "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning" + ger "RANGE/LIST-Partitionierung kann bei Unterpartitionen nur zusammen mit HASH/KEY-Partitionierung verwendet werden" swe "Det r endast mjligt att blanda RANGE/LIST partitionering med HASH/KEY partitionering fr subpartitionering" ER_CANT_CREATE_HANDLER_FILE eng "Failed to create specific handler file" + ger "Erzeugen einer spezifischen Handler-Datei fehlgeschlagen" swe "Misslyckades med att skapa specifik fil i lagringsmotor" 
ER_BLOB_FIELD_IN_PART_FUNC_ERROR eng "A BLOB field is not allowed in partition function" + ger "In der Partitionierungsfunktion sind BLOB-Spalten nicht erlaubt" swe "Ett BLOB-flt r inte tilltet i partitioneringsfunktioner" ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF eng "A %-.64s need to include all fields in the partition function" + ger "Ein %-.64s muss alle Spalten der Partitionierungsfunktion umfassen" swe "En %-.64s behver inkludera alla flt i partitioneringsfunktionen fr denna lagringsmotor" ER_NO_PARTS_ERROR eng "Number of %-.64s = 0 is not an allowed value" + ger "Eine Anzahl von %-.64s = 0 ist kein erlaubter Wert" swe "Antal %-.64s = 0 r inte ett tillten vrde" ER_PARTITION_MGMT_ON_NONPARTITIONED eng "Partition management on a not partitioned table is not possible" + ger "Partitionsverwaltung einer nicht partitionierten Tabelle ist nicht mglich" swe "Partitioneringskommando p en opartitionerad tabell r inte mjligt" ER_FOREIGN_KEY_ON_PARTITIONED eng "Foreign key condition is not yet supported in conjunction with partitioning" + ger "Fremdschlssel-Beschrnkungen sind im Zusammenhang mit Partitionierung nicht zulssig" swe "Foreign key villkor r inte nnu implementerad i kombination med partitionering" ER_DROP_PARTITION_NON_EXISTENT eng "Error in list of partitions to %-.64s" + ger "Fehler in der Partitionsliste bei %-.64s" swe "Fel i listan av partitioner att %-.64s" ER_DROP_LAST_PARTITION eng "Cannot remove all partitions, use DROP TABLE instead" + ger "Es lassen sich nicht smtliche Partitionen lschen, benutzen Sie statt dessen DROP TABLE" swe "Det r inte tilltet att ta bort alla partitioner, anvnd DROP TABLE istllet" ER_COALESCE_ONLY_ON_HASH_PARTITION eng "COALESCE PARTITION can only be used on HASH/KEY partitions" + ger "COALESCE PARTITION kann nur auf HASH- oder KEY-Partitionen benutzt werden" swe "COALESCE PARTITION kan bara anvndas p HASH/KEY partitioner" ER_REORG_HASH_ONLY_ON_SAME_NO eng "REORGANISE PARTITION can only be used to reorganise partitions not to change their numbers" + ger "REORGANIZE PARTITION kann nur zur Reorganisation von Partitionen verwendet werden, nicht, um ihre Nummern zu ndern" swe "REORGANISE PARTITION kan bara anvndas fr att omorganisera partitioner, inte fr att ndra deras antal" ER_REORG_NO_PARAM_ERROR eng "REORGANISE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs" + ger "REORGANIZE PARTITION ohne Parameter kann nur fr auto-partitionierte Tabellen verwendet werden, die HASH-Partitionierung benutzen" swe "REORGANISE PARTITION utan parametrar kan bara anvndas p auto-partitionerade tabeller som anvnder HASH partitionering" ER_ONLY_ON_RANGE_LIST_PARTITION eng "%-.64s PARTITION can only be used on RANGE/LIST partitions" + ger "%-.64s PARTITION kann nur fr RANGE- oder LIST-Partitionen verwendet werden" swe "%-.64s PARTITION kan bara anvndas p RANGE/LIST-partitioner" ER_ADD_PARTITION_SUBPART_ERROR eng "Trying to Add partition(s) with wrong number of subpartitions" + ger "Es wurde versucht, eine oder mehrere Partitionen mit der falschen Anzahl von Unterpartitionen hinzuzufgen" swe "ADD PARTITION med fel antal subpartitioner" ER_ADD_PARTITION_NO_NEW_PARTITION eng "At least one partition must be added" + ger "Es muss zumindest eine Partition hinzugefgt werden" swe "tminstone en partition mste lggas till vid ADD PARTITION" ER_COALESCE_PARTITION_NO_PARTITION eng "At least one partition must be coalesced" + ger "Zumindest eine Partition muss mit COALESCE PARTITION zusammengefgt werden" swe "tminstone en partition mste sls ihop 
vid COALESCE PARTITION" ER_REORG_PARTITION_NOT_EXIST eng "More partitions to reorganise than there are partitions" + ger "Es wurde versucht, mehr Partitionen als vorhanden zu reorganisieren" swe "Fler partitioner att reorganisera n det finns partitioner" ER_SAME_NAME_PARTITION eng "Duplicate partition name %-.64s" + ger "Doppelter Partitionsname: %-.64s" swe "Duplicerat partitionsnamn %-.64s" ER_NO_BINLOG_ERROR eng "It is not allowed to shut off binlog on this command" + ger "Es es nicht erlaubt, bei diesem Befehl binlog abzuschalten" swe "Det r inte tilltet att stnga av binlog p detta kommando" ER_CONSECUTIVE_REORG_PARTITIONS eng "When reorganising a set of partitions they must be in consecutive order" + ger "Bei der Reorganisation eines Satzes von Partitionen mssen diese in geordneter Reihenfolge vorliegen" swe "Nr ett antal partitioner omorganiseras mste de vara i konsekutiv ordning" ER_REORG_OUTSIDE_RANGE eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range" + ger "Die Reorganisation von RANGE-Partitionen kann Gesamtbereiche nicht verndern, mit Ausnahme der letzten Partition, die den Bereich erweitern kann" swe "Reorganisering av rangepartitioner kan inte ndra den totala intervallet utom fr den sista partitionen dr intervallet kan utkas" ER_PARTITION_FUNCTION_FAILURE eng "Partition function not supported in this version for this handler" + ger "Partitionsfunktion in dieser Version dieses Handlers nicht untersttzt" ER_PART_STATE_ERROR eng "Partition state cannot be defined from CREATE/ALTER TABLE" + ger "Partitionszustand kann nicht von CREATE oder ALTER TABLE aus definiert werden" swe "Partition state kan inte definieras frn CREATE/ALTER TABLE" ER_LIMITED_PART_RANGE eng "The %-.64s handler only supports 32 bit integers in VALUES" + ger "Der Handler %-.64s untersttzt in VALUES nur 32-Bit-Integers" swe "%-.64s stdjer endast 32 bitar i integers i VALUES" ER_PLUGIN_IS_NOT_LOADED - eng "Plugin '%-.64s' is not loaded" + eng "Plugin '%-.64s' is not loaded" + ger "Plugin '%-.64s' ist nicht geladen" ER_WRONG_VALUE - eng "Incorrect %-.32s value: '%-.128s'" + eng "Incorrect %-.32s value: '%-.128s'" + ger "Falscher %-.32s-Wert: '%-.128s'" ER_NO_PARTITION_FOR_GIVEN_VALUE - eng "Table has no partition for value %-.64s" + eng "Table has no partition for value %-.64s" + ger "Tabelle hat fr den Wert %-.64s keine Partition" ER_FILEGROUP_OPTION_ONLY_ONCE eng "It is not allowed to specify %s more than once" + ger "%s darf nicht mehr als einmal angegegeben werden" ER_CREATE_FILEGROUP_FAILED eng "Failed to create %s" + ger "Anlegen von %s fehlgeschlagen" ER_DROP_FILEGROUP_FAILED eng "Failed to drop %s" + ger "Lschen (drop) von %s fehlgeschlagen" ER_TABLESPACE_AUTO_EXTEND_ERROR eng "The handler doesn't support autoextend of tablespaces" + ger "Der Handler untersttzt keine automatische Erweiterung (Autoextend) von Tablespaces" ER_WRONG_SIZE_NUMBER eng "A size parameter was incorrectly specified, either number or on the form 10M" + ger "Ein Gren-Parameter wurde unkorrekt angegeben, muss entweder Zahl sein oder im Format 10M" ER_SIZE_OVERFLOW_ERROR eng "The size number was correct but we don't allow the digit part to be more than 2 billion" + ger "Die Zahl fr die Gre war korrekt, aber der Zahlanteil darf nicht grer als 2 Milliarden sein" ER_ALTER_FILEGROUP_FAILED eng "Failed to alter: %s" + ger "nderung von %s fehlgeschlagen" ER_BINLOG_ROW_LOGGING_FAILED - eng "Writing one row to the row-based binary log failed" + eng "Writing one row to the 
row-based binary log failed" + ger "Schreiben einer Zeilen ins zeilenbasierte Binrlog fehlgeschlagen" ER_BINLOG_ROW_WRONG_TABLE_DEF - eng "Table definition on master and slave does not match" + eng "Table definition on master and slave does not match" + ger "Tabellendefinition auf Master und Slave stimmt nicht berein" ER_BINLOG_ROW_RBR_TO_SBR - eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events" + eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events" + ger "Slave, die mit --log-slave-updates laufen, mssen zeilenbasiertes Loggen verwenden, um zeilenbasierte Binrlog-Ereignisse loggen zu knnen" ER_EVENT_ALREADY_EXISTS eng "Event '%-.64s' already exists" + ger "Event '%-.64s' existiert bereits" ER_EVENT_STORE_FAILED eng "Failed to store event %s. Error code %d from storage engine." + ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %d" ER_EVENT_DOES_NOT_EXIST eng "Unknown event '%-.64s'" + ger "Unbekanntes Event '%-.64s'" ER_EVENT_CANT_ALTER eng "Failed to alter event '%-.64s'" + ger "ndern des Events '%-.64s' fehlgeschlagen" ER_EVENT_DROP_FAILED eng "Failed to drop %s" + ger "Lschen von %s fehlgeschlagen" ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG eng "INTERVAL is either not positive or too big" + ger "INTERVAL ist entweder nicht positiv oder zu gro" ER_EVENT_ENDS_BEFORE_STARTS eng "ENDS is either invalid or before STARTS" + ger "ENDS ist entweder ungltig oder liegt vor STARTS" ER_EVENT_EXEC_TIME_IN_THE_PAST eng "Activation (AT) time is in the past" + ger "Aktivierungszeit (AT) liegt in der Vergangenheit" ER_EVENT_OPEN_TABLE_FAILED eng "Failed to open mysql.event" + ger "ffnen von mysql.event fehlgeschlagen" ER_EVENT_NEITHER_M_EXPR_NOR_M_AT eng "No datetime expression provided" + ger "Kein DATETIME-Ausdruck angegeben" ER_COL_COUNT_DOESNT_MATCH_CORRUPTED eng "Column count of mysql.%s is wrong. Expected %d, found %d. Table probably corrupted" + ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschdigt" ER_CANNOT_LOAD_FROM_TABLE eng "Cannot load from mysql.%s. Table probably corrupted. See error log." + ger "Kann mysql.%s nicht einlesen. Tabelle ist wahrscheinlich beschdigt, siehe Fehlerlog" ER_EVENT_CANNOT_DELETE eng "Failed to delete the event from mysql.event" + ger "Lschen des Events aus mysql.event fehlgeschlagen" ER_EVENT_COMPILE_ERROR eng "Error during compilation of event's body" + ger "Fehler beim Kompilieren des Event-Bodys" ER_EVENT_SAME_NAME eng "Same old and new event name" + ger "Alter und neuer Event-Name sind gleich" ER_EVENT_DATA_TOO_LONG eng "Data for column '%s' too long" + ger "Daten der Spalte '%s' zu lang" ER_DROP_INDEX_FK eng "Cannot drop index '%-.64s': needed in a foreign key constraint" ger "Kann Index '%-.64s' nicht lschen: wird fr einen Fremdschlssel bentigt" ER_WARN_DEPRECATED_SYNTAX - eng "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead." + eng "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead" + ger "Die Syntax '%s' ist veraltet und wird in MySQL %s entfernt. Bitte benutzen Sie statt dessen %s" ER_CANT_WRITE_LOCK_LOG_TABLE - eng "You can't write-lock a log table. Only read access is possible." + eng "You can't write-lock a log table. Only read access is possible" + ger "Eine Log-Tabelle kann nicht schreibgesperrt werden. 
Es ist ohnehin nur Lesezugriff möglich" ER_CANT_READ_LOCK_LOG_TABLE - eng "You can't use usual read lock with log tables. Try READ LOCAL instead." + eng "You can't use usual read lock with log tables. Try READ LOCAL instead" + ger "Log-Tabellen können nicht mit normalen Lesesperren gesperrt werden. Verwenden Sie statt dessen READ LOCAL" ER_FOREIGN_DUPLICATE_KEY 23000 S1009 - eng "Upholding foreign key constraints for table '%.64s', entry '%-.64s', key %d would lead to a duplicate entry" + eng "Upholding foreign key constraints for table '%.64s', entry '%-.64s', key %d would lead to a duplicate entry" + ger "Aufrechterhalten der Fremdschlüssel-Constraints für Tabelle '%.64s', Eintrag '%-.64s', Schlüssel %d würde zu einem doppelten Eintrag führen" ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use scripts/mysql_fix_privilege_tables" + ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie scripts/mysql_fix_privilege_tables, um den Fehler zu beheben" ER_REMOVED_SPACES eng "Leading spaces are removed from name '%s'" + ger "Führende Leerzeichen werden aus dem Namen '%s' entfernt" ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR - eng "Cannot switch out of the row-based binary log format when the session has open temporary tables" + eng "Cannot switch out of the row-based binary log format when the session has open temporary tables" + ger "Kann nicht aus dem zeilenbasierten Binärlog-Format herauswechseln, wenn die Sitzung offene temporäre Tabellen hat" ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT eng "Cannot change the binary logging format inside a stored function or trigger" + ger "Das Binärlog-Format kann innerhalb einer gespeicherten Funktion oder eines Triggers nicht geändert werden" ER_NDB_CANT_SWITCH_BINLOG_FORMAT - eng "The NDB cluster engine does not support changing the binlog format on the fly yet" + eng "The NDB cluster engine does not support changing the binlog format on the fly yet" + ger "Die Speicher-Engine NDB Cluster unterstützt das Ändern des Binärlog-Formats zur Laufzeit noch nicht" ER_PARTITION_NO_TEMPORARY - eng "Cannot create temporary table with partitions" + eng "Cannot create temporary table with partitions" + ger "Anlegen temporärer Tabellen mit Partitionen nicht möglich" ER_PARTITION_CONST_DOMAIN_ERROR eng "Partition constant is out of partition function domain" + ger "Partitionskonstante liegt außerhalb der Partitionsfunktionsdomäne" swe "Partitionskonstanten är utanför partitioneringsfunktionens domän" ER_PARTITION_FUNCTION_IS_NOT_ALLOWED eng "This partition function is not allowed" + ger "Diese Partitionierungsfunktion ist nicht erlaubt" swe "Denna partitioneringsfunktion är inte tillåten" ER_DDL_LOG_ERROR eng "Error in DDL log" + ger "Fehler im DDL-Log" ER_NULL_IN_VALUES_LESS_THAN eng "Not allowed to use NULL value in VALUES LESS THAN" + ger "In VALUES LESS THAN dürfen keine NULL-Werte verwendet werden" swe "Det är inte tillåtet att använda NULL-värden i VALUES LESS THAN" ER_WRONG_PARTITION_NAME eng "Incorrect partition name" + ger "Falscher Partitionsname" swe "Felaktigt partitionsnamn" ER_CANT_CHANGE_TX_ISOLATION 25001 - eng "Transaction isolation level can't be changed while a transaction is in progress" + eng "Transaction isolation level can't be changed while a transaction is in progress" + ger "Transaktionsisolationsebene kann während einer laufenden Transaktion nicht geändert werden" ER_DUP_ENTRY_AUTOINCREMENT_CASE eng
"ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.64s' for key '%-.64s'" + ger "ALTER TABLE fhrt zur Neusequenzierung von auto_increment, wodurch der doppelte Eintrag '%-.64s' fr Schlssel '%-.64s' auftritt" ER_EVENT_MODIFY_QUEUE_ERROR eng "Internal scheduler error %d" + ger "Interner Scheduler-Fehler %d" ER_EVENT_SET_VAR_ERROR eng "Error during starting/stopping of the scheduler. Error code %u" + ger "Fehler whrend des Startens oder Anhalten des Schedulers. Fehlercode %u" ER_PARTITION_MERGE_ERROR eng "%s handler cannot be used in partitioned tables" + ger "%s-Handler kann in partitionierten Tabellen nicht verwendet werden" swe "%s kan inte anvndas i en partitionerad tabell" ER_CANT_ACTIVATE_LOG - eng "Cannot activate '%-.64s' log." + eng "Cannot activate '%-.64s' log" + ger "Kann Logdatei '%-.64s' nicht aktivieren" ER_RBR_NOT_AVAILABLE eng "The server was not built with row-based replication" + ger "Der Server hat keine zeilenbasierte Replikation" ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA - eng "Triggers can not be created on system tables" + eng "Triggers can not be created on system tables" + ger "Trigger knnen nicht auf Systemtabellen erzeugt werden" ER_CANT_ALTER_LOG_TABLE eng "You can't alter a log table if logging is enabled" ER_BAD_LOG_ENGINE diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc index f580bcb16d9..b3c67ab5db7 100644 --- a/sql/sql_manager.cc +++ b/sql/sql_manager.cc @@ -23,7 +23,6 @@ */ #include "mysql_priv.h" -#include "sql_manager.h" ulong volatile manager_status; bool volatile manager_thread_in_use; diff --git a/sql/sql_manager.h b/sql/sql_manager.h deleted file mode 100644 index d42deb8ff81..00000000000 --- a/sql/sql_manager.h +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifdef WITH_BERKELEY_STORAGE_ENGINE -void berkeley_cleanup_log_files(void); -#endif /* WITH_BERKELEY_STORAGE_ENGINE */ diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index ba4c652efb7..eb3d0d40817 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -146,7 +146,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token BEFORE_SYM %token BEGIN_SYM %token BENCHMARK_SYM -%token BERKELEY_DB_SYM %token BIGINT %token BINARY %token BINLOG_SYM @@ -8354,30 +8353,6 @@ show_param: if (prepare_schema_table(YYTHD, lex, 0, SCH_COLLATIONS)) YYABORT; } - | BERKELEY_DB_SYM LOGS_SYM - { - LEX *lex= Lex; - lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS; - if (!(lex->create_info.db_type= - ha_resolve_by_legacy_type(YYTHD, DB_TYPE_BERKELEY_DB))) - { - my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "BerkeleyDB"); - YYABORT; - } - WARN_DEPRECATED(yythd, "5.2", "SHOW BDB LOGS", "'SHOW ENGINE BDB LOGS'"); - } - | LOGS_SYM - { - LEX *lex= Lex; - lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS; - if (!(lex->create_info.db_type= - ha_resolve_by_legacy_type(YYTHD, DB_TYPE_BERKELEY_DB))) - { - my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "BerkeleyDB"); - YYABORT; - } - WARN_DEPRECATED(yythd, "5.2", "SHOW LOGS", "'SHOW ENGINE BDB LOGS'"); - } | GRANTS { LEX *lex=Lex; @@ -9408,7 +9383,6 @@ keyword_sp: | AUTOEXTEND_SIZE_SYM {} | AVG_ROW_LENGTH {} | AVG_SYM {} - | BERKELEY_DB_SYM {} | BINLOG_SYM {} | BIT_SYM {} | BOOL_SYM {} diff --git a/storage/bdb/CMakeLists.txt b/storage/bdb/CMakeLists.txt deleted file mode 100644 index c27665d902c..00000000000 --- a/storage/bdb/CMakeLists.txt +++ /dev/null @@ -1,67 +0,0 @@ -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") - -INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/bdb/build_win32 - ${CMAKE_SOURCE_DIR}/storage/bdb/dbinc - ${CMAKE_SOURCE_DIR}/storage/bdb) - -# BDB needs a number of source files that are auto-generated by the unix -# configure. So to build BDB, it is necessary to copy these over to the Windows -# bitkeeper tree, or to use a source .tar.gz package which already has these -# files. 
-ADD_LIBRARY(bdb crypto/aes_method.c btree/bt_compact.c btree/bt_compare.c - btree/bt_conv.c btree/bt_curadj.c btree/bt_cursor.c - btree/bt_delete.c btree/bt_method.c btree/bt_open.c btree/bt_put.c - btree/bt_rec.c btree/bt_reclaim.c btree/bt_recno.c - btree/bt_rsearch.c btree/bt_search.c btree/bt_split.c - btree/bt_stat.c btree/bt_upgrade.c btree/bt_verify.c - btree/btree_auto.c db/crdel_auto.c db/crdel_rec.c crypto/crypto.c - db/db.c db/db_am.c db/db_auto.c common/db_byteorder.c db/db_cam.c - common/db_clock.c db/db_conv.c db/db_dispatch.c db/db_dup.c - common/db_err.c common/db_getlong.c common/db_idspace.c - db/db_iface.c db/db_join.c common/db_log2.c db/db_meta.c - db/db_method.c db/db_open.c db/db_overflow.c db/db_ovfl_vrfy.c - db/db_pr.c db/db_rec.c db/db_reclaim.c db/db_remove.c - db/db_rename.c db/db_ret.c env/db_salloc.c db/db_setid.c - db/db_setlsn.c env/db_shash.c db/db_stati.c db/db_truncate.c - db/db_upg.c db/db_upg_opd.c db/db_vrfy.c db/db_vrfyutil.c - dbm/dbm.c dbreg/dbreg.c dbreg/dbreg_auto.c dbreg/dbreg_rec.c - dbreg/dbreg_stat.c dbreg/dbreg_util.c env/env_failchk.c - env/env_file.c env/env_method.c env/env_open.c env/env_recover.c - env/env_region.c env/env_register.c env/env_stat.c - fileops/fileops_auto.c fileops/fop_basic.c fileops/fop_rec.c - fileops/fop_util.c hash/hash.c hash/hash_auto.c hash/hash_conv.c - hash/hash_dup.c hash/hash_func.c hash/hash_meta.c - hash/hash_method.c hash/hash_open.c hash/hash_page.c - hash/hash_rec.c hash/hash_reclaim.c hash/hash_stat.c - hash/hash_upgrade.c hash/hash_verify.c hmac/hmac.c - hsearch/hsearch.c lock/lock.c lock/lock_deadlock.c - lock/lock_failchk.c lock/lock_id.c lock/lock_list.c - lock/lock_method.c lock/lock_region.c lock/lock_stat.c - lock/lock_timer.c lock/lock_util.c log/log.c log/log_archive.c - log/log_compare.c log/log_debug.c log/log_get.c log/log_method.c - log/log_put.c log/log_stat.c mp/mp_alloc.c mp/mp_bh.c mp/mp_fget.c - mp/mp_fmethod.c mp/mp_fopen.c mp/mp_fput.c mp/mp_fset.c - mp/mp_method.c mp/mp_region.c mp/mp_register.c mp/mp_stat.c - mp/mp_sync.c mp/mp_trickle.c crypto/mersenne/mt19937db.c - mutex/mut_alloc.c mutex/mut_method.c mutex/mut_region.c - mutex/mut_stat.c mutex/mut_tas.c mutex/mut_win32.c - os_win32/os_abs.c os/os_alloc.c os_win32/os_clock.c - os_win32/os_config.c os_win32/os_dir.c os_win32/os_errno.c - os_win32/os_fid.c os_win32/os_flock.c os_win32/os_fsync.c - os_win32/os_handle.c os/os_id.c os_win32/os_map.c os/os_method.c - os/os_oflags.c os_win32/os_open.c os/os_region.c - os_win32/os_rename.c os/os_root.c os/os_rpath.c os_win32/os_rw.c - os_win32/os_seek.c os_win32/os_sleep.c os_win32/os_spin.c - os_win32/os_stat.c os/os_tmpdir.c os_win32/os_truncate.c - os/os_unlink.c qam/qam.c qam/qam_auto.c qam/qam_conv.c - qam/qam_files.c qam/qam_method.c qam/qam_open.c qam/qam_rec.c - qam/qam_stat.c qam/qam_upgrade.c qam/qam_verify.c rep/rep_auto.c - rep/rep_backup.c rep/rep_elect.c rep/rep_log.c rep/rep_method.c - rep/rep_record.c rep/rep_region.c rep/rep_stat.c rep/rep_stub.c - rep/rep_util.c rep/rep_verify.c crypto/rijndael/rijndael-alg-fst.c - crypto/rijndael/rijndael-api-fst.c hmac/sha1.c clib/strcasecmp.c - txn/txn.c txn/txn_auto.c txn/txn_chkpt.c txn/txn_failchk.c - txn/txn_method.c txn/txn_rec.c txn/txn_recover.c txn/txn_region.c - txn/txn_stat.c txn/txn_util.c common/util_log.c common/util_sig.c - xa/xa.c xa/xa_db.c xa/xa_map.c) diff --git a/storage/bdb/LICENSE b/storage/bdb/LICENSE deleted file mode 100644 index 87fa4cc6d1b..00000000000 --- a/storage/bdb/LICENSE +++ /dev/null @@ -1,102 +0,0 
@@ -/*- - * $Id: LICENSE,v 12.1 2005/06/16 20:20:10 bostic Exp $ - */ - -The following is the license that applies to this copy of the Berkeley DB -software. For a license to use the Berkeley DB software under conditions -other than those described here, or to purchase support for this software, -please contact Sleepycat Software by email at info@sleepycat.com, or on -the Web at http://www.sleepycat.com. - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -/* - * Copyright (c) 1990-2005 - * Sleepycat Software. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Redistributions in any form must be accompanied by information on - * how to obtain complete source code for the DB software and any - * accompanying software that uses the DB software. The source code - * must either be included in the distribution or be available for no - * more than the cost of distribution plus a nominal fee, and must be - * freely redistributable under reasonable conditions. For an - * executable file, complete source code means the source code for all - * modules it contains. It does not include source code for modules or - * files that typically accompany the major components of the operating - * system on which the executable file runs. - * - * THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR - * NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -/* - * Copyright (c) 1995, 1996 - * The President and Fellows of Harvard University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ diff --git a/storage/bdb/Makefile.in b/storage/bdb/Makefile.in deleted file mode 100644 index 6d7da66edf4..00000000000 --- a/storage/bdb/Makefile.in +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -# Adaptor makefile to translate between what automake expects and what -# BDB provides (or vice versa). 
- -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ - -# distdir and top_distdir are set by the calling Makefile - -bdb_build = build_unix -files = LICENSE Makefile Makefile.in README CMakeLists.txt -subdirs = btree build_win32 clib common cxx db dbinc \ - dbinc_auto db185 db_archive db_checkpoint db_deadlock db_dump \ - db_dump185 db_hotbackup db_load db_printlog db_recover db_stat db_upgrade \ - db_verify dbm dbreg dist env fileops hash \ - hsearch hmac include lock log mp mutex os \ - os_win32 qam rep txn xa sequence crypto - -@SET_MAKE@ - -all: - cd $(bdb_build) && $(MAKE) all - -clean: - cd $(bdb_build) && $(MAKE) clean - -distclean: - cd $(bdb_build) && $(MAKE) distclean - -# May want to fix this, and MYSQL/configure, to install things -install dvi check installcheck: - -distdir: - for s in $(subdirs); do \ - cp -pr $(srcdir)/$$s $(distdir)/$$s; \ - done - for f in $(files); do \ - test -f $(distdir)/$$f || cp -p $(srcdir)/$$f $(distdir)/$$f; \ - done - mkdir $(distdir)/$(bdb_build) - cp -p $(srcdir)/$(bdb_build)/.IGNORE_ME $(distdir)/$(bdb_build) diff --git a/storage/bdb/btree/bt_compact.c b/storage/bdb/btree/bt_compact.c deleted file mode 100644 index 3cc04b9aa19..00000000000 --- a/storage/bdb/btree/bt_compact.c +++ /dev/null @@ -1,2348 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_compact.c,v 12.34 2005/11/10 21:07:48 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/txn.h" - -static int __bam_compact_dups __P((DBC *, - PAGE *, u_int32_t, int, DB_COMPACT *, int *)); -static int __bam_compact_int __P((DBC *, - DBT *, DBT *, u_int32_t, int *, DB_COMPACT *, int *)); -static int __bam_csearch __P((DBC *, DBT *, u_int32_t, int)); -static int __bam_merge __P((DBC *, - DBC *, u_int32_t, DBT *, DB_COMPACT *,int *)); -static int __bam_merge_internal __P((DBC *, DBC *, int, DB_COMPACT *, int *)); -static int __bam_merge_pages __P((DBC *, DBC *, DB_COMPACT *)); -static int __bam_merge_records __P((DBC *, DBC*, u_int32_t, DB_COMPACT *)); -static int __bam_truncate_internal_overflow __P((DBC *, PAGE *, DB_COMPACT *)); -static int __bam_truncate_overflow __P((DBC *, - db_pgno_t, db_pgno_t, DB_COMPACT *)); -static int __bam_truncate_page __P((DBC *, PAGE **, int)); -static int __bam_truncate_root_page __P((DBC *, - PAGE *, u_int32_t, DB_COMPACT *)); - -#ifdef HAVE_FTRUNCATE -static int __bam_free_freelist __P((DB *, DB_TXN *)); -static int __bam_savekey __P((DBC *, int, DBT *)); -static int __bam_setup_freelist __P((DB *, struct pglist *, u_int32_t)); -static int __bam_truncate_internal __P((DB *, DB_TXN *, DB_COMPACT *)); -#endif - -#define SAVE_START \ - do { \ - save_data = *c_data; \ - ret = __db_retcopy(dbenv, \ - &save_start, end->data, end->size, \ - &save_start.data, &save_start.ulen); \ - } while (0) - -/* - * Only restore those things that are negated by aborting the - * transaction. We don't restore the number of deadlocks, for example. 
- */ - -#define RESTORE_START \ - do { \ - c_data->compact_pages_free = \ - save_data.compact_pages_free; \ - c_data->compact_levels = save_data.compact_levels; \ - c_data->compact_truncate = save_data.compact_truncate; \ - ret = __db_retcopy(dbenv, end, \ - save_start.data, save_start.size, \ - &end->data, &end->ulen); \ - } while (0) -/* - * __bam_compact -- compact a btree. - * - * PUBLIC: int __bam_compact __P((DB *, DB_TXN *, - * PUBLIC: DBT *, DBT *, DB_COMPACT *, u_int32_t, DBT *)); - */ -int -__bam_compact(dbp, txn, start, stop, c_data, flags, end) - DB *dbp; - DB_TXN *txn; - DBT *start, *stop; - DB_COMPACT *c_data; - u_int32_t flags; - DBT *end; -{ - DBT current, save_start; - DBC *dbc; - DB_COMPACT save_data; - DB_ENV *dbenv; - db_pgno_t last_pgno; - struct pglist *list; - u_int32_t factor, nelems, truncated; - int deadlock, done, ret, span, t_ret, txn_local; - - dbenv = dbp->dbenv; - - memset(¤t, 0, sizeof(current)); - memset(&save_start, 0, sizeof(save_start)); - dbc = NULL; - deadlock = 0; - done = 0; - factor = 0; - ret = 0; - span = 0; - truncated = 0; - last_pgno = 0; - - /* - * We pass "end" to the internal routine, indicating where - * that routine should begin its work and expecting that it - * will return to us the last key that it processed. - */ - if (end == NULL) - end = ¤t; - if (start != NULL && (ret = __db_retcopy(dbenv, - end, start->data, start->size, &end->data, &end->ulen)) != 0) - return (ret); - - list = NULL; - nelems = 0; - - if (IS_DB_AUTO_COMMIT(dbp, txn)) - txn_local = 1; - else - txn_local = 0; - if (!LF_ISSET(DB_FREE_SPACE | DB_FREELIST_ONLY)) - goto no_free; - if (LF_ISSET(DB_FREELIST_ONLY)) - LF_SET(DB_FREE_SPACE); - -#ifdef HAVE_FTRUNCATE - /* Sort the freelist and set up the in-memory list representation. */ - if (txn_local && (ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) - goto err; - - if ((ret = __db_free_truncate(dbp, - txn, flags, c_data, &list, &nelems, &last_pgno)) != 0) { - LF_CLR(DB_FREE_SPACE); - goto terr; - } - - /* If the freelist is empty and we are not filling, get out. */ - if (nelems == 0 && LF_ISSET(DB_FREELIST_ONLY)) { - ret = 0; - LF_CLR(DB_FREE_SPACE); - goto terr; - } - if ((ret = __bam_setup_freelist(dbp, list, nelems)) != 0) { - /* Someone else owns the free list. */ - if (ret == EBUSY) - ret = 0; - } - - /* Commit the txn and release the meta page lock. */ -terr: if (txn_local) { - if ((t_ret = __txn_commit(txn, DB_TXN_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - txn = NULL; - } - if (ret != 0) - goto err; - - /* Save the number truncated so far, we will add what we get below. */ - truncated = c_data->compact_pages_truncated; - if (LF_ISSET(DB_FREELIST_ONLY)) - goto done; -#endif - - /* - * We want factor to be the target number of free bytes on each page, - * so we know when to stop adding items to a page. Make sure to - * subtract the page overhead when computing this target. This can - * result in a 1-2% error on the smallest page. - * First figure out how many bytes we should use: - */ -no_free: - factor = dbp->pgsize - SIZEOF_PAGE; - if (c_data->compact_fillpercent != 0) { - factor *= c_data->compact_fillpercent; - factor /= 100; - } - /* Now convert to the number of free bytes to target. 
*/ - factor = (dbp->pgsize - SIZEOF_PAGE) - factor; - - if (c_data->compact_pages == 0) - c_data->compact_pages = DB_MAX_PAGES; - - do { - deadlock = 0; - - SAVE_START; - if (ret != 0) - break; - - if (txn_local) { - if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) - break; - - if (c_data->compact_timeout != 0 && - (ret = __txn_set_timeout(txn, - c_data->compact_timeout, DB_SET_LOCK_TIMEOUT)) != 0) - goto err; - } - - if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) - goto err; - - if ((ret = __bam_compact_int(dbc, end, stop, factor, - &span, c_data, &done)) == DB_LOCK_DEADLOCK && txn_local) { - /* - * We retry on deadlock. Cancel the statistics - * and reset the start point to before this - * iteration. - */ - deadlock = 1; - c_data->compact_deadlock++; - RESTORE_START; - } - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - -err: if (txn_local && txn != NULL) { - if (ret == 0 && deadlock == 0) - ret = __txn_commit(txn, DB_TXN_NOSYNC); - else if ((t_ret = __txn_abort(txn)) != 0 && ret == 0) - ret = t_ret; - txn = NULL; - } - } while (ret == 0 && !done); - - if (current.data != NULL) - __os_free(dbenv, current.data); - if (save_start.data != NULL) - __os_free(dbenv, save_start.data); - -#ifdef HAVE_FTRUNCATE - /* - * Finish up truncation work. If there are pages left in the free - * list then search the internal nodes of the tree as we may have - * missed some while walking the leaf nodes. Then calculate how - * many pages we have truncated and release the in-memory free list. - */ -done: if (LF_ISSET(DB_FREE_SPACE)) { - DBMETA *meta; - db_pgno_t pgno; - - pgno = PGNO_BASE_MD; - done = 1; - if (ret == 0 && !LF_ISSET(DB_FREELIST_ONLY) && - (t_ret = __memp_fget(dbp->mpf, &pgno, 0, &meta)) == 0) { - done = meta->free == PGNO_INVALID; - ret = __memp_fput(dbp->mpf, meta, 0); - } - - if (!done) - ret = __bam_truncate_internal(dbp, txn, c_data); - - /* Clean up the free list. */ - if (list != NULL) - __os_free(dbenv, list); - - if ((t_ret = __memp_fget(dbp->mpf, &pgno, 0, &meta)) == 0) { - c_data->compact_pages_truncated = - truncated + last_pgno - meta->last_pgno; - if ((t_ret = - __memp_fput(dbp->mpf, meta, 0)) != 0 && ret == 0) - ret = t_ret; - } else if (ret == 0) - ret = t_ret; - - if ((t_ret = __bam_free_freelist(dbp, txn)) != 0 && ret == 0) - t_ret = ret; - } -#endif - - return (ret); -} - -/* - * __bam_csearch -- isolate search code for bam_compact. - * This routine hides the differences between searching - * a BTREE and a RECNO from the rest of the code. - */ -#define CS_READ 0 /* We are just reading. */ -#define CS_PARENT 1 /* We want the parent too, write lock. */ -#define CS_NEXT 2 /* Get the next page. */ -#define CS_NEXT_WRITE 3 /* Get the next page and write lock. */ -#define CS_DEL 4 /* Get a stack to delete a page. */ -#define CS_START 5 /* Starting level for stack, write lock. */ -#define CS_GETRECNO 0x80 /* Extract record number from start. */ - -static int -__bam_csearch(dbc, start, sflag, level) - DBC *dbc; - DBT *start; - u_int32_t sflag; - int level; -{ - BTREE_CURSOR *cp; - int not_used, ret; - - cp = (BTREE_CURSOR *)dbc->internal; - - if (dbc->dbtype == DB_RECNO) { - /* If GETRECNO is not set the cp->recno is what we want. 
*/ - if (FLD_ISSET(sflag, CS_GETRECNO)) { - if (start == NULL || start->size == 0) - cp->recno = 1; - else if ((ret = - __ram_getno(dbc, start, &cp->recno, 0)) != 0) - return (ret); - FLD_CLR(sflag, CS_GETRECNO); - } - switch (sflag) { - case CS_READ: - sflag = S_READ; - break; - case CS_NEXT: - sflag = S_PARENT | S_READ; - break; - case CS_START: - level = LEAFLEVEL; - /* FALLTHROUGH */ - case CS_DEL: - case CS_NEXT_WRITE: - sflag = S_STACK; - break; - case CS_PARENT: - sflag = S_PARENT | S_WRITE; - break; - default: - return (__db_panic(dbc->dbp->dbenv, EINVAL)); - } - if ((ret = __bam_rsearch(dbc, - &cp->recno, sflag, level, ¬_used)) != 0) - return (ret); - /* Reset the cursor's recno to the beginning of the page. */ - cp->recno -= cp->csp->indx; - } else { - FLD_CLR(sflag, CS_GETRECNO); - switch (sflag) { - case CS_READ: - sflag = S_READ | S_DUPFIRST; - break; - case CS_DEL: - sflag = S_DEL; - break; - case CS_NEXT: - sflag = S_NEXT; - break; - case CS_NEXT_WRITE: - sflag = S_NEXT | S_WRITE; - break; - case CS_START: - sflag = S_START | S_WRITE; - break; - case CS_PARENT: - sflag = S_PARENT | S_WRITE; - break; - default: - return (__db_panic(dbc->dbp->dbenv, EINVAL)); - } - if (start == NULL || start->size == 0) - FLD_SET(sflag, S_MIN); - - if ((ret = __bam_search(dbc, - cp->root, start, sflag, level, NULL, ¬_used)) != 0) - return (ret); - } - - return (0); -} - -/* - * __bam_compact_int -- internal compaction routine. - * Called either with a cursor on the main database - * or a cursor initialized to the root of an off page duplicate - * tree. - */ -static int -__bam_compact_int(dbc, start, stop, factor, spanp, c_data, donep) - DBC *dbc; - DBT *start, *stop; - u_int32_t factor; - int *spanp; - DB_COMPACT *c_data; - int *donep; -{ - BTREE_CURSOR *cp, *ncp; - DB *dbp; - DBC *ndbc; - DB_ENV *dbenv; - DB_LOCK nolock; - EPG *epg; - DB_MPOOLFILE *dbmp; - PAGE *pg, *ppg, *npg; - db_pgno_t npgno; - db_recno_t next_recno; - u_int32_t sflag; - int check_dups, check_trunc, done, level; - int merged, nentry, next_page, pgs_done, ret, t_ret, tdone; -#ifdef DEBUG - DBT trace; - char buf[256]; -#define CTRACE(dbc, location, t, start, f) do { \ - trace.data = t; \ - trace.size = (u_int32_t)strlen(t); \ - DEBUG_LWRITE(dbc, dbc->txn, location, &trace, start, f) \ - } while (0) -#define PTRACE(dbc, location, p, start, f) do { \ - (void)sprintf(buf, "pgno: %lu", (u_long)p); \ - CTRACE(dbc, location, buf, start, f); \ - } while (0) -#else -#define CTRACE(dbc, location, t, start, f) -#define PTRACE(dbc, location, p, start, f) -#endif - - ndbc = NULL; - pg = NULL; - npg = NULL; - done = 0; - tdone = 0; - pgs_done = 0; - next_recno = 0; - next_page = 0; - LOCK_INIT(nolock); - check_trunc = c_data->compact_truncate != PGNO_INVALID; - check_dups = (!F_ISSET(dbc, DBC_OPD) && - F_ISSET(dbc->dbp, DB_AM_DUP)) || check_trunc; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - dbmp = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - - /* Search down the tree for the starting point. */ - if ((ret = __bam_csearch(dbc, - start, CS_READ | CS_GETRECNO, LEAFLEVEL)) != 0) { - /* Its not an error to compact an empty db. */ - if (ret == DB_NOTFOUND) - ret = 0; - goto err; - } - - /* - * Get the first leaf page. The loop below will change pg so - * we clear the stack reference so we don't put a a page twice. - */ - pg = cp->csp->page; - cp->csp->page = NULL; - next_recno = cp->recno; -next: /* - * This is the start of the main compaction loop. 
There are 3 - * parts to the process: - * 1) Walk the leaf pages of the tree looking for a page to - * process. We do this with read locks. Save the - * key from the page and release it. - * 2) Set up a cursor stack which will write lock the page - * and enough of its ancestors to get the job done. - * This could go to the root if we might delete a subtree - * or we have record numbers to update. - * 3) Loop fetching pages after the above page and move enough - * data to fill it. - * We exit the loop if we are at the end of the leaf pages, are - * about to lock a new subtree (we span) or on error. - */ - - /* Walk the pages looking for something to fill up. */ - while ((npgno = NEXT_PGNO(pg)) != PGNO_INVALID) { - c_data->compact_pages_examine++; - PTRACE(dbc, "Next", PGNO(pg), start, 0); - - /* If we have fetched the next page, get the new key. */ - if (next_page == 1 && - dbc->dbtype != DB_RECNO && NUM_ENT(pg) != 0) { - if ((ret = __db_ret(dbp, pg, - 0, start, &start->data, &start->ulen)) != 0) - goto err; - } - next_recno += NUM_ENT(pg); - if (P_FREESPACE(dbp, pg) > factor || - (check_trunc && PGNO(pg) > c_data->compact_truncate)) - break; - /* - * The page does not need more data or to be swapped, - * check to see if we want to look at possible duplicate - * trees or overflow records and the move on to the next page. - */ - cp->recno += NUM_ENT(pg); - next_page = 1; - tdone = pgs_done; - PTRACE(dbc, "Dups", PGNO(pg), start, 0); - if (check_dups && (ret = __bam_compact_dups( - dbc, pg, factor, 0, c_data, &pgs_done)) != 0) - goto err; - npgno = NEXT_PGNO(pg); - if ((ret = __memp_fput(dbmp, pg, 0)) != 0) - goto err; - pg = NULL; - /* - * If we don't do anything we don't need to hold - * the lock on the previous page, so couple always. - */ - if ((ret = __db_lget(dbc, - tdone == pgs_done ? LCK_COUPLE_ALWAYS : LCK_COUPLE, - npgno, DB_LOCK_READ, 0, &cp->csp->lock)) != 0) - goto err; - if ((ret = __memp_fget(dbmp, &npgno, 0, &pg)) != 0) - goto err; - } - - /* - * When we get here we have 3 cases: - * 1) We've reached the end of the leaf linked list and are done. - * 2) A page whose freespace exceeds our target and therefore needs - * to have data added to it. - * 3) A page that doesn't have too much free space but needs to be - * checked for truncation. - * In both cases 2 and 3, we need that page's first key or record - * number. We may already have it, if not get it here. - */ - if ((nentry = NUM_ENT(pg)) != 0) { - next_page = 0; - /* Get a copy of the first recno on the page. */ - if (dbc->dbtype == DB_RECNO) { - if ((ret = __db_retcopy(dbp->dbenv, start, - &cp->recno, sizeof(cp->recno), - &start->data, &start->ulen)) != 0) - goto err; - } else if (start->size == 0 && - (ret = __db_ret(dbp, pg, - 0, start, &start->data, &start->ulen)) != 0) - goto err; - - if (npgno == PGNO_INVALID) { - /* End of the tree, check its duplicates and exit. */ - PTRACE(dbc, "GoDone", PGNO(pg), start, 0); - if (check_dups && (ret = - __bam_compact_dups(dbc, - pg, factor, 0, c_data, &pgs_done)) != 0) - goto err; - c_data->compact_pages_examine++; - done = 1; - goto done; - } - } - - /* Release the page so we don't deadlock getting its parent. */ - BT_STK_CLR(cp); - if ((ret = __LPUT(dbc, cp->csp->lock)) != 0) - goto err; - if ((ret = __memp_fput(dbmp, pg, 0)) != 0) - goto err; - pg = NULL; - - /* - * Setup the cursor stack. There are 3 cases: - * 1) the page is empty and will be deleted: nentry == 0. - * 2) the next page has the same parent: *spanp == 0. - * 3) the next page has a different parent: *spanp == 1. 
- * - * We now need to search the tree again, getting a write lock - * on the page we are going to merge or delete. We do this by - * searching down the tree and locking as much of the subtree - * above the page as needed. In the case of a delete we will - * find the maximal subtree that can be deleted. In the case - * of merge if the current page and the next page are siblings - * with the same parent then we only need to lock the parent. - * Otherwise *span will be set and we need to search to find the - * lowest common ancestor. Dbc will be set to contain the subtree - * containing the page to be merged or deleted. Ndbc will contain - * the minimal subtree containing that page and its next sibling. - * In all cases for DB_RECNO we simplify things and get the whole - * tree if we need more than a single parent. - */ - - /* Case 1 -- page is empty. */ - if (nentry == 0) { - CTRACE(dbc, "Empty", "", start, 0); - if (next_page == 1) - sflag = CS_NEXT_WRITE; - else - sflag = CS_DEL; - if ((ret = __bam_csearch(dbc, start, sflag, LEAFLEVEL)) != 0) - goto err; - - pg = cp->csp->page; - /* Check to see if the page is still empty. */ - if (NUM_ENT(pg) != 0) - npgno = PGNO(pg); - else { - npgno = NEXT_PGNO(pg); - /* If this is now the root, we are very done. */ - if (PGNO(pg) == cp->root) - done = 1; - else { - if ((ret = __bam_dpages(dbc, 0, 0)) != 0) - goto err; - c_data->compact_pages_free++; - goto next_no_release; - } - } - goto next_page; - } - - /* case 3 -- different parents. */ - if (*spanp) { - CTRACE(dbc, "Span", "", start, 0); - if (ndbc == NULL && (ret = __db_c_dup(dbc, &ndbc, 0)) != 0) - goto err; - ncp = (BTREE_CURSOR *)ndbc->internal; - ncp->recno = next_recno; - /* - * Search the tree looking for the next page after the - * current key. For RECNO get the whole stack. - * For BTREE the return will contain the stack that - * dominates both the current and next pages. - */ - if ((ret = __bam_csearch(ndbc, start, CS_NEXT_WRITE, 0)) != 0) - goto err; - - if (dbc->dbtype == DB_RECNO) { - /* - * The record we are looking for may have moved - * to the previous page. This page should - * be at the beginning of its parent. - * If not, then start over. - */ - if (ncp->csp[-1].indx != 0) { - *spanp = 0; - goto deleted; - } - - } - PTRACE(dbc, "SDups", PGNO(ncp->csp->page), start, 0); - if (check_dups && - (ret = __bam_compact_dups(ndbc, - ncp->csp->page, factor, 1, c_data, &pgs_done)) != 0) - goto err; - - /* - * We need the stacks to be the same height - * so that we can merge parents. - */ - level = LEVEL(ncp->sp->page); - sflag = CS_START; - if ((ret = __bam_csearch(dbc, start, sflag, level)) != 0) - goto err; - pg = cp->csp->page; - *spanp = 0; - - /* - * The page may have emptied while we waited for the lock. - * Reset npgno so we re-get this page when we go back to the - * top. - */ - if (NUM_ENT(pg) == 0) { - npgno = PGNO(pg); - goto next_page; - } - if (check_trunc && PGNO(pg) > c_data->compact_truncate) { - pgs_done++; - /* Get a fresh low numbered page. */ - if ((ret = __bam_truncate_page(dbc, &pg, 1)) != 0) - goto err1; - } - - npgno = NEXT_PGNO(pg); - PTRACE(dbc, "SDups", PGNO(pg), start, 0); - if (check_dups && (ret = - __bam_compact_dups(dbc, pg, - factor, 1, c_data, &pgs_done)) != 0) - goto err1; - - /* - * We may have dropped our locks, check again - * to see if we still need to fill this page and - * we are in a spanning situation. 
- */ - - if (P_FREESPACE(dbp, pg) <= factor || - cp->csp[-1].indx != NUM_ENT(cp->csp[-1].page) - 1) - goto next_page; - - /* - * Try to move things into a single parent. - */ - merged = 0; - for (epg = cp->sp; epg != cp->csp; epg++) { - if (PGNO(epg->page) == cp->root) - continue; - PTRACE(dbc, "PMerge", PGNO(epg->page), start, 0); - if ((ret = __bam_merge_internal(dbc, - ndbc, LEVEL(epg->page), c_data, &merged)) != 0) - goto err1; - if (merged) - break; - } - - /* If we merged the parent, then we nolonger span. */ - if (merged) { - pgs_done++; - if (cp->csp->page == NULL) - goto deleted; - npgno = PGNO(pg); - goto next_page; - } - PTRACE(dbc, "SMerge", PGNO(cp->csp->page), start, 0); - npgno = NEXT_PGNO(ncp->csp->page); - if ((ret = __bam_merge(dbc, - ndbc, factor, stop, c_data, &done)) != 0) - goto err1; - pgs_done++; - /* - * __bam_merge could have freed our stack if it - * deleted a page possibly collapsing the tree. - */ - if (cp->csp->page == NULL) - goto deleted; - cp->recno += NUM_ENT(pg); - - /* If we did not bump to the next page something did not fit. */ - if (npgno != NEXT_PGNO(pg)) { - npgno = NEXT_PGNO(pg); - goto next_page; - } - } else { - /* Case 2 -- same parents. */ - CTRACE(dbc, "Sib", "", start, 0); - if ((ret = - __bam_csearch(dbc, start, CS_PARENT, LEAFLEVEL)) != 0) - goto err; - - pg = cp->csp->page; - DB_ASSERT(cp->csp - cp->sp == 1); - npgno = PGNO(pg); - - /* We now have a write lock, recheck the page. */ - if ((nentry = NUM_ENT(pg)) == 0) - goto next_page; - - npgno = NEXT_PGNO(pg); - - /* Check duplicate trees, we have a write lock on the page. */ - PTRACE(dbc, "SibDup", PGNO(pg), start, 0); - if (check_dups && (ret = - __bam_compact_dups(dbc, pg, - factor, 1, c_data, &pgs_done)) != 0) - goto err1; - - if (check_trunc && PGNO(pg) > c_data->compact_truncate) { - pgs_done++; - /* Get a fresh low numbered page. */ - if ((ret = __bam_truncate_page(dbc, &pg, 1)) != 0) - goto err1; - } - - /* After re-locking check to see if we still need to fill. */ - if (P_FREESPACE(dbp, pg) <= factor) - goto next_page; - - /* If they have the same parent, just dup the cursor */ - if (ndbc != NULL && (ret = __db_c_close(ndbc)) != 0) - goto err1; - if ((ret = __db_c_dup(dbc, &ndbc, DB_POSITION)) != 0) - goto err1; - ncp = (BTREE_CURSOR *)ndbc->internal; - - /* - * ncp->recno needs to have the recno of the next page. - * Bump it by the number of records on the current page. - */ - ncp->recno += NUM_ENT(pg); - } - - /* Fetch pages until we fill this one. */ - while (!done && npgno != PGNO_INVALID && - P_FREESPACE(dbp, pg) > factor && c_data->compact_pages != 0) { - /* - * If our current position is the last one on a parent - * page, then we are about to merge across different - * internal nodes. Thus, we need to lock higher up - * in the tree. We will exit the routine and commit - * what we have done so far. Set spanp so we know - * we are in this case when we come back. - */ - if (cp->csp[-1].indx == NUM_ENT(cp->csp[-1].page) - 1) { - *spanp = 1; - npgno = PGNO(pg); - next_recno = cp->recno; - goto next_page; - } - - /* Lock and get the next page. */ - if ((ret = __db_lget(dbc, LCK_COUPLE, - npgno, DB_LOCK_WRITE, 0, &ncp->lock)) != 0) - goto err1; - if ((ret = __memp_fget(dbmp, &npgno, 0, &npg)) != 0) - goto err1; - - /* Fix up the next page cursor with its parent node. 
*/ - if ((ret = __memp_fget(dbmp, - &PGNO(cp->csp[-1].page), 0, &ppg)) != 0) - goto err1; - BT_STK_PUSH(dbenv, ncp, ppg, - cp->csp[-1].indx + 1, nolock, DB_LOCK_NG, ret); - if (ret != 0) - goto err1; - - /* Put the page on the stack. */ - BT_STK_ENTER(dbenv, ncp, npg, 0, ncp->lock, DB_LOCK_WRITE, ret); - - LOCK_INIT(ncp->lock); - npg = NULL; - - c_data->compact_pages_examine++; - - PTRACE(dbc, "MDups", PGNO(ncp->csp->page), start, 0); - if (check_dups && (ret = __bam_compact_dups(ndbc, - ncp->csp->page, factor, 1, c_data, &pgs_done)) != 0) - goto err1; - - npgno = NEXT_PGNO(ncp->csp->page); - /* - * Merge the pages. This will either free the next - * page or just update its parent pointer. - */ - PTRACE(dbc, "Merge", PGNO(cp->csp->page), start, 0); - if ((ret = __bam_merge(dbc, - ndbc, factor, stop, c_data, &done)) != 0) - goto err1; - - pgs_done++; - - /* - * __bam_merge could have freed our stack if it - * deleted a page possibly collapsing the tree. - */ - if (cp->csp->page == NULL) - goto deleted; - /* If we did not bump to the next page something did not fit. */ - if (npgno != NEXT_PGNO(pg)) - break; - } - - /* Bottom of the main loop. Move to the next page. */ - npgno = NEXT_PGNO(pg); - cp->recno += NUM_ENT(pg); - next_recno = cp->recno; - -next_page: - if ((ret = __bam_stkrel(dbc, pgs_done == 0 ? STK_NOLOCK : 0)) != 0) - goto err1; - if (ndbc != NULL && - (ret = __bam_stkrel(ndbc, pgs_done == 0 ? STK_NOLOCK : 0)) != 0) - goto err1; - -next_no_release: - pg = NULL; - - if (npgno == PGNO_INVALID || c_data->compact_pages == 0) - done = 1; - if (!done) { - /* - * If we are at the end of this parent commit the - * transaction so we don't tie things up. - */ - if (pgs_done != 0 && *spanp) { -deleted: if (((ret = __bam_stkrel(ndbc, 0)) != 0 || - (ret = __db_c_close(ndbc)) != 0)) - goto err; - *donep = 0; - return (0); - } - - /* Reget the next page to look at. */ - cp->recno = next_recno; - if ((ret = __memp_fget(dbmp, &npgno, 0, &pg)) != 0) - goto err; - next_page = 1; - goto next; - } - -done: - if (0) { - /* We come here if pg is the same as cp->csp->page. */ -err1: pg = NULL; - } -err: if (dbc != NULL && - (t_ret = __bam_stkrel(dbc, STK_CLRDBC)) != 0 && ret == 0) - ret = t_ret; - if (ndbc != NULL) { - if ((t_ret = __bam_stkrel(ndbc, STK_CLRDBC)) != 0 && ret == 0) - ret = t_ret; - else if ((t_ret = __db_c_close(ndbc)) != 0 && ret == 0) - ret = t_ret; - } - - if (pg != NULL && (t_ret = __memp_fput(dbmp, pg, 0) != 0) && ret == 0) - ret = t_ret; - if (npg != NULL && (t_ret = __memp_fput(dbmp, npg, 0) != 0) && ret == 0) - ret = t_ret; - - *donep = done; - - return (ret); -} - -/* - * __bam_merge -- do actual merging of leaf pages. - */ -static int -__bam_merge(dbc, ndbc, factor, stop, c_data, donep) - DBC *dbc, *ndbc; - u_int32_t factor; - DBT *stop; - DB_COMPACT *c_data; - int *donep; -{ - BTREE_CURSOR *cp, *ncp; - BTREE *t; - DB *dbp; - PAGE *pg, *npg; - db_indx_t adj, nent; - db_recno_t recno; - int cmp, ret; - int (*func) __P((DB *, const DBT *, const DBT *)); - - dbp = dbc->dbp; - t = dbp->bt_internal; - cp = (BTREE_CURSOR *)dbc->internal; - ncp = (BTREE_CURSOR *)ndbc->internal; - pg = cp->csp->page; - npg = ncp->csp->page; - - nent = NUM_ENT(npg); - - /* If the page is empty just throw it away. */ - if (nent == 0) - goto free; - adj = TYPE(npg) == P_LBTREE ? P_INDX : O_INDX; - /* Find if the stopping point is on this page. 
*/ - if (stop != NULL && stop->size != 0) { - if (dbc->dbtype == DB_RECNO) { - if ((ret = __ram_getno(dbc, stop, &recno, 0)) != 0) - goto err; - if (ncp->recno > recno) { - *donep = 1; - if (cp->recno > recno) - goto done; - } - } else { - func = TYPE(npg) == P_LBTREE ? - (dbp->dup_compare == NULL ? - __bam_defcmp : dbp->dup_compare) : t->bt_compare; - - if ((ret = __bam_cmp(dbp, - stop, npg, nent - adj, func, &cmp)) != 0) - goto err; - - /* - * If the last record is beyond the stopping - * point we are done after this page. If the - * first record is beyond the stopping point - * don't even bother with this page. - */ - if (cmp <= 0) { - *donep = 1; - if ((ret = __bam_cmp(dbp, - stop, npg, 0, func, &cmp)) != 0) - goto err; - if (cmp <= 0) - goto done; - } - } - } - - /* - * If there is too much data then just move records one at a time. - * Otherwise copy the data space over and fix up the index table. - * If we are on the left most child we will effect our parent's - * index entry so we call merge_records to figure out key sizes. - */ - if ((dbc->dbtype == DB_BTREE && - ncp->csp[-1].indx == 0 && ncp->csp[-1].entries != 1) || - (int)(P_FREESPACE(dbp, pg) - - ((dbp->pgsize - P_OVERHEAD(dbp)) - - P_FREESPACE(dbp, npg))) < (int)factor) - ret = __bam_merge_records(dbc, ndbc, factor, c_data); - else -free: ret = __bam_merge_pages(dbc, ndbc, c_data); - -done: -err: return (ret); -} - -static int -__bam_merge_records(dbc, ndbc, factor, c_data) - DBC *dbc, *ndbc; - u_int32_t factor; - DB_COMPACT *c_data; -{ - BKEYDATA *bk, *tmp_bk; - BINTERNAL *bi; - BTREE *t; - BTREE_CURSOR *cp, *ncp; - DB *dbp; - DBT a, b, data, hdr; - EPG *epg; - PAGE *pg, *npg; - db_indx_t adj, indx, nent, *ninp, pind; - int32_t adjust; - u_int32_t free, nksize, pfree, size; - int first_dup, is_dup, next_dup, n_ok, ret; - size_t (*func) __P((DB *, const DBT *, const DBT *)); - - dbp = dbc->dbp; - t = dbp->bt_internal; - cp = (BTREE_CURSOR *)dbc->internal; - ncp = (BTREE_CURSOR *)ndbc->internal; - pg = cp->csp->page; - npg = ncp->csp->page; - memset(&hdr, 0, sizeof(hdr)); - pind = NUM_ENT(pg); - n_ok = 0; - adjust = 0; - ret = 0; - nent = NUM_ENT(npg); - - DB_ASSERT (nent != 0); - - /* See if we want to swap out this page. */ - if (c_data->compact_truncate != PGNO_INVALID && - PGNO(npg) > c_data->compact_truncate) { - /* Get a fresh low numbered page. */ - if ((ret = __bam_truncate_page(ndbc, &npg, 1)) != 0) - goto err; - } - - ninp = P_INP(dbp, npg); - - /* - * pg is the page that is being filled, it is in the stack in cp. - * npg is the next page, it is in the stack in ncp. - */ - free = P_FREESPACE(dbp, pg); - - adj = TYPE(npg) == P_LBTREE ? P_INDX : O_INDX; - /* - * Loop through the records and find the stopping point. - */ - for (indx = 0; indx < nent; indx += adj) { - bk = GET_BKEYDATA(dbp, npg, indx); - - /* Size of the key. */ - size = BITEM_PSIZE(bk); - - /* Size of the data. */ - if (TYPE(pg) == P_LBTREE) - size += BITEM_PSIZE(GET_BKEYDATA(dbp, npg, indx + 1)); - /* - * If we are at a duplicate set, skip ahead to see and - * get the total size for the group. - */ - n_ok = adj; - if (TYPE(pg) == P_LBTREE && - indx < nent - adj && - ninp[indx] == ninp[indx + adj]) { - do { - /* Size of index for key reference. */ - size += sizeof(db_indx_t); - n_ok++; - /* Size of data item. */ - size += BITEM_PSIZE( - GET_BKEYDATA(dbp, npg, indx + n_ok)); - n_ok++; - } while (indx + n_ok < nent && - ninp[indx] == ninp[indx + n_ok]); - } - /* if the next set will not fit on the page we are done. 
*/ - if (free < size) - break; - - /* - * Otherwise figure out if we are past the goal and if - * adding this set will put us closer to the goal than - * we are now. - */ - if ((free - size) < factor) { - if (free - factor > factor - (free - size)) - indx += n_ok; - break; - } - free -= size; - indx += n_ok - adj; - } - if (indx == 0) - goto done; - if (TYPE(pg) != P_LBTREE) { - if (indx == nent) - return (__bam_merge_pages(dbc, ndbc, c_data)); - goto no_check; - } - /* - * We need to update npg's parent key. Avoid creating a new key - * that will be too big. Get what space will be available on the - * parents. Then if there will not be room for this key, see if - * prefix compression will make it work, if not backup till we - * find something that will. (Needless to say, this is a very - * unlikely event.) If we are deleting this page then we will - * need to propagate the next key to our grand parents, so we - * see if that will fit. - */ - pfree = dbp->pgsize; - for (epg = &ncp->csp[-1]; epg >= ncp->sp; epg--) - if ((free = P_FREESPACE(dbp, epg->page)) < pfree) { - bi = GET_BINTERNAL(dbp, epg->page, epg->indx); - /* Add back in the key we will be deleting. */ - free += BINTERNAL_PSIZE(bi->len); - if (free < pfree) - pfree = free; - if (epg->indx != 0) - break; - } - - /* - * If we are at the end, we will delete this page. We need to - * check the next parent key only if we are the leftmost page and - * will therefore have to propagate the key up the tree. - */ - if (indx == nent) { - if (ncp->csp[-1].indx != 0 || - BINTERNAL_PSIZE(GET_BINTERNAL(dbp, - ncp->csp[-1].page, 1)->len) <= pfree) - return (__bam_merge_pages(dbc, ndbc, c_data)); - indx -= adj; - } - bk = GET_BKEYDATA(dbp, npg, indx); - if (indx != 0 && BINTERNAL_SIZE(bk->len) >= pfree) { - if (F_ISSET(dbc, DBC_OPD)) { - if (dbp->dup_compare == __bam_defcmp) - func = __bam_defpfx; - else - func = NULL; - } else - func = t->bt_prefix; - } else - func = NULL; - - /* Skip to the beginning of a duplicate set. */ - while (indx != 0 && ninp[indx] == ninp[indx - adj]) - indx -= adj; - - while (indx != 0 && BINTERNAL_SIZE(bk->len) >= pfree) { - if (B_TYPE(bk->type) != B_KEYDATA) - goto noprefix; - /* - * Figure out if we can truncate this key. - * Code borrowed from bt_split.c - */ - if (func == NULL) - goto noprefix; - tmp_bk = GET_BKEYDATA(dbp, npg, indx - adj); - if (B_TYPE(tmp_bk->type) != B_KEYDATA) - goto noprefix; - memset(&a, 0, sizeof(a)); - a.size = tmp_bk->len; - a.data = tmp_bk->data; - memset(&b, 0, sizeof(b)); - b.size = bk->len; - b.data = bk->data; - nksize = (u_int32_t)func(dbp, &a, &b); - if (BINTERNAL_PSIZE(nksize) < pfree) - break; -noprefix: - /* Skip to the beginning of a duplicate set. */ - do { - indx -= adj; - } while (indx != 0 && ninp[indx] == ninp[indx - adj]); - - bk = GET_BKEYDATA(dbp, npg, indx); - } - - if (indx == 0) - goto done; - DB_ASSERT(indx <= nent); - - /* Loop through the records and move them from npg to pg. */ -no_check: is_dup = first_dup = next_dup = 0; - do { - bk = GET_BKEYDATA(dbp, npg, 0); - /* Figure out if we are in a duplicate group or not. */ - if ((NUM_ENT(npg) % 2) == 0) { - if (NUM_ENT(npg) > 2 && ninp[0] == ninp[2]) { - if (!is_dup) { - first_dup = 1; - is_dup = 1; - } else - first_dup = 0; - - next_dup = 1; - } else if (next_dup) { - is_dup = 1; - first_dup = 0; - next_dup = 0; - } else - is_dup = 0; - } - - if (is_dup && !first_dup && (pind % 2) == 0) { - /* Duplicate key. 
*/ - if ((ret = __bam_adjindx(dbc, - pg, pind, pind - P_INDX, 1)) != 0) - goto err; - if (!next_dup) - is_dup = 0; - } else switch (B_TYPE(bk->type)) { - case B_KEYDATA: - hdr.data = bk; - hdr.size = SSZA(BKEYDATA, data); - data.size = bk->len; - data.data = bk->data; - if ((ret = __db_pitem(dbc, pg, pind, - BKEYDATA_SIZE(bk->len), &hdr, &data)) != 0) - goto err; - break; - case B_OVERFLOW: - case B_DUPLICATE: - data.size = BOVERFLOW_SIZE; - data.data = bk; - if ((ret = __db_pitem(dbc, pg, pind, - BOVERFLOW_SIZE, &data, NULL)) != 0) - goto err; - break; - default: - __db_err(dbp->dbenv, - "Unknown record format, page %lu, indx 0", - (u_long)PGNO(pg)); - ret = EINVAL; - goto err; - } - pind++; - if (next_dup && (NUM_ENT(npg) % 2) == 0) { - if ((ret = __bam_adjindx(ndbc, - npg, 0, O_INDX, 0)) != 0) - goto err; - } else { - if ((ret = __db_ditem(ndbc, - npg, 0, BITEM_SIZE(bk))) != 0) - goto err; - } - adjust++; - } while (--indx != 0); - - DB_ASSERT(NUM_ENT(npg) != 0); - if ((ret = __memp_fset(dbp->mpf, npg, DB_MPOOL_DIRTY)) != 0) - goto err; - - if (adjust != 0 && - (F_ISSET(cp, C_RECNUM) || F_ISSET(dbc, DBC_OPD))) { - DB_ASSERT(cp->csp - cp->sp == ncp->csp - ncp->sp); - if (TYPE(pg) == P_LBTREE) - adjust /= P_INDX; - if ((ret = __bam_adjust(ndbc, -adjust)) != 0) - goto err; - - if ((ret = __bam_adjust(dbc, adjust)) != 0) - goto err; - } - - /* Update parent with new key. */ - if (ndbc->dbtype == DB_BTREE && - (ret = __bam_pupdate(ndbc, pg)) != 0) - goto err; - if ((ret = __memp_fset(dbp->mpf, pg, DB_MPOOL_DIRTY)) != 0) - goto err; - -done: ret = __bam_stkrel(ndbc, STK_CLRDBC); - -err: return (ret); -} - -static int -__bam_merge_pages(dbc, ndbc, c_data) - DBC *dbc, *ndbc; - DB_COMPACT *c_data; -{ - BTREE_CURSOR *cp, *ncp; - DB *dbp; - DB_MPOOLFILE *dbmp; - DBT data, hdr, ind; - PAGE *pg, *npg; - db_indx_t nent, *ninp, *pinp; - db_pgno_t ppgno; - u_int8_t *bp; - u_int32_t len; - int i, level, ret; - - COMPQUIET(ppgno, PGNO_INVALID); - dbp = dbc->dbp; - dbmp = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - ncp = (BTREE_CURSOR *)ndbc->internal; - pg = cp->csp->page; - npg = ncp->csp->page; - memset(&hdr, 0, sizeof(hdr)); - nent = NUM_ENT(npg); - - /* If the page is empty just throw it away. */ - if (nent == 0) - goto free; - /* Bulk copy the data to the new page. */ - len = dbp->pgsize - HOFFSET(npg); - if (DBC_LOGGING(dbc)) { - data.data = (u_int8_t *)npg + HOFFSET(npg); - data.size = len; - ind.data = P_INP(dbp, npg); - ind.size = NUM_ENT(npg) * sizeof(db_indx_t); - if ((ret = __bam_merge_log(dbp, - dbc->txn, &LSN(pg), 0, PGNO(pg), - &LSN(pg), PGNO(npg), &LSN(npg), NULL, &data, &ind)) != 0) - goto err; - } else - LSN_NOT_LOGGED(LSN(pg)); - LSN(npg) = LSN(pg); - bp = (u_int8_t *)pg + HOFFSET(pg) - len; - memcpy(bp, (u_int8_t *)npg + HOFFSET(npg), len); - - /* Copy index table offset by what was there already. */ - pinp = P_INP(dbp, pg) + NUM_ENT(pg); - ninp = P_INP(dbp, npg); - for (i = 0; i < NUM_ENT(npg); i++) - *pinp++ = *ninp++ - (dbp->pgsize - HOFFSET(pg)); - HOFFSET(pg) -= len; - NUM_ENT(pg) += i; - - NUM_ENT(npg) = 0; - HOFFSET(npg) += len; - - if (F_ISSET(cp, C_RECNUM) || F_ISSET(dbc, DBC_OPD)) { - DB_ASSERT(cp->csp - cp->sp == ncp->csp - ncp->sp); - if (TYPE(pg) == P_LBTREE) - i /= P_INDX; - if ((ret = __bam_adjust(ndbc, -i)) != 0) - goto err; - - if ((ret = __bam_adjust(dbc, i)) != 0) - goto err; - } - ret = __memp_fset(dbp->mpf, pg, DB_MPOOL_DIRTY); - -free: /* - * __bam_dpages may decide to collapse the tree. 
- * This can happen if we have the root and there - * are exactly 2 pointers left in it. - * If it can collapse the tree we must free the other - * stack since it will nolonger be valid. This - * must be done before hand because we cannot - * hold a page pinned if it might be truncated. - */ - if (PGNO(ncp->sp->page) == ncp->root && - NUM_ENT(ncp->sp->page) == 2) { - if ((ret = __bam_stkrel(dbc, STK_CLRDBC | STK_PGONLY)) != 0) - goto err; - level = LEVEL(ncp->sp->page); - ppgno = PGNO(ncp->csp[-1].page); - } else - level = 0; - if (c_data->compact_truncate > PGNO(npg)) - c_data->compact_truncate--; - if ((ret = __bam_dpages(ndbc, - 0, ndbc->dbtype == DB_RECNO ? 0 : 1)) != 0) - goto err; - npg = NULL; - c_data->compact_pages_free++; - c_data->compact_pages--; - if (level != 0) { - if ((ret = __memp_fget(dbmp, &ncp->root, 0, &npg)) != 0) - goto err; - if (level == LEVEL(npg)) - level = 0; - if ((ret = __memp_fput(dbmp, npg, 0)) != 0) - goto err; - npg = NULL; - if (level != 0) { - c_data->compact_levels++; - c_data->compact_pages_free++; - if (c_data->compact_truncate > ppgno) - c_data->compact_truncate--; - if (c_data->compact_pages != 0) - c_data->compact_pages--; - } - } - -err: return (ret); -} - -/* - * __bam_merge_internal -- - * Merge internal nodes of the tree. - */ -static int -__bam_merge_internal(dbc, ndbc, level, c_data, merged) - DBC *dbc, *ndbc; - int level; - DB_COMPACT *c_data; - int *merged; -{ - BINTERNAL bi, *bip, *fip; - BTREE_CURSOR *cp, *ncp; - DB_MPOOLFILE *dbmp; - DB *dbp; - DBT data, hdr; - EPG *epg, *save_csp, *nsave_csp; - PAGE *pg, *npg; - RINTERNAL *rk; - db_indx_t indx, pind; - db_pgno_t ppgno; - int32_t trecs; - u_int16_t size; - u_int32_t free, pfree; - int ret; - - COMPQUIET(bip, NULL); - COMPQUIET(ppgno, PGNO_INVALID); - - /* - * ndbc will contain the the dominating parent of the subtree. - * dbc will have the tree containing the left child. - * - * The stacks descend to the leaf level. - * If this is a recno tree then both stacks will start at the root. - */ - dbp = dbc->dbp; - dbmp = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - ncp = (BTREE_CURSOR *)ndbc->internal; - *merged = 0; - ret = 0; - - /* - * Set the stacks to the level requested. - * Save the old value to restore when we exit. - */ - save_csp = cp->csp; - epg = &cp->csp[-level + 1]; - cp->csp = epg; - pg = epg->page; - pind = NUM_ENT(pg); - - nsave_csp = ncp->csp; - epg = &ncp->csp[-level + 1]; - ncp->csp = epg; - npg = epg->page; - indx = NUM_ENT(npg); - - /* - * The caller may have two stacks that include common ancestors, we - * check here for convenience. - */ - if (npg == pg) - goto done; - - if (TYPE(pg) == P_IBTREE) { - /* - * Check for overflow keys on both pages while we have - * them locked. - */ - if ((ret = - __bam_truncate_internal_overflow(dbc, pg, c_data)) != 0) - goto err; - if ((ret = - __bam_truncate_internal_overflow(dbc, npg, c_data)) != 0) - goto err; - } - - /* - * If we are about to move data off the left most page of an - * internal node we will need to update its parents, make sure there - * will be room for the new key on all the parents in the stack. - * If not, move less data. - */ - fip = NULL; - if (TYPE(pg) == P_IBTREE) { - /* See where we run out of space. */ - free = P_FREESPACE(dbp, pg); - /* - * The leftmost key of an internal page is not accurate. - * Go up the tree to find a non-leftmost parent. 
- */ - while (--epg >= ncp->sp && epg->indx == 0) - continue; - fip = bip = GET_BINTERNAL(dbp, epg->page, epg->indx); - epg = ncp->csp; - - for (indx = 0;;) { - size = BINTERNAL_PSIZE(bip->len); - if (size > free) - break; - free -= size; - if (++indx >= NUM_ENT(npg)) - break; - bip = GET_BINTERNAL(dbp, npg, indx); - } - - /* See if we are deleting the page and we are not left most. */ - if (indx == NUM_ENT(npg) && epg[-1].indx != 0) - goto fits; - - pfree = dbp->pgsize; - for (epg--; epg >= ncp->sp; epg--) - if ((free = P_FREESPACE(dbp, epg->page)) < pfree) { - bip = GET_BINTERNAL(dbp, epg->page, epg->indx); - /* Add back in the key we will be deleting. */ - free += BINTERNAL_PSIZE(bip->len); - if (free < pfree) - pfree = free; - if (epg->indx != 0) - break; - } - epg = ncp->csp; - - /* If we are at the end of the page we will delete it. */ - if (indx == NUM_ENT(npg)) - bip = - GET_BINTERNAL(dbp, epg[-1].page, epg[-1].indx + 1); - else - bip = GET_BINTERNAL(dbp, npg, indx); - - /* Back up until we have a key that fits. */ - while (indx != 0 && BINTERNAL_PSIZE(bip->len) > pfree) { - indx--; - bip = GET_BINTERNAL(dbp, npg, indx); - } - if (indx == 0) - goto done; - } - -fits: memset(&bi, 0, sizeof(bi)); - memset(&hdr, 0, sizeof(hdr)); - memset(&data, 0, sizeof(data)); - trecs = 0; - - /* - * Copy data between internal nodes till one is full - * or the other is empty. - */ - do { - if (dbc->dbtype == DB_BTREE) { - bip = GET_BINTERNAL(dbp, npg, 0); - size = fip == NULL ? - BINTERNAL_SIZE(bip->len) : - BINTERNAL_SIZE(fip->len); - if (P_FREESPACE(dbp, pg) < size + sizeof(db_indx_t)) - break; - - if (fip == NULL) { - data.size = bip->len; - data.data = bip->data; - } else { - data.size = fip->len; - data.data = fip->data; - } - bi.len = data.size; - B_TSET(bi.type, bip->type, 0); - bi.pgno = bip->pgno; - bi.nrecs = bip->nrecs; - hdr.data = &bi; - hdr.size = SSZA(BINTERNAL, data); - if (F_ISSET(cp, C_RECNUM) || F_ISSET(dbc, DBC_OPD)) - trecs += (int32_t)bip->nrecs; - } else { - rk = GET_RINTERNAL(dbp, npg, 0); - size = RINTERNAL_SIZE; - if (P_FREESPACE(dbp, pg) < size + sizeof(db_indx_t)) - break; - - hdr.data = rk; - hdr.size = size; - trecs += (int32_t)rk->nrecs; - } - if ((ret = __db_pitem(dbc, pg, pind, size, &hdr, &data)) != 0) - goto err; - pind++; - if (fip != NULL) { - /* reset size to be for the record being deleted. */ - size = BINTERNAL_SIZE(bip->len); - fip = NULL; - } - if ((ret = __db_ditem(ndbc, npg, 0, size)) != 0) - goto err; - *merged = 1; - } while (--indx != 0); - - if (c_data->compact_truncate != PGNO_INVALID && - PGNO(pg) > c_data->compact_truncate && cp->csp != cp->sp) { - if ((ret = __bam_truncate_page(dbc, &pg, 1)) != 0) - goto err; - } - - if (NUM_ENT(npg) != 0 && c_data->compact_truncate != PGNO_INVALID && - PGNO(npg) > c_data->compact_truncate && ncp->csp != ncp->sp) { - if ((ret = __bam_truncate_page(ndbc, &npg, 1)) != 0) - goto err; - } - - if (!*merged) - goto done; - - if ((ret = __memp_fset(dbmp, pg, DB_MPOOL_DIRTY)) != 0) - goto err; - if ((ret = __memp_fset(dbmp, npg, DB_MPOOL_DIRTY)) != 0) - goto err; - - if (trecs != 0) { - DB_ASSERT(cp->csp - cp->sp == ncp->csp - ncp->sp); - cp->csp--; - if ((ret = __bam_adjust(dbc, trecs)) != 0) - goto err; - - ncp->csp--; - if ((ret = __bam_adjust(ndbc, -trecs)) != 0) - goto err; - ncp->csp++; - } - cp->csp = save_csp; - - /* - * Either we emptied the page or we need to update its - * parent to reflect the first page we now point to. - * First get rid of the bottom of the stack, - * bam_dpages will clear the stack. 
We can drop - * the locks on those pages as we have not done - * anything to them. - */ - do { - if ((ret = __memp_fput(dbmp, nsave_csp->page, 0)) != 0) - goto err; - if ((ret = __LPUT(dbc, nsave_csp->lock)) != 0) - goto err; - nsave_csp--; - } while (nsave_csp != ncp->csp); - - if (NUM_ENT(npg) == 0) { - /* - * __bam_dpages may decide to collapse the tree - * so we need to free our other stack. The tree - * will change in hight and our stack will nolonger - * be valid. - */ - if (PGNO(ncp->sp->page) == ncp->root && - NUM_ENT(ncp->sp->page) == 2) { - if ((ret = __bam_stkrel(dbc, STK_CLRDBC)) != 0) - goto err; - level = LEVEL(ncp->sp->page); - ppgno = PGNO(ncp->csp[-1].page); - } else - level = 0; - - if (c_data->compact_truncate > PGNO(npg)) - c_data->compact_truncate--; - ret = __bam_dpages(ndbc, - 0, ndbc->dbtype == DB_RECNO ? 0 : 1); - c_data->compact_pages_free++; - if (ret == 0 && level != 0) { - if ((ret = __memp_fget(dbmp, &ncp->root, 0, &npg)) != 0) - goto err; - if (level == LEVEL(npg)) - level = 0; - if ((ret = __memp_fput(dbmp, npg, 0)) != 0) - goto err; - npg = NULL; - if (level != 0) { - c_data->compact_levels++; - c_data->compact_pages_free++; - if (c_data->compact_truncate > ppgno) - c_data->compact_truncate--; - if (c_data->compact_pages != 0) - c_data->compact_pages--; - } - } - } else - ret = __bam_pupdate(ndbc, npg); - return (ret); - -done: -err: cp->csp = save_csp; - ncp->csp = nsave_csp; - - return (ret); -} - -/* - * __bam_compact_dups -- try to compress off page dup trees. - * We may or may not have a write lock on this page. - */ -static int -__bam_compact_dups(dbc, pg, factor, have_lock, c_data, donep) - DBC *dbc; - PAGE *pg; - u_int32_t factor; - int have_lock; - DB_COMPACT *c_data; - int *donep; -{ - BTREE_CURSOR *cp; - BOVERFLOW *bo; - DB *dbp; - DBC *opd; - DBT start; - DB_MPOOLFILE *dbmp; - PAGE *dpg; - db_indx_t i; - int done, level, ret, span, t_ret; - - span = 0; - ret = 0; - opd = NULL; - - dbp = dbc->dbp; - dbmp = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - - for (i = 0; i < NUM_ENT(pg); i++) { - bo = GET_BOVERFLOW(dbp, pg, i); - if (B_TYPE(bo->type) == B_KEYDATA) - continue; - c_data->compact_pages_examine++; - if (bo->pgno > c_data->compact_truncate) { - (*donep)++; - if (!have_lock) { - if ((ret = __db_lget(dbc, 0, PGNO(pg), - DB_LOCK_WRITE, 0, &cp->csp->lock)) != 0) - goto err; - have_lock = 1; - } - if ((ret = - __bam_truncate_root_page(dbc, pg, i, c_data)) != 0) - goto err; - /* Just in case it should move. Could it? */ - bo = GET_BOVERFLOW(dbp, pg, i); - } - - if (B_TYPE(bo->type) == B_OVERFLOW) { - if ((ret = __bam_truncate_overflow(dbc, bo->pgno, - have_lock ? PGNO_INVALID : PGNO(pg), c_data)) != 0) - goto err; - (*donep)++; - continue; - } - /* - * Take a peek at the root. If it's a leaf then - * there is no tree here, avoid all the trouble. 
- */ - if ((ret = __memp_fget(dbmp, &bo->pgno, 0, &dpg)) != 0) - goto err; - - level = dpg->level; - if ((ret = __memp_fput(dbmp, dpg, 0)) != 0) - goto err; - if (level == LEAFLEVEL) - continue; - if ((ret = __db_c_newopd(dbc, bo->pgno, NULL, &opd)) != 0) - return (ret); - if (!have_lock) { - if ((ret = __db_lget(dbc, 0, - PGNO(pg), DB_LOCK_WRITE, 0, &cp->csp->lock)) != 0) - goto err; - have_lock = 1; - } - (*donep)++; - memset(&start, 0, sizeof(start)); - do { - if ((ret = __bam_compact_int(opd, &start, - NULL, factor, &span, c_data, &done)) != 0) - break; - } while (!done); - - if (start.data != NULL) - __os_free(dbp->dbenv, start.data); - - if (ret != 0) - goto err; - - ret = __db_c_close(opd); - opd = NULL; - if (ret != 0) - goto err; - } - -err: if (opd != NULL && (t_ret = __db_c_close(opd)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __bam_truncate_page -- swap a page with a lower numbered page. - * The cusor has a stack which includes at least the - * immediate parent of this page. - */ -static int -__bam_truncate_page(dbc, pgp, update_parent) - DBC *dbc; - PAGE **pgp; - int update_parent; -{ - BTREE_CURSOR *cp; - DB *dbp; - DBT data, hdr, ind; - DB_LSN lsn; - EPG *epg; - PAGE *newpage; - db_pgno_t newpgno, *pgnop; - int ret; - - dbp = dbc->dbp; - - /* - * We want to free a page that lives in the part of the file that - * can be truncated, so we're going to move it onto a free page - * that is in the part of the file that need not be truncated. - * Since the freelist is ordered now, we can simply call __db_new - * which will grab the first element off the freelist; we know this - * is the lowest numbered free page. - */ - - if ((ret = __db_new(dbc, P_DONTEXTEND | TYPE(*pgp), &newpage)) != 0) - return (ret); - - /* - * If newpage is null then __db_new would have had to allocate - * a new page from the filesystem, so there is no reason - * to continue this action. - */ - if (newpage == NULL) - return (0); - - /* - * It is possible that a higher page is allocated if other threads - * are allocating at the same time, if so, just put it back. - */ - if (PGNO(newpage) > PGNO(*pgp)) { - /* Its unfortunate but you can't just free a new overflow. */ - if (TYPE(newpage) == P_OVERFLOW) - OV_LEN(newpage) = 0; - return (__db_free(dbc, newpage)); - } - - /* Log if necessary. */ - if (DBC_LOGGING(dbc)) { - hdr.data = *pgp; - hdr.size = P_OVERHEAD(dbp); - if (TYPE(*pgp) == P_OVERFLOW) { - data.data = (u_int8_t *)*pgp + P_OVERHEAD(dbp); - data.size = OV_LEN(*pgp); - ind.size = 0; - } else { - data.data = (u_int8_t *)*pgp + HOFFSET(*pgp); - data.size = dbp->pgsize - HOFFSET(*pgp); - ind.data = P_INP(dbp, *pgp); - ind.size = NUM_ENT(*pgp) * sizeof(db_indx_t); - } - if ((ret = __bam_merge_log(dbp, dbc->txn, - &LSN(newpage), 0, PGNO(newpage), &LSN(newpage), - PGNO(*pgp), &LSN(*pgp), &hdr, &data, &ind)) != 0) - goto err; - } else - LSN_NOT_LOGGED(LSN(newpage)); - - newpgno = PGNO(newpage); - lsn = LSN(newpage); - memcpy(newpage, *pgp, dbp->pgsize); - PGNO(newpage) = newpgno; - LSN(newpage) = lsn; - - /* Empty the old page. */ - if (TYPE(*pgp) == P_OVERFLOW) - OV_LEN(*pgp) = 0; - else { - HOFFSET(*pgp) = dbp->pgsize; - NUM_ENT(*pgp) = 0; - } - LSN(*pgp) = lsn; - - if ((ret = __memp_fset(dbp->mpf, newpage, DB_MPOOL_DIRTY)) != 0) - goto err; - - /* Update siblings. 
*/ - switch (TYPE(newpage)) { - case P_OVERFLOW: - case P_LBTREE: - case P_LRECNO: - case P_LDUP: - if (NEXT_PGNO(newpage) == PGNO_INVALID && - PREV_PGNO(newpage) == PGNO_INVALID) - break; - if ((ret = __bam_relink(dbc, *pgp, PGNO(newpage))) != 0) - goto err; - break; - default: - break; - } - cp = (BTREE_CURSOR*)dbc->internal; - - /* - * Now, if we free this page, it will get truncated, when we free - * all the pages after it in the file. - */ - ret = __db_free(dbc, *pgp); - /* db_free always puts the page. */ - *pgp = newpage; - - if (ret != 0) - return (ret); - - if (!update_parent) - goto done; - - /* Update the parent. */ - epg = &cp->csp[-1]; - switch (TYPE(epg->page)) { - case P_IBTREE: - pgnop = &GET_BINTERNAL(dbp, epg->page, epg->indx)->pgno; - break; - case P_IRECNO: - pgnop = &GET_RINTERNAL(dbp, epg->page, epg->indx)->pgno; - break; - default: - pgnop = &GET_BOVERFLOW(dbp, epg->page, epg->indx)->pgno; - break; - } - if (DBC_LOGGING(dbc)) { - if ((ret = __bam_pgno_log(dbp, dbc->txn, &LSN(epg->page), - 0, PGNO(epg->page), &LSN(epg->page), (u_int32_t)epg->indx, - *pgnop, PGNO(newpage))) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(epg->page)); - - *pgnop = PGNO(newpage); - cp->csp->page = newpage; - if ((ret = __memp_fset(dbp->mpf, epg->page, DB_MPOOL_DIRTY)) != 0) - return (ret); - -done: return (0); - -err: (void)__memp_fput(dbp->mpf, newpage, 0); - return (ret); -} - -/* - * __bam_truncate_overflow -- find overflow pages to truncate. - * Walk the pages of an overflow chain and swap out - * high numbered pages. We are passed the first page - * but only deal with the second and subsequent pages. - */ - -static int -__bam_truncate_overflow(dbc, pgno, pg_lock, c_data) - DBC *dbc; - db_pgno_t pgno; - db_pgno_t pg_lock; - DB_COMPACT *c_data; -{ - DB *dbp; - DB_LOCK lock; - PAGE *page; - int ret, t_ret; - - dbp = dbc->dbp; - page = NULL; - LOCK_INIT(lock); - - if ((ret = __memp_fget(dbp->mpf, &pgno, 0, &page)) != 0) - return (ret); - - while ((pgno = NEXT_PGNO(page)) != PGNO_INVALID) { - if ((ret = __memp_fput(dbp->mpf, page, 0)) != 0) - return (ret); - if ((ret = __memp_fget(dbp->mpf, &pgno, 0, &page)) != 0) - return (ret); - if (pgno <= c_data->compact_truncate) - continue; - if (pg_lock != PGNO_INVALID) { - if ((ret = __db_lget(dbc, - 0, pg_lock, DB_LOCK_WRITE, 0, &lock)) != 0) - break; - pg_lock = PGNO_INVALID; - } - if ((ret = __bam_truncate_page(dbc, &page, 0)) != 0) - break; - } - - if (page != NULL && - (t_ret = __memp_fput(dbp->mpf, page, 0)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __bam_truncate_root_page -- swap a page which is - * the root of an off page dup tree or the head of an overflow. - * The page is reference by the pg/indx passed in. 
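For reference, a minimal standalone C sketch of the page-relocation idea behind __bam_truncate_page above: a page that lives past the planned truncation point is copied onto the lowest-numbered free page (taken from the ordered free list), and its old slot becomes free space that can later be cut off the end of the file. The in-memory "file", NPAGES and relocate() below are illustrative stand-ins, not BDB structures or APIs.

#include <stdio.h>
#include <string.h>

#define NPAGES 8

static char file[NPAGES][16];          /* toy page contents              */
static int  freelist[NPAGES], nfree;   /* free page numbers, ascending   */

static int relocate(int pgno)
{
    int newpgno;

    if (nfree == 0)
        return pgno;                   /* no free page available         */
    newpgno = freelist[0];             /* lowest-numbered free page      */
    if (newpgno > pgno)
        return pgno;                   /* would move toward the tail     */
    memmove(freelist, freelist + 1, --nfree * sizeof(freelist[0]));
    memcpy(file[newpgno], file[pgno], sizeof(file[0]));
    file[pgno][0] = '\0';              /* old slot is now reclaimable    */
    freelist[nfree++] = pgno;          /* (left unsorted in this toy)    */
    return newpgno;
}

int main(void)
{
    strcpy(file[6], "leaf data");
    freelist[0] = 2;
    nfree = 1;
    printf("page 6 moved to page %d: %s\n", relocate(6), file[2]);
    return 0;
}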
- */ -static int -__bam_truncate_root_page(dbc, pg, indx, c_data) - DBC *dbc; - PAGE *pg; - u_int32_t indx; - DB_COMPACT *c_data; -{ - BINTERNAL *bi; - BOVERFLOW *bo; - DB *dbp; - DBT orig; - PAGE *page; - db_pgno_t newpgno, *pgnop; - int ret, t_ret; - - COMPQUIET(c_data, NULL); - COMPQUIET(bo, NULL); - COMPQUIET(newpgno, PGNO_INVALID); - dbp = dbc->dbp; - page = NULL; - if (TYPE(pg) == P_IBTREE) { - bi = GET_BINTERNAL(dbp, pg, indx); - if (B_TYPE(bi->type) == B_OVERFLOW) { - bo = (BOVERFLOW *)(bi->data); - pgnop = &bo->pgno; - } else - pgnop = &bi->pgno; - } else { - bo = GET_BOVERFLOW(dbp, pg, indx); - pgnop = &bo->pgno; - } - - if ((ret = __memp_fget(dbp->mpf, pgnop, 0, &page)) != 0) - goto err; - - /* - * If this is a multiply reference overflow key, then we will just - * copy it and decrement the reference count. This is part of a - * fix to get rid of multiple references. - */ - if (TYPE(page) == P_OVERFLOW && OV_REF(page) > 1) { - if ((ret = __db_ovref(dbc, bo->pgno, -1)) != 0) - goto err; - memset(&orig, 0, sizeof(orig)); - if ((ret = __db_goff(dbp, &orig, - bo->tlen, bo->pgno, &orig.data, &orig.size)) == 0) - ret = __db_poff(dbc, &orig, &newpgno); - if (orig.data != NULL) - __os_free(dbp->dbenv, orig.data); - if (ret != 0) - goto err; - } else { - if ((ret = __bam_truncate_page(dbc, &page, 0)) != 0) - goto err; - newpgno = PGNO(page); - /* If we could not allocate from the free list, give up.*/ - if (newpgno == *pgnop) - goto err; - } - - /* Update the reference. */ - if (DBC_LOGGING(dbc)) { - if ((ret = __bam_pgno_log(dbp, - dbc->txn, &LSN(pg), 0, PGNO(pg), - &LSN(pg), (u_int32_t)indx, *pgnop, newpgno)) != 0) - goto err; - } else - LSN_NOT_LOGGED(LSN(pg)); - - *pgnop = newpgno; - if ((ret = __memp_fset(dbp->mpf, pg, DB_MPOOL_DIRTY)) != 0) - goto err; - -err: if (page != NULL && (t_ret = - __memp_fput(dbp->mpf, page, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * -- bam_truncate_internal_overflow -- find overflow keys - * on internal pages and if they have high page - * numbers swap them with lower pages and truncate them. - * Note that if there are overflow keys in the internal - * nodes they will get copied adding pages to the database. - */ -static int -__bam_truncate_internal_overflow(dbc, page, c_data) - DBC *dbc; - PAGE *page; - DB_COMPACT *c_data; -{ - BINTERNAL *bi; - BOVERFLOW *bo; - db_indx_t indx; - int ret; - - COMPQUIET(bo, NULL); - ret = 0; - for (indx = 0; indx < NUM_ENT(page); indx++) { - bi = GET_BINTERNAL(dbc->dbp, page, indx); - if (B_TYPE(bi->type) != B_OVERFLOW) - continue; - bo = (BOVERFLOW *)(bi->data); - if (bo->pgno > c_data->compact_truncate && (ret = - __bam_truncate_root_page(dbc, page, indx, c_data)) != 0) - break; - if ((ret = __bam_truncate_overflow( - dbc, bo->pgno, PGNO_INVALID, c_data)) != 0) - break; - } - return (ret); -} - -#ifdef HAVE_FTRUNCATE -/* - * __bam_savekey -- save the key from an internal page. - * We need to save information so that we can - * fetch then next internal node of the tree. This means - * we need the btree key on this current page, or the - * next record number. 
- */ -static int -__bam_savekey(dbc, next, start) - DBC *dbc; - int next; - DBT *start; -{ - BINTERNAL *bi; - BOVERFLOW *bo; - BTREE_CURSOR *cp; - DB *dbp; - DB_ENV *dbenv; - PAGE *pg; - RINTERNAL *ri; - db_indx_t indx, top; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - cp = (BTREE_CURSOR *)dbc->internal; - pg = cp->csp->page; - - if (dbc->dbtype == DB_RECNO) { - if (next) - for (indx = 0, top = NUM_ENT(pg); indx != top; indx++) { - ri = GET_RINTERNAL(dbp, pg, indx); - cp->recno += ri->nrecs; - } - return (__db_retcopy(dbenv, start, &cp->recno, - sizeof(cp->recno), &start->data, &start->ulen)); - - } - bi = GET_BINTERNAL(dbp, pg, NUM_ENT(pg) - 1); - if (B_TYPE(bi->type) == B_OVERFLOW) { - bo = (BOVERFLOW *)(bi->data); - return (__db_goff(dbp, start, - bo->tlen, bo->pgno, &start->data, &start->ulen)); - } - return (__db_retcopy(dbenv, - start, bi->data, bi->len, &start->data, &start->ulen)); -} - -/* - * bam_truncate_internal -- - * Find high numbered pages in the internal nodes of a tree and - * swap them. - */ -static int -__bam_truncate_internal(dbp, txn, c_data) - DB *dbp; - DB_TXN *txn; - DB_COMPACT *c_data; -{ - BTREE_CURSOR *cp; - DBC *dbc; - DBT start; - PAGE *pg; - db_pgno_t pgno; - u_int32_t sflag; - int level, local_txn, ret, t_ret; - - dbc = NULL; - memset(&start, 0, sizeof(start)); - - if (IS_DB_AUTO_COMMIT(dbp, txn)) { - local_txn = 1; - txn = NULL; - } else - local_txn = 0; - - level = LEAFLEVEL + 1; - sflag = CS_READ | CS_GETRECNO; - -new_txn: - if (local_txn && (ret = __txn_begin(dbp->dbenv, NULL, &txn, 0)) != 0) - goto err; - - if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) - goto err; - cp = (BTREE_CURSOR *)dbc->internal; - - pgno = PGNO_INVALID; - do { - if ((ret = __bam_csearch(dbc, &start, sflag, level)) != 0) { - /* No more at this level, go up one. */ - if (ret == DB_NOTFOUND) { - level++; - if (start.data != NULL) - __os_free(dbp->dbenv, start.data); - memset(&start, 0, sizeof(start)); - sflag = CS_READ | CS_GETRECNO; - continue; - } - goto err; - } - c_data->compact_pages_examine++; - - pg = cp->csp->page; - pgno = PGNO(pg); - - sflag = CS_NEXT | CS_GETRECNO; - /* Grab info about the page and drop the stack. */ - if (pgno != cp->root && (ret = __bam_savekey(dbc, - pgno <= c_data->compact_truncate, &start)) != 0) - goto err; - - if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) - goto err; - if (pgno == cp->root) - break; - - if (pgno <= c_data->compact_truncate) - continue; - - /* Reget the page with a write lock, and its parent too. */ - if ((ret = __bam_csearch(dbc, - &start, CS_PARENT | CS_GETRECNO, level)) != 0) - goto err; - pg = cp->csp->page; - pgno = PGNO(pg); - - if (pgno > c_data->compact_truncate) { - if ((ret = __bam_truncate_page(dbc, &pg, 1)) != 0) - goto err; - } - if ((ret = __bam_stkrel(dbc, - pgno > c_data->compact_truncate ? 0 : STK_NOLOCK)) != 0) - goto err; - - /* We are locking subtrees, so drop the write locks asap. 
*/ - if (local_txn && pgno > c_data->compact_truncate) - break; - } while (pgno != cp->root); - - if ((ret = __db_c_close(dbc)) != 0) - goto err; - dbc = NULL; - if (local_txn) { - if ((ret = __txn_commit(txn, DB_TXN_NOSYNC)) != 0) - goto err; - txn = NULL; - } - if (pgno != ((BTREE *)dbp->bt_internal)->bt_root) - goto new_txn; - -err: if (dbc != NULL && (t_ret = __bam_stkrel(dbc, 0)) != 0 && ret == 0) - ret = t_ret; - if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - if (local_txn && - txn != NULL && (t_ret = __txn_abort(txn)) != 0 && ret == 0) - ret = t_ret; - if (start.data != NULL) - __os_free(dbp->dbenv, start.data); - return (ret); -} - -static int -__bam_setup_freelist(dbp, list, nelems) - DB *dbp; - struct pglist *list; - u_int32_t nelems; -{ - DB_MPOOLFILE *mpf; - db_pgno_t *plist; - int ret; - - mpf = dbp->mpf; - - if ((ret = __memp_alloc_freelist(mpf, nelems, &plist)) != 0) - return (ret); - - while (nelems-- != 0) - *plist++ = list++->pgno; - - return (0); -} - -static int -__bam_free_freelist(dbp, txn) - DB *dbp; - DB_TXN *txn; -{ - DBC *dbc; - DB_LOCK lock; - int ret, t_ret; - - LOCK_INIT(lock); - ret = 0; - - /* - * If we are not in a transaction then we need to get - * a lock on the meta page, otherwise we should already - * have the lock. - */ - - dbc = NULL; - if (IS_DB_AUTO_COMMIT(dbp, txn)) { - /* Get a cursor so we can call __db_lget. */ - if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) - return (ret); - - if ((ret = __db_lget(dbc, - 0, PGNO_BASE_MD, DB_LOCK_WRITE, 0, &lock)) != 0) - goto err; - } - - __memp_free_freelist(dbp->mpf); - -err: if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - - if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} -#endif diff --git a/storage/bdb/btree/bt_compare.c b/storage/bdb/btree/bt_compare.c deleted file mode 100644 index 126788f3100..00000000000 --- a/storage/bdb/btree/bt_compare.c +++ /dev/null @@ -1,213 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: bt_compare.c,v 12.1 2005/06/16 20:20:13 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/btree.h" - -/* - * __bam_cmp -- - * Compare a key to a given record. - * - * PUBLIC: int __bam_cmp __P((DB *, const DBT *, PAGE *, - * PUBLIC: u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *)); - */ -int -__bam_cmp(dbp, dbt, h, indx, func, cmpp) - DB *dbp; - const DBT *dbt; - PAGE *h; - u_int32_t indx; - int (*func)__P((DB *, const DBT *, const DBT *)); - int *cmpp; -{ - BINTERNAL *bi; - BKEYDATA *bk; - BOVERFLOW *bo; - DBT pg_dbt; - - /* - * Returns: - * < 0 if dbt is < page record - * = 0 if dbt is = page record - * > 0 if dbt is > page record - * - * !!! - * We do not clear the pg_dbt DBT even though it's likely to contain - * random bits. That should be okay, because the app's comparison - * routine had better not be looking at fields other than data/size. - * We don't clear it because we go through this path a lot and it's - * expensive. - */ - switch (TYPE(h)) { - case P_LBTREE: - case P_LDUP: - case P_LRECNO: - bk = GET_BKEYDATA(dbp, h, indx); - if (B_TYPE(bk->type) == B_OVERFLOW) - bo = (BOVERFLOW *)bk; - else { - pg_dbt.data = bk->data; - pg_dbt.size = bk->len; - *cmpp = func(dbp, dbt, &pg_dbt); - return (0); - } - break; - case P_IBTREE: - /* - * The following code guarantees that the left-most key on an - * internal page at any place in the tree sorts less than any - * user-specified key. The reason is that if we have reached - * this internal page, we know the user key must sort greater - * than the key we're storing for this page in any internal - * pages at levels above us in the tree. It then follows that - * any user-specified key cannot sort less than the first page - * which we reference, and so there's no reason to call the - * comparison routine. While this may save us a comparison - * routine call or two, the real reason for this is because - * we don't maintain a copy of the smallest key in the tree, - * so that we don't have to update all the levels of the tree - * should the application store a new smallest key. And, so, - * we may not have a key to compare, which makes doing the - * comparison difficult and error prone. - */ - if (indx == 0) { - *cmpp = 1; - return (0); - } - - bi = GET_BINTERNAL(dbp, h, indx); - if (B_TYPE(bi->type) == B_OVERFLOW) - bo = (BOVERFLOW *)(bi->data); - else { - pg_dbt.data = bi->data; - pg_dbt.size = bi->len; - *cmpp = func(dbp, dbt, &pg_dbt); - return (0); - } - break; - default: - return (__db_pgfmt(dbp->dbenv, PGNO(h))); - } - - /* - * Overflow. - */ - return (__db_moff(dbp, dbt, - bo->pgno, bo->tlen, func == __bam_defcmp ? NULL : func, cmpp)); -} - -/* - * __bam_defcmp -- - * Default comparison routine. 
- * - * PUBLIC: int __bam_defcmp __P((DB *, const DBT *, const DBT *)); - */ -int -__bam_defcmp(dbp, a, b) - DB *dbp; - const DBT *a, *b; -{ - size_t len; - u_int8_t *p1, *p2; - - COMPQUIET(dbp, NULL); - - /* - * Returns: - * < 0 if a is < b - * = 0 if a is = b - * > 0 if a is > b - * - * XXX - * If a size_t doesn't fit into a long, or if the difference between - * any two characters doesn't fit into an int, this routine can lose. - * What we need is a signed integral type that's guaranteed to be at - * least as large as a size_t, and there is no such thing. - */ - len = a->size > b->size ? b->size : a->size; - for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2) - if (*p1 != *p2) - return ((long)*p1 - (long)*p2); - return ((long)a->size - (long)b->size); -} - -/* - * __bam_defpfx -- - * Default prefix routine. - * - * PUBLIC: size_t __bam_defpfx __P((DB *, const DBT *, const DBT *)); - */ -size_t -__bam_defpfx(dbp, a, b) - DB *dbp; - const DBT *a, *b; -{ - size_t cnt, len; - u_int8_t *p1, *p2; - - COMPQUIET(dbp, NULL); - - cnt = 1; - len = a->size > b->size ? b->size : a->size; - for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2, ++cnt) - if (*p1 != *p2) - return (cnt); - - /* - * They match up to the smaller of the two sizes. - * Collate the longer after the shorter. - */ - if (a->size < b->size) - return (a->size + 1); - if (b->size < a->size) - return (b->size + 1); - return (b->size); -} diff --git a/storage/bdb/btree/bt_conv.c b/storage/bdb/btree/bt_conv.c deleted file mode 100644 index 74bf823088a..00000000000 --- a/storage/bdb/btree/bt_conv.c +++ /dev/null @@ -1,100 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_conv.c,v 12.2 2005/06/16 20:20:13 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_swap.h" -#include "dbinc/btree.h" - -/* - * __bam_pgin -- - * Convert host-specific page layout from the host-independent format - * stored on disk. - * - * PUBLIC: int __bam_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *)); - */ -int -__bam_pgin(dbenv, dummydbp, pg, pp, cookie) - DB_ENV *dbenv; - DB *dummydbp; - db_pgno_t pg; - void *pp; - DBT *cookie; -{ - DB_PGINFO *pginfo; - PAGE *h; - - pginfo = (DB_PGINFO *)cookie->data; - if (!F_ISSET(pginfo, DB_AM_SWAP)) - return (0); - - h = pp; - return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) : - __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 1)); -} - -/* - * __bam_pgout -- - * Convert host-specific page layout to the host-independent format - * stored on disk. - * - * PUBLIC: int __bam_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *)); - */ -int -__bam_pgout(dbenv, dummydbp, pg, pp, cookie) - DB_ENV *dbenv; - DB *dummydbp; - db_pgno_t pg; - void *pp; - DBT *cookie; -{ - DB_PGINFO *pginfo; - PAGE *h; - - pginfo = (DB_PGINFO *)cookie->data; - if (!F_ISSET(pginfo, DB_AM_SWAP)) - return (0); - - h = pp; - return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) : - __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 0)); -} - -/* - * __bam_mswap -- - * Swap the bytes on the btree metadata page. 
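For reference, a minimal standalone C sketch of the default ordering used by __bam_defcmp and __bam_defpfx above: keys compare byte-wise with the shorter key collating first, and the prefix routine returns how many leading bytes are needed to tell two keys apart. The struct kv type and function names are illustrative stand-ins for the DBT-based interfaces, not BDB APIs.

#include <stddef.h>
#include <stdio.h>

struct kv { const void *data; size_t size; };   /* stand-in for a DBT */

static long default_cmp(const struct kv *a, const struct kv *b)
{
    size_t len = a->size < b->size ? a->size : b->size;
    const unsigned char *p1 = a->data, *p2 = b->data;

    for (; len-- > 0; ++p1, ++p2)
        if (*p1 != *p2)
            return ((long)*p1 - (long)*p2);
    return ((long)a->size - (long)b->size);     /* shorter key sorts first */
}

static size_t default_pfx(const struct kv *a, const struct kv *b)
{
    size_t cnt = 1, len = a->size < b->size ? a->size : b->size;
    const unsigned char *p1 = a->data, *p2 = b->data;

    for (; len-- > 0; ++p1, ++p2, ++cnt)
        if (*p1 != *p2)
            return (cnt);
    /* Keys match up to the shorter length: the longer one collates after. */
    if (a->size < b->size)
        return (a->size + 1);
    if (b->size < a->size)
        return (b->size + 1);
    return (b->size);
}

int main(void)
{
    struct kv a = { "apple", 5 }, b = { "apricot", 7 };

    printf("cmp=%ld pfx=%zu\n", default_cmp(&a, &b), default_pfx(&a, &b));
    return 0;
}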
- * - * PUBLIC: int __bam_mswap __P((PAGE *)); - */ -int -__bam_mswap(pg) - PAGE *pg; -{ - u_int8_t *p; - - __db_metaswap(pg); - - p = (u_int8_t *)pg + sizeof(DBMETA); - - p += sizeof(u_int32_t); /* unused */ - SWAP32(p); /* minkey */ - SWAP32(p); /* re_len */ - SWAP32(p); /* re_pad */ - SWAP32(p); /* root */ - p += 92 * sizeof(u_int32_t); /* unused */ - SWAP32(p); /* crypto_magic */ - - return (0); -} diff --git a/storage/bdb/btree/bt_curadj.c b/storage/bdb/btree/bt_curadj.c deleted file mode 100644 index e2128666cec..00000000000 --- a/storage/bdb/btree/bt_curadj.c +++ /dev/null @@ -1,590 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_curadj.c,v 12.3 2005/07/20 16:50:45 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/btree.h" - -static int __bam_opd_cursor __P((DB *, DBC *, db_pgno_t, u_int32_t, u_int32_t)); - -/* - * Cursor adjustments are logged if they are for subtransactions. This is - * because it's possible for a subtransaction to adjust cursors which will - * still be active after the subtransaction aborts, and so which must be - * restored to their previous locations. Cursors that can be both affected - * by our cursor adjustments and active after our transaction aborts can - * only be found in our parent transaction -- cursors in other transactions, - * including other child transactions of our parent, must have conflicting - * locker IDs, and so cannot be affected by adjustments in this transaction. - */ - -/* - * __bam_ca_delete -- - * Update the cursors when items are deleted and when already deleted - * items are overwritten. Return the number of relevant cursors found. - * - * PUBLIC: int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int, int *)); - */ -int -__bam_ca_delete(dbp, pgno, indx, delete, countp) - DB *dbp; - db_pgno_t pgno; - u_int32_t indx; - int delete, *countp; -{ - BTREE_CURSOR *cp; - DB *ldbp; - DB_ENV *dbenv; - DBC *dbc; - int count; /* !!!: Has to contain max number of cursors. */ - - dbenv = dbp->dbenv; - - /* - * Adjust the cursors. We have the page write locked, so the - * only other cursors that can be pointing at a page are - * those in the same thread of control. Unfortunately, we don't - * know that they're using the same DB handle, so traverse - * all matching DB handles in the same DB_ENV, then all cursors - * on each matching DB handle. - * - * Each cursor is single-threaded, so we only need to lock the - * list of DBs and then the list of cursors in each DB. - */ - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (count = 0, ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - cp = (BTREE_CURSOR *)dbc->internal; - if (cp->pgno == pgno && cp->indx == indx) { - /* - * [#8032] This assert is checking - * for possible race conditions where we - * hold a cursor position without a lock. - * Unfortunately, there are paths in the - * Btree code that do not satisfy these - * conditions. None of them are known to - * be a problem, but this assert should - * be re-activated when the Btree stack - * code is re-written. 
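For reference, a minimal standalone C sketch of the byte-order conversion that __bam_pgin, __bam_pgout and __bam_mswap above perform on 32-bit metadata fields when the on-disk byte order differs from the host's. swap32() is an illustrative stand-in for the SWAP32 macro used in the removed source.

#include <stdint.h>
#include <stdio.h>

static uint32_t swap32(uint32_t v)
{
    return ((v & 0x000000ffU) << 24) |
           ((v & 0x0000ff00U) <<  8) |
           ((v & 0x00ff0000U) >>  8) |
           ((v & 0xff000000U) >> 24);
}

int main(void)
{
    uint32_t minkey = 2;                /* e.g. the metadata "minkey" field  */
    uint32_t on_disk = swap32(minkey);  /* convert for the foreign byte order */

    printf("host=%08x disk=%08x back=%08x\n",
        (unsigned)minkey, (unsigned)on_disk, (unsigned)swap32(on_disk));
    return 0;
}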
- DB_ASSERT(!STD_LOCKING(dbc) || - cp->lock_mode != DB_LOCK_NG); - */ - if (delete) - F_SET(cp, C_DELETED); - else - F_CLR(cp, C_DELETED); - ++count; - } - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - if (countp != NULL) - *countp = count; - return (0); -} - -/* - * __ram_ca_delete -- - * Return if any relevant cursors found. - * - * PUBLIC: int __ram_ca_delete __P((DB *, db_pgno_t, int *)); - */ -int -__ram_ca_delete(dbp, root_pgno, foundp) - DB *dbp; - db_pgno_t root_pgno; - int *foundp; -{ - DB *ldbp; - DBC *dbc; - DB_ENV *dbenv; - int found; - - found = 0; - dbenv = dbp->dbenv; - - /* - * Review the cursors. See the comment in __bam_ca_delete(). - */ - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - found == 0 && ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - found == 0 && dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) - if (dbc->internal->root == root_pgno) - found = 1; - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - *foundp = found; - return (0); -} - -/* - * __bam_ca_di -- - * Adjust the cursors during a delete or insert. - * - * PUBLIC: int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int)); - */ -int -__bam_ca_di(my_dbc, pgno, indx, adjust) - DBC *my_dbc; - db_pgno_t pgno; - u_int32_t indx; - int adjust; -{ - DB *dbp, *ldbp; - DB_ENV *dbenv; - DB_LSN lsn; - DB_TXN *my_txn; - DBC *dbc; - DBC_INTERNAL *cp; - int found, ret; - - dbp = my_dbc->dbp; - dbenv = dbp->dbenv; - - my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL; - - /* - * Adjust the cursors. See the comment in __bam_ca_delete(). - */ - found = 0; - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - if (dbc->dbtype == DB_RECNO) - continue; - cp = dbc->internal; - if (cp->pgno == pgno && cp->indx >= indx) { - /* Cursor indices should never be negative. */ - DB_ASSERT(cp->indx != 0 || adjust > 0); - /* [#8032] - DB_ASSERT(!STD_LOCKING(dbc) || - cp->lock_mode != DB_LOCK_NG); - */ - cp->indx += adjust; - if (my_txn != NULL && dbc->txn != my_txn) - found = 1; - } - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - if (found != 0 && DBC_LOGGING(my_dbc)) { - if ((ret = __bam_curadj_log(dbp, my_dbc->txn, &lsn, 0, - DB_CA_DI, pgno, 0, 0, (u_int32_t)adjust, indx, 0)) != 0) - return (ret); - } - - return (0); -} - -/* - * __bam_opd_cursor -- create a new opd cursor. - */ -static int -__bam_opd_cursor(dbp, dbc, first, tpgno, ti) - DB *dbp; - DBC *dbc; - db_pgno_t tpgno; - u_int32_t first, ti; -{ - BTREE_CURSOR *cp, *orig_cp; - DBC *dbc_nopd; - int ret; - - orig_cp = (BTREE_CURSOR *)dbc->internal; - dbc_nopd = NULL; - - /* - * Allocate a new cursor and create the stack. If duplicates - * are sorted, we've just created an off-page duplicate Btree. - * If duplicates aren't sorted, we've just created a Recno tree. - * - * Note that in order to get here at all, there shouldn't be - * an old off-page dup cursor--to augment the checking db_c_newopd - * will do, assert this. 
- */ - DB_ASSERT(orig_cp->opd == NULL); - if ((ret = __db_c_newopd(dbc, tpgno, orig_cp->opd, &dbc_nopd)) != 0) - return (ret); - - cp = (BTREE_CURSOR *)dbc_nopd->internal; - cp->pgno = tpgno; - cp->indx = ti; - - if (dbp->dup_compare == NULL) { - /* - * Converting to off-page Recno trees is tricky. The - * record number for the cursor is the index + 1 (to - * convert to 1-based record numbers). - */ - cp->recno = ti + 1; - } - - /* - * Transfer the deleted flag from the top-level cursor to the - * created one. - */ - if (F_ISSET(orig_cp, C_DELETED)) { - F_SET(cp, C_DELETED); - F_CLR(orig_cp, C_DELETED); - } - - /* Stack the cursors and reset the initial cursor's index. */ - orig_cp->opd = dbc_nopd; - orig_cp->indx = first; - return (0); -} - -/* - * __bam_ca_dup -- - * Adjust the cursors when moving items from a leaf page to a duplicates - * page. - * - * PUBLIC: int __bam_ca_dup __P((DBC *, - * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t)); - */ -int -__bam_ca_dup(my_dbc, first, fpgno, fi, tpgno, ti) - DBC *my_dbc; - db_pgno_t fpgno, tpgno; - u_int32_t first, fi, ti; -{ - BTREE_CURSOR *orig_cp; - DB *dbp, *ldbp; - DBC *dbc; - DB_ENV *dbenv; - DB_LSN lsn; - DB_TXN *my_txn; - int found, ret; - - dbp = my_dbc->dbp; - dbenv = dbp->dbenv; - my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL; - - /* - * Adjust the cursors. See the comment in __bam_ca_delete(). - */ - found = 0; - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { -loop: MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - /* Find cursors pointing to this record. */ - orig_cp = (BTREE_CURSOR *)dbc->internal; - if (orig_cp->pgno != fpgno || orig_cp->indx != fi) - continue; - - /* - * Since we rescan the list see if this is already - * converted. - */ - if (orig_cp->opd != NULL) - continue; - - MUTEX_UNLOCK(dbenv, dbp->mutex); - /* [#8032] - DB_ASSERT(!STD_LOCKING(dbc) || - orig_cp->lock_mode != DB_LOCK_NG); - */ - if ((ret = __bam_opd_cursor(dbp, - dbc, first, tpgno, ti)) !=0) - return (ret); - if (my_txn != NULL && dbc->txn != my_txn) - found = 1; - /* We released the mutex to get a cursor, start over. */ - goto loop; - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - if (found != 0 && DBC_LOGGING(my_dbc)) { - if ((ret = __bam_curadj_log(dbp, my_dbc->txn, - &lsn, 0, DB_CA_DUP, fpgno, tpgno, 0, first, fi, ti)) != 0) - return (ret); - } - return (0); -} - -/* - * __bam_ca_undodup -- - * Adjust the cursors when returning items to a leaf page - * from a duplicate page. - * Called only during undo processing. - * - * PUBLIC: int __bam_ca_undodup __P((DB *, - * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, u_int32_t)); - */ -int -__bam_ca_undodup(dbp, first, fpgno, fi, ti) - DB *dbp; - db_pgno_t fpgno; - u_int32_t first, fi, ti; -{ - BTREE_CURSOR *orig_cp; - DB *ldbp; - DBC *dbc; - DB_ENV *dbenv; - int ret; - - dbenv = dbp->dbenv; - - /* - * Adjust the cursors. See the comment in __bam_ca_delete(). 
- */ - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { -loop: MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - orig_cp = (BTREE_CURSOR *)dbc->internal; - - /* - * A note on the orig_cp->opd != NULL requirement here: - * it's possible that there's a cursor that refers to - * the same duplicate set, but which has no opd cursor, - * because it refers to a different item and we took - * care of it while processing a previous record. - */ - if (orig_cp->pgno != fpgno || - orig_cp->indx != first || - orig_cp->opd == NULL || ((BTREE_CURSOR *) - orig_cp->opd->internal)->indx != ti) - continue; - MUTEX_UNLOCK(dbenv, dbp->mutex); - if ((ret = __db_c_close(orig_cp->opd)) != 0) - return (ret); - orig_cp->opd = NULL; - orig_cp->indx = fi; - /* - * We released the mutex to free a cursor, - * start over. - */ - goto loop; - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - return (0); -} - -/* - * __bam_ca_rsplit -- - * Adjust the cursors when doing reverse splits. - * - * PUBLIC: int __bam_ca_rsplit __P((DBC *, db_pgno_t, db_pgno_t)); - */ -int -__bam_ca_rsplit(my_dbc, fpgno, tpgno) - DBC* my_dbc; - db_pgno_t fpgno, tpgno; -{ - DB *dbp, *ldbp; - DBC *dbc; - DB_ENV *dbenv; - DB_LSN lsn; - DB_TXN *my_txn; - int found, ret; - - dbp = my_dbc->dbp; - dbenv = dbp->dbenv; - my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL; - - /* - * Adjust the cursors. See the comment in __bam_ca_delete(). - */ - found = 0; - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - if (dbc->dbtype == DB_RECNO) - continue; - if (dbc->internal->pgno == fpgno) { - dbc->internal->pgno = tpgno; - /* [#8032] - DB_ASSERT(!STD_LOCKING(dbc) || - dbc->internal->lock_mode != DB_LOCK_NG); - */ - if (my_txn != NULL && dbc->txn != my_txn) - found = 1; - } - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - if (found != 0 && DBC_LOGGING(my_dbc)) { - if ((ret = __bam_curadj_log(dbp, my_dbc->txn, - &lsn, 0, DB_CA_RSPLIT, fpgno, tpgno, 0, 0, 0, 0)) != 0) - return (ret); - } - return (0); -} - -/* - * __bam_ca_split -- - * Adjust the cursors when splitting a page. - * - * PUBLIC: int __bam_ca_split __P((DBC *, - * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, int)); - */ -int -__bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft) - DBC *my_dbc; - db_pgno_t ppgno, lpgno, rpgno; - u_int32_t split_indx; - int cleft; -{ - DB *dbp, *ldbp; - DBC *dbc; - DBC_INTERNAL *cp; - DB_ENV *dbenv; - DB_LSN lsn; - DB_TXN *my_txn; - int found, ret; - - dbp = my_dbc->dbp; - dbenv = dbp->dbenv; - my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL; - - /* - * Adjust the cursors. See the comment in __bam_ca_delete(). - * - * If splitting the page that a cursor was on, the cursor has to be - * adjusted to point to the same record as before the split. Most - * of the time we don't adjust pointers to the left page, because - * we're going to copy its contents back over the original page. 
If - * the cursor is on the right page, it is decremented by the number of - * records split to the left page. - */ - found = 0; - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - if (dbc->dbtype == DB_RECNO) - continue; - cp = dbc->internal; - if (cp->pgno == ppgno) { - /* [#8032] - DB_ASSERT(!STD_LOCKING(dbc) || - cp->lock_mode != DB_LOCK_NG); - */ - if (my_txn != NULL && dbc->txn != my_txn) - found = 1; - if (cp->indx < split_indx) { - if (cleft) - cp->pgno = lpgno; - } else { - cp->pgno = rpgno; - cp->indx -= split_indx; - } - } - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - if (found != 0 && DBC_LOGGING(my_dbc)) { - if ((ret = __bam_curadj_log(dbp, - my_dbc->txn, &lsn, 0, DB_CA_SPLIT, ppgno, rpgno, - cleft ? lpgno : PGNO_INVALID, 0, split_indx, 0)) != 0) - return (ret); - } - - return (0); -} - -/* - * __bam_ca_undosplit -- - * Adjust the cursors when undoing a split of a page. - * If we grew a level we will execute this for both the - * left and the right pages. - * Called only during undo processing. - * - * PUBLIC: int __bam_ca_undosplit __P((DB *, - * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t)); - */ -int -__bam_ca_undosplit(dbp, frompgno, topgno, lpgno, split_indx) - DB *dbp; - db_pgno_t frompgno, topgno, lpgno; - u_int32_t split_indx; -{ - DB *ldbp; - DBC *dbc; - DB_ENV *dbenv; - DBC_INTERNAL *cp; - - dbenv = dbp->dbenv; - - /* - * Adjust the cursors. See the comment in __bam_ca_delete(). - * - * When backing out a split, we move the cursor back - * to the original offset and bump it by the split_indx. - */ - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - if (dbc->dbtype == DB_RECNO) - continue; - cp = dbc->internal; - if (cp->pgno == topgno) { - cp->pgno = frompgno; - cp->indx += split_indx; - } else if (cp->pgno == lpgno) - cp->pgno = frompgno; - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - return (0); -} diff --git a/storage/bdb/btree/bt_cursor.c b/storage/bdb/btree/bt_cursor.c deleted file mode 100644 index 808dd7aa873..00000000000 --- a/storage/bdb/btree/bt_cursor.c +++ /dev/null @@ -1,2682 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. 
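For reference, a minimal standalone C sketch of the cursor-adjustment rule __bam_ca_split above applies when a page splits: a cursor left of the split index stays put (following the copied left page only when cleft is set), while a cursor at or past the split index moves to the right page with its index shifted down by the number of records split to the left. struct toy_cursor and adjust_on_split() are illustrative stand-ins, not BDB types.

#include <stdint.h>
#include <stdio.h>

struct toy_cursor { uint32_t pgno, indx; };

static void adjust_on_split(struct toy_cursor *c, uint32_t ppgno,
    uint32_t lpgno, uint32_t rpgno, uint32_t split_indx, int cleft)
{
    if (c->pgno != ppgno)
        return;                         /* not on the page that split     */
    if (c->indx < split_indx) {
        if (cleft)
            c->pgno = lpgno;            /* left half went to a new page   */
    } else {
        c->pgno = rpgno;                /* item moved to the right page   */
        c->indx -= split_indx;          /* rebase the index               */
    }
}

int main(void)
{
    struct toy_cursor c = { 7, 10 };

    adjust_on_split(&c, 7, 12, 13, 6, 0);
    printf("pgno=%u indx=%u\n", (unsigned)c.pgno, (unsigned)c.indx); /* 13 4 */
    return 0;
}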
- * - * $Id: bt_cursor.c,v 12.7 2005/08/08 14:27:59 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" - -static int __bam_bulk __P((DBC *, DBT *, u_int32_t)); -static int __bam_c_close __P((DBC *, db_pgno_t, int *)); -static int __bam_c_del __P((DBC *)); -static int __bam_c_destroy __P((DBC *)); -static int __bam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); -static int __bam_c_getstack __P((DBC *)); -static int __bam_c_next __P((DBC *, int, int)); -static int __bam_c_physdel __P((DBC *)); -static int __bam_c_prev __P((DBC *)); -static int __bam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); -static int __bam_c_search __P((DBC *, - db_pgno_t, const DBT *, u_int32_t, int *)); -static int __bam_c_writelock __P((DBC *)); -static int __bam_getboth_finddatum __P((DBC *, DBT *, u_int32_t)); -static int __bam_getbothc __P((DBC *, DBT *)); -static int __bam_get_prev __P((DBC *)); -static int __bam_isopd __P((DBC *, db_pgno_t *)); - -/* - * Acquire a new page/lock. If we hold a page/lock, discard the page, and - * lock-couple the lock. - * - * !!! - * We have to handle both where we have a lock to lock-couple and where we - * don't -- we don't duplicate locks when we duplicate cursors if we are - * running in a transaction environment as there's no point if locks are - * never discarded. This means that the cursor may or may not hold a lock. - * In the case where we are descending the tree we always want to unlock - * the held interior page so we use ACQUIRE_COUPLE. - */ -#undef ACQUIRE -#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pagep, ret) do { \ - DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \ - if ((pagep) != NULL) { \ - ret = __memp_fput(__mpf, pagep, 0); \ - pagep = NULL; \ - } else \ - ret = 0; \ - if ((ret) == 0 && STD_LOCKING(dbc)) \ - ret = __db_lget(dbc, LCK_COUPLE, lpgno, mode, 0, &(lock));\ - if ((ret) == 0) \ - ret = __memp_fget(__mpf, &(fpgno), 0, &(pagep)); \ -} while (0) - -/* Acquire a new page/lock for a cursor. */ -#undef ACQUIRE_CUR -#define ACQUIRE_CUR(dbc, mode, p, ret) do { \ - BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \ - if (p != __cp->pgno) \ - __cp->pgno = PGNO_INVALID; \ - ACQUIRE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \ - if ((ret) == 0) { \ - __cp->pgno = p; \ - __cp->lock_mode = (mode); \ - } \ -} while (0) - -/* - * Acquire a write lock if we don't already have one. - * - * !!! - * See ACQUIRE macro on why we handle cursors that don't have locks. - */ -#undef ACQUIRE_WRITE_LOCK -#define ACQUIRE_WRITE_LOCK(dbc, ret) do { \ - BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \ - ret = 0; \ - if (STD_LOCKING(dbc) && \ - __cp->lock_mode != DB_LOCK_WRITE && \ - ((ret) = __db_lget(dbc, \ - LOCK_ISSET(__cp->lock) ? LCK_COUPLE : 0, \ - __cp->pgno, DB_LOCK_WRITE, 0, &__cp->lock)) == 0) \ - __cp->lock_mode = DB_LOCK_WRITE; \ -} while (0) - -/* Discard the current page/lock for a cursor. 
*/ -#undef DISCARD_CUR -#define DISCARD_CUR(dbc, ret) do { \ - BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \ - DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \ - int __t_ret; \ - if ((__cp->page) != NULL) { \ - __t_ret = __memp_fput(__mpf, __cp->page, 0); \ - __cp->page = NULL; \ - } else \ - __t_ret = 0; \ - if (__t_ret != 0 && (ret) == 0) \ - ret = __t_ret; \ - __t_ret = __TLPUT((dbc), __cp->lock); \ - if (__t_ret != 0 && (ret) == 0) \ - ret = __t_ret; \ - if ((ret) == 0 && !LOCK_ISSET(__cp->lock)) \ - __cp->lock_mode = DB_LOCK_NG; \ -} while (0) - -/* If on-page item is a deleted record. */ -#undef IS_DELETED -#define IS_DELETED(dbp, page, indx) \ - B_DISSET(GET_BKEYDATA(dbp, page, \ - (indx) + (TYPE(page) == P_LBTREE ? O_INDX : 0))->type) -#undef IS_CUR_DELETED -#define IS_CUR_DELETED(dbc) \ - IS_DELETED((dbc)->dbp, (dbc)->internal->page, (dbc)->internal->indx) - -/* - * Test to see if two cursors could point to duplicates of the same key. - * In the case of off-page duplicates they are they same, as the cursors - * will be in the same off-page duplicate tree. In the case of on-page - * duplicates, the key index offsets must be the same. For the last test, - * as the original cursor may not have a valid page pointer, we use the - * current cursor's. - */ -#undef IS_DUPLICATE -#define IS_DUPLICATE(dbc, i1, i2) \ - (P_INP((dbc)->dbp,((PAGE *)(dbc)->internal->page))[i1] == \ - P_INP((dbc)->dbp,((PAGE *)(dbc)->internal->page))[i2]) -#undef IS_CUR_DUPLICATE -#define IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx) \ - (F_ISSET(dbc, DBC_OPD) || \ - (orig_pgno == (dbc)->internal->pgno && \ - IS_DUPLICATE(dbc, (dbc)->internal->indx, orig_indx))) - -/* - * __bam_c_init -- - * Initialize the access private portion of a cursor - * - * PUBLIC: int __bam_c_init __P((DBC *, DBTYPE)); - */ -int -__bam_c_init(dbc, dbtype) - DBC *dbc; - DBTYPE dbtype; -{ - DB_ENV *dbenv; - int ret; - - dbenv = dbc->dbp->dbenv; - - /* Allocate/initialize the internal structure. */ - if (dbc->internal == NULL && (ret = - __os_calloc(dbenv, 1, sizeof(BTREE_CURSOR), &dbc->internal)) != 0) - return (ret); - - /* Initialize methods. */ - dbc->c_close = __db_c_close_pp; - dbc->c_count = __db_c_count_pp; - dbc->c_del = __db_c_del_pp; - dbc->c_dup = __db_c_dup_pp; - dbc->c_get = __db_c_get_pp; - dbc->c_pget = __db_c_pget_pp; - dbc->c_put = __db_c_put_pp; - if (dbtype == DB_BTREE) { - dbc->c_am_bulk = __bam_bulk; - dbc->c_am_close = __bam_c_close; - dbc->c_am_del = __bam_c_del; - dbc->c_am_destroy = __bam_c_destroy; - dbc->c_am_get = __bam_c_get; - dbc->c_am_put = __bam_c_put; - dbc->c_am_writelock = __bam_c_writelock; - } else { - dbc->c_am_bulk = __bam_bulk; - dbc->c_am_close = __bam_c_close; - dbc->c_am_del = __ram_c_del; - dbc->c_am_destroy = __bam_c_destroy; - dbc->c_am_get = __ram_c_get; - dbc->c_am_put = __ram_c_put; - dbc->c_am_writelock = __bam_c_writelock; - } - - return (0); -} - -/* - * __bam_c_refresh - * Set things up properly for cursor re-use. - * - * PUBLIC: int __bam_c_refresh __P((DBC *)); - */ -int -__bam_c_refresh(dbc) - DBC *dbc; -{ - BTREE *t; - BTREE_CURSOR *cp; - DB *dbp; - - dbp = dbc->dbp; - t = dbp->bt_internal; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * If our caller set the root page number, it's because the root was - * known. This is always the case for off page dup cursors. Else, - * pull it out of our internal information. 
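For reference, a minimal standalone C sketch of the lock-coupling pattern the LCK_COUPLE acquires in the ACQUIRE/ACQUIRE_CUR macros above rely on while descending the tree: the next page's lock is taken before the previously held one is released, so some lock protects the descent at every step. The pthread-based node type below is an illustrative stand-in, not the BDB locking subsystem.

#include <pthread.h>
#include <stdio.h>

struct node {
    pthread_mutex_t latch;
    struct node *child;
    int value;
};

/* Descend one level while always holding at least one latch. */
static struct node *descend(struct node *parent)
{
    struct node *child = parent->child;

    if (child != NULL)
        pthread_mutex_lock(&child->latch);   /* couple: lock child first */
    pthread_mutex_unlock(&parent->latch);    /* ...then drop the parent  */
    return child;
}

int main(void)
{
    static struct node leaf = { PTHREAD_MUTEX_INITIALIZER, NULL, 42 };
    static struct node root = { PTHREAD_MUTEX_INITIALIZER, &leaf, 0 };
    struct node *cur;

    pthread_mutex_lock(&root.latch);
    for (cur = &root; cur != NULL && cur->child != NULL; )
        cur = descend(cur);
    if (cur != NULL) {
        printf("reached leaf value %d\n", cur->value);
        pthread_mutex_unlock(&cur->latch);
    }
    return 0;
}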
- */ - if (cp->root == PGNO_INVALID) - cp->root = t->bt_root; - - LOCK_INIT(cp->lock); - cp->lock_mode = DB_LOCK_NG; - - if (cp->sp == NULL) { - cp->sp = cp->stack; - cp->esp = cp->stack + sizeof(cp->stack) / sizeof(cp->stack[0]); - } - BT_STK_CLR(cp); - - /* - * The btree leaf page data structures require that two key/data pairs - * (or four items) fit on a page, but other than that there's no fixed - * requirement. The btree off-page duplicates only require two items, - * to be exact, but requiring four for them as well seems reasonable. - * - * Recno uses the btree bt_ovflsize value -- it's close enough. - */ - cp->ovflsize = B_MINKEY_TO_OVFLSIZE( - dbp, F_ISSET(dbc, DBC_OPD) ? 2 : t->bt_minkey, dbp->pgsize); - - cp->recno = RECNO_OOB; - cp->order = INVALID_ORDER; - cp->flags = 0; - - /* Initialize for record numbers. */ - if (F_ISSET(dbc, DBC_OPD) || - dbc->dbtype == DB_RECNO || F_ISSET(dbp, DB_AM_RECNUM)) { - F_SET(cp, C_RECNUM); - - /* - * All btrees that support record numbers, optionally standard - * recno trees, and all off-page duplicate recno trees have - * mutable record numbers. - */ - if ((F_ISSET(dbc, DBC_OPD) && dbc->dbtype == DB_RECNO) || - F_ISSET(dbp, DB_AM_RECNUM | DB_AM_RENUMBER)) - F_SET(cp, C_RENUMBER); - } - - return (0); -} - -/* - * __bam_c_close -- - * Close down the cursor. - */ -static int -__bam_c_close(dbc, root_pgno, rmroot) - DBC *dbc; - db_pgno_t root_pgno; - int *rmroot; -{ - BTREE_CURSOR *cp, *cp_opd, *cp_c; - DB *dbp; - DBC *dbc_opd, *dbc_c; - DB_MPOOLFILE *mpf; - PAGE *h; - int cdb_lock, count, ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - cp_opd = (dbc_opd = cp->opd) == NULL ? - NULL : (BTREE_CURSOR *)dbc_opd->internal; - cdb_lock = ret = 0; - - /* - * There are 3 ways this function is called: - * - * 1. Closing a primary cursor: we get called with a pointer to a - * primary cursor that has a NULL opd field. This happens when - * closing a btree/recno database cursor without an associated - * off-page duplicate tree. - * - * 2. Closing a primary and an off-page duplicate cursor stack: we - * get called with a pointer to the primary cursor which has a - * non-NULL opd field. This happens when closing a btree cursor - * into database with an associated off-page btree/recno duplicate - * tree. (It can't be a primary recno database, recno databases - * don't support duplicates.) - * - * 3. Closing an off-page duplicate cursor stack: we get called with - * a pointer to the off-page duplicate cursor. This happens when - * closing a non-btree database that has an associated off-page - * btree/recno duplicate tree or for a btree database when the - * opd tree is not empty (root_pgno == PGNO_INVALID). - * - * If either the primary or off-page duplicate cursor deleted a btree - * key/data pair, check to see if the item is still referenced by a - * different cursor. If it is, confirm that cursor's delete flag is - * set and leave it to that cursor to do the delete. - * - * NB: The test for == 0 below is correct. Our caller already removed - * our cursor argument from the active queue, we won't find it when we - * search the queue in __bam_ca_delete(). - * NB: It can't be true that both the primary and off-page duplicate - * cursors have deleted a btree key/data pair. Either the primary - * cursor may have deleted an item and there's no off-page duplicate - * cursor, or there's an off-page duplicate cursor and it may have - * deleted an item. - * - * Primary recno databases aren't an issue here. 
Recno keys are either - * deleted immediately or never deleted, and do not have to be handled - * here. - * - * Off-page duplicate recno databases are an issue here, cases #2 and - * #3 above can both be off-page recno databases. The problem is the - * same as the final problem for off-page duplicate btree databases. - * If we no longer need the off-page duplicate tree, we want to remove - * it. For off-page duplicate btrees, we are done with the tree when - * we delete the last item it contains, i.e., there can be no further - * references to it when it's empty. For off-page duplicate recnos, - * we remove items from the tree as the application calls the remove - * function, so we are done with the tree when we close the last cursor - * that references it. - * - * We optionally take the root page number from our caller. If the - * primary database is a btree, we can get it ourselves because dbc - * is the primary cursor. If the primary database is not a btree, - * the problem is that we may be dealing with a stack of pages. The - * cursor we're using to do the delete points at the bottom of that - * stack and we need the top of the stack. - */ - if (F_ISSET(cp, C_DELETED)) { - dbc_c = dbc; - switch (dbc->dbtype) { - case DB_BTREE: /* Case #1, #3. */ - if ((ret = __bam_ca_delete( - dbp, cp->pgno, cp->indx, 1, &count)) != 0) - goto err; - if (count == 0) - goto lock; - goto done; - case DB_RECNO: - if (!F_ISSET(dbc, DBC_OPD)) /* Case #1. */ - goto done; - /* Case #3. */ - if ((ret = __ram_ca_delete(dbp, cp->root, &count)) != 0) - goto err; - if (count == 0) - goto lock; - goto done; - case DB_HASH: - case DB_QUEUE: - case DB_UNKNOWN: - default: - ret = __db_unknown_type(dbp->dbenv, - "__bam_c_close", dbc->dbtype); - goto err; - } - } - - if (dbc_opd == NULL) - goto done; - - if (F_ISSET(cp_opd, C_DELETED)) { /* Case #2. */ - /* - * We will not have been provided a root page number. Acquire - * one from the primary database. - */ - if ((ret = __memp_fget(mpf, &cp->pgno, 0, &h)) != 0) - goto err; - root_pgno = GET_BOVERFLOW(dbp, h, cp->indx + O_INDX)->pgno; - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; - - dbc_c = dbc_opd; - switch (dbc_opd->dbtype) { - case DB_BTREE: - if ((ret = __bam_ca_delete( - dbp, cp_opd->pgno, cp_opd->indx, 1, &count)) != 0) - goto err; - if (count == 0) - goto lock; - goto done; - case DB_RECNO: - if ((ret = - __ram_ca_delete(dbp, cp_opd->root, &count)) != 0) - goto err; - if (count == 0) - goto lock; - goto done; - case DB_HASH: - case DB_QUEUE: - case DB_UNKNOWN: - default: - ret = __db_unknown_type( - dbp->dbenv, "__bam_c_close", dbc->dbtype); - goto err; - } - } - goto done; - -lock: cp_c = (BTREE_CURSOR *)dbc_c->internal; - - /* - * If this is CDB, upgrade the lock if necessary. While we acquired - * the write lock to logically delete the record, we released it when - * we returned from that call, and so may not be holding a write lock - * at the moment. - */ - if (CDB_LOCKING(dbp->dbenv)) { - if (F_ISSET(dbc, DBC_WRITECURSOR)) { - if ((ret = __lock_get(dbp->dbenv, - dbc->locker, DB_LOCK_UPGRADE, &dbc->lock_dbt, - DB_LOCK_WRITE, &dbc->mylock)) != 0) - goto err; - cdb_lock = 1; - } - goto delete; - } - - /* - * The variable dbc_c has been initialized to reference the cursor in - * which we're going to do the delete. Initialize the cursor's lock - * structures as necessary. - * - * First, we may not need to acquire any locks. 
If we're in case #3, - * that is, the primary database isn't a btree database, our caller - * is responsible for acquiring any necessary locks before calling us. - */ - if (F_ISSET(dbc, DBC_OPD)) - goto delete; - - /* - * Otherwise, acquire a write lock on the primary database's page. - * - * Lock the primary database page, regardless of whether we're deleting - * an item on a primary database page or an off-page duplicates page. - * - * If the cursor that did the initial logical deletion (and had a write - * lock) is not the same cursor doing the physical deletion (which may - * have only ever had a read lock on the item), we need to upgrade to a - * write lock. The confusion comes as follows: - * - * C1 created, acquires item read lock - * C2 dup C1, create C2, also has item read lock. - * C1 acquire write lock, delete item - * C1 close - * C2 close, needs a write lock to physically delete item. - * - * If we're in a TXN, we know that C2 will be able to acquire the write - * lock, because no locker other than the one shared by C1 and C2 can - * acquire a write lock -- the original write lock C1 acquired was never - * discarded. - * - * If we're not in a TXN, it's nastier. Other cursors might acquire - * read locks on the item after C1 closed, discarding its write lock, - * and such locks would prevent C2 from acquiring a read lock. That's - * OK, though, we'll simply wait until we can acquire a write lock, or - * we'll deadlock. (Which better not happen, since we're not in a TXN.) - * - * There are similar scenarios with dirty reads, where the cursor may - * have downgraded its write lock to a was-write lock. - */ - if (STD_LOCKING(dbc)) - if ((ret = __db_lget(dbc, - LCK_COUPLE, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0) - goto err; - -delete: /* - * If the delete occurred in a Btree, we're going to look at the page - * to see if the item has to be physically deleted. Otherwise, we do - * not need the actual page (and it may not even exist, it might have - * been truncated from the file after an allocation aborted). - * - * Delete the on-page physical item referenced by the cursor. - */ - if (dbc_c->dbtype == DB_BTREE) { - if ((ret = __memp_fget(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0) - goto err; - if ((ret = __bam_c_physdel(dbc_c)) != 0) - goto err; - } - - /* - * If we're not working in an off-page duplicate tree, then we're - * done. - */ - if (!F_ISSET(dbc_c, DBC_OPD) || root_pgno == PGNO_INVALID) - goto done; - - /* - * We may have just deleted the last element in the off-page duplicate - * tree, and closed the last cursor in the tree. For an off-page btree - * there are no other cursors in the tree by definition, if the tree is - * empty. For an off-page recno we know we have closed the last cursor - * in the tree because the __ram_ca_delete call above returned 0 only - * in that case. So, if the off-page duplicate tree is empty at this - * point, we want to remove it. - */ - if ((ret = __memp_fget(mpf, &root_pgno, 0, &h)) != 0) - goto err; - if (NUM_ENT(h) == 0) { - DISCARD_CUR(dbc_c, ret); - if (ret != 0) - goto err; - if ((ret = __db_free(dbc, h)) != 0) - goto err; - } else { - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; - goto done; - } - - /* - * When removing the tree, we have to do one of two things. If this is - * case #2, that is, the primary tree is a btree, delete the key that's - * associated with the tree from the btree leaf page. We know we are - * the only reference to it and we already have the correct lock. 
We - * detect this case because the cursor that was passed to us references - * an off-page duplicate cursor. - * - * If this is case #3, that is, the primary tree isn't a btree, pass - * the information back to our caller, it's their job to do cleanup on - * the primary page. - */ - if (dbc_opd != NULL) { - if ((ret = __memp_fget(mpf, &cp->pgno, 0, &cp->page)) != 0) - goto err; - if ((ret = __bam_c_physdel(dbc)) != 0) - goto err; - } else - *rmroot = 1; -err: -done: /* - * Discard the page references and locks, and confirm that the stack - * has been emptied. - */ - if (dbc_opd != NULL) - DISCARD_CUR(dbc_opd, ret); - DISCARD_CUR(dbc, ret); - - /* Downgrade any CDB lock we acquired. */ - if (cdb_lock) - (void)__lock_downgrade( - dbp->dbenv, &dbc->mylock, DB_LOCK_IWRITE, 0); - - return (ret); -} - -/* - * __bam_c_destroy -- - * Close a single cursor -- internal version. - */ -static int -__bam_c_destroy(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - - cp = (BTREE_CURSOR *)dbc->internal; - - /* Discard the structures. */ - if (cp->sp != cp->stack) - __os_free(dbc->dbp->dbenv, cp->sp); - __os_free(dbc->dbp->dbenv, cp); - - return (0); -} - -/* - * __bam_c_count -- - * Return a count of on and off-page duplicates. - * - * PUBLIC: int __bam_c_count __P((DBC *, db_recno_t *)); - */ -int -__bam_c_count(dbc, recnop) - DBC *dbc; - db_recno_t *recnop; -{ - BTREE_CURSOR *cp; - DB *dbp; - DB_MPOOLFILE *mpf; - db_indx_t indx, top; - db_recno_t recno; - int ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * Called with the top-level cursor that may reference an off-page - * duplicates tree. We don't have to acquire any new locks, we have - * to have a read lock to even get here. - */ - if (cp->opd == NULL) { - /* - * On-page duplicates, get the page and count. - */ - if ((ret = __memp_fget(mpf, &cp->pgno, 0, &cp->page)) != 0) - return (ret); - - /* - * Move back to the beginning of the set of duplicates and - * then count forward. - */ - for (indx = cp->indx;; indx -= P_INDX) - if (indx == 0 || - !IS_DUPLICATE(dbc, indx, indx - P_INDX)) - break; - for (recno = 0, - top = NUM_ENT(cp->page) - P_INDX;; indx += P_INDX) { - if (!IS_DELETED(dbp, cp->page, indx)) - ++recno; - if (indx == top || - !IS_DUPLICATE(dbc, indx, indx + P_INDX)) - break; - } - } else { - /* - * Off-page duplicates tree, get the root page of the off-page - * duplicate tree. - */ - if ((ret = __memp_fget( - mpf, &cp->opd->internal->root, 0, &cp->page)) != 0) - return (ret); - - /* - * If the page is an internal page use the page's count as it's - * up-to-date and reflects the status of cursors in the tree. - * If the page is a leaf page for unsorted duplicates, use the - * page's count as cursors don't mark items deleted on the page - * and wait, cursor delete items immediately. - * If the page is a leaf page for sorted duplicates, there may - * be cursors on the page marking deleted items -- count. - */ - if (TYPE(cp->page) == P_LDUP) - for (recno = 0, indx = 0, - top = NUM_ENT(cp->page) - O_INDX;; indx += O_INDX) { - if (!IS_DELETED(dbp, cp->page, indx)) - ++recno; - if (indx == top) - break; - } - else - recno = RE_NREC(cp->page); - } - - *recnop = recno; - - ret = __memp_fput(mpf, cp->page, 0); - cp->page = NULL; - - return (ret); -} - -/* - * __bam_c_del -- - * Delete using a cursor. 
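A minimal illustrative sketch only, not part of the bt_cursor.c being removed: __bam_c_count above is the btree implementation behind the public DBC->c_count method, and this is how an application would reach it through the Berkeley DB 4.x cursor API. count_duplicates is a hypothetical helper, error handling is trimmed, and the key DBT is assumed to have its data and size fields filled in by the caller.

#include <string.h>
#include <db.h>

/*
 * count_duplicates --
 *	Hypothetical helper: report how many data items share `key', i.e.
 *	the on/off-page duplicate count that __bam_c_count computes.
 */
static int
count_duplicates(DB *dbp, DBT *key, db_recno_t *countp)
{
	DBC *dbc;
	DBT data;
	int ret, t_ret;

	memset(&data, 0, sizeof(data));
	*countp = 0;

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);

	/* Position on the key, then ask the cursor for the count. */
	if ((ret = dbc->c_get(dbc, key, &data, DB_SET)) == 0)
		ret = dbc->c_count(dbc, countp, 0);

	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}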
- */ -static int -__bam_c_del(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - DB *dbp; - DB_MPOOLFILE *mpf; - int count, ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - ret = 0; - - /* If the item was already deleted, return failure. */ - if (F_ISSET(cp, C_DELETED)) - return (DB_KEYEMPTY); - - /* - * This code is always called with a page lock but no page. - */ - DB_ASSERT(cp->page == NULL); - - /* - * We don't physically delete the record until the cursor moves, so - * we have to have a long-lived write lock on the page instead of a - * a long-lived read lock. Note, we have to have a read lock to even - * get here. - * - * If we're maintaining record numbers, we lock the entire tree, else - * we lock the single page. - */ - if (F_ISSET(cp, C_RECNUM)) { - if ((ret = __bam_c_getstack(dbc)) != 0) - goto err; - cp->page = cp->csp->page; - } else { - ACQUIRE_CUR(dbc, DB_LOCK_WRITE, cp->pgno, ret); - if (ret != 0) - goto err; - } - - /* Log the change. */ - if (DBC_LOGGING(dbc)) { - if ((ret = __bam_cdel_log(dbp, dbc->txn, &LSN(cp->page), 0, - PGNO(cp->page), &LSN(cp->page), cp->indx)) != 0) - goto err; - } else - LSN_NOT_LOGGED(LSN(cp->page)); - - /* Set the intent-to-delete flag on the page. */ - if (TYPE(cp->page) == P_LBTREE) - B_DSET(GET_BKEYDATA(dbp, cp->page, cp->indx + O_INDX)->type); - else - B_DSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type); - - /* Mark the page dirty. */ - ret = __memp_fset(mpf, cp->page, DB_MPOOL_DIRTY); - -err: /* - * If we've been successful so far and the tree has record numbers, - * adjust the record counts. Either way, release acquired page(s). - */ - if (F_ISSET(cp, C_RECNUM)) { - if (ret == 0) - ret = __bam_adjust(dbc, -1); - (void)__bam_stkrel(dbc, 0); - } else - if (cp->page != NULL && - (t_ret = __memp_fput(mpf, cp->page, 0)) != 0 && ret == 0) - ret = t_ret; - - cp->page = NULL; - - /* - * Update the cursors last, after all chance of recoverable failure - * is past. - */ - if (ret == 0) - ret = __bam_ca_delete(dbp, cp->pgno, cp->indx, 1, &count); - - return (ret); -} - -/* - * __bam_c_dup -- - * Duplicate a btree cursor, such that the new one holds appropriate - * locks for the position of the original. - * - * PUBLIC: int __bam_c_dup __P((DBC *, DBC *)); - */ -int -__bam_c_dup(orig_dbc, new_dbc) - DBC *orig_dbc, *new_dbc; -{ - BTREE_CURSOR *orig, *new; - int ret; - - orig = (BTREE_CURSOR *)orig_dbc->internal; - new = (BTREE_CURSOR *)new_dbc->internal; - - /* - * If we're holding a lock we need to acquire a copy of it, unless - * we're in a transaction. We don't need to copy any lock we're - * holding inside a transaction because all the locks are retained - * until the transaction commits or aborts. - */ - if (orig_dbc->txn == NULL && LOCK_ISSET(orig->lock)) - if ((ret = __db_lget(new_dbc, - 0, new->pgno, new->lock_mode, 0, &new->lock)) != 0) - return (ret); - - new->ovflsize = orig->ovflsize; - new->recno = orig->recno; - new->flags = orig->flags; - - return (0); -} - -/* - * __bam_c_get -- - * Get using a cursor (btree). - */ -static int -__bam_c_get(dbc, key, data, flags, pgnop) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; - db_pgno_t *pgnop; -{ - BTREE_CURSOR *cp; - DB *dbp; - DB_MPOOLFILE *mpf; - db_pgno_t orig_pgno; - db_indx_t orig_indx; - int exact, newopd, ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - orig_pgno = cp->pgno; - orig_indx = cp->indx; - - newopd = 0; - switch (flags) { - case DB_CURRENT: - /* It's not possible to return a deleted record. 
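A sketch of the deferred-delete behaviour that __bam_c_del implements, seen from the public API: the cursor delete only marks the item as intended-for-deletion, and __bam_c_physdel removes it physically when the cursor later moves or closes, which is why DBC->c_get(DB_CURRENT) on the deleted position reports DB_KEYEMPTY. delete_at_key is a hypothetical helper and is not part of this file; error handling is trimmed.

#include <string.h>
#include <db.h>

/*
 * delete_at_key --
 *	Hypothetical helper: position a cursor on `key' and delete the item.
 */
static int
delete_at_key(DB *dbp, DBT *key)
{
	DBC *dbc;
	DBT data;
	int ret, t_ret;

	memset(&data, 0, sizeof(data));

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);

	if ((ret = dbc->c_get(dbc, key, &data, DB_SET)) == 0) {
		/*
		 * Logically delete the current item.  After this call a
		 * DBC->c_get(DB_CURRENT) on this cursor returns DB_KEYEMPTY;
		 * the page space is reclaimed only when the cursor moves or
		 * closes.
		 */
		ret = dbc->c_del(dbc, 0);
	}

	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}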
*/ - if (F_ISSET(cp, C_DELETED)) { - ret = DB_KEYEMPTY; - goto err; - } - - /* - * Acquire the current page. We have at least a read-lock - * already. The caller may have set DB_RMW asking for a - * write lock, but upgrading to a write lock has no better - * chance of succeeding now instead of later, so don't try. - */ - if ((ret = __memp_fget(mpf, &cp->pgno, 0, &cp->page)) != 0) - goto err; - break; - case DB_FIRST: - newopd = 1; - if ((ret = __bam_c_search(dbc, - PGNO_INVALID, NULL, flags, &exact)) != 0) - goto err; - break; - case DB_GET_BOTH: - case DB_GET_BOTH_RANGE: - /* - * There are two ways to get here based on DBcursor->c_get - * with the DB_GET_BOTH/DB_GET_BOTH_RANGE flags set: - * - * 1. Searching a sorted off-page duplicate tree: do a tree - * search. - * - * 2. Searching btree: do a tree search. If it returns a - * reference to off-page duplicate tree, return immediately - * and let our caller deal with it. If the search doesn't - * return a reference to off-page duplicate tree, continue - * with an on-page search. - */ - if (F_ISSET(dbc, DBC_OPD)) { - if ((ret = __bam_c_search( - dbc, PGNO_INVALID, data, flags, &exact)) != 0) - goto err; - if (flags == DB_GET_BOTH) { - if (!exact) { - ret = DB_NOTFOUND; - goto err; - } - break; - } - - /* - * We didn't require an exact match, so the search may - * may have returned an entry past the end of the page, - * or we may be referencing a deleted record. If so, - * move to the next entry. - */ - if ((cp->indx == NUM_ENT(cp->page) || - IS_CUR_DELETED(dbc)) && - (ret = __bam_c_next(dbc, 1, 0)) != 0) - goto err; - } else { - if ((ret = __bam_c_search( - dbc, PGNO_INVALID, key, flags, &exact)) != 0) - return (ret); - if (!exact) { - ret = DB_NOTFOUND; - goto err; - } - - if (pgnop != NULL && __bam_isopd(dbc, pgnop)) { - newopd = 1; - break; - } - if ((ret = - __bam_getboth_finddatum(dbc, data, flags)) != 0) - goto err; - } - break; - case DB_GET_BOTHC: - if ((ret = __bam_getbothc(dbc, data)) != 0) - goto err; - break; - case DB_LAST: - newopd = 1; - if ((ret = __bam_c_search(dbc, - PGNO_INVALID, NULL, flags, &exact)) != 0) - goto err; - break; - case DB_NEXT: - newopd = 1; - if (cp->pgno == PGNO_INVALID) { - if ((ret = __bam_c_search(dbc, - PGNO_INVALID, NULL, DB_FIRST, &exact)) != 0) - goto err; - } else - if ((ret = __bam_c_next(dbc, 1, 0)) != 0) - goto err; - break; - case DB_NEXT_DUP: - if ((ret = __bam_c_next(dbc, 1, 0)) != 0) - goto err; - if (!IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx)) { - ret = DB_NOTFOUND; - goto err; - } - break; - case DB_NEXT_NODUP: - newopd = 1; - if (cp->pgno == PGNO_INVALID) { - if ((ret = __bam_c_search(dbc, - PGNO_INVALID, NULL, DB_FIRST, &exact)) != 0) - goto err; - } else - do { - if ((ret = __bam_c_next(dbc, 1, 0)) != 0) - goto err; - } while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx)); - break; - case DB_PREV: - newopd = 1; - if (cp->pgno == PGNO_INVALID) { - if ((ret = __bam_c_search(dbc, - PGNO_INVALID, NULL, DB_LAST, &exact)) != 0) - goto err; - } else - if ((ret = __bam_c_prev(dbc)) != 0) - goto err; - break; - case DB_PREV_NODUP: - newopd = 1; - if (cp->pgno == PGNO_INVALID) { - if ((ret = __bam_c_search(dbc, - PGNO_INVALID, NULL, DB_LAST, &exact)) != 0) - goto err; - } else - do { - if ((ret = __bam_c_prev(dbc)) != 0) - goto err; - } while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx)); - break; - case DB_SET: - case DB_SET_RECNO: - newopd = 1; - if ((ret = __bam_c_search(dbc, - PGNO_INVALID, key, flags, &exact)) != 0) - goto err; - break; - case DB_SET_RANGE: - newopd = 1; - if ((ret = 
__bam_c_search(dbc, - PGNO_INVALID, key, flags, &exact)) != 0) - goto err; - - /* - * As we didn't require an exact match, the search function - * may have returned an entry past the end of the page. Or, - * we may be referencing a deleted record. If so, move to - * the next entry. - */ - if (cp->indx == NUM_ENT(cp->page) || IS_CUR_DELETED(dbc)) - if ((ret = __bam_c_next(dbc, 0, 0)) != 0) - goto err; - break; - default: - ret = __db_unknown_flag(dbp->dbenv, "__bam_c_get", flags); - goto err; - } - - /* - * We may have moved to an off-page duplicate tree. Return that - * information to our caller. - */ - if (newopd && pgnop != NULL) - (void)__bam_isopd(dbc, pgnop); - - /* - * Don't return the key, it was passed to us (this is true even if the - * application defines a compare function returning equality for more - * than one key value, since in that case which actual value we store - * in the database is undefined -- and particularly true in the case of - * duplicates where we only store one key value). - */ - if (flags == DB_GET_BOTH || - flags == DB_GET_BOTH_RANGE || flags == DB_SET) - F_SET(key, DB_DBT_ISSET); - -err: /* - * Regardless of whether we were successful or not, if the cursor - * moved, clear the delete flag, DBcursor->c_get never references - * a deleted key, if it moved at all. - */ - if (F_ISSET(cp, C_DELETED) && - (cp->pgno != orig_pgno || cp->indx != orig_indx)) - F_CLR(cp, C_DELETED); - - return (ret); -} - -static int -__bam_get_prev(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - DBT key, data; - db_pgno_t pgno; - int ret; - - if ((ret = __bam_c_prev(dbc)) != 0) - return (ret); - - if (__bam_isopd(dbc, &pgno)) { - cp = (BTREE_CURSOR *)dbc->internal; - if ((ret = __db_c_newopd(dbc, pgno, cp->opd, &cp->opd)) != 0) - return (ret); - if ((ret = cp->opd->c_am_get(cp->opd, - &key, &data, DB_LAST, NULL)) != 0) - return (ret); - } - - return (0); -} - -/* - * __bam_bulk -- Return bulk data from a btree. - */ -static int -__bam_bulk(dbc, data, flags) - DBC *dbc; - DBT *data; - u_int32_t flags; -{ - BKEYDATA *bk; - BOVERFLOW *bo; - BTREE_CURSOR *cp; - PAGE *pg; - db_indx_t *inp, indx, pg_keyoff; - int32_t *endp, key_off, *offp, *saveoffp; - u_int8_t *dbuf, *dp, *np; - u_int32_t key_size, pagesize, size, space; - int adj, is_key, need_pg, next_key, no_dup, rec_key, ret; - - ret = 0; - key_off = 0; - size = 0; - pagesize = dbc->dbp->pgsize; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * dp tracks the beginning of the page in the buffer. - * np is the next place to copy things into the buffer. - * dbuf always stays at the beginning of the buffer. - */ - dbuf = data->data; - np = dp = dbuf; - - /* Keep track of space that is left. There is a termination entry */ - space = data->ulen; - space -= sizeof(*offp); - - /* Build the offset/size table from the end up. */ - endp = (int32_t *)((u_int8_t *)dbuf + data->ulen); - endp--; - offp = endp; - - key_size = 0; - - /* - * Distinguish between BTREE and RECNO. - * There are no keys in RECNO. If MULTIPLE_KEY is specified - * then we return the record numbers. - * is_key indicates that multiple btree keys are returned. - * rec_key is set if we are returning record numbers. - * next_key is set if we are going after the next key rather than dup. - */ - if (dbc->dbtype == DB_BTREE) { - is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1: 0; - rec_key = 0; - next_key = is_key && LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP; - adj = 2; - } else { - is_key = 0; - rec_key = LF_ISSET(DB_MULTIPLE_KEY) ? 
1 : 0; - next_key = LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP; - adj = 1; - } - no_dup = LF_ISSET(DB_OPFLAGS_MASK) == DB_NEXT_NODUP; - -next_pg: - indx = cp->indx; - pg = cp->page; - - inp = P_INP(dbc->dbp, pg); - /* The current page is not yet in the buffer. */ - need_pg = 1; - - /* - * Keep track of the offset of the current key on the page. - * If we are returning keys, set it to 0 first so we force - * the copy of the key to the buffer. - */ - pg_keyoff = 0; - if (is_key == 0) - pg_keyoff = inp[indx]; - - do { - if (IS_DELETED(dbc->dbp, pg, indx)) { - if (dbc->dbtype != DB_RECNO) - continue; - - cp->recno++; - /* - * If we are not returning recnos then we - * need to fill in every slot so the user - * can calculate the record numbers. - */ - if (rec_key != 0) - continue; - - space -= 2 * sizeof(*offp); - /* Check if space as underflowed. */ - if (space > data->ulen) - goto back_up; - - /* Just mark the empty recno slots. */ - *offp-- = 0; - *offp-- = 0; - continue; - } - - /* - * Check to see if we have a new key. - * If so, then see if we need to put the - * key on the page. If its already there - * then we just point to it. - */ - if (is_key && pg_keyoff != inp[indx]) { - bk = GET_BKEYDATA(dbc->dbp, pg, indx); - if (B_TYPE(bk->type) == B_OVERFLOW) { - bo = (BOVERFLOW *)bk; - size = key_size = bo->tlen; - if (key_size > space) - goto get_key_space; - if ((ret = __bam_bulk_overflow(dbc, - bo->tlen, bo->pgno, np)) != 0) - return (ret); - space -= key_size; - key_off = (int32_t)(np - dbuf); - np += key_size; - } else { - if (need_pg) { - dp = np; - size = pagesize - HOFFSET(pg); - if (space < size) { -get_key_space: - /* Nothing added, then error. */ - if (offp == endp) { - data->size = (u_int32_t) - DB_ALIGN(size + - pagesize, 1024); - return - (DB_BUFFER_SMALL); - } - /* - * We need to back up to the - * last record put into the - * buffer so that it is - * CURRENT. - */ - if (indx != 0) - indx -= P_INDX; - else { - if ((ret = - __bam_get_prev( - dbc)) != 0) - return (ret); - indx = cp->indx; - pg = cp->page; - } - break; - } - /* - * Move the data part of the page - * to the buffer. - */ - memcpy(dp, - (u_int8_t *)pg + HOFFSET(pg), size); - need_pg = 0; - space -= size; - np += size; - } - key_size = bk->len; - key_off = (int32_t)((inp[indx] - HOFFSET(pg)) - + (dp - dbuf) + SSZA(BKEYDATA, data)); - pg_keyoff = inp[indx]; - } - } - - /* - * Reserve space for the pointers and sizes. - * Either key/data pair or just for a data item. - */ - space -= (is_key ? 4 : 2) * sizeof(*offp); - if (rec_key) - space -= sizeof(*offp); - - /* Check to see if space has underflowed. */ - if (space > data->ulen) - goto back_up; - - /* - * Determine if the next record is in the - * buffer already or if it needs to be copied in. - * If we have an off page dup, then copy as many - * as will fit into the buffer. - */ - bk = GET_BKEYDATA(dbc->dbp, pg, indx + adj - 1); - if (B_TYPE(bk->type) == B_DUPLICATE) { - bo = (BOVERFLOW *)bk; - if (is_key) { - *offp-- = (int32_t)key_off; - *offp-- = (int32_t)key_size; - } - /* - * We pass the offset of the current key. - * On return we check to see if offp has - * moved to see if any data fit. - */ - saveoffp = offp; - if ((ret = __bam_bulk_duplicates(dbc, bo->pgno, - dbuf, is_key ? offp + P_INDX : NULL, - &offp, &np, &space, no_dup)) != 0) { - if (ret == DB_BUFFER_SMALL) { - size = space; - space = 0; - /* If nothing was added, then error. 
*/ - if (offp == saveoffp) { - offp += 2; - goto back_up; - } - goto get_space; - } - return (ret); - } - } else if (B_TYPE(bk->type) == B_OVERFLOW) { - bo = (BOVERFLOW *)bk; - size = bo->tlen; - if (size > space) - goto back_up; - if ((ret = - __bam_bulk_overflow(dbc, - bo->tlen, bo->pgno, np)) != 0) - return (ret); - space -= size; - if (is_key) { - *offp-- = (int32_t)key_off; - *offp-- = (int32_t)key_size; - } else if (rec_key) - *offp-- = (int32_t)cp->recno; - *offp-- = (int32_t)(np - dbuf); - np += size; - *offp-- = (int32_t)size; - } else { - if (need_pg) { - dp = np; - size = pagesize - HOFFSET(pg); - if (space < size) { -back_up: - /* - * Back up the index so that the - * last record in the buffer is CURRENT - */ - if (indx >= adj) - indx -= adj; - else { - if ((ret = - __bam_get_prev(dbc)) != 0 && - ret != DB_NOTFOUND) - return (ret); - indx = cp->indx; - pg = cp->page; - } - if (dbc->dbtype == DB_RECNO) - cp->recno--; -get_space: - /* - * See if we put anything in the - * buffer or if we are doing a DBP->get - * did we get all of the data. - */ - if (offp >= - (is_key ? &endp[-1] : endp) || - F_ISSET(dbc, DBC_TRANSIENT)) { - data->size = (u_int32_t) - DB_ALIGN(size + - data->ulen - space, 1024); - return (DB_BUFFER_SMALL); - } - break; - } - memcpy(dp, (u_int8_t *)pg + HOFFSET(pg), size); - need_pg = 0; - space -= size; - np += size; - } - /* - * Add the offsets and sizes to the end of the buffer. - * First add the key info then the data info. - */ - if (is_key) { - *offp-- = (int32_t)key_off; - *offp-- = (int32_t)key_size; - } else if (rec_key) - *offp-- = (int32_t)cp->recno; - *offp-- = (int32_t)((inp[indx + adj - 1] - HOFFSET(pg)) - + (dp - dbuf) + SSZA(BKEYDATA, data)); - *offp-- = bk->len; - } - if (dbc->dbtype == DB_RECNO) - cp->recno++; - else if (no_dup) { - while (indx + adj < NUM_ENT(pg) && - pg_keyoff == inp[indx + adj]) - indx += adj; - } - /* - * Stop when we either run off the page or we move to the next key and - * we are not returning multiple keys. - */ - } while ((indx += adj) < NUM_ENT(pg) && - (next_key || pg_keyoff == inp[indx])); - - /* If we are off the page then try to the next page. */ - if (ret == 0 && next_key && indx >= NUM_ENT(pg)) { - cp->indx = indx; - ret = __bam_c_next(dbc, 0, 1); - if (ret == 0) - goto next_pg; - if (ret != DB_NOTFOUND) - return (ret); - } - - /* - * If we did a DBP->get we must error if we did not return - * all the data for the current key because there is - * no way to know if we did not get it all, nor any - * interface to fetch the balance. - */ - - if (ret == 0 && indx < pg->entries && - F_ISSET(dbc, DBC_TRANSIENT) && pg_keyoff == inp[indx]) { - data->size = (data->ulen - space) + size; - return (DB_BUFFER_SMALL); - } - /* - * Must leave the index pointing at the last record fetched. - * If we are not fetching keys, we may have stepped to the - * next key. - */ - if (ret == DB_BUFFER_SMALL || next_key || pg_keyoff == inp[indx]) - cp->indx = indx; - else - cp->indx = indx - P_INDX; - - if (rec_key == 1) - *offp = RECNO_OOB; - else - *offp = -1; - return (0); -} - -/* - * __bam_bulk_overflow -- - * Dump overflow record into the buffer. - * The space requirements have already been checked. 
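The offset/size table that __bam_bulk builds downward from the end of the caller's buffer (record data grows upward from the front; the table is terminated with -1, or RECNO_OOB when record numbers are returned) is what the public DB_MULTIPLE macros decode. A minimal consumer sketch, assuming the Berkeley DB 4.x DB_MULTIPLE_INIT and DB_MULTIPLE_KEY_NEXT macros and printable keys and data; dump_bulk is a hypothetical helper, not part of this file.

#include <stdio.h>
#include <db.h>

/*
 * dump_bulk --
 *	Hypothetical helper: walk a buffer previously filled by
 *	DBC->c_get(..., DB_SET | DB_MULTIPLE_KEY), printing each pair.
 *	The DBT must have been set up with DB_DBT_USERMEM and a ulen
 *	large enough for at least one key/data pair plus the table.
 */
static void
dump_bulk(DBT *bulk)
{
	void *p, *kp, *dp;
	u_int32_t klen, dlen;

	DB_MULTIPLE_INIT(p, bulk);
	for (;;) {
		DB_MULTIPLE_KEY_NEXT(p, bulk, kp, klen, dp, dlen);
		if (p == NULL)		/* Hit the terminating entry. */
			break;
		printf("%.*s -> %.*s\n",
		    (int)klen, (char *)kp, (int)dlen, (char *)dp);
	}
}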
- * PUBLIC: int __bam_bulk_overflow - * PUBLIC: __P((DBC *, u_int32_t, db_pgno_t, u_int8_t *)); - */ -int -__bam_bulk_overflow(dbc, len, pgno, dp) - DBC *dbc; - u_int32_t len; - db_pgno_t pgno; - u_int8_t *dp; -{ - DBT dbt; - - memset(&dbt, 0, sizeof(dbt)); - F_SET(&dbt, DB_DBT_USERMEM); - dbt.ulen = len; - dbt.data = (void *)dp; - return (__db_goff(dbc->dbp, &dbt, len, pgno, NULL, NULL)); -} - -/* - * __bam_bulk_duplicates -- - * Put as many off page duplicates as will fit into the buffer. - * This routine will adjust the cursor to reflect the position in - * the overflow tree. - * PUBLIC: int __bam_bulk_duplicates __P((DBC *, - * PUBLIC: db_pgno_t, u_int8_t *, int32_t *, - * PUBLIC: int32_t **, u_int8_t **, u_int32_t *, int)); - */ -int -__bam_bulk_duplicates(dbc, pgno, dbuf, keyoff, offpp, dpp, spacep, no_dup) - DBC *dbc; - db_pgno_t pgno; - u_int8_t *dbuf; - int32_t *keyoff, **offpp; - u_int8_t **dpp; - u_int32_t *spacep; - int no_dup; -{ - DB *dbp; - BKEYDATA *bk; - BOVERFLOW *bo; - BTREE_CURSOR *cp; - DBC *opd; - DBT key, data; - PAGE *pg; - db_indx_t indx, *inp; - int32_t *offp; - u_int32_t pagesize, size, space; - u_int8_t *dp, *np; - int first, need_pg, ret, t_ret; - - ret = 0; - - dbp = dbc->dbp; - cp = (BTREE_CURSOR *)dbc->internal; - opd = cp->opd; - - if (opd == NULL) { - if ((ret = __db_c_newopd(dbc, pgno, NULL, &opd)) != 0) - return (ret); - cp->opd = opd; - if ((ret = opd->c_am_get(opd, - &key, &data, DB_FIRST, NULL)) != 0) - goto close_opd; - } - - pagesize = opd->dbp->pgsize; - cp = (BTREE_CURSOR *)opd->internal; - space = *spacep; - /* Get current offset slot. */ - offp = *offpp; - - /* - * np is the next place to put data. - * dp is the beginning of the current page in the buffer. - */ - np = dp = *dpp; - first = 1; - indx = cp->indx; - - do { - /* Fetch the current record. No initial move. */ - if ((ret = __bam_c_next(opd, 0, 0)) != 0) - break; - pg = cp->page; - indx = cp->indx; - inp = P_INP(dbp, pg); - /* We need to copy the page to the buffer. */ - need_pg = 1; - - do { - if (IS_DELETED(dbp, pg, indx)) - goto contin; - bk = GET_BKEYDATA(dbp, pg, indx); - space -= 2 * sizeof(*offp); - /* Allocate space for key if needed. */ - if (first == 0 && keyoff != NULL) - space -= 2 * sizeof(*offp); - - /* Did space underflow? */ - if (space > *spacep) { - ret = DB_BUFFER_SMALL; - if (first == 1) { - /* Get the absolute value. */ - space = -(int32_t)space; - space = *spacep + space; - if (need_pg) - space += pagesize - HOFFSET(pg); - } - break; - } - if (B_TYPE(bk->type) == B_OVERFLOW) { - bo = (BOVERFLOW *)bk; - size = bo->tlen; - if (size > space) { - ret = DB_BUFFER_SMALL; - space = *spacep + size; - break; - } - if (first == 0 && keyoff != NULL) { - *offp-- = keyoff[0]; - *offp-- = keyoff[-1]; - } - if ((ret = __bam_bulk_overflow(dbc, - bo->tlen, bo->pgno, np)) != 0) - return (ret); - space -= size; - *offp-- = (int32_t)(np - dbuf); - np += size; - } else { - if (need_pg) { - dp = np; - size = pagesize - HOFFSET(pg); - if (space < size) { - ret = DB_BUFFER_SMALL; - /* Return space required. 
*/ - space = *spacep + size; - break; - } - memcpy(dp, - (u_int8_t *)pg + HOFFSET(pg), size); - need_pg = 0; - space -= size; - np += size; - } - if (first == 0 && keyoff != NULL) { - *offp-- = keyoff[0]; - *offp-- = keyoff[-1]; - } - size = bk->len; - *offp-- = (int32_t)((inp[indx] - HOFFSET(pg)) - + (dp - dbuf) + SSZA(BKEYDATA, data)); - } - *offp-- = (int32_t)size; - first = 0; - if (no_dup) - break; -contin: - indx++; - if (opd->dbtype == DB_RECNO) - cp->recno++; - } while (indx < NUM_ENT(pg)); - if (no_dup) - break; - cp->indx = indx; - - } while (ret == 0); - - /* Return the updated information. */ - *spacep = space; - *offpp = offp; - *dpp = np; - - /* - * If we ran out of space back up the pointer. - * If we did not return any dups or reached the end, close the opd. - */ - if (ret == DB_BUFFER_SMALL) { - if (opd->dbtype == DB_RECNO) { - if (--cp->recno == 0) - goto close_opd; - } else if (indx != 0) - cp->indx--; - else { - t_ret = __bam_c_prev(opd); - if (t_ret == DB_NOTFOUND) - goto close_opd; - if (t_ret != 0) - ret = t_ret; - } - } else if (keyoff == NULL && ret == DB_NOTFOUND) { - cp->indx--; - if (opd->dbtype == DB_RECNO) - --cp->recno; - } else if (indx == 0 || ret == DB_NOTFOUND) { -close_opd: - if (ret == DB_NOTFOUND) - ret = 0; - if ((t_ret = __db_c_close(opd)) != 0 && ret == 0) - ret = t_ret; - ((BTREE_CURSOR *)dbc->internal)->opd = NULL; - } - if (ret == DB_NOTFOUND) - ret = 0; - - return (ret); -} - -/* - * __bam_getbothc -- - * Search for a matching data item on a join. - */ -static int -__bam_getbothc(dbc, data) - DBC *dbc; - DBT *data; -{ - BTREE_CURSOR *cp; - DB *dbp; - DB_MPOOLFILE *mpf; - int cmp, exact, ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * Acquire the current page. We have at least a read-lock - * already. The caller may have set DB_RMW asking for a - * write lock, but upgrading to a write lock has no better - * chance of succeeding now instead of later, so don't try. - */ - if ((ret = __memp_fget(mpf, &cp->pgno, 0, &cp->page)) != 0) - return (ret); - - /* - * An off-page duplicate cursor. Search the remaining duplicates - * for one which matches (do a normal btree search, then verify - * that the retrieved record is greater than the original one). - */ - if (F_ISSET(dbc, DBC_OPD)) { - /* - * Check to make sure the desired item comes strictly after - * the current position; if it doesn't, return DB_NOTFOUND. - */ - if ((ret = __bam_cmp(dbp, data, cp->page, cp->indx, - dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare, - &cmp)) != 0) - return (ret); - - if (cmp <= 0) - return (DB_NOTFOUND); - - /* Discard the current page, we're going to do a full search. */ - if ((ret = __memp_fput(mpf, cp->page, 0)) != 0) - return (ret); - cp->page = NULL; - - return (__bam_c_search(dbc, - PGNO_INVALID, data, DB_GET_BOTH, &exact)); - } - - /* - * We're doing a DBC->c_get(DB_GET_BOTHC) and we're already searching - * a set of on-page duplicates (either sorted or unsorted). Continue - * a linear search from after the current position. - * - * (Note that we could have just finished a "set" of one duplicate, - * i.e. not a duplicate at all, but the following check will always - * return DB_NOTFOUND in this case, which is the desired behavior.) 
- */ - if (cp->indx + P_INDX >= NUM_ENT(cp->page) || - !IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX)) - return (DB_NOTFOUND); - cp->indx += P_INDX; - - return (__bam_getboth_finddatum(dbc, data, DB_GET_BOTH)); -} - -/* - * __bam_getboth_finddatum -- - * Find a matching on-page data item. - */ -static int -__bam_getboth_finddatum(dbc, data, flags) - DBC *dbc; - DBT *data; - u_int32_t flags; -{ - BTREE_CURSOR *cp; - DB *dbp; - db_indx_t base, lim, top; - int cmp, ret; - - COMPQUIET(cmp, 0); - - dbp = dbc->dbp; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * Called (sometimes indirectly) from DBC->get to search on-page data - * item(s) for a matching value. If the original flag was DB_GET_BOTH - * or DB_GET_BOTH_RANGE, the cursor is set to the first undeleted data - * item for the key. If the original flag was DB_GET_BOTHC, the cursor - * argument is set to the first data item we can potentially return. - * In both cases, there may or may not be additional duplicate data - * items to search. - * - * If the duplicates are not sorted, do a linear search. - */ - if (dbp->dup_compare == NULL) { - for (;; cp->indx += P_INDX) { - if (!IS_CUR_DELETED(dbc) && - (ret = __bam_cmp(dbp, data, cp->page, - cp->indx + O_INDX, __bam_defcmp, &cmp)) != 0) - return (ret); - if (cmp == 0) - return (0); - - if (cp->indx + P_INDX >= NUM_ENT(cp->page) || - !IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX)) - break; - } - return (DB_NOTFOUND); - } - - /* - * If the duplicates are sorted, do a binary search. The reason for - * this is that large pages and small key/data pairs result in large - * numbers of on-page duplicates before they get pushed off-page. - * - * Find the top and bottom of the duplicate set. Binary search - * requires at least two items, don't loop if there's only one. - */ - for (base = top = cp->indx; top < NUM_ENT(cp->page); top += P_INDX) - if (!IS_DUPLICATE(dbc, cp->indx, top)) - break; - if (base == (top - P_INDX)) { - if ((ret = __bam_cmp(dbp, data, - cp->page, cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0) - return (ret); - return (cmp == 0 || - (cmp < 0 && flags == DB_GET_BOTH_RANGE) ? 0 : DB_NOTFOUND); - } - - for (lim = (top - base) / (db_indx_t)P_INDX; lim != 0; lim >>= 1) { - cp->indx = base + ((lim >> 1) * P_INDX); - if ((ret = __bam_cmp(dbp, data, cp->page, - cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0) - return (ret); - if (cmp == 0) { - /* - * XXX - * No duplicate duplicates in sorted duplicate sets, - * so there can be only one. - */ - if (!IS_CUR_DELETED(dbc)) - return (0); - break; - } - if (cmp > 0) { - base = cp->indx + P_INDX; - --lim; - } - } - - /* No match found; if we're looking for an exact match, we're done. */ - if (flags == DB_GET_BOTH) - return (DB_NOTFOUND); - - /* - * Base is the smallest index greater than the data item, may be zero - * or a last + O_INDX index, and may be deleted. Find an undeleted - * item. - */ - cp->indx = base; - while (cp->indx < top && IS_CUR_DELETED(dbc)) - cp->indx += P_INDX; - return (cp->indx < top ? 0 : DB_NOTFOUND); -} - -/* - * __bam_c_put -- - * Put using a cursor. 
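__bam_getbothc and __bam_getboth_finddatum above are what DBcursor->c_get reaches with the DB_GET_BOTH family of flags. A minimal sketch from the application side, assuming a btree opened with DB_DUPSORT; find_exact_pair is a hypothetical helper, not part of this file, and error handling is trimmed.

#include <db.h>

/*
 * find_exact_pair --
 *	Hypothetical helper: position a cursor on an exact key/data pair.
 *	DB_GET_BOTH demands an exact data match; DB_GET_BOTH_RANGE would
 *	instead settle on the smallest duplicate >= data.
 */
static int
find_exact_pair(DB *dbp, DBT *key, DBT *data)
{
	DBC *dbc;
	int ret, t_ret;

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);

	/* DB_NOTFOUND here means the key/data pair is not present. */
	ret = dbc->c_get(dbc, key, data, DB_GET_BOTH);

	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}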
- */ -static int -__bam_c_put(dbc, key, data, flags, pgnop) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; - db_pgno_t *pgnop; -{ - BTREE *t; - BTREE_CURSOR *cp; - DB *dbp; - DBT dbt; - DB_MPOOLFILE *mpf; - db_pgno_t root_pgno; - u_int32_t iiop; - int cmp, exact, own, ret, stack; - void *arg; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - root_pgno = cp->root; - -split: ret = stack = 0; - switch (flags) { - case DB_CURRENT: - if (F_ISSET(cp, C_DELETED)) - return (DB_NOTFOUND); - /* FALLTHROUGH */ - - case DB_AFTER: - case DB_BEFORE: - iiop = flags; - own = 1; - - /* Acquire the current page with a write lock. */ - ACQUIRE_WRITE_LOCK(dbc, ret); - if (ret != 0) - goto err; - if ((ret = __memp_fget(mpf, &cp->pgno, 0, &cp->page)) != 0) - goto err; - break; - case DB_KEYFIRST: - case DB_KEYLAST: - case DB_NODUPDATA: - own = 0; - /* - * Searching off-page, sorted duplicate tree: do a tree search - * for the correct item; __bam_c_search returns the smallest - * slot greater than the key, use it. - * - * See comment below regarding where we can start the search. - */ - if (F_ISSET(dbc, DBC_OPD)) { - if ((ret = __bam_c_search(dbc, - F_ISSET(cp, C_RECNUM) ? cp->root : root_pgno, - data, flags, &exact)) != 0) - goto err; - stack = 1; - - /* Disallow "sorted" duplicate duplicates. */ - if (exact) { - if (IS_DELETED(dbp, cp->page, cp->indx)) { - iiop = DB_CURRENT; - break; - } - ret = __db_duperr(dbp, flags); - goto err; - } - iiop = DB_BEFORE; - break; - } - - /* - * Searching a btree. - * - * If we've done a split, we can start the search from the - * parent of the split page, which __bam_split returned - * for us in root_pgno, unless we're in a Btree with record - * numbering. In that case, we'll need the true root page - * in order to adjust the record count. - */ - if ((ret = __bam_c_search(dbc, - F_ISSET(cp, C_RECNUM) ? cp->root : root_pgno, key, - flags == DB_KEYFIRST || dbp->dup_compare != NULL ? - DB_KEYFIRST : DB_KEYLAST, &exact)) != 0) - goto err; - stack = 1; - - /* - * If we don't have an exact match, __bam_c_search returned - * the smallest slot greater than the key, use it. - */ - if (!exact) { - iiop = DB_KEYFIRST; - break; - } - - /* - * If duplicates aren't supported, replace the current item. - * (If implementing the DB->put function, our caller already - * checked the DB_NOOVERWRITE flag.) - */ - if (!F_ISSET(dbp, DB_AM_DUP)) { - iiop = DB_CURRENT; - break; - } - - /* - * If we find a matching entry, it may be an off-page duplicate - * tree. Return the page number to our caller, we need a new - * cursor. - */ - if (pgnop != NULL && __bam_isopd(dbc, pgnop)) - goto done; - - /* If the duplicates aren't sorted, move to the right slot. */ - if (dbp->dup_compare == NULL) { - if (flags == DB_KEYFIRST) - iiop = DB_BEFORE; - else - for (;; cp->indx += P_INDX) - if (cp->indx + P_INDX >= - NUM_ENT(cp->page) || - !IS_DUPLICATE(dbc, cp->indx, - cp->indx + P_INDX)) { - iiop = DB_AFTER; - break; - } - break; - } - - /* - * We know that we're looking at the first of a set of sorted - * on-page duplicates. Walk the list to find the right slot. - */ - for (;; cp->indx += P_INDX) { - if ((ret = __bam_cmp(dbp, data, cp->page, - cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0) - goto err; - if (cmp < 0) { - iiop = DB_BEFORE; - break; - } - - /* Disallow "sorted" duplicate duplicates. 
*/ - if (cmp == 0) { - if (IS_DELETED(dbp, cp->page, cp->indx)) { - iiop = DB_CURRENT; - break; - } - ret = __db_duperr(dbp, flags); - goto err; - } - - if (cp->indx + P_INDX >= NUM_ENT(cp->page) || - P_INP(dbp, ((PAGE *)cp->page))[cp->indx] != - P_INP(dbp, ((PAGE *)cp->page))[cp->indx + P_INDX]) { - iiop = DB_AFTER; - break; - } - } - break; - default: - ret = __db_unknown_flag(dbp->dbenv, "__bam_c_put", flags); - goto err; - } - - switch (ret = __bam_iitem(dbc, key, data, iiop, 0)) { - case 0: - break; - case DB_NEEDSPLIT: - /* - * To split, we need a key for the page. Either use the key - * argument or get a copy of the key from the page. - */ - if (flags == DB_AFTER || - flags == DB_BEFORE || flags == DB_CURRENT) { - memset(&dbt, 0, sizeof(DBT)); - if ((ret = __db_ret(dbp, cp->page, 0, &dbt, - &dbc->my_rkey.data, &dbc->my_rkey.ulen)) != 0) - goto err; - arg = &dbt; - } else - arg = F_ISSET(dbc, DBC_OPD) ? data : key; - - /* - * Discard any locks and pinned pages (the locks are discarded - * even if we're running with transactions, as they lock pages - * that we're sorry we ever acquired). If stack is set and the - * cursor entries are valid, they point to the same entries as - * the stack, don't free them twice. - */ - if (stack) - ret = __bam_stkrel(dbc, STK_CLRDBC | STK_NOLOCK); - else - DISCARD_CUR(dbc, ret); - if (ret != 0) - goto err; - - /* - * SR [#6059] - * If we do not own a lock on the page any more, then clear the - * cursor so we don't point at it. Even if we call __bam_stkrel - * above we still may have entered the routine with the cursor - * positioned to a particular record. This is in the case - * where C_RECNUM is set. - */ - if (own == 0) { - cp->pgno = PGNO_INVALID; - cp->indx = 0; - } - - /* Split the tree. */ - if ((ret = __bam_split(dbc, arg, &root_pgno)) != 0) - return (ret); - - goto split; - default: - goto err; - } - -err: -done: /* - * If we inserted a key into the first or last slot of the tree, - * remember where it was so we can do it more quickly next time. - * If the tree has record numbers, we need a complete stack so - * that we can adjust the record counts, so skipping the tree search - * isn't possible. For subdatabases we need to be careful that the - * page does not move from one db to another, so we track its LSN. - * - * If there are duplicates and we are inserting into the last slot, - * the cursor will point _to_ the last item, not after it, which - * is why we subtract P_INDX below. - */ - - t = dbp->bt_internal; - if (ret == 0 && TYPE(cp->page) == P_LBTREE && - (flags == DB_KEYFIRST || flags == DB_KEYLAST) && - !F_ISSET(cp, C_RECNUM) && - (!F_ISSET(dbp, DB_AM_SUBDB) || - (LOGGING_ON(dbp->dbenv) && !F_ISSET(dbp, DB_AM_NOT_DURABLE))) && - ((NEXT_PGNO(cp->page) == PGNO_INVALID && - cp->indx >= NUM_ENT(cp->page) - P_INDX) || - (PREV_PGNO(cp->page) == PGNO_INVALID && cp->indx == 0))) { - t->bt_lpgno = cp->pgno; - if (F_ISSET(dbp, DB_AM_SUBDB)) - t->bt_llsn = LSN(cp->page); - } else - t->bt_lpgno = PGNO_INVALID; - /* - * Discard any pages pinned in the tree and their locks, except for - * the leaf page. Note, the leaf page participated in any stack we - * acquired, and so we have to adjust the stack as necessary. If - * there was only a single page on the stack, we don't have to free - * further stack pages. - */ - if (stack && BT_STK_POP(cp) != NULL) - (void)__bam_stkrel(dbc, 0); - - /* - * Regardless of whether we were successful or not, clear the delete - * flag. 
If we're successful, we either moved the cursor or the item - * is no longer deleted. If we're not successful, then we're just a - * copy, no need to have the flag set. - * - * We may have instantiated off-page duplicate cursors during the put, - * so clear the deleted bit from the off-page duplicate cursor as well. - */ - F_CLR(cp, C_DELETED); - if (cp->opd != NULL) { - cp = (BTREE_CURSOR *)cp->opd->internal; - F_CLR(cp, C_DELETED); - } - - return (ret); -} - -/* - * __bam_c_rget -- - * Return the record number for a cursor. - * - * PUBLIC: int __bam_c_rget __P((DBC *, DBT *)); - */ -int -__bam_c_rget(dbc, data) - DBC *dbc; - DBT *data; -{ - BTREE_CURSOR *cp; - DB *dbp; - DBT dbt; - DB_MPOOLFILE *mpf; - db_recno_t recno; - int exact, ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * Get the page with the current item on it. - * Get a copy of the key. - * Release the page, making sure we don't release it twice. - */ - if ((ret = __memp_fget(mpf, &cp->pgno, 0, &cp->page)) != 0) - return (ret); - memset(&dbt, 0, sizeof(DBT)); - if ((ret = __db_ret(dbp, cp->page, - cp->indx, &dbt, &dbc->my_rkey.data, &dbc->my_rkey.ulen)) != 0) - goto err; - ret = __memp_fput(mpf, cp->page, 0); - cp->page = NULL; - if (ret != 0) - return (ret); - - if ((ret = __bam_search(dbc, PGNO_INVALID, &dbt, - F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND, - 1, &recno, &exact)) != 0) - goto err; - - ret = __db_retcopy(dbp->dbenv, data, - &recno, sizeof(recno), &dbc->rdata->data, &dbc->rdata->ulen); - - /* Release the stack. */ -err: if ((t_ret = __bam_stkrel(dbc, 0)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __bam_c_writelock -- - * Upgrade the cursor to a write lock. - */ -static int -__bam_c_writelock(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - int ret; - - cp = (BTREE_CURSOR *)dbc->internal; - - if (cp->lock_mode == DB_LOCK_WRITE) - return (0); - - /* - * When writing to an off-page duplicate tree, we need to have the - * appropriate page in the primary tree locked. The general DBC - * code calls us first with the primary cursor so we can acquire the - * appropriate lock. - */ - ACQUIRE_WRITE_LOCK(dbc, ret); - return (ret); -} - -/* - * __bam_c_next -- - * Move to the next record. - */ -static int -__bam_c_next(dbc, initial_move, deleted_okay) - DBC *dbc; - int initial_move, deleted_okay; -{ - BTREE_CURSOR *cp; - db_indx_t adjust; - db_lockmode_t lock_mode; - db_pgno_t pgno; - int ret; - - cp = (BTREE_CURSOR *)dbc->internal; - ret = 0; - - /* - * We're either moving through a page of duplicates or a btree leaf - * page. - * - * !!! - * This code handles empty pages and pages with only deleted entries. - */ - if (F_ISSET(dbc, DBC_OPD)) { - adjust = O_INDX; - lock_mode = DB_LOCK_NG; - } else { - adjust = dbc->dbtype == DB_BTREE ? P_INDX : O_INDX; - lock_mode = - F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ; - } - if (cp->page == NULL) { - ACQUIRE_CUR(dbc, lock_mode, cp->pgno, ret); - if (ret != 0) - return (ret); - } - - if (initial_move) - cp->indx += adjust; - - for (;;) { - /* - * If at the end of the page, move to a subsequent page. - * - * !!! - * Check for >= NUM_ENT. If the original search landed us on - * NUM_ENT, we may have incremented indx before the test. 
- */ - if (cp->indx >= NUM_ENT(cp->page)) { - if ((pgno - = NEXT_PGNO(cp->page)) == PGNO_INVALID) - return (DB_NOTFOUND); - - ACQUIRE_CUR(dbc, lock_mode, pgno, ret); - if (ret != 0) - return (ret); - cp->indx = 0; - continue; - } - if (!deleted_okay && IS_CUR_DELETED(dbc)) { - cp->indx += adjust; - continue; - } - break; - } - return (0); -} - -/* - * __bam_c_prev -- - * Move to the previous record. - */ -static int -__bam_c_prev(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - db_indx_t adjust; - db_lockmode_t lock_mode; - db_pgno_t pgno; - int ret; - - cp = (BTREE_CURSOR *)dbc->internal; - ret = 0; - - /* - * We're either moving through a page of duplicates or a btree leaf - * page. - * - * !!! - * This code handles empty pages and pages with only deleted entries. - */ - if (F_ISSET(dbc, DBC_OPD)) { - adjust = O_INDX; - lock_mode = DB_LOCK_NG; - } else { - adjust = dbc->dbtype == DB_BTREE ? P_INDX : O_INDX; - lock_mode = - F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ; - } - if (cp->page == NULL) { - ACQUIRE_CUR(dbc, lock_mode, cp->pgno, ret); - if (ret != 0) - return (ret); - } - - for (;;) { - /* If at the beginning of the page, move to a previous one. */ - if (cp->indx == 0) { - if ((pgno = - PREV_PGNO(cp->page)) == PGNO_INVALID) - return (DB_NOTFOUND); - - ACQUIRE_CUR(dbc, lock_mode, pgno, ret); - if (ret != 0) - return (ret); - - if ((cp->indx = NUM_ENT(cp->page)) == 0) - continue; - } - - /* Ignore deleted records. */ - cp->indx -= adjust; - if (IS_CUR_DELETED(dbc)) - continue; - - break; - } - return (0); -} - -/* - * __bam_c_search -- - * Move to a specified record. - */ -static int -__bam_c_search(dbc, root_pgno, key, flags, exactp) - DBC *dbc; - db_pgno_t root_pgno; - const DBT *key; - u_int32_t flags; - int *exactp; -{ - BTREE *t; - BTREE_CURSOR *cp; - DB *dbp; - PAGE *h; - db_indx_t indx, *inp; - db_pgno_t bt_lpgno; - db_recno_t recno; - u_int32_t sflags; - int cmp, ret, t_ret; - - dbp = dbc->dbp; - cp = (BTREE_CURSOR *)dbc->internal; - t = dbp->bt_internal; - ret = 0; - - /* - * Find an entry in the database. Discard any lock we currently hold, - * we're going to search the tree. - */ - DISCARD_CUR(dbc, ret); - if (ret != 0) - return (ret); - - switch (flags) { - case DB_FIRST: - sflags = (F_ISSET(dbc, DBC_RMW) ? S_WRITE : S_READ) | S_MIN; - goto search; - case DB_LAST: - sflags = (F_ISSET(dbc, DBC_RMW) ? S_WRITE : S_READ) | S_MAX; - goto search; - case DB_SET_RECNO: - if ((ret = __ram_getno(dbc, key, &recno, 0)) != 0) - return (ret); - sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT; - if ((ret = __bam_rsearch(dbc, &recno, sflags, 1, exactp)) != 0) - return (ret); - break; - case DB_SET: - case DB_GET_BOTH: - sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT; - goto search; - case DB_GET_BOTH_RANGE: - sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND); - goto search; - case DB_SET_RANGE: - sflags = - (F_ISSET(dbc, DBC_RMW) ? S_WRITE : S_READ) | S_DUPFIRST; - goto search; - case DB_KEYFIRST: - sflags = S_KEYFIRST; - goto fast_search; - case DB_KEYLAST: - case DB_NODUPDATA: - sflags = S_KEYLAST; -fast_search: /* - * If the application has a history of inserting into the first - * or last pages of the database, we check those pages first to - * avoid doing a full search. - */ - if (F_ISSET(dbc, DBC_OPD)) - goto search; - - /* - * !!! - * We do not mutex protect the t->bt_lpgno field, which means - * that it can only be used in an advisory manner. If we find - * page we can use, great. 
If we don't, we don't care, we do - * it the slow way instead. Regardless, copy it into a local - * variable, otherwise we might acquire a lock for a page and - * then read a different page because it changed underfoot. - */ - bt_lpgno = t->bt_lpgno; - - /* - * If the tree has no history of insertion, do it the slow way. - */ - if (bt_lpgno == PGNO_INVALID) - goto search; - - /* - * Lock and retrieve the page on which we last inserted. - * - * The page may not exist: if a transaction created the page - * and then aborted, the page might have been truncated from - * the end of the file. - */ - h = NULL; - ACQUIRE_CUR(dbc, DB_LOCK_WRITE, bt_lpgno, ret); - if (ret != 0) { - if (ret == DB_PAGE_NOTFOUND) - ret = 0; - goto fast_miss; - } - - h = cp->page; - inp = P_INP(dbp, h); - - /* - * It's okay if the page type isn't right or it's empty, it - * just means that the world changed. - */ - if (TYPE(h) != P_LBTREE || NUM_ENT(h) == 0) - goto fast_miss; - - /* Verify that this page cannot have moved to another db. */ - if (F_ISSET(dbp, DB_AM_SUBDB) && - log_compare(&t->bt_llsn, &LSN(h)) != 0) - goto fast_miss; - /* - * What we do here is test to see if we're at the beginning or - * end of the tree and if the new item sorts before/after the - * first/last page entry. We don't try and catch inserts into - * the middle of the tree (although we could, as long as there - * were two keys on the page and we saved both the index and - * the page number of the last insert). - */ - if (h->next_pgno == PGNO_INVALID) { - indx = NUM_ENT(h) - P_INDX; - if ((ret = __bam_cmp(dbp, - key, h, indx, t->bt_compare, &cmp)) != 0) - return (ret); - - if (cmp < 0) - goto try_begin; - if (cmp > 0) { - indx += P_INDX; - goto fast_hit; - } - - /* - * Found a duplicate. If doing DB_KEYLAST, we're at - * the correct position, otherwise, move to the first - * of the duplicates. If we're looking at off-page - * duplicates, duplicate duplicates aren't permitted, - * so we're done. - */ - if (flags == DB_KEYLAST) - goto fast_hit; - for (; - indx > 0 && inp[indx - P_INDX] == inp[indx]; - indx -= P_INDX) - ; - goto fast_hit; - } -try_begin: if (h->prev_pgno == PGNO_INVALID) { - indx = 0; - if ((ret = __bam_cmp(dbp, - key, h, indx, t->bt_compare, &cmp)) != 0) - return (ret); - - if (cmp > 0) - goto fast_miss; - if (cmp < 0) - goto fast_hit; - - /* - * Found a duplicate. If doing DB_KEYFIRST, we're at - * the correct position, otherwise, move to the last - * of the duplicates. If we're looking at off-page - * duplicates, duplicate duplicates aren't permitted, - * so we're done. - */ - if (flags == DB_KEYFIRST) - goto fast_hit; - for (; - indx < (db_indx_t)(NUM_ENT(h) - P_INDX) && - inp[indx] == inp[indx + P_INDX]; - indx += P_INDX) - ; - goto fast_hit; - } - goto fast_miss; - -fast_hit: /* Set the exact match flag, we may have found a duplicate. */ - *exactp = cmp == 0; - - /* - * Insert the entry in the stack. (Our caller is likely to - * call __bam_stkrel() after our return.) - */ - BT_STK_CLR(cp); - BT_STK_ENTER(dbp->dbenv, - cp, h, indx, cp->lock, cp->lock_mode, ret); - if (ret != 0) - return (ret); - break; - -fast_miss: /* - * This was not the right page, so we do not need to retain - * the lock even in the presence of transactions. - * - * This is also an error path, so ret may have been set. 
- */ - DISCARD_CUR(dbc, ret); - cp->pgno = PGNO_INVALID; - if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - return (ret); - -search: if ((ret = __bam_search(dbc, root_pgno, - key, sflags, 1, NULL, exactp)) != 0) - return (ret); - break; - default: - return (__db_unknown_flag(dbp->dbenv, "__bam_c_search", flags)); - } - /* Initialize the cursor from the stack. */ - cp->page = cp->csp->page; - cp->pgno = cp->csp->page->pgno; - cp->indx = cp->csp->indx; - cp->lock = cp->csp->lock; - cp->lock_mode = cp->csp->lock_mode; - - /* If on an empty page or a deleted record, move to the next one. */ - if (flags == DB_FIRST && - (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))) - if ((ret = __bam_c_next(dbc, 0, 0)) != 0) - return (ret); - if (flags == DB_LAST && - (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))) - if ((ret = __bam_c_prev(dbc)) != 0) - return (ret); - - return (0); -} - -/* - * __bam_c_physdel -- - * Physically remove an item from the page. - */ -static int -__bam_c_physdel(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - DB *dbp; - DBT key; - int delete_page, empty_page, exact, ret; - - dbp = dbc->dbp; - memset(&key, 0, sizeof(DBT)); - cp = (BTREE_CURSOR *)dbc->internal; - delete_page = empty_page = ret = 0; - - /* If the page is going to be emptied, consider deleting it. */ - delete_page = empty_page = - NUM_ENT(cp->page) == (TYPE(cp->page) == P_LBTREE ? 2 : 1); - - /* - * Check if the application turned off reverse splits. Applications - * can't turn off reverse splits in off-page duplicate trees, that - * space will never be reused unless the exact same key is specified. - */ - if (delete_page && - !F_ISSET(dbc, DBC_OPD) && F_ISSET(dbp, DB_AM_REVSPLITOFF)) - delete_page = 0; - - /* - * We never delete the last leaf page. (Not really true -- we delete - * the last leaf page of off-page duplicate trees, but that's handled - * by our caller, not down here.) - */ - if (delete_page && cp->pgno == cp->root) - delete_page = 0; - - /* - * To delete a leaf page other than an empty root page, we need a - * copy of a key from the page. Use the 0th page index since it's - * the last key the page held. - * - * !!! - * Note that because __bam_c_physdel is always called from a cursor - * close, it should be safe to use the cursor's own "my_rkey" memory - * to temporarily hold this key. We shouldn't own any returned-data - * memory of interest--if we do, we're in trouble anyway. - */ - if (delete_page) - if ((ret = __db_ret(dbp, cp->page, - 0, &key, &dbc->my_rkey.data, &dbc->my_rkey.ulen)) != 0) - return (ret); - - /* - * Delete the items. If page isn't empty, we adjust the cursors. - * - * !!! - * The following operations to delete a page may deadlock. The easy - * scenario is if we're deleting an item because we're closing cursors - * because we've already deadlocked and want to call txn->abort. If - * we fail due to deadlock, we'll leave a locked, possibly empty page - * in the tree, which won't be empty long because we'll undo the delete - * when we undo the transaction's modifications. - * - * !!! - * Delete the key item first, otherwise the on-page duplicate checks - * in __bam_ditem() won't work! - */ - if (TYPE(cp->page) == P_LBTREE) { - if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0) - return (ret); - if (!empty_page) - if ((ret = __bam_ca_di(dbc, - PGNO(cp->page), cp->indx, -1)) != 0) - return (ret); - } - if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0) - return (ret); - - /* Clear the deleted flag, the item is gone. 
*/ - F_CLR(cp, C_DELETED); - - if (!empty_page) - if ((ret = __bam_ca_di(dbc, PGNO(cp->page), cp->indx, -1)) != 0) - return (ret); - - /* If we're not going to try and delete the page, we're done. */ - if (!delete_page) - return (0); - - ret = __bam_search(dbc, PGNO_INVALID, &key, S_DEL, 0, NULL, &exact); - - /* - * If everything worked, delete the stack, otherwise, release the - * stack and page locks without further damage. - */ - if (ret == 0) - DISCARD_CUR(dbc, ret); - if (ret == 0) - ret = __bam_dpages(dbc, 1, 0); - else - (void)__bam_stkrel(dbc, 0); - - return (ret); -} - -/* - * __bam_c_getstack -- - * Acquire a full stack for a cursor. - */ -static int -__bam_c_getstack(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - DB *dbp; - DBT dbt; - DB_MPOOLFILE *mpf; - PAGE *h; - int exact, ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * Get the page with the current item on it. The caller of this - * routine has to already hold a read lock on the page, so there - * is no additional lock to acquire. - */ - if ((ret = __memp_fget(mpf, &cp->pgno, 0, &h)) != 0) - return (ret); - - /* Get a copy of a key from the page. */ - memset(&dbt, 0, sizeof(DBT)); - if ((ret = __db_ret(dbp, - h, 0, &dbt, &dbc->my_rkey.data, &dbc->my_rkey.ulen)) != 0) - goto err; - - /* Get a write-locked stack for the page. */ - exact = 0; - ret = __bam_search(dbc, PGNO_INVALID, - &dbt, S_KEYFIRST, 1, NULL, &exact); - -err: /* Discard the key and the page. */ - if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __bam_isopd -- - * Return if the cursor references an off-page duplicate tree via its - * page number. - */ -static int -__bam_isopd(dbc, pgnop) - DBC *dbc; - db_pgno_t *pgnop; -{ - BOVERFLOW *bo; - - if (TYPE(dbc->internal->page) != P_LBTREE) - return (0); - - bo = GET_BOVERFLOW(dbc->dbp, - dbc->internal->page, dbc->internal->indx + O_INDX); - if (B_TYPE(bo->type) == B_DUPLICATE) { - *pgnop = bo->pgno; - return (1); - } - return (0); -} diff --git a/storage/bdb/btree/bt_delete.c b/storage/bdb/btree/bt_delete.c deleted file mode 100644 index 1e54687453f..00000000000 --- a/storage/bdb/btree/bt_delete.c +++ /dev/null @@ -1,643 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: bt_delete.c,v 12.13 2005/10/20 18:14:59 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" - -/* - * __bam_ditem -- - * Delete one or more entries from a page. - * - * PUBLIC: int __bam_ditem __P((DBC *, PAGE *, u_int32_t)); - */ -int -__bam_ditem(dbc, h, indx) - DBC *dbc; - PAGE *h; - u_int32_t indx; -{ - BINTERNAL *bi; - BKEYDATA *bk; - DB *dbp; - DB_MPOOLFILE *mpf; - u_int32_t nbytes; - int ret; - db_indx_t *inp; - - dbp = dbc->dbp; - mpf = dbp->mpf; - inp = P_INP(dbp, h); - - switch (TYPE(h)) { - case P_IBTREE: - bi = GET_BINTERNAL(dbp, h, indx); - switch (B_TYPE(bi->type)) { - case B_DUPLICATE: - case B_KEYDATA: - nbytes = BINTERNAL_SIZE(bi->len); - break; - case B_OVERFLOW: - nbytes = BINTERNAL_SIZE(bi->len); - if ((ret = - __db_doff(dbc, ((BOVERFLOW *)bi->data)->pgno)) != 0) - return (ret); - break; - default: - return (__db_pgfmt(dbp->dbenv, PGNO(h))); - } - break; - case P_IRECNO: - nbytes = RINTERNAL_SIZE; - break; - case P_LBTREE: - /* - * If it's a duplicate key, discard the index and don't touch - * the actual page item. - * - * !!! - * This works because no data item can have an index matching - * any other index so even if the data item is in a key "slot", - * it won't match any other index. - */ - if ((indx % 2) == 0) { - /* - * Check for a duplicate after us on the page. NOTE: - * we have to delete the key item before deleting the - * data item, otherwise the "indx + P_INDX" calculation - * won't work! - */ - if (indx + P_INDX < (u_int32_t)NUM_ENT(h) && - inp[indx] == inp[indx + P_INDX]) - return (__bam_adjindx(dbc, - h, indx, indx + O_INDX, 0)); - /* - * Check for a duplicate before us on the page. It - * doesn't matter if we delete the key item before or - * after the data item for the purposes of this one. - */ - if (indx > 0 && inp[indx] == inp[indx - P_INDX]) - return (__bam_adjindx(dbc, - h, indx, indx - P_INDX, 0)); - } - /* FALLTHROUGH */ - case P_LDUP: - case P_LRECNO: - bk = GET_BKEYDATA(dbp, h, indx); - switch (B_TYPE(bk->type)) { - case B_DUPLICATE: - nbytes = BOVERFLOW_SIZE; - break; - case B_OVERFLOW: - nbytes = BOVERFLOW_SIZE; - if ((ret = __db_doff( - dbc, (GET_BOVERFLOW(dbp, h, indx))->pgno)) != 0) - return (ret); - break; - case B_KEYDATA: - nbytes = BKEYDATA_SIZE(bk->len); - break; - default: - return (__db_pgfmt(dbp->dbenv, PGNO(h))); - } - break; - default: - return (__db_pgfmt(dbp->dbenv, PGNO(h))); - } - - /* Delete the item and mark the page dirty. 
*/ - if ((ret = __db_ditem(dbc, h, indx, nbytes)) != 0) - return (ret); - if ((ret = __memp_fset(mpf, h, DB_MPOOL_DIRTY)) != 0) - return (ret); - - return (0); -} - -/* - * __bam_adjindx -- - * Adjust an index on the page. - * - * PUBLIC: int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int)); - */ -int -__bam_adjindx(dbc, h, indx, indx_copy, is_insert) - DBC *dbc; - PAGE *h; - u_int32_t indx, indx_copy; - int is_insert; -{ - DB *dbp; - DB_MPOOLFILE *mpf; - db_indx_t copy, *inp; - int ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - inp = P_INP(dbp, h); - - /* Log the change. */ - if (DBC_LOGGING(dbc)) { - if ((ret = __bam_adj_log(dbp, dbc->txn, &LSN(h), 0, - PGNO(h), &LSN(h), indx, indx_copy, (u_int32_t)is_insert)) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(h)); - - /* Shuffle the indices and mark the page dirty. */ - if (is_insert) { - copy = inp[indx_copy]; - if (indx != NUM_ENT(h)) - memmove(&inp[indx + O_INDX], &inp[indx], - sizeof(db_indx_t) * (NUM_ENT(h) - indx)); - inp[indx] = copy; - ++NUM_ENT(h); - } else { - --NUM_ENT(h); - if (indx != NUM_ENT(h)) - memmove(&inp[indx], &inp[indx + O_INDX], - sizeof(db_indx_t) * (NUM_ENT(h) - indx)); - } - if ((ret = __memp_fset(mpf, h, DB_MPOOL_DIRTY)) != 0) - return (ret); - - return (0); -} - -/* - * __bam_dpages -- - * Delete a set of locked pages. - * - * PUBLIC: int __bam_dpages __P((DBC *, int, int)); - */ -int -__bam_dpages(dbc, use_top, update) - DBC *dbc; - int use_top; - int update; -{ - BTREE_CURSOR *cp; - BINTERNAL *bi; - DB *dbp; - DBT a, b; - DB_LOCK c_lock, p_lock; - DB_MPOOLFILE *mpf; - EPG *epg, *save_sp, *stack_epg; - PAGE *child, *parent; - db_indx_t nitems; - db_pgno_t pgno, root_pgno; - db_recno_t rcnt; - int done, ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - nitems = 0; - pgno = PGNO_INVALID; - - /* - * We have the entire stack of deletable pages locked. - * - * Btree calls us with the first page in the stack is to have a - * single item deleted, and the rest of the pages are to be removed. - * - * Recno always has a stack to the root and __bam_merge operations - * may have unneeded items in the sack. We find the lowest page - * in the stack that has more than one record in it and start there. - */ - ret = 0; - if (use_top) - stack_epg = cp->sp; - else - for (stack_epg = cp->csp; stack_epg > cp->sp; --stack_epg) - if (NUM_ENT(stack_epg->page) > 1) - break; - epg = stack_epg; - /* - * !!! - * There is an interesting deadlock situation here. We have to relink - * the leaf page chain around the leaf page being deleted. Consider - * a cursor walking through the leaf pages, that has the previous page - * read-locked and is waiting on a lock for the page we're deleting. - * It will deadlock here. Before we unlink the subtree, we relink the - * leaf page chain. - */ - if (LEVEL(cp->csp->page) == 1 && - (ret = __bam_relink(dbc, cp->csp->page, PGNO_INVALID)) != 0) - goto discard; - - /* - * Delete the last item that references the underlying pages that are - * to be deleted, and adjust cursors that reference that page. Then, - * save that page's page number and item count and release it. If - * the application isn't retaining locks because it's running without - * transactions, this lets the rest of the tree get back to business - * immediately. 
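
__bam_adjindx above never moves the item bytes themselves; it only shifts the in-page index array with memmove(). A self-contained sketch of the same shuffle on a plain array follows; the names are illustrative and this is not the real db_indx_t page layout.

#include <stdio.h>
#include <string.h>

/* Insert 'value' at slot 'indx' of inp[0..*nentp), shifting later slots up. */
static void
slot_insert(unsigned short *inp, int *nentp, int indx, unsigned short value)
{
	if (indx != *nentp)
		memmove(&inp[indx + 1], &inp[indx],
		    sizeof(inp[0]) * (size_t)(*nentp - indx));
	inp[indx] = value;
	++*nentp;
}

/* Remove slot 'indx', shifting later slots down. */
static void
slot_delete(unsigned short *inp, int *nentp, int indx)
{
	--*nentp;
	if (indx != *nentp)
		memmove(&inp[indx], &inp[indx + 1],
		    sizeof(inp[0]) * (size_t)(*nentp - indx));
}

int
main(void)
{
	unsigned short inp[8] = { 10, 20, 30 };
	int nent = 3, i;

	slot_insert(inp, &nent, 1, 15);   /* 10 15 20 30 */
	slot_delete(inp, &nent, 2);       /* 10 15 30    */
	for (i = 0; i < nent; ++i)
		printf("%u ", inp[i]);
	printf("\n");
	return (0);
}

The same shuffle serves both directions: inserting a new slot and dropping one, while the referenced item bytes stay where they are on the page.
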
- */ - if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0) - goto discard; - if ((ret = __bam_ca_di(dbc, PGNO(epg->page), epg->indx, -1)) != 0) - goto discard; - - if (update && epg->indx == 0) { - save_sp = cp->csp; - cp->csp = epg; - ret = __bam_pupdate(dbc, epg->page); - cp->csp = save_sp; - if (ret != 0) - goto discard; - } - - pgno = PGNO(epg->page); - nitems = NUM_ENT(epg->page); - - ret = __memp_fput(mpf, epg->page, 0); - if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err_inc; - - /* Then, discard any pages that we don't care about. */ -discard: for (epg = cp->sp; epg < stack_epg; ++epg) { - if ((t_ret = __memp_fput(mpf, epg->page, 0)) != 0 && ret == 0) - ret = t_ret; - epg->page = NULL; - if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) - ret = t_ret; - } - if (ret != 0) - goto err; - - /* Free the rest of the pages in the stack. */ - while (++epg <= cp->csp) { - /* - * Delete page entries so they will be restored as part of - * recovery. We don't need to do cursor adjustment here as - * the pages are being emptied by definition and so cannot - * be referenced by a cursor. - */ - if (NUM_ENT(epg->page) != 0) { - DB_ASSERT(LEVEL(epg->page) != 1); - - if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0) - goto err; - /* - * Sheer paranoia: if we find any pages that aren't - * emptied by the delete, someone else added an item - * while we were walking the tree, and we discontinue - * the delete. Shouldn't be possible, but we check - * regardless. - */ - if (NUM_ENT(epg->page) != 0) - goto err; - } - - ret = __db_free(dbc, epg->page); - if (cp->page == epg->page) - cp->page = NULL; - epg->page = NULL; - if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err_inc; - } - - if (0) { -err_inc: ++epg; -err: for (; epg <= cp->csp; ++epg) { - if (epg->page != NULL) - (void)__memp_fput(mpf, epg->page, 0); - (void)__TLPUT(dbc, epg->lock); - } - BT_STK_CLR(cp); - return (ret); - } - BT_STK_CLR(cp); - - /* - * If we just deleted the next-to-last item from the root page, the - * tree can collapse one or more levels. While there remains only a - * single item on the root page, write lock the last page referenced - * by the root page and copy it over the root page. - */ - root_pgno = cp->root; - if (pgno != root_pgno || nitems != 1) - return (0); - - for (done = 0; !done;) { - /* Initialize. */ - parent = child = NULL; - LOCK_INIT(p_lock); - LOCK_INIT(c_lock); - - /* Lock the root. */ - pgno = root_pgno; - if ((ret = - __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &p_lock)) != 0) - goto stop; - if ((ret = __memp_fget(mpf, &pgno, 0, &parent)) != 0) - goto stop; - - if (NUM_ENT(parent) != 1) - goto stop; - - switch (TYPE(parent)) { - case P_IBTREE: - /* - * If this is overflow, then try to delete it. - * The child may or may not still point at it. - */ - bi = GET_BINTERNAL(dbp, parent, 0); - if (B_TYPE(bi->type) == B_OVERFLOW) - if ((ret = __db_doff(dbc, - ((BOVERFLOW *)bi->data)->pgno)) != 0) - goto stop; - pgno = bi->pgno; - break; - case P_IRECNO: - pgno = GET_RINTERNAL(dbp, parent, 0)->pgno; - break; - default: - goto stop; - } - - /* Lock the child page. */ - if ((ret = - __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &c_lock)) != 0) - goto stop; - if ((ret = __memp_fget(mpf, &pgno, 0, &child)) != 0) - goto stop; - - /* Log the change. 
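
The collapse loop that follows copies the root's sole remaining child over the root page, so the tree loses a level while the root's page number never changes. This is a toy in-memory sketch of that idea only, with hypothetical structures and none of the locking, logging, or record-count bookkeeping.

#include <stdio.h>
#include <stdlib.h>

/* Toy in-memory "page": a node that may hold a single child pointer. */
struct toy_node {
	int nchildren;
	struct toy_node *child;
	int level;              /* 1 == leaf */
	const char *payload;
};

/*
 * While the root keeps exactly one child, copy that child over the root and
 * free the child: the tree loses a level but the root's identity (its page
 * number, here its address) never changes, so references to it stay valid.
 */
static void
collapse_root(struct toy_node *root)
{
	struct toy_node *only;

	while (root->level > 1 && root->nchildren == 1) {
		only = root->child;
		*root = *only;          /* analogue of memcpy(parent, child) */
		free(only);
	}
}

int
main(void)
{
	struct toy_node *leaf = malloc(sizeof(*leaf));
	struct toy_node *mid = malloc(sizeof(*mid));
	struct toy_node root;

	if (leaf == NULL || mid == NULL)
		return (1);
	*leaf = (struct toy_node){ 0, NULL, 1, "last leaf" };
	*mid = (struct toy_node){ 1, leaf, 2, NULL };
	root = (struct toy_node){ 1, mid, 3, NULL };

	collapse_root(&root);                 /* collapses two levels */
	printf("root level %d: %s\n", root.level, root.payload);
	return (0);
}
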
*/ - if (DBC_LOGGING(dbc)) { - memset(&a, 0, sizeof(a)); - a.data = child; - a.size = dbp->pgsize; - memset(&b, 0, sizeof(b)); - b.data = P_ENTRY(dbp, parent, 0); - b.size = TYPE(parent) == P_IRECNO ? RINTERNAL_SIZE : - BINTERNAL_SIZE(((BINTERNAL *)b.data)->len); - if ((ret = __bam_rsplit_log(dbp, dbc->txn, - &child->lsn, 0, PGNO(child), &a, PGNO(parent), - RE_NREC(parent), &b, &parent->lsn)) != 0) - goto stop; - } else - LSN_NOT_LOGGED(child->lsn); - - /* - * Make the switch. - * - * One fixup -- internal pages below the top level do not store - * a record count, so we have to preserve it if we're not - * converting to a leaf page. Note also that we are about to - * overwrite the parent page, including its LSN. This is OK - * because the log message we wrote describing this update - * stores its LSN on the child page. When the child is copied - * onto the parent, the correct LSN is copied into place. - */ - COMPQUIET(rcnt, 0); - if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL) - rcnt = RE_NREC(parent); - memcpy(parent, child, dbp->pgsize); - PGNO(parent) = root_pgno; - if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL) - RE_NREC_SET(parent, rcnt); - - /* Mark the pages dirty. */ - if ((ret = __memp_fset(mpf, parent, DB_MPOOL_DIRTY)) != 0) - goto stop; - if ((ret = __memp_fset(mpf, child, DB_MPOOL_DIRTY)) != 0) - goto stop; - - /* Adjust the cursors. */ - if ((ret = __bam_ca_rsplit(dbc, PGNO(child), root_pgno)) != 0) - goto stop; - - /* - * Free the page copied onto the root page and discard its - * lock. (The call to __db_free() discards our reference - * to the page.) - */ - if ((ret = __db_free(dbc, child)) != 0) { - child = NULL; - goto stop; - } - child = NULL; - - if (0) { -stop: done = 1; - } - if ((t_ret = __TLPUT(dbc, p_lock)) != 0 && ret == 0) - ret = t_ret; - if (parent != NULL && - (t_ret = __memp_fput(mpf, parent, 0)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, c_lock)) != 0 && ret == 0) - ret = t_ret; - if (child != NULL && - (t_ret = __memp_fput(mpf, child, 0)) != 0 && ret == 0) - ret = t_ret; - } - - return (ret); -} - -/* - * __bam_relink -- - * Relink around a deleted page. - * - * PUBLIC: int __bam_relink __P((DBC *, PAGE *, db_pgno_t)); - */ -int -__bam_relink(dbc, pagep, new_pgno) - DBC *dbc; - PAGE *pagep; - db_pgno_t new_pgno; -{ - DB *dbp; - PAGE *np, *pp; - DB_LOCK npl, ppl; - DB_LSN *nlsnp, *plsnp, ret_lsn; - DB_MPOOLFILE *mpf; - int ret, t_ret; - - dbp = dbc->dbp; - np = pp = NULL; - LOCK_INIT(npl); - LOCK_INIT(ppl); - nlsnp = plsnp = NULL; - mpf = dbp->mpf; - ret = 0; - - /* - * Retrieve and lock the one/two pages. For a remove, we may need - * two pages (the before and after). For an add, we only need one - * because, the split took care of the prev. - */ - if (pagep->next_pgno != PGNO_INVALID) { - if ((ret = __db_lget(dbc, - 0, pagep->next_pgno, DB_LOCK_WRITE, 0, &npl)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pagep->next_pgno, 0, &np)) != 0) { - ret = __db_pgerr(dbp, pagep->next_pgno, ret); - goto err; - } - nlsnp = &np->lsn; - } - if (pagep->prev_pgno != PGNO_INVALID) { - if ((ret = __db_lget(dbc, - 0, pagep->prev_pgno, DB_LOCK_WRITE, 0, &ppl)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pagep->prev_pgno, 0, &pp)) != 0) { - ret = __db_pgerr(dbp, pagep->prev_pgno, ret); - goto err; - } - plsnp = &pp->lsn; - } - - /* Log the change. 
*/ - if (DBC_LOGGING(dbc)) { - if ((ret = __bam_relink_log(dbp, dbc->txn, &ret_lsn, 0, - pagep->pgno, new_pgno, pagep->prev_pgno, plsnp, - pagep->next_pgno, nlsnp)) != 0) - goto err; - } else - LSN_NOT_LOGGED(ret_lsn); - if (np != NULL) - np->lsn = ret_lsn; - if (pp != NULL) - pp->lsn = ret_lsn; - - /* - * Modify and release the two pages. - */ - if (np != NULL) { - if (new_pgno == PGNO_INVALID) - np->prev_pgno = pagep->prev_pgno; - else - np->prev_pgno = new_pgno; - ret = __memp_fput(mpf, np, DB_MPOOL_DIRTY); - if ((t_ret = __TLPUT(dbc, npl)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - } - - if (pp != NULL) { - if (new_pgno == PGNO_INVALID) - pp->next_pgno = pagep->next_pgno; - else - pp->next_pgno = new_pgno; - ret = __memp_fput(mpf, pp, DB_MPOOL_DIRTY); - if ((t_ret = __TLPUT(dbc, ppl)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - } - return (0); - -err: if (np != NULL) - (void)__memp_fput(mpf, np, 0); - (void)__TLPUT(dbc, npl); - if (pp != NULL) - (void)__memp_fput(mpf, pp, 0); - (void)__TLPUT(dbc, ppl); - return (ret); -} - -/* - * __bam_pupdate -- - * Update parent key pointers up the tree. - * - * PUBLIC: int __bam_pupdate __P((DBC *, PAGE *)); - */ -int -__bam_pupdate(dbc, lpg) - DBC *dbc; - PAGE *lpg; -{ - BTREE_CURSOR *cp; - DB_ENV *dbenv; - EPG *epg; - int ret; - - dbenv = dbc->dbp->dbenv; - cp = (BTREE_CURSOR *)dbc->internal; - ret = 0; - - /* - * Update the parents up the tree. __bam_pinsert only looks at the - * left child if is a leaf page, so we don't need to change it. We - * just do a delete and insert; a replace is possible but reusing - * pinsert is better. - */ - for (epg = &cp->csp[-1]; epg >= cp->sp; epg--) { - if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0) - return (ret); - epg->indx--; - if ((ret = __bam_pinsert(dbc, epg, - lpg, epg[1].page, BPI_NORECNUM)) != 0) { - if (ret == DB_NEEDSPLIT) { - /* This should not happen. */ - __db_err(dbenv, - "Not enough room in parent: %s: page %lu", - dbc->dbp->fname, (u_long)PGNO(epg->page)); - ret = __db_panic(dbenv, EINVAL); - } - return (ret); - } - } - return (ret); -} diff --git a/storage/bdb/btree/bt_method.c b/storage/bdb/btree/bt_method.c deleted file mode 100644 index c6bfa869fd1..00000000000 --- a/storage/bdb/btree/bt_method.c +++ /dev/null @@ -1,514 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_method.c,v 12.2 2005/06/16 20:20:16 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/btree.h" -#include "dbinc/qam.h" - -static int __bam_set_bt_minkey __P((DB *, u_int32_t)); -static int __bam_set_bt_prefix - __P((DB *, size_t(*)(DB *, const DBT *, const DBT *))); -static int __ram_get_re_delim __P((DB *, int *)); -static int __ram_set_re_delim __P((DB *, int)); -static int __ram_set_re_len __P((DB *, u_int32_t)); -static int __ram_set_re_pad __P((DB *, int)); -static int __ram_get_re_source __P((DB *, const char **)); -static int __ram_set_re_source __P((DB *, const char *)); - -/* - * __bam_db_create -- - * Btree specific initialization of the DB structure. - * - * PUBLIC: int __bam_db_create __P((DB *)); - */ -int -__bam_db_create(dbp) - DB *dbp; -{ - BTREE *t; - int ret; - - /* Allocate and initialize the private btree structure. 
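
__bam_relink above is the classic doubly-linked-list splice, done on prev_pgno/next_pgno fields with both neighbours write-locked and logged. Here is a minimal pointer-based sketch of the same splice; it uses toy types, skips locking, logging and LSN maintenance, and glosses over the asymmetry the real function has between its remove and add cases.

#include <stdio.h>

struct toy_page {
	int pgno;
	struct toy_page *prev, *next;
};

/*
 * Unlink 'p' from the leaf chain, or, if 'repl' is non-NULL, make the
 * neighbours point at 'repl' instead of 'p'.
 */
static void
relink(struct toy_page *p, struct toy_page *repl)
{
	if (p->next != NULL)
		p->next->prev = repl != NULL ? repl : p->prev;
	if (p->prev != NULL)
		p->prev->next = repl != NULL ? repl : p->next;
}

int
main(void)
{
	struct toy_page a = { 1, NULL, NULL };
	struct toy_page b = { 2, NULL, NULL };
	struct toy_page c = { 3, NULL, NULL };

	a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

	relink(&b, NULL);         /* drop page 2 from the chain */
	printf("a.next=%d c.prev=%d\n", a.next->pgno, c.prev->pgno);
	return (0);
}
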
*/ - if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(BTREE), &t)) != 0) - return (ret); - dbp->bt_internal = t; - - t->bt_minkey = DEFMINKEYPAGE; /* Btree */ - t->bt_compare = __bam_defcmp; - t->bt_prefix = __bam_defpfx; - - dbp->set_bt_compare = __bam_set_bt_compare; - dbp->get_bt_minkey = __bam_get_bt_minkey; - dbp->set_bt_minkey = __bam_set_bt_minkey; - dbp->set_bt_prefix = __bam_set_bt_prefix; - - t->re_pad = ' '; /* Recno */ - t->re_delim = '\n'; - t->re_eof = 1; - - dbp->get_re_delim = __ram_get_re_delim; - dbp->set_re_delim = __ram_set_re_delim; - dbp->get_re_len = __ram_get_re_len; - dbp->set_re_len = __ram_set_re_len; - dbp->get_re_pad = __ram_get_re_pad; - dbp->set_re_pad = __ram_set_re_pad; - dbp->get_re_source = __ram_get_re_source; - dbp->set_re_source = __ram_set_re_source; - - return (0); -} - -/* - * __bam_db_close -- - * Btree specific discard of the DB structure. - * - * PUBLIC: int __bam_db_close __P((DB *)); - */ -int -__bam_db_close(dbp) - DB *dbp; -{ - BTREE *t; - - if ((t = dbp->bt_internal) == NULL) - return (0); - /* Recno */ - /* Close any backing source file descriptor. */ - if (t->re_fp != NULL) - (void)fclose(t->re_fp); - - /* Free any backing source file name. */ - if (t->re_source != NULL) - __os_free(dbp->dbenv, t->re_source); - - __os_free(dbp->dbenv, t); - dbp->bt_internal = NULL; - - return (0); -} - -/* - * __bam_map_flags -- - * Map Btree specific flags from public to the internal values. - * - * PUBLIC: void __bam_map_flags __P((DB *, u_int32_t *, u_int32_t *)); - */ -void -__bam_map_flags(dbp, inflagsp, outflagsp) - DB *dbp; - u_int32_t *inflagsp, *outflagsp; -{ - COMPQUIET(dbp, NULL); - - if (FLD_ISSET(*inflagsp, DB_DUP)) { - FLD_SET(*outflagsp, DB_AM_DUP); - FLD_CLR(*inflagsp, DB_DUP); - } - if (FLD_ISSET(*inflagsp, DB_DUPSORT)) { - FLD_SET(*outflagsp, DB_AM_DUP | DB_AM_DUPSORT); - FLD_CLR(*inflagsp, DB_DUPSORT); - } - if (FLD_ISSET(*inflagsp, DB_RECNUM)) { - FLD_SET(*outflagsp, DB_AM_RECNUM); - FLD_CLR(*inflagsp, DB_RECNUM); - } - if (FLD_ISSET(*inflagsp, DB_REVSPLITOFF)) { - FLD_SET(*outflagsp, DB_AM_REVSPLITOFF); - FLD_CLR(*inflagsp, DB_REVSPLITOFF); - } -} - -/* - * __bam_set_flags -- - * Set Btree specific flags. - * - * PUBLIC: int __bam_set_flags __P((DB *, u_int32_t *flagsp)); - */ -int -__bam_set_flags(dbp, flagsp) - DB *dbp; - u_int32_t *flagsp; -{ - u_int32_t flags; - - flags = *flagsp; - if (LF_ISSET(DB_DUP | DB_DUPSORT | DB_RECNUM | DB_REVSPLITOFF)) - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags"); - - /* - * The DB_DUP and DB_DUPSORT flags are shared by the Hash - * and Btree access methods. - */ - if (LF_ISSET(DB_DUP | DB_DUPSORT)) - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH); - - if (LF_ISSET(DB_RECNUM | DB_REVSPLITOFF)) - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE); - - /* DB_DUP/DB_DUPSORT is incompatible with DB_RECNUM. */ - if (LF_ISSET(DB_DUP | DB_DUPSORT) && F_ISSET(dbp, DB_AM_RECNUM)) - goto incompat; - - /* DB_RECNUM is incompatible with DB_DUP/DB_DUPSORT. */ - if (LF_ISSET(DB_RECNUM) && F_ISSET(dbp, DB_AM_DUP)) - goto incompat; - - if (LF_ISSET(DB_DUPSORT) && dbp->dup_compare == NULL) - dbp->dup_compare = __bam_defcmp; - - __bam_map_flags(dbp, flagsp, &dbp->flags); - return (0); - -incompat: - return (__db_ferr(dbp->dbenv, "DB->set_flags", 1)); -} - -/* - * __bam_set_bt_compare -- - * Set the comparison function. 
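
__bam_map_flags and __bam_set_flags above translate public DB_* flag bits into internal DB_AM_* bits and reject incompatible combinations. The following compact stand-alone sketch shows the same pattern; the flag values are made up and are not the real DB constants.

#include <stdio.h>

/* Hypothetical public and internal flag bits, for illustration only. */
#define PUB_DUP        0x01
#define PUB_DUPSORT    0x02
#define PUB_RECNUM     0x04

#define AM_DUP         0x10
#define AM_DUPSORT     0x20
#define AM_RECNUM      0x40

/* Map public bits to internal bits, clearing each one as it is consumed. */
static int
map_flags(unsigned *inflagsp, unsigned *outflagsp)
{
	if (*inflagsp & PUB_DUP) {
		*outflagsp |= AM_DUP;
		*inflagsp &= ~(unsigned)PUB_DUP;
	}
	if (*inflagsp & PUB_DUPSORT) {
		*outflagsp |= AM_DUP | AM_DUPSORT;
		*inflagsp &= ~(unsigned)PUB_DUPSORT;
	}
	if (*inflagsp & PUB_RECNUM) {
		*outflagsp |= AM_RECNUM;
		*inflagsp &= ~(unsigned)PUB_RECNUM;
	}
	/* Duplicates and record numbering are mutually exclusive. */
	if ((*outflagsp & AM_DUP) && (*outflagsp & AM_RECNUM))
		return (-1);
	return (0);
}

int
main(void)
{
	unsigned in = PUB_DUPSORT, out = 0;

	if (map_flags(&in, &out) == 0)
		printf("internal flags: 0x%x, unconsumed: 0x%x\n", out, in);
	return (0);
}
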
- * - * PUBLIC: int __bam_set_bt_compare - * PUBLIC: __P((DB *, int (*)(DB *, const DBT *, const DBT *))); - */ -int -__bam_set_bt_compare(dbp, func) - DB *dbp; - int (*func) __P((DB *, const DBT *, const DBT *)); -{ - BTREE *t; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_bt_compare"); - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE); - - t = dbp->bt_internal; - - /* - * Can't default the prefix routine if the user supplies a comparison - * routine; shortening the keys can break their comparison algorithm. - */ - t->bt_compare = func; - if (t->bt_prefix == __bam_defpfx) - t->bt_prefix = NULL; - - return (0); -} - -/* - * __db_get_bt_minkey -- - * Get the minimum keys per page. - * - * PUBLIC: int __bam_get_bt_minkey __P((DB *, u_int32_t *)); - */ -int -__bam_get_bt_minkey(dbp, bt_minkeyp) - DB *dbp; - u_int32_t *bt_minkeyp; -{ - BTREE *t; - - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE); - - t = dbp->bt_internal; - *bt_minkeyp = t->bt_minkey; - return (0); -} - -/* - * __bam_set_bt_minkey -- - * Set the minimum keys per page. - */ -static int -__bam_set_bt_minkey(dbp, bt_minkey) - DB *dbp; - u_int32_t bt_minkey; -{ - BTREE *t; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_bt_minkey"); - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE); - - t = dbp->bt_internal; - - if (bt_minkey < 2) { - __db_err(dbp->dbenv, "minimum bt_minkey value is 2"); - return (EINVAL); - } - - t->bt_minkey = bt_minkey; - return (0); -} - -/* - * __bam_set_bt_prefix -- - * Set the prefix function. - */ -static int -__bam_set_bt_prefix(dbp, func) - DB *dbp; - size_t (*func) __P((DB *, const DBT *, const DBT *)); -{ - BTREE *t; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_bt_prefix"); - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE); - - t = dbp->bt_internal; - - t->bt_prefix = func; - return (0); -} - -/* - * __ram_map_flags -- - * Map Recno specific flags from public to the internal values. - * - * PUBLIC: void __ram_map_flags __P((DB *, u_int32_t *, u_int32_t *)); - */ -void -__ram_map_flags(dbp, inflagsp, outflagsp) - DB *dbp; - u_int32_t *inflagsp, *outflagsp; -{ - COMPQUIET(dbp, NULL); - - if (FLD_ISSET(*inflagsp, DB_RENUMBER)) { - FLD_SET(*outflagsp, DB_AM_RENUMBER); - FLD_CLR(*inflagsp, DB_RENUMBER); - } - if (FLD_ISSET(*inflagsp, DB_SNAPSHOT)) { - FLD_SET(*outflagsp, DB_AM_SNAPSHOT); - FLD_CLR(*inflagsp, DB_SNAPSHOT); - } -} - -/* - * __ram_set_flags -- - * Set Recno specific flags. - * - * PUBLIC: int __ram_set_flags __P((DB *, u_int32_t *flagsp)); - */ -int -__ram_set_flags(dbp, flagsp) - DB *dbp; - u_int32_t *flagsp; -{ - u_int32_t flags; - - flags = *flagsp; - if (LF_ISSET(DB_RENUMBER | DB_SNAPSHOT)) { - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags"); - DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO); - } - - __ram_map_flags(dbp, flagsp, &dbp->flags); - return (0); -} - -/* - * __db_get_re_delim -- - * Get the variable-length input record delimiter. - */ -static int -__ram_get_re_delim(dbp, re_delimp) - DB *dbp; - int *re_delimp; -{ - BTREE *t; - - DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO); - t = dbp->bt_internal; - *re_delimp = t->re_delim; - return (0); -} - -/* - * __ram_set_re_delim -- - * Set the variable-length input record delimiter. - */ -static int -__ram_set_re_delim(dbp, re_delim) - DB *dbp; - int re_delim; -{ - BTREE *t; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_re_delim"); - DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO); - - t = dbp->bt_internal; - - t->re_delim = re_delim; - F_SET(dbp, DB_AM_DELIMITER); - - return (0); -} - -/* - * __db_get_re_len -- - * Get the variable-length input record length. 
- * - * PUBLIC: int __ram_get_re_len __P((DB *, u_int32_t *)); - */ -int -__ram_get_re_len(dbp, re_lenp) - DB *dbp; - u_int32_t *re_lenp; -{ - BTREE *t; - QUEUE *q; - - DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO); - - /* - * This has to work for all access methods, before or after opening the - * database. When the record length is set with __ram_set_re_len, the - * value in both the BTREE and QUEUE structs will be correct. - * Otherwise, this only makes sense after the database in opened, in - * which case we know the type. - */ - if (dbp->type == DB_QUEUE) { - q = dbp->q_internal; - *re_lenp = q->re_len; - } else { - t = dbp->bt_internal; - *re_lenp = t->re_len; - } - - return (0); -} - -/* - * __ram_set_re_len -- - * Set the variable-length input record length. - */ -static int -__ram_set_re_len(dbp, re_len) - DB *dbp; - u_int32_t re_len; -{ - BTREE *t; - QUEUE *q; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_re_len"); - DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO); - - t = dbp->bt_internal; - t->re_len = re_len; - - q = dbp->q_internal; - q->re_len = re_len; - - F_SET(dbp, DB_AM_FIXEDLEN); - - return (0); -} - -/* - * __db_get_re_pad -- - * Get the fixed-length record pad character. - * - * PUBLIC: int __ram_get_re_pad __P((DB *, int *)); - */ -int -__ram_get_re_pad(dbp, re_padp) - DB *dbp; - int *re_padp; -{ - BTREE *t; - QUEUE *q; - - DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO); - - /* - * This has to work for all access methods, before or after opening the - * database. When the record length is set with __ram_set_re_pad, the - * value in both the BTREE and QUEUE structs will be correct. - * Otherwise, this only makes sense after the database in opened, in - * which case we know the type. - */ - if (dbp->type == DB_QUEUE) { - q = dbp->q_internal; - *re_padp = q->re_pad; - } else { - t = dbp->bt_internal; - *re_padp = t->re_pad; - } - - return (0); -} - -/* - * __ram_set_re_pad -- - * Set the fixed-length record pad character. - */ -static int -__ram_set_re_pad(dbp, re_pad) - DB *dbp; - int re_pad; -{ - BTREE *t; - QUEUE *q; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_re_pad"); - DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO); - - t = dbp->bt_internal; - t->re_pad = re_pad; - - q = dbp->q_internal; - q->re_pad = re_pad; - - F_SET(dbp, DB_AM_PAD); - - return (0); -} - -/* - * __db_get_re_source -- - * Get the backing source file name. - */ -static int -__ram_get_re_source(dbp, re_sourcep) - DB *dbp; - const char **re_sourcep; -{ - BTREE *t; - - DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO); - - t = dbp->bt_internal; - *re_sourcep = t->re_source; - return (0); -} - -/* - * __ram_set_re_source -- - * Set the backing source file name. - */ -static int -__ram_set_re_source(dbp, re_source) - DB *dbp; - const char *re_source; -{ - BTREE *t; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_re_source"); - DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO); - - t = dbp->bt_internal; - - return (__os_strdup(dbp->dbenv, re_source, &t->re_source)); -} diff --git a/storage/bdb/btree/bt_open.c b/storage/bdb/btree/bt_open.c deleted file mode 100644 index d1fcaa76597..00000000000 --- a/storage/bdb/btree/bt_open.c +++ /dev/null @@ -1,607 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. 
- * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: bt_open.c,v 12.5 2005/09/28 17:44:17 margo Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/crypto.h" -#include "dbinc/db_page.h" -#include "dbinc/db_swap.h" -#include "dbinc/btree.h" -#include "dbinc/db_shash.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/fop.h" - -static void __bam_init_meta __P((DB *, BTMETA *, db_pgno_t, DB_LSN *)); - -/* - * __bam_open -- - * Open a btree. - * - * PUBLIC: int __bam_open __P((DB *, - * PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t)); - */ -int -__bam_open(dbp, txn, name, base_pgno, flags) - DB *dbp; - DB_TXN *txn; - const char *name; - db_pgno_t base_pgno; - u_int32_t flags; -{ - BTREE *t; - - COMPQUIET(name, NULL); - t = dbp->bt_internal; - - /* - * We don't permit the user to specify a prefix routine if they didn't - * also specify a comparison routine, they can't know enough about our - * comparison routine to get it right. - */ - if (t->bt_compare == __bam_defcmp && t->bt_prefix != __bam_defpfx) { - __db_err(dbp->dbenv, -"prefix comparison may not be specified for default comparison routine"); - return (EINVAL); - } - - /* - * Verify that the bt_minkey value specified won't cause the - * calculation of ovflsize to underflow [#2406] for this pagesize. - */ - if (B_MINKEY_TO_OVFLSIZE(dbp, t->bt_minkey, dbp->pgsize) > - B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) { - __db_err(dbp->dbenv, - "bt_minkey value of %lu too high for page size of %lu", - (u_long)t->bt_minkey, (u_long)dbp->pgsize); - return (EINVAL); - } - - /* Start up the tree. 
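
The bt_minkey check in __bam_open above exists because the overflow threshold is derived from the page size and bt_minkey: a page must be able to hold bt_minkey key/data pairs, so raising bt_minkey lowers the size at which a single item is pushed to overflow pages. The sketch below is a simplified stand-in for that calculation with made-up overhead constants; the real B_MINKEY_TO_OVFLSIZE macro also accounts for on-page structure sizes and alignment.

#include <stdio.h>

#define TOY_PAGE_OVERHEAD  26   /* made-up per-page header size  */
#define TOY_ITEM_OVERHEAD  5    /* made-up per-item header size  */

/* Largest single item kept on-page before it is moved to overflow pages. */
static unsigned
toy_ovflsize(unsigned pagesize, unsigned minkey)
{
	unsigned usable = pagesize - TOY_PAGE_OVERHEAD;

	/* minkey pairs, two items per pair, minus fixed per-item overhead. */
	return (usable / (minkey * 2) - TOY_ITEM_OVERHEAD);
}

int
main(void)
{
	printf("4KB page, minkey 2: items over %u bytes overflow\n",
	    toy_ovflsize(4096, 2));
	printf("4KB page, minkey 8: items over %u bytes overflow\n",
	    toy_ovflsize(4096, 8));
	return (0);
}
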
*/ - return (__bam_read_root(dbp, txn, base_pgno, flags)); -} - -/* - * __bam_metachk -- - * - * PUBLIC: int __bam_metachk __P((DB *, const char *, BTMETA *)); - */ -int -__bam_metachk(dbp, name, btm) - DB *dbp; - const char *name; - BTMETA *btm; -{ - DB_ENV *dbenv; - u_int32_t vers; - int ret; - - dbenv = dbp->dbenv; - - /* - * At this point, all we know is that the magic number is for a Btree. - * Check the version, the database may be out of date. - */ - vers = btm->dbmeta.version; - if (F_ISSET(dbp, DB_AM_SWAP)) - M_32_SWAP(vers); - switch (vers) { - case 6: - case 7: - __db_err(dbenv, - "%s: btree version %lu requires a version upgrade", - name, (u_long)vers); - return (DB_OLD_VERSION); - case 8: - case 9: - break; - default: - __db_err(dbenv, - "%s: unsupported btree version: %lu", name, (u_long)vers); - return (EINVAL); - } - - /* Swap the page if we need to. */ - if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __bam_mswap((PAGE *)btm)) != 0) - return (ret); - - /* - * Check application info against metadata info, and set info, flags, - * and type based on metadata info. - */ - if ((ret = - __db_fchk(dbenv, "DB->open", btm->dbmeta.flags, BTM_MASK)) != 0) - return (ret); - - if (F_ISSET(&btm->dbmeta, BTM_RECNO)) { - if (dbp->type == DB_BTREE) - goto wrong_type; - dbp->type = DB_RECNO; - DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO); - } else { - if (dbp->type == DB_RECNO) - goto wrong_type; - dbp->type = DB_BTREE; - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE); - } - - if (F_ISSET(&btm->dbmeta, BTM_DUP)) - F_SET(dbp, DB_AM_DUP); - else - if (F_ISSET(dbp, DB_AM_DUP)) { - __db_err(dbenv, - "%s: DB_DUP specified to open method but not set in database", - name); - return (EINVAL); - } - - if (F_ISSET(&btm->dbmeta, BTM_RECNUM)) { - if (dbp->type != DB_BTREE) - goto wrong_type; - F_SET(dbp, DB_AM_RECNUM); - - if ((ret = __db_fcchk(dbenv, - "DB->open", dbp->flags, DB_AM_DUP, DB_AM_RECNUM)) != 0) - return (ret); - } else - if (F_ISSET(dbp, DB_AM_RECNUM)) { - __db_err(dbenv, - "%s: DB_RECNUM specified to open method but not set in database", - name); - return (EINVAL); - } - - if (F_ISSET(&btm->dbmeta, BTM_FIXEDLEN)) { - if (dbp->type != DB_RECNO) - goto wrong_type; - F_SET(dbp, DB_AM_FIXEDLEN); - } else - if (F_ISSET(dbp, DB_AM_FIXEDLEN)) { - __db_err(dbenv, - "%s: DB_FIXEDLEN specified to open method but not set in database", - name); - return (EINVAL); - } - - if (F_ISSET(&btm->dbmeta, BTM_RENUMBER)) { - if (dbp->type != DB_RECNO) - goto wrong_type; - F_SET(dbp, DB_AM_RENUMBER); - } else - if (F_ISSET(dbp, DB_AM_RENUMBER)) { - __db_err(dbenv, - "%s: DB_RENUMBER specified to open method but not set in database", - name); - return (EINVAL); - } - - if (F_ISSET(&btm->dbmeta, BTM_SUBDB)) - F_SET(dbp, DB_AM_SUBDB); - else - if (F_ISSET(dbp, DB_AM_SUBDB)) { - __db_err(dbenv, - "%s: multiple databases specified but not supported by file", - name); - return (EINVAL); - } - - if (F_ISSET(&btm->dbmeta, BTM_DUPSORT)) { - if (dbp->dup_compare == NULL) - dbp->dup_compare = __bam_defcmp; - F_SET(dbp, DB_AM_DUPSORT); - } else - if (dbp->dup_compare != NULL) { - __db_err(dbenv, - "%s: duplicate sort specified but not supported in database", - name); - return (EINVAL); - } - - /* Set the page size. */ - dbp->pgsize = btm->dbmeta.pagesize; - - /* Copy the file's ID. 
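
__bam_metachk above cross-checks, in both directions, the flags the application passed to DB->open against the flags recorded in the on-disk meta page: a flag set in the database is adopted, while a flag requested but absent from the database is an error. A small sketch of that two-way comparison follows, with toy flag names only.

#include <stdio.h>

#define F_DUP     0x1u
#define F_RECNUM  0x2u

/*
 * Return 0 if the flags requested at open time are consistent with the
 * flags stored when the database was created; otherwise report the first
 * request that the database cannot honour.
 */
static int
check_meta_flags(unsigned requested, unsigned stored)
{
	unsigned bit;
	const char *name;

	for (bit = F_DUP; bit <= F_RECNUM; bit <<= 1) {
		name = bit == F_DUP ? "DB_DUP" : "DB_RECNUM";
		if ((stored & bit) && !(requested & bit))
			printf("%s set in database, adopting it\n", name);
		else if (!(stored & bit) && (requested & bit)) {
			printf("%s requested but not set in database\n", name);
			return (-1);
		}
	}
	return (0);
}

int
main(void)
{
	return (check_meta_flags(F_RECNUM, F_DUP) == 0 ? 0 : 1);
}
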
*/ - memcpy(dbp->fileid, btm->dbmeta.uid, DB_FILE_ID_LEN); - - return (0); - -wrong_type: - if (dbp->type == DB_BTREE) - __db_err(dbenv, - "open method type is Btree, database type is Recno"); - else - __db_err(dbenv, - "open method type is Recno, database type is Btree"); - return (EINVAL); -} - -/* - * __bam_read_root -- - * Read the root page and check a tree. - * - * PUBLIC: int __bam_read_root __P((DB *, DB_TXN *, db_pgno_t, u_int32_t)); - */ -int -__bam_read_root(dbp, txn, base_pgno, flags) - DB *dbp; - DB_TXN *txn; - db_pgno_t base_pgno; - u_int32_t flags; -{ - BTMETA *meta; - BTREE *t; - DBC *dbc; - DB_LOCK metalock; - DB_MPOOLFILE *mpf; - int ret, t_ret; - - COMPQUIET(flags, 0); - meta = NULL; - t = dbp->bt_internal; - LOCK_INIT(metalock); - mpf = dbp->mpf; - ret = 0; - - /* Get a cursor. */ - if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) - return (ret); - - /* Get the metadata page. */ - if ((ret = - __db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &base_pgno, 0, &meta)) != 0) - goto err; - - /* - * If the magic number is set, the tree has been created. Correct - * any fields that may not be right. Note, all of the local flags - * were set by DB->open. - * - * Otherwise, we'd better be in recovery or abort, in which case the - * metadata page will be created/initialized elsewhere. - */ - if (meta->dbmeta.magic == DB_BTREEMAGIC) { - t->bt_minkey = meta->minkey; - t->re_pad = (int)meta->re_pad; - t->re_len = meta->re_len; - - t->bt_meta = base_pgno; - t->bt_root = meta->root; - } else { - DB_ASSERT(IS_RECOVERING(dbp->dbenv) || - F_ISSET(dbp, DB_AM_RECOVER)); - } - - /* - * !!! - * If creating a subdatabase, we've already done an insert when - * we put the subdatabase's entry into the master database, so - * our last-page-inserted value is wrongly initialized for the - * master database, not the subdatabase we're creating. I'm not - * sure where the *right* place to clear this value is, it's not - * intuitively obvious that it belongs here. - */ - t->bt_lpgno = PGNO_INVALID; - -err: /* Put the metadata page back. */ - if (meta != NULL && - (t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __bam_init_meta -- - * - * Initialize a btree meta-data page. The following fields may need - * to be updated later: last_pgno, root. 
- */ -static void -__bam_init_meta(dbp, meta, pgno, lsnp) - DB *dbp; - BTMETA *meta; - db_pgno_t pgno; - DB_LSN *lsnp; -{ - BTREE *t; - - memset(meta, 0, sizeof(BTMETA)); - meta->dbmeta.lsn = *lsnp; - meta->dbmeta.pgno = pgno; - meta->dbmeta.magic = DB_BTREEMAGIC; - meta->dbmeta.version = DB_BTREEVERSION; - meta->dbmeta.pagesize = dbp->pgsize; - if (F_ISSET(dbp, DB_AM_CHKSUM)) - FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM); - if (F_ISSET(dbp, DB_AM_ENCRYPT)) { - meta->dbmeta.encrypt_alg = - ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg; - DB_ASSERT(meta->dbmeta.encrypt_alg != 0); - meta->crypto_magic = meta->dbmeta.magic; - } - meta->dbmeta.type = P_BTREEMETA; - meta->dbmeta.free = PGNO_INVALID; - meta->dbmeta.last_pgno = pgno; - if (F_ISSET(dbp, DB_AM_DUP)) - F_SET(&meta->dbmeta, BTM_DUP); - if (F_ISSET(dbp, DB_AM_FIXEDLEN)) - F_SET(&meta->dbmeta, BTM_FIXEDLEN); - if (F_ISSET(dbp, DB_AM_RECNUM)) - F_SET(&meta->dbmeta, BTM_RECNUM); - if (F_ISSET(dbp, DB_AM_RENUMBER)) - F_SET(&meta->dbmeta, BTM_RENUMBER); - if (F_ISSET(dbp, DB_AM_SUBDB)) - F_SET(&meta->dbmeta, BTM_SUBDB); - if (dbp->dup_compare != NULL) - F_SET(&meta->dbmeta, BTM_DUPSORT); - if (dbp->type == DB_RECNO) - F_SET(&meta->dbmeta, BTM_RECNO); - memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN); - - t = dbp->bt_internal; - meta->minkey = t->bt_minkey; - meta->re_len = t->re_len; - meta->re_pad = (u_int32_t)t->re_pad; -} - -/* - * __bam_new_file -- - * Create the necessary pages to begin a new database file. - * - * This code appears more complex than it is because of the two cases (named - * and unnamed). The way to read the code is that for each page being created, - * there are three parts: 1) a "get page" chunk (which either uses malloc'd - * memory or calls __memp_fget), 2) the initialization, and 3) the "put page" - * chunk which either does a fop write or an __memp_fput. - * - * PUBLIC: int __bam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *)); - */ -int -__bam_new_file(dbp, txn, fhp, name) - DB *dbp; - DB_TXN *txn; - DB_FH *fhp; - const char *name; -{ - BTMETA *meta; - DB_ENV *dbenv; - DB_LSN lsn; - DB_MPOOLFILE *mpf; - DB_PGINFO pginfo; - DBT pdbt; - PAGE *root; - db_pgno_t pgno; - int ret, t_ret; - void *buf; - - dbenv = dbp->dbenv; - mpf = dbp->mpf; - root = NULL; - meta = NULL; - buf = NULL; - - if (F_ISSET(dbp, DB_AM_INMEM)) { - /* Build the meta-data page. */ - pgno = PGNO_BASE_MD; - if ((ret = - __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &meta)) != 0) - return (ret); - LSN_NOT_LOGGED(lsn); - __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); - meta->root = 1; - meta->dbmeta.last_pgno = 1; - if ((ret = - __db_log_page(dbp, txn, &lsn, pgno, (PAGE *)meta)) != 0) - goto err; - ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); - meta = NULL; - if (ret != 0) - goto err; - - /* Build the root page. */ - pgno = 1; - if ((ret = - __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &root)) != 0) - goto err; - P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID, - LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE); - LSN_NOT_LOGGED(root->lsn); - if ((ret = - __db_log_page(dbp, txn, &root->lsn, pgno, root)) != 0) - goto err; - ret = __memp_fput(mpf, root, DB_MPOOL_DIRTY); - root = NULL; - if (ret != 0) - goto err; - } else { - memset(&pdbt, 0, sizeof(pdbt)); - - /* Build the meta-data page. 
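
__bam_init_meta above stamps a freshly allocated meta page with the magic number, version, page size, and flag bits before the root page number is known. Below is a toy version of that initialization; the struct is hypothetical and much smaller than the real BTMETA layout.

#include <stdio.h>
#include <string.h>

/* Toy meta-data header; the real BTMETA has many more fields. */
struct toy_meta {
	unsigned magic;
	unsigned version;
	unsigned pagesize;
	unsigned flags;
	unsigned root_pgno;
	unsigned last_pgno;
};

#define TOY_MAGIC    0x053162   /* stand-in for the btree magic number */
#define TOY_VERSION  9
#define TOY_F_DUP    0x1

static void
toy_init_meta(struct toy_meta *m, unsigned pagesize, unsigned pgno, int dups)
{
	memset(m, 0, sizeof(*m));       /* unknown fields default to zero    */
	m->magic = TOY_MAGIC;
	m->version = TOY_VERSION;
	m->pagesize = pagesize;
	m->last_pgno = pgno;            /* updated later as pages are added  */
	if (dups)
		m->flags |= TOY_F_DUP;
	/* root_pgno is filled in by the caller once the root page exists.  */
}

int
main(void)
{
	struct toy_meta m;

	toy_init_meta(&m, 4096, 0, 1);
	m.root_pgno = 1;                /* meta is page 0, root is page 1   */
	m.last_pgno = 1;
	printf("magic %#x version %u root %u\n", m.magic, m.version, m.root_pgno);
	return (0);
}
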
*/ - pginfo.db_pagesize = dbp->pgsize; - pginfo.flags = - F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP)); - pginfo.type = dbp->type; - pdbt.data = &pginfo; - pdbt.size = sizeof(pginfo); - if ((ret = __os_calloc(dbenv, 1, dbp->pgsize, &buf)) != 0) - return (ret); - meta = (BTMETA *)buf; - LSN_NOT_LOGGED(lsn); - __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); - meta->root = 1; - meta->dbmeta.last_pgno = 1; - if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0) - goto err; - if ((ret = __fop_write(dbenv, txn, name, DB_APP_DATA, fhp, - dbp->pgsize, 0, 0, buf, dbp->pgsize, 1, F_ISSET( - dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0)) != 0) - goto err; - meta = NULL; - - /* Build the root page. */ -#ifdef DIAGNOSTIC - memset(buf, CLEAR_BYTE, dbp->pgsize); -#endif - root = (PAGE *)buf; - P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID, - LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE); - LSN_NOT_LOGGED(root->lsn); - if ((ret = __db_pgout(dbenv, root->pgno, root, &pdbt)) != 0) - goto err; - if ((ret = __fop_write(dbenv, txn, name, DB_APP_DATA, fhp, - dbp->pgsize, 1, 0, buf, dbp->pgsize, 1, F_ISSET( - dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0)) != 0) - goto err; - root = NULL; - } - -err: if (buf != NULL) - __os_free(dbenv, buf); - else { - if (meta != NULL && - (t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) - ret = t_ret; - if (root != NULL && - (t_ret = __memp_fput(mpf, root, 0)) != 0 && ret == 0) - ret = t_ret; - } - return (ret); -} - -/* - * __bam_new_subdb -- - * Create a metadata page and a root page for a new btree. - * - * PUBLIC: int __bam_new_subdb __P((DB *, DB *, DB_TXN *)); - */ -int -__bam_new_subdb(mdbp, dbp, txn) - DB *mdbp, *dbp; - DB_TXN *txn; -{ - BTMETA *meta; - DBC *dbc; - DB_ENV *dbenv; - DB_LOCK metalock; - DB_LSN lsn; - DB_MPOOLFILE *mpf; - PAGE *root; - int ret, t_ret; - - dbenv = mdbp->dbenv; - mpf = mdbp->mpf; - dbc = NULL; - meta = NULL; - root = NULL; - - if ((ret = __db_cursor(mdbp, txn, - &dbc, CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0)) != 0) - return (ret); - - /* Get, and optionally create the metadata page. */ - if ((ret = __db_lget(dbc, - 0, dbp->meta_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0) - goto err; - if ((ret = - __memp_fget(mpf, &dbp->meta_pgno, DB_MPOOL_CREATE, &meta)) != 0) - goto err; - - /* Build meta-data page. */ - lsn = meta->dbmeta.lsn; - __bam_init_meta(dbp, meta, dbp->meta_pgno, &lsn); - if ((ret = __db_log_page(mdbp, - txn, &meta->dbmeta.lsn, dbp->meta_pgno, (PAGE *)meta)) != 0) - goto err; - - /* Create and initialize a root page. */ - if ((ret = __db_new(dbc, - dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE, &root)) != 0) - goto err; - root->level = LEAFLEVEL; - - if (DBENV_LOGGING(dbenv) && - (ret = __bam_root_log(mdbp, txn, &meta->dbmeta.lsn, 0, - meta->dbmeta.pgno, root->pgno, &meta->dbmeta.lsn)) != 0) - goto err; - - meta->root = root->pgno; - if ((ret = - __db_log_page(mdbp, txn, &root->lsn, root->pgno, root)) != 0) - goto err; - - /* Release the metadata and root pages. 
*/ - if ((ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY)) != 0) - goto err; - meta = NULL; - if ((ret = __memp_fput(mpf, root, DB_MPOOL_DIRTY)) != 0) - goto err; - root = NULL; -err: - if (meta != NULL) - if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) - ret = t_ret; - if (root != NULL) - if ((t_ret = __memp_fput(mpf, root, 0)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; - if (dbc != NULL) - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} diff --git a/storage/bdb/btree/bt_put.c b/storage/bdb/btree/bt_put.c deleted file mode 100644 index dd56f9d3523..00000000000 --- a/storage/bdb/btree/bt_put.c +++ /dev/null @@ -1,912 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: bt_put.c,v 12.10 2005/10/20 18:57:00 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/mp.h" - -static int __bam_build - __P((DBC *, u_int32_t, DBT *, PAGE *, u_int32_t, u_int32_t)); -static int __bam_dup_check __P((DBC *, u_int32_t, - PAGE *, u_int32_t, u_int32_t, db_indx_t *)); -static int __bam_dup_convert __P((DBC *, PAGE *, u_int32_t, u_int32_t)); -static int __bam_ovput - __P((DBC *, u_int32_t, db_pgno_t, PAGE *, u_int32_t, DBT *)); -static u_int32_t - __bam_partsize __P((DB *, u_int32_t, DBT *, PAGE *, u_int32_t)); - -/* - * __bam_iitem -- - * Insert an item into the tree. 
- * - * PUBLIC: int __bam_iitem __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t)); - */ -int -__bam_iitem(dbc, key, data, op, flags) - DBC *dbc; - DBT *key, *data; - u_int32_t op, flags; -{ - DB_ENV *dbenv; - BKEYDATA *bk, bk_tmp; - BTREE *t; - BTREE_CURSOR *cp; - DB *dbp; - DBT bk_hdr, tdbt; - DB_MPOOLFILE *mpf; - PAGE *h; - db_indx_t cnt, indx; - u_int32_t data_size, have_bytes, need_bytes, needed, pages, pagespace; - int cmp, bigkey, bigdata, dupadjust, padrec, replace, ret, was_deleted; - - COMPQUIET(bk, NULL); - COMPQUIET(cnt, 0); - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - t = dbp->bt_internal; - h = cp->page; - indx = cp->indx; - dupadjust = replace = was_deleted = 0; - - /* - * Fixed-length records with partial puts: it's an error to specify - * anything other simple overwrite. - */ - if (F_ISSET(dbp, DB_AM_FIXEDLEN) && - F_ISSET(data, DB_DBT_PARTIAL) && data->size != data->dlen) - return (__db_rec_repl(dbenv, data->size, data->dlen)); - - /* - * Figure out how much space the data will take, including if it's a - * partial record. - * - * Fixed-length records: it's an error to specify a record that's - * longer than the fixed-length, and we never require less than - * the fixed-length record size. - */ - data_size = F_ISSET(data, DB_DBT_PARTIAL) ? - __bam_partsize(dbp, op, data, h, indx) : data->size; - padrec = 0; - if (F_ISSET(dbp, DB_AM_FIXEDLEN)) { - if (data_size > t->re_len) - return (__db_rec_toobig(dbenv, data_size, t->re_len)); - - /* Records that are deleted anyway needn't be padded out. */ - if (!LF_ISSET(BI_DELETED) && data_size < t->re_len) { - padrec = 1; - data_size = t->re_len; - } - } - - /* - * Handle partial puts or short fixed-length records: build the - * real record. - */ - if (padrec || F_ISSET(data, DB_DBT_PARTIAL)) { - tdbt = *data; - if ((ret = - __bam_build(dbc, op, &tdbt, h, indx, data_size)) != 0) - return (ret); - data = &tdbt; - } - - /* - * If the user has specified a duplicate comparison function, return - * an error if DB_CURRENT was specified and the replacement data - * doesn't compare equal to the current data. This stops apps from - * screwing up the duplicate sort order. We have to do this after - * we build the real record so that we're comparing the real items. - */ - if (op == DB_CURRENT && dbp->dup_compare != NULL) { - if ((ret = __bam_cmp(dbp, data, h, - indx + (TYPE(h) == P_LBTREE ? O_INDX : 0), - dbp->dup_compare, &cmp)) != 0) - return (ret); - if (cmp != 0) { - __db_err(dbenv, - "Existing data sorts differently from put data"); - return (EINVAL); - } - } - - /* - * If the key or data item won't fit on a page, we'll have to store - * them on overflow pages. - */ - needed = 0; - bigdata = data_size > cp->ovflsize; - switch (op) { - case DB_KEYFIRST: - /* We're adding a new key and data pair. */ - bigkey = key->size > cp->ovflsize; - if (bigkey) - needed += BOVERFLOW_PSIZE; - else - needed += BKEYDATA_PSIZE(key->size); - if (bigdata) - needed += BOVERFLOW_PSIZE; - else - needed += BKEYDATA_PSIZE(data_size); - break; - case DB_AFTER: - case DB_BEFORE: - case DB_CURRENT: - /* - * We're either overwriting the data item of a key/data pair - * or we're creating a new on-page duplicate and only adding - * a data item. - * - * !!! - * We're not currently correcting for space reclaimed from - * already deleted items, but I don't think it's worth the - * complexity. - */ - bigkey = 0; - if (op == DB_CURRENT) { - bk = GET_BKEYDATA(dbp, h, - indx + (TYPE(h) == P_LBTREE ? 
O_INDX : 0)); - if (B_TYPE(bk->type) == B_KEYDATA) - have_bytes = BKEYDATA_PSIZE(bk->len); - else - have_bytes = BOVERFLOW_PSIZE; - need_bytes = 0; - } else { - have_bytes = 0; - need_bytes = sizeof(db_indx_t); - } - if (bigdata) - need_bytes += BOVERFLOW_PSIZE; - else - need_bytes += BKEYDATA_PSIZE(data_size); - - if (have_bytes < need_bytes) - needed += need_bytes - have_bytes; - break; - default: - return (__db_unknown_flag(dbenv, "DB->put", op)); - } - - /* Split the page if there's not enough room. */ - if (P_FREESPACE(dbp, h) < needed) - return (DB_NEEDSPLIT); - - /* - * Check to see if we will convert to off page duplicates -- if - * so, we'll need a page. - */ - if (F_ISSET(dbp, DB_AM_DUP) && - TYPE(h) == P_LBTREE && op != DB_KEYFIRST && - P_FREESPACE(dbp, h) - needed <= dbp->pgsize / 2 && - __bam_dup_check(dbc, op, h, indx, needed, &cnt)) { - pages = 1; - dupadjust = 1; - } else - pages = 0; - - /* - * If we are not using transactions and there is a page limit - * set on the file, then figure out if things will fit before - * taking action. - */ - if (dbc->txn == NULL && dbp->mpf->mfp->maxpgno != 0) { - pagespace = P_MAXSPACE(dbp, dbp->pgsize); - if (bigdata) - pages += ((data_size - 1) / pagespace) + 1; - if (bigkey) - pages += ((key->size - 1) / pagespace) + 1; - - if (pages > (dbp->mpf->mfp->maxpgno - dbp->mpf->mfp->last_pgno)) - return (__db_space_err(dbp)); - } - - /* - * The code breaks it up into five cases: - * - * 1. Insert a new key/data pair. - * 2. Append a new data item (a new duplicate). - * 3. Insert a new data item (a new duplicate). - * 4. Delete and re-add the data item (overflow item). - * 5. Overwrite the data item. - */ - switch (op) { - case DB_KEYFIRST: /* 1. Insert a new key/data pair. */ - if (bigkey) { - if ((ret = __bam_ovput(dbc, - B_OVERFLOW, PGNO_INVALID, h, indx, key)) != 0) - return (ret); - } else - if ((ret = __db_pitem(dbc, h, indx, - BKEYDATA_SIZE(key->size), NULL, key)) != 0) - return (ret); - - if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0) - return (ret); - ++indx; - break; - case DB_AFTER: /* 2. Append a new data item. */ - if (TYPE(h) == P_LBTREE) { - /* Copy the key for the duplicate and adjust cursors. */ - if ((ret = - __bam_adjindx(dbc, h, indx + P_INDX, indx, 1)) != 0) - return (ret); - if ((ret = - __bam_ca_di(dbc, PGNO(h), indx + P_INDX, 1)) != 0) - return (ret); - - indx += 3; - - cp->indx += 2; - } else { - ++indx; - cp->indx += 1; - } - break; - case DB_BEFORE: /* 3. Insert a new data item. */ - if (TYPE(h) == P_LBTREE) { - /* Copy the key for the duplicate and adjust cursors. */ - if ((ret = __bam_adjindx(dbc, h, indx, indx, 1)) != 0) - return (ret); - if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0) - return (ret); - - ++indx; - } - break; - case DB_CURRENT: - /* - * Clear the cursor's deleted flag. The problem is that if - * we deadlock or fail while deleting the overflow item or - * replacing the non-overflow item, a subsequent cursor close - * will try and remove the item because the cursor's delete - * flag is set. - */ - if ((ret = __bam_ca_delete(dbp, PGNO(h), indx, 0, NULL)) != 0) - return (ret); - - if (TYPE(h) == P_LBTREE) { - ++indx; - } - - /* - * In a Btree deleted records aren't counted (deleted records - * are counted in a Recno because all accesses are based on - * record number). If it's a Btree and it's a DB_CURRENT - * operation overwriting a previously deleted record, increment - * the record count. - */ - if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP) - was_deleted = B_DISSET(bk->type); - - /* - * 4. 
Delete and re-add the data item. - * - * If we're changing the type of the on-page structure, or we - * are referencing offpage items, we have to delete and then - * re-add the item. We do not do any cursor adjustments here - * because we're going to immediately re-add the item into the - * same slot. - */ - if (bigdata || B_TYPE(bk->type) != B_KEYDATA) { - if ((ret = __bam_ditem(dbc, h, indx)) != 0) - return (ret); - break; - } - - /* 5. Overwrite the data item. */ - replace = 1; - break; - default: - return (__db_unknown_flag(dbenv, "DB->put", op)); - } - - /* Add the data. */ - if (bigdata) { - /* - * We do not have to handle deleted (BI_DELETED) records - * in this case; the actual records should never be created. - */ - DB_ASSERT(!LF_ISSET(BI_DELETED)); - if ((ret = __bam_ovput(dbc, - B_OVERFLOW, PGNO_INVALID, h, indx, data)) != 0) - return (ret); - } else { - if (LF_ISSET(BI_DELETED)) { - B_TSET(bk_tmp.type, B_KEYDATA, 1); - bk_tmp.len = data->size; - bk_hdr.data = &bk_tmp; - bk_hdr.size = SSZA(BKEYDATA, data); - ret = __db_pitem(dbc, h, indx, - BKEYDATA_SIZE(data->size), &bk_hdr, data); - } else if (replace) - ret = __bam_ritem(dbc, h, indx, data); - else - ret = __db_pitem(dbc, h, indx, - BKEYDATA_SIZE(data->size), NULL, data); - if (ret != 0) - return (ret); - } - if ((ret = __memp_fset(mpf, h, DB_MPOOL_DIRTY)) != 0) - return (ret); - - /* - * Re-position the cursors if necessary and reset the current cursor - * to point to the new item. - */ - if (op != DB_CURRENT) { - if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0) - return (ret); - cp->indx = TYPE(h) == P_LBTREE ? indx - O_INDX : indx; - } - - /* - * If we've changed the record count, update the tree. There's no - * need to adjust the count if the operation not performed on the - * current record or when the current record was previously deleted. - */ - if (F_ISSET(cp, C_RECNUM) && (op != DB_CURRENT || was_deleted)) - if ((ret = __bam_adjust(dbc, 1)) != 0) - return (ret); - - /* - * If a Btree leaf page is at least 50% full and we may have added or - * modified a duplicate data item, see if the set of duplicates takes - * up at least 25% of the space on the page. If it does, move it onto - * its own page. - */ - if (dupadjust && - (ret = __bam_dup_convert(dbc, h, indx - O_INDX, cnt)) != 0) - return (ret); - - /* If we've modified a recno file, set the flag. */ - if (dbc->dbtype == DB_RECNO) - t->re_modified = 1; - - return (ret); -} - -/* - * __bam_partsize -- - * Figure out how much space a partial data item is in total. - */ -static u_int32_t -__bam_partsize(dbp, op, data, h, indx) - DB *dbp; - u_int32_t op, indx; - DBT *data; - PAGE *h; -{ - BKEYDATA *bk; - u_int32_t nbytes; - - /* - * If the record doesn't already exist, it's simply the data we're - * provided. - */ - if (op != DB_CURRENT) - return (data->doff + data->size); - - /* - * Otherwise, it's the data provided plus any already existing data - * that we're not replacing. - */ - bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0)); - nbytes = - B_TYPE(bk->type) == B_OVERFLOW ? ((BOVERFLOW *)bk)->tlen : bk->len; - - return (__db_partsize(nbytes, data)); -} - -/* - * __bam_build -- - * Build the real record for a partial put, or short fixed-length record. 
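
__bam_partsize above only needs the length a record will have after a partial put is applied. The standalone function below reproduces that arithmetic under the usual DBT partial semantics (replace dlen bytes at offset doff with size new bytes, padding any gap past the current end); the function itself is hypothetical, and for fixed-length recno records the result would additionally be padded up to re_len.

#include <stdio.h>

/*
 * Length of a record after a partial put: 'dlen' bytes starting at 'doff'
 * are replaced by 'size' new bytes; writing past the current end pads the
 * gap, so the result in that case is simply doff + size.
 */
static unsigned
partial_result_len(unsigned oldlen, unsigned doff, unsigned dlen, unsigned size)
{
	if (doff + dlen > oldlen)           /* replaced region runs off the end */
		return (doff + size);
	return (oldlen - dlen + size);      /* in-place splice */
}

int
main(void)
{
	/* 100-byte record: overwrite 10 bytes at offset 20 with 25 bytes. */
	printf("%u\n", partial_result_len(100, 20, 10, 25));  /* 115 */
	/* 100-byte record: write 8 bytes at offset 200, gap is padded. */
	printf("%u\n", partial_result_len(100, 200, 0, 8));   /* 208 */
	return (0);
}
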
- */ -static int -__bam_build(dbc, op, dbt, h, indx, nbytes) - DBC *dbc; - u_int32_t op, indx, nbytes; - DBT *dbt; - PAGE *h; -{ - BKEYDATA *bk, tbk; - BOVERFLOW *bo; - BTREE *t; - DB *dbp; - DBT copy, *rdata; - u_int32_t len, tlen; - u_int8_t *p; - int ret; - - COMPQUIET(bo, NULL); - - dbp = dbc->dbp; - t = dbp->bt_internal; - - /* We use the record data return memory, it's only a short-term use. */ - rdata = &dbc->my_rdata; - if (rdata->ulen < nbytes) { - if ((ret = __os_realloc(dbp->dbenv, - nbytes, &rdata->data)) != 0) { - rdata->ulen = 0; - rdata->data = NULL; - return (ret); - } - rdata->ulen = nbytes; - } - - /* - * We use nul or pad bytes for any part of the record that isn't - * specified; get it over with. - */ - memset(rdata->data, - F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_pad : 0, nbytes); - - /* - * In the next clauses, we need to do three things: a) set p to point - * to the place at which to copy the user's data, b) set tlen to the - * total length of the record, not including the bytes contributed by - * the user, and c) copy any valid data from an existing record. If - * it's not a partial put (this code is called for both partial puts - * and fixed-length record padding) or it's a new key, we can cut to - * the chase. - */ - if (!F_ISSET(dbt, DB_DBT_PARTIAL) || op != DB_CURRENT) { - p = (u_int8_t *)rdata->data + dbt->doff; - tlen = dbt->doff; - goto user_copy; - } - - /* Find the current record. */ - if (indx < NUM_ENT(h)) { - bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ? - O_INDX : 0)); - bo = (BOVERFLOW *)bk; - } else { - bk = &tbk; - B_TSET(bk->type, B_KEYDATA, 0); - bk->len = 0; - } - if (B_TYPE(bk->type) == B_OVERFLOW) { - /* - * In the case of an overflow record, we shift things around - * in the current record rather than allocate a separate copy. - */ - memset(©, 0, sizeof(copy)); - if ((ret = __db_goff(dbp, ©, bo->tlen, - bo->pgno, &rdata->data, &rdata->ulen)) != 0) - return (ret); - - /* Skip any leading data from the original record. */ - tlen = dbt->doff; - p = (u_int8_t *)rdata->data + dbt->doff; - - /* - * Copy in any trailing data from the original record. - * - * If the original record was larger than the original offset - * plus the bytes being deleted, there is trailing data in the - * original record we need to preserve. If we aren't deleting - * the same number of bytes as we're inserting, copy it up or - * down, into place. - * - * Use memmove(), the regions may overlap. - */ - if (bo->tlen > dbt->doff + dbt->dlen) { - len = bo->tlen - (dbt->doff + dbt->dlen); - if (dbt->dlen != dbt->size) - memmove(p + dbt->size, p + dbt->dlen, len); - tlen += len; - } - } else { - /* Copy in any leading data from the original record. */ - memcpy(rdata->data, - bk->data, dbt->doff > bk->len ? bk->len : dbt->doff); - tlen = dbt->doff; - p = (u_int8_t *)rdata->data + dbt->doff; - - /* Copy in any trailing data from the original record. */ - len = dbt->doff + dbt->dlen; - if (bk->len > len) { - memcpy(p + dbt->size, bk->data + len, bk->len - len); - tlen += bk->len - len; - } - } - -user_copy: - /* - * Copy in the application provided data -- p and tlen must have been - * initialized above. - */ - memcpy(p, dbt->data, dbt->size); - tlen += dbt->size; - - /* Set the DBT to reference our new record. */ - rdata->size = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : tlen; - rdata->dlen = 0; - rdata->doff = 0; - rdata->flags = 0; - *dbt = *rdata; - return (0); -} - -/* - * __bam_ritem -- - * Replace an item on a page. 
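
The replace path that follows trims the common prefix and suffix off the old and new data before logging, so only the changed middle bytes go into the log record. That scan lifts out cleanly; here is a standalone sketch of it with hypothetical names.

#include <stdio.h>
#include <string.h>

/*
 * Compute the longest common prefix and suffix of two byte strings, so a
 * replace can be logged as (prefix, changed middle, suffix) instead of two
 * whole values.  The suffix scan must not overlap the prefix.
 */
static void
common_affixes(const unsigned char *oldp, size_t oldlen,
    const unsigned char *newp, size_t newlen,
    size_t *prefixp, size_t *suffixp)
{
	size_t min, prefix, suffix;

	min = oldlen < newlen ? oldlen : newlen;
	for (prefix = 0; prefix < min && oldp[prefix] == newp[prefix]; ++prefix)
		;
	min -= prefix;
	for (suffix = 0; suffix < min &&
	    oldp[oldlen - 1 - suffix] == newp[newlen - 1 - suffix]; ++suffix)
		;
	*prefixp = prefix;
	*suffixp = suffix;
}

int
main(void)
{
	const char *oldv = "hello brave world";
	const char *newv = "hello new world";
	size_t prefix, suffix;

	common_affixes((const unsigned char *)oldv, strlen(oldv),
	    (const unsigned char *)newv, strlen(newv), &prefix, &suffix);
	printf("log %zu+%zu common bytes, %zu changed\n",
	    prefix, suffix, strlen(newv) - prefix - suffix);
	return (0);
}
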
- * - * PUBLIC: int __bam_ritem __P((DBC *, PAGE *, u_int32_t, DBT *)); - */ -int -__bam_ritem(dbc, h, indx, data) - DBC *dbc; - PAGE *h; - u_int32_t indx; - DBT *data; -{ - BKEYDATA *bk; - DB *dbp; - DBT orig, repl; - db_indx_t cnt, lo, ln, min, off, prefix, suffix; - int32_t nbytes; - int ret; - db_indx_t *inp; - u_int8_t *p, *t; - - dbp = dbc->dbp; - - /* - * Replace a single item onto a page. The logic figuring out where - * to insert and whether it fits is handled in the caller. All we do - * here is manage the page shuffling. - */ - bk = GET_BKEYDATA(dbp, h, indx); - - /* Log the change. */ - if (DBC_LOGGING(dbc)) { - /* - * We might as well check to see if the two data items share - * a common prefix and suffix -- it can save us a lot of log - * message if they're large. - */ - min = data->size < bk->len ? data->size : bk->len; - for (prefix = 0, - p = bk->data, t = data->data; - prefix < min && *p == *t; ++prefix, ++p, ++t) - ; - - min -= prefix; - for (suffix = 0, - p = (u_int8_t *)bk->data + bk->len - 1, - t = (u_int8_t *)data->data + data->size - 1; - suffix < min && *p == *t; ++suffix, --p, --t) - ; - - /* We only log the parts of the keys that have changed. */ - orig.data = (u_int8_t *)bk->data + prefix; - orig.size = bk->len - (prefix + suffix); - repl.data = (u_int8_t *)data->data + prefix; - repl.size = data->size - (prefix + suffix); - if ((ret = __bam_repl_log(dbp, dbc->txn, &LSN(h), 0, PGNO(h), - &LSN(h), (u_int32_t)indx, (u_int32_t)B_DISSET(bk->type), - &orig, &repl, (u_int32_t)prefix, (u_int32_t)suffix)) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(h)); - - /* - * Set references to the first in-use byte on the page and the - * first byte of the item being replaced. - */ - inp = P_INP(dbp, h); - p = (u_int8_t *)h + HOFFSET(h); - t = (u_int8_t *)bk; - - /* - * If the entry is growing in size, shift the beginning of the data - * part of the page down. If the entry is shrinking in size, shift - * the beginning of the data part of the page up. Use memmove(3), - * the regions overlap. - */ - lo = BKEYDATA_SIZE(bk->len); - ln = (db_indx_t)BKEYDATA_SIZE(data->size); - if (lo != ln) { - nbytes = lo - ln; /* Signed difference. */ - if (p == t) /* First index is fast. */ - inp[indx] += nbytes; - else { /* Else, shift the page. */ - memmove(p + nbytes, p, (size_t)(t - p)); - - /* Adjust the indices' offsets. */ - off = inp[indx]; - for (cnt = 0; cnt < NUM_ENT(h); ++cnt) - if (inp[cnt] <= off) - inp[cnt] += nbytes; - } - - /* Clean up the page and adjust the item's reference. */ - HOFFSET(h) += nbytes; - t += nbytes; - } - - /* Copy the new item onto the page. */ - bk = (BKEYDATA *)t; - B_TSET(bk->type, B_KEYDATA, 0); - bk->len = data->size; - memcpy(bk->data, data->data, data->size); - - return (0); -} - -/* - * __bam_dup_check -- - * Check to see if the duplicate set at indx should have its own page. - */ -static int -__bam_dup_check(dbc, op, h, indx, sz, cntp) - DBC *dbc; - u_int32_t op; - PAGE *h; - u_int32_t indx, sz; - db_indx_t *cntp; -{ - BKEYDATA *bk; - DB *dbp; - db_indx_t cnt, first, *inp; - - dbp = dbc->dbp; - inp = P_INP(dbp, h); - - /* - * Count the duplicate records and calculate how much room they're - * using on the page. - */ - while (indx > 0 && inp[indx] == inp[indx - P_INDX]) - indx -= P_INDX; - - /* Count the key once. */ - bk = GET_BKEYDATA(dbp, h, indx); - sz += B_TYPE(bk->type) == B_KEYDATA ? - BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE; - - /* Sum up all the data items. */ - first = indx; - - /* - * Account for the record being inserted. 
If we are replacing it, - * don't count it twice. - * - * We execute the loop with first == indx to get the size of the - * first record. - */ - cnt = op == DB_CURRENT ? 0 : 1; - for (first = indx; - indx < NUM_ENT(h) && inp[first] == inp[indx]; - ++cnt, indx += P_INDX) { - bk = GET_BKEYDATA(dbp, h, indx + O_INDX); - sz += B_TYPE(bk->type) == B_KEYDATA ? - BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE; - } - - /* - * We have to do these checks when the user is replacing the cursor's - * data item -- if the application replaces a duplicate item with a - * larger data item, it can increase the amount of space used by the - * duplicates, requiring this check. But that means we may have done - * this check when it wasn't a duplicate item after all. - */ - if (cnt == 1) - return (0); - - /* - * If this set of duplicates is using more than 25% of the page, move - * them off. The choice of 25% is a WAG, but the value must be small - * enough that we can always split a page without putting duplicates - * on two different pages. - */ - if (sz < dbp->pgsize / 4) - return (0); - - *cntp = cnt; - return (1); -} - -/* - * __bam_dup_convert -- - * Move a set of duplicates off-page and into their own tree. - */ -static int -__bam_dup_convert(dbc, h, indx, cnt) - DBC *dbc; - PAGE *h; - u_int32_t indx, cnt; -{ - BKEYDATA *bk; - DB *dbp; - DBT hdr; - DB_MPOOLFILE *mpf; - PAGE *dp; - db_indx_t cpindx, dindx, first, *inp; - int ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - inp = P_INP(dbp, h); - - /* Move to the beginning of the dup set. */ - while (indx > 0 && inp[indx] == inp[indx - P_INDX]) - indx -= P_INDX; - - /* Get a new page. */ - if ((ret = __db_new(dbc, - dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0) - return (ret); - P_INIT(dp, dbp->pgsize, dp->pgno, - PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, TYPE(dp)); - - /* - * Move this set of duplicates off the page. First points to the first - * key of the first duplicate key/data pair, cnt is the number of pairs - * we're dealing with. - */ - memset(&hdr, 0, sizeof(hdr)); - first = indx; - dindx = indx; - cpindx = 0; - do { - /* Move cursors referencing the old entry to the new entry. */ - if ((ret = __bam_ca_dup(dbc, first, - PGNO(h), indx, PGNO(dp), cpindx)) != 0) - goto err; - - /* - * Copy the entry to the new page. If the off-duplicate page - * If the off-duplicate page is a Btree page (i.e. dup_compare - * will be non-NULL, we use Btree pages for sorted dups, - * and Recno pages for unsorted dups), move all entries - * normally, even deleted ones. If it's a Recno page, - * deleted entries are discarded (if the deleted entry is - * overflow, then free up those pages). - */ - bk = GET_BKEYDATA(dbp, h, dindx + 1); - hdr.data = bk; - hdr.size = B_TYPE(bk->type) == B_KEYDATA ? - BKEYDATA_SIZE(bk->len) : BOVERFLOW_SIZE; - if (dbp->dup_compare == NULL && B_DISSET(bk->type)) { - /* - * Unsorted dups, i.e. recno page, and we have - * a deleted entry, don't move it, but if it was - * an overflow entry, we need to free those pages. - */ - if (B_TYPE(bk->type) == B_OVERFLOW && - (ret = __db_doff(dbc, - (GET_BOVERFLOW(dbp, h, dindx + 1))->pgno)) != 0) - goto err; - } else { - if ((ret = __db_pitem( - dbc, dp, cpindx, hdr.size, &hdr, NULL)) != 0) - goto err; - ++cpindx; - } - /* Delete all but the last reference to the key. */ - if (cnt != 1) { - if ((ret = __bam_adjindx(dbc, - h, dindx, first + 1, 0)) != 0) - goto err; - } else - dindx++; - - /* Delete the data item. 
*/ - if ((ret = __db_ditem(dbc, h, dindx, hdr.size)) != 0) - goto err; - indx += P_INDX; - } while (--cnt); - - /* Put in a new data item that points to the duplicates page. */ - if ((ret = __bam_ovput(dbc, - B_DUPLICATE, dp->pgno, h, first + 1, NULL)) != 0) - goto err; - - /* Adjust cursors for all the above movements. */ - if ((ret = __bam_ca_di(dbc, - PGNO(h), first + P_INDX, (int)(first + P_INDX - indx))) != 0) - goto err; - - return (__memp_fput(mpf, dp, DB_MPOOL_DIRTY)); - -err: (void)__memp_fput(mpf, dp, 0); - return (ret); -} - -/* - * __bam_ovput -- - * Build an item for an off-page duplicates page or overflow page and - * insert it on the page. - */ -static int -__bam_ovput(dbc, type, pgno, h, indx, item) - DBC *dbc; - u_int32_t type, indx; - db_pgno_t pgno; - PAGE *h; - DBT *item; -{ - BOVERFLOW bo; - DBT hdr; - int ret; - - UMRW_SET(bo.unused1); - B_TSET(bo.type, type, 0); - UMRW_SET(bo.unused2); - - /* - * If we're creating an overflow item, do so and acquire the page - * number for it. If we're creating an off-page duplicates tree, - * we are giving the page number as an argument. - */ - if (type == B_OVERFLOW) { - if ((ret = __db_poff(dbc, item, &bo.pgno)) != 0) - return (ret); - bo.tlen = item->size; - } else { - bo.pgno = pgno; - bo.tlen = 0; - } - - /* Store the new record on the page. */ - memset(&hdr, 0, sizeof(hdr)); - hdr.data = &bo; - hdr.size = BOVERFLOW_SIZE; - return (__db_pitem(dbc, h, indx, BOVERFLOW_SIZE, &hdr, NULL)); -} diff --git a/storage/bdb/btree/bt_rec.c b/storage/bdb/btree/bt_rec.c deleted file mode 100644 index 3667ee12c58..00000000000 --- a/storage/bdb/btree/bt_rec.c +++ /dev/null @@ -1,1389 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_rec.c,v 12.11 2005/10/20 18:57:01 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" - -#define IS_BTREE_PAGE(pagep) \ - (TYPE(pagep) == P_IBTREE || \ - TYPE(pagep) == P_LBTREE || TYPE(pagep) == P_LDUP) - -/* - * __bam_split_recover -- - * Recovery function for split. - * - * PUBLIC: int __bam_split_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_split_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_split_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *_lp, *lp, *np, *pp, *_rp, *rp, *sp; - db_pgno_t pgno, root_pgno; - u_int32_t ptype; - int cmp, l_update, p_update, r_update, rc, ret, rootsplit, t_ret; - - COMPQUIET(info, NULL); - REC_PRINT(__bam_split_print); - - mpf = NULL; - _lp = lp = np = pp = _rp = rp = NULL; - sp = NULL; - - REC_INTRO(__bam_split_read, 1, 0); - - /* - * There are two kinds of splits that we have to recover from. The - * first is a root-page split, where the root page is split from a - * leaf page into an internal page and two new leaf pages are created. - * The second is where a page is split into two pages, and a new key - * is inserted into the parent page. - * - * DBTs are not aligned in log records, so we need to copy the page - * so that we can access fields within it throughout this routine. 
- * Although we could hardcode the unaligned copies in this routine, - * we will be calling into regular btree functions with this page, - * so it's got to be aligned. Copying it into allocated memory is - * the only way to guarantee this. - */ - if ((ret = __os_malloc(dbenv, argp->pg.size, &sp)) != 0) - goto out; - memcpy(sp, argp->pg.data, argp->pg.size); - - pgno = PGNO(sp); - root_pgno = argp->root_pgno; - rootsplit = root_pgno != PGNO_INVALID; - REC_FGET(mpf, argp->left, &lp, right); -right: REC_FGET(mpf, argp->right, &rp, redo); - -redo: if (DB_REDO(op)) { - l_update = r_update = p_update = 0; - /* - * Decide if we need to resplit the page. - * - * If this is a root split, then the root has to exist unless - * we have truncated it due to a future deallocation. - */ - if (rootsplit) { - REC_FGET(mpf, root_pgno, &pp, do_left); - cmp = - log_compare(&LSN(pp), &LSN(argp->pg.data)); - CHECK_LSN(dbenv, op, - cmp, &LSN(pp), &LSN(argp->pg.data)); - p_update = cmp == 0; - } - -do_left: if (lp != NULL) { - cmp = log_compare(&LSN(lp), &argp->llsn); - CHECK_LSN(dbenv, op, cmp, &LSN(lp), &argp->llsn); - if (cmp == 0) - l_update = 1; - } - - if (rp != NULL) { - cmp = log_compare(&LSN(rp), &argp->rlsn); - CHECK_LSN(dbenv, op, cmp, &LSN(rp), &argp->rlsn); - if (cmp == 0) - r_update = 1; - } - - if (!p_update && !l_update && !r_update) - goto check_next; - - /* Allocate and initialize new left/right child pages. */ - if ((ret = __os_malloc(dbenv, file_dbp->pgsize, &_lp)) != 0 || - (ret = __os_malloc(dbenv, file_dbp->pgsize, &_rp)) != 0) - goto out; - if (rootsplit) { - P_INIT(_lp, file_dbp->pgsize, argp->left, - PGNO_INVALID, - ISINTERNAL(sp) ? PGNO_INVALID : argp->right, - LEVEL(sp), TYPE(sp)); - P_INIT(_rp, file_dbp->pgsize, argp->right, - ISINTERNAL(sp) ? PGNO_INVALID : argp->left, - PGNO_INVALID, LEVEL(sp), TYPE(sp)); - } else { - P_INIT(_lp, file_dbp->pgsize, PGNO(sp), - ISINTERNAL(sp) ? PGNO_INVALID : PREV_PGNO(sp), - ISINTERNAL(sp) ? PGNO_INVALID : argp->right, - LEVEL(sp), TYPE(sp)); - P_INIT(_rp, file_dbp->pgsize, argp->right, - ISINTERNAL(sp) ? PGNO_INVALID : sp->pgno, - ISINTERNAL(sp) ? PGNO_INVALID : NEXT_PGNO(sp), - LEVEL(sp), TYPE(sp)); - } - - /* Split the page. */ - if ((ret = __bam_copy(file_dbp, sp, _lp, 0, argp->indx)) != 0 || - (ret = __bam_copy(file_dbp, sp, _rp, argp->indx, - NUM_ENT(sp))) != 0) - goto out; - - if (l_update) { - memcpy(lp, _lp, file_dbp->pgsize); - lp->lsn = *lsnp; - if ((ret = __memp_fput(mpf, lp, DB_MPOOL_DIRTY)) != 0) - goto out; - lp = NULL; - } - - if (r_update) { - memcpy(rp, _rp, file_dbp->pgsize); - rp->lsn = *lsnp; - if ((ret = __memp_fput(mpf, rp, DB_MPOOL_DIRTY)) != 0) - goto out; - rp = NULL; - } - - /* - * If the parent page is wrong, update it. This is of interest - * only if it was a root split, since root splits create parent - * pages. All other splits modify a parent page, but those are - * separately logged and recovered. - */ - if (rootsplit && p_update) { - if (IS_BTREE_PAGE(sp)) { - ptype = P_IBTREE; - rc = argp->opflags & SPL_NRECS ? 1 : 0; - } else { - ptype = P_IRECNO; - rc = 1; - } - - P_INIT(pp, file_dbp->pgsize, root_pgno, - PGNO_INVALID, PGNO_INVALID, _lp->level + 1, ptype); - RE_NREC_SET(pp, rc ? __bam_total(file_dbp, _lp) + - __bam_total(file_dbp, _rp) : 0); - - pp->lsn = *lsnp; - if ((ret = __memp_fput(mpf, pp, DB_MPOOL_DIRTY)) != 0) - goto out; - pp = NULL; - } - -check_next: /* - * Finally, redo the next-page link if necessary. 
This is of - * interest only if it wasn't a root split -- inserting a new - * page in the tree requires that any following page have its - * previous-page pointer updated to our new page. The next - * page must exist because we're redoing the operation. - */ - if (!rootsplit && argp->npgno != PGNO_INVALID) { - if ((ret = - __memp_fget(mpf, &argp->npgno, 0, &np)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr( - file_dbp, argp->npgno, ret); - goto out; - } else - goto done; - } - cmp = log_compare(&LSN(np), &argp->nlsn); - CHECK_LSN(dbenv, op, cmp, &LSN(np), &argp->nlsn); - if (cmp == 0) { - PREV_PGNO(np) = argp->right; - np->lsn = *lsnp; - if ((ret = - __memp_fput(mpf, np, DB_MPOOL_DIRTY)) != 0) - goto out; - np = NULL; - } - } - } else { - /* - * If the split page is wrong, replace its contents with the - * logged page contents. If the page doesn't exist, it means - * that the create of the page never happened, nor did any of - * the adds onto the page that caused the split, and there's - * really no undo-ing to be done. - */ - if ((ret = __memp_fget(mpf, &pgno, 0, &pp)) != 0) { - pp = NULL; - goto lrundo; - } - if (log_compare(lsnp, &LSN(pp)) == 0) { - memcpy(pp, argp->pg.data, argp->pg.size); - if ((ret = __memp_fput(mpf, pp, DB_MPOOL_DIRTY)) != 0) - goto out; - pp = NULL; - } - - /* - * If it's a root split and the left child ever existed, update - * its LSN. (If it's not a root split, we've updated the left - * page already -- it's the same as the split page.) If the - * right child ever existed, root split or not, update its LSN. - * The undo of the page allocation(s) will restore them to the - * free list. - */ -lrundo: if ((rootsplit && lp != NULL) || rp != NULL) { - if (rootsplit && lp != NULL && - log_compare(lsnp, &LSN(lp)) == 0) { - lp->lsn = argp->llsn; - if ((ret = - __memp_fput(mpf, lp, DB_MPOOL_DIRTY)) != 0) - goto out; - lp = NULL; - } - if (rp != NULL && - log_compare(lsnp, &LSN(rp)) == 0) { - rp->lsn = argp->rlsn; - if ((ret = - __memp_fput(mpf, rp, DB_MPOOL_DIRTY)) != 0) - goto out; - rp = NULL; - } - } - - /* - * Finally, undo the next-page link if necessary. This is of - * interest only if it wasn't a root split -- inserting a new - * page in the tree requires that any following page have its - * previous-page pointer updated to our new page. Since it's - * possible that the next-page never existed, we ignore it as - * if there's nothing to undo. - */ - if (!rootsplit && argp->npgno != PGNO_INVALID) { - if ((ret = - __memp_fget(mpf, &argp->npgno, 0, &np)) != 0) { - np = NULL; - goto done; - } - if (log_compare(lsnp, &LSN(np)) == 0) { - PREV_PGNO(np) = argp->left; - np->lsn = argp->nlsn; - if (__memp_fput(mpf, np, DB_MPOOL_DIRTY)) - goto out; - np = NULL; - } - } - } - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: /* Free any pages that weren't dirtied. */ - if (pp != NULL && (t_ret = __memp_fput(mpf, pp, 0)) != 0 && ret == 0) - ret = t_ret; - if (lp != NULL && (t_ret = __memp_fput(mpf, lp, 0)) != 0 && ret == 0) - ret = t_ret; - if (np != NULL && (t_ret = __memp_fput(mpf, np, 0)) != 0 && ret == 0) - ret = t_ret; - if (rp != NULL && (t_ret = __memp_fput(mpf, rp, 0)) != 0 && ret == 0) - ret = t_ret; - - /* Free any allocated space. */ - if (_lp != NULL) - __os_free(dbenv, _lp); - if (_rp != NULL) - __os_free(dbenv, _rp); - if (sp != NULL) - __os_free(dbenv, sp); - - REC_CLOSE; -} - -/* - * __bam_rsplit_recover -- - * Recovery function for a reverse split. 
- * - * PUBLIC: int __bam_rsplit_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_rsplit_args *argp; - DB *file_dbp; - DBC *dbc; - DB_LSN copy_lsn; - DB_MPOOLFILE *mpf; - PAGE *pagep; - db_pgno_t pgno, root_pgno; - db_recno_t rcnt; - int cmp_n, cmp_p, modified, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__bam_rsplit_print); - REC_INTRO(__bam_rsplit_read, 1, 1); - - /* Fix the root page. */ - pgno = root_pgno = argp->root_pgno; - if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, pgno, ret); - goto out; - } else - goto do_page; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->rootlsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->rootlsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* - * Copy the new data to the root page. If it is not now a - * leaf page we need to restore the record number. We could - * try to determine if C_RECNUM was set in the btree, but - * that's not really necessary since the field is not used - * otherwise. - */ - rcnt = RE_NREC(pagep); - memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size); - if (LEVEL(pagep) > LEAFLEVEL) - RE_NREC_SET(pagep, rcnt); - pagep->pgno = root_pgno; - pagep->lsn = *lsnp; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo update described. */ - P_INIT(pagep, file_dbp->pgsize, root_pgno, - argp->nrec, PGNO_INVALID, pagep->level + 1, - IS_BTREE_PAGE(pagep) ? P_IBTREE : P_IRECNO); - if ((ret = __db_pitem(dbc, pagep, 0, - argp->rootent.size, &argp->rootent, NULL)) != 0) - goto out; - pagep->lsn = argp->rootlsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - -do_page: - /* - * Fix the page copied over the root page. It's possible that the - * page never made it to disk, so if we're undo-ing and the page - * doesn't exist, it's okay and there's nothing further to do. - */ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } else - goto done; - } - modified = 0; - (void)__ua_memcpy(&copy_lsn, &LSN(argp->pgdbt.data), sizeof(DB_LSN)); - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &copy_lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &copy_lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - pagep->lsn = *lsnp; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo update described. */ - memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size); - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __bam_adj_recover -- - * Recovery function for adj.
- * - * PUBLIC: int __bam_adj_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_adj_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_adj_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__bam_adj_print); - REC_INTRO(__bam_adj_read, 1, 1); - - /* Get the page; if it never existed and we're undoing, we're done. */ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } else - goto done; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - if ((ret = __bam_adjindx(dbc, - pagep, argp->indx, argp->indx_copy, argp->is_insert)) != 0) - goto out; - - LSN(pagep) = *lsnp; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo update described. */ - if ((ret = __bam_adjindx(dbc, - pagep, argp->indx, argp->indx_copy, !argp->is_insert)) != 0) - goto out; - - LSN(pagep) = argp->lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __bam_cadjust_recover -- - * Recovery function for the adjust of a count change in an internal - * page. - * - * PUBLIC: int __bam_cadjust_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_cadjust_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__bam_cadjust_print); - REC_INTRO(__bam_cadjust_read, 1, 0); - - /* Get the page; if it never existed and we're undoing, we're done. */ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } else - goto done; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - if (IS_BTREE_PAGE(pagep)) { - GET_BINTERNAL(file_dbp, pagep, argp->indx)->nrecs += - argp->adjust; - if (argp->opflags & CAD_UPDATEROOT) - RE_NREC_ADJ(pagep, argp->adjust); - } else { - GET_RINTERNAL(file_dbp, pagep, argp->indx)->nrecs += - argp->adjust; - if (argp->opflags & CAD_UPDATEROOT) - RE_NREC_ADJ(pagep, argp->adjust); - } - - LSN(pagep) = *lsnp; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo update described. 
*/ - if (IS_BTREE_PAGE(pagep)) { - GET_BINTERNAL(file_dbp, pagep, argp->indx)->nrecs -= - argp->adjust; - if (argp->opflags & CAD_UPDATEROOT) - RE_NREC_ADJ(pagep, -(argp->adjust)); - } else { - GET_RINTERNAL(file_dbp, pagep, argp->indx)->nrecs -= - argp->adjust; - if (argp->opflags & CAD_UPDATEROOT) - RE_NREC_ADJ(pagep, -(argp->adjust)); - } - LSN(pagep) = argp->lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __bam_cdel_recover -- - * Recovery function for the intent-to-delete of a cursor record. - * - * PUBLIC: int __bam_cdel_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_cdel_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_cdel_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - u_int32_t indx; - int cmp_n, cmp_p, modified, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__bam_cdel_print); - REC_INTRO(__bam_cdel_read, 1, 0); - - /* Get the page; if it never existed and we're undoing, we're done. */ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } else - goto done; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0); - B_DSET(GET_BKEYDATA(file_dbp, pagep, indx)->type); - - LSN(pagep) = *lsnp; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo update described. */ - indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0); - B_DCLR(GET_BKEYDATA(file_dbp, pagep, indx)->type); - - if ((ret = __bam_ca_delete( - file_dbp, argp->pgno, argp->indx, 0, NULL)) != 0) - goto out; - - LSN(pagep) = argp->lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __bam_repl_recover -- - * Recovery function for page item replacement. - * - * PUBLIC: int __bam_repl_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_repl_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_repl_args *argp; - BKEYDATA *bk; - DB *file_dbp; - DBC *dbc; - DBT dbt; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - u_int8_t *p; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__bam_repl_print); - REC_INTRO(__bam_repl_read, 1, 1); - - /* Get the page; if it never existed and we're undoing, we're done. 
*/ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } else - goto done; - } - bk = GET_BKEYDATA(file_dbp, pagep, argp->indx); - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* - * Need to redo update described. - * - * Re-build the replacement item. - */ - memset(&dbt, 0, sizeof(dbt)); - dbt.size = argp->prefix + argp->suffix + argp->repl.size; - if ((ret = __os_malloc(dbenv, dbt.size, &dbt.data)) != 0) - goto out; - p = dbt.data; - memcpy(p, bk->data, argp->prefix); - p += argp->prefix; - memcpy(p, argp->repl.data, argp->repl.size); - p += argp->repl.size; - memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix); - - ret = __bam_ritem(dbc, pagep, argp->indx, &dbt); - __os_free(dbenv, dbt.data); - if (ret != 0) - goto out; - - LSN(pagep) = *lsnp; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* - * Need to undo update described. - * - * Re-build the original item. - */ - memset(&dbt, 0, sizeof(dbt)); - dbt.size = argp->prefix + argp->suffix + argp->orig.size; - if ((ret = __os_malloc(dbenv, dbt.size, &dbt.data)) != 0) - goto out; - p = dbt.data; - memcpy(p, bk->data, argp->prefix); - p += argp->prefix; - memcpy(p, argp->orig.data, argp->orig.size); - p += argp->orig.size; - memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix); - - ret = __bam_ritem(dbc, pagep, argp->indx, &dbt); - __os_free(dbenv, dbt.data); - if (ret != 0) - goto out; - - /* Reset the deleted flag, if necessary. */ - if (argp->isdeleted) - B_DSET(GET_BKEYDATA(file_dbp, pagep, argp->indx)->type); - - LSN(pagep) = argp->lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __bam_root_recover -- - * Recovery function for setting the root page on the meta-data page. - * - * PUBLIC: int __bam_root_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_root_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_root_args *argp; - BTMETA *meta; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - int cmp_n, cmp_p, modified, ret; - - meta = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__bam_root_print); - REC_INTRO(__bam_root_read, 0, 0); - - if ((ret = __memp_fget(mpf, &argp->meta_pgno, 0, &meta)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->meta_pgno, ret); - goto out; - } else - goto done; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(meta)); - cmp_p = log_compare(&LSN(meta), &argp->meta_lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(meta), &argp->meta_lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - meta->root = argp->root_pgno; - meta->dbmeta.lsn = *lsnp; - ((BTREE *)file_dbp->bt_internal)->bt_root = meta->root; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Nothing to undo except lsn. */ - meta->dbmeta.lsn = argp->meta_lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, meta, modified ? 
DB_MPOOL_DIRTY : 0)) != 0) - goto out; - meta = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (meta != NULL) - (void)__memp_fput(mpf, meta, 0); - REC_CLOSE; -} - -/* - * __bam_curadj_recover -- - * Transaction abort function to undo cursor adjustments. - * This should only be triggered by subtransaction aborts. - * - * PUBLIC: int __bam_curadj_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_curadj_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_curadj_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - int ret; - - COMPQUIET(info, NULL); - COMPQUIET(mpf, NULL); - - REC_PRINT(__bam_curadj_print); - REC_INTRO(__bam_curadj_read, 0, 1); - - ret = 0; - if (op != DB_TXN_ABORT) - goto done; - - switch (argp->mode) { - case DB_CA_DI: - if ((ret = __bam_ca_di(dbc, argp->from_pgno, - argp->from_indx, -(int)argp->first_indx)) != 0) - goto out; - break; - case DB_CA_DUP: - if ((ret = __bam_ca_undodup(file_dbp, argp->first_indx, - argp->from_pgno, argp->from_indx, argp->to_indx)) != 0) - goto out; - break; - - case DB_CA_RSPLIT: - if ((ret = - __bam_ca_rsplit(dbc, argp->to_pgno, argp->from_pgno)) != 0) - goto out; - break; - - case DB_CA_SPLIT: - if ((ret = __bam_ca_undosplit(file_dbp, argp->from_pgno, - argp->to_pgno, argp->left_pgno, argp->from_indx)) != 0) - goto out; - break; - } - -done: *lsnp = argp->prev_lsn; -out: REC_CLOSE; -} - -/* - * __bam_rcuradj_recover -- - * Transaction abort function to undo cursor adjustments in rrecno. - * This should only be triggered by subtransaction aborts. - * - * PUBLIC: int __bam_rcuradj_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_rcuradj_args *argp; - BTREE_CURSOR *cp; - DB *file_dbp; - DBC *dbc, *rdbc; - DB_MPOOLFILE *mpf; - int ret, t_ret; - - COMPQUIET(info, NULL); - COMPQUIET(mpf, NULL); - rdbc = NULL; - - REC_PRINT(__bam_rcuradj_print); - REC_INTRO(__bam_rcuradj_read, 0, 1); - - ret = t_ret = 0; - - if (op != DB_TXN_ABORT) - goto done; - - /* - * We don't know whether we're in an offpage dup set, and - * thus don't know whether the dbc REC_INTRO has handed us is - * of a reasonable type. It's certainly unset, so if this is - * an offpage dup set, we don't have an OPD cursor. The - * simplest solution is just to allocate a whole new cursor - * for our use; we're only really using it to hold pass some - * state into __ram_ca, and this way we don't need to make - * this function know anything about how offpage dups work. - */ - if ((ret = __db_cursor_int(file_dbp, - NULL, DB_RECNO, argp->root, 0, DB_LOCK_INVALIDID, &rdbc)) != 0) - goto out; - - cp = (BTREE_CURSOR *)rdbc->internal; - F_SET(cp, C_RENUMBER); - cp->recno = argp->recno; - - switch (argp->mode) { - case CA_DELETE: - /* - * The way to undo a delete is with an insert. Since - * we're undoing it, the delete flag must be set. - */ - F_SET(cp, C_DELETED); - F_SET(cp, C_RENUMBER); /* Just in case. */ - cp->order = argp->order; - (void)__ram_ca(rdbc, CA_ICURRENT); - break; - case CA_IAFTER: - case CA_IBEFORE: - case CA_ICURRENT: - /* - * The way to undo an insert is with a delete. The delete - * flag is unset to start with. 
- */ - F_CLR(cp, C_DELETED); - cp->order = INVALID_ORDER; - (void)__ram_ca(rdbc, CA_DELETE); - break; - } - -done: *lsnp = argp->prev_lsn; -out: if (rdbc != NULL && (t_ret = __db_c_close(rdbc)) != 0 && ret == 0) - ret = t_ret; - REC_CLOSE; -} - -/* - * __bam_relink_recover -- - * Recovery function for relink. - * - * PUBLIC: int __bam_relink_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_relink_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_relink_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__bam_relink_print); - REC_INTRO(__bam_relink_read, 1, 0); - - /* - * There are up to three pages we need to check -- the page, and the - * previous and next pages, if they existed. For a page add operation, - * the current page is the result of a split and is being recovered - * elsewhere, so all we need do is recover the next page. - */ - if ((ret = __memp_fget(mpf, &argp->next, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->next, ret); - goto out; - } else - goto prev; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->lsn_next); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn_next); - if (cmp_p == 0 && DB_REDO(op)) { - /* Redo the remove or replace. */ - if (argp->new_pgno == PGNO_INVALID) - pagep->prev_pgno = argp->prev; - else - pagep->prev_pgno = argp->new_pgno; - - pagep->lsn = *lsnp; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Undo the remove or replace. */ - pagep->prev_pgno = argp->pgno; - - pagep->lsn = argp->lsn_next; - modified = 1; - } - - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -prev: if ((ret = __memp_fget(mpf, &argp->prev, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->prev, ret); - goto out; - } else - goto done; - } - - modified = 0; - cmp_p = log_compare(&LSN(pagep), &argp->lsn_prev); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn_prev); - if (cmp_p == 0 && DB_REDO(op)) { - /* Redo the relink. */ - if (argp->new_pgno == PGNO_INVALID) - pagep->next_pgno = argp->next; - else - pagep->next_pgno = argp->new_pgno; - - pagep->lsn = *lsnp; - modified = 1; - } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) { - /* Undo the relink. */ - pagep->next_pgno = argp->pgno; - pagep->lsn = argp->lsn_prev; - - modified = 1; - } - - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __bam_merge_recover -- - * Recovery function for merge. 
- * - * PUBLIC: int __bam_merge_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_merge_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __bam_merge_args *argp; - BKEYDATA *bk; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - db_indx_t indx, *ninp, *pinp; - u_int32_t size; - u_int8_t *bp; - int cmp_n, cmp_p, i, modified, ret; - - COMPQUIET(info, NULL); - - REC_PRINT(__bam_merge_print); - REC_INTRO(__bam_merge_read, 1, 1); - - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } else - goto next; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(file_dbp->dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); - - if (cmp_p == 0 && DB_REDO(op)) { - /* - * If the header is provided the page is empty, copy the - * needed data. - */ - DB_ASSERT(argp->hdr.size == 0 || NUM_ENT(pagep) == 0); - if (argp->hdr.size != 0) { - P_INIT(pagep, file_dbp->pgsize, pagep->pgno, - PREV_PGNO(argp->hdr.data), - NEXT_PGNO(argp->hdr.data), - LEVEL(argp->hdr.data), TYPE(argp->hdr.data)); - } - if (TYPE(pagep) == P_OVERFLOW) { - OV_REF(pagep) = OV_REF(argp->hdr.data); - OV_LEN(pagep) = OV_LEN(argp->hdr.data); - bp = (u_int8_t *) pagep + P_OVERHEAD(file_dbp); - memcpy(bp, argp->data.data, argp->data.size); - } else { - /* Copy the data segment. */ - bp = (u_int8_t *)pagep + - (db_indx_t)(HOFFSET(pagep) - argp->data.size); - memcpy(bp, argp->data.data, argp->data.size); - - /* Copy index table offset past the current entries. */ - pinp = P_INP(file_dbp, pagep) + NUM_ENT(pagep); - ninp = argp->ind.data; - for (i = 0; - i < (int)(argp->ind.size / sizeof(*ninp)); i++) - *pinp++ = *ninp++ - - (file_dbp->pgsize - HOFFSET(pagep)); - HOFFSET(pagep) -= argp->data.size; - NUM_ENT(pagep) += i; - } - pagep->lsn = *lsnp; - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* - * Since logging is logical at the page level - * we cannot just truncate the data space. Delete - * the proper number of items from the logical end - * of the page. - */ - for (i = 0; i < (int)(argp->ind.size / sizeof(*ninp)); i++) { - indx = NUM_ENT(pagep) - 1; - if (P_INP(file_dbp, pagep)[indx] == - P_INP(file_dbp, pagep)[indx - P_INDX]) { - NUM_ENT(pagep)--; - continue; - } - switch (TYPE(pagep)) { - case P_LBTREE: - case P_LRECNO: - case P_LDUP: - bk = GET_BKEYDATA(file_dbp, pagep, indx); - size = BITEM_SIZE(bk); - break; - - case P_IBTREE: - size = BINTERNAL_SIZE( - GET_BINTERNAL(file_dbp, pagep, indx)->len); - break; - case P_IRECNO: - size = RINTERNAL_SIZE; - break; - - default: - ret = __db_pgfmt(dbenv, PGNO(pagep)); - goto out; - } - if ((ret = - __db_ditem(dbc, pagep, indx, size)) != 0) - goto out; - } - if (argp->ind.size == 0) - HOFFSET(pagep) = file_dbp->pgsize; - pagep->lsn = argp->lsn; - modified = 1; - } - - if ((ret = __memp_fput(mpf, pagep, modified ? 
DB_MPOOL_DIRTY : 0)) != 0) - goto out; - -next: if ((ret = __memp_fget(mpf, &argp->npgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } else - goto done; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->nlsn); - CHECK_LSN(file_dbp->dbenv, op, cmp_p, &LSN(pagep), &argp->nlsn); - - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to truncate the page. */ - HOFFSET(pagep) = file_dbp->pgsize; - NUM_ENT(pagep) = 0; - pagep->lsn = *lsnp; - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* Need to put the data back on the page. */ - if (TYPE(pagep) == P_OVERFLOW) { - OV_REF(pagep) = OV_REF(argp->hdr.data); - OV_LEN(pagep) = OV_LEN(argp->hdr.data); - bp = (u_int8_t *) pagep + P_OVERHEAD(file_dbp); - memcpy(bp, argp->data.data, argp->data.size); - } else { - bp = (u_int8_t *)pagep + - (db_indx_t)(HOFFSET(pagep) - argp->data.size); - memcpy(bp, argp->data.data, argp->data.size); - - /* Copy index table. */ - pinp = P_INP(file_dbp, pagep) + NUM_ENT(pagep); - ninp = argp->ind.data; - for (i = 0; - i < (int)(argp->ind.size / sizeof(*ninp)); i++) - *pinp++ = *ninp++; - HOFFSET(pagep) -= argp->data.size; - NUM_ENT(pagep) = i; - } - pagep->lsn = argp->nlsn; - modified = 1; - } - - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; -done: - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} - -/* - * __bam_pgno_recover -- - * Recovery function for page number replacment. - * - * PUBLIC: int __bam_pgno_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__bam_pgno_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - BINTERNAL *bi; - __bam_pgno_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep, *npagep; - db_pgno_t *pgnop; - int cmp_n, cmp_p, modified, ret; - - COMPQUIET(info, NULL); - - REC_PRINT(__bam_pgno_print); - REC_INTRO(__bam_pgno_read, 1, 0); - - REC_FGET(mpf, argp->pgno, &pagep, done); - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(file_dbp->dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); - - if ((cmp_p == 0 && DB_REDO(op)) || (cmp_n == 0 && !DB_REDO(op))) { - switch (TYPE(pagep)) { - case P_IBTREE: - /* - * An internal record can have both a overflow - * and child pointer. Fetch the page to see - * which it is. - */ - bi = GET_BINTERNAL(file_dbp, pagep, argp->indx); - if (B_TYPE(bi->type) == B_OVERFLOW) { - REC_FGET(mpf, argp->npgno, &npagep, out); - - if (TYPE(npagep) == P_OVERFLOW) - pgnop = - &((BOVERFLOW *)(bi->data))->pgno; - else - pgnop = &bi->pgno; - if ((ret = __memp_fput(mpf, npagep, 0)) != 0) - goto out; - break; - } - pgnop = &bi->pgno; - break; - case P_IRECNO: - pgnop = - &GET_RINTERNAL(file_dbp, pagep, argp->indx)->pgno; - break; - default: - pgnop = - &GET_BOVERFLOW(file_dbp, pagep, argp->indx)->pgno; - break; - } - - if (DB_REDO(op)) { - /* Need to redo update described. */ - *pgnop = argp->npgno; - pagep->lsn = *lsnp; - modified = 1; - } else { - *pgnop = argp->opgno; - pagep->lsn = argp->lsn; - modified = 1; - } - } - - if ((ret = __memp_fput(mpf, pagep, modified ? 
DB_MPOOL_DIRTY : 0)) != 0) - goto out; - -done: - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} diff --git a/storage/bdb/btree/bt_reclaim.c b/storage/bdb/btree/bt_reclaim.c deleted file mode 100644 index d7884a79e0c..00000000000 --- a/storage/bdb/btree/bt_reclaim.c +++ /dev/null @@ -1,76 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1998-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_reclaim.c,v 12.2 2005/06/16 20:20:19 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/btree.h" - -/* - * __bam_reclaim -- - * Free a database. - * - * PUBLIC: int __bam_reclaim __P((DB *, DB_TXN *)); - */ -int -__bam_reclaim(dbp, txn) - DB *dbp; - DB_TXN *txn; -{ - DBC *dbc; - int ret, t_ret; - - /* Acquire a cursor. */ - if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) - return (ret); - - /* Walk the tree, freeing pages. */ - ret = __bam_traverse(dbc, - DB_LOCK_WRITE, dbc->internal->root, __db_reclaim_callback, dbc); - - /* Discard the cursor. */ - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __bam_truncate -- - * Truncate a database. - * - * PUBLIC: int __bam_truncate __P((DBC *, u_int32_t *)); - */ -int -__bam_truncate(dbc, countp) - DBC *dbc; - u_int32_t *countp; -{ - db_trunc_param trunc; - int ret; - - trunc.count = 0; - trunc.dbc = dbc; - - /* Walk the tree, freeing pages. */ - ret = __bam_traverse(dbc, - DB_LOCK_WRITE, dbc->internal->root, __db_truncate_callback, &trunc); - - if (countp != NULL) - *countp = trunc.count; - - return (ret); -} diff --git a/storage/bdb/btree/bt_recno.c b/storage/bdb/btree/bt_recno.c deleted file mode 100644 index a7da96ded4d..00000000000 --- a/storage/bdb/btree/bt_recno.c +++ /dev/null @@ -1,1331 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_recno.c,v 12.6 2005/08/08 14:27:59 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/btree.h" -#include "dbinc/db_shash.h" -#include "dbinc/lock.h" - -static int __ram_add __P((DBC *, db_recno_t *, DBT *, u_int32_t, u_int32_t)); -static int __ram_source __P((DB *)); -static int __ram_sread __P((DBC *, db_recno_t)); -static int __ram_update __P((DBC *, db_recno_t, int)); - -/* - * In recno, there are two meanings to the on-page "deleted" flag. If we're - * re-numbering records, it means the record was implicitly created. We skip - * over implicitly created records if doing a cursor "next" or "prev", and - * return DB_KEYEMPTY if they're explicitly requested.. If not re-numbering - * records, it means that the record was implicitly created, or was deleted. - * We skip over implicitly created or deleted records if doing a cursor "next" - * or "prev", and return DB_KEYEMPTY if they're explicitly requested. - * - * If we're re-numbering records, then we have to detect in the cursor that - * a record was deleted, and adjust the cursor as necessary on the next get. - * If we're not re-numbering records, then we can detect that a record has - * been deleted by looking at the actual on-page record, so we completely - * ignore the cursor's delete flag. This is different from the B+tree code. 
- * It also maintains whether the cursor references a deleted record in the - * cursor, and it doesn't always check the on-page value. - */ -#define CD_SET(cp) { \ - if (F_ISSET(cp, C_RENUMBER)) \ - F_SET(cp, C_DELETED); \ -} -#define CD_CLR(cp) { \ - if (F_ISSET(cp, C_RENUMBER)) { \ - F_CLR(cp, C_DELETED); \ - cp->order = INVALID_ORDER; \ - } \ -} -#define CD_ISSET(cp) \ - (F_ISSET(cp, C_RENUMBER) && F_ISSET(cp, C_DELETED) ? 1 : 0) - -/* - * Macros for comparing the ordering of two cursors. - * cp1 comes before cp2 iff one of the following holds: - * cp1's recno is less than cp2's recno - * recnos are equal, both deleted, and cp1's order is less than cp2's - * recnos are equal, cp1 deleted, and cp2 not deleted - */ -#define C_LESSTHAN(cp1, cp2) \ - (((cp1)->recno < (cp2)->recno) || \ - (((cp1)->recno == (cp2)->recno) && \ - ((CD_ISSET((cp1)) && CD_ISSET((cp2)) && (cp1)->order < (cp2)->order) || \ - (CD_ISSET((cp1)) && !CD_ISSET((cp2)))))) - -/* - * cp1 is equal to cp2 iff their recnos and delete flags are identical, - * and if the delete flag is set their orders are also identical. - */ -#define C_EQUAL(cp1, cp2) \ - ((cp1)->recno == (cp2)->recno && CD_ISSET((cp1)) == CD_ISSET((cp2)) && \ - (!CD_ISSET((cp1)) || (cp1)->order == (cp2)->order)) - -/* - * Do we need to log the current cursor adjustment? - */ -#define CURADJ_LOG(dbc) \ - (DBC_LOGGING((dbc)) && (dbc)->txn != NULL && (dbc)->txn->parent != NULL) - -/* - * After a search, copy the found page into the cursor, discarding any - * currently held lock. - */ -#define STACK_TO_CURSOR(cp, ret) { \ - int __t_ret; \ - (cp)->page = (cp)->csp->page; \ - (cp)->pgno = (cp)->csp->page->pgno; \ - (cp)->indx = (cp)->csp->indx; \ - if ((__t_ret = __TLPUT(dbc, (cp)->lock)) != 0 && (ret) == 0) \ - ret = __t_ret; \ - (cp)->lock = (cp)->csp->lock; \ - (cp)->lock_mode = (cp)->csp->lock_mode; \ -} - -/* - * __ram_open -- - * Recno open function. - * - * PUBLIC: int __ram_open __P((DB *, - * PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t)); - */ -int -__ram_open(dbp, txn, name, base_pgno, flags) - DB *dbp; - DB_TXN *txn; - const char *name; - db_pgno_t base_pgno; - u_int32_t flags; -{ - BTREE *t; - DBC *dbc; - int ret, t_ret; - - COMPQUIET(name, NULL); - t = dbp->bt_internal; - - /* Start up the tree. */ - if ((ret = __bam_read_root(dbp, txn, base_pgno, flags)) != 0) - return (ret); - - /* - * If the user specified a source tree, open it and map it in. - * - * !!! - * We don't complain if the user specified transactions or threads. - * It's possible to make it work, but you'd better know what you're - * doing! - */ - if (t->re_source != NULL && (ret = __ram_source(dbp)) != 0) - return (ret); - - /* If we're snapshotting an underlying source file, do it now. */ - if (F_ISSET(dbp, DB_AM_SNAPSHOT)) { - /* Allocate a cursor. */ - if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) - return (ret); - - /* Do the snapshot. */ - if ((ret = __ram_update(dbc, - DB_MAX_RECORDS, 0)) != 0 && ret == DB_NOTFOUND) - ret = 0; - - /* Discard the cursor. */ - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - } - - return (ret); -} - -/* - * __ram_append -- - * Recno append function. - * - * PUBLIC: int __ram_append __P((DBC *, DBT *, DBT *)); - */ -int -__ram_append(dbc, key, data) - DBC *dbc; - DBT *key, *data; -{ - BTREE_CURSOR *cp; - int ret; - - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * Make sure we've read in all of the backing source file. If - * we found the record or it simply didn't exist, add the - * user's record. 
- */ - ret = __ram_update(dbc, DB_MAX_RECORDS, 0); - if (ret == 0 || ret == DB_NOTFOUND) - ret = __ram_add(dbc, &cp->recno, data, DB_APPEND, 0); - - /* Return the record number. */ - if (ret == 0) - ret = __db_retcopy(dbc->dbp->dbenv, key, &cp->recno, - sizeof(cp->recno), &dbc->rkey->data, &dbc->rkey->ulen); - - return (ret); -} - -/* - * __ram_c_del -- - * Recno cursor->c_del function. - * - * PUBLIC: int __ram_c_del __P((DBC *)); - */ -int -__ram_c_del(dbc) - DBC *dbc; -{ - BKEYDATA bk; - BTREE *t; - BTREE_CURSOR *cp; - DB *dbp; - DB_LSN lsn; - DBT hdr, data; - int exact, ret, stack, t_ret; - - dbp = dbc->dbp; - cp = (BTREE_CURSOR *)dbc->internal; - t = dbp->bt_internal; - stack = 0; - - /* - * The semantics of cursors during delete are as follows: in - * non-renumbering recnos, records are replaced with a marker - * containing a delete flag. If the record referenced by this cursor - * has already been deleted, we will detect that as part of the delete - * operation, and fail. - * - * In renumbering recnos, cursors which represent deleted items - * are flagged with the C_DELETED flag, and it is an error to - * call c_del a second time without an intervening cursor motion. - */ - if (CD_ISSET(cp)) - return (DB_KEYEMPTY); - - /* Search the tree for the key; delete only deletes exact matches. */ - if ((ret = __bam_rsearch(dbc, &cp->recno, S_DELETE, 1, &exact)) != 0) - goto err; - if (!exact) { - ret = DB_NOTFOUND; - goto err; - } - stack = 1; - - /* Copy the page into the cursor. */ - STACK_TO_CURSOR(cp, ret); - if (ret != 0) - goto err; - - /* - * If re-numbering records, the on-page deleted flag can only mean - * that this record was implicitly created. Applications aren't - * permitted to delete records they never created, return an error. - * - * If not re-numbering records, the on-page deleted flag means that - * this record was implicitly created, or, was deleted at some time. - * The former is an error because applications aren't permitted to - * delete records they never created, the latter is an error because - * if the record was "deleted", we could never have found it. - */ - if (B_DISSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type)) { - ret = DB_KEYEMPTY; - goto err; - } - - if (F_ISSET(cp, C_RENUMBER)) { - /* Delete the item, adjust the counts, adjust the cursors. */ - if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0) - goto err; - if ((ret = __bam_adjust(dbc, -1)) != 0) - goto err; - if (__ram_ca(dbc, CA_DELETE) > 0 && - CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(dbp, dbc->txn, - &lsn, 0, CA_DELETE, cp->root, cp->recno, cp->order)) != 0) - goto err; - - /* - * If the page is empty, delete it. - * - * We never delete a root page. First, root pages of primary - * databases never go away, recno or otherwise. However, if - * it's the root page of an off-page duplicates database, then - * it can be deleted. We don't delete it here because we have - * no way of telling the primary database page holder (e.g., - * the hash access method) that its page element should cleaned - * up because the underlying tree is gone. So, we keep the page - * around until the last cursor referencing the empty tree is - * are closed, and then clean it up. - */ - if (NUM_ENT(cp->page) == 0 && PGNO(cp->page) != cp->root) { - /* - * We want to delete a single item out of the last page - * that we're not deleting. - */ - ret = __bam_dpages(dbc, 0, 0); - - /* - * Regardless of the return from __bam_dpages, it will - * discard our stack and pinned page. 
- */ - stack = 0; - cp->page = NULL; - } - } else { - /* Use a delete/put pair to replace the record with a marker. */ - if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0) - goto err; - - B_TSET(bk.type, B_KEYDATA, 1); - bk.len = 0; - memset(&hdr, 0, sizeof(hdr)); - hdr.data = &bk; - hdr.size = SSZA(BKEYDATA, data); - memset(&data, 0, sizeof(data)); - data.data = (void *)""; - data.size = 0; - if ((ret = __db_pitem(dbc, - cp->page, cp->indx, BKEYDATA_SIZE(0), &hdr, &data)) != 0) - goto err; - } - - t->re_modified = 1; - -err: if (stack && (t_ret = __bam_stkrel(dbc, STK_CLRDBC)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __ram_c_get -- - * Recno cursor->c_get function. - * - * PUBLIC: int __ram_c_get - * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); - */ -int -__ram_c_get(dbc, key, data, flags, pgnop) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; - db_pgno_t *pgnop; -{ - BTREE_CURSOR *cp; - DB *dbp; - int cmp, exact, ret; - - COMPQUIET(pgnop, NULL); - - dbp = dbc->dbp; - cp = (BTREE_CURSOR *)dbc->internal; - - LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY); -retry: switch (flags) { - case DB_CURRENT: - /* - * If we're using mutable records and the deleted flag is - * set, the cursor is pointing at a nonexistent record; - * return an error. - */ - if (CD_ISSET(cp)) - return (DB_KEYEMPTY); - break; - case DB_NEXT_DUP: - /* - * If we're not in an off-page dup set, we know there's no - * next duplicate since recnos don't have them. If we - * are in an off-page dup set, the next item assuredly is - * a dup, so we set flags to DB_NEXT and keep going. - */ - if (!F_ISSET(dbc, DBC_OPD)) - return (DB_NOTFOUND); - /* FALLTHROUGH */ - case DB_NEXT_NODUP: - /* - * Recno databases don't have duplicates, set flags to DB_NEXT - * and keep going. - */ - /* FALLTHROUGH */ - case DB_NEXT: - flags = DB_NEXT; - /* - * If record numbers are mutable: if we just deleted a record, - * we have to avoid incrementing the record number so that we - * return the right record by virtue of renumbering the tree. - */ - if (CD_ISSET(cp)) { - /* - * Clear the flag, we've moved off the deleted record. - */ - CD_CLR(cp); - break; - } - - if (cp->recno != RECNO_OOB) { - ++cp->recno; - break; - } - /* FALLTHROUGH */ - case DB_FIRST: - flags = DB_NEXT; - cp->recno = 1; - break; - case DB_PREV_NODUP: - /* - * Recno databases don't have duplicates, set flags to DB_PREV - * and keep going. - */ - /* FALLTHROUGH */ - case DB_PREV: - flags = DB_PREV; - if (cp->recno != RECNO_OOB) { - if (cp->recno == 1) { - ret = DB_NOTFOUND; - goto err; - } - --cp->recno; - break; - } - /* FALLTHROUGH */ - case DB_LAST: - flags = DB_PREV; - if (((ret = __ram_update(dbc, - DB_MAX_RECORDS, 0)) != 0) && ret != DB_NOTFOUND) - goto err; - if ((ret = __bam_nrecs(dbc, &cp->recno)) != 0) - goto err; - if (cp->recno == 0) { - ret = DB_NOTFOUND; - goto err; - } - break; - case DB_GET_BOTHC: - /* - * If we're doing a join and these are offpage dups, - * we want to keep searching forward from after the - * current cursor position. Increment the recno by 1, - * then proceed as for a DB_SET. - * - * Otherwise, we know there are no additional matching - * data, as recnos don't have dups. return DB_NOTFOUND. - */ - if (F_ISSET(dbc, DBC_OPD)) { - cp->recno++; - break; - } - ret = DB_NOTFOUND; - goto err; - /* NOTREACHED */ - case DB_GET_BOTH: - case DB_GET_BOTH_RANGE: - /* - * If we're searching a set of off-page dups, we start - * a new linear search from the first record. 
Otherwise, - * we compare the single data item associated with the - * requested record for a match. - */ - if (F_ISSET(dbc, DBC_OPD)) { - cp->recno = 1; - break; - } - /* FALLTHROUGH */ - case DB_SET: - case DB_SET_RANGE: - if ((ret = __ram_getno(dbc, key, &cp->recno, 0)) != 0) - goto err; - break; - default: - ret = __db_unknown_flag(dbp->dbenv, "__ram_c_get", flags); - goto err; - } - - /* - * For DB_PREV, DB_LAST, DB_SET and DB_SET_RANGE, we have already - * called __ram_update() to make sure sufficient records have been - * read from the backing source file. Do it now for DB_CURRENT (if - * the current record was deleted we may need more records from the - * backing file for a DB_CURRENT operation), DB_FIRST and DB_NEXT. - * (We don't have to test for flags == DB_FIRST, because the switch - * statement above re-set flags to DB_NEXT in that case.) - */ - if ((flags == DB_NEXT || flags == DB_CURRENT) && ((ret = - __ram_update(dbc, cp->recno, 0)) != 0) && ret != DB_NOTFOUND) - goto err; - - for (;; ++cp->recno) { - /* Search the tree for the record. */ - if ((ret = __bam_rsearch(dbc, &cp->recno, - F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND, - 1, &exact)) != 0) - goto err; - if (!exact) { - ret = DB_NOTFOUND; - goto err; - } - - /* Copy the page into the cursor. */ - STACK_TO_CURSOR(cp, ret); - if (ret != 0) - goto err; - - /* - * If re-numbering records, the on-page deleted flag means this - * record was implicitly created. If not re-numbering records, - * the on-page deleted flag means this record was implicitly - * created, or, it was deleted at some time. Regardless, we - * skip such records if doing cursor next/prev operations or - * walking through off-page duplicates, and fail if they were - * requested explicitly by the application. - */ - if (B_DISSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type)) - switch (flags) { - case DB_NEXT: - case DB_PREV: - (void)__bam_stkrel(dbc, STK_CLRDBC); - goto retry; - case DB_GET_BOTH: - case DB_GET_BOTH_RANGE: - /* - * If we're an OPD tree, we don't care about - * matching a record number on a DB_GET_BOTH - * -- everything belongs to the same tree. A - * normal recno should give up and return - * DB_NOTFOUND if the matching recno is deleted. - */ - if (F_ISSET(dbc, DBC_OPD)) { - (void)__bam_stkrel(dbc, STK_CLRDBC); - continue; - } - ret = DB_NOTFOUND; - goto err; - default: - ret = DB_KEYEMPTY; - goto err; - } - - if (flags == DB_GET_BOTH || - flags == DB_GET_BOTHC || flags == DB_GET_BOTH_RANGE) { - if ((ret = __bam_cmp(dbp, data, - cp->page, cp->indx, __bam_defcmp, &cmp)) != 0) - return (ret); - if (cmp == 0) - break; - if (!F_ISSET(dbc, DBC_OPD)) { - ret = DB_NOTFOUND; - goto err; - } - (void)__bam_stkrel(dbc, STK_CLRDBC); - } else - break; - } - - /* Return the key if the user didn't give us one. */ - if (!F_ISSET(dbc, DBC_OPD)) { - if (flags != DB_GET_BOTH && flags != DB_GET_BOTH_RANGE && - flags != DB_SET && flags != DB_SET_RANGE) - ret = __db_retcopy(dbp->dbenv, - key, &cp->recno, sizeof(cp->recno), - &dbc->rkey->data, &dbc->rkey->ulen); - F_SET(key, DB_DBT_ISSET); - } - - /* The cursor was reset, no further delete adjustment is necessary. */ -err: CD_CLR(cp); - - return (ret); -} - -/* - * __ram_c_put -- - * Recno cursor->c_put function. 
- * - * PUBLIC: int __ram_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); - */ -int -__ram_c_put(dbc, key, data, flags, pgnop) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; - db_pgno_t *pgnop; -{ - BTREE_CURSOR *cp; - DB *dbp; - DB_LSN lsn; - int exact, nc, ret, t_ret; - u_int32_t iiflags; - void *arg; - - COMPQUIET(pgnop, NULL); - - dbp = dbc->dbp; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * DB_KEYFIRST and DB_KEYLAST mean different things if they're - * used in an off-page duplicate tree. If we're an off-page - * duplicate tree, they really mean "put at the beginning of the - * tree" and "put at the end of the tree" respectively, so translate - * them to something else. - */ - if (F_ISSET(dbc, DBC_OPD)) - switch (flags) { - case DB_KEYFIRST: - cp->recno = 1; - flags = DB_BEFORE; - break; - case DB_KEYLAST: - if ((ret = __ram_add(dbc, - &cp->recno, data, DB_APPEND, 0)) != 0) - return (ret); - if (CURADJ_LOG(dbc) && - (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, - CA_ICURRENT, cp->root, cp->recno, cp->order)) != 0) - return (ret); - return (0); - default: - break; - } - - /* - * Handle normal DB_KEYFIRST/DB_KEYLAST; for a recno, which has - * no duplicates, these are identical and mean "put the given - * datum at the given recno". - * - * Note that the code here used to be in __ram_put; now, we - * go through the access-method-common __db_put function, which - * handles DB_NOOVERWRITE, so we and __ram_add don't have to. - */ - if (flags == DB_KEYFIRST || flags == DB_KEYLAST) { - ret = __ram_getno(dbc, key, &cp->recno, 1); - if (ret == 0 || ret == DB_NOTFOUND) - ret = __ram_add(dbc, &cp->recno, data, 0, 0); - return (ret); - } - - /* - * If we're putting with a cursor that's marked C_DELETED, we need to - * take special care; the cursor doesn't "really" reference the item - * corresponding to its current recno, but instead is "between" that - * record and the current one. Translate the actual insert into - * DB_BEFORE, and let the __ram_ca work out the gory details of what - * should wind up pointing where. - */ - if (CD_ISSET(cp)) - iiflags = DB_BEFORE; - else - iiflags = flags; - -split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0) - goto err; - /* - * An inexact match is okay; it just means we're one record past the - * end, which is reasonable if we're marked deleted. - */ - DB_ASSERT(exact || CD_ISSET(cp)); - - /* Copy the page into the cursor. */ - STACK_TO_CURSOR(cp, ret); - if (ret != 0) - goto err; - - ret = __bam_iitem(dbc, key, data, iiflags, 0); - t_ret = __bam_stkrel(dbc, STK_CLRDBC); - - if (t_ret != 0 && (ret == 0 || ret == DB_NEEDSPLIT)) - ret = t_ret; - else if (ret == DB_NEEDSPLIT) { - arg = &cp->recno; - if ((ret = __bam_split(dbc, arg, NULL)) != 0) - goto err; - goto split; - } - if (ret != 0) - goto err; - - switch (flags) { /* Adjust the cursors. */ - case DB_AFTER: - nc = __ram_ca(dbc, CA_IAFTER); - - /* - * We only need to adjust this cursor forward if we truly added - * the item after the current recno, rather than remapping it - * to DB_BEFORE. - */ - if (iiflags == DB_AFTER) - ++cp->recno; - - /* Only log if __ram_ca found any relevant cursors. */ - if (nc > 0 && CURADJ_LOG(dbc) && - (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, CA_IAFTER, - cp->root, cp->recno, cp->order)) != 0) - goto err; - break; - case DB_BEFORE: - nc = __ram_ca(dbc, CA_IBEFORE); - --cp->recno; - - /* Only log if __ram_ca found any relevant cursors. 
*/ - if (nc > 0 && CURADJ_LOG(dbc) && - (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, CA_IBEFORE, - cp->root, cp->recno, cp->order)) != 0) - goto err; - break; - case DB_CURRENT: - /* - * We only need to do an adjustment if we actually - * added an item, which we only would have done if the - * cursor was marked deleted. - * - * Only log if __ram_ca found any relevant cursors. - */ - if (CD_ISSET(cp) && __ram_ca(dbc, CA_ICURRENT) > 0 && - CURADJ_LOG(dbc) && - (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, - CA_ICURRENT, cp->root, cp->recno, cp->order)) != 0) - goto err; - break; - default: - break; - } - - /* Return the key if we've created a new record. */ - if (!F_ISSET(dbc, DBC_OPD) && (flags == DB_AFTER || flags == DB_BEFORE)) - ret = __db_retcopy(dbp->dbenv, key, &cp->recno, - sizeof(cp->recno), &dbc->rkey->data, &dbc->rkey->ulen); - - /* The cursor was reset, no further delete adjustment is necessary. */ -err: CD_CLR(cp); - - return (ret); -} - -/* - * __ram_ca -- - * Adjust cursors. Returns the number of relevant cursors. - * - * PUBLIC: int __ram_ca __P((DBC *, ca_recno_arg)); - */ -int -__ram_ca(dbc_arg, op) - DBC *dbc_arg; - ca_recno_arg op; -{ - BTREE_CURSOR *cp, *cp_arg; - DB *dbp, *ldbp; - DB_ENV *dbenv; - DBC *dbc; - db_recno_t recno; - int adjusted, found; - u_int32_t order; - - dbp = dbc_arg->dbp; - dbenv = dbp->dbenv; - cp_arg = (BTREE_CURSOR *)dbc_arg->internal; - recno = cp_arg->recno; - - found = 0; - - /* - * It only makes sense to adjust cursors if we're a renumbering - * recno; we should only be called if this is one. - */ - DB_ASSERT(F_ISSET(cp_arg, C_RENUMBER)); - - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - /* - * Adjust the cursors. See the comment in __bam_ca_delete(). - */ - /* - * If we're doing a delete, we need to find the highest - * order of any cursor currently pointing at this item, - * so we can assign a higher order to the newly deleted - * cursor. Unfortunately, this requires a second pass through - * the cursor list. - */ - if (op == CA_DELETE) { - order = 1; - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - cp = (BTREE_CURSOR *)dbc->internal; - if (cp_arg->root == cp->root && - recno == cp->recno && CD_ISSET(cp) && - order <= cp->order) - order = cp->order + 1; - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - } - } else - order = INVALID_ORDER; - - /* Now go through and do the actual adjustments. */ - for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - cp = (BTREE_CURSOR *)dbc->internal; - if (cp_arg->root != cp->root) - continue; - ++found; - adjusted = 0; - switch (op) { - case CA_DELETE: - if (recno < cp->recno) { - --cp->recno; - /* - * If the adjustment made them equal, - * we have to merge the orders. - */ - if (recno == cp->recno && CD_ISSET(cp)) - cp->order += order; - } else if (recno == cp->recno && - !CD_ISSET(cp)) { - CD_SET(cp); - cp->order = order; - } - break; - case CA_IBEFORE: - /* - * IBEFORE is just like IAFTER, except that we - * adjust cursors on the current record too. 
- */ - if (C_EQUAL(cp_arg, cp)) { - ++cp->recno; - adjusted = 1; - } - goto iafter; - case CA_ICURRENT: - - /* - * If the original cursor wasn't deleted, we - * just did a replacement and so there's no - * need to adjust anything--we shouldn't have - * gotten this far. Otherwise, we behave - * much like an IAFTER, except that all - * cursors pointing to the current item get - * marked undeleted and point to the new - * item. - */ - DB_ASSERT(CD_ISSET(cp_arg)); - if (C_EQUAL(cp_arg, cp)) { - CD_CLR(cp); - break; - } - /* FALLTHROUGH */ - case CA_IAFTER: -iafter: if (!adjusted && C_LESSTHAN(cp_arg, cp)) { - ++cp->recno; - adjusted = 1; - } - if (recno == cp->recno && adjusted) - /* - * If we've moved this cursor's recno, - * split its order number--i.e., - * decrement it by enough so that - * the lowest cursor moved has order 1. - * cp_arg->order is the split point, - * so decrement by one less than that. - */ - cp->order -= (cp_arg->order - 1); - break; - } - } - MUTEX_UNLOCK(dbp->dbenv, dbp->mutex); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - return (found); -} - -/* - * __ram_getno -- - * Check the user's record number, and make sure we've seen it. - * - * PUBLIC: int __ram_getno __P((DBC *, const DBT *, db_recno_t *, int)); - */ -int -__ram_getno(dbc, key, rep, can_create) - DBC *dbc; - const DBT *key; - db_recno_t *rep; - int can_create; -{ - DB *dbp; - db_recno_t recno; - - dbp = dbc->dbp; - - /* Check the user's record number. */ - if ((recno = *(db_recno_t *)key->data) == 0) { - __db_err(dbp->dbenv, "illegal record number of 0"); - return (EINVAL); - } - if (rep != NULL) - *rep = recno; - - /* - * Btree can neither create records nor read them in. Recno can - * do both, see if we can find the record. - */ - return (dbc->dbtype == DB_RECNO ? - __ram_update(dbc, recno, can_create) : 0); -} - -/* - * __ram_update -- - * Ensure the tree has records up to and including the specified one. - */ -static int -__ram_update(dbc, recno, can_create) - DBC *dbc; - db_recno_t recno; - int can_create; -{ - BTREE *t; - DB *dbp; - DBT *rdata; - db_recno_t nrecs; - int ret; - - dbp = dbc->dbp; - t = dbp->bt_internal; - - /* - * If we can't create records and we've read the entire backing input - * file, we're done. - */ - if (!can_create && t->re_eof) - return (0); - - /* - * If we haven't seen this record yet, try to get it from the original - * file. - */ - if ((ret = __bam_nrecs(dbc, &nrecs)) != 0) - return (ret); - if (!t->re_eof && recno > nrecs) { - if ((ret = __ram_sread(dbc, recno)) != 0 && ret != DB_NOTFOUND) - return (ret); - if ((ret = __bam_nrecs(dbc, &nrecs)) != 0) - return (ret); - } - - /* - * If we can create records, create empty ones up to the requested - * record. - */ - if (!can_create || recno <= nrecs + 1) - return (0); - - rdata = &dbc->my_rdata; - rdata->flags = 0; - rdata->size = 0; - - while (recno > ++nrecs) - if ((ret = __ram_add(dbc, - &nrecs, rdata, 0, BI_DELETED)) != 0) - return (ret); - return (0); -} - -/* - * __ram_source -- - * Load information about the backing file. - */ -static int -__ram_source(dbp) - DB *dbp; -{ - BTREE *t; - char *source; - int ret; - - t = dbp->bt_internal; - - /* Find the real name, and swap out the one we had before. */ - if ((ret = __db_appname(dbp->dbenv, - DB_APP_DATA, t->re_source, 0, NULL, &source)) != 0) - return (ret); - __os_free(dbp->dbenv, t->re_source); - t->re_source = source; - - /* - * !!! - * It's possible that the backing source file is read-only. 
We don't - * much care other than we'll complain if there are any modifications - * when it comes time to write the database back to the source. - */ - if ((t->re_fp = fopen(t->re_source, "r")) == NULL) { - ret = __os_get_errno(); - __db_err(dbp->dbenv, "%s: %s", t->re_source, db_strerror(ret)); - return (ret); - } - - t->re_eof = 0; - return (0); -} - -/* - * __ram_writeback -- - * Rewrite the backing file. - * - * PUBLIC: int __ram_writeback __P((DB *)); - */ -int -__ram_writeback(dbp) - DB *dbp; -{ - BTREE *t; - DB_ENV *dbenv; - DBC *dbc; - DBT key, data; - FILE *fp; - db_recno_t keyno; - int ret, t_ret; - u_int8_t delim, *pad; - - t = dbp->bt_internal; - dbenv = dbp->dbenv; - fp = NULL; - pad = NULL; - - /* If the file wasn't modified, we're done. */ - if (!t->re_modified) - return (0); - - /* If there's no backing source file, we're done. */ - if (t->re_source == NULL) { - t->re_modified = 0; - return (0); - } - - /* - * We step through the records, writing each one out. Use the record - * number and the dbp->get() function, instead of a cursor, so we find - * and write out "deleted" or non-existent records. The DB handle may - * be threaded, so allocate memory as we go. - */ - memset(&key, 0, sizeof(key)); - key.size = sizeof(db_recno_t); - key.data = &keyno; - memset(&data, 0, sizeof(data)); - F_SET(&data, DB_DBT_REALLOC); - - /* Allocate a cursor. */ - if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) - return (ret); - - /* - * Read any remaining records into the tree. - * - * !!! - * This is why we can't support transactions when applications specify - * backing (re_source) files. At this point we have to read in the - * rest of the records from the file so that we can write all of the - * records back out again, which could modify a page for which we'd - * have to log changes and which we don't have locked. This could be - * partially fixed by taking a snapshot of the entire file during the - * DB->open as DB->open is transaction protected. But, if a checkpoint - * occurs then, the part of the log holding the copy of the file could - * be discarded, and that would make it impossible to recover in the - * face of disaster. This could all probably be fixed, but it would - * require transaction protecting the backing source file. - * - * XXX - * This could be made to work now that we have transactions protecting - * file operations. Margo has specifically asked for the privilege of - * doing this work. - */ - if ((ret = - __ram_update(dbc, DB_MAX_RECORDS, 0)) != 0 && ret != DB_NOTFOUND) - goto err; - - /* - * Close any existing file handle and re-open the file, truncating it. - */ - if (t->re_fp != NULL) { - if (fclose(t->re_fp) != 0) { - ret = __os_get_errno(); - goto err; - } - t->re_fp = NULL; - } - if ((fp = fopen(t->re_source, "w")) == NULL) { - ret = __os_get_errno(); - __db_err(dbenv, "%s: %s", t->re_source, db_strerror(ret)); - goto err; - } - - /* - * We'll need the delimiter if we're doing variable-length records, - * and the pad character if we're doing fixed-length records. 
- */ - delim = t->re_delim; - for (keyno = 1;; ++keyno) { - switch (ret = __db_get(dbp, NULL, &key, &data, 0)) { - case 0: - if (data.size != 0 && - fwrite(data.data, 1, data.size, fp) != data.size) - goto write_err; - break; - case DB_KEYEMPTY: - if (F_ISSET(dbp, DB_AM_FIXEDLEN)) { - if (pad == NULL) { - if ((ret = __os_malloc( - dbenv, t->re_len, &pad)) != 0) - goto err; - memset(pad, t->re_pad, t->re_len); - } - if (fwrite(pad, 1, t->re_len, fp) != t->re_len) - goto write_err; - } - break; - case DB_NOTFOUND: - ret = 0; - goto done; - default: - goto err; - } - if (!F_ISSET(dbp, DB_AM_FIXEDLEN) && - fwrite(&delim, 1, 1, fp) != 1) { -write_err: ret = __os_get_errno(); - __db_err(dbenv, - "%s: write failed to backing file: %s", - t->re_source, strerror(ret)); - goto err; - } - } - -err: -done: /* Close the file descriptor. */ - if (fp != NULL && fclose(fp) != 0) { - t_ret = __os_get_errno(); - if (ret == 0) - ret = t_ret; - __db_err(dbenv, "%s: %s", t->re_source, db_strerror(t_ret)); - } - - /* Discard the cursor. */ - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - /* Discard memory allocated to hold the data items. */ - if (data.data != NULL) - __os_ufree(dbenv, data.data); - if (pad != NULL) - __os_free(dbenv, pad); - - if (ret == 0) - t->re_modified = 0; - - return (ret); -} - -/* - * __ram_sread -- - * Read records from a source file. - */ -static int -__ram_sread(dbc, top) - DBC *dbc; - db_recno_t top; -{ - BTREE *t; - DB *dbp; - DBT data, *rdata; - db_recno_t recno; - size_t len; - int ch, ret, was_modified; - - t = dbc->dbp->bt_internal; - dbp = dbc->dbp; - was_modified = t->re_modified; - - if ((ret = __bam_nrecs(dbc, &recno)) != 0) - return (ret); - - /* - * Use the record key return memory, it's only a short-term use. - * The record data return memory is used by __bam_iitem, which - * we'll indirectly call, so use the key so as not to collide. - */ - len = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : 256; - rdata = &dbc->my_rkey; - if (rdata->ulen < len) { - if ((ret = __os_realloc( - dbp->dbenv, len, &rdata->data)) != 0) { - rdata->ulen = 0; - rdata->data = NULL; - return (ret); - } - rdata->ulen = (u_int32_t)len; - } - - memset(&data, 0, sizeof(data)); - while (recno < top) { - data.data = rdata->data; - data.size = 0; - if (F_ISSET(dbp, DB_AM_FIXEDLEN)) - for (len = t->re_len; len > 0; --len) { - if ((ch = getc(t->re_fp)) == EOF) { - if (data.size == 0) - goto eof; - break; - } - ((u_int8_t *)data.data)[data.size++] = ch; - } - else - for (;;) { - if ((ch = getc(t->re_fp)) == EOF) { - if (data.size == 0) - goto eof; - break; - } - if (ch == t->re_delim) - break; - - ((u_int8_t *)data.data)[data.size++] = ch; - if (data.size == rdata->ulen) { - if ((ret = __os_realloc(dbp->dbenv, - rdata->ulen *= 2, - &rdata->data)) != 0) { - rdata->ulen = 0; - rdata->data = NULL; - return (ret); - } else - data.data = rdata->data; - } - } - - /* - * Another process may have read this record from the input - * file and stored it into the database already, in which - * case we don't need to repeat that operation. We detect - * this by checking if the last record we've read is greater - * or equal to the number of records in the database. - */ - if (t->re_last >= recno) { - ++recno; - if ((ret = __ram_add(dbc, &recno, &data, 0, 0)) != 0) - goto err; - } - ++t->re_last; - } - - if (0) { -eof: t->re_eof = 1; - ret = DB_NOTFOUND; - } -err: if (!was_modified) - t->re_modified = 0; - - return (ret); -} - -/* - * __ram_add -- - * Add records into the tree. 
- */ -static int -__ram_add(dbc, recnop, data, flags, bi_flags) - DBC *dbc; - db_recno_t *recnop; - DBT *data; - u_int32_t flags, bi_flags; -{ - BTREE_CURSOR *cp; - int exact, ret, stack, t_ret; - - cp = (BTREE_CURSOR *)dbc->internal; - -retry: /* Find the slot for insertion. */ - if ((ret = __bam_rsearch(dbc, recnop, - S_INSERT | (flags == DB_APPEND ? S_APPEND : 0), 1, &exact)) != 0) - return (ret); - stack = 1; - - /* Copy the page into the cursor. */ - STACK_TO_CURSOR(cp, ret); - if (ret != 0) - goto err; - - /* - * The application may modify the data based on the selected record - * number. - */ - if (flags == DB_APPEND && dbc->dbp->db_append_recno != NULL && - (ret = dbc->dbp->db_append_recno(dbc->dbp, data, *recnop)) != 0) - goto err; - - /* - * Select the arguments for __bam_iitem() and do the insert. If the - * key is an exact match, or we're replacing the data item with a - * new data item, replace the current item. If the key isn't an exact - * match, we're inserting a new key/data pair, before the search - * location. - */ - switch (ret = __bam_iitem(dbc, - NULL, data, exact ? DB_CURRENT : DB_BEFORE, bi_flags)) { - case 0: - /* - * Don't adjust anything. - * - * If we inserted a record, no cursors need adjusting because - * the only new record it's possible to insert is at the very - * end of the tree. The necessary adjustments to the internal - * page counts were made by __bam_iitem(). - * - * If we overwrote a record, no cursors need adjusting because - * future DBcursor->get calls will simply return the underlying - * record (there's no adjustment made for the DB_CURRENT flag - * when a cursor get operation immediately follows a cursor - * delete operation, and the normal adjustment for the DB_NEXT - * flag is still correct). - */ - break; - case DB_NEEDSPLIT: - /* Discard the stack of pages and split the page. */ - (void)__bam_stkrel(dbc, STK_CLRDBC); - stack = 0; - - if ((ret = __bam_split(dbc, recnop, NULL)) != 0) - goto err; - - goto retry; - /* NOTREACHED */ - default: - goto err; - } - -err: if (stack && (t_ret = __bam_stkrel(dbc, STK_CLRDBC)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} diff --git a/storage/bdb/btree/bt_rsearch.c b/storage/bdb/btree/bt_rsearch.c deleted file mode 100644 index 8e93ee213dd..00000000000 --- a/storage/bdb/btree/bt_rsearch.c +++ /dev/null @@ -1,431 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: bt_rsearch.c,v 12.5 2005/08/08 03:37:05 ubell Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/btree.h" -#include "dbinc/db_shash.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" - -/* - * __bam_rsearch -- - * Search a btree for a record number. - * - * PUBLIC: int __bam_rsearch __P((DBC *, db_recno_t *, u_int32_t, int, int *)); - */ -int -__bam_rsearch(dbc, recnop, flags, stop, exactp) - DBC *dbc; - db_recno_t *recnop; - u_int32_t flags; - int stop, *exactp; -{ - BINTERNAL *bi; - BTREE_CURSOR *cp; - DB *dbp; - DB_LOCK lock; - DB_MPOOLFILE *mpf; - PAGE *h; - RINTERNAL *ri; - db_indx_t adjust, deloffset, indx, top; - db_lockmode_t lock_mode; - db_pgno_t pg; - db_recno_t recno, t_recno, total; - int ret, stack, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - h = NULL; - - BT_STK_CLR(cp); - - /* - * There are several ways we search a btree tree. The flags argument - * specifies if we're acquiring read or write locks and if we are - * locking pairs of pages. In addition, if we're adding or deleting - * an item, we have to lock the entire tree, regardless. See btree.h - * for more details. - * - * If write-locking pages, we need to know whether or not to acquire a - * write lock on a page before getting it. This depends on how deep it - * is in tree, which we don't know until we acquire the root page. So, - * if we need to lock the root page we may have to upgrade it later, - * because we won't get the correct lock initially. - * - * Retrieve the root page. - */ - - if ((ret = __bam_get_root(dbc, cp->root, stop, flags, &stack)) != 0) - return (ret); - lock_mode = cp->csp->lock_mode; - lock = cp->csp->lock; - h = cp->csp->page; - - BT_STK_CLR(cp); - /* - * If appending to the tree, set the record number now -- we have the - * root page locked. - * - * Delete only deletes exact matches, read only returns exact matches. - * Note, this is different from __bam_search(), which returns non-exact - * matches for read. - * - * The record may not exist. We can only return the correct location - * for the record immediately after the last record in the tree, so do - * a fast check now. - */ - total = RE_NREC(h); - if (LF_ISSET(S_APPEND)) { - *exactp = 0; - *recnop = recno = total + 1; - } else { - recno = *recnop; - if (recno <= total) - *exactp = 1; - else { - *exactp = 0; - if (!LF_ISSET(S_PAST_EOF) || recno > total + 1) { - /* - * Keep the page locked for serializability. - * - * XXX - * This leaves the root page locked, which will - * eliminate any concurrency. A possible fix - * would be to lock the last leaf page instead. 
- */ - ret = __memp_fput(mpf, h, 0); - if ((t_ret = - __TLPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - return (ret == 0 ? DB_NOTFOUND : ret); - } - } - } - - /* - * !!! - * Record numbers in the tree are 0-based, but the recno is - * 1-based. All of the calculations below have to take this - * into account. - */ - for (total = 0;;) { - switch (TYPE(h)) { - case P_LBTREE: - case P_LDUP: - recno -= total; - /* - * There may be logically deleted records on the page. - * If there are enough, the record may not exist. - */ - if (TYPE(h) == P_LBTREE) { - adjust = P_INDX; - deloffset = O_INDX; - } else { - adjust = O_INDX; - deloffset = 0; - } - for (t_recno = 0, indx = 0;; indx += adjust) { - if (indx >= NUM_ENT(h)) { - *exactp = 0; - if (!LF_ISSET(S_PAST_EOF) || - recno > t_recno + 1) { - ret = __memp_fput(mpf, h, 0); - h = NULL; - if ((t_ret = __TLPUT(dbc, - lock)) != 0 && ret == 0) - ret = t_ret; - if (ret == 0) - ret = DB_NOTFOUND; - goto err; - } - } - if (!B_DISSET(GET_BKEYDATA(dbp, h, - indx + deloffset)->type) && - ++t_recno == recno) - break; - } - - /* Correct from 1-based to 0-based for a page offset. */ - BT_STK_ENTER(dbp->dbenv, - cp, h, indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - return (0); - case P_IBTREE: - for (indx = 0, top = NUM_ENT(h);;) { - bi = GET_BINTERNAL(dbp, h, indx); - if (++indx == top || total + bi->nrecs >= recno) - break; - total += bi->nrecs; - } - pg = bi->pgno; - break; - case P_LRECNO: - recno -= total; - - /* Correct from 1-based to 0-based for a page offset. */ - --recno; - BT_STK_ENTER(dbp->dbenv, - cp, h, recno, lock, lock_mode, ret); - if (ret != 0) - goto err; - return (0); - case P_IRECNO: - for (indx = 0, top = NUM_ENT(h);;) { - ri = GET_RINTERNAL(dbp, h, indx); - if (++indx == top || total + ri->nrecs >= recno) - break; - total += ri->nrecs; - } - pg = ri->pgno; - break; - default: - return (__db_pgfmt(dbp->dbenv, h->pgno)); - } - --indx; - - /* Return if this is the lowest page wanted. */ - if (stop == LEVEL(h)) { - BT_STK_ENTER(dbp->dbenv, - cp, h, indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - return (0); - } - if (stack) { - BT_STK_PUSH(dbp->dbenv, - cp, h, indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - h = NULL; - - lock_mode = DB_LOCK_WRITE; - if ((ret = - __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) - goto err; - } else { - /* - * Decide if we want to return a pointer to the next - * page in the stack. If we do, write lock it and - * never unlock it. - */ - if ((LF_ISSET(S_PARENT) && - (u_int8_t)(stop + 1) >= (u_int8_t)(LEVEL(h) - 1)) || - (LEVEL(h) - 1) == LEAFLEVEL) - stack = 1; - - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; - h = NULL; - - lock_mode = stack && - LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ; - if ((ret = __db_lget(dbc, - LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) { - /* - * If we fail, discard the lock we held. This - * is OK because this only happens when we are - * descending the tree holding read-locks. - */ - (void)__LPUT(dbc, lock); - goto err; - } - } - - if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) - goto err; - } - /* NOTREACHED */ - -err: if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - - BT_STK_POP(cp); - __bam_stkrel(dbc, 0); - - return (ret); -} - -/* - * __bam_adjust -- - * Adjust the tree after adding or deleting a record. 
- * - * PUBLIC: int __bam_adjust __P((DBC *, int32_t)); - */ -int -__bam_adjust(dbc, adjust) - DBC *dbc; - int32_t adjust; -{ - BTREE_CURSOR *cp; - DB *dbp; - DB_MPOOLFILE *mpf; - EPG *epg; - PAGE *h; - db_pgno_t root_pgno; - int ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - root_pgno = cp->root; - - /* Update the record counts for the tree. */ - for (epg = cp->sp; epg <= cp->csp; ++epg) { - h = epg->page; - if (TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO) { - if (DBC_LOGGING(dbc)) { - if ((ret = __bam_cadjust_log(dbp, dbc->txn, - &LSN(h), 0, PGNO(h), &LSN(h), - (u_int32_t)epg->indx, adjust, - PGNO(h) == root_pgno ? - CAD_UPDATEROOT : 0)) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(h)); - - if (TYPE(h) == P_IBTREE) - GET_BINTERNAL(dbp, h, epg->indx)->nrecs += - adjust; - else - GET_RINTERNAL(dbp, h, epg->indx)->nrecs += - adjust; - - if (PGNO(h) == root_pgno) - RE_NREC_ADJ(h, adjust); - - if ((ret = __memp_fset(mpf, h, DB_MPOOL_DIRTY)) != 0) - return (ret); - } - } - return (0); -} - -/* - * __bam_nrecs -- - * Return the number of records in the tree. - * - * PUBLIC: int __bam_nrecs __P((DBC *, db_recno_t *)); - */ -int -__bam_nrecs(dbc, rep) - DBC *dbc; - db_recno_t *rep; -{ - DB *dbp; - DB_LOCK lock; - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t pgno; - int ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - - pgno = dbc->internal->root; - if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - return (ret); - - *rep = RE_NREC(h); - - ret = __memp_fput(mpf, h, 0); - if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __bam_total -- - * Return the number of records below a page. - * - * PUBLIC: db_recno_t __bam_total __P((DB *, PAGE *)); - */ -db_recno_t -__bam_total(dbp, h) - DB *dbp; - PAGE *h; -{ - db_recno_t nrecs; - db_indx_t indx, top; - - nrecs = 0; - top = NUM_ENT(h); - - switch (TYPE(h)) { - case P_LBTREE: - /* Check for logically deleted records. */ - for (indx = 0; indx < top; indx += P_INDX) - if (!B_DISSET( - GET_BKEYDATA(dbp, h, indx + O_INDX)->type)) - ++nrecs; - break; - case P_LDUP: - /* Check for logically deleted records. */ - for (indx = 0; indx < top; indx += O_INDX) - if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type)) - ++nrecs; - break; - case P_IBTREE: - for (indx = 0; indx < top; indx += O_INDX) - nrecs += GET_BINTERNAL(dbp, h, indx)->nrecs; - break; - case P_LRECNO: - nrecs = NUM_ENT(h); - break; - case P_IRECNO: - for (indx = 0; indx < top; indx += O_INDX) - nrecs += GET_RINTERNAL(dbp, h, indx)->nrecs; - break; - } - - return (nrecs); -} diff --git a/storage/bdb/btree/bt_search.c b/storage/bdb/btree/bt_search.c deleted file mode 100644 index aedd5304a91..00000000000 --- a/storage/bdb/btree/bt_search.c +++ /dev/null @@ -1,706 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: bt_search.c,v 12.17 2005/11/10 21:17:13 ubell Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" - -/* - * __bam_get_root -- - * Fetch the root of a tree and see if we want to keep - * it in the stack. - * - * PUBLIC: int __bam_get_root __P((DBC *, db_pgno_t, int, u_int32_t, int *)); - */ -int -__bam_get_root(dbc, pg, slevel, flags, stack) - DBC *dbc; - db_pgno_t pg; - int slevel; - u_int32_t flags; - int *stack; -{ - BTREE_CURSOR *cp; - DB *dbp; - DB_LOCK lock; - DB_MPOOLFILE *mpf; - PAGE *h; - db_lockmode_t lock_mode; - int ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - /* - * If write-locking pages, we need to know whether or not to acquire a - * write lock on a page before getting it. This depends on how deep it - * is in tree, which we don't know until we acquire the root page. So, - * if we need to lock the root page we may have to upgrade it later, - * because we won't get the correct lock initially. - * - * Retrieve the root page. - */ -try_again: - *stack = LF_ISSET(S_STACK) && - (dbc->dbtype == DB_RECNO || F_ISSET(cp, C_RECNUM)); - lock_mode = DB_LOCK_READ; - if (*stack || - LF_ISSET(S_DEL) || (LF_ISSET(S_NEXT) && LF_ISSET(S_WRITE))) - lock_mode = DB_LOCK_WRITE; - if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) { - /* Did not read it, so we can release the lock */ - (void)__LPUT(dbc, lock); - return (ret); - } - - /* - * Decide if we need to save this page; if we do, write lock it. - * We deliberately don't lock-couple on this call. If the tree - * is tiny, i.e., one page, and two threads are busily updating - * the root page, we're almost guaranteed deadlocks galore, as - * each one gets a read lock and then blocks the other's attempt - * for a write lock. 
- */ - if (!*stack && - ((LF_ISSET(S_PARENT) && (u_int8_t)(slevel + 1) >= LEVEL(h)) || - (LF_ISSET(S_WRITE) && LEVEL(h) == LEAFLEVEL) || - (LF_ISSET(S_START) && slevel == LEVEL(h)))) { - if (!STD_LOCKING(dbc)) - goto no_relock; - ret = __memp_fput(mpf, h, 0); - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - return (ret); - lock_mode = DB_LOCK_WRITE; - if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) { - /* Did not read it, so we can release the lock */ - (void)__LPUT(dbc, lock); - return (ret); - } - if (!((LF_ISSET(S_PARENT) && - (u_int8_t)(slevel + 1) >= LEVEL(h)) || - (LF_ISSET(S_WRITE) && LEVEL(h) == LEAFLEVEL) || - (LF_ISSET(S_START) && slevel == LEVEL(h)))) { - /* Someone else split the root, start over. */ - ret = __memp_fput(mpf, h, 0); - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - return (ret); - goto try_again; - } -no_relock: *stack = 1; - } - BT_STK_ENTER(dbp->dbenv, cp, h, 0, lock, lock_mode, ret); - - return (ret); -} - -/* - * __bam_search -- - * Search a btree for a key. - * - * PUBLIC: int __bam_search __P((DBC *, db_pgno_t, - * PUBLIC: const DBT *, u_int32_t, int, db_recno_t *, int *)); - */ -int -__bam_search(dbc, root_pgno, key, flags, slevel, recnop, exactp) - DBC *dbc; - db_pgno_t root_pgno; - const DBT *key; - u_int32_t flags; - int slevel, *exactp; - db_recno_t *recnop; -{ - BTREE *t; - BTREE_CURSOR *cp; - DB *dbp; - DB_LOCK lock; - DB_MPOOLFILE *mpf; - PAGE *h; - db_indx_t base, i, indx, *inp, lim; - db_lockmode_t lock_mode; - db_pgno_t pg; - db_recno_t recno; - int adjust, cmp, deloffset, ret, stack, t_ret; - int (*func) __P((DB *, const DBT *, const DBT *)); - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - h = NULL; - t = dbp->bt_internal; - recno = 0; - - BT_STK_CLR(cp); - - /* - * There are several ways we search a btree tree. The flags argument - * specifies if we're acquiring read or write locks, if we position - * to the first or last item in a set of duplicates, if we return - * deleted items, and if we are locking pairs of pages. In addition, - * if we're modifying record numbers, we have to lock the entire tree - * regardless. See btree.h for more details. - */ - - if (root_pgno == PGNO_INVALID) - root_pgno = cp->root; - if ((ret = __bam_get_root(dbc, root_pgno, slevel, flags, &stack)) != 0) - return (ret); - lock_mode = cp->csp->lock_mode; - lock = cp->csp->lock; - h = cp->csp->page; - - BT_STK_CLR(cp); - - /* Choose a comparison function. */ - func = F_ISSET(dbc, DBC_OPD) ? - (dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare) : - t->bt_compare; - - for (;;) { - inp = P_INP(dbp, h); - adjust = TYPE(h) == P_LBTREE ? P_INDX : O_INDX; - if (LF_ISSET(S_MIN | S_MAX)) { - if (LF_ISSET(S_MIN) || NUM_ENT(h) == 0) - indx = 0; - else if (TYPE(h) == P_LBTREE) - indx = NUM_ENT(h) - 2; - else - indx = NUM_ENT(h) - 1; - - if (LEVEL(h) == LEAFLEVEL || - (!LF_ISSET(S_START) && LEVEL(h) == slevel)) { - if (LF_ISSET(S_NEXT)) - goto get_next; - goto found; - } - goto next; - } - /* - * Do a binary search on the current page. If we're searching - * a Btree leaf page, we have to walk the indices in groups of - * two. If we're searching an internal page or a off-page dup - * page, they're an index per page item. If we find an exact - * match on a leaf page, we're done. 
- */ - for (base = 0, - lim = NUM_ENT(h) / (db_indx_t)adjust; lim != 0; lim >>= 1) { - indx = base + ((lim >> 1) * adjust); - if ((ret = - __bam_cmp(dbp, key, h, indx, func, &cmp)) != 0) - goto err; - if (cmp == 0) { - if (LEVEL(h) == LEAFLEVEL || - (!LF_ISSET(S_START) && - LEVEL(h) == slevel)) { - if (LF_ISSET(S_NEXT)) - goto get_next; - goto found; - } - goto next; - } - if (cmp > 0) { - base = indx + adjust; - --lim; - } - } - - /* - * No match found. Base is the smallest index greater than - * key and may be zero or a last + O_INDX index. - * - * If it's a leaf page or the stopping point, - * return base as the "found" value. - * Delete only deletes exact matches. - */ - if (LEVEL(h) == LEAFLEVEL || - (!LF_ISSET(S_START) && LEVEL(h) == slevel)) { - *exactp = 0; - - if (LF_ISSET(S_EXACT)) { - ret = DB_NOTFOUND; - goto err; - } - - if (LF_ISSET(S_STK_ONLY)) { - BT_STK_NUM(dbp->dbenv, cp, h, base, ret); - if ((t_ret = - __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = - __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - return (ret); - } - if (LF_ISSET(S_NEXT)) { -get_next: /* - * The caller could have asked for a NEXT - * at the root if the tree recently collapsed. - */ - if (PGNO(h) == root_pgno) { - ret = DB_NOTFOUND; - goto err; - } - /* - * Save the root of the subtree - * and drop the rest of the subtree - * and search down again starting at - * the next child. - */ - if ((ret = __LPUT(dbc, lock)) != 0) - goto err; - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; - h = NULL; - LF_SET(S_MIN); - LF_CLR(S_NEXT); - indx = cp->sp->indx + 1; - if (indx == NUM_ENT(cp->sp->page)) { - ret = DB_NOTFOUND; - cp->csp++; - goto err; - } - h = cp->sp->page; - cp->sp->page = NULL; - lock = cp->sp->lock; - LOCK_INIT(cp->sp->lock); - if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) - goto err; - stack = 1; - goto next; - } - - /* - * !!! - * Possibly returning a deleted record -- DB_SET_RANGE, - * DB_KEYFIRST and DB_KEYLAST don't require an exact - * match, and we don't want to walk multiple pages here - * to find an undeleted record. This is handled by the - * calling routine. - */ - if (LF_ISSET(S_DEL) && cp->csp == cp->sp) - cp->csp++; - BT_STK_ENTER(dbp->dbenv, - cp, h, base, lock, lock_mode, ret); - if (ret != 0) - goto err; - return (0); - } - - /* - * If it's not a leaf page, record the internal page (which is - * a parent page for the key). Decrement the base by 1 if it's - * non-zero so that if a split later occurs, the inserted page - * will be to the right of the saved page. - */ - indx = base > 0 ? base - O_INDX : base; - - /* - * If we're trying to calculate the record number, sum up - * all the record numbers on this page up to the indx point. - */ -next: if (recnop != NULL) - for (i = 0; i < indx; ++i) - recno += GET_BINTERNAL(dbp, h, i)->nrecs; - - pg = GET_BINTERNAL(dbp, h, indx)->pgno; - - /* See if we are at the level to start stacking. */ - if (LF_ISSET(S_START) && slevel == LEVEL(h)) - stack = 1; - - if (LF_ISSET(S_STK_ONLY)) { - if (slevel == LEVEL(h)) { - BT_STK_NUM(dbp->dbenv, cp, h, indx, ret); - if ((t_ret = - __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = - __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - return (ret); - } - BT_STK_NUMPUSH(dbp->dbenv, cp, h, indx, ret); - (void)__memp_fput(mpf, h, 0); - h = NULL; - if ((ret = __db_lget(dbc, - LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) { - /* - * Discard our lock and return on failure. 
This - * is OK because it only happens when descending - * the tree holding read-locks. - */ - (void)__LPUT(dbc, lock); - return (ret); - } - } else if (stack) { - /* Return if this is the lowest page wanted. */ - if (LF_ISSET(S_PARENT) && slevel == LEVEL(h)) { - BT_STK_ENTER(dbp->dbenv, - cp, h, indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - return (0); - } - if (LF_ISSET(S_DEL) && NUM_ENT(h) > 1) { - /* - * There was a page with a singleton pointer - * to a non-empty subtree. - */ - cp->csp--; - if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) - goto err; - stack = 0; - goto do_del; - } - BT_STK_PUSH(dbp->dbenv, - cp, h, indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - h = NULL; - - lock_mode = DB_LOCK_WRITE; - if ((ret = - __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) - goto err; - } else { - /* - * Decide if we want to return a reference to the next - * page in the return stack. If so, lock it and never - * unlock it. - */ - if ((LF_ISSET(S_PARENT) && - (u_int8_t)(slevel + 1) >= (LEVEL(h) - 1)) || - (LEVEL(h) - 1) == LEAFLEVEL) - stack = 1; - - /* - * Returning a subtree. See if we have hit the start - * point if so save the parent and set stack. - * Otherwise free the parent and temporarily - * save this one. - * For S_DEL we need to find a page with 1 entry. - * For S_NEXT we want find the minimal subtree - * that contains the key and the next page. - * We save pages as long as we are at the right - * edge of the subtree. When we leave the right - * edge, then drop the subtree. - */ - if (!LF_ISSET(S_DEL | S_NEXT)) { - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; - goto lock_next; - } - - if ((LF_ISSET(S_DEL) && NUM_ENT(h) == 1)) { - stack = 1; - LF_SET(S_WRITE); - /* Push the parent. */ - cp->csp++; - /* Push this node. */ - BT_STK_PUSH(dbp->dbenv, cp, h, - indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - LOCK_INIT(lock); - } else { - /* - * See if we want to save the tree so far. - * If we are looking for the next key, - * then we must save this node if we are - * at the end of the page. If not then - * discard anything we have saved so far. - * For delete only keep one node until - * we find a singleton. - */ -do_del: if (cp->csp->page != NULL) { - if (LF_ISSET(S_NEXT) && - indx == NUM_ENT(h) - 1) - cp->csp++; - else if ((ret = - __bam_stkrel(dbc, STK_NOLOCK)) != 0) - goto err; - } - /* Save this node. */ - BT_STK_ENTER(dbp->dbenv, cp, - h, indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - LOCK_INIT(lock); - } - -lock_next: h = NULL; - - if (stack && LF_ISSET(S_WRITE)) - lock_mode = DB_LOCK_WRITE; - if ((ret = __db_lget(dbc, - LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) { - /* - * If we fail, discard the lock we held. This - * is OK because this only happens when we are - * descending the tree holding read-locks. - */ - (void)__LPUT(dbc, lock); - if (LF_ISSET(S_DEL | S_NEXT)) - cp->csp++; - goto err; - } - } - if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) - goto err; - } - /* NOTREACHED */ - -found: *exactp = 1; - - /* - * If we got here, we know that we have a Btree leaf or off-page - * duplicates page. If it's a Btree leaf page, we have to handle - * on-page duplicates. - * - * If there are duplicates, go to the first/last one. This is - * safe because we know that we're not going to leave the page, - * all duplicate sets that are not on overflow pages exist on a - * single leaf page. 
- */ - if (TYPE(h) == P_LBTREE && NUM_ENT(h) > P_INDX) { - if (LF_ISSET(S_DUPLAST)) - while (indx < (db_indx_t)(NUM_ENT(h) - P_INDX) && - inp[indx] == inp[indx + P_INDX]) - indx += P_INDX; - else if (LF_ISSET(S_DUPFIRST)) - while (indx > 0 && - inp[indx] == inp[indx - P_INDX]) - indx -= P_INDX; - } - - /* - * Now check if we are allowed to return deleted items; if not, then - * find the next (or previous) non-deleted duplicate entry. (We do - * not move from the original found key on the basis of the S_DELNO - * flag.) - */ - DB_ASSERT(recnop == NULL || LF_ISSET(S_DELNO)); - if (LF_ISSET(S_DELNO)) { - deloffset = TYPE(h) == P_LBTREE ? O_INDX : 0; - if (LF_ISSET(S_DUPLAST)) - while (B_DISSET(GET_BKEYDATA(dbp, - h, indx + deloffset)->type) && indx > 0 && - inp[indx] == inp[indx - adjust]) - indx -= adjust; - else - while (B_DISSET(GET_BKEYDATA(dbp, - h, indx + deloffset)->type) && - indx < (db_indx_t)(NUM_ENT(h) - adjust) && - inp[indx] == inp[indx + adjust]) - indx += adjust; - - /* - * If we weren't able to find a non-deleted duplicate, return - * DB_NOTFOUND. - */ - if (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type)) { - ret = DB_NOTFOUND; - goto err; - } - - /* - * Increment the record counter to point to the found element. - * Ignore any deleted key/data pairs. There doesn't need to - * be any correction for duplicates, as Btree doesn't support - * duplicates and record numbers in the same tree. - */ - if (recnop != NULL) { - DB_ASSERT(TYPE(h) == P_LBTREE); - - for (i = 0; i < indx; i += P_INDX) - if (!B_DISSET( - GET_BKEYDATA(dbp, h, i + O_INDX)->type)) - ++recno; - - /* Correct the number for a 0-base. */ - *recnop = recno + 1; - } - } - - if (LF_ISSET(S_STK_ONLY)) { - BT_STK_NUM(dbp->dbenv, cp, h, indx, ret); - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - } else { - if (LF_ISSET(S_DEL) && cp->csp == cp->sp) - cp->csp++; - BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret); - } - if (ret != 0) - goto err; - - return (0); - -err: if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - - /* Keep any not-found page locked for serializability. */ - if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - - BT_STK_POP(cp); - __bam_stkrel(dbc, 0); - - return (ret); -} - -/* - * __bam_stkrel -- - * Release all pages currently held in the stack. - * - * PUBLIC: int __bam_stkrel __P((DBC *, u_int32_t)); - */ -int -__bam_stkrel(dbc, flags) - DBC *dbc; - u_int32_t flags; -{ - BTREE_CURSOR *cp; - DB *dbp; - DB_MPOOLFILE *mpf; - EPG *epg; - int ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * Release inner pages first. - * - * The caller must be sure that setting STK_NOLOCK will not effect - * either serializability or recoverability. - */ - for (ret = 0, epg = cp->sp; epg <= cp->csp; ++epg) { - if (epg->page != NULL) { - if (LF_ISSET(STK_CLRDBC) && cp->page == epg->page) { - cp->page = NULL; - LOCK_INIT(cp->lock); - } - if ((t_ret = - __memp_fput(mpf, epg->page, 0)) != 0 && ret == 0) - ret = t_ret; - /* - * XXX - * Temporary fix for #3243 -- under certain deadlock - * conditions we call here again and re-free the page. - * The correct fix is to never release a stack that - * doesn't hold items. - */ - epg->page = NULL; - } - /* - * We set this if we need to release our pins, - * but are not logically ready to have the pages - * visible. 
- */ - if (LF_ISSET(STK_PGONLY)) - continue; - if (LF_ISSET(STK_NOLOCK)) { - if ((t_ret = __LPUT(dbc, epg->lock)) != 0 && ret == 0) - ret = t_ret; - } else - if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) - ret = t_ret; - } - - /* Clear the stack, all pages have been released. */ - if (!LF_ISSET(STK_PGONLY)) - BT_STK_CLR(cp); - - return (ret); -} - -/* - * __bam_stkgrow -- - * Grow the stack. - * - * PUBLIC: int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *)); - */ -int -__bam_stkgrow(dbenv, cp) - DB_ENV *dbenv; - BTREE_CURSOR *cp; -{ - EPG *p; - size_t entries; - int ret; - - entries = cp->esp - cp->sp; - - if ((ret = __os_calloc(dbenv, entries * 2, sizeof(EPG), &p)) != 0) - return (ret); - memcpy(p, cp->sp, entries * sizeof(EPG)); - if (cp->sp != cp->stack) - __os_free(dbenv, cp->sp); - cp->sp = p; - cp->csp = p + entries; - cp->esp = p + entries * 2; - return (0); -} diff --git a/storage/bdb/btree/bt_split.c b/storage/bdb/btree/bt_split.c deleted file mode 100644 index fb696ebf768..00000000000 --- a/storage/bdb/btree/bt_split.c +++ /dev/null @@ -1,1194 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $Id: bt_split.c,v 12.4 2005/06/16 20:20:22 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" -#include "dbinc/btree.h" - -static int __bam_broot __P((DBC *, PAGE *, PAGE *, PAGE *)); -static int __bam_page __P((DBC *, EPG *, EPG *)); -static int __bam_psplit __P((DBC *, EPG *, PAGE *, PAGE *, db_indx_t *)); -static int __bam_root __P((DBC *, EPG *)); -static int __ram_root __P((DBC *, PAGE *, PAGE *, PAGE *)); - -/* - * __bam_split -- - * Split a page. - * - * PUBLIC: int __bam_split __P((DBC *, void *, db_pgno_t *)); - */ -int -__bam_split(dbc, arg, root_pgnop) - DBC *dbc; - void *arg; - db_pgno_t *root_pgnop; -{ - BTREE_CURSOR *cp; - enum { UP, DOWN } dir; - db_pgno_t root_pgno; - int exact, level, ret; - - cp = (BTREE_CURSOR *)dbc->internal; - root_pgno = cp->root; - - /* - * The locking protocol we use to avoid deadlock to acquire locks by - * walking down the tree, but we do it as lazily as possible, locking - * the root only as a last resort. We expect all stack pages to have - * been discarded before we're called; we discard all short-term locks. - * - * When __bam_split is first called, we know that a leaf page was too - * full for an insert. We don't know what leaf page it was, but we - * have the key/recno that caused the problem. We call XX_search to - * reacquire the leaf page, but this time get both the leaf page and - * its parent, locked. We then split the leaf page and see if the new - * internal key will fit into the parent page. If it will, we're done. - * - * If it won't, we discard our current locks and repeat the process, - * only this time acquiring the parent page and its parent, locked. - * This process repeats until we succeed in the split, splitting the - * root page as the final resort. The entire process then repeats, - * as necessary, until we split a leaf page. - * - * XXX - * A traditional method of speeding this up is to maintain a stack of - * the pages traversed in the original search. You can detect if the - * stack is correct by storing the page's LSN when it was searched and - * comparing that LSN with the current one when it's locked during the - * split. This would be an easy change for this code, but I have no - * numbers that indicate it's worthwhile. - */ - for (dir = UP, level = LEAFLEVEL;; dir == UP ? ++level : --level) { - /* - * Acquire a page and its parent, locked. - */ - if ((ret = (dbc->dbtype == DB_BTREE ? - __bam_search(dbc, PGNO_INVALID, - arg, S_WRPAIR, level, NULL, &exact) : - __bam_rsearch(dbc, - (db_recno_t *)arg, S_WRPAIR, level, &exact))) != 0) - break; - - if (root_pgnop != NULL) - *root_pgnop = cp->csp[0].page->pgno == root_pgno ? - root_pgno : cp->csp[-1].page->pgno; - /* - * Split the page if it still needs it (it's possible another - * thread of control has already split the page). If we are - * guaranteed that two items will fit on the page, the split - * is no longer necessary. - */ - if (2 * B_MAXSIZEONPAGE(cp->ovflsize) - <= (db_indx_t)P_FREESPACE(dbc->dbp, cp->csp[0].page)) { - __bam_stkrel(dbc, STK_NOLOCK); - break; - } - ret = cp->csp[0].page->pgno == root_pgno ? - __bam_root(dbc, &cp->csp[0]) : - __bam_page(dbc, &cp->csp[-1], &cp->csp[0]); - BT_STK_CLR(cp); - - switch (ret) { - case 0: - /* Once we've split the leaf page, we're done. */ - if (level == LEAFLEVEL) - return (0); - - /* Switch directions. 
*/ - if (dir == UP) - dir = DOWN; - break; - case DB_NEEDSPLIT: - /* - * It's possible to fail to split repeatedly, as other - * threads may be modifying the tree, or the page usage - * is sufficiently bad that we don't get enough space - * the first time. - */ - if (dir == DOWN) - dir = UP; - break; - default: - goto err; - } - } - -err: if (root_pgnop != NULL) - *root_pgnop = cp->root; - return (ret); -} - -/* - * __bam_root -- - * Split the root page of a btree. - */ -static int -__bam_root(dbc, cp) - DBC *dbc; - EPG *cp; -{ - DB *dbp; - DBT log_dbt; - DB_LSN log_lsn; - DB_MPOOLFILE *mpf; - PAGE *lp, *rp; - db_indx_t split; - u_int32_t opflags; - int ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - lp = rp = NULL; - - /* Yeah, right. */ - if (cp->page->level >= MAXBTREELEVEL) { - __db_err(dbp->dbenv, - "Too many btree levels: %d", cp->page->level); - ret = ENOSPC; - goto err; - } - - /* Create new left and right pages for the split. */ - if ((ret = __db_new(dbc, TYPE(cp->page), &lp)) != 0 || - (ret = __db_new(dbc, TYPE(cp->page), &rp)) != 0) - goto err; - P_INIT(lp, dbp->pgsize, lp->pgno, - PGNO_INVALID, ISINTERNAL(cp->page) ? PGNO_INVALID : rp->pgno, - cp->page->level, TYPE(cp->page)); - P_INIT(rp, dbp->pgsize, rp->pgno, - ISINTERNAL(cp->page) ? PGNO_INVALID : lp->pgno, PGNO_INVALID, - cp->page->level, TYPE(cp->page)); - - /* Split the page. */ - if ((ret = __bam_psplit(dbc, cp, lp, rp, &split)) != 0) - goto err; - - /* Log the change. */ - if (DBC_LOGGING(dbc)) { - memset(&log_dbt, 0, sizeof(log_dbt)); - log_dbt.data = cp->page; - log_dbt.size = dbp->pgsize; - ZERO_LSN(log_lsn); - opflags = F_ISSET( - (BTREE_CURSOR *)dbc->internal, C_RECNUM) ? SPL_NRECS : 0; - if ((ret = __bam_split_log(dbp, - dbc->txn, &LSN(cp->page), 0, PGNO(lp), &LSN(lp), PGNO(rp), - &LSN(rp), (u_int32_t)NUM_ENT(lp), 0, &log_lsn, - dbc->internal->root, &log_dbt, opflags)) != 0) - goto err; - } else - LSN_NOT_LOGGED(LSN(cp->page)); - LSN(lp) = LSN(cp->page); - LSN(rp) = LSN(cp->page); - - /* Clean up the new root page. */ - if ((ret = (dbc->dbtype == DB_RECNO ? - __ram_root(dbc, cp->page, lp, rp) : - __bam_broot(dbc, cp->page, lp, rp))) != 0) - goto err; - - /* Adjust any cursors. */ - ret = __bam_ca_split(dbc, cp->page->pgno, lp->pgno, rp->pgno, split, 1); - - /* Success or error: release pages and locks. */ -err: if ((t_ret = - __memp_fput(mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, cp->lock)) != 0 && ret == 0) - ret = t_ret; - if (lp != NULL && - (t_ret = __memp_fput(mpf, lp, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if (rp != NULL && - (t_ret = __memp_fput(mpf, rp, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __bam_page -- - * Split the non-root page of a btree. - */ -static int -__bam_page(dbc, pp, cp) - DBC *dbc; - EPG *pp, *cp; -{ - BTREE_CURSOR *bc; - DBT log_dbt; - DB_LSN log_lsn; - DB *dbp; - DB_LOCK rplock, tplock; - DB_MPOOLFILE *mpf; - DB_LSN save_lsn; - PAGE *lp, *rp, *alloc_rp, *tp; - db_indx_t split; - u_int32_t opflags; - int ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - alloc_rp = lp = rp = tp = NULL; - LOCK_INIT(rplock); - LOCK_INIT(tplock); - ret = -1; - - /* - * Create a new right page for the split, and fill in everything - * except its LSN and page number. - * - * We malloc space for both the left and right pages, so we don't get - * a new page from the underlying buffer pool until we know the split - * is going to succeed. 
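The up/down retry protocol described in the comment on the removed __bam_split above is easier to see in isolation. The following is a minimal stand-alone sketch of the same control flow, not the Berkeley DB code: the tree model, the has_room array, and try_split are invented purely for illustration.

#include <stdio.h>

#define LEAF_LEVEL 1
#define ROOT_LEVEL 4            /* pretend tree height for the illustration */

static int has_room[ROOT_LEVEL + 1];   /* can the page at this level take a key? */

/* 0 on success, 1 ("need split") when the parent page is too full. */
static int
try_split(int level)
{
	if (level != ROOT_LEVEL && !has_room[level + 1])
		return (1);          /* promoted key will not fit in the parent */
	has_room[level] = 1;         /* splitting frees space at this level */
	printf("split level %d\n", level);
	return (0);
}

int
main(void)
{
	enum { UP, DOWN } dir;
	int level;

	/*
	 * Same shape as the removed loop: start at the leaf, move the
	 * target level up while parents are too full, walk back down
	 * after each successful split, and stop once the leaf is split.
	 */
	for (dir = UP, level = LEAF_LEVEL;; dir == UP ? ++level : --level) {
		if (try_split(level) == 0) {
			if (level == LEAF_LEVEL)
				break;       /* leaf split: done */
			if (dir == UP)
				dir = DOWN;
		} else {
			printf("level %d: parent full, retrying higher\n", level);
			if (dir == DOWN)
				dir = UP;
		}
	}
	return (0);
}

Run as-is, the sketch fails upward from the leaf to the root, then splits every level on the way back down, which is exactly the worst-case walk the removed comment describes.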
The reason is that we can't release locks - * acquired during the get-a-new-page process because metadata page - * locks can't be discarded on failure since we may have modified the - * free list. So, if you assume that we're holding a write lock on the - * leaf page which ran out of space and started this split (e.g., we - * have already written records to the page, or we retrieved a record - * from it with the DB_RMW flag set), failing in a split with both a - * leaf page locked and the metadata page locked can potentially lock - * up the tree badly, because we've violated the rule of always locking - * down the tree, and never up. - */ - if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &rp)) != 0) - goto err; - P_INIT(rp, dbp->pgsize, 0, - ISINTERNAL(cp->page) ? PGNO_INVALID : PGNO(cp->page), - ISINTERNAL(cp->page) ? PGNO_INVALID : NEXT_PGNO(cp->page), - cp->page->level, TYPE(cp->page)); - - /* - * Create new left page for the split, and fill in everything - * except its LSN and next-page page number. - */ - if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &lp)) != 0) - goto err; - P_INIT(lp, dbp->pgsize, PGNO(cp->page), - ISINTERNAL(cp->page) ? PGNO_INVALID : PREV_PGNO(cp->page), - ISINTERNAL(cp->page) ? PGNO_INVALID : 0, - cp->page->level, TYPE(cp->page)); - - /* - * Split right. - * - * Only the indices are sorted on the page, i.e., the key/data pairs - * aren't, so it's simpler to copy the data from the split page onto - * two new pages instead of copying half the data to a new right page - * and compacting the left page in place. Since the left page can't - * change, we swap the original and the allocated left page after the - * split. - */ - if ((ret = __bam_psplit(dbc, cp, lp, rp, &split)) != 0) - goto err; - - /* - * Test to see if we are going to be able to insert the new pages into - * the parent page. The interesting failure here is that the parent - * page can't hold the new keys, and has to be split in turn, in which - * case we want to release all the locks we can. - */ - if ((ret = __bam_pinsert(dbc, pp, lp, rp, BPI_SPACEONLY)) != 0) - goto err; - - /* - * Fix up the previous pointer of any leaf page following the split - * page. - * - * There's interesting deadlock situations here as we try to write-lock - * a page that's not in our direct ancestry. Consider a cursor walking - * backward through the leaf pages, that has our following page locked, - * and is waiting on a lock for the page we're splitting. In that case - * we're going to deadlock here. It's probably OK, stepping backward - * through the tree isn't a common operation. - */ - if (ISLEAF(cp->page) && NEXT_PGNO(cp->page) != PGNO_INVALID) { - if ((ret = __db_lget(dbc, - 0, NEXT_PGNO(cp->page), DB_LOCK_WRITE, 0, &tplock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &NEXT_PGNO(cp->page), 0, &tp)) != 0) - goto err; - } - - /* - * We've got everything locked down we need, and we know the split - * is going to succeed. Go and get the additional page we'll need. - */ - if ((ret = __db_new(dbc, TYPE(cp->page), &alloc_rp)) != 0) - goto err; - - /* - * Lock the new page. We need to do this for two reasons: first, the - * fast-lookup code might have a reference to this page in bt_lpgno if - * the page was recently deleted from the tree, and that code doesn't - * walk the tree and so won't encounter the parent's page lock. - * Second, a dirty reader could get to this page via the parent or old - * page after the split is done but before the transaction is committed - * or aborted. 
- */ - if ((ret = __db_lget(dbc, - 0, PGNO(alloc_rp), DB_LOCK_WRITE, 0, &rplock)) != 0) - goto err; - - /* - * Fix up the page numbers we didn't have before. We have to do this - * before calling __bam_pinsert because it may copy a page number onto - * the parent page and it takes the page number from its page argument. - */ - PGNO(rp) = NEXT_PGNO(lp) = PGNO(alloc_rp); - - /* Actually update the parent page. */ - if ((ret = __bam_pinsert(dbc, pp, lp, rp, 0)) != 0) - goto err; - - bc = (BTREE_CURSOR *)dbc->internal; - /* Log the change. */ - if (DBC_LOGGING(dbc)) { - memset(&log_dbt, 0, sizeof(log_dbt)); - log_dbt.data = cp->page; - log_dbt.size = dbp->pgsize; - if (tp == NULL) - ZERO_LSN(log_lsn); - opflags = F_ISSET(bc, C_RECNUM) ? SPL_NRECS : 0; - if ((ret = __bam_split_log(dbp, dbc->txn, &LSN(cp->page), 0, - PGNO(cp->page), &LSN(cp->page), PGNO(alloc_rp), - &LSN(alloc_rp), (u_int32_t)NUM_ENT(lp), - tp == NULL ? 0 : PGNO(tp), - tp == NULL ? &log_lsn : &LSN(tp), - PGNO_INVALID, &log_dbt, opflags)) != 0) - goto err; - - } else - LSN_NOT_LOGGED(LSN(cp->page)); - - /* Update the LSNs for all involved pages. */ - LSN(alloc_rp) = LSN(cp->page); - LSN(lp) = LSN(cp->page); - LSN(rp) = LSN(cp->page); - if (tp != NULL) - LSN(tp) = LSN(cp->page); - - /* - * Copy the left and right pages into place. There are two paths - * through here. Either we are logging and we set the LSNs in the - * logging path. However, if we are not logging, then we do not - * have valid LSNs on lp or rp. The correct LSNs to use are the - * ones on the page we got from __db_new or the one that was - * originally on cp->page. In both cases, we save the LSN from the - * real database page (not a malloc'd one) and reapply it after we - * do the copy. - */ - save_lsn = alloc_rp->lsn; - memcpy(alloc_rp, rp, LOFFSET(dbp, rp)); - memcpy((u_int8_t *)alloc_rp + HOFFSET(rp), - (u_int8_t *)rp + HOFFSET(rp), dbp->pgsize - HOFFSET(rp)); - alloc_rp->lsn = save_lsn; - - save_lsn = cp->page->lsn; - memcpy(cp->page, lp, LOFFSET(dbp, lp)); - memcpy((u_int8_t *)cp->page + HOFFSET(lp), - (u_int8_t *)lp + HOFFSET(lp), dbp->pgsize - HOFFSET(lp)); - cp->page->lsn = save_lsn; - - /* Fix up the next-page link. */ - if (tp != NULL) - PREV_PGNO(tp) = PGNO(rp); - - /* Adjust any cursors. */ - if ((ret = __bam_ca_split(dbc, - PGNO(cp->page), PGNO(cp->page), PGNO(rp), split, 0)) != 0) - goto err; - - __os_free(dbp->dbenv, lp); - __os_free(dbp->dbenv, rp); - - /* - * Success -- write the real pages back to the store. As we never - * acquired any sort of lock on the new page, we release it before - * releasing locks on the pages that reference it. We're finished - * modifying the page so it's not really necessary, but it's neater. 
- */ - if ((t_ret = - __memp_fput(mpf, alloc_rp, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, rplock)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = - __memp_fput(mpf, pp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, pp->lock)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = - __memp_fput(mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, cp->lock)) != 0 && ret == 0) - ret = t_ret; - if (tp != NULL) { - if ((t_ret = - __memp_fput(mpf, tp, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, tplock)) != 0 && ret == 0) - ret = t_ret; - } - return (ret); - -err: if (lp != NULL) - __os_free(dbp->dbenv, lp); - if (rp != NULL) - __os_free(dbp->dbenv, rp); - if (alloc_rp != NULL) - (void)__memp_fput(mpf, alloc_rp, 0); - if (tp != NULL) - (void)__memp_fput(mpf, tp, 0); - - /* We never updated the new or next pages, we can release them. */ - (void)__LPUT(dbc, rplock); - (void)__LPUT(dbc, tplock); - - (void)__memp_fput(mpf, pp->page, 0); - if (ret == DB_NEEDSPLIT) - (void)__LPUT(dbc, pp->lock); - else - (void)__TLPUT(dbc, pp->lock); - - (void)__memp_fput(mpf, cp->page, 0); - if (ret == DB_NEEDSPLIT) - (void)__LPUT(dbc, cp->lock); - else - (void)__TLPUT(dbc, cp->lock); - - return (ret); -} - -/* - * __bam_broot -- - * Fix up the btree root page after it has been split. - */ -static int -__bam_broot(dbc, rootp, lp, rp) - DBC *dbc; - PAGE *rootp, *lp, *rp; -{ - BINTERNAL bi, *child_bi; - BKEYDATA *child_bk; - BTREE_CURSOR *cp; - DB *dbp; - DBT hdr, data; - db_pgno_t root_pgno; - int ret; - - dbp = dbc->dbp; - cp = (BTREE_CURSOR *)dbc->internal; - - /* - * If the root page was a leaf page, change it into an internal page. - * We copy the key we split on (but not the key's data, in the case of - * a leaf page) to the new root page. - */ - root_pgno = cp->root; - P_INIT(rootp, dbp->pgsize, - root_pgno, PGNO_INVALID, PGNO_INVALID, lp->level + 1, P_IBTREE); - - memset(&data, 0, sizeof(data)); - memset(&hdr, 0, sizeof(hdr)); - - /* - * The btree comparison code guarantees that the left-most key on any - * internal btree page is never used, so it doesn't need to be filled - * in. Set the record count if necessary. - */ - memset(&bi, 0, sizeof(bi)); - bi.len = 0; - B_TSET(bi.type, B_KEYDATA, 0); - bi.pgno = lp->pgno; - if (F_ISSET(cp, C_RECNUM)) { - bi.nrecs = __bam_total(dbp, lp); - RE_NREC_SET(rootp, bi.nrecs); - } - hdr.data = &bi; - hdr.size = SSZA(BINTERNAL, data); - if ((ret = - __db_pitem(dbc, rootp, 0, BINTERNAL_SIZE(0), &hdr, NULL)) != 0) - return (ret); - - switch (TYPE(rp)) { - case P_IBTREE: - /* Copy the first key of the child page onto the root page. */ - child_bi = GET_BINTERNAL(dbp, rp, 0); - - bi.len = child_bi->len; - B_TSET(bi.type, child_bi->type, 0); - bi.pgno = rp->pgno; - if (F_ISSET(cp, C_RECNUM)) { - bi.nrecs = __bam_total(dbp, rp); - RE_NREC_ADJ(rootp, bi.nrecs); - } - hdr.data = &bi; - hdr.size = SSZA(BINTERNAL, data); - data.data = child_bi->data; - data.size = child_bi->len; - if ((ret = __db_pitem(dbc, rootp, 1, - BINTERNAL_SIZE(child_bi->len), &hdr, &data)) != 0) - return (ret); - - /* Increment the overflow ref count. */ - if (B_TYPE(child_bi->type) == B_OVERFLOW) - if ((ret = __db_ovref(dbc, - ((BOVERFLOW *)(child_bi->data))->pgno, 1)) != 0) - return (ret); - break; - case P_LDUP: - case P_LBTREE: - /* Copy the first key of the child page onto the root page. 
*/ - child_bk = GET_BKEYDATA(dbp, rp, 0); - switch (B_TYPE(child_bk->type)) { - case B_KEYDATA: - bi.len = child_bk->len; - B_TSET(bi.type, child_bk->type, 0); - bi.pgno = rp->pgno; - if (F_ISSET(cp, C_RECNUM)) { - bi.nrecs = __bam_total(dbp, rp); - RE_NREC_ADJ(rootp, bi.nrecs); - } - hdr.data = &bi; - hdr.size = SSZA(BINTERNAL, data); - data.data = child_bk->data; - data.size = child_bk->len; - if ((ret = __db_pitem(dbc, rootp, 1, - BINTERNAL_SIZE(child_bk->len), &hdr, &data)) != 0) - return (ret); - break; - case B_DUPLICATE: - case B_OVERFLOW: - bi.len = BOVERFLOW_SIZE; - B_TSET(bi.type, child_bk->type, 0); - bi.pgno = rp->pgno; - if (F_ISSET(cp, C_RECNUM)) { - bi.nrecs = __bam_total(dbp, rp); - RE_NREC_ADJ(rootp, bi.nrecs); - } - hdr.data = &bi; - hdr.size = SSZA(BINTERNAL, data); - data.data = child_bk; - data.size = BOVERFLOW_SIZE; - if ((ret = __db_pitem(dbc, rootp, 1, - BINTERNAL_SIZE(BOVERFLOW_SIZE), &hdr, &data)) != 0) - return (ret); - - /* Increment the overflow ref count. */ - if (B_TYPE(child_bk->type) == B_OVERFLOW) - if ((ret = __db_ovref(dbc, - ((BOVERFLOW *)child_bk)->pgno, 1)) != 0) - return (ret); - break; - default: - return (__db_pgfmt(dbp->dbenv, rp->pgno)); - } - break; - default: - return (__db_pgfmt(dbp->dbenv, rp->pgno)); - } - return (0); -} - -/* - * __ram_root -- - * Fix up the recno root page after it has been split. - */ -static int -__ram_root(dbc, rootp, lp, rp) - DBC *dbc; - PAGE *rootp, *lp, *rp; -{ - DB *dbp; - DBT hdr; - RINTERNAL ri; - db_pgno_t root_pgno; - int ret; - - dbp = dbc->dbp; - root_pgno = dbc->internal->root; - - /* Initialize the page. */ - P_INIT(rootp, dbp->pgsize, - root_pgno, PGNO_INVALID, PGNO_INVALID, lp->level + 1, P_IRECNO); - - /* Initialize the header. */ - memset(&hdr, 0, sizeof(hdr)); - hdr.data = &ri; - hdr.size = RINTERNAL_SIZE; - - /* Insert the left and right keys, set the header information. */ - ri.pgno = lp->pgno; - ri.nrecs = __bam_total(dbp, lp); - if ((ret = __db_pitem(dbc, rootp, 0, RINTERNAL_SIZE, &hdr, NULL)) != 0) - return (ret); - RE_NREC_SET(rootp, ri.nrecs); - ri.pgno = rp->pgno; - ri.nrecs = __bam_total(dbp, rp); - if ((ret = __db_pitem(dbc, rootp, 1, RINTERNAL_SIZE, &hdr, NULL)) != 0) - return (ret); - RE_NREC_ADJ(rootp, ri.nrecs); - return (0); -} - -/* - * __bam_pinsert -- - * Insert a new key into a parent page, completing the split. - * - * PUBLIC: int __bam_pinsert __P((DBC *, EPG *, PAGE *, PAGE *, int)); - */ -int -__bam_pinsert(dbc, parent, lchild, rchild, flags) - DBC *dbc; - EPG *parent; - PAGE *lchild, *rchild; - int flags; -{ - BINTERNAL bi, *child_bi; - BKEYDATA *child_bk, *tmp_bk; - BTREE *t; - BTREE_CURSOR *cp; - DB *dbp; - DBT a, b, hdr, data; - PAGE *ppage; - RINTERNAL ri; - db_indx_t off; - db_recno_t nrecs; - size_t (*func) __P((DB *, const DBT *, const DBT *)); - u_int32_t n, nbytes, nksize; - int ret; - - dbp = dbc->dbp; - cp = (BTREE_CURSOR *)dbc->internal; - t = dbp->bt_internal; - ppage = parent->page; - - /* If handling record numbers, count records split to the right page. */ - nrecs = F_ISSET(cp, C_RECNUM) && - !LF_ISSET(BPI_SPACEONLY) ? __bam_total(dbp, rchild) : 0; - - /* - * Now we insert the new page's first key into the parent page, which - * completes the split. The parent points to a PAGE and a page index - * offset, where the new key goes ONE AFTER the index, because we split - * to the right. - * - * XXX - * Some btree algorithms replace the key for the old page as well as - * the new page. 
We don't, as there's no reason to believe that the - * first key on the old page is any better than the key we have, and, - * in the case of a key being placed at index 0 causing the split, the - * key is unavailable. - */ - off = parent->indx + O_INDX; - - /* - * Calculate the space needed on the parent page. - * - * Prefix trees: space hack used when inserting into BINTERNAL pages. - * Retain only what's needed to distinguish between the new entry and - * the LAST entry on the page to its left. If the keys compare equal, - * retain the entire key. We ignore overflow keys, and the entire key - * must be retained for the next-to-leftmost key on the leftmost page - * of each level, or the search will fail. Applicable ONLY to internal - * pages that have leaf pages as children. Further reduction of the - * key between pairs of internal pages loses too much information. - */ - switch (TYPE(rchild)) { - case P_IBTREE: - child_bi = GET_BINTERNAL(dbp, rchild, 0); - nbytes = BINTERNAL_PSIZE(child_bi->len); - - if (P_FREESPACE(dbp, ppage) < nbytes) - return (DB_NEEDSPLIT); - if (LF_ISSET(BPI_SPACEONLY)) - return (0); - - /* Add a new record for the right page. */ - memset(&bi, 0, sizeof(bi)); - bi.len = child_bi->len; - B_TSET(bi.type, child_bi->type, 0); - bi.pgno = rchild->pgno; - bi.nrecs = nrecs; - memset(&hdr, 0, sizeof(hdr)); - hdr.data = &bi; - hdr.size = SSZA(BINTERNAL, data); - memset(&data, 0, sizeof(data)); - data.data = child_bi->data; - data.size = child_bi->len; - if ((ret = __db_pitem(dbc, ppage, off, - BINTERNAL_SIZE(child_bi->len), &hdr, &data)) != 0) - return (ret); - - /* Increment the overflow ref count. */ - if (B_TYPE(child_bi->type) == B_OVERFLOW) - if ((ret = __db_ovref(dbc, - ((BOVERFLOW *)(child_bi->data))->pgno, 1)) != 0) - return (ret); - break; - case P_LDUP: - case P_LBTREE: - child_bk = GET_BKEYDATA(dbp, rchild, 0); - switch (B_TYPE(child_bk->type)) { - case B_KEYDATA: - nbytes = BINTERNAL_PSIZE(child_bk->len); - nksize = child_bk->len; - - /* - * Prefix compression: - * We set t->bt_prefix to NULL if we have a comparison - * callback but no prefix compression callback. But, - * if we're splitting in an off-page duplicates tree, - * we still have to do some checking. If using the - * default off-page duplicates comparison routine we - * can use the default prefix compression callback. If - * not using the default off-page duplicates comparison - * routine, we can't do any kind of prefix compression - * as there's no way for an application to specify a - * prefix compression callback that corresponds to its - * comparison callback. - * - * No prefix compression if we don't have a compression - * function, or the key we'd compress isn't a normal - * key (for example, it references an overflow page). - * - * Generate a parent page key for the right child page - * from a comparison of the last key on the left child - * page and the first key on the right child page. - */ - if (F_ISSET(dbc, DBC_OPD)) { - if (dbp->dup_compare == __bam_defcmp) - func = __bam_defpfx; - else - func = NULL; - } else - func = t->bt_prefix; - if (func == NULL) - goto noprefix; - tmp_bk = GET_BKEYDATA(dbp, lchild, NUM_ENT(lchild) - - (TYPE(lchild) == P_LDUP ? 
O_INDX : P_INDX)); - if (B_TYPE(tmp_bk->type) != B_KEYDATA) - goto noprefix; - memset(&a, 0, sizeof(a)); - a.size = tmp_bk->len; - a.data = tmp_bk->data; - memset(&b, 0, sizeof(b)); - b.size = child_bk->len; - b.data = child_bk->data; - nksize = (u_int32_t)func(dbp, &a, &b); - if ((n = BINTERNAL_PSIZE(nksize)) < nbytes) - nbytes = n; - else - nksize = child_bk->len; - -noprefix: if (P_FREESPACE(dbp, ppage) < nbytes) - return (DB_NEEDSPLIT); - if (LF_ISSET(BPI_SPACEONLY)) - return (0); - - memset(&bi, 0, sizeof(bi)); - bi.len = nksize; - B_TSET(bi.type, child_bk->type, 0); - bi.pgno = rchild->pgno; - bi.nrecs = nrecs; - memset(&hdr, 0, sizeof(hdr)); - hdr.data = &bi; - hdr.size = SSZA(BINTERNAL, data); - memset(&data, 0, sizeof(data)); - data.data = child_bk->data; - data.size = nksize; - if ((ret = __db_pitem(dbc, ppage, off, - BINTERNAL_SIZE(nksize), &hdr, &data)) != 0) - return (ret); - break; - case B_DUPLICATE: - case B_OVERFLOW: - nbytes = BINTERNAL_PSIZE(BOVERFLOW_SIZE); - - if (P_FREESPACE(dbp, ppage) < nbytes) - return (DB_NEEDSPLIT); - if (LF_ISSET(BPI_SPACEONLY)) - return (0); - - memset(&bi, 0, sizeof(bi)); - bi.len = BOVERFLOW_SIZE; - B_TSET(bi.type, child_bk->type, 0); - bi.pgno = rchild->pgno; - bi.nrecs = nrecs; - memset(&hdr, 0, sizeof(hdr)); - hdr.data = &bi; - hdr.size = SSZA(BINTERNAL, data); - memset(&data, 0, sizeof(data)); - data.data = child_bk; - data.size = BOVERFLOW_SIZE; - if ((ret = __db_pitem(dbc, ppage, off, - BINTERNAL_SIZE(BOVERFLOW_SIZE), &hdr, &data)) != 0) - return (ret); - - /* Increment the overflow ref count. */ - if (B_TYPE(child_bk->type) == B_OVERFLOW) - if ((ret = __db_ovref(dbc, - ((BOVERFLOW *)child_bk)->pgno, 1)) != 0) - return (ret); - break; - default: - return (__db_pgfmt(dbp->dbenv, rchild->pgno)); - } - break; - case P_IRECNO: - case P_LRECNO: - nbytes = RINTERNAL_PSIZE; - - if (P_FREESPACE(dbp, ppage) < nbytes) - return (DB_NEEDSPLIT); - if (LF_ISSET(BPI_SPACEONLY)) - return (0); - - /* Add a new record for the right page. */ - memset(&hdr, 0, sizeof(hdr)); - hdr.data = &ri; - hdr.size = RINTERNAL_SIZE; - ri.pgno = rchild->pgno; - ri.nrecs = nrecs; - if ((ret = __db_pitem(dbc, - ppage, off, RINTERNAL_SIZE, &hdr, NULL)) != 0) - return (ret); - break; - default: - return (__db_pgfmt(dbp->dbenv, rchild->pgno)); - } - - /* - * If a Recno or Btree with record numbers AM page, or an off-page - * duplicates tree, adjust the parent page's left page record count. - */ - if (F_ISSET(cp, C_RECNUM) && !LF_ISSET(BPI_NORECNUM)) { - /* Log the change. */ - if (DBC_LOGGING(dbc)) { - if ((ret = __bam_cadjust_log(dbp, dbc->txn, - &LSN(ppage), 0, PGNO(ppage), &LSN(ppage), - parent->indx, -(int32_t)nrecs, 0)) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(ppage)); - - /* Update the left page count. */ - if (dbc->dbtype == DB_RECNO) - GET_RINTERNAL(dbp, ppage, parent->indx)->nrecs -= nrecs; - else - GET_BINTERNAL(dbp, ppage, parent->indx)->nrecs -= nrecs; - } - - return (0); -} - -/* - * __bam_psplit -- - * Do the real work of splitting the page. - */ -static int -__bam_psplit(dbc, cp, lp, rp, splitret) - DBC *dbc; - EPG *cp; - PAGE *lp, *rp; - db_indx_t *splitret; -{ - DB *dbp; - PAGE *pp; - db_indx_t half, *inp, nbytes, off, splitp, top; - int adjust, cnt, iflag, isbigkey, ret; - - dbp = dbc->dbp; - pp = cp->page; - inp = P_INP(dbp, pp); - adjust = TYPE(pp) == P_LBTREE ? P_INDX : O_INDX; - - /* - * If we're splitting the first (last) page on a level because we're - * inserting (appending) a key to it, it's likely that the data is - * sorted. 
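The parent-key prefix compression that the removed __bam_pinsert relies on (keep only enough leading bytes of the right child's first key to sort it after the last key on the left child) reduces to a byte-wise comparison. A minimal sketch of that idea, with invented names and none of the DBT or callback plumbing, might look like this:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative stand-in for a prefix callback: given the last key on the
 * left page (a) and the first key on the right page (b), with a < b,
 * return how many leading bytes of b must be kept so the shortened key
 * still sorts strictly greater than a.
 */
static size_t
prefix_len(const void *a_data, size_t a_size, const void *b_data, size_t b_size)
{
	const unsigned char *pa = a_data, *pb = b_data;
	size_t cnt, len;

	len = a_size < b_size ? a_size : b_size;
	for (cnt = 1; len > 0; ++cnt, --len, ++pa, ++pb)
		if (*pa != *pb)
			return (cnt);          /* first differing byte decides */
	/*
	 * b starts with a (or equals it): keep one byte past the shared
	 * prefix, or the whole key if the two are identical.
	 */
	return (a_size < b_size ? a_size + 1 : b_size);
}

int
main(void)
{
	const char *a = "application", *b = "apricot";

	/* Prints 3: only "apr" needs to be promoted into the parent page. */
	printf("%zu\n", prefix_len(a, strlen(a), b, strlen(b)));
	return (0);
}

"apr" compares greater than "application" and no greater than "apricot", so it is a valid separator key while using far less parent-page space than the full key.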
Moving a single item to the new page is less work and can - * push the fill factor higher than normal. This is trivial when we - * are splitting a new page before the beginning of the tree, all of - * the interesting tests are against values of 0. - * - * Catching appends to the tree is harder. In a simple append, we're - * inserting an item that sorts past the end of the tree; the cursor - * will point past the last element on the page. But, in trees with - * duplicates, the cursor may point to the last entry on the page -- - * in this case, the entry will also be the last element of a duplicate - * set (the last because the search call specified the S_DUPLAST flag). - * The only way to differentiate between an insert immediately before - * the last item in a tree or an append after a duplicate set which is - * also the last item in the tree is to call the comparison function. - * When splitting internal pages during an append, the search code - * guarantees the cursor always points to the largest page item less - * than the new internal entry. To summarize, we want to catch three - * possible index values: - * - * NUM_ENT(page) Btree/Recno leaf insert past end-of-tree - * NUM_ENT(page) - O_INDX Btree or Recno internal insert past EOT - * NUM_ENT(page) - P_INDX Btree leaf insert past EOT after a set - * of duplicates - * - * two of which, (NUM_ENT(page) - O_INDX or P_INDX) might be an insert - * near the end of the tree, and not after the end of the tree at all. - * Do a simple test which might be wrong because calling the comparison - * functions is expensive. Regardless, it's not a big deal if we're - * wrong, we'll do the split the right way next time. - */ - off = 0; - if (NEXT_PGNO(pp) == PGNO_INVALID && cp->indx >= NUM_ENT(pp) - adjust) - off = NUM_ENT(pp) - adjust; - else if (PREV_PGNO(pp) == PGNO_INVALID && cp->indx == 0) - off = adjust; - if (off != 0) - goto sort; - - /* - * Split the data to the left and right pages. Try not to split on - * an overflow key. (Overflow keys on internal pages will slow down - * searches.) Refuse to split in the middle of a set of duplicates. - * - * First, find the optimum place to split. - * - * It's possible to try and split past the last record on the page if - * there's a very large record at the end of the page. Make sure this - * doesn't happen by bounding the check at the next-to-last entry on - * the page. - * - * Note, we try and split half the data present on the page. This is - * because another process may have already split the page and left - * it half empty. We don't try and skip the split -- we don't know - * how much space we're going to need on the page, and we may need up - * to half the page for a big item, so there's no easy test to decide - * if we need to split or not. Besides, if two threads are inserting - * data into the same place in the database, we're probably going to - * need more space soon anyway. 
- */ - top = NUM_ENT(pp) - adjust; - half = (dbp->pgsize - HOFFSET(pp)) / 2; - for (nbytes = 0, off = 0; off < top && nbytes < half; ++off) - switch (TYPE(pp)) { - case P_IBTREE: - if (B_TYPE( - GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA) - nbytes += BINTERNAL_SIZE( - GET_BINTERNAL(dbp, pp, off)->len); - else - nbytes += BINTERNAL_SIZE(BOVERFLOW_SIZE); - break; - case P_LBTREE: - if (B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) == - B_KEYDATA) - nbytes += BKEYDATA_SIZE(GET_BKEYDATA(dbp, - pp, off)->len); - else - nbytes += BOVERFLOW_SIZE; - - ++off; - /* FALLTHROUGH */ - case P_LDUP: - case P_LRECNO: - if (B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) == - B_KEYDATA) - nbytes += BKEYDATA_SIZE(GET_BKEYDATA(dbp, - pp, off)->len); - else - nbytes += BOVERFLOW_SIZE; - break; - case P_IRECNO: - nbytes += RINTERNAL_SIZE; - break; - default: - return (__db_pgfmt(dbp->dbenv, pp->pgno)); - } -sort: splitp = off; - - /* - * Splitp is either at or just past the optimum split point. If the - * tree type is such that we're going to promote a key to an internal - * page, and our current choice is an overflow key, look for something - * close by that's smaller. - */ - switch (TYPE(pp)) { - case P_IBTREE: - iflag = 1; - isbigkey = - B_TYPE(GET_BINTERNAL(dbp, pp, off)->type) != B_KEYDATA; - break; - case P_LBTREE: - case P_LDUP: - iflag = 0; - isbigkey = B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) != - B_KEYDATA; - break; - default: - iflag = isbigkey = 0; - } - if (isbigkey) - for (cnt = 1; cnt <= 3; ++cnt) { - off = splitp + cnt * adjust; - if (off < (db_indx_t)NUM_ENT(pp) && - ((iflag && B_TYPE( - GET_BINTERNAL(dbp, pp,off)->type) == B_KEYDATA) || - B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) == - B_KEYDATA)) { - splitp = off; - break; - } - if (splitp <= (db_indx_t)(cnt * adjust)) - continue; - off = splitp - cnt * adjust; - if (iflag ? B_TYPE( - GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA : - B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) == - B_KEYDATA) { - splitp = off; - break; - } - } - - /* - * We can't split in the middle a set of duplicates. We know that - * no duplicate set can take up more than about 25% of the page, - * because that's the point where we push it off onto a duplicate - * page set. So, this loop can't be unbounded. - */ - if (TYPE(pp) == P_LBTREE && - inp[splitp] == inp[splitp - adjust]) - for (cnt = 1;; ++cnt) { - off = splitp + cnt * adjust; - if (off < NUM_ENT(pp) && - inp[splitp] != inp[off]) { - splitp = off; - break; - } - if (splitp <= (db_indx_t)(cnt * adjust)) - continue; - off = splitp - cnt * adjust; - if (inp[splitp] != inp[off]) { - splitp = off + adjust; - break; - } - } - - /* We're going to split at splitp. */ - if ((ret = __bam_copy(dbp, pp, lp, 0, splitp)) != 0) - return (ret); - if ((ret = __bam_copy(dbp, pp, rp, splitp, NUM_ENT(pp))) != 0) - return (ret); - - *splitret = splitp; - return (0); -} - -/* - * __bam_copy -- - * Copy a set of records from one page to another. - * - * PUBLIC: int __bam_copy __P((DB *, PAGE *, PAGE *, u_int32_t, u_int32_t)); - */ -int -__bam_copy(dbp, pp, cp, nxt, stop) - DB *dbp; - PAGE *pp, *cp; - u_int32_t nxt, stop; -{ - db_indx_t *cinp, nbytes, off, *pinp; - - cinp = P_INP(dbp, cp); - pinp = P_INP(dbp, pp); - /* - * Nxt is the offset of the next record to be placed on the target page. 
- */ - for (off = 0; nxt < stop; ++nxt, ++NUM_ENT(cp), ++off) { - switch (TYPE(pp)) { - case P_IBTREE: - if (B_TYPE( - GET_BINTERNAL(dbp, pp, nxt)->type) == B_KEYDATA) - nbytes = BINTERNAL_SIZE( - GET_BINTERNAL(dbp, pp, nxt)->len); - else - nbytes = BINTERNAL_SIZE(BOVERFLOW_SIZE); - break; - case P_LBTREE: - /* - * If we're on a key and it's a duplicate, just copy - * the offset. - */ - if (off != 0 && (nxt % P_INDX) == 0 && - pinp[nxt] == pinp[nxt - P_INDX]) { - cinp[off] = cinp[off - P_INDX]; - continue; - } - /* FALLTHROUGH */ - case P_LDUP: - case P_LRECNO: - if (B_TYPE(GET_BKEYDATA(dbp, pp, nxt)->type) == - B_KEYDATA) - nbytes = BKEYDATA_SIZE(GET_BKEYDATA(dbp, - pp, nxt)->len); - else - nbytes = BOVERFLOW_SIZE; - break; - case P_IRECNO: - nbytes = RINTERNAL_SIZE; - break; - default: - return (__db_pgfmt(dbp->dbenv, pp->pgno)); - } - cinp[off] = HOFFSET(cp) -= nbytes; - memcpy(P_ENTRY(dbp, cp, off), P_ENTRY(dbp, pp, nxt), nbytes); - } - return (0); -} diff --git a/storage/bdb/btree/bt_stat.c b/storage/bdb/btree/bt_stat.c deleted file mode 100644 index 98e3b9561f7..00000000000 --- a/storage/bdb/btree/bt_stat.c +++ /dev/null @@ -1,638 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_stat.c,v 12.3 2005/06/16 20:20:23 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" - -#ifdef HAVE_STATISTICS -/* - * __bam_stat -- - * Gather/print the btree statistics - * - * PUBLIC: int __bam_stat __P((DBC *, void *, u_int32_t)); - */ -int -__bam_stat(dbc, spp, flags) - DBC *dbc; - void *spp; - u_int32_t flags; -{ - BTMETA *meta; - BTREE *t; - BTREE_CURSOR *cp; - DB *dbp; - DB_BTREE_STAT *sp; - DB_ENV *dbenv; - DB_LOCK lock, metalock; - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t pgno; - int ret, t_ret, write_meta; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - meta = NULL; - t = dbp->bt_internal; - sp = NULL; - LOCK_INIT(metalock); - LOCK_INIT(lock); - mpf = dbp->mpf; - h = NULL; - ret = write_meta = 0; - - cp = (BTREE_CURSOR *)dbc->internal; - - /* Allocate and clear the structure. */ - if ((ret = __os_umalloc(dbenv, sizeof(*sp), &sp)) != 0) - goto err; - memset(sp, 0, sizeof(*sp)); - - /* Get the metadata page for the entire database. */ - pgno = PGNO_BASE_MD; - if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &metalock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) - goto err; - - if (flags == DB_RECORDCOUNT || flags == DB_CACHED_COUNTS) - flags = DB_FAST_STAT; - if (flags == DB_FAST_STAT) - goto meta_only; - - /* Walk the metadata free list, counting pages. */ - for (sp->bt_free = 0, pgno = meta->dbmeta.free; pgno != PGNO_INVALID;) { - ++sp->bt_free; - - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - goto err; - - pgno = h->next_pgno; - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; - h = NULL; - } - - /* Get the root page. */ - pgno = cp->root; - if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - goto err; - - /* Get the levels from the root page. */ - sp->bt_levels = h->level; - - /* Discard the root page. 
*/ - ret = __memp_fput(mpf, h, 0); - h = NULL; - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - - /* Walk the tree. */ - if ((ret = __bam_traverse(dbc, - DB_LOCK_READ, cp->root, __bam_stat_callback, sp)) != 0) - goto err; - - /* - * Get the subdatabase metadata page if it's not the same as the - * one we already have. - */ - write_meta = !F_ISSET(dbp, DB_AM_RDONLY); -meta_only: - if (t->bt_meta != PGNO_BASE_MD || write_meta != 0) { - ret = __memp_fput(mpf, meta, 0); - meta = NULL; - if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - - if ((ret = __db_lget(dbc, - 0, t->bt_meta, write_meta == 0 ? - DB_LOCK_READ : DB_LOCK_WRITE, 0, &metalock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &t->bt_meta, 0, &meta)) != 0) - goto err; - } - if (flags == DB_FAST_STAT) { - if (dbp->type == DB_RECNO || - (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))) { - if ((ret = __db_lget(dbc, 0, - cp->root, DB_LOCK_READ, 0, &lock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &cp->root, 0, &h)) != 0) - goto err; - - sp->bt_nkeys = RE_NREC(h); - } else - sp->bt_nkeys = meta->dbmeta.key_count; - - sp->bt_ndata = dbp->type == DB_RECNO ? - sp->bt_nkeys : meta->dbmeta.record_count; - } - - /* Get metadata page statistics. */ - sp->bt_metaflags = meta->dbmeta.flags; - sp->bt_minkey = meta->minkey; - sp->bt_re_len = meta->re_len; - sp->bt_re_pad = meta->re_pad; - sp->bt_pagesize = meta->dbmeta.pagesize; - sp->bt_magic = meta->dbmeta.magic; - sp->bt_version = meta->dbmeta.version; - - if (write_meta != 0) { - meta->dbmeta.key_count = sp->bt_nkeys; - meta->dbmeta.record_count = sp->bt_ndata; - } - - *(DB_BTREE_STAT **)spp = sp; - -err: /* Discard the second page. */ - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - - /* Discard the metadata page. */ - if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; - if (meta != NULL && (t_ret = __memp_fput( - mpf, meta, write_meta == 0 ? 0 : DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - - if (ret != 0 && sp != NULL) { - __os_ufree(dbenv, sp); - *(DB_BTREE_STAT **)spp = NULL; - } - - return (ret); -} - -/* - * __bam_stat_print -- - * Display btree/recno statistics. 
- * - * PUBLIC: int __bam_stat_print __P((DBC *, u_int32_t)); - */ -int -__bam_stat_print(dbc, flags) - DBC *dbc; - u_int32_t flags; -{ - static const FN fn[] = { - { BTM_DUP, "duplicates" }, - { BTM_RECNO, "recno" }, - { BTM_RECNUM, "record-numbers" }, - { BTM_FIXEDLEN, "fixed-length" }, - { BTM_RENUMBER, "renumber" }, - { BTM_SUBDB, "multiple-databases" }, - { BTM_DUPSORT, "sorted duplicates" }, - { 0, NULL } - }; - DB *dbp; - DB_BTREE_STAT *sp; - DB_ENV *dbenv; - int lorder, ret; - const char *s; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - if ((ret = __bam_stat(dbc, &sp, 0)) != 0) - return (ret); - - if (LF_ISSET(DB_STAT_ALL)) { - __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); - __db_msg(dbenv, "Default Btree/Recno database information:"); - } - - __db_msg(dbenv, "%lx\tBtree magic number", (u_long)sp->bt_magic); - __db_msg(dbenv, "%lu\tBtree version number", (u_long)sp->bt_version); - - (void)__db_get_lorder(dbp, &lorder); - switch (lorder) { - case 1234: - s = "Little-endian"; - break; - case 4321: - s = "Big-endian"; - break; - default: - s = "Unrecognized byte order"; - break; - } - __db_msg(dbenv, "%s\tByte order", s); - __db_prflags(dbenv, NULL, sp->bt_metaflags, fn, NULL, "\tFlags"); - if (dbp->type == DB_BTREE) - __db_dl(dbenv, "Minimum keys per-page", (u_long)sp->bt_minkey); - if (dbp->type == DB_RECNO) { - __db_dl(dbenv, - "Fixed-length record size", (u_long)sp->bt_re_len); - __db_msg(dbenv, - "%#x\tFixed-length record pad", (u_int)sp->bt_re_pad); - } - __db_dl(dbenv, - "Underlying database page size", (u_long)sp->bt_pagesize); - __db_dl(dbenv, "Number of levels in the tree", (u_long)sp->bt_levels); - __db_dl(dbenv, dbp->type == DB_BTREE ? - "Number of unique keys in the tree" : - "Number of records in the tree", (u_long)sp->bt_nkeys); - __db_dl(dbenv, - "Number of data items in the tree", (u_long)sp->bt_ndata); - - __db_dl(dbenv, - "Number of tree internal pages", (u_long)sp->bt_int_pg); - __db_dl_pct(dbenv, - "Number of bytes free in tree internal pages", - (u_long)sp->bt_int_pgfree, - DB_PCT_PG(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize), "ff"); - - __db_dl(dbenv, - "Number of tree leaf pages", (u_long)sp->bt_leaf_pg); - __db_dl_pct(dbenv, "Number of bytes free in tree leaf pages", - (u_long)sp->bt_leaf_pgfree, DB_PCT_PG( - sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize), "ff"); - - __db_dl(dbenv, - "Number of tree duplicate pages", (u_long)sp->bt_dup_pg); - __db_dl_pct(dbenv, - "Number of bytes free in tree duplicate pages", - (u_long)sp->bt_dup_pgfree, - DB_PCT_PG(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize), "ff"); - - __db_dl(dbenv, - "Number of tree overflow pages", (u_long)sp->bt_over_pg); - __db_dl_pct(dbenv, "Number of bytes free in tree overflow pages", - (u_long)sp->bt_over_pgfree, DB_PCT_PG( - sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize), "ff"); - __db_dl(dbenv, "Number of empty pages", (u_long)sp->bt_empty_pg); - - __db_dl(dbenv, "Number of pages on the free list", (u_long)sp->bt_free); - - __os_ufree(dbenv, sp); - - return (0); -} - -/* - * __bam_stat_callback -- - * Statistics callback. 
- * - * PUBLIC: int __bam_stat_callback __P((DB *, PAGE *, void *, int *)); - */ -int -__bam_stat_callback(dbp, h, cookie, putp) - DB *dbp; - PAGE *h; - void *cookie; - int *putp; -{ - DB_BTREE_STAT *sp; - db_indx_t indx, *inp, top; - u_int8_t type; - - sp = cookie; - *putp = 0; - top = NUM_ENT(h); - inp = P_INP(dbp, h); - - switch (TYPE(h)) { - case P_IBTREE: - case P_IRECNO: - ++sp->bt_int_pg; - sp->bt_int_pgfree += P_FREESPACE(dbp, h); - break; - case P_LBTREE: - if (top == 0) - ++sp->bt_empty_pg; - - /* Correct for on-page duplicates and deleted items. */ - for (indx = 0; indx < top; indx += P_INDX) { - type = GET_BKEYDATA(dbp, h, indx + O_INDX)->type; - /* Ignore deleted items. */ - if (B_DISSET(type)) - continue; - - /* Ignore duplicate keys. */ - if (indx + P_INDX >= top || - inp[indx] != inp[indx + P_INDX]) - ++sp->bt_nkeys; - - /* Ignore off-page duplicates. */ - if (B_TYPE(type) != B_DUPLICATE) - ++sp->bt_ndata; - } - - ++sp->bt_leaf_pg; - sp->bt_leaf_pgfree += P_FREESPACE(dbp, h); - break; - case P_LRECNO: - if (top == 0) - ++sp->bt_empty_pg; - - /* - * If walking a recno tree, then each of these items is a key. - * Otherwise, we're walking an off-page duplicate set. - */ - if (dbp->type == DB_RECNO) { - /* - * Correct for deleted items in non-renumbering Recno - * databases. - */ - if (F_ISSET(dbp, DB_AM_RENUMBER)) { - sp->bt_nkeys += top; - sp->bt_ndata += top; - } else - for (indx = 0; indx < top; indx += O_INDX) { - type = GET_BKEYDATA(dbp, h, indx)->type; - if (!B_DISSET(type)) { - ++sp->bt_ndata; - ++sp->bt_nkeys; - } - } - - ++sp->bt_leaf_pg; - sp->bt_leaf_pgfree += P_FREESPACE(dbp, h); - } else { - sp->bt_ndata += top; - - ++sp->bt_dup_pg; - sp->bt_dup_pgfree += P_FREESPACE(dbp, h); - } - break; - case P_LDUP: - if (top == 0) - ++sp->bt_empty_pg; - - /* Correct for deleted items. */ - for (indx = 0; indx < top; indx += O_INDX) - if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type)) - ++sp->bt_ndata; - - ++sp->bt_dup_pg; - sp->bt_dup_pgfree += P_FREESPACE(dbp, h); - break; - case P_OVERFLOW: - ++sp->bt_over_pg; - sp->bt_over_pgfree += P_OVFLSPACE(dbp, dbp->pgsize, h); - break; - default: - return (__db_pgfmt(dbp->dbenv, h->pgno)); - } - return (0); -} - -/* - * __bam_print_cursor -- - * Display the current internal cursor. - * - * PUBLIC: void __bam_print_cursor __P((DBC *)); - */ -void -__bam_print_cursor(dbc) - DBC *dbc; -{ - static const FN fn[] = { - { C_DELETED, "C_DELETED" }, - { C_RECNUM, "C_RECNUM" }, - { C_RENUMBER, "C_RENUMBER" }, - { 0, NULL } - }; - DB_ENV *dbenv; - BTREE_CURSOR *cp; - - dbenv = dbc->dbp->dbenv; - cp = (BTREE_CURSOR *)dbc->internal; - - STAT_ULONG("Overflow size", cp->ovflsize); - if (dbc->dbtype == DB_RECNO) - STAT_ULONG("Recno", cp->recno); - STAT_ULONG("Order", cp->order); - __db_prflags(dbenv, NULL, cp->flags, fn, NULL, "\tInternal Flags"); -} - -#else /* !HAVE_STATISTICS */ - -int -__bam_stat(dbc, spp, flags) - DBC *dbc; - void *spp; - u_int32_t flags; -{ - COMPQUIET(spp, NULL); - COMPQUIET(flags, 0); - - return (__db_stat_not_built(dbc->dbp->dbenv)); -} - -int -__bam_stat_print(dbc, flags) - DBC *dbc; - u_int32_t flags; -{ - COMPQUIET(flags, 0); - - return (__db_stat_not_built(dbc->dbp->dbenv)); -} -#endif - -/* - * __bam_key_range -- - * Return proportion of keys relative to given key. The numbers are - * slightly skewed due to on page duplicates. 
- * - * PUBLIC: int __bam_key_range __P((DBC *, DBT *, DB_KEY_RANGE *, u_int32_t)); - */ -int -__bam_key_range(dbc, dbt, kp, flags) - DBC *dbc; - DBT *dbt; - DB_KEY_RANGE *kp; - u_int32_t flags; -{ - BTREE_CURSOR *cp; - EPG *sp; - double factor; - int exact, ret; - - COMPQUIET(flags, 0); - - if ((ret = __bam_search(dbc, PGNO_INVALID, - dbt, S_STK_ONLY, 1, NULL, &exact)) != 0) - return (ret); - - cp = (BTREE_CURSOR *)dbc->internal; - kp->less = kp->greater = 0.0; - - factor = 1.0; - /* Correct the leaf page. */ - cp->csp->entries /= 2; - cp->csp->indx /= 2; - for (sp = cp->sp; sp <= cp->csp; ++sp) { - /* - * At each level we know that pages greater than indx contain - * keys greater than what we are looking for and those less - * than indx are less than. The one pointed to by indx may - * have some less, some greater or even equal. If indx is - * equal to the number of entries, then the key is out of range - * and everything is less. - */ - if (sp->indx == 0) - kp->greater += factor * (sp->entries - 1)/sp->entries; - else if (sp->indx == sp->entries) - kp->less += factor; - else { - kp->less += factor * sp->indx / sp->entries; - kp->greater += factor * - ((sp->entries - sp->indx) - 1) / sp->entries; - } - factor *= 1.0/sp->entries; - } - - /* - * If there was an exact match then assign 1 n'th to the key itself. - * Otherwise that factor belongs to those greater than the key, unless - * the key was out of range. - */ - if (exact) - kp->equal = factor; - else { - if (kp->less != 1) - kp->greater += factor; - kp->equal = 0; - } - - BT_STK_CLR(cp); - - return (0); -} - -/* - * __bam_traverse -- - * Walk a Btree database. - * - * PUBLIC: int __bam_traverse __P((DBC *, db_lockmode_t, - * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *)); - */ -int -__bam_traverse(dbc, mode, root_pgno, callback, cookie) - DBC *dbc; - db_lockmode_t mode; - db_pgno_t root_pgno; - int (*callback)__P((DB *, PAGE *, void *, int *)); - void *cookie; -{ - BINTERNAL *bi; - BKEYDATA *bk; - DB *dbp; - DB_LOCK lock; - DB_MPOOLFILE *mpf; - PAGE *h; - RINTERNAL *ri; - db_indx_t indx, *inp; - int already_put, ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - already_put = 0; - - if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &root_pgno, 0, &h)) != 0) { - (void)__TLPUT(dbc, lock); - return (ret); - } - - switch (TYPE(h)) { - case P_IBTREE: - for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { - bi = GET_BINTERNAL(dbp, h, indx); - if (B_TYPE(bi->type) == B_OVERFLOW && - (ret = __db_traverse_big(dbp, - ((BOVERFLOW *)bi->data)->pgno, - callback, cookie)) != 0) - goto err; - if ((ret = __bam_traverse( - dbc, mode, bi->pgno, callback, cookie)) != 0) - goto err; - } - break; - case P_IRECNO: - for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { - ri = GET_RINTERNAL(dbp, h, indx); - if ((ret = __bam_traverse( - dbc, mode, ri->pgno, callback, cookie)) != 0) - goto err; - } - break; - case P_LBTREE: - inp = P_INP(dbp, h); - for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) { - bk = GET_BKEYDATA(dbp, h, indx); - if (B_TYPE(bk->type) == B_OVERFLOW && - (indx + P_INDX >= NUM_ENT(h) || - inp[indx] != inp[indx + P_INDX])) { - if ((ret = __db_traverse_big(dbp, - GET_BOVERFLOW(dbp, h, indx)->pgno, - callback, cookie)) != 0) - goto err; - } - bk = GET_BKEYDATA(dbp, h, indx + O_INDX); - if (B_TYPE(bk->type) == B_DUPLICATE && - (ret = __bam_traverse(dbc, mode, - GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno, - callback, cookie)) != 0) - goto err; - if (B_TYPE(bk->type) == 
B_OVERFLOW && - (ret = __db_traverse_big(dbp, - GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno, - callback, cookie)) != 0) - goto err; - } - break; - case P_LDUP: - case P_LRECNO: - for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) { - bk = GET_BKEYDATA(dbp, h, indx); - if (B_TYPE(bk->type) == B_OVERFLOW && - (ret = __db_traverse_big(dbp, - GET_BOVERFLOW(dbp, h, indx)->pgno, - callback, cookie)) != 0) - goto err; - } - break; - default: - return (__db_pgfmt(dbp->dbenv, h->pgno)); - } - - ret = callback(dbp, h, cookie, &already_put); - -err: if (!already_put && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} diff --git a/storage/bdb/btree/bt_upgrade.c b/storage/bdb/btree/bt_upgrade.c deleted file mode 100644 index 8ace2864cd3..00000000000 --- a/storage/bdb/btree/bt_upgrade.c +++ /dev/null @@ -1,159 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_upgrade.c,v 12.1 2005/06/16 20:20:23 bostic Exp $ - */ -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_upgrade.h" -#include "dbinc/btree.h" - -/* - * __bam_30_btreemeta -- - * Upgrade the metadata pages from version 6 to version 7. - * - * PUBLIC: int __bam_30_btreemeta __P((DB *, char *, u_int8_t *)); - */ -int -__bam_30_btreemeta(dbp, real_name, buf) - DB *dbp; - char *real_name; - u_int8_t *buf; -{ - BTMETA30 *newmeta; - BTMETA2X *oldmeta; - DB_ENV *dbenv; - int ret; - - dbenv = dbp->dbenv; - - newmeta = (BTMETA30 *)buf; - oldmeta = (BTMETA2X *)buf; - - /* - * Move things from the end up, so we do not overwrite things. - * We are going to create a new uid, so we can move the stuff - * at the end of the structure first, overwriting the uid. - */ - - newmeta->re_pad = oldmeta->re_pad; - newmeta->re_len = oldmeta->re_len; - newmeta->minkey = oldmeta->minkey; - newmeta->maxkey = oldmeta->maxkey; - newmeta->dbmeta.free = oldmeta->free; - newmeta->dbmeta.flags = oldmeta->flags; - newmeta->dbmeta.type = P_BTREEMETA; - - newmeta->dbmeta.version = 7; - /* Replace the unique ID. */ - if ((ret = __os_fileid(dbenv, real_name, 1, buf + 36)) != 0) - return (ret); - - newmeta->root = 1; - - return (0); -} - -/* - * __bam_31_btreemeta -- - * Upgrade the database from version 7 to version 8. - * - * PUBLIC: int __bam_31_btreemeta - * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)); - */ -int -__bam_31_btreemeta(dbp, real_name, flags, fhp, h, dirtyp) - DB *dbp; - char *real_name; - u_int32_t flags; - DB_FH *fhp; - PAGE *h; - int *dirtyp; -{ - BTMETA31 *newmeta; - BTMETA30 *oldmeta; - - COMPQUIET(dbp, NULL); - COMPQUIET(real_name, NULL); - COMPQUIET(fhp, NULL); - - newmeta = (BTMETA31 *)h; - oldmeta = (BTMETA30 *)h; - - /* - * Copy the effected fields down the page. - * The fields may overlap each other so we - * start at the bottom and use memmove. - */ - newmeta->root = oldmeta->root; - newmeta->re_pad = oldmeta->re_pad; - newmeta->re_len = oldmeta->re_len; - newmeta->minkey = oldmeta->minkey; - newmeta->maxkey = oldmeta->maxkey; - memmove(newmeta->dbmeta.uid, - oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid)); - newmeta->dbmeta.flags = oldmeta->dbmeta.flags; - newmeta->dbmeta.record_count = 0; - newmeta->dbmeta.key_count = 0; - ZERO_LSN(newmeta->dbmeta.unused3); - - /* Set the version number. 
*/ - newmeta->dbmeta.version = 8; - - /* Upgrade the flags. */ - if (LF_ISSET(DB_DUPSORT)) - F_SET(&newmeta->dbmeta, BTM_DUPSORT); - - *dirtyp = 1; - return (0); -} - -/* - * __bam_31_lbtree -- - * Upgrade the database btree leaf pages. - * - * PUBLIC: int __bam_31_lbtree - * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)); - */ -int -__bam_31_lbtree(dbp, real_name, flags, fhp, h, dirtyp) - DB *dbp; - char *real_name; - u_int32_t flags; - DB_FH *fhp; - PAGE *h; - int *dirtyp; -{ - BKEYDATA *bk; - db_pgno_t pgno; - db_indx_t indx; - int ret; - - ret = 0; - for (indx = O_INDX; indx < NUM_ENT(h); indx += P_INDX) { - bk = GET_BKEYDATA(dbp, h, indx); - if (B_TYPE(bk->type) == B_DUPLICATE) { - pgno = GET_BOVERFLOW(dbp, h, indx)->pgno; - if ((ret = __db_31_offdup(dbp, real_name, fhp, - LF_ISSET(DB_DUPSORT) ? 1 : 0, &pgno)) != 0) - break; - if (pgno != GET_BOVERFLOW(dbp, h, indx)->pgno) { - *dirtyp = 1; - GET_BOVERFLOW(dbp, h, indx)->pgno = pgno; - } - } - } - - return (ret); -} diff --git a/storage/bdb/btree/bt_verify.c b/storage/bdb/btree/bt_verify.c deleted file mode 100644 index 055cc46892e..00000000000 --- a/storage/bdb/btree/bt_verify.c +++ /dev/null @@ -1,2469 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: bt_verify.c,v 12.13 2005/11/11 20:27:49 ubell Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_verify.h" -#include "dbinc/btree.h" -#include "dbinc/mp.h" - -static int __bam_safe_getdata __P((DB *, PAGE *, u_int32_t, int, DBT *, int *)); -static int __bam_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, - db_indx_t *, u_int32_t)); -static int __bam_vrfy_treeorder __P((DB *, db_pgno_t, PAGE *, BINTERNAL *, - BINTERNAL *, int (*)(DB *, const DBT *, const DBT *), u_int32_t)); -static int __ram_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, - db_indx_t *, u_int32_t)); - -/* - * __bam_vrfy_meta -- - * Verify the btree-specific part of a metadata page. - * - * PUBLIC: int __bam_vrfy_meta __P((DB *, VRFY_DBINFO *, BTMETA *, - * PUBLIC: db_pgno_t, u_int32_t)); - */ -int -__bam_vrfy_meta(dbp, vdp, meta, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - BTMETA *meta; - db_pgno_t pgno; - u_int32_t flags; -{ - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - int isbad, t_ret, ret; - db_indx_t ovflsize; - - dbenv = dbp->dbenv; - isbad = 0; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - /* - * If VRFY_INCOMPLETE is not set, then we didn't come through - * __db_vrfy_pagezero and didn't incompletely - * check this page--we haven't checked it at all. - * Thus we need to call __db_vrfy_meta and check the common fields. - * - * If VRFY_INCOMPLETE is set, we've already done all the same work - * in __db_vrfy_pagezero, so skip the check. - */ - if (!F_ISSET(pip, VRFY_INCOMPLETE) && - (ret = __db_vrfy_meta(dbp, vdp, &meta->dbmeta, pgno, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - - /* bt_minkey: must be >= 2; must produce sensible ovflsize */ - - /* avoid division by zero */ - ovflsize = meta->minkey > 0 ? 
- B_MINKEY_TO_OVFLSIZE(dbp, meta->minkey, dbp->pgsize) : 0; - - if (meta->minkey < 2 || - ovflsize > B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) { - pip->bt_minkey = 0; - isbad = 1; - EPRINT((dbenv, - "Page %lu: nonsensical bt_minkey value %lu on metadata page", - (u_long)pgno, (u_long)meta->minkey)); - } else - pip->bt_minkey = meta->minkey; - - /* re_len: no constraints on this (may be zero or huge--we make rope) */ - pip->re_pad = meta->re_pad; - pip->re_len = meta->re_len; - - /* - * The root must not be current page or 0 and it must be within - * database. If this metadata page is the master meta data page - * of the file, then the root page had better be page 1. - */ - pip->root = 0; - if (meta->root == PGNO_INVALID || - meta->root == pgno || !IS_VALID_PGNO(meta->root) || - (pgno == PGNO_BASE_MD && meta->root != 1)) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: nonsensical root page %lu on metadata page", - (u_long)pgno, (u_long)meta->root)); - } else - pip->root = meta->root; - - /* Flags. */ - if (F_ISSET(&meta->dbmeta, BTM_RENUMBER)) - F_SET(pip, VRFY_IS_RRECNO); - - if (F_ISSET(&meta->dbmeta, BTM_SUBDB)) { - /* - * If this is a master db meta page, it had better not have - * duplicates. - */ - if (F_ISSET(&meta->dbmeta, BTM_DUP) && pgno == PGNO_BASE_MD) { - isbad = 1; - EPRINT((dbenv, -"Page %lu: Btree metadata page has both duplicates and multiple databases", - (u_long)pgno)); - } - F_SET(pip, VRFY_HAS_SUBDBS); - } - - if (F_ISSET(&meta->dbmeta, BTM_DUP)) - F_SET(pip, VRFY_HAS_DUPS); - if (F_ISSET(&meta->dbmeta, BTM_DUPSORT)) - F_SET(pip, VRFY_HAS_DUPSORT); - if (F_ISSET(&meta->dbmeta, BTM_RECNUM)) - F_SET(pip, VRFY_HAS_RECNUMS); - if (F_ISSET(pip, VRFY_HAS_RECNUMS) && F_ISSET(pip, VRFY_HAS_DUPS)) { - EPRINT((dbenv, - "Page %lu: Btree metadata page illegally has both recnums and dups", - (u_long)pgno)); - isbad = 1; - } - - if (F_ISSET(&meta->dbmeta, BTM_RECNO)) { - F_SET(pip, VRFY_IS_RECNO); - dbp->type = DB_RECNO; - } else if (F_ISSET(pip, VRFY_IS_RRECNO)) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: metadata page has renumber flag set but is not recno", - (u_long)pgno)); - } - - if (F_ISSET(pip, VRFY_IS_RECNO) && F_ISSET(pip, VRFY_HAS_DUPS)) { - EPRINT((dbenv, - "Page %lu: recno metadata page specifies duplicates", - (u_long)pgno)); - isbad = 1; - } - - if (F_ISSET(&meta->dbmeta, BTM_FIXEDLEN)) - F_SET(pip, VRFY_IS_FIXEDLEN); - else if (pip->re_len > 0) { - /* - * It's wrong to have an re_len if it's not a fixed-length - * database - */ - isbad = 1; - EPRINT((dbenv, - "Page %lu: re_len of %lu in non-fixed-length database", - (u_long)pgno, (u_long)pip->re_len)); - } - - /* - * We do not check that the rest of the page is 0, because it may - * not be and may still be correct. - */ - -err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - if (LF_ISSET(DB_SALVAGE) && - (t_ret = __db_salvage_markdone(vdp, pgno)) != 0 && ret == 0) - ret = t_ret; - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -/* - * __ram_vrfy_leaf -- - * Verify a recno leaf page. 
- * - * PUBLIC: int __ram_vrfy_leaf __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, - * PUBLIC: u_int32_t)); - */ -int -__ram_vrfy_leaf(dbp, vdp, h, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - u_int32_t flags; -{ - BKEYDATA *bk; - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - db_indx_t i; - int ret, t_ret, isbad; - u_int32_t re_len_guess, len; - - dbenv = dbp->dbenv; - isbad = 0; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - if (TYPE(h) != P_LRECNO) { - /* We should not have been called. */ - TYPE_ERR_PRINT(dbenv, "__ram_vrfy_leaf", pgno, TYPE(h)); - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - - /* - * Verify (and, if relevant, save off) page fields common to - * all PAGEs. - */ - if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - - /* - * Verify inp[]. Return immediately if it returns DB_VERIFY_BAD; - * further checks are dangerous. - */ - if ((ret = __bam_vrfy_inp(dbp, - vdp, h, pgno, &pip->entries, flags)) != 0) - goto err; - - if (F_ISSET(pip, VRFY_HAS_DUPS)) { - EPRINT((dbenv, - "Page %lu: Recno database has dups", (u_long)pgno)); - ret = DB_VERIFY_BAD; - goto err; - } - - /* - * Walk through inp and see if the lengths of all the records are the - * same--if so, this may be a fixed-length database, and we want to - * save off this value. We know inp to be safe if we've gotten this - * far. - */ - re_len_guess = 0; - for (i = 0; i < NUM_ENT(h); i++) { - bk = GET_BKEYDATA(dbp, h, i); - /* KEYEMPTY. Go on. */ - if (B_DISSET(bk->type)) - continue; - if (bk->type == B_OVERFLOW) - len = ((BOVERFLOW *)bk)->tlen; - else if (bk->type == B_KEYDATA) - len = bk->len; - else { - isbad = 1; - EPRINT((dbenv, - "Page %lu: nonsensical type for item %lu", - (u_long)pgno, (u_long)i)); - continue; - } - if (re_len_guess == 0) - re_len_guess = len; - - /* - * Is this item's len the same as the last one's? If not, - * reset to 0 and break--we don't have a single re_len. - * Otherwise, go on to the next item. - */ - if (re_len_guess != len) { - re_len_guess = 0; - break; - } - } - pip->re_len = re_len_guess; - - /* Save off record count. */ - pip->rec_cnt = NUM_ENT(h); - -err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -/* - * __bam_vrfy -- - * Verify a btree leaf or internal page. - * - * PUBLIC: int __bam_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, - * PUBLIC: u_int32_t)); - */ -int -__bam_vrfy(dbp, vdp, h, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - u_int32_t flags; -{ - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - int ret, t_ret, isbad; - - dbenv = dbp->dbenv; - isbad = 0; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - switch (TYPE(h)) { - case P_IBTREE: - case P_IRECNO: - case P_LBTREE: - case P_LDUP: - break; - default: - TYPE_ERR_PRINT(dbenv, "__bam_vrfy", pgno, TYPE(h)); - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - - /* - * Verify (and, if relevant, save off) page fields common to - * all PAGEs. - */ - if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - - /* - * The record count is, on internal pages, stored in an overloaded - * next_pgno field. Save it off; we'll verify it when we check - * overall database structure. 
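The fixed-record-length guess made by the removed __ram_vrfy_leaf (keep a candidate re_len only while every record on the page has that length) can be sketched on its own as follows. The array-of-lengths interface is invented for illustration and skips the deleted-item handling in the original:

#include <stddef.h>
#include <stdio.h>

/*
 * Keep a guessed fixed record length only while every record matches it;
 * any mismatch drops the guess back to 0 (meaning "not fixed-length").
 */
static size_t
guess_fixed_len(const size_t *lens, size_t n)
{
	size_t guess = 0, i;

	for (i = 0; i < n; i++) {
		if (guess == 0)
			guess = lens[i];
		if (guess != lens[i])
			return (0);     /* mixed lengths on the page */
	}
	return (guess);
}

int
main(void)
{
	size_t fixed[] = { 16, 16, 16 }, mixed[] = { 16, 16, 24 };

	/* Prints "16 0": the first page looks fixed-length, the second does not. */
	printf("%zu %zu\n", guess_fixed_len(fixed, 3), guess_fixed_len(mixed, 3));
	return (0);
}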
We could overload the field - * in VRFY_PAGEINFO, too, but this seems gross, and space - * is not at such a premium. - */ - pip->rec_cnt = RE_NREC(h); - - /* - * Verify inp[]. - */ - if (TYPE(h) == P_IRECNO) { - if ((ret = __ram_vrfy_inp(dbp, - vdp, h, pgno, &pip->entries, flags)) != 0) - goto err; - } else if ((ret = __bam_vrfy_inp(dbp, - vdp, h, pgno, &pip->entries, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - EPRINT((dbenv, - "Page %lu: item order check unsafe: skipping", - (u_long)pgno)); - } else if (!LF_ISSET(DB_NOORDERCHK) && (ret = - __bam_vrfy_itemorder(dbp, vdp, h, pgno, 0, 0, 0, flags)) != 0) { - /* - * We know that the elements of inp are reasonable. - * - * Check that elements fall in the proper order. - */ - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - -err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -/* - * __ram_vrfy_inp -- - * Verify that all entries in a P_IRECNO inp[] array are reasonable, - * and count them. Note that P_LRECNO uses __bam_vrfy_inp; - * P_IRECNOs are a special, and simpler, case, since they have - * RINTERNALs rather than BKEYDATA/BINTERNALs. - */ -static int -__ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - db_indx_t *nentriesp; - u_int32_t flags; -{ - DB_ENV *dbenv; - RINTERNAL *ri; - VRFY_CHILDINFO child; - VRFY_PAGEINFO *pip; - int ret, t_ret, isbad; - u_int32_t himark, i, offset, nentries; - db_indx_t *inp; - u_int8_t *pagelayout, *p; - - dbenv = dbp->dbenv; - isbad = 0; - memset(&child, 0, sizeof(VRFY_CHILDINFO)); - nentries = 0; - pagelayout = NULL; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - if (TYPE(h) != P_IRECNO) { - TYPE_ERR_PRINT(dbenv, "__ram_vrfy_inp", pgno, TYPE(h)); - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - - himark = dbp->pgsize; - if ((ret = __os_malloc(dbenv, dbp->pgsize, &pagelayout)) != 0) - goto err; - memset(pagelayout, 0, dbp->pgsize); - inp = P_INP(dbp, h); - for (i = 0; i < NUM_ENT(h); i++) { - if ((u_int8_t *)inp + i >= (u_int8_t *)h + himark) { - EPRINT((dbenv, - "Page %lu: entries listing %lu overlaps data", - (u_long)pgno, (u_long)i)); - ret = DB_VERIFY_BAD; - goto err; - } - offset = inp[i]; - /* - * Check that the item offset is reasonable: it points - * somewhere after the inp array and before the end of the - * page. - */ - if (offset <= (u_int32_t)((u_int8_t *)inp + i - - (u_int8_t *)h) || - offset > (u_int32_t)(dbp->pgsize - RINTERNAL_SIZE)) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: bad offset %lu at index %lu", - (u_long)pgno, (u_long)offset, (u_long)i)); - continue; - } - - /* Update the high-water mark (what HOFFSET should be) */ - if (offset < himark) - himark = offset; - - nentries++; - - /* Make sure this RINTERNAL is not multiply referenced. 
*/ - ri = GET_RINTERNAL(dbp, h, i); - if (pagelayout[offset] == 0) { - pagelayout[offset] = 1; - child.pgno = ri->pgno; - child.type = V_RECNO; - child.nrecs = ri->nrecs; - if ((ret = __db_vrfy_childput(vdp, pgno, &child)) != 0) - goto err; - } else { - EPRINT((dbenv, - "Page %lu: RINTERNAL structure at offset %lu referenced twice", - (u_long)pgno, (u_long)offset)); - isbad = 1; - } - } - - for (p = pagelayout + himark; - p < pagelayout + dbp->pgsize; - p += RINTERNAL_SIZE) - if (*p != 1) { - EPRINT((dbenv, - "Page %lu: gap between items at offset %lu", - (u_long)pgno, (u_long)(p - pagelayout))); - isbad = 1; - } - - if ((db_indx_t)himark != HOFFSET(h)) { - EPRINT((dbenv, - "Page %lu: bad HOFFSET %lu, appears to be %lu", - (u_long)pgno, (u_long)(HOFFSET(h)), (u_long)himark)); - isbad = 1; - } - - *nentriesp = nentries; - -err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - if (pagelayout != NULL) - __os_free(dbenv, pagelayout); - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -typedef enum { VRFY_ITEM_NOTSET=0, VRFY_ITEM_BEGIN, VRFY_ITEM_END } VRFY_ITEM; - -/* - * __bam_vrfy_inp -- - * Verify that all entries in inp[] array are reasonable; - * count them. - */ -static int -__bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - db_indx_t *nentriesp; - u_int32_t flags; -{ - BKEYDATA *bk; - BOVERFLOW *bo; - DB_ENV *dbenv; - VRFY_CHILDINFO child; - VRFY_ITEM *pagelayout; - VRFY_PAGEINFO *pip; - u_int32_t himark, offset; /* - * These would be db_indx_ts - * but for alignment. - */ - u_int32_t i, endoff, nentries; - int isbad, initem, isdupitem, ret, t_ret; - - dbenv = dbp->dbenv; - isbad = isdupitem = 0; - nentries = 0; - memset(&child, 0, sizeof(VRFY_CHILDINFO)); - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - switch (TYPE(h)) { - case P_IBTREE: - case P_LBTREE: - case P_LDUP: - case P_LRECNO: - break; - default: - /* - * In the salvager, we might call this from a page which - * we merely suspect is a btree page. Otherwise, it - * shouldn't get called--if it is, that's a verifier bug. - */ - if (LF_ISSET(DB_SALVAGE)) - break; - TYPE_ERR_PRINT(dbenv, "__bam_vrfy_inp", pgno, TYPE(h)); - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - - /* - * Loop through inp[], the array of items, until we either - * run out of entries or collide with the data. Keep track - * of h_offset in himark. - * - * For each element in inp[i], make sure it references a region - * that starts after the end of the inp array (as defined by - * NUM_ENT(h)), ends before the beginning of the page, doesn't - * overlap any other regions, and doesn't have a gap between - * it and the region immediately after it. - */ - himark = dbp->pgsize; - if ((ret = __os_calloc( - dbenv, dbp->pgsize, sizeof(pagelayout[0]), &pagelayout)) != 0) - goto err; - for (i = 0; i < NUM_ENT(h); i++) { - switch (ret = __db_vrfy_inpitem(dbp, - h, pgno, i, 1, flags, &himark, &offset)) { - case 0: - break; - case DB_VERIFY_BAD: - isbad = 1; - continue; - case DB_VERIFY_FATAL: - isbad = 1; - goto err; - default: - DB_ASSERT(ret != 0); - break; - } - - /* - * We now have a plausible beginning for the item, and we know - * its length is safe. - * - * Mark the beginning and end in pagelayout so we can make sure - * items have no overlaps or gaps. 
- */ - bk = GET_BKEYDATA(dbp, h, i); - if (pagelayout[offset] == VRFY_ITEM_NOTSET) - pagelayout[offset] = VRFY_ITEM_BEGIN; - else if (pagelayout[offset] == VRFY_ITEM_BEGIN) { - /* - * Having two inp entries that point at the same patch - * of page is legal if and only if the page is - * a btree leaf and they're onpage duplicate keys-- - * that is, if (i % P_INDX) == 0. - */ - if ((i % P_INDX == 0) && (TYPE(h) == P_LBTREE)) { - /* Flag for later. */ - F_SET(pip, VRFY_HAS_DUPS); - - /* Bump up nentries so we don't undercount. */ - nentries++; - - /* - * We'll check to make sure the end is - * equal, too. - */ - isdupitem = 1; - } else { - isbad = 1; - EPRINT((dbenv, "Page %lu: duplicated item %lu", - (u_long)pgno, (u_long)i)); - } - } - - /* - * Mark the end. Its location varies with the page type - * and the item type. - * - * If the end already has a sign other than 0, do nothing-- - * it's an overlap that we'll catch later. - */ - switch (B_TYPE(bk->type)) { - case B_KEYDATA: - if (TYPE(h) == P_IBTREE) - /* It's a BINTERNAL. */ - endoff = offset + BINTERNAL_SIZE(bk->len) - 1; - else - endoff = offset + BKEYDATA_SIZE(bk->len) - 1; - break; - case B_DUPLICATE: - /* - * Flag that we have dups; we'll check whether - * that's okay during the structure check. - */ - F_SET(pip, VRFY_HAS_DUPS); - /* FALLTHROUGH */ - case B_OVERFLOW: - /* - * Overflow entries on internal pages are stored - * as the _data_ of a BINTERNAL; overflow entries - * on leaf pages are stored as the entire entry. - */ - endoff = offset + - ((TYPE(h) == P_IBTREE) ? - BINTERNAL_SIZE(BOVERFLOW_SIZE) : - BOVERFLOW_SIZE) - 1; - break; - default: - /* - * We'll complain later; for now, just mark - * a minimum. - */ - endoff = offset + BKEYDATA_SIZE(0) - 1; - break; - } - - /* - * If this is an onpage duplicate key we've seen before, - * the end had better coincide too. - */ - if (isdupitem && pagelayout[endoff] != VRFY_ITEM_END) { - EPRINT((dbenv, "Page %lu: duplicated item %lu", - (u_long)pgno, (u_long)i)); - isbad = 1; - } else if (pagelayout[endoff] == VRFY_ITEM_NOTSET) - pagelayout[endoff] = VRFY_ITEM_END; - isdupitem = 0; - - /* - * There should be no deleted items in a quiescent tree, - * except in recno. - */ - if (B_DISSET(bk->type) && TYPE(h) != P_LRECNO) { - isbad = 1; - EPRINT((dbenv, "Page %lu: item %lu marked deleted", - (u_long)pgno, (u_long)i)); - } - - /* - * Check the type and such of bk--make sure it's reasonable - * for the pagetype. - */ - switch (B_TYPE(bk->type)) { - case B_KEYDATA: - /* - * This is a normal, non-overflow BKEYDATA or BINTERNAL. - * The only thing to check is the len, and that's - * already been done. - */ - break; - case B_DUPLICATE: - if (TYPE(h) == P_IBTREE) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: duplicate page referenced by internal btree page at item %lu", - (u_long)pgno, (u_long)i)); - break; - } else if (TYPE(h) == P_LRECNO) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: duplicate page referenced by recno page at item %lu", - (u_long)pgno, (u_long)i)); - break; - } - /* FALLTHROUGH */ - case B_OVERFLOW: - bo = (TYPE(h) == P_IBTREE) ? - (BOVERFLOW *)(((BINTERNAL *)bk)->data) : - (BOVERFLOW *)bk; - - if (B_TYPE(bk->type) == B_OVERFLOW) - /* Make sure tlen is reasonable. */ - if (bo->tlen > dbp->pgsize * vdp->last_pgno) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: impossible tlen %lu, item %lu", - (u_long)pgno, - (u_long)bo->tlen, (u_long)i)); - /* Don't save as a child. 
*/ - break; - } - - if (!IS_VALID_PGNO(bo->pgno) || bo->pgno == pgno || - bo->pgno == PGNO_INVALID) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: offpage item %lu has bad pgno %lu", - (u_long)pgno, (u_long)i, (u_long)bo->pgno)); - /* Don't save as a child. */ - break; - } - - child.pgno = bo->pgno; - child.type = (B_TYPE(bk->type) == B_OVERFLOW ? - V_OVERFLOW : V_DUPLICATE); - child.tlen = bo->tlen; - if ((ret = __db_vrfy_childput(vdp, pgno, &child)) != 0) - goto err; - break; - default: - isbad = 1; - EPRINT((dbenv, "Page %lu: item %lu of invalid type %lu", - (u_long)pgno, (u_long)i, (u_long)B_TYPE(bk->type))); - break; - } - } - - /* - * Now, loop through and make sure the items are contiguous and - * non-overlapping. - */ - initem = 0; - for (i = himark; i < dbp->pgsize; i++) - if (initem == 0) - switch (pagelayout[i]) { - case VRFY_ITEM_NOTSET: - /* May be just for alignment. */ - if (i != DB_ALIGN(i, sizeof(u_int32_t))) - continue; - - isbad = 1; - EPRINT((dbenv, - "Page %lu: gap between items at offset %lu", - (u_long)pgno, (u_long)i)); - /* Find the end of the gap */ - for (; pagelayout[i + 1] == VRFY_ITEM_NOTSET && - (size_t)(i + 1) < dbp->pgsize; i++) - ; - break; - case VRFY_ITEM_BEGIN: - /* We've found an item. Check its alignment. */ - if (i != DB_ALIGN(i, sizeof(u_int32_t))) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: offset %lu unaligned", - (u_long)pgno, (u_long)i)); - } - initem = 1; - nentries++; - break; - case VRFY_ITEM_END: - /* - * We've hit the end of an item even though - * we don't think we're in one; must - * be an overlap. - */ - isbad = 1; - EPRINT((dbenv, - "Page %lu: overlapping items at offset %lu", - (u_long)pgno, (u_long)i)); - break; - } - else - switch (pagelayout[i]) { - case VRFY_ITEM_NOTSET: - /* In the middle of an item somewhere. Okay. */ - break; - case VRFY_ITEM_END: - /* End of an item; switch to out-of-item mode.*/ - initem = 0; - break; - case VRFY_ITEM_BEGIN: - /* - * Hit a second item beginning without an - * end. Overlap. - */ - isbad = 1; - EPRINT((dbenv, - "Page %lu: overlapping items at offset %lu", - (u_long)pgno, (u_long)i)); - break; - } - - __os_free(dbenv, pagelayout); - - /* Verify HOFFSET. */ - if ((db_indx_t)himark != HOFFSET(h)) { - EPRINT((dbenv, "Page %lu: bad HOFFSET %lu, appears to be %lu", - (u_long)pgno, (u_long)HOFFSET(h), (u_long)himark)); - isbad = 1; - } - -err: if (nentriesp != NULL) - *nentriesp = nentries; - - if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - - return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret); -} - -/* - * __bam_vrfy_itemorder -- - * Make sure the items on a page sort correctly. - * - * Assumes that NUM_ENT(h) and inp[0]..inp[NUM_ENT(h) - 1] are - * reasonable; be sure that __bam_vrfy_inp has been called first. - * - * If ovflok is set, it also assumes that overflow page chains - * hanging off the current page have been sanity-checked, and so we - * can use __bam_cmp to verify their ordering. If it is not set, - * and we run into an overflow page, carp and return DB_VERIFY_BAD; - * we shouldn't be called if any exist. 
- * - * PUBLIC: int __bam_vrfy_itemorder __P((DB *, VRFY_DBINFO *, PAGE *, - * PUBLIC: db_pgno_t, u_int32_t, int, int, u_int32_t)); - */ -int -__bam_vrfy_itemorder(dbp, vdp, h, pgno, nentries, ovflok, hasdups, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - u_int32_t nentries; - int ovflok, hasdups; - u_int32_t flags; -{ - BINTERNAL *bi; - BKEYDATA *bk; - BOVERFLOW *bo; - BTREE *bt; - DBT dbta, dbtb, dup_1, dup_2, *p1, *p2, *tmp; - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - db_indx_t i; - int cmp, freedup_1, freedup_2, isbad, ret, t_ret; - int (*dupfunc) __P((DB *, const DBT *, const DBT *)); - int (*func) __P((DB *, const DBT *, const DBT *)); - void *buf1, *buf2, *tmpbuf; - - /* - * We need to work in the ORDERCHKONLY environment where we might - * not have a pip, but we also may need to work in contexts where - * NUM_ENT isn't safe. - */ - if (vdp != NULL) { - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - nentries = pip->entries; - } else - pip = NULL; - - dbenv = dbp->dbenv; - ret = isbad = 0; - bo = NULL; /* Shut up compiler. */ - - memset(&dbta, 0, sizeof(DBT)); - F_SET(&dbta, DB_DBT_REALLOC); - - memset(&dbtb, 0, sizeof(DBT)); - F_SET(&dbtb, DB_DBT_REALLOC); - - buf1 = buf2 = NULL; - - DB_ASSERT(!LF_ISSET(DB_NOORDERCHK)); - - dupfunc = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare; - if (TYPE(h) == P_LDUP) - func = dupfunc; - else { - func = __bam_defcmp; - if (dbp->bt_internal != NULL) { - bt = (BTREE *)dbp->bt_internal; - if (bt->bt_compare != NULL) - func = bt->bt_compare; - } - } - - /* - * We alternate our use of dbta and dbtb so that we can walk - * through the page key-by-key without copying a dbt twice. - * p1 is always the dbt for index i - 1, and p2 for index i. - */ - p1 = &dbta; - p2 = &dbtb; - - /* - * Loop through the entries. nentries ought to contain the - * actual count, and so is a safe way to terminate the loop; whether - * we inc. by one or two depends on whether we're a leaf page-- - * on a leaf page, we care only about keys. On internal pages - * and LDUP pages, we want to check the order of all entries. - * - * Note that on IBTREE pages, we start with item 1, since item - * 0 doesn't get looked at by __bam_cmp. - */ - for (i = (TYPE(h) == P_IBTREE) ? 1 : 0; i < nentries; - i += (TYPE(h) == P_LBTREE) ? P_INDX : O_INDX) { - /* - * Put key i-1, now in p2, into p1, by swapping DBTs and bufs. - */ - tmp = p1; - p1 = p2; - p2 = tmp; - tmpbuf = buf1; - buf1 = buf2; - buf2 = tmpbuf; - - /* - * Get key i into p2. - */ - switch (TYPE(h)) { - case P_IBTREE: - bi = GET_BINTERNAL(dbp, h, i); - if (B_TYPE(bi->type) == B_OVERFLOW) { - bo = (BOVERFLOW *)(bi->data); - goto overflow; - } else { - p2->data = bi->data; - p2->size = bi->len; - } - - /* - * The leftmost key on an internal page must be - * len 0, since it's just a placeholder and - * automatically sorts less than all keys. - * - * XXX - * This criterion does not currently hold! - * See todo list item #1686. Meanwhile, it's harmless - * to just not check for it. - */ -#if 0 - if (i == 0 && bi->len != 0) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: lowest key on internal page of nonzero length", - (u_long)pgno)); - } -#endif - break; - case P_LBTREE: - case P_LDUP: - bk = GET_BKEYDATA(dbp, h, i); - if (B_TYPE(bk->type) == B_OVERFLOW) { - bo = (BOVERFLOW *)bk; - goto overflow; - } else { - p2->data = bk->data; - p2->size = bk->len; - } - break; - default: - /* - * This means our caller screwed up and sent us - * an inappropriate page. 
- */ - TYPE_ERR_PRINT(dbenv, - "__bam_vrfy_itemorder", pgno, TYPE(h)) - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - - if (0) { - /* - * If ovflok != 1, we can't safely go chasing - * overflow pages with the normal routines now; - * they might be unsafe or nonexistent. Mark this - * page as incomplete and return. - * - * Note that we don't need to worry about freeing - * buffers, since they can't have been allocated - * if overflow items are unsafe. - */ -overflow: if (!ovflok) { - F_SET(pip, VRFY_INCOMPLETE); - goto err; - } - - /* - * Overflow items are safe to chase. Do so. - * Fetch the overflow item into p2->data, - * NULLing it or reallocing it as appropriate. - * - * (We set p2->data to buf2 before the call - * so we're sure to realloc if we can and if p2 - * was just pointing at a non-overflow item.) - */ - p2->data = buf2; - if ((ret = __db_goff(dbp, - p2, bo->tlen, bo->pgno, NULL, NULL)) != 0) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: error %lu in fetching overflow item %lu", - (u_long)pgno, (u_long)ret, (u_long)i)); - } - /* In case it got realloc'ed and thus changed. */ - buf2 = p2->data; - } - - /* Compare with the last key. */ - if (p1->data != NULL && p2->data != NULL) { - cmp = func(dbp, p1, p2); - - /* comparison succeeded */ - if (cmp > 0) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: out-of-order key at entry %lu", - (u_long)pgno, (u_long)i)); - /* proceed */ - } else if (cmp == 0) { - /* - * If they compared equally, this - * had better be a (sub)database with dups. - * Mark it so we can check during the - * structure check. - */ - if (pip != NULL) - F_SET(pip, VRFY_HAS_DUPS); - else if (hasdups == 0) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: database with no duplicates has duplicated keys", - (u_long)pgno)); - } - - /* - * If we're a btree leaf, check to see - * if the data items of these on-page dups are - * in sorted order. If not, flag this, so - * that we can make sure during the - * structure checks that the DUPSORT flag - * is unset. - * - * At this point i points to a duplicate key. - * Compare the datum before it (same key) - * to the datum after it, i.e. i-1 to i+1. - */ - if (TYPE(h) == P_LBTREE) { - /* - * Unsafe; continue and we'll pick - * up the bogus nentries later. - */ - if (i + 1 >= (db_indx_t)nentries) - continue; - - /* - * We don't bother with clever memory - * management with on-page dups, - * as it's only really a big win - * in the overflow case, and overflow - * dups are probably (?) rare. - */ - if (((ret = __bam_safe_getdata(dbp, - h, i - 1, ovflok, &dup_1, - &freedup_1)) != 0) || - ((ret = __bam_safe_getdata(dbp, - h, i + 1, ovflok, &dup_2, - &freedup_2)) != 0)) - goto err; - - /* - * If either of the data are NULL, - * it's because they're overflows and - * it's not safe to chase them now. - * Mark an incomplete and return. - */ - if (dup_1.data == NULL || - dup_2.data == NULL) { - DB_ASSERT(!ovflok); - F_SET(pip, VRFY_INCOMPLETE); - goto err; - } - - /* - * If the dups are out of order, - * flag this. It's not an error - * until we do the structure check - * and see whether DUPSORT is set. - */ - if (dupfunc(dbp, &dup_1, &dup_2) > 0) - F_SET(pip, VRFY_DUPS_UNSORTED); - - if (freedup_1) - __os_ufree(dbenv, dup_1.data); - if (freedup_2) - __os_ufree(dbenv, dup_2.data); - } - } - } - } - -err: if (pip != NULL && ((t_ret = - __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) && ret == 0) - ret = t_ret; - - if (buf1 != NULL) - __os_ufree(dbenv, buf1); - if (buf2 != NULL) - __os_ufree(dbenv, buf2); - - return ((ret == 0 && isbad == 1) ? 
DB_VERIFY_BAD : ret); -} - -/* - * __bam_vrfy_structure -- - * Verify the tree structure of a btree database (including the master - * database containing subdbs). - * - * PUBLIC: int __bam_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t, - * PUBLIC: u_int32_t)); - */ -int -__bam_vrfy_structure(dbp, vdp, meta_pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t meta_pgno; - u_int32_t flags; -{ - DB *pgset; - DB_ENV *dbenv; - VRFY_PAGEINFO *mip, *rip; - db_pgno_t root, p; - int t_ret, ret; - u_int32_t nrecs, level, relen, stflags; - - dbenv = dbp->dbenv; - mip = rip = 0; - pgset = vdp->pgset; - - if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &mip)) != 0) - return (ret); - - if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, (int *)&p)) != 0) - goto err; - if (p != 0) { - EPRINT((dbenv, - "Page %lu: btree metadata page observed twice", - (u_long)meta_pgno)); - ret = DB_VERIFY_BAD; - goto err; - } - if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0) - goto err; - - root = mip->root; - - if (root == 0) { - EPRINT((dbenv, - "Page %lu: btree metadata page has no root", - (u_long)meta_pgno)); - ret = DB_VERIFY_BAD; - goto err; - } - - if ((ret = __db_vrfy_getpageinfo(vdp, root, &rip)) != 0) - goto err; - - switch (rip->type) { - case P_IBTREE: - case P_LBTREE: - stflags = flags | ST_TOPLEVEL; - if (F_ISSET(mip, VRFY_HAS_DUPS)) - stflags |= ST_DUPOK; - if (F_ISSET(mip, VRFY_HAS_DUPSORT)) - stflags |= ST_DUPSORT; - if (F_ISSET(mip, VRFY_HAS_RECNUMS)) - stflags |= ST_RECNUM; - ret = __bam_vrfy_subtree(dbp, - vdp, root, NULL, NULL, stflags, NULL, NULL, NULL); - break; - case P_IRECNO: - case P_LRECNO: - stflags = flags | ST_RECNUM | ST_IS_RECNO | ST_TOPLEVEL; - if (mip->re_len > 0) - stflags |= ST_RELEN; - if ((ret = __bam_vrfy_subtree(dbp, vdp, - root, NULL, NULL, stflags, &level, &nrecs, &relen)) != 0) - goto err; - /* - * Even if mip->re_len > 0, re_len may come back zero if the - * tree is empty. It should be okay to just skip the check in - * this case, as if there are any non-deleted keys at all, - * that should never happen. - */ - if (mip->re_len > 0 && relen > 0 && mip->re_len != relen) { - EPRINT((dbenv, - "Page %lu: recno database has bad re_len %lu", - (u_long)meta_pgno, (u_long)relen)); - ret = DB_VERIFY_BAD; - goto err; - } - ret = 0; - break; - case P_LDUP: - EPRINT((dbenv, - "Page %lu: duplicate tree referenced from metadata page", - (u_long)meta_pgno)); - ret = DB_VERIFY_BAD; - break; - default: - EPRINT((dbenv, - "Page %lu: btree root of incorrect type %lu on metadata page", - (u_long)meta_pgno, (u_long)rip->type)); - ret = DB_VERIFY_BAD; - break; - } - -err: if (mip != NULL && ((t_ret = - __db_vrfy_putpageinfo(dbenv, vdp, mip)) != 0) && ret == 0) - ret = t_ret; - if (rip != NULL && ((t_ret = - __db_vrfy_putpageinfo(dbenv, vdp, rip)) != 0) && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __bam_vrfy_subtree-- - * Verify a subtree (or entire) btree with specified root. - * - * Note that this is public because it must be called to verify - * offpage dup trees, including from hash. 
- * - * PUBLIC: int __bam_vrfy_subtree __P((DB *, VRFY_DBINFO *, db_pgno_t, void *, - * PUBLIC: void *, u_int32_t, u_int32_t *, u_int32_t *, u_int32_t *)); - */ -int -__bam_vrfy_subtree(dbp, vdp, pgno, l, r, flags, levelp, nrecsp, relenp) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t pgno; - void *l, *r; - u_int32_t flags, *levelp, *nrecsp, *relenp; -{ - BINTERNAL *li, *ri, *lp, *rp; - DB *pgset; - DBC *cc; - DB_ENV *dbenv; - DB_MPOOLFILE *mpf; - PAGE *h; - VRFY_CHILDINFO *child; - VRFY_PAGEINFO *pip; - db_indx_t i; - db_pgno_t next_pgno, prev_pgno; - db_recno_t child_nrecs, nrecs; - u_int32_t child_level, child_relen, j, level, relen, stflags; - u_int8_t leaf_type; - int (*func) __P((DB *, const DBT *, const DBT *)); - int isbad, p, ret, t_ret, toplevel; - - dbenv = dbp->dbenv; - mpf = dbp->mpf; - ret = isbad = 0; - nrecs = 0; - h = NULL; - relen = 0; - leaf_type = P_INVALID; - next_pgno = prev_pgno = PGNO_INVALID; - rp = (BINTERNAL *)r; - lp = (BINTERNAL *)l; - - /* Provide feedback on our progress to the application. */ - if (!LF_ISSET(DB_SALVAGE)) - __db_vrfy_struct_feedback(dbp, vdp); - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - cc = NULL; - level = pip->bt_level; - - toplevel = LF_ISSET(ST_TOPLEVEL) ? 1 : 0; - LF_CLR(ST_TOPLEVEL); - - /* - * If this is the root, initialize the vdp's prev- and next-pgno - * accounting. - * - * For each leaf page we hit, we'll want to make sure that - * vdp->prev_pgno is the same as pip->prev_pgno and vdp->next_pgno is - * our page number. Then, we'll set vdp->next_pgno to pip->next_pgno - * and vdp->prev_pgno to our page number, and the next leaf page in - * line should be able to do the same verification. - */ - if (toplevel) { - /* - * Cache the values stored in the vdp so that if we're an - * auxiliary tree such as an off-page duplicate set, our - * caller's leaf page chain doesn't get lost. - */ - prev_pgno = vdp->prev_pgno; - next_pgno = vdp->next_pgno; - leaf_type = vdp->leaf_type; - vdp->next_pgno = vdp->prev_pgno = PGNO_INVALID; - vdp->leaf_type = P_INVALID; - } - - /* - * We are recursively descending a btree, starting from the root - * and working our way out to the leaves. - * - * There are four cases we need to deal with: - * 1. pgno is a recno leaf page. Any children are overflows. - * 2. pgno is a duplicate leaf page. Any children - * are overflow pages; traverse them, and then return - * level and nrecs. - * 3. pgno is an ordinary leaf page. Check whether dups are - * allowed, and if so, traverse any off-page dups or - * overflows. Then return nrecs and level. - * 4. pgno is a recno internal page. Recursively check any - * child pages, making sure their levels are one lower - * and their nrecs sum to ours. - * 5. pgno is a btree internal page. Same as #4, plus we - * must verify that for each pair of BINTERNAL entries - * N and N+1, the leftmost item on N's child sorts - * greater than N, and the rightmost item on N's child - * sorts less than N+1. - * - * Furthermore, in any sorted page type (P_LDUP, P_LBTREE, P_IBTREE), - * we need to verify the internal sort order is correct if, - * due to overflow items, we were not able to do so earlier. - */ - switch (pip->type) { - case P_LRECNO: - case P_LDUP: - case P_LBTREE: - /* - * Cases 1, 2 and 3. - * - * We're some sort of leaf page; verify - * that our linked list of leaves is consistent. - */ - if (vdp->leaf_type == P_INVALID) { - /* - * First leaf page. 
Set the type that all its - * successors should be, and verify that our prev_pgno - * is PGNO_INVALID. - */ - vdp->leaf_type = pip->type; - if (pip->prev_pgno != PGNO_INVALID) - goto bad_prev; - } else { - /* - * Successor leaf page. Check our type, the previous - * page's next_pgno, and our prev_pgno. - */ - if (pip->type != vdp->leaf_type) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: unexpected page type %lu found in leaf chain (expected %lu)", - (u_long)pip->pgno, (u_long)pip->type, - (u_long)vdp->leaf_type)); - } - - /* - * Don't do the prev/next_pgno checks if we've lost - * leaf pages due to another corruption. - */ - if (!F_ISSET(vdp, VRFY_LEAFCHAIN_BROKEN)) { - if (pip->pgno != vdp->next_pgno) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: incorrect next_pgno %lu found in leaf chain (should be %lu)", - (u_long)vdp->prev_pgno, - (u_long)vdp->next_pgno, - (u_long)pip->pgno)); - } - if (pip->prev_pgno != vdp->prev_pgno) { -bad_prev: isbad = 1; - EPRINT((dbenv, - "Page %lu: incorrect prev_pgno %lu found in leaf chain (should be %lu)", - (u_long)pip->pgno, - (u_long)pip->prev_pgno, - (u_long)vdp->prev_pgno)); - } - } - } - vdp->prev_pgno = pip->pgno; - vdp->next_pgno = pip->next_pgno; - F_CLR(vdp, VRFY_LEAFCHAIN_BROKEN); - - /* - * Overflow pages are common to all three leaf types; - * traverse the child list, looking for overflows. - */ - if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0) - goto err; - for (ret = __db_vrfy_ccset(cc, pgno, &child); ret == 0; - ret = __db_vrfy_ccnext(cc, &child)) - if (child->type == V_OVERFLOW && - (ret = __db_vrfy_ovfl_structure(dbp, vdp, - child->pgno, child->tlen, - flags | ST_OVFL_LEAF)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto done; - } - - if ((ret = __db_vrfy_ccclose(cc)) != 0) - goto err; - cc = NULL; - - /* Case 1 */ - if (pip->type == P_LRECNO) { - if (!LF_ISSET(ST_IS_RECNO) && - !(LF_ISSET(ST_DUPOK) && !LF_ISSET(ST_DUPSORT))) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: recno leaf page non-recno tree", - (u_long)pgno)); - goto done; - } - goto leaf; - } else if (LF_ISSET(ST_IS_RECNO)) { - /* - * It's a non-recno leaf. Had better not be a recno - * subtree. - */ - isbad = 1; - EPRINT((dbenv, - "Page %lu: non-recno leaf page in recno tree", - (u_long)pgno)); - goto done; - } - - /* Case 2--no more work. */ - if (pip->type == P_LDUP) - goto leaf; - - /* Case 3 */ - - /* Check if we have any dups. */ - if (F_ISSET(pip, VRFY_HAS_DUPS)) { - /* If dups aren't allowed in this btree, trouble. */ - if (!LF_ISSET(ST_DUPOK)) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: duplicates in non-dup btree", - (u_long)pgno)); - } else { - /* - * We correctly have dups. If any are off-page, - * traverse those btrees recursively. - */ - if ((ret = - __db_vrfy_childcursor(vdp, &cc)) != 0) - goto err; - for (ret = __db_vrfy_ccset(cc, pgno, &child); - ret == 0; - ret = __db_vrfy_ccnext(cc, &child)) { - stflags = flags | ST_RECNUM | ST_DUPSET; - /* Skip any overflow entries. */ - if (child->type == V_DUPLICATE) { - if ((ret = __db_vrfy_duptype( - dbp, vdp, child->pgno, - stflags)) != 0) { - isbad = 1; - /* Next child. */ - continue; - } - if ((ret = __bam_vrfy_subtree( - dbp, vdp, child->pgno, NULL, - NULL, stflags | ST_TOPLEVEL, - NULL, NULL, NULL)) != 0) { - if (ret == - DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - } - } - - if ((ret = __db_vrfy_ccclose(cc)) != 0) - goto err; - cc = NULL; - - /* - * If VRFY_DUPS_UNSORTED is set, - * ST_DUPSORT had better not be. 
- */ - if (F_ISSET(pip, VRFY_DUPS_UNSORTED) && - LF_ISSET(ST_DUPSORT)) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: unsorted duplicate set in sorted-dup database", - (u_long)pgno)); - } - } - } - goto leaf; - case P_IBTREE: - case P_IRECNO: - /* We handle these below. */ - break; - default: - /* - * If a P_IBTREE or P_IRECNO contains a reference to an - * invalid page, we'll wind up here; handle it gracefully. - * Note that the code at the "done" label assumes that the - * current page is a btree/recno one of some sort; this - * is not the case here, so we goto err. - * - * If the page is entirely zeroed, its pip->type will be a lie - * (we assumed it was a hash page, as they're allowed to be - * zeroed); handle this case specially. - */ - if (F_ISSET(pip, VRFY_IS_ALLZEROES)) - ZEROPG_ERR_PRINT(dbenv, pgno, "btree or recno page"); - else - EPRINT((dbenv, - "Page %lu: btree or recno page is of inappropriate type %lu", - (u_long)pgno, (u_long)pip->type)); - - /* - * We probably lost a leaf page (or more if this was an - * internal page) from our prev/next_pgno chain. Flag - * that this is expected; we don't want or need to - * spew error messages about erroneous prev/next_pgnos, - * since that's probably not the real problem. - */ - F_SET(vdp, VRFY_LEAFCHAIN_BROKEN); - - ret = DB_VERIFY_BAD; - goto err; - } - - /* - * Cases 4 & 5: This is a btree or recno internal page. For each child, - * recurse, keeping a running count of nrecs and making sure the level - * is always reasonable. - */ - if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0) - goto err; - for (ret = __db_vrfy_ccset(cc, pgno, &child); ret == 0; - ret = __db_vrfy_ccnext(cc, &child)) - if (child->type == V_RECNO) { - if (pip->type != P_IRECNO) { - TYPE_ERR_PRINT(dbenv, "__bam_vrfy_subtree", - pgno, pip->type); - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - if ((ret = __bam_vrfy_subtree(dbp, vdp, child->pgno, - NULL, NULL, flags, &child_level, &child_nrecs, - &child_relen)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto done; - } - - if (LF_ISSET(ST_RELEN)) { - if (relen == 0) - relen = child_relen; - /* - * child_relen may be zero if the child subtree - * is empty. - */ - else if (child_relen > 0 && - relen != child_relen) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: recno page returned bad re_len %lu", - (u_long)child->pgno, - (u_long)child_relen)); - } - if (relenp) - *relenp = relen; - } - if (LF_ISSET(ST_RECNUM)) { - if (child->nrecs != child_nrecs) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: record count incorrect: actual %lu, in record %lu", - (u_long)child->pgno, - (u_long)child_nrecs, - (u_long)child->nrecs)); - } - nrecs += child_nrecs; - } - if (isbad == 0 && level != child_level + 1) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: recno level incorrect: got %lu, expected %lu", - (u_long)child->pgno, (u_long)child_level, - (u_long)(level - 1))); - } - } else if (child->type == V_OVERFLOW) { - /* - * It is possible for one internal page to reference - * a single overflow page twice, if all the items - * in the subtree referenced by slot 0 are deleted, - * then a similar number of items are put back - * before the key that formerly had been in slot 1. - * - * (Btree doesn't look at the key in slot 0, so the - * fact that the key formerly at slot 1 is the "wrong" - * parent of the stuff in the slot 0 subtree isn't - * really incorrect.) - * - * __db_vrfy_ovfl_structure is designed to be - * efficiently called multiple times for multiple - * references; call it here as many times as is - * appropriate. 
- */ - - /* Otherwise, __db_vrfy_childput would be broken. */ - DB_ASSERT(child->refcnt >= 1); - - /* - * An overflow referenced more than twice here - * shouldn't happen. - */ - if (child->refcnt > 2) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: overflow page %lu referenced more than twice from internal page", - (u_long)pgno, (u_long)child->pgno)); - } else - for (j = 0; j < child->refcnt; j++) - if ((ret = __db_vrfy_ovfl_structure(dbp, - vdp, child->pgno, child->tlen, - flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto done; - } - } - - if ((ret = __db_vrfy_ccclose(cc)) != 0) - goto err; - cc = NULL; - - /* We're done with case 4. */ - if (pip->type == P_IRECNO) - goto done; - - /* - * Case 5. Btree internal pages. - * As described above, we need to iterate through all the - * items on the page and make sure that our children sort appropriately - * with respect to them. - * - * For each entry, li will be the "left-hand" key for the entry - * itself, which must sort lower than all entries on its child; - * ri will be the key to its right, which must sort greater. - */ - if (h == NULL && (ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - goto err; - for (i = 0; i < pip->entries; i += O_INDX) { - li = GET_BINTERNAL(dbp, h, i); - ri = (i + O_INDX < pip->entries) ? - GET_BINTERNAL(dbp, h, i + O_INDX) : rp; - - /* - * The leftmost key is forcibly sorted less than all entries, - * so don't bother passing it. - */ - if ((ret = __bam_vrfy_subtree(dbp, vdp, li->pgno, - i == 0 ? NULL : li, ri, flags, &child_level, - &child_nrecs, NULL)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto done; - } - - if (LF_ISSET(ST_RECNUM)) { - /* - * Keep a running tally on the actual record count so - * we can return it to our parent (if we have one) or - * compare it to the NRECS field if we're a root page. - */ - nrecs += child_nrecs; - - /* - * Make sure the actual record count of the child - * is equal to the value in the BINTERNAL structure. - */ - if (li->nrecs != child_nrecs) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: item %lu has incorrect record count of %lu, should be %lu", - (u_long)pgno, (u_long)i, (u_long)li->nrecs, - (u_long)child_nrecs)); - } - } - - if (level != child_level + 1) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: Btree level incorrect: got %lu, expected %lu", - (u_long)li->pgno, - (u_long)child_level, (u_long)(level - 1))); - } - } - - if (0) { -leaf: level = LEAFLEVEL; - if (LF_ISSET(ST_RECNUM)) - nrecs = pip->rec_cnt; - - /* XXX - * We should verify that the record count on a leaf page - * is the sum of the number of keys and the number of - * records in its off-page dups. This requires looking - * at the page again, however, and it may all be changing - * soon, so for now we don't bother. - */ - - if (LF_ISSET(ST_RELEN) && relenp) - *relenp = pip->re_len; - } -done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) { - /* - * During the page-by-page pass, item order verification was - * not finished due to the presence of overflow items. If - * isbad == 0, though, it's now safe to do so, as we've - * traversed any child overflow pages. Do it. - */ - if (h == NULL && (ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - goto err; - if ((ret = __bam_vrfy_itemorder(dbp, - vdp, h, pgno, 0, 1, 0, flags)) != 0) - goto err; - F_CLR(pip, VRFY_INCOMPLETE); - } - - /* - * It's possible to get to this point with a page that has no - * items, but without having detected any sort of failure yet. 
- * Having zero items is legal if it's a leaf--it may be the - * root page in an empty tree, or the tree may have been - * modified with the DB_REVSPLITOFF flag set (there's no way - * to tell from what's on disk). For an internal page, - * though, having no items is a problem (all internal pages - * must have children). - */ - if (isbad == 0 && ret == 0) { - if (h == NULL && (ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - goto err; - - if (NUM_ENT(h) == 0 && ISINTERNAL(h)) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: internal page is empty and should not be", - (u_long)pgno)); - goto err; - } - } - - /* - * Our parent has sent us BINTERNAL pointers to parent records - * so that we can verify our place with respect to them. If it's - * appropriate--we have a default sort function--verify this. - */ - if (isbad == 0 && ret == 0 && !LF_ISSET(DB_NOORDERCHK) && lp != NULL) { - if (h == NULL && (ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - goto err; - - /* - * __bam_vrfy_treeorder needs to know what comparison function - * to use. If ST_DUPSET is set, we're in a duplicate tree - * and we use the duplicate comparison function; otherwise, - * use the btree one. If unset, use the default, of course. - */ - func = LF_ISSET(ST_DUPSET) ? dbp->dup_compare : - ((BTREE *)dbp->bt_internal)->bt_compare; - if (func == NULL) - func = __bam_defcmp; - - if ((ret = __bam_vrfy_treeorder( - dbp, pgno, h, lp, rp, func, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - } - - /* - * This is guaranteed to succeed for leaf pages, but no harm done. - * - * Internal pages below the top level do not store their own - * record numbers, so we skip them. - */ - if (LF_ISSET(ST_RECNUM) && nrecs != pip->rec_cnt && toplevel) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: bad record count: has %lu records, claims %lu", - (u_long)pgno, (u_long)nrecs, (u_long)pip->rec_cnt)); - } - - if (levelp) - *levelp = level; - if (nrecsp) - *nrecsp = nrecs; - - pgset = vdp->pgset; - if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0) - goto err; - if (p != 0) { - isbad = 1; - EPRINT((dbenv, "Page %lu: linked twice", (u_long)pgno)); - } else if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0) - goto err; - - if (toplevel) - /* - * The last page's next_pgno in the leaf chain should have been - * PGNO_INVALID. - */ - if (vdp->next_pgno != PGNO_INVALID) { - isbad = 1; - EPRINT((dbenv, "Page %lu: unterminated leaf chain", - (u_long)vdp->prev_pgno)); - } - -err: if (toplevel) { - /* Restore our caller's settings. */ - vdp->next_pgno = next_pgno; - vdp->prev_pgno = prev_pgno; - vdp->leaf_type = leaf_type; - } - - if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0) - ret = t_ret; - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -/* - * __bam_vrfy_treeorder -- - * Verify that the lowest key on a page sorts greater than the - * BINTERNAL which points to it (lp), and the highest key - * sorts less than the BINTERNAL above that (rp). - * - * If lp is NULL, this means that it was the leftmost key on the - * parent, which (regardless of sort function) sorts less than - * all keys. No need to check it. - * - * If rp is NULL, lp was the highest key on the parent, so there's - * no higher key we must sort less than. 
- */ -static int -__bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags) - DB *dbp; - db_pgno_t pgno; - PAGE *h; - BINTERNAL *lp, *rp; - int (*func) __P((DB *, const DBT *, const DBT *)); - u_int32_t flags; -{ - BOVERFLOW *bo; - DB_ENV *dbenv; - DBT dbt; - db_indx_t last; - int ret, cmp; - - dbenv = dbp->dbenv; - memset(&dbt, 0, sizeof(DBT)); - F_SET(&dbt, DB_DBT_MALLOC); - ret = 0; - - /* - * Empty pages are sorted correctly by definition. We check - * to see whether they ought to be empty elsewhere; leaf - * pages legally may be. - */ - if (NUM_ENT(h) == 0) - return (0); - - switch (TYPE(h)) { - case P_IBTREE: - case P_LDUP: - last = NUM_ENT(h) - O_INDX; - break; - case P_LBTREE: - last = NUM_ENT(h) - P_INDX; - break; - default: - TYPE_ERR_PRINT(dbenv, "__bam_vrfy_treeorder", pgno, TYPE(h)); - DB_ASSERT(0); - return (EINVAL); - } - - /* - * The key on page h, the child page, is more likely to be - * an overflow page, so we pass its offset, rather than lp/rp's, - * into __bam_cmp. This will take advantage of __db_moff. - */ - - /* - * Skip first-item check if we're an internal page--the first - * entry on an internal page is treated specially by __bam_cmp, - * so what's on the page shouldn't matter. (Plus, since we're passing - * our page and item 0 as to __bam_cmp, we'll sort before our - * parent and falsely report a failure.) - */ - if (lp != NULL && TYPE(h) != P_IBTREE) { - if (lp->type == B_KEYDATA) { - dbt.data = lp->data; - dbt.size = lp->len; - } else if (lp->type == B_OVERFLOW) { - bo = (BOVERFLOW *)lp->data; - if ((ret = __db_goff(dbp, &dbt, bo->tlen, bo->pgno, - NULL, NULL)) != 0) - return (ret); - } else { - DB_ASSERT(0); - EPRINT((dbenv, - "Page %lu: unknown type for internal record", - (u_long)PGNO(h))); - return (EINVAL); - } - - /* On error, fall through, free if needed, and return. */ - if ((ret = __bam_cmp(dbp, &dbt, h, 0, func, &cmp)) == 0) { - if (cmp > 0) { - EPRINT((dbenv, - "Page %lu: first item on page sorted greater than parent entry", - (u_long)PGNO(h))); - ret = DB_VERIFY_BAD; - } - } else - EPRINT((dbenv, - "Page %lu: first item on page had comparison error", - (u_long)PGNO(h))); - - if (dbt.data != lp->data) - __os_ufree(dbenv, dbt.data); - if (ret != 0) - return (ret); - } - - if (rp != NULL) { - if (rp->type == B_KEYDATA) { - dbt.data = rp->data; - dbt.size = rp->len; - } else if (rp->type == B_OVERFLOW) { - bo = (BOVERFLOW *)rp->data; - if ((ret = __db_goff(dbp, &dbt, bo->tlen, bo->pgno, - NULL, NULL)) != 0) - return (ret); - } else { - DB_ASSERT(0); - EPRINT((dbenv, - "Page %lu: unknown type for internal record", - (u_long)PGNO(h))); - return (EINVAL); - } - - /* On error, fall through, free if needed, and return. */ - if ((ret = __bam_cmp(dbp, &dbt, h, last, func, &cmp)) == 0) { - if (cmp < 0) { - EPRINT((dbenv, - "Page %lu: last item on page sorted greater than parent entry", - (u_long)PGNO(h))); - ret = DB_VERIFY_BAD; - } - } else - EPRINT((dbenv, - "Page %lu: last item on page had comparison error", - (u_long)PGNO(h))); - - if (dbt.data != rp->data) - __os_ufree(dbenv, dbt.data); - } - - return (ret); -} - -/* - * __bam_salvage -- - * Safely dump out anything that looks like a key on an alleged - * btree leaf page. 
- * - * PUBLIC: int __bam_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, - * PUBLIC: PAGE *, void *, int (*)(void *, const void *), DBT *, - * PUBLIC: u_int32_t)); - */ -int -__bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t pgno; - u_int32_t pgtype; - PAGE *h; - void *handle; - int (*callback) __P((void *, const void *)); - DBT *key; - u_int32_t flags; -{ - BKEYDATA *bk; - BOVERFLOW *bo; - DBT dbt, unknown_key, unknown_data; - DB_ENV *dbenv; - VRFY_ITEM *pgmap; - db_indx_t i, last, beg, end, *inp; - u_int32_t himark; - void *ovflbuf; - int ret, t_ret, t2_ret; - - dbenv = dbp->dbenv; - ovflbuf = pgmap = NULL; - inp = P_INP(dbp, h); - - memset(&dbt, 0, sizeof(DBT)); - dbt.flags = DB_DBT_REALLOC; - - memset(&unknown_key, 0, sizeof(DBT)); - unknown_key.size = (u_int32_t)strlen("UNKNOWN_KEY"); - unknown_key.data = "UNKNOWN_KEY"; - memset(&unknown_data, 0, sizeof(DBT)); - unknown_data.size = (u_int32_t)strlen("UNKNOWN_DATA"); - unknown_data.data = "UNKNOWN_DATA"; - - /* - * Allocate a buffer for overflow items. Start at one page; - * __db_safe_goff will realloc as needed. - */ - if ((ret = __os_malloc(dbenv, dbp->pgsize, &ovflbuf)) != 0) - goto err; - - if (LF_ISSET(DB_AGGRESSIVE) && (ret = - __os_calloc(dbenv, dbp->pgsize, sizeof(pgmap[0]), &pgmap)) != 0) - goto err; - - /* - * Loop through the inp array, spitting out key/data pairs. - * - * If we're salvaging normally, loop from 0 through NUM_ENT(h). If - * we're being aggressive, loop until we hit the end of the page -- - * NUM_ENT() may be bogus. - */ - himark = dbp->pgsize; - for (i = 0, last = UINT16_MAX;; i += O_INDX) { - /* If we're not aggressive, break when we hit NUM_ENT(h). */ - if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h)) - break; - - /* Verify the current item. */ - t_ret = - __db_vrfy_inpitem(dbp, h, pgno, i, 1, flags, &himark, NULL); - - if (t_ret != 0) { - /* - * If this is a btree leaf and we've printed out a key - * but not its associated data item, fix this imbalance - * by printing an "UNKNOWN_DATA". - */ - if (pgtype == P_LBTREE && i % P_INDX == 1 && - last == i - 1 && (t2_ret = __db_vrfy_prdbt( - &unknown_data, - 0, " ", handle, callback, 0, vdp)) != 0) { - if (ret == 0) - ret = t2_ret; - goto err; - } - - /* - * Don't return DB_VERIFY_FATAL; it's private and means - * only that we can't go on with this page, not with - * the whole database. It's not even an error if we've - * run into it after NUM_ENT(h). - */ - if (t_ret == DB_VERIFY_FATAL) { - if (i < NUM_ENT(h) && ret == 0) - ret = DB_VERIFY_BAD; - break; - } - continue; - } - - /* - * If this returned 0, it's safe to print or (carefully) - * try to fetch. - * - * We only print deleted items if DB_AGGRESSIVE is set. - */ - bk = GET_BKEYDATA(dbp, h, i); - if (!LF_ISSET(DB_AGGRESSIVE) && B_DISSET(bk->type)) - continue; - - /* - * If this is a btree leaf and we're about to print out a data - * item for which we didn't print out a key, fix this imbalance - * by printing an "UNKNOWN_KEY". - */ - if (pgtype == P_LBTREE && i % P_INDX == 1 && - last != i - 1 && (t_ret = __db_vrfy_prdbt( - &unknown_key, 0, " ", handle, callback, 0, vdp)) != 0) { - if (ret == 0) - ret = t_ret; - goto err; - } - last = i; - - /* - * We're going to go try to print the next item. If key is - * non-NULL, we're a dup page, so we've got to print the key - * first, unless SA_SKIPFIRSTKEY is set and we're on the first - * entry. 
- */ - if (key != NULL && (i != 0 || !LF_ISSET(SA_SKIPFIRSTKEY))) - if ((t_ret = __db_vrfy_prdbt(key, - 0, " ", handle, callback, 0, vdp)) != 0) { - if (ret == 0) - ret = t_ret; - goto err; - } - - beg = inp[i]; - switch (B_TYPE(bk->type)) { - case B_DUPLICATE: - end = beg + BOVERFLOW_SIZE - 1; - /* - * If we're not on a normal btree leaf page, there - * shouldn't be off-page dup sets. Something's - * confused; just drop it, and the code to pick up - * unlinked offpage dup sets will print it out - * with key "UNKNOWN" later. - */ - if (pgtype != P_LBTREE) - break; - - bo = (BOVERFLOW *)bk; - - /* - * If the page number is unreasonable, or if this is - * supposed to be a key item, output "UNKNOWN_KEY" -- - * the best we can do is run into the data items in - * the unlinked offpage dup pass. - */ - if (!IS_VALID_PGNO(bo->pgno) || (i % P_INDX == 0)) { - /* Not much to do on failure. */ - if ((t_ret = __db_vrfy_prdbt(&unknown_key, - 0, " ", handle, callback, 0, vdp)) != 0) { - if (ret == 0) - ret = t_ret; - goto err; - } - break; - } - - /* Don't stop on error. */ - if ((t_ret = __db_salvage_duptree(dbp, - vdp, bo->pgno, &dbt, handle, callback, - flags | SA_SKIPFIRSTKEY)) != 0 && ret == 0) - ret = t_ret; - - break; - case B_KEYDATA: - end = (db_indx_t)DB_ALIGN( - beg + bk->len, sizeof(u_int32_t)) - 1; - dbt.data = bk->data; - dbt.size = bk->len; - if ((t_ret = __db_vrfy_prdbt(&dbt, - 0, " ", handle, callback, 0, vdp)) != 0) { - if (ret == 0) - ret = t_ret; - goto err; - } - break; - case B_OVERFLOW: - end = beg + BOVERFLOW_SIZE - 1; - bo = (BOVERFLOW *)bk; - - /* Don't stop on error. */ - if ((t_ret = __db_safe_goff(dbp, vdp, - bo->pgno, &dbt, &ovflbuf, flags)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __db_vrfy_prdbt( - t_ret == 0 ? &dbt : &unknown_key, - 0, " ", handle, callback, 0, vdp)) != 0 && ret == 0) - ret = t_ret; - break; - default: - /* - * We should never get here; __db_vrfy_inpitem should - * not be returning 0 if bk->type is unrecognizable. - */ - DB_ASSERT(0); - if (ret == 0) - ret = EINVAL; - goto err; - } - - /* - * If we're being aggressive, mark the beginning and end of - * the item; we'll come back and print whatever "junk" is in - * the gaps in case we had any bogus inp elements and thereby - * missed stuff. - */ - if (LF_ISSET(DB_AGGRESSIVE)) { - pgmap[beg] = VRFY_ITEM_BEGIN; - pgmap[end] = VRFY_ITEM_END; - } - } - -err: if (pgmap != NULL) - __os_free(dbenv, pgmap); - if (ovflbuf != NULL) - __os_free(dbenv, ovflbuf); - - /* Mark this page as done. */ - if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __bam_salvage_walkdupint -- - * Walk a known-good btree or recno internal page which is part of - * a dup tree, calling __db_salvage_duptree on each child page. 
- * - * PUBLIC: int __bam_salvage_walkdupint __P((DB *, VRFY_DBINFO *, PAGE *, - * PUBLIC: DBT *, void *, int (*)(void *, const void *), u_int32_t)); - */ -int -__bam_salvage_walkdupint(dbp, vdp, h, key, handle, callback, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - DBT *key; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - RINTERNAL *ri; - BINTERNAL *bi; - int ret, t_ret; - db_indx_t i; - - ret = 0; - for (i = 0; i < NUM_ENT(h); i++) { - switch (TYPE(h)) { - case P_IBTREE: - bi = GET_BINTERNAL(dbp, h, i); - if ((t_ret = __db_salvage_duptree(dbp, - vdp, bi->pgno, key, handle, callback, flags)) != 0) - ret = t_ret; - break; - case P_IRECNO: - ri = GET_RINTERNAL(dbp, h, i); - if ((t_ret = __db_salvage_duptree(dbp, - vdp, ri->pgno, key, handle, callback, flags)) != 0) - ret = t_ret; - break; - default: - __db_err(dbp->dbenv, - "__bam_salvage_walkdupint called on non-int. page"); - DB_ASSERT(0); - return (EINVAL); - } - /* Pass SA_SKIPFIRSTKEY, if set, on to the 0th child only. */ - flags &= ~LF_ISSET(SA_SKIPFIRSTKEY); - } - - return (ret); -} - -/* - * __bam_meta2pgset -- - * Given a known-good meta page, return in pgsetp a 0-terminated list of - * db_pgno_t's corresponding to the pages in the btree. - * - * We do this by a somewhat sleazy method, to avoid having to traverse the - * btree structure neatly: we walk down the left side to the very - * first leaf page, then we mark all the pages in the chain of - * NEXT_PGNOs (being wary of cycles and invalid ones), then we - * consolidate our scratch array into a nice list, and return. This - * avoids the memory management hassles of recursion and the - * trouble of walking internal pages--they just don't matter, except - * for the left branch. - * - * PUBLIC: int __bam_meta2pgset __P((DB *, VRFY_DBINFO *, BTMETA *, - * PUBLIC: u_int32_t, DB *)); - */ -int -__bam_meta2pgset(dbp, vdp, btmeta, flags, pgset) - DB *dbp; - VRFY_DBINFO *vdp; - BTMETA *btmeta; - u_int32_t flags; - DB *pgset; -{ - BINTERNAL *bi; - DB_MPOOLFILE *mpf; - PAGE *h; - RINTERNAL *ri; - db_pgno_t current, p; - int err_ret, ret; - - mpf = dbp->mpf; - h = NULL; - ret = err_ret = 0; - DB_ASSERT(pgset != NULL); - for (current = btmeta->root;;) { - if (!IS_VALID_PGNO(current) || current == PGNO(btmeta)) { - err_ret = DB_VERIFY_BAD; - goto err; - } - if ((ret = __memp_fget(mpf, ¤t, 0, &h)) != 0) { - err_ret = ret; - goto err; - } - - switch (TYPE(h)) { - case P_IBTREE: - case P_IRECNO: - if ((ret = __bam_vrfy(dbp, - vdp, h, current, flags | DB_NOORDERCHK)) != 0) { - err_ret = ret; - goto err; - } - if (TYPE(h) == P_IBTREE) { - bi = GET_BINTERNAL(dbp, h, 0); - current = bi->pgno; - } else { /* P_IRECNO */ - ri = GET_RINTERNAL(dbp, h, 0); - current = ri->pgno; - } - break; - case P_LBTREE: - case P_LRECNO: - goto traverse; - default: - err_ret = DB_VERIFY_BAD; - goto err; - } - - if ((ret = __memp_fput(mpf, h, 0)) != 0) - err_ret = ret; - h = NULL; - } - - /* - * At this point, current is the pgno of leaf page h, the 0th in the - * tree we're concerned with. - */ -traverse: - while (IS_VALID_PGNO(current) && current != PGNO_INVALID) { - if (h == NULL && - (ret = __memp_fget(mpf, ¤t, 0, &h)) != 0) { - err_ret = ret; - break; - } - - if ((ret = __db_vrfy_pgset_get(pgset, current, (int *)&p)) != 0) - goto err; - - if (p != 0) { - /* - * We've found a cycle. Return success anyway-- - * our caller may as well use however much of - * the pgset we've come up with. 
- */ - break; - } - if ((ret = __db_vrfy_pgset_inc(pgset, current)) != 0) - goto err; - - current = NEXT_PGNO(h); - if ((ret = __memp_fput(mpf, h, 0)) != 0) - err_ret = ret; - h = NULL; - } - -err: if (h != NULL) - (void)__memp_fput(mpf, h, 0); - - return (ret == 0 ? err_ret : ret); -} - -/* - * __bam_safe_getdata -- - * - * Utility function for __bam_vrfy_itemorder. Safely gets the datum at - * index i, page h, and sticks it in DBT dbt. If ovflok is 1 and i's an - * overflow item, we do a safe_goff to get the item and signal that we need - * to free dbt->data; if ovflok is 0, we leaves the DBT zeroed. - */ -static int -__bam_safe_getdata(dbp, h, i, ovflok, dbt, freedbtp) - DB *dbp; - PAGE *h; - u_int32_t i; - int ovflok; - DBT *dbt; - int *freedbtp; -{ - BKEYDATA *bk; - BOVERFLOW *bo; - - memset(dbt, 0, sizeof(DBT)); - *freedbtp = 0; - - bk = GET_BKEYDATA(dbp, h, i); - if (B_TYPE(bk->type) == B_OVERFLOW) { - if (!ovflok) - return (0); - - bo = (BOVERFLOW *)bk; - F_SET(dbt, DB_DBT_MALLOC); - - *freedbtp = 1; - return (__db_goff(dbp, dbt, bo->tlen, bo->pgno, NULL, NULL)); - } else { - dbt->data = bk->data; - dbt->size = bk->len; - } - - return (0); -} diff --git a/storage/bdb/btree/btree.src b/storage/bdb/btree/btree.src deleted file mode 100644 index 1827cffcc53..00000000000 --- a/storage/bdb/btree/btree.src +++ /dev/null @@ -1,252 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: btree.src,v 12.3 2005/08/08 03:37:05 ubell Exp $ - */ - -PREFIX __bam -DBPRIVATE - -INCLUDE #ifndef NO_SYSTEM_INCLUDES -INCLUDE #include -INCLUDE -INCLUDE #include -INCLUDE #include -INCLUDE #endif -INCLUDE -INCLUDE #include "db_int.h" -INCLUDE #include "dbinc/crypto.h" -INCLUDE #include "dbinc/db_page.h" -INCLUDE #include "dbinc/db_dispatch.h" -INCLUDE #include "dbinc/db_am.h" -INCLUDE #include "dbinc/btree.h" -INCLUDE #include "dbinc/log.h" -INCLUDE #include "dbinc/txn.h" -INCLUDE - -/* - * BTREE-split: used to log a page split. - * - * left: the page number for the low-order contents. - * llsn: the left page's original LSN. - * right: the page number for the high-order contents. - * rlsn: the right page's original LSN. - * indx: the number of entries that went to the left page. - * npgno: the next page number - * nlsn: the next page's original LSN (or 0 if no next page). - * root_pgno: the root page number - * pg: the split page's contents before the split. - * opflags: SPL_NRECS: if splitting a tree that maintains a record count. - */ -BEGIN split 62 -DB fileid int32_t ld -ARG left db_pgno_t lu -POINTER llsn DB_LSN * lu -ARG right db_pgno_t lu -POINTER rlsn DB_LSN * lu -ARG indx u_int32_t lu -ARG npgno db_pgno_t lu -POINTER nlsn DB_LSN * lu -ARG root_pgno db_pgno_t lu -PGDBT pg DBT s -ARG opflags u_int32_t lu -END - -/* - * BTREE-rsplit: used to log a reverse-split - * - * pgno: the page number of the page copied over the root. - * pgdbt: the page being copied on the root page. - * root_pgno: the root page number. - * nrec: the tree's record count. - * rootent: last entry on the root page. - * rootlsn: the root page's original lsn. - */ -BEGIN rsplit 63 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -PGDBT pgdbt DBT s -ARG root_pgno db_pgno_t lu -ARG nrec db_pgno_t lu -DBT rootent DBT s -POINTER rootlsn DB_LSN * lu -END - -/* - * BTREE-adj: used to log the adjustment of an index. - * - * pgno: the page modified. - * lsn: the page's original lsn. - * indx: the index adjusted. 
- * indx_copy: the index to copy if inserting. - * is_insert: 0 if a delete, 1 if an insert. - */ -BEGIN adj 55 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER lsn DB_LSN * lu -ARG indx u_int32_t lu -ARG indx_copy u_int32_t lu -ARG is_insert u_int32_t lu -END - -/* - * BTREE-cadjust: used to adjust the count change in an internal page. - * - * pgno: the page modified. - * lsn: the page's original lsn. - * indx: the index to be adjusted. - * adjust: the signed adjustment. - * opflags: CAD_UPDATEROOT: if root page count was adjusted. - */ -BEGIN cadjust 56 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER lsn DB_LSN * lu -ARG indx u_int32_t lu -ARG adjust int32_t ld -ARG opflags u_int32_t lu -END - -/* - * BTREE-cdel: used to log the intent-to-delete of a cursor record. - * - * pgno: the page modified. - * lsn: the page's original lsn. - * indx: the index to be deleted. - */ -BEGIN cdel 57 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER lsn DB_LSN * lu -ARG indx u_int32_t lu -END - -/* - * BTREE-repl: used to log the replacement of an item. - * - * pgno: the page modified. - * lsn: the page's original lsn. - * indx: the index to be replaced. - * isdeleted: set if the record was previously deleted. - * orig: the original data. - * repl: the replacement data. - * prefix: the prefix of the replacement that matches the original. - * suffix: the suffix of the replacement that matches the original. - */ -BEGIN repl 58 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER lsn DB_LSN * lu -ARG indx u_int32_t lu -ARG isdeleted u_int32_t lu -DBT orig DBT s -DBT repl DBT s -ARG prefix u_int32_t lu -ARG suffix u_int32_t lu -END - -/* - * BTREE-root: log the assignment of a root btree page. - */ -BEGIN root 59 -DB fileid int32_t ld -ARG meta_pgno db_pgno_t lu -ARG root_pgno db_pgno_t lu -POINTER meta_lsn DB_LSN * lu -END - -/* - * BTREE-curadj: undo cursor adjustments on txn abort. - * Should only be processed during DB_TXN_ABORT. - * NOTE: the first_indx field gets used to hold - * signed index adjustment in one case. - * care should be taken if its size is changed. - */ -BEGIN curadj 64 -/* Fileid of db affected. */ -DB fileid int32_t ld -/* Which adjustment. */ -ARG mode db_ca_mode ld -/* Page entry is from. */ -ARG from_pgno db_pgno_t lu -/* Page entry went to. */ -ARG to_pgno db_pgno_t lu -/* Left page of root split. */ -ARG left_pgno db_pgno_t lu -/* First index of dup set. Also used as adjustment. */ -ARG first_indx u_int32_t lu -/* Index entry is from. */ -ARG from_indx u_int32_t lu -/* Index where entry went. */ -ARG to_indx u_int32_t lu -END - -/* - * BTREE-rcuradj: undo cursor adjustments on txn abort in - * renumbering recno trees. - * Should only be processed during DB_TXN_ABORT. - */ -BEGIN rcuradj 65 -/* Fileid of db affected. */ -DB fileid int32_t ld -/* Which adjustment. */ -ARG mode ca_recno_arg ld -/* Root page number. */ -ARG root db_pgno_t ld -/* Recno of the adjustment. */ -ARG recno db_recno_t ld -/* Order number of the adjustment. */ -ARG order u_int32_t ld -END - -/* - * BTREE-relink -- Handles relinking around a deleted leaf page. - * - */ -BEGIN relink 147 -/* Fileid of db affected. */ -DB fileid int32_t ld -/* The page being removed. */ -ARG pgno db_pgno_t lu -/* The new page number, if any. */ -ARG new_pgno db_pgno_t lu -/* The previous page. */ -ARG prev db_pgno_t lu -/* The previous page's original lsn. */ -POINTER lsn_prev DB_LSN * lu -/* The next page. */ -ARG next db_pgno_t lu -/* The previous page's original lsn. 
*/ -POINTER lsn_next DB_LSN * lu -END - -/* - * BTREE-merge -- Handles merging of pages during a compaction. - */ -BEGIN merge 148 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER lsn DB_LSN * lu -ARG npgno db_pgno_t lu -POINTER nlsn DB_LSN * lu -DBT hdr DBT s -DBT data DBT s -DBT ind DBT s -END - -/* - * BTREE-pgno -- Handles replacing a page number in the record - * refernece on pgno by indx. - */ -BEGIN pgno 149 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER lsn DB_LSN * lu -ARG indx u_int32_t lu -ARG opgno db_pgno_t lu -ARG npgno db_pgno_t lu -END diff --git a/storage/bdb/build_unix/.IGNORE_ME b/storage/bdb/build_unix/.IGNORE_ME deleted file mode 100644 index 558fd496f0c..00000000000 --- a/storage/bdb/build_unix/.IGNORE_ME +++ /dev/null @@ -1,3 +0,0 @@ -Some combinations of the gzip and tar archive exploders found -on Linux systems ignore directories that don't have any files -(other than symbolic links) in them. So, here's a file. diff --git a/storage/bdb/build_win32/Berkeley_DB.dsw b/storage/bdb/build_win32/Berkeley_DB.dsw deleted file mode 100644 index 91440869b83..00000000000 --- a/storage/bdb/build_win32/Berkeley_DB.dsw +++ /dev/null @@ -1,782 +0,0 @@ -Microsoft Developer Studio Workspace File, Format Version 6.00 -# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! - -############################################################################### - -Project: "build_all"=.\build_all.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_archive - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_checkpoint - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_deadlock - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_dump - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_hotbackup - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_load - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_printlog - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_recover - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_stat - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_upgrade - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_verify - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_static - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_access - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_btrec - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_env - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_lock - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_mpool - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_sequence - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_tpcb - End Project Dependency - Begin Project Dependency - Project_Dep_Name example_database_load - End Project Dependency - Begin Project Dependency - Project_Dep_Name example_database_read - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_access - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_btrec - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_env - End Project Dependency - 
Begin Project Dependency - Project_Dep_Name excxx_lock - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_mpool - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_sequence - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_tpcb - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_example_database_load - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_example_database_read - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_repquote - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_txnguide - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_txnguide_inmem - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_txnguide - End Project Dependency - Begin Project Dependency - Project_Dep_Name excxx_txnguide_inmem - End Project Dependency -}}} - -############################################################################### - -Project: "db_archive"=.\db_archive.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_checkpoint"=.\db_checkpoint.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_deadlock"=.\db_deadlock.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_dll"=.\db_dll.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ -}}} - -############################################################################### - -Project: "db_dump"=.\db_dump.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_hotbackup"=.\db_hotbackup.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_java"=.\db_java.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_load"=.\db_load.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_printlog"=.\db_printlog.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_recover"=.\db_recover.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - 
Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_small"=.\db_small.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ -}}} - -############################################################################### - -Project: "db_stat"=.\db_stat.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_static"=.\db_static.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ -}}} - -############################################################################### - -Project: "db_tcl"=.\db_tcl.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_test"=.\db_test.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name build_all - End Project Dependency - Begin Project Dependency - Project_Dep_Name db_tcl - End Project Dependency -}}} - -############################################################################### - -Project: "db_upgrade"=.\db_upgrade.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "db_verify"=.\db_verify.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_access"=.\ex_access.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_btrec"=.\ex_btrec.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_csvcode"=.\ex_csvcode.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_csvload"=.\ex_csvload.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_csvcode - End Project Dependency -}}} - -############################################################################### - -Project: "ex_csvquery"=.\ex_csvquery.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency - Begin Project Dependency - Project_Dep_Name ex_csvcode - End Project Dependency -}}} - -############################################################################### - -Project: "ex_env"=.\ex_env.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - 
-Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_lock"=.\ex_lock.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_mpool"=.\ex_mpool.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_repquote"=.\ex_repquote.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_sequence"=.\ex_sequence.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_tpcb"=.\ex_tpcb.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_txnguide"=.\ex_txnguide.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "ex_txnguide_inmem"=.\ex_txnguide_inmem.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "example_database_load"=.\example_database_load.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "example_database_read"=.\example_database_read.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_access"=.\excxx_access.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_btrec"=.\excxx_btrec.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_env"=.\excxx_env.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: 
"excxx_example_database_load"=.\excxx_example_database_load.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_example_database_read"=.\excxx_example_database_read.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_lock"=.\excxx_lock.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_mpool"=.\excxx_mpool.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_sequence"=.\excxx_sequence.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_tpcb"=.\excxx_tpcb.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_txnguide"=.\excxx_txnguide.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Project: "excxx_txnguide_inmem"=.\excxx_txnguide_inmem.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ - Begin Project Dependency - Project_Dep_Name db_dll - End Project Dependency -}}} - -############################################################################### - -Global: - -Package=<5> -{{{ -}}} - -Package=<3> -{{{ -}}} - -############################################################################### - diff --git a/storage/bdb/build_win32/app_dsp.src b/storage/bdb/build_win32/app_dsp.src deleted file mode 100644 index e8644c71299..00000000000 --- a/storage/bdb/build_win32/app_dsp.src +++ /dev/null @@ -1,261 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. 
For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 ASCII Debug" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 ASCII Release" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win64 Debug AMD64" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win64 Release AMD64" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win64 Debug IA64" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win64 Release IA64" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@bin_rel_dest@" -# PROP BASE Intermediate_Dir "Release/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@bin_rel_dest@" -# PROP Intermediate_Dir "Release/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" /libpath:"@lib_rel_dest@" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@bin_debug_dest@" -# PROP BASE Intermediate_Dir "Debug/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@bin_debug_dest@" -# PROP Intermediate_Dir "Debug/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no /libpath:"@lib_debug_dest@" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@bin_debug_dest@_ASCII" -# PROP BASE Intermediate_Dir "Debug_ASCII/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@bin_debug_dest@_ASCII" -# PROP Intermediate_Dir "Debug_ASCII/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no -# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no /libpath:"@lib_debug_dest@_ASCII" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@bin_rel_dest@_ASCII" -# PROP BASE Intermediate_Dir "Release_ASCII/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@bin_rel_dest@_ASCII" -# PROP Intermediate_Dir "Release_ASCII/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" -# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" /libpath:"@lib_rel_dest@_ASCII" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug AMD64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@bin_debug_dest@_AMD64" -# PROP BASE Intermediate_Dir "Debug_AMD64/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@bin_debug_dest@_AMD64" -# PROP Intermediate_Dir "Debug_AMD64/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:AMD64 /nodefaultlib:"libcmtd" /fixed:no -# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:AMD64 /nodefaultlib:"libcmtd" /fixed:no /libpath:"@lib_debug_dest@_AMD64" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release AMD64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@bin_rel_dest@_AMD64" -# PROP BASE Intermediate_Dir "Release_AMD64/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@bin_rel_dest@_AMD64" -# PROP Intermediate_Dir "Release_AMD64/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:AMD64 /nodefaultlib:"libcmt" -# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:AMD64 /nodefaultlib:"libcmt" /libpath:"@lib_rel_dest@_AMD64" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug IA64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@bin_debug_dest@_IA64" -# PROP BASE Intermediate_Dir "Debug_IA64/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@bin_debug_dest@_IA64" -# PROP Intermediate_Dir "Debug_IA64/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no -# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no /libpath:"@lib_debug_dest@_IA64" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release IA64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@bin_rel_dest@_IA64" -# PROP BASE Intermediate_Dir "Release_IA64/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@bin_rel_dest@_IA64" -# PROP Intermediate_Dir "Release_IA64/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" -# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" /libpath:"@lib_rel_dest@_IA64" -@POST_BUILD@ - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -# Name "@project_name@ - Win32 ASCII Debug" -# Name "@project_name@ - Win32 ASCII Release" -# Name "@project_name@ - Win64 Debug AMD64" -# Name "@project_name@ - Win64 Release AMD64" -# Name "@project_name@ - Win64 Debug IA64" -# Name "@project_name@ - Win64 Release IA64" -@SOURCE_FILES@ -# Begin Source File - -SOURCE=..\clib\getopt.c -# End Source File -# End Target -# End Project diff --git a/storage/bdb/build_win32/db_java_xaj.mak b/storage/bdb/build_win32/db_java_xaj.mak deleted file mode 100644 index c2dbc920d17..00000000000 --- a/storage/bdb/build_win32/db_java_xaj.mak +++ /dev/null @@ -1,21 +0,0 @@ -JAVA_XADIR=../java/src/com/sleepycat/db/xa - -JAVA_XASRCS=\ - $(JAVA_XADIR)/DbXAResource.java \ - $(JAVA_XADIR)/DbXid.java - -Release/dbxa.jar : $(JAVA_XASRCS) - @echo compiling Berkeley DB XA classes - @javac -g -d ./Release/classes -classpath "$(CLASSPATH);./Release/classes" $(JAVA_XASRCS) - @echo creating jar file - @cd .\Release\classes - @jar cf ../dbxa.jar com\sleepycat\db\xa\*.class - @echo Java XA build finished - -Debug/dbxa.jar : $(JAVA_XASRCS) - @echo compiling Berkeley DB XA classes - @javac -g -d ./Debug/classes -classpath "$(CLASSPATH);./Debug/classes" $(JAVA_XASRCS) - @echo creating jar file - @cd .\Debug\classes - @jar cf ../dbxa.jar com\sleepycat\db\xa\*.class - @echo Java XA build finished diff --git a/storage/bdb/build_win32/db_test.src b/storage/bdb/build_win32/db_test.src deleted file mode 100644 index 38e7fcd9b40..00000000000 --- a/storage/bdb/build_win32/db_test.src +++ /dev/null @@ -1,97 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. 
For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386 -# Begin Special Build Tool -SOURCE="$(InputPath)" -PostBuild_Desc=Copy built executable files. -PostBuild_Cmds=copy Release\*.exe . -# End Special Build Tool - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no -# Begin Special Build Tool -SOURCE="$(InputPath)" -PostBuild_Desc=Copy built executable files. -PostBuild_Cmds=copy Debug\*.exe . -# End Special Build Tool - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win32/dbkill.cpp b/storage/bdb/build_win32/dbkill.cpp deleted file mode 100644 index 7a7082188f6..00000000000 --- a/storage/bdb/build_win32/dbkill.cpp +++ /dev/null @@ -1,131 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. 
- * - * Copyright (c) 1999-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: dbkill.cpp,v 12.1 2005/06/16 20:20:43 bostic Exp $ - */ -/* - * Kill - - * Simulate Unix kill on Windows/NT and Windows/9X. - * This good enough to support the Berkeley DB test suite, - * but may be missing some favorite features. - * - * Would have used MKS kill, but it didn't seem to work well - * on Win/9X. Cygnus kill works within the Gnu/Cygnus environment - * (where processes are given small pids, with presumably a translation - * table between small pids and actual process handles), but our test - * environment, via Tcl, does not use the Cygnus environment. - * - * Compile this and install it as c:/tools/kill.exe (or as indicated - * by build_win32/include.tcl ). - */ - -#include -#include -#include -#include - -/* - * Like atol, with specified base. Would use stdlib, but - * strtol("0xFFFF1234", NULL, 16) returns 0x7FFFFFFF and - * strtol("4294712487", NULL, 16) returns 0x7FFFFFFF w/ VC++ - */ -long -myatol(char *s, int base) -{ - long result = 0; - char ch; - int sign = 1; /* + */ - if (base == 0) - base = 10; - if (base != 10 && base != 16) - return LONG_MAX; - while ((ch = *s++) != '\0') { - if (ch == '-') { - sign = -sign; - } - else if (ch >= '0' && ch <= '9') { - result = result * base + (ch - '0'); - } - else if (ch == 'x' || ch == 'X') { - /* Allow leading 0x..., and switch to base 16 */ - base = 16; - } - else if (base == 16 && ch >= 'a' && ch <= 'f') { - result = result * base + (ch - 'a' + 10); - } - else if (base == 16 && ch >= 'A' && ch <= 'F') { - result = result * base + (ch - 'A' + 10); - } - else { - if (sign > 1) - return LONG_MAX; - else - return LONG_MIN; - } - } - return sign * result; -} - -void -usage_exit() -{ - fprintf(stderr, "Usage: kill [ -sig ] pid\n"); - fprintf(stderr, " for win32, sig must be or 0, 15 (TERM)\n"); - exit(EXIT_FAILURE); -} - -int -main(int argc, char **argv) -{ - HANDLE hProcess ; - DWORD accessflag; - long pid; - int sig = 15; - - if (argc > 2) { - if (argv[1][0] != '-') - usage_exit(); - - if (strcmp(argv[1], "-TERM") == 0) - sig = 15; - else { - /* currently sig is more or less ignored, - * we only care if it is zero or not - */ - sig = atoi(&argv[1][1]); - if (sig < 0) - usage_exit(); - } - argc--; - argv++; - } - if (argc < 2) - usage_exit(); - - pid = myatol(argv[1], 10); - /*printf("pid = %ld (0x%lx) (command line %s)\n", pid, pid, argv[1]);*/ - if (pid == LONG_MAX || pid == LONG_MIN) - usage_exit(); - - if (sig == 0) - accessflag = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ; - else - accessflag = STANDARD_RIGHTS_REQUIRED | PROCESS_TERMINATE; - hProcess = OpenProcess(accessflag, FALSE, pid); - if (hProcess == NULL) { - fprintf(stderr, "dbkill: %s: no such process\n", argv[1]); - exit(EXIT_FAILURE); - } - if (sig == 0) - exit(EXIT_SUCCESS); - if (!TerminateProcess(hProcess, 99)) { - DWORD err = GetLastError(); - fprintf(stderr, - "dbkill: cannot kill process: error %d (0x%lx)\n", err, err); - exit(EXIT_FAILURE); - } - return EXIT_SUCCESS; -} diff --git a/storage/bdb/build_win32/dllmain.c b/storage/bdb/build_win32/dllmain.c deleted file mode 100644 index 70c2e849d66..00000000000 --- a/storage/bdb/build_win32/dllmain.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * -------------------------------------------------------------------------- - * Copyright (C) 1997 Netscape Communications Corporation - * -------------------------------------------------------------------------- - * - * dllmain.c - * - * $Id: dllmain.c,v 1.3 2000/10/26 21:58:48 bostic Exp 
$ - */ - -#define WIN32_LEAN_AND_MEAN -#include - -static int ProcessesAttached = 0; -static HINSTANCE Instance; /* Global library instance handle. */ - -/* - * The following declaration is for the VC++ DLL entry point. - */ - -BOOL APIENTRY DllMain (HINSTANCE hInst, - DWORD reason, LPVOID reserved); - -/* - *---------------------------------------------------------------------- - * - * DllEntryPoint -- - * - * This wrapper function is used by Borland to invoke the - * initialization code for Tcl. It simply calls the DllMain - * routine. - * - * Results: - * See DllMain. - * - * Side effects: - * See DllMain. - * - *---------------------------------------------------------------------- - */ - -BOOL APIENTRY -DllEntryPoint(hInst, reason, reserved) - HINSTANCE hInst; /* Library instance handle. */ - DWORD reason; /* Reason this function is being called. */ - LPVOID reserved; /* Not used. */ -{ - return DllMain(hInst, reason, reserved); -} - -/* - *---------------------------------------------------------------------- - * - * DllMain -- - * - * This routine is called by the VC++ C run time library init - * code, or the DllEntryPoint routine. It is responsible for - * initializing various dynamically loaded libraries. - * - * Results: - * TRUE on sucess, FALSE on failure. - * - * Side effects: - * Establishes 32-to-16 bit thunk and initializes sockets library. - * - *---------------------------------------------------------------------- - */ -BOOL APIENTRY -DllMain(hInst, reason, reserved) - HINSTANCE hInst; /* Library instance handle. */ - DWORD reason; /* Reason this function is being called. */ - LPVOID reserved; /* Not used. */ -{ - switch (reason) { - case DLL_PROCESS_ATTACH: - - /* - * Registration of UT need to be done only once for first - * attaching process. At that time set the tclWin32s flag - * to indicate if the DLL is executing under Win32s or not. - */ - - if (ProcessesAttached++) { - return FALSE; /* Not the first initialization. */ - } - - Instance = hInst; - return TRUE; - - case DLL_PROCESS_DETACH: - - ProcessesAttached--; - break; - } - - return TRUE; -} diff --git a/storage/bdb/build_win32/dynamic_dsp.src b/storage/bdb/build_win32/dynamic_dsp.src deleted file mode 100644 index 641ef221369..00000000000 --- a/storage/bdb/build_win32/dynamic_dsp.src +++ /dev/null @@ -1,281 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. 
For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win32 ASCII Debug" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win32 ASCII Release" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win64 Debug AMD64" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win64 Release AMD64" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win64 Debug IA64" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win64 Release IA64" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -MTL=midl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@lib_rel_dest@" -# PROP BASE Intermediate_Dir "Release/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@lib_rel_dest@" -# PROP Intermediate_Dir "Release/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:windows /dll /machine:I386 -# ADD LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"@bin_rel_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" /libpath:"$(OUTDIR)" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@lib_debug_dest@" -# PROP BASE Intermediate_Dir "Debug/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@lib_debug_dest@" -# PROP Intermediate_Dir "Debug/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# SUBTRACT CPP /Fr -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept -# ADD LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /export:__db_assert /pdb:none /debug /machine:I386 /out:"@bin_debug_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no /libpath:"$(OUTDIR)" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@lib_debug_dest@_ASCII" -# PROP BASE Intermediate_Dir "Debug_ASCII/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@lib_debug_dest@_ASCII" -# PROP Intermediate_Dir "Debug_ASCII/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c -# SUBTRACT BASE CPP /Fr -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c -# SUBTRACT CPP /Fr -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"@bin_debug_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no -# ADD LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /export:__db_assert /pdb:none /debug /machine:I386 /out:"@bin_debug_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no /libpath:"$(OUTDIR)" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@lib_rel_dest@_ASCII" -# PROP BASE Intermediate_Dir "Release_ASCII/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@lib_rel_dest@_ASCII" -# PROP Intermediate_Dir "Release_ASCII/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"@bin_rel_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" -# ADD LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"@bin_rel_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" /libpath:"$(OUTDIR)" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug AMD64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@lib_debug_dest@_AMD64" -# PROP BASE Intermediate_Dir "Debug_AMD64/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@lib_debug_dest@_AMD64" -# PROP Intermediate_Dir "Debug_AMD64/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# SUBTRACT BASE CPP /Fr -# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /Wp64 /c -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:AMD64 /out:"@bin_debug_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no -# ADD LINK32 @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /export:__db_assert /debug /machine:AMD64 /out:"@bin_debug_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no /libpath:"$(OUTDIR)" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release AMD64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@lib_rel_dest@_AMD64" -# PROP BASE Intermediate_Dir "Release_AMD64/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@lib_rel_dest@_AMD64" -# PROP Intermediate_Dir "Release_AMD64/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /Ob2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MD /W3 /EHsc /O2 /Ob2 /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:AMD64 /out:"@bin_rel_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" -# ADD LINK32 @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:AMD64 /out:"@bin_rel_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" /libpath:"$(OUTDIR)" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug IA64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@lib_debug_dest@_IA64" -# PROP BASE Intermediate_Dir "Debug_IA64/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@lib_debug_dest@_IA64" -# PROP Intermediate_Dir "Debug_IA64/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# SUBTRACT BASE CPP /Fr -# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# SUBTRACT CPP /Fr -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"@bin_debug_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no -# ADD LINK32 @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /export:__db_assert /debug /machine:IA64 /out:"@bin_debug_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no /libpath:"$(OUTDIR)" -@POST_BUILD@ - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release IA64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@lib_rel_dest@_IA64" -# PROP BASE Intermediate_Dir "Release_IA64/@project_name@" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@lib_rel_dest@_IA64" -# PROP Intermediate_Dir "Release_IA64/@project_name@" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /Ob2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MD /W3 /EHsc /O2 /Ob2 /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"@bin_rel_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" -# ADD LINK32 @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"@bin_rel_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" /libpath:"$(OUTDIR)" -@POST_BUILD@ - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -# Name "@project_name@ - Win32 ASCII Debug" -# Name "@project_name@ - Win32 ASCII Release" -# Name "@project_name@ - Win64 Debug AMD64" -# Name "@project_name@ - Win64 Release AMD64" -# Name "@project_name@ - Win64 Debug IA64" -# Name "@project_name@ - Win64 Release IA64" -@SOURCE_FILES@ - -# End Target -# End Project diff --git a/storage/bdb/build_win32/java_dsp.src b/storage/bdb/build_win32/java_dsp.src deleted file mode 100644 index c06cb2b3544..00000000000 --- a/storage/bdb/build_win32/java_dsp.src +++ /dev/null @@ -1,129 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -MTL=midl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" -# Begin Custom Build - Compiling java files using javac -ProjDir=. -InputPath=.\Release\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll -SOURCE="$(InputPath)" - -"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" - echo compiling Berkeley DB classes - mkdir "$(OUTDIR)\classes" - javac -O -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java - echo compiling examples - mkdir "$(OUTDIR)\classes.ex" - javac -O -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java - echo creating jar files - jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . - jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . - echo Java build finished - -# End Custom Build - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 2 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c -# SUBTRACT CPP /Fr -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no -# Begin Custom Build - Compiling java files using javac -ProjDir=. -InputPath=.\Debug\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll -SOURCE="$(InputPath)" - -"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" - echo compiling Berkeley DB classes - mkdir "$(OUTDIR)\classes" - javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java - echo compiling examples - mkdir "$(OUTDIR)\classes.ex" - javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java - echo creating jar files - jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . - jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . 
- echo Java build finished - -# End Custom Build - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win32/libdb_tcl.def b/storage/bdb/build_win32/libdb_tcl.def deleted file mode 100644 index 01a89e44dff..00000000000 --- a/storage/bdb/build_win32/libdb_tcl.def +++ /dev/null @@ -1,7 +0,0 @@ -; $Id: libdb_tcl.def,v 12.0 2004/11/17 03:48:15 bostic Exp $ - -DESCRIPTION 'Berkeley DB TCL interface Library' -EXPORTS - Db_tcl_Init - _NameToPtr - diff --git a/storage/bdb/build_win32/libdbrc.src b/storage/bdb/build_win32/libdbrc.src deleted file mode 100644 index ec5ba9b3c6c..00000000000 --- a/storage/bdb/build_win32/libdbrc.src +++ /dev/null @@ -1,33 +0,0 @@ -1 VERSIONINFO - FILEVERSION %MAJOR%,0,%MINOR%,%PATCH% - PRODUCTVERSION %MAJOR%,0,%MINOR%,%PATCH% - FILEFLAGSMASK 0x3fL -#ifdef _DEBUG - FILEFLAGS 0x1L -#else - FILEFLAGS 0x0L -#endif - FILEOS 0x4L - FILETYPE 0x2L - FILESUBTYPE 0x0L - -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904b0" - BEGIN - VALUE "CompanyName", "Sleepycat Software\0" - VALUE "FileDescription", "Berkeley DB %MAJOR%.%MINOR% DLL\0" - VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0" - VALUE "InternalName", "libdb%MAJOR%%MINOR%.dll\0" - VALUE "LegalCopyright", "Copyright Sleepycat Software Inc. 1997-2005\0" - VALUE "OriginalFilename", "libdb%MAJOR%%MINOR%.dll\0" - VALUE "ProductName", "Sleepycat Software libdb\0" - VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0" - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x409, 1200 - END -END diff --git a/storage/bdb/build_win32/small_dsp.src b/storage/bdb/build_win32/small_dsp.src deleted file mode 100644 index a54160f6b2a..00000000000 --- a/storage/bdb/build_win32/small_dsp.src +++ /dev/null @@ -1,85 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Static Library" 0x0104 - -CFG=@project_name@ - Win32 Debug Static -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release Static" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release_small" -# PROP BASE Intermediate_Dir "Release_small" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release_small" -# PROP Intermediate_Dir "Release_small" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c -# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "HAVE_SMALLBUILD" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Release_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -# ADD LIB32 /nologo /out:"Release_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" - -# PROP BASE Use_MFC 1 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug_small" -# PROP BASE Intermediate_Dir "Debug_small" -# PROP BASE Target_Dir "" -# PROP Use_MFC 1 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug_small" -# PROP Intermediate_Dir "Debug_small" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c -# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Debug_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" -# ADD LIB32 /nologo /out:"Debug_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release Static" -# Name "@project_name@ - Win32 Debug Static" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win32/srcfile_dsp.src b/storage/bdb/build_win32/srcfile_dsp.src deleted file mode 100644 index 572350e6356..00000000000 --- a/storage/bdb/build_win32/srcfile_dsp.src +++ /dev/null @@ -1,4 +0,0 @@ -# Begin Source File - -SOURCE=@srcdir@\@srcfile@ -# End Source File diff --git a/storage/bdb/build_win32/static_dsp.src b/storage/bdb/build_win32/static_dsp.src deleted file mode 100644 index 84c3d298792..00000000000 --- a/storage/bdb/build_win32/static_dsp.src +++ /dev/null @@ -1,235 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Static Library" 0x0104 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. 
For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win32 ASCII Release" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win32 ASCII Debug" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win64 Debug AMD64" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win64 Release AMD64" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win64 Debug IA64" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win64 Release IA64" (based on "Win32 (x86) Static Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@lib_rel_dest@" -# PROP BASE Intermediate_Dir "Release/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@lib_rel_dest@" -# PROP Intermediate_Dir "Release/@project_name@" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"@lib_rel_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -# ADD LIB32 /nologo /out:"@lib_rel_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@lib_debug_dest@" -# PROP BASE Intermediate_Dir "Debug/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@lib_debug_dest@" -# PROP Intermediate_Dir "Debug/@project_name@" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"@lib_debug_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" -# ADD LIB32 /nologo /out:"@lib_debug_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@lib_rel_dest@_ASCII" -# PROP BASE Intermediate_Dir "Release_ASCII/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@lib_rel_dest@_ASCII" -# PROP Intermediate_Dir "Release_ASCII/@project_name@" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"@lib_rel_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -# ADD LIB32 /nologo /out:"@lib_rel_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@lib_debug_dest@_ASCII" -# PROP BASE Intermediate_Dir "Debug_ASCII/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@lib_debug_dest@_ASCII" -# PROP Intermediate_Dir "Debug_ASCII/@project_name@" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"@lib_debug_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" -# ADD LIB32 /nologo /out:"@lib_debug_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug AMD64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@lib_debug_dest@_AMD64" -# PROP BASE Intermediate_Dir "Debug/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@lib_debug_dest@_AMD64" -# PROP Intermediate_Dir "Debug_AMD64/@project_name@" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /Wp64 /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"@lib_debug_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" -# ADD LIB32 /nologo /out:"@lib_debug_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release AMD64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@lib_rel_dest@_AMD64" -# PROP BASE Intermediate_Dir "Release_AMD64/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@lib_rel_dest@_AMD64" -# PROP Intermediate_Dir "Release_AMD64/@project_name@" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"@lib_rel_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -# ADD LIB32 /nologo /out:"@lib_rel_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug IA64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "@lib_debug_dest@_IA64" -# PROP BASE Intermediate_Dir "Debug_IA64/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "@lib_debug_dest@_IA64" -# PROP Intermediate_Dir "Debug_IA64/@project_name@" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"@lib_debug_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" -# ADD LIB32 /nologo /out:"@lib_debug_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release IA64" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "@lib_rel_dest@_IA64" -# PROP BASE Intermediate_Dir "Release_IA64/@project_name@" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "@lib_rel_dest@_IA64" -# PROP Intermediate_Dir "Release_IA64/@project_name@" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"@lib_rel_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -# ADD LIB32 /nologo /out:"@lib_rel_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -# Name "@project_name@ - Win32 ASCII Release" -# Name "@project_name@ - Win32 ASCII Debug" -# Name "@project_name@ - Win64 Debug AMD64" -# Name "@project_name@ - Win64 Release AMD64" -# Name "@project_name@ - Win64 Debug IA64" -# Name "@project_name@ - Win64 Release IA64" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win32/tcl_dsp.src b/storage/bdb/build_win32/tcl_dsp.src deleted file mode 100644 index fc7b2177e00..00000000000 --- a/storage/bdb/build_win32/tcl_dsp.src +++ /dev/null @@ -1,93 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -MTL=midl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib tcl84.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 2 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c -# SUBTRACT CPP /Fr -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib tcl84g.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win64/app_dsp.src b/storage/bdb/build_win64/app_dsp.src deleted file mode 100644 index 4218e010b8a..00000000000 --- a/storage/bdb/build_win64/app_dsp.src +++ /dev/null @@ -1,145 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=@project_name@ - Win32 Debug Static -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. 
For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release_static" -# PROP Intermediate_Dir "Release_static" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:IA64 -# ADD LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib /nologo /subsystem:console /machine:IA64 - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug_static" -# PROP Intermediate_Dir "Debug_static" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no -# ADD LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -# Name "@project_name@ - Win32 Release Static" -# Name "@project_name@ - Win32 Debug Static" -@SOURCE_FILES@ -# Begin Source File - -SOURCE=..\clib\getopt.c -# End Source File -# End Target -# End Project diff --git a/storage/bdb/build_win64/db_test.src b/storage/bdb/build_win64/db_test.src deleted file mode 100644 index 2628128bbcf..00000000000 --- a/storage/bdb/build_win64/db_test.src +++ /dev/null @@ -1,97 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. 
For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:IA64 -# Begin Special Build Tool -SOURCE="$(InputPath)" -PostBuild_Desc=Copy built executable files. -PostBuild_Cmds=copy Release\*.exe . -# End Special Build Tool - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /out:"Debug/dbkill.exe" /fixed:no -# Begin Special Build Tool -SOURCE="$(InputPath)" -PostBuild_Desc=Copy built executable files. -PostBuild_Cmds=copy Debug\*.exe . 
-# End Special Build Tool - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win64/dynamic_dsp.src b/storage/bdb/build_win64/dynamic_dsp.src deleted file mode 100644 index 176ca0fb5ae..00000000000 --- a/storage/bdb/build_win64/dynamic_dsp.src +++ /dev/null @@ -1,93 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -MTL=midl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 -# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 2 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c -# SUBTRACT CPP /Fr -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win64/ex_repquote.src b/storage/bdb/build_win64/ex_repquote.src deleted file mode 100644 index 02352f8216b..00000000000 --- a/storage/bdb/build_win64/ex_repquote.src +++ /dev/null @@ -1,145 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=@project_name@ - Win32 Debug Static -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /machine:IA64 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib ws2_32.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release_static" -# PROP Intermediate_Dir "Release_static" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 -# ADD LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib ws2_32.lib /nologo /subsystem:console /machine:IA64 - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Ignore_Export_Lib 0 -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug_static" -# PROP Intermediate_Dir "Debug_static" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no -# ADD LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib ws2_32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:console /debug /machine:IA64 /fixed:no - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -# Name "@project_name@ - Win32 Release Static" -# Name "@project_name@ - Win32 Debug Static" -@SOURCE_FILES@ -# Begin Source File - -SOURCE=..\clib\getopt.c -# End Source File -# End Target -# End Project diff --git a/storage/bdb/build_win64/java_dsp.src b/storage/bdb/build_win64/java_dsp.src deleted file mode 100644 index 954dad7d80c..00000000000 --- a/storage/bdb/build_win64/java_dsp.src +++ /dev/null @@ -1,129 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -MTL=midl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" -# Begin Custom Build - Compiling java files using javac -ProjDir=. -InputPath=.\Release\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll -SOURCE="$(InputPath)" - -"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" - echo compiling Berkeley DB classes - mkdir "$(OUTDIR)\classes" - javac -O -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java - echo compiling examples - mkdir "$(OUTDIR)\classes.ex" - javac -O -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java - echo creating jar files - jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . - jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . - echo Java build finished - -# End Custom Build - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 2 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c -# SUBTRACT CPP /Fr -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no -# Begin Custom Build - Compiling java files using javac -ProjDir=. -InputPath=.\Debug\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll -SOURCE="$(InputPath)" - -"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)" - echo compiling Berkeley DB classes - mkdir "$(OUTDIR)\classes" - javac -g -d "$(OUTDIR)\classes" -classpath "$(OUTDIR)/classes" ..\java\src\com\sleepycat\db\*.java ..\java\src\com\sleepycat\db\internal\*.java ..\java\src\com\sleepycat\bind\*.java ..\java\src\com\sleepycat\bind\serial\*.java ..\java\src\com\sleepycat\bind\tuple\*.java ..\java\src\com\sleepycat\collections\*.java ..\java\src\com\sleepycat\compat\*.java ..\java\src\com\sleepycat\util\*.java - echo compiling examples - mkdir "$(OUTDIR)\classes.ex" - javac -g -d "$(OUTDIR)\classes.ex" -classpath "$(OUTDIR)\classes;$(OUTDIR)\classes.ex" ..\examples_java\src\com\sleepycat\examples\db\*.java ..\examples_java\src\com\sleepycat\examples\db\GettingStarted\*.java ..\examples_java\src\com\sleepycat\examples\collections\access\*.java ..\examples_java\src\com\sleepycat\examples\collections\hello\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\basic\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\entity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\tuple\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\sentity\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\marshal\*.java ..\examples_java\src\com\sleepycat\examples\collections\ship\factory\*.java - echo creating jar files - jar cf "$(OUTDIR)\db.jar" -C "$(OUTDIR)\classes" . - jar cf "$(OUTDIR)\dbexamples.jar" -C "$(OUTDIR)\classes.ex" . - echo Java build finished - -# End Custom Build - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win64/libdbrc.src b/storage/bdb/build_win64/libdbrc.src deleted file mode 100644 index 4c644ea9f4f..00000000000 --- a/storage/bdb/build_win64/libdbrc.src +++ /dev/null @@ -1,33 +0,0 @@ -1 VERSIONINFO - FILEVERSION %MAJOR%,0,%MINOR%,%PATCH% - PRODUCTVERSION %MAJOR%,0,%MINOR%,%PATCH% - FILEFLAGSMASK 0x3fL -#ifdef _DEBUG - FILEFLAGS 0x1L -#else - FILEFLAGS 0x0L -#endif - FILEOS 0x4L - FILETYPE 0x2L - FILESUBTYPE 0x0L - -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904b0" - BEGIN - VALUE "CompanyName", "Sleepycat Software\0" - VALUE "FileDescription", "Berkeley DB 3.0 DLL\0" - VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0" - VALUE "InternalName", "libdb.dll\0" - VALUE "LegalCopyright", "Copyright Sleepycat Software Inc. 
1997-2004\0" - VALUE "OriginalFilename", "libdb.dll\0" - VALUE "ProductName", "Sleepycat Software libdb\0" - VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0" - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x409, 1200 - END -END diff --git a/storage/bdb/build_win64/small_dsp.src b/storage/bdb/build_win64/small_dsp.src deleted file mode 100644 index a54160f6b2a..00000000000 --- a/storage/bdb/build_win64/small_dsp.src +++ /dev/null @@ -1,85 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Static Library" 0x0104 - -CFG=@project_name@ - Win32 Debug Static -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release Static" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release_small" -# PROP BASE Intermediate_Dir "Release_small" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release_small" -# PROP Intermediate_Dir "Release_small" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c -# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Release_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -# ADD LIB32 /nologo /out:"Release_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" - -# PROP BASE Use_MFC 1 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug_small" -# PROP BASE Intermediate_Dir "Debug_small" -# PROP BASE Target_Dir "" -# PROP Use_MFC 1 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug_small" -# PROP Intermediate_Dir "Debug_small" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "HAVE_SMALLBUILD" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c -# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "HAVE_SMALLBUILD" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Debug_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" -# ADD LIB32 /nologo /out:"Debug_small/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release Static" -# Name "@project_name@ - Win32 Debug Static" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win64/srcfile_dsp.src b/storage/bdb/build_win64/srcfile_dsp.src deleted file mode 100644 index 572350e6356..00000000000 --- a/storage/bdb/build_win64/srcfile_dsp.src +++ /dev/null @@ -1,4 +0,0 @@ -# Begin Source File - -SOURCE=@srcdir@\@srcfile@ -# End Source File diff --git a/storage/bdb/build_win64/static_dsp.src b/storage/bdb/build_win64/static_dsp.src deleted file mode 100644 index 411e8df8d07..00000000000 --- a/storage/bdb/build_win64/static_dsp.src +++ /dev/null @@ -1,85 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Static Library" 0x0104 - -CFG=@project_name@ - Win32 Debug Static -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release Static" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release_static" -# PROP BASE Intermediate_Dir "Release_static" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release_static" -# PROP Intermediate_Dir "Release_static" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c -# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -# ADD LIB32 /nologo /out:"Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" - -# PROP BASE Use_MFC 1 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug_static" -# PROP BASE Intermediate_Dir "Debug_static" -# PROP BASE Target_Dir "" -# PROP Use_MFC 1 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug_static" -# PROP Intermediate_Dir "Debug_static" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c -# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c -# ADD BASE RSC /l 0xc09 -# ADD RSC /l 0xc09 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" -# ADD LIB32 /nologo /out:"Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release Static" -# Name "@project_name@ - Win32 Debug Static" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/build_win64/tcl_dsp.src b/storage/bdb/build_win64/tcl_dsp.src deleted file mode 100644 index 57bb6f21303..00000000000 --- a/storage/bdb/build_win64/tcl_dsp.src +++ /dev/null @@ -1,93 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 - -CFG=@project_name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -MTL=midl.exe -RSC=rc.exe - -!IF "$(CFG)" == "@project_name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /machine:IA64 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib tcl84.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"Release/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" - -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 2 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c -# SUBTRACT CPP /Fr -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib /nologo /subsystem:windows /dll /debug /machine:IA64 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib tcl84g.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"Debug/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no - -!ENDIF - -# Begin Target - -# Name "@project_name@ - Win32 Release" -# Name "@project_name@ - Win32 Debug" -@SOURCE_FILES@ -# End Target -# End Project diff --git a/storage/bdb/clib/getcwd.c b/storage/bdb/clib/getcwd.c deleted file mode 100644 index 367950640c9..00000000000 --- a/storage/bdb/clib/getcwd.c +++ /dev/null @@ -1,270 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1989, 1991, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: getcwd.c,v 12.1 2005/06/16 20:20:48 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include - -#if HAVE_DIRENT_H -# include -# define NAMLEN(dirent) strlen((dirent)->d_name) -#else -# define dirent direct -# define NAMLEN(dirent) (dirent)->d_namlen -# if HAVE_SYS_NDIR_H -# include -# endif -# if HAVE_SYS_DIR_H -# include -# endif -# if HAVE_NDIR_H -# include -# endif -#endif - -#include -#include -#include -#include -#endif - -#include "db_int.h" - -#define ISDOT(dp) \ - (dp->d_name[0] == '.' && (dp->d_name[1] == '\0' || \ - (dp->d_name[1] == '.' && dp->d_name[2] == '\0'))) - -#ifndef dirfd -#define dirfd(dirp) ((dirp)->dd_fd) -#endif - -/* - * getcwd -- - * Get the current working directory. - * - * PUBLIC: #ifndef HAVE_GETCWD - * PUBLIC: char *getcwd __P((char *, size_t)); - * PUBLIC: #endif - */ -char * -getcwd(pt, size) - char *pt; - size_t size; -{ - register struct dirent *dp; - register DIR *dir; - register dev_t dev; - register ino_t ino; - register int first; - register char *bpt, *bup; - struct stat s; - dev_t root_dev; - ino_t root_ino; - size_t ptsize, upsize; - int ret, save_errno; - char *ept, *eup, *up; - - /* - * If no buffer specified by the user, allocate one as necessary. - * If a buffer is specified, the size has to be non-zero. The path - * is built from the end of the buffer backwards. - */ - if (pt) { - ptsize = 0; - if (!size) { - __os_set_errno(EINVAL); - return (NULL); - } - if (size == 1) { - __os_set_errno(ERANGE); - return (NULL); - } - ept = pt + size; - } else { - if ((ret = - __os_malloc(NULL, ptsize = 1024 - 4, &pt)) != 0) { - __os_set_errno(ret); - return (NULL); - } - ept = pt + ptsize; - } - bpt = ept - 1; - *bpt = '\0'; - - /* - * Allocate bytes (1024 - malloc space) for the string of "../"'s. - * Should always be enough (it's 340 levels). If it's not, allocate - * as necessary. Special case the first stat, it's ".", not "..". - */ - if ((ret = __os_malloc(NULL, upsize = 1024 - 4, &up)) != 0) - goto err; - eup = up + 1024; - bup = up; - up[0] = '.'; - up[1] = '\0'; - - /* Save root values, so know when to stop. */ - if (stat("/", &s)) - goto err; - root_dev = s.st_dev; - root_ino = s.st_ino; - - __os_set_errno(0); /* XXX readdir has no error return. */ - - for (first = 1;; first = 0) { - /* Stat the current level. */ - if (lstat(up, &s)) - goto err; - - /* Save current node values. */ - ino = s.st_ino; - dev = s.st_dev; - - /* Check for reaching root. */ - if (root_dev == dev && root_ino == ino) { - *--bpt = PATH_SEPARATOR[0]; - /* - * It's unclear that it's a requirement to copy the - * path to the beginning of the buffer, but it's always - * been that way and stuff would probably break. 
- */ - bcopy(bpt, pt, ept - bpt); - __os_free(NULL, up); - return (pt); - } - - /* - * Build pointer to the parent directory, allocating memory - * as necessary. Max length is 3 for "../", the largest - * possible component name, plus a trailing NULL. - */ - if (bup + 3 + MAXNAMLEN + 1 >= eup) { - if (__os_realloc(NULL, upsize *= 2, &up) != 0) - goto err; - bup = up; - eup = up + upsize; - } - *bup++ = '.'; - *bup++ = '.'; - *bup = '\0'; - - /* Open and stat parent directory. */ - if (!(dir = opendir(up)) || fstat(dirfd(dir), &s)) - goto err; - - /* Add trailing slash for next directory. */ - *bup++ = PATH_SEPARATOR[0]; - - /* - * If it's a mount point, have to stat each element because - * the inode number in the directory is for the entry in the - * parent directory, not the inode number of the mounted file. - */ - save_errno = 0; - if (s.st_dev == dev) { - for (;;) { - if (!(dp = readdir(dir))) - goto notfound; - if (dp->d_fileno == ino) - break; - } - } else - for (;;) { - if (!(dp = readdir(dir))) - goto notfound; - if (ISDOT(dp)) - continue; - bcopy(dp->d_name, bup, dp->d_namlen + 1); - - /* Save the first error for later. */ - if (lstat(up, &s)) { - if (save_errno == 0) - save_errno = __os_get_errno(); - __os_set_errno(0); - continue; - } - if (s.st_dev == dev && s.st_ino == ino) - break; - } - - /* - * Check for length of the current name, preceding slash, - * leading slash. - */ - if (bpt - pt < dp->d_namlen + (first ? 1 : 2)) { - size_t len, off; - - if (!ptsize) { - __os_set_errno(ERANGE); - goto err; - } - off = bpt - pt; - len = ept - bpt; - if (__os_realloc(NULL, ptsize *= 2, &pt) != 0) - goto err; - bpt = pt + off; - ept = pt + ptsize; - bcopy(bpt, ept - len, len); - bpt = ept - len; - } - if (!first) - *--bpt = PATH_SEPARATOR[0]; - bpt -= dp->d_namlen; - bcopy(dp->d_name, bpt, dp->d_namlen); - (void)closedir(dir); - - /* Truncate any file name. */ - *bup = '\0'; - } - -notfound: - /* - * If readdir set errno, use it, not any saved error; otherwise, - * didn't find the current directory in its parent directory, set - * errno to ENOENT. - */ - if (__os_get_errno_ret_zero() == 0) - __os_set_errno(save_errno == 0 ? ENOENT : save_errno); - /* FALLTHROUGH */ -err: - if (ptsize) - __os_free(NULL, pt); - __os_free(NULL, up); - return (NULL); -} diff --git a/storage/bdb/clib/getopt.c b/storage/bdb/clib/getopt.c deleted file mode 100644 index 54bfed16362..00000000000 --- a/storage/bdb/clib/getopt.c +++ /dev/null @@ -1,152 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1987, 1993, 1994 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
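The getcwd() replacement deleted above rebuilds the working directory by lstat()ing each ".." level until it reaches the root device/inode pair. Code that leaned on that fallback can usually call the POSIX getcwd() directly; the sketch below is illustrative only (hypothetical helper name, plain POSIX, assumes PATH_MAX is defined in <limits.h>), not the removed implementation:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative helper: print the current directory using POSIX getcwd(). */
int
print_cwd(void)
{
	char buf[PATH_MAX];

	/* getcwd() fills buf and returns it, or NULL with errno set. */
	if (getcwd(buf, sizeof(buf)) == NULL) {
		perror("getcwd");
		return (errno);
	}
	(void)printf("%s\n", buf);
	return (0);
}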
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: getopt.c,v 12.1 2005/06/16 20:20:48 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#include -#endif - -#include "db_int.h" - -int __db_getopt_reset; /* global reset for VxWorks. */ - -int opterr = 1, /* if error message should be printed */ - optind = 1, /* index into parent argv vector */ - optopt, /* character checked for validity */ - optreset; /* reset getopt */ -char *optarg; /* argument associated with option */ - -#undef BADCH -#define BADCH (int)'?' -#undef BADARG -#define BADARG (int)':' -#undef EMSG -#define EMSG "" - -/* - * getopt -- - * Parse argc/argv argument vector. - * - * PUBLIC: #ifndef HAVE_GETOPT - * PUBLIC: int getopt __P((int, char * const *, const char *)); - * PUBLIC: #endif - */ -int -getopt(nargc, nargv, ostr) - int nargc; - char * const *nargv; - const char *ostr; -{ - static char *progname; - static char *place = EMSG; /* option letter processing */ - char *oli; /* option letter list index */ - - /* - * VxWorks needs to be able to repeatedly call getopt from multiple - * programs within its global name space. - */ - if (__db_getopt_reset) { - __db_getopt_reset = 0; - - opterr = optind = 1; - optopt = optreset = 0; - optarg = NULL; - progname = NULL; - place = EMSG; - } - if (!progname) { - if ((progname = __db_rpath(*nargv)) == NULL) - progname = *nargv; - else - ++progname; - } - - if (optreset || !*place) { /* update scanning pointer */ - optreset = 0; - if (optind >= nargc || *(place = nargv[optind]) != '-') { - place = EMSG; - return (EOF); - } - if (place[1] && *++place == '-') { /* found "--" */ - ++optind; - place = EMSG; - return (EOF); - } - } /* option letter okay? */ - if ((optopt = (int)*place++) == (int)':' || - !(oli = strchr(ostr, optopt))) { - /* - * if the user didn't specify '-' as an option, - * assume it means EOF. 
- */ - if (optopt == (int)'-') - return (EOF); - if (!*place) - ++optind; - if (opterr && *ostr != ':') - (void)fprintf(stderr, - "%s: illegal option -- %c\n", progname, optopt); - return (BADCH); - } - if (*++oli != ':') { /* don't need argument */ - optarg = NULL; - if (!*place) - ++optind; - } - else { /* need an argument */ - if (*place) /* no white space */ - optarg = place; - else if (nargc <= ++optind) { /* no arg */ - place = EMSG; - if (*ostr == ':') - return (BADARG); - if (opterr) - (void)fprintf(stderr, - "%s: option requires an argument -- %c\n", - progname, optopt); - return (BADCH); - } - else /* white space */ - optarg = nargv[optind]; - place = EMSG; - ++optind; - } - return (optopt); /* dump back option letter */ -} diff --git a/storage/bdb/clib/memcmp.c b/storage/bdb/clib/memcmp.c deleted file mode 100644 index e7400c1c40b..00000000000 --- a/storage/bdb/clib/memcmp.c +++ /dev/null @@ -1,65 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: memcmp.c,v 12.1 2005/06/16 20:20:48 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -/* - * memcmp -- - * - * PUBLIC: #ifndef HAVE_MEMCMP - * PUBLIC: int memcmp __P((const void *, const void *, size_t)); - * PUBLIC: #endif - */ -int -memcmp(s1, s2, n) - char *s1, *s2; - size_t n; -{ - if (n != 0) { - unsigned char *p1 = (unsigned char *)s1, - *p2 = (unsigned char *)s2; - do { - if (*p1++ != *p2++) - return (*--p1 - *--p2); - } while (--n != 0); - } - return (0); -} diff --git a/storage/bdb/clib/memmove.c b/storage/bdb/clib/memmove.c deleted file mode 100644 index d2a505b1f35..00000000000 --- a/storage/bdb/clib/memmove.c +++ /dev/null @@ -1,153 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. 
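The getopt() clone removed above parses short options and adds __db_getopt_reset so VxWorks can re-run it in a shared namespace. For reference, a minimal sketch of how a caller drives the standard POSIX getopt() (hypothetical program, standard headers only; note the removed clone returns EOF where POSIX specifies -1):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	int ch, verbose = 0;
	char *file = NULL;

	/* "vf:" -- -v is a plain flag, -f takes an argument. */
	while ((ch = getopt(argc, argv, "vf:")) != -1)
		switch (ch) {
		case 'v':
			verbose = 1;
			break;
		case 'f':
			file = optarg;
			break;
		default:
			(void)fprintf(stderr, "usage: prog [-v] [-f file]\n");
			return (EXIT_FAILURE);
		}
	(void)printf("verbose=%d file=%s\n", verbose, file ? file : "(none)");
	return (EXIT_SUCCESS);
}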
- */ -/* - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: memmove.c,v 12.1 2005/06/16 20:20:49 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -/* - * sizeof(word) MUST BE A POWER OF TWO - * SO THAT wmask BELOW IS ALL ONES - */ -typedef int word; /* "word" used for optimal copy speed */ - -#undef wsize -#define wsize sizeof(word) -#undef wmask -#define wmask (wsize - 1) - -/* - * Copy a block of memory, handling overlap. - * This is the routine that actually implements - * (the portable versions of) bcopy, memcpy, and memmove. - */ -#ifdef MEMCOPY -/* - * PUBLIC: #ifndef HAVE_MEMCPY - * PUBLIC: void *memcpy __P((void *, const void *, size_t)); - * PUBLIC: #endif - */ -void * -memcpy(dst0, src0, length) -#else -#ifdef MEMMOVE -/* - * PUBLIC: #ifndef HAVE_MEMMOVE - * PUBLIC: void *memmove __P((void *, const void *, size_t)); - * PUBLIC: #endif - */ -void * -memmove(dst0, src0, length) -#else -void -bcopy(src0, dst0, length) -#endif -#endif - void *dst0; - const void *src0; - register size_t length; -{ - register char *dst = dst0; - register const char *src = src0; - register size_t t; - - if (length == 0 || dst == src) /* nothing to do */ - goto done; - - /* - * Macros: loop-t-times; and loop-t-times, t>0 - */ -#undef TLOOP -#define TLOOP(s) if (t) TLOOP1(s) -#undef TLOOP1 -#define TLOOP1(s) do { s; } while (--t) - - if ((unsigned long)dst < (unsigned long)src) { - /* - * Copy forward. - */ - t = (int)src; /* only need low bits */ - if ((t | (int)dst) & wmask) { - /* - * Try to align operands. This cannot be done - * unless the low bits match. - */ - if ((t ^ (int)dst) & wmask || length < wsize) - t = length; - else - t = wsize - (t & wmask); - length -= t; - TLOOP1(*dst++ = *src++); - } - /* - * Copy whole words, then mop up any trailing bytes. 
- */ - t = length / wsize; - TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize); - t = length & wmask; - TLOOP(*dst++ = *src++); - } else { - /* - * Copy backwards. Otherwise essentially the same. - * Alignment works as before, except that it takes - * (t&wmask) bytes to align, not wsize-(t&wmask). - */ - src += length; - dst += length; - t = (int)src; - if ((t | (int)dst) & wmask) { - if ((t ^ (int)dst) & wmask || length <= wsize) - t = length; - else - t &= wmask; - length -= t; - TLOOP1(*--dst = *--src); - } - t = length / wsize; - TLOOP(src -= wsize; dst -= wsize; *(word *)dst = *(word *)src); - t = length & wmask; - TLOOP(*--dst = *--src); - } -done: -#if defined(MEMCOPY) || defined(MEMMOVE) - return (dst0); -#else - return; -#endif -} diff --git a/storage/bdb/clib/raise.c b/storage/bdb/clib/raise.c deleted file mode 100644 index 043a2007162..00000000000 --- a/storage/bdb/clib/raise.c +++ /dev/null @@ -1,30 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: raise.c,v 12.2 2005/06/16 20:20:50 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#endif - -/* - * raise -- - * Send a signal to the current process. - * - * PUBLIC: #ifndef HAVE_RAISE - * PUBLIC: int raise __P((int)); - * PUBLIC: #endif - */ -int -raise(s) - int s; -{ - return (kill(getpid(), s)); -} diff --git a/storage/bdb/clib/snprintf.c b/storage/bdb/clib/snprintf.c deleted file mode 100644 index 4fa4540b9d2..00000000000 --- a/storage/bdb/clib/snprintf.c +++ /dev/null @@ -1,159 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: snprintf.c,v 12.1 2005/06/16 20:20:50 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include /* Declare STDERR_FILENO. */ -#endif - -#include "db_int.h" - -#if !defined(HAVE_SNPRINTF) || !defined(HAVE_VSNPRINTF) -static void sprintf_overflow __P((void)); -static int sprintf_retcharpnt __P((void)); -#endif - -/* - * snprintf -- - * Bounded version of sprintf. - * - * PUBLIC: #ifndef HAVE_SNPRINTF - * PUBLIC: int snprintf __P((char *, size_t, const char *, ...)); - * PUBLIC: #endif - */ -#ifndef HAVE_SNPRINTF -int -#ifdef STDC_HEADERS -snprintf(char *str, size_t n, const char *fmt, ...) -#else -snprintf(str, n, fmt, va_alist) - char *str; - size_t n; - const char *fmt; - va_dcl -#endif -{ - static int ret_charpnt = -1; - va_list ap; - size_t len; - - if (ret_charpnt == -1) - ret_charpnt = sprintf_retcharpnt(); - -#ifdef STDC_HEADERS - va_start(ap, fmt); -#else - va_start(ap); -#endif - len = (size_t)vsprintf(str, fmt, ap); - if (ret_charpnt) - len = strlen(str); - - va_end(ap); - - if (len >= n) { - sprintf_overflow(); - /* NOTREACHED */ - } - return ((int)len); -} -#endif - -/* - * vsnprintf -- - * Bounded version of vsprintf. 
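The memmove/bcopy fallback removed above copies a word at a time and picks the copy direction from the operand order so overlapping regions survive. A byte-wise sketch of that same direction rule (illustrative helper name; it omits the word-alignment optimization of the original):

#include <stddef.h>

/* Overlap-safe copy: go forward when dst precedes src, otherwise backward. */
void *
simple_memmove(void *dst0, const void *src0, size_t len)
{
	unsigned char *dst = dst0;
	const unsigned char *src = src0;

	if (dst < src)
		while (len-- != 0)
			*dst++ = *src++;
	else if (dst > src) {
		dst += len;
		src += len;
		while (len-- != 0)
			*--dst = *--src;
	}
	return (dst0);
}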
- * - * PUBLIC: #ifndef HAVE_VSNPRINTF - * PUBLIC: int vsnprintf __P((char *, size_t, const char *, va_list)); - * PUBLIC: #endif - */ -#ifndef HAVE_VSNPRINTF -int -vsnprintf(str, n, fmt, ap) - char *str; - size_t n; - const char *fmt; - va_list ap; -{ - static int ret_charpnt = -1; - size_t len; - - if (ret_charpnt == -1) - ret_charpnt = sprintf_retcharpnt(); - - len = (size_t)vsprintf(str, fmt, ap); - if (ret_charpnt) - len = strlen(str); - - if (len >= n) { - sprintf_overflow(); - /* NOTREACHED */ - } - return ((int)len); -} -#endif - -#if !defined(HAVE_SNPRINTF) || !defined(HAVE_VSNPRINTF) -static void -sprintf_overflow() -{ - /* - * !!! - * We're potentially manipulating strings handed us by the application, - * and on systems without a real snprintf() the sprintf() calls could - * have overflowed the buffer. We can't do anything about it now, but - * we don't want to return control to the application, we might have - * overwritten the stack with a Trojan horse. We're not trying to do - * anything recoverable here because systems without snprintf support - * are pretty rare anymore. - */ -#define OVERFLOW_ERROR "internal buffer overflow, process ended\n" -#ifndef STDERR_FILENO -#define STDERR_FILENO 2 -#endif - (void)write(STDERR_FILENO, OVERFLOW_ERROR, sizeof(OVERFLOW_ERROR) - 1); - - /* Be polite. */ - exit(1); - - /* But firm. */ - abort(); - - /* NOTREACHED */ -} - -static int -sprintf_retcharpnt() -{ - int ret_charpnt; - char buf[10]; - - /* - * Some old versions of sprintf return a pointer to the first argument - * instead of a character count. Assume the return value of snprintf, - * vsprintf, etc. will be the same as sprintf, and check the easy one. - * - * We do this test at run-time because it's not a test we can do in a - * cross-compilation environment. - */ - - ret_charpnt = - (int)sprintf(buf, "123") != 3 || - (int)sprintf(buf, "123456789") != 9 || - (int)sprintf(buf, "1234") != 4; - - return (ret_charpnt); -} -#endif diff --git a/storage/bdb/clib/strcasecmp.c b/storage/bdb/clib/strcasecmp.c deleted file mode 100644 index b83daa3ccab..00000000000 --- a/storage/bdb/clib/strcasecmp.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (c) 1987, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
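The snprintf/vsnprintf emulation removed above has to format with vsprintf first and abort on overflow, because by then the buffer is already damaged. On platforms with a real C99 vsnprintf the bounded call plus a truncation check is sufficient; a minimal sketch (hypothetical wrapper name, standard C99 only):

#include <stdarg.h>
#include <stdio.h>

/* Illustrative wrapper: returns characters written, or -1 if truncated. */
int
format_bounded(char *buf, size_t bufsz, const char *fmt, ...)
{
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(buf, bufsz, fmt, ap);	/* never writes past bufsz */
	va_end(ap);

	return ((len < 0 || (size_t)len >= bufsz) ? -1 : len);
}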
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: strcasecmp.c,v 12.0 2004/11/17 03:43:15 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -/* - * This array is designed for mapping upper and lower case letter - * together for a case independent comparison. The mappings are - * based upon ascii character sequences. - */ -static const unsigned char charmap[] = { - '\000', '\001', '\002', '\003', '\004', '\005', '\006', '\007', - '\010', '\011', '\012', '\013', '\014', '\015', '\016', '\017', - '\020', '\021', '\022', '\023', '\024', '\025', '\026', '\027', - '\030', '\031', '\032', '\033', '\034', '\035', '\036', '\037', - '\040', '\041', '\042', '\043', '\044', '\045', '\046', '\047', - '\050', '\051', '\052', '\053', '\054', '\055', '\056', '\057', - '\060', '\061', '\062', '\063', '\064', '\065', '\066', '\067', - '\070', '\071', '\072', '\073', '\074', '\075', '\076', '\077', - '\100', '\141', '\142', '\143', '\144', '\145', '\146', '\147', - '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157', - '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167', - '\170', '\171', '\172', '\133', '\134', '\135', '\136', '\137', - '\140', '\141', '\142', '\143', '\144', '\145', '\146', '\147', - '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157', - '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167', - '\170', '\171', '\172', '\173', '\174', '\175', '\176', '\177', - '\200', '\201', '\202', '\203', '\204', '\205', '\206', '\207', - '\210', '\211', '\212', '\213', '\214', '\215', '\216', '\217', - '\220', '\221', '\222', '\223', '\224', '\225', '\226', '\227', - '\230', '\231', '\232', '\233', '\234', '\235', '\236', '\237', - '\240', '\241', '\242', '\243', '\244', '\245', '\246', '\247', - '\250', '\251', '\252', '\253', '\254', '\255', '\256', '\257', - '\260', '\261', '\262', '\263', '\264', '\265', '\266', '\267', - '\270', '\271', '\272', '\273', '\274', '\275', '\276', '\277', - '\300', '\301', '\302', '\303', '\304', '\305', '\306', '\307', - '\310', '\311', '\312', '\313', '\314', '\315', '\316', '\317', - '\320', '\321', '\322', '\323', '\324', '\325', '\326', '\327', - '\330', '\331', '\332', '\333', '\334', '\335', '\336', '\337', - '\340', '\341', '\342', '\343', '\344', '\345', '\346', '\347', - '\350', '\351', '\352', '\353', '\354', '\355', '\356', '\357', - '\360', '\361', '\362', '\363', '\364', '\365', '\366', '\367', - '\370', '\371', '\372', '\373', '\374', '\375', '\376', '\377' -}; - -/* - * strcasecmp -- - * Do strcmp(3) in a case-insensitive manner. 
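The strcasecmp() replacement removed here folds case through a fixed 256-byte map, so the comparison is ASCII-only and independent of the current locale. The same idea with <ctype.h> is a tolower() on each unsigned byte; a sketch (hypothetical helper name; unlike the charmap, tolower() is locale-sensitive):

#include <ctype.h>

/* Case-insensitive compare in the spirit of the removed charmap version. */
int
ci_strcmp(const char *s1, const char *s2)
{
	const unsigned char *u1 = (const unsigned char *)s1;
	const unsigned char *u2 = (const unsigned char *)s2;

	while (tolower(*u1) == tolower(*u2)) {
		if (*u1 == '\0')
			return (0);
		++u1, ++u2;
	}
	return (tolower(*u1) - tolower(*u2));
}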
- * - * PUBLIC: #ifndef HAVE_STRCASECMP - * PUBLIC: int strcasecmp __P((const char *, const char *)); - * PUBLIC: #endif - */ -int -strcasecmp(s1, s2) - const char *s1, *s2; -{ - register const unsigned char *cm = charmap, - *us1 = (const unsigned char *)s1, - *us2 = (const unsigned char *)s2; - - while (cm[*us1] == cm[*us2++]) - if (*us1++ == '\0') - return (0); - return (cm[*us1] - cm[*--us2]); -} - -/* - * strncasecmp -- - * Do strncmp(3) in a case-insensitive manner. - * - * PUBLIC: #ifndef HAVE_STRCASECMP - * PUBLIC: int strncasecmp __P((const char *, const char *, size_t)); - * PUBLIC: #endif - */ -int -strncasecmp(s1, s2, n) - const char *s1, *s2; - register size_t n; -{ - if (n != 0) { - register const unsigned char *cm = charmap, - *us1 = (const unsigned char *)s1, - *us2 = (const unsigned char *)s2; - - do { - if (cm[*us1] != cm[*us2++]) - return (cm[*us1] - cm[*--us2]); - if (*us1++ == '\0') - break; - } while (--n != 0); - } - return (0); -} diff --git a/storage/bdb/clib/strdup.c b/storage/bdb/clib/strdup.c deleted file mode 100644 index e679f5a6ccd..00000000000 --- a/storage/bdb/clib/strdup.c +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 1988, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $Id: strdup.c,v 12.0 2004/11/17 03:43:15 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -/* - * strdup -- - * - * PUBLIC: #ifndef HAVE_STRDUP - * PUBLIC: char *strdup __P((const char *)); - * PUBLIC: #endif - */ -char * -strdup(str) - const char *str; -{ - size_t len; - char *copy; - - len = strlen(str) + 1; - if (!(copy = malloc((u_int)len))) - return (NULL); - memcpy(copy, str, len); - return (copy); -} diff --git a/storage/bdb/clib/strerror.c b/storage/bdb/clib/strerror.c deleted file mode 100644 index db0d71ccc5f..00000000000 --- a/storage/bdb/clib/strerror.c +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1988, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: strerror.c,v 12.1 2005/06/16 20:20:51 bostic Exp $ - */ - -#include "db_config.h" - -/* - * strerror -- - * Return the string associated with an errno. - * - * PUBLIC: #ifndef HAVE_STRERROR - * PUBLIC: char *strerror __P((int)); - * PUBLIC: #endif - */ -char * -strerror(num) - int num; -{ - extern int sys_nerr; - extern char *sys_errlist[]; -#undef UPREFIX -#define UPREFIX "Unknown error: " - static char ebuf[40] = UPREFIX; /* 64-bit number + slop */ - int errnum; - char *p, *t, tmp[40]; - - errnum = num; /* convert to unsigned */ - if (errnum < sys_nerr) - return(sys_errlist[errnum]); - - /* Do this by hand, so we don't include stdio(3). */ - t = tmp; - do { - *t++ = "0123456789"[errnum % 10]; - } while (errnum /= 10); - for (p = ebuf + sizeof(UPREFIX) - 1;;) { - *p++ = *--t; - if (t <= tmp) - break; - } - return(ebuf); -} diff --git a/storage/bdb/clib/strtol.c b/storage/bdb/clib/strtol.c deleted file mode 100644 index 88b17bd3e9f..00000000000 --- a/storage/bdb/clib/strtol.c +++ /dev/null @@ -1,144 +0,0 @@ -/*- - * Copyright (c) 1990, 1993 - * The Regents of the University of California. 
All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: strtol.c,v 12.0 2004/11/17 03:43:15 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#include -#include -#include -#endif - -/* - * Convert a string to a long integer. - * - * Assumes that the upper and lower case - * alphabets and digits are each contiguous. - */ -long -strtol(nptr, endptr, base) - const char * nptr; - char ** endptr; - int base; -{ - const char *s; - unsigned long acc; - char c; - unsigned long cutoff; - int neg, any, cutlim; - - /* - * Skip white space and pick up leading +/- sign if any. - * If base is 0, allow 0x for hex and 0 for octal, else - * assume decimal; if base is already 16, allow 0x. - */ - s = nptr; - do { - c = *s++; - } while (isspace((unsigned char)c)); - if (c == '-') { - neg = 1; - c = *s++; - } else { - neg = 0; - if (c == '+') - c = *s++; - } - if ((base == 0 || base == 16) && - c == '0' && (*s == 'x' || *s == 'X')) { - c = s[1]; - s += 2; - base = 16; - } - if (base == 0) - base = c == '0' ? 8 : 10; - acc = any = 0; - if (base < 2 || base > 36) - goto noconv; - - /* - * Compute the cutoff value between legal numbers and illegal - * numbers. That is the largest legal value, divided by the - * base. An input number that is greater than this value, if - * followed by a legal input character, is too big. One that - * is equal to this value may be valid or not; the limit - * between valid and invalid numbers is then based on the last - * digit. 
For instance, if the range for longs is - * [-2147483648..2147483647] and the input base is 10, - * cutoff will be set to 214748364 and cutlim to either - * 7 (neg==0) or 8 (neg==1), meaning that if we have accumulated - * a value > 214748364, or equal but the next digit is > 7 (or 8), - * the number is too big, and we will return a range error. - * - * Set 'any' if any `digits' consumed; make it negative to indicate - * overflow. - */ - cutoff = neg ? (unsigned long)-(LONG_MIN + LONG_MAX) + LONG_MAX - : LONG_MAX; - cutlim = cutoff % base; - cutoff /= base; - for ( ; ; c = *s++) { - if (c >= '0' && c <= '9') - c -= '0'; - else if (c >= 'A' && c <= 'Z') - c -= 'A' - 10; - else if (c >= 'a' && c <= 'z') - c -= 'a' - 10; - else - break; - if (c >= base) - break; - if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) - any = -1; - else { - any = 1; - acc *= base; - acc += c; - } - } - if (any < 0) { - acc = neg ? LONG_MIN : LONG_MAX; - errno = ERANGE; - } else if (!any) { -noconv: - errno = EINVAL; - } else if (neg) - acc = -acc; - if (endptr != NULL) - *endptr = (char *)(any ? s - 1 : nptr); - return (acc); -} diff --git a/storage/bdb/clib/strtoul.c b/storage/bdb/clib/strtoul.c deleted file mode 100644 index 14eacb89f3f..00000000000 --- a/storage/bdb/clib/strtoul.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: strtoul.c,v 12.0 2004/11/17 03:43:15 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#include -#include -#include -#endif - -/* - * Convert a string to an unsigned long integer. - * - * Assumes that the upper and lower case - * alphabets and digits are each contiguous. 
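The cutoff/cutlim computation in the strtol() fallback removed above is the key overflow guard: divide the largest legal magnitude by the base once, then reject any accumulated value that would pass it. A worked sketch of that test for non-negative base-10 input (hypothetical helper name, assumes a well-formed digit string):

#include <errno.h>
#include <limits.h>

/* Accumulate decimal digits using the same cutoff/cutlim overflow test. */
int
parse_nonneg_long(const char *s, long *out)
{
	unsigned long acc = 0, cutoff = LONG_MAX / 10;	/* 214748364 on 32-bit long */
	int cutlim = LONG_MAX % 10;			/* 7 on 32-bit long */

	for (; *s >= '0' && *s <= '9'; ++s) {
		int d = *s - '0';

		if (acc > cutoff || (acc == cutoff && d > cutlim))
			return (ERANGE);	/* next digit would exceed LONG_MAX */
		acc = acc * 10 + (unsigned long)d;
	}
	*out = (long)acc;
	return (0);
}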
- */ -unsigned long -strtoul(nptr, endptr, base) - const char * nptr; - char ** endptr; - int base; -{ - const char *s; - unsigned long acc; - char c; - unsigned long cutoff; - int neg, any, cutlim; - - /* - * See strtol for comments as to the logic used. - */ - s = nptr; - do { - c = *s++; - } while (isspace((unsigned char)c)); - if (c == '-') { - neg = 1; - c = *s++; - } else { - neg = 0; - if (c == '+') - c = *s++; - } - if ((base == 0 || base == 16) && - c == '0' && (*s == 'x' || *s == 'X')) { - c = s[1]; - s += 2; - base = 16; - } - if (base == 0) - base = c == '0' ? 8 : 10; - acc = any = 0; - if (base < 2 || base > 36) - goto noconv; - - cutoff = ULONG_MAX / base; - cutlim = ULONG_MAX % base; - for ( ; ; c = *s++) { - if (c >= '0' && c <= '9') - c -= '0'; - else if (c >= 'A' && c <= 'Z') - c -= 'A' - 10; - else if (c >= 'a' && c <= 'z') - c -= 'a' - 10; - else - break; - if (c >= base) - break; - if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) - any = -1; - else { - any = 1; - acc *= base; - acc += c; - } - } - if (any < 0) { - acc = ULONG_MAX; - errno = ERANGE; - } else if (!any) { -noconv: - errno = EINVAL; - } else if (neg) - acc = -acc; - if (endptr != NULL) - *endptr = (char *)(any ? s - 1 : nptr); - return (acc); -} diff --git a/storage/bdb/clib/vsnprintf.c b/storage/bdb/clib/vsnprintf.c deleted file mode 100644 index 4ffea8cb0ad..00000000000 --- a/storage/bdb/clib/vsnprintf.c +++ /dev/null @@ -1,47 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2002 - * Sleepycat Software. All rights reserved. - */ - -#include "db_config.h" - -#ifndef lint -static const char revid[] = "$Id: vsnprintf.c,v 11.7 2002/01/11 15:51:29 bostic Exp $"; -#endif /* not lint */ - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" - -/* - * vsnprintf -- - * Bounded version of vsprintf. - * - * PUBLIC: #ifndef HAVE_VSNPRINTF - * PUBLIC: int vsnprintf __P((char *, size_t, const char *, va_list)); - * PUBLIC: #endif - */ -#ifndef HAVE_VSNPRINTF -int -vsnprintf(str, n, fmt, ap) - char *str; - size_t n; - const char *fmt; - va_list ap; -{ - COMPQUIET(n, 0); - -#ifdef SPRINTF_RET_CHARPNT - (void)vsprintf(str, fmt, ap); - return (strlen(str)); -#else - return (vsprintf(str, fmt, ap)); -#endif -} -#endif diff --git a/storage/bdb/common/crypto_stub.c b/storage/bdb/common/crypto_stub.c deleted file mode 100644 index e335b61f99a..00000000000 --- a/storage/bdb/common/crypto_stub.c +++ /dev/null @@ -1,45 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: crypto_stub.c,v 12.2 2005/07/20 16:50:55 bostic Exp $ - */ - -#include "db_config.h" - -#include "db_int.h" - -/* - * __crypto_region_init -- - * Initialize crypto. - * - * - * !!! - * We don't put this stub file in the crypto/ directory of the distribution - * because that entire directory is removed for non-crypto distributions. 
- * - * PUBLIC: int __crypto_region_init __P((DB_ENV *)); - */ -int -__crypto_region_init(dbenv) - DB_ENV *dbenv; -{ - REGENV *renv; - REGINFO *infop; - int ret; - - infop = dbenv->reginfo; - renv = infop->primary; - MUTEX_LOCK(dbenv, renv->mtx_regenv); - ret = !(renv->cipher_off == INVALID_ROFF); - MUTEX_UNLOCK(dbenv, renv->mtx_regenv); - - if (ret == 0) - return (0); - - __db_err(dbenv, -"Encrypted environment: library build did not include cryptography support"); - return (DB_OPNOTSUP); -} diff --git a/storage/bdb/common/db_byteorder.c b/storage/bdb/common/db_byteorder.c deleted file mode 100644 index 60b1d293e41..00000000000 --- a/storage/bdb/common/db_byteorder.c +++ /dev/null @@ -1,72 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_byteorder.c,v 12.1 2005/06/16 20:20:52 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" - -/* - * __db_isbigendian -- - * Return 1 if big-endian (Motorola and Sparc), not little-endian - * (Intel and Vax). We do this work at run-time, rather than at - * configuration time so cross-compilation and general embedded - * system support is simpler. - * - * PUBLIC: int __db_isbigendian __P((void)); - */ -int -__db_isbigendian() -{ - union { /* From Harbison & Steele. */ - long l; - char c[sizeof(long)]; - } u; - - u.l = 1; - return (u.c[sizeof(long) - 1] == 1); -} - -/* - * __db_byteorder -- - * Return if we need to do byte swapping, checking for illegal - * values. - * - * PUBLIC: int __db_byteorder __P((DB_ENV *, int)); - */ -int -__db_byteorder(dbenv, lorder) - DB_ENV *dbenv; - int lorder; -{ - int is_bigendian; - - is_bigendian = __db_isbigendian(); - - switch (lorder) { - case 0: - break; - case 1234: - if (is_bigendian) - return (DB_SWAPBYTES); - break; - case 4321: - if (!is_bigendian) - return (DB_SWAPBYTES); - break; - default: - __db_err(dbenv, - "unsupported byte order, only big and little-endian supported"); - return (EINVAL); - } - return (0); -} diff --git a/storage/bdb/common/db_clock.c b/storage/bdb/common/db_clock.c deleted file mode 100644 index d53b1961ada..00000000000 --- a/storage/bdb/common/db_clock.c +++ /dev/null @@ -1,30 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_clock.c,v 1.2 2005/08/08 14:39:52 bostic Exp $ - */ - -#include "db_config.h" -#include "db_int.h" - -/* - * __db_difftime -- - * - * Compute the difference in seconds and microseconds of two timers. - * - * PUBLIC: void __db_difftime __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t, - * PUBLIC: u_int32_t *, u_int32_t *)); - */ -void -__db_difftime(ssec, esec, susec, eusec, secp, usecp) - u_int32_t ssec, esec, susec, eusec, *secp, *usecp; -{ - if ((*secp = esec - ssec) != 0 && eusec < susec) { - (*secp)--; - eusec += 1000000; - } - *usecp = eusec - susec; -} diff --git a/storage/bdb/common/db_err.c b/storage/bdb/common/db_err.c deleted file mode 100644 index fd9fa89a46a..00000000000 --- a/storage/bdb/common/db_err.c +++ /dev/null @@ -1,840 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. 
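The __db_isbigendian() routine removed just above decides byte order at run time with a union rather than at configure time, which keeps cross-compilation and embedded builds simple. The probe itself is a one-liner; a standalone sketch of the same union trick (illustrative helper name):

/* Run-time endianness probe, same union trick as the removed __db_isbigendian(). */
static int
is_big_endian(void)
{
	union {
		long l;
		char c[sizeof(long)];
	} u;

	u.l = 1;
	/* On a big-endian machine the 1 lands in the last byte. */
	return (u.c[sizeof(long) - 1] == 1);
}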
- * - * $Id: db_err.c,v 12.19 2005/10/19 19:06:29 sue Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" -#include "dbinc/db_shash.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/txn.h" - -static void __db_msgcall __P((const DB_ENV *, const char *, va_list)); -static void __db_msgfile __P((const DB_ENV *, const char *, va_list)); - -/* - * __db_fchk -- - * General flags checking routine. - * - * PUBLIC: int __db_fchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t)); - */ -int -__db_fchk(dbenv, name, flags, ok_flags) - DB_ENV *dbenv; - const char *name; - u_int32_t flags, ok_flags; -{ - return (LF_ISSET(~ok_flags) ? __db_ferr(dbenv, name, 0) : 0); -} - -/* - * __db_fcchk -- - * General combination flags checking routine. - * - * PUBLIC: int __db_fcchk - * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, u_int32_t, u_int32_t)); - */ -int -__db_fcchk(dbenv, name, flags, flag1, flag2) - DB_ENV *dbenv; - const char *name; - u_int32_t flags, flag1, flag2; -{ - return (LF_ISSET(flag1) && - LF_ISSET(flag2) ? __db_ferr(dbenv, name, 1) : 0); -} - -/* - * __db_ferr -- - * Common flag errors. - * - * PUBLIC: int __db_ferr __P((const DB_ENV *, const char *, int)); - */ -int -__db_ferr(dbenv, name, iscombo) - const DB_ENV *dbenv; - const char *name; - int iscombo; -{ - __db_err(dbenv, "illegal flag %sspecified to %s", - iscombo ? "combination " : "", name); - return (EINVAL); -} - -/* - * __db_fnl -- - * Common flag-needs-locking message. - * - * PUBLIC: int __db_fnl __P((const DB_ENV *, const char *)); - */ -int -__db_fnl(dbenv, name) - const DB_ENV *dbenv; - const char *name; -{ - __db_err(dbenv, - "%s: DB_READ_COMMITTED, DB_READ_UNCOMMITTED and DB_RMW require locking", - name); - return (EINVAL); -} - -/* - * __db_pgerr -- - * Error when unable to retrieve a specified page. - * - * PUBLIC: int __db_pgerr __P((DB *, db_pgno_t, int)); - */ -int -__db_pgerr(dbp, pgno, errval) - DB *dbp; - db_pgno_t pgno; - int errval; -{ - /* - * Three things are certain: - * Death, taxes, and lost data. - * Guess which has occurred. - */ - __db_err(dbp->dbenv, - "unable to create/retrieve page %lu", (u_long)pgno); - return (__db_panic(dbp->dbenv, errval)); -} - -/* - * __db_pgfmt -- - * Error when a page has the wrong format. - * - * PUBLIC: int __db_pgfmt __P((DB_ENV *, db_pgno_t)); - */ -int -__db_pgfmt(dbenv, pgno) - DB_ENV *dbenv; - db_pgno_t pgno; -{ - __db_err(dbenv, "page %lu: illegal page type or format", (u_long)pgno); - return (__db_panic(dbenv, EINVAL)); -} - -#ifdef DIAGNOSTIC -/* - * __db_assert -- - * Error when an assertion fails. Only checked if #DIAGNOSTIC defined. - * - * PUBLIC: #ifdef DIAGNOSTIC - * PUBLIC: void __db_assert __P((const char *, const char *, int)); - * PUBLIC: #endif - */ -void -__db_assert(failedexpr, file, line) - const char *failedexpr, *file; - int line; -{ - (void)fprintf(stderr, - "__db_assert: \"%s\" failed: file \"%s\", line %d\n", - failedexpr, file, line); - (void)fflush(stderr); - - /* We want a stack trace of how this could possibly happen. */ - abort(); - - /* NOTREACHED */ -} -#endif - -/* - * __db_panic_msg -- - * Just report that someone else paniced. 
- * - * PUBLIC: int __db_panic_msg __P((DB_ENV *)); - */ -int -__db_panic_msg(dbenv) - DB_ENV *dbenv; -{ - __db_err(dbenv, "PANIC: fatal region error detected; run recovery"); - - if (dbenv->db_paniccall != NULL) - dbenv->db_paniccall(dbenv, DB_RUNRECOVERY); - - return (DB_RUNRECOVERY); -} - -/* - * __db_panic -- - * Lock out the tree due to unrecoverable error. - * - * PUBLIC: int __db_panic __P((DB_ENV *, int)); - */ -int -__db_panic(dbenv, errval) - DB_ENV *dbenv; - int errval; -{ - if (dbenv != NULL) { - __db_panic_set(dbenv, 1); - - __db_err(dbenv, "PANIC: %s", db_strerror(errval)); - - if (dbenv->db_paniccall != NULL) - dbenv->db_paniccall(dbenv, errval); - } - -#if defined(DIAGNOSTIC) && !defined(CONFIG_TEST) - /* - * We want a stack trace of how this could possibly happen. - * - * Don't drop core if it's the test suite -- it's reasonable for the - * test suite to check to make sure that DB_RUNRECOVERY is returned - * under certain conditions. - */ - abort(); -#endif - - /* - * Chaos reigns within. - * Reflect, repent, and reboot. - * Order shall return. - */ - return (DB_RUNRECOVERY); -} - -/* - * __db_panic_set -- - * Set/clear unrecoverable error. - * - * PUBLIC: void __db_panic_set __P((DB_ENV *, int)); - */ -void -__db_panic_set(dbenv, on) - DB_ENV *dbenv; - int on; -{ - if (dbenv != NULL && dbenv->reginfo != NULL) - ((REGENV *) - ((REGINFO *)dbenv->reginfo)->primary)->panic = on ? 1 : 0; -} - -/* - * db_strerror -- - * ANSI C strerror(3) for DB. - * - * EXTERN: char *db_strerror __P((int)); - */ -char * -db_strerror(error) - int error; -{ - char *p; - - if (error == 0) - return ("Successful return: 0"); - if (error > 0) { - if ((p = strerror(error)) != NULL) - return (p); - goto unknown_err; - } - - /* - * !!! - * The Tcl API requires that some of these return strings be compared - * against strings stored in application scripts. So, any of these - * errors that do not invariably result in a Tcl exception may not be - * altered. 
- */ - switch (error) { - case DB_BUFFER_SMALL: - return - ("DB_BUFFER_SMALL: User memory too small for return value"); - case DB_DONOTINDEX: - return ("DB_DONOTINDEX: Secondary index callback returns null"); - case DB_KEYEMPTY: - return ("DB_KEYEMPTY: Non-existent key/data pair"); - case DB_KEYEXIST: - return ("DB_KEYEXIST: Key/data pair already exists"); - case DB_LOCK_DEADLOCK: - return - ("DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock"); - case DB_LOCK_NOTGRANTED: - return ("DB_LOCK_NOTGRANTED: Lock not granted"); - case DB_LOG_BUFFER_FULL: - return ("DB_LOG_BUFFER_FULL: In-memory log buffer is full"); - case DB_NOSERVER: - return ("DB_NOSERVER: Fatal error, no RPC server"); - case DB_NOSERVER_HOME: - return ("DB_NOSERVER_HOME: Home unrecognized at server"); - case DB_NOSERVER_ID: - return ("DB_NOSERVER_ID: Identifier unrecognized at server"); - case DB_NOTFOUND: - return ("DB_NOTFOUND: No matching key/data pair found"); - case DB_OLD_VERSION: - return ("DB_OLDVERSION: Database requires a version upgrade"); - case DB_PAGE_NOTFOUND: - return ("DB_PAGE_NOTFOUND: Requested page not found"); - case DB_REP_DUPMASTER: - return ("DB_REP_DUPMASTER: A second master site appeared"); - case DB_REP_HANDLE_DEAD: - return ("DB_REP_HANDLE_DEAD: Handle is no longer valid"); - case DB_REP_HOLDELECTION: - return ("DB_REP_HOLDELECTION: Need to hold an election"); - case DB_REP_IGNORE: - return ("DB_REP_IGNORE: Replication record ignored"); - case DB_REP_ISPERM: - return ("DB_REP_ISPERM: Permanent record written"); - case DB_REP_JOIN_FAILURE: - return - ("DB_REP_JOIN_FAILURE: Unable to join replication group"); - case DB_REP_LOCKOUT: - return - ("DB_REP_LOCKOUT: Waiting for replication recovery to complete"); - case DB_REP_NEWMASTER: - return ("DB_REP_NEWMASTER: A new master has declared itself"); - case DB_REP_NEWSITE: - return ("DB_REP_NEWSITE: A new site has entered the system"); - case DB_REP_NOTPERM: - return ("DB_REP_NOTPERM: Permanent log record not written"); - case DB_REP_STARTUPDONE: - return - ("DB_REP_STARTUPDONE: Client completed startup synchronization."); - case DB_REP_UNAVAIL: - return ("DB_REP_UNAVAIL: Unable to elect a master"); - case DB_RUNRECOVERY: - return ("DB_RUNRECOVERY: Fatal error, run database recovery"); - case DB_SECONDARY_BAD: - return - ("DB_SECONDARY_BAD: Secondary index inconsistent with primary"); - case DB_VERIFY_BAD: - return ("DB_VERIFY_BAD: Database verification failed"); - case DB_VERSION_MISMATCH: - return - ("DB_VERSION_MISMATCH: Database environment version mismatch"); - default: - break; - } - -unknown_err: { - /* - * !!! - * Room for a 64-bit number + slop. This buffer is only used - * if we're given an unknown error, which should never happen. - * Note, however, we're no longer thread-safe if it does. - */ - static char ebuf[40]; - - (void)snprintf(ebuf, sizeof(ebuf), "Unknown error: %d", error); - return (ebuf); - } -} - -/* - * __db_err -- - * Standard DB error routine. The same as errx, except we don't write - * to stderr if no output mechanism was specified. - * - * PUBLIC: void __db_err __P((const DB_ENV *, const char *, ...)) - * PUBLIC: __attribute__ ((__format__ (__printf__, 2, 3))); - */ -void -#ifdef STDC_HEADERS -__db_err(const DB_ENV *dbenv, const char *fmt, ...) -#else -__db_err(dbenv, fmt, va_alist) - const DB_ENV *dbenv; - const char *fmt; - va_dcl -#endif -{ - DB_REAL_ERR(dbenv, 0, 0, 0, fmt); -} - -/* - * __db_errcall -- - * Do the error message work for callback functions. 
- * - * PUBLIC: void __db_errcall - * PUBLIC: __P((const DB_ENV *, int, int, const char *, va_list)); - */ -void -__db_errcall(dbenv, error, error_set, fmt, ap) - const DB_ENV *dbenv; - int error, error_set; - const char *fmt; - va_list ap; -{ - char *p; - char buf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */ - - p = buf; - if (fmt != NULL) - p += vsnprintf(buf, sizeof(buf), fmt, ap); - if (error_set) - p += snprintf(p, - sizeof(buf) - (size_t)(p - buf), ": %s", - db_strerror(error)); - - dbenv->db_errcall(dbenv, dbenv->db_errpfx, buf); -} - -/* - * __db_errfile -- - * Do the error message work for FILE *s. - * - * PUBLIC: void __db_errfile - * PUBLIC: __P((const DB_ENV *, int, int, const char *, va_list)); - */ -void -__db_errfile(dbenv, error, error_set, fmt, ap) - const DB_ENV *dbenv; - int error, error_set; - const char *fmt; - va_list ap; -{ - FILE *fp; - - fp = dbenv == NULL || - dbenv->db_errfile == NULL ? stderr : dbenv->db_errfile; - - if (dbenv != NULL && dbenv->db_errpfx != NULL) - (void)fprintf(fp, "%s: ", dbenv->db_errpfx); - if (fmt != NULL) { - (void)vfprintf(fp, fmt, ap); - if (error_set) - (void)fprintf(fp, ": "); - } - if (error_set) - (void)fprintf(fp, "%s", db_strerror(error)); - (void)fprintf(fp, "\n"); - (void)fflush(fp); -} - -/* - * __db_msgadd -- - * Aggregate a set of strings into a buffer for the callback API. - * - * PUBLIC: void __db_msgadd __P((DB_ENV *, DB_MSGBUF *, const char *, ...)) - * PUBLIC: __attribute__ ((__format__ (__printf__, 3, 4))); - */ -void -#ifdef STDC_HEADERS -__db_msgadd(DB_ENV *dbenv, DB_MSGBUF *mbp, const char *fmt, ...) -#else -__db_msgadd(dbenv, mbp, fmt, va_alist) - DB_ENV *dbenv; - DB_MSGBUF *mbp; - const char *fmt; - va_dcl -#endif -{ - va_list ap; - size_t len, olen; - char buf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */ - -#ifdef STDC_HEADERS - va_start(ap, fmt); -#else - va_start(ap); -#endif - len = (size_t)vsnprintf(buf, sizeof(buf), fmt, ap); - - va_end(ap); - - /* - * There's a heap buffer in the DB_ENV handle we use to aggregate the - * message chunks. We maintain a pointer to the buffer, the next slot - * to be filled in in the buffer, and a total buffer length. - */ - olen = (size_t)(mbp->cur - mbp->buf); - if (olen + len >= mbp->len) { - if (__os_realloc(dbenv, mbp->len + len + 256, &mbp->buf)) - return; - mbp->len += (len + 256); - mbp->cur = mbp->buf + olen; - } - - memcpy(mbp->cur, buf, len + 1); - mbp->cur += len; -} - -/* - * __db_msg -- - * Standard DB stat message routine. - * - * PUBLIC: void __db_msg __P((const DB_ENV *, const char *, ...)) - * PUBLIC: __attribute__ ((__format__ (__printf__, 2, 3))); - */ -void -#ifdef STDC_HEADERS -__db_msg(const DB_ENV *dbenv, const char *fmt, ...) -#else -__db_msg(dbenv, fmt, va_alist) - const DB_ENV *dbenv; - const char *fmt; - va_dcl -#endif -{ - DB_REAL_MSG(dbenv, fmt); -} - -/* - * __db_msgcall -- - * Do the message work for callback functions. - */ -static void -__db_msgcall(dbenv, fmt, ap) - const DB_ENV *dbenv; - const char *fmt; - va_list ap; -{ - char buf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */ - - (void)vsnprintf(buf, sizeof(buf), fmt, ap); - - dbenv->db_msgcall(dbenv, buf); -} - -/* - * __db_msgfile -- - * Do the message work for FILE *s. - */ -static void -__db_msgfile(dbenv, fmt, ap) - const DB_ENV *dbenv; - const char *fmt; - va_list ap; -{ - FILE *fp; - - fp = dbenv == NULL || - dbenv->db_msgfile == NULL ? 
stdout : dbenv->db_msgfile; - (void)vfprintf(fp, fmt, ap); - - (void)fprintf(fp, "\n"); - (void)fflush(fp); -} - -/* - * __db_unknown_flag -- report internal error - * - * PUBLIC: int __db_unknown_flag __P((DB_ENV *, char *, u_int32_t)); - */ -int -__db_unknown_flag(dbenv, routine, flag) - DB_ENV *dbenv; - char *routine; - u_int32_t flag; -{ - __db_err(dbenv, "%s: Unknown flag: %#x", routine, (u_int)flag); - DB_ASSERT(0); - return (EINVAL); -} - -/* - * __db_unknown_type -- report internal error - * - * PUBLIC: int __db_unknown_type __P((DB_ENV *, char *, DBTYPE)); - */ -int -__db_unknown_type(dbenv, routine, type) - DB_ENV *dbenv; - char *routine; - DBTYPE type; -{ - __db_err(dbenv, - "%s: Unexpected DB type: %s", routine, __db_dbtype_to_string(type)); - - DB_ASSERT(0); - return (EINVAL); -} - -/* - * __db_check_txn -- - * Check for common transaction errors. - * - * PUBLIC: int __db_check_txn __P((DB *, DB_TXN *, u_int32_t, int)); - */ -int -__db_check_txn(dbp, txn, assoc_lid, read_op) - DB *dbp; - DB_TXN *txn; - u_int32_t assoc_lid; - int read_op; -{ - DB_ENV *dbenv; - int isp, ret; - - dbenv = dbp->dbenv; - - /* - * If we are in recovery or aborting a transaction, then we - * don't need to enforce the rules about dbp's not allowing - * transactional operations in non-transactional dbps and - * vica-versa. This happens all the time as the dbp during - * an abort may be transactional, but we undo operations - * outside a transaction since we're aborting. - */ - if (IS_RECOVERING(dbenv) || F_ISSET(dbp, DB_AM_RECOVER)) - return (0); - - /* - * Check for common transaction errors: - * an operation on a handle whose open commit hasn't completed. - * a transaction handle in a non-transactional environment - * a transaction handle for a non-transactional database - */ - if (txn == NULL) { - if (dbp->cur_lid >= TXN_MINIMUM) - goto open_err; - } else { - if (!TXN_ON(dbenv)) - return (__db_not_txn_env(dbenv)); - - if (!F_ISSET(dbp, DB_AM_TXN)) { - __db_err(dbenv, - "Transaction specified for a DB handle opened outside a transaction"); - return (EINVAL); - } - - if (F_ISSET(txn, TXN_DEADLOCK)) { - __db_err(dbenv, - "Previous deadlock return not resolved"); - return (EINVAL); - } - if (dbp->cur_lid >= TXN_MINIMUM && dbp->cur_lid != txn->txnid) { - if ((ret = __lock_locker_is_parent(dbenv, - dbp->cur_lid, txn->txnid, &isp)) != 0) - return (ret); - if (!isp) - goto open_err; - } - } - - /* - * If dbp->associate_lid is not DB_LOCK_INVALIDID, that means we're in - * the middle of a DB->associate with DB_CREATE (i.e., a secondary index - * creation). - * - * In addition to the usual transaction rules, we need to lock out - * non-transactional updates that aren't part of the associate (and - * thus are using some other locker ID). - * - * Transactional updates should simply block; from the time we - * decide to build the secondary until commit, we'll hold a write - * lock on all of its pages, so it should be safe to attempt to update - * the secondary in another transaction (presumably by updating the - * primary). - */ - if (!read_op && dbp->associate_lid != DB_LOCK_INVALIDID && - txn != NULL && dbp->associate_lid != assoc_lid) { - __db_err(dbenv, - "Operation forbidden while secondary index is being created"); - return (EINVAL); - } - - /* - * Check the txn and dbp are from the same env. 
- */ - if (txn != NULL && dbenv != txn->mgrp->dbenv) { - __db_err(dbenv, - "Transaction and database from different environments"); - return (EINVAL); - } - - return (0); -open_err: - __db_err(dbenv, - "Transaction that opened the DB handle is still active"); - return (EINVAL); -} - -/* - * __db_not_txn_env -- - * DB handle must be in an environment that supports transactions. - * - * PUBLIC: int __db_not_txn_env __P((DB_ENV *)); - */ -int -__db_not_txn_env(dbenv) - DB_ENV *dbenv; -{ - __db_err(dbenv, "DB environment not configured for transactions"); - return (EINVAL); -} - -/* - * __db_rec_toobig -- - * Fixed record length exceeded error message. - * - * PUBLIC: int __db_rec_toobig __P((DB_ENV *, u_int32_t, u_int32_t)); - */ -int -__db_rec_toobig(dbenv, data_len, fixed_rec_len) - DB_ENV *dbenv; - u_int32_t data_len, fixed_rec_len; -{ - __db_err(dbenv, "%s: length of %lu larger than database's value of %lu", - "Record length error", (u_long)data_len, (u_long)fixed_rec_len); - return (EINVAL); -} - -/* - * __db_rec_repl -- - * Fixed record replacement length error message. - * - * PUBLIC: int __db_rec_repl __P((DB_ENV *, u_int32_t, u_int32_t)); - */ -int -__db_rec_repl(dbenv, data_size, data_dlen) - DB_ENV *dbenv; - u_int32_t data_size, data_dlen; -{ - __db_err(dbenv, - "%s: replacement length %lu differs from replaced length %lu", - "Record length error", (u_long)data_size, (u_long)data_dlen); - return (EINVAL); -} - -#if defined(DIAGNOSTIC) || defined(DEBUG_ROP) || defined(DEBUG_WOP) -/* - * __dbc_logging -- - * In DIAGNOSTIC mode, check for bad replication combinations. - * - * PUBLIC: int __dbc_logging __P((DBC *)); - */ -int -__dbc_logging(dbc) - DBC *dbc; -{ - DB_ENV *dbenv; - DB_REP *db_rep; - int ret; - - dbenv = dbc->dbp->dbenv; - db_rep = dbenv->rep_handle; - - ret = LOGGING_ON(dbenv) && - !F_ISSET(dbc, DBC_RECOVER) && !IS_REP_CLIENT(dbenv); - - /* - * If we're not using replication or running recovery, return. - */ - if (db_rep == NULL || F_ISSET(dbc, DBC_RECOVER)) - return (ret); - -#ifndef DEBUG_ROP - /* - * Only check when DEBUG_ROP is not configured. People often do - * non-transactional reads, and debug_rop is going to write - * a log record. - */ - { - REP *rep; - - rep = db_rep->region; - - /* - * If we're a client and not running recovery or internally, error. - */ - if (IS_REP_CLIENT(dbenv) && !F_ISSET(dbc->dbp, DB_AM_CL_WRITER)) { - __db_err(dbenv, "Dbc_logging: Client update"); - goto err; - } - if (IS_REP_MASTER(dbenv) && dbc->txn == NULL) { - __db_err(dbenv, "Dbc_logging: Master non-txn update"); - goto err; - } - if (0) { -err: __db_err(dbenv, "Rep: flags 0x%lx msg_th %lu, start_th %d", - (u_long)rep->flags, (u_long)rep->msg_th, rep->start_th); - __db_err(dbenv, "Rep: handle %lu, opcnt %lu, in_rec %d", - (u_long)rep->handle_cnt, (u_long)rep->op_cnt, - rep->in_recovery); - abort(); - } - } -#endif - return (ret); -} -#endif - -/* - * __db_check_lsn -- - * Display the log sequence error message. - * - * PUBLIC: int __db_check_lsn __P((DB_ENV *, DB_LSN *, DB_LSN *)); - */ -int -__db_check_lsn(dbenv, lsn, prev) - DB_ENV *dbenv; - DB_LSN *lsn, *prev; -{ - __db_err(dbenv, - "Log sequence error: page LSN %lu %lu; previous LSN %lu %lu", - (u_long)(lsn)->file, (u_long)(lsn)->offset, - (u_long)(prev)->file, (u_long)(prev)->offset); - return (EINVAL); -} - -/* - * __db_rdonly -- - * Common readonly message. 
- * PUBLIC: int __db_rdonly __P((const DB_ENV *, const char *)); - */ -int -__db_rdonly(dbenv, name) - const DB_ENV *dbenv; - const char *name; -{ - __db_err(dbenv, "%s: attempt to modify a read-only database", name); - return (EACCES); -} - -/* - * __db_space_err -- - * Common out of space message. - * PUBLIC: int __db_space_err __P((const DB *)); - */ -int -__db_space_err(dbp) - const DB *dbp; -{ - __db_err(dbp->dbenv, - "%s: file limited to %lu pages", - dbp->fname, (u_long)dbp->mpf->mfp->maxpgno); - return (ENOSPC); -} - -/* - * __db_failed -- - * Common failed thread message. - * - * PUBLIC: int __db_failed __P((const DB_ENV *, - * PUBLIC: const char *, pid_t, db_threadid_t)); - */ -int -__db_failed(dbenv, msg, pid, tid) - const DB_ENV *dbenv; - const char *msg; - pid_t pid; - db_threadid_t tid; -{ - char buf[DB_THREADID_STRLEN]; - - __db_err(dbenv, "Thread/process %s failed: %s", - dbenv->thread_id_string((DB_ENV*)dbenv, pid, tid, buf), msg); - return (DB_RUNRECOVERY); -} diff --git a/storage/bdb/common/db_getlong.c b/storage/bdb/common/db_getlong.c deleted file mode 100644 index 3d0183c602c..00000000000 --- a/storage/bdb/common/db_getlong.c +++ /dev/null @@ -1,146 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_getlong.c,v 12.1 2005/06/16 20:20:53 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" - -/* - * __db_getlong -- - * Return a long value inside of basic parameters. - * - * PUBLIC: int __db_getlong - * PUBLIC: __P((DB_ENV *, const char *, char *, long, long, long *)); - */ -int -__db_getlong(dbenv, progname, p, min, max, storep) - DB_ENV *dbenv; - const char *progname; - char *p; - long min, max, *storep; -{ - long val; - char *end; - - __os_set_errno(0); - val = strtol(p, &end, 10); - if ((val == LONG_MIN || val == LONG_MAX) && - __os_get_errno() == ERANGE) { - if (dbenv == NULL) - fprintf(stderr, - "%s: %s: %s\n", progname, p, strerror(ERANGE)); - else - dbenv->err(dbenv, ERANGE, "%s", p); - return (1); - } - if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) { - if (dbenv == NULL) - fprintf(stderr, - "%s: %s: Invalid numeric argument\n", progname, p); - else - dbenv->errx(dbenv, "%s: Invalid numeric argument", p); - return (1); - } - if (val < min) { - if (dbenv == NULL) - fprintf(stderr, - "%s: %s: Less than minimum value (%ld)\n", - progname, p, min); - else - dbenv->errx(dbenv, - "%s: Less than minimum value (%ld)", p, min); - return (1); - } - if (val > max) { - if (dbenv == NULL) - fprintf(stderr, - "%s: %s: Greater than maximum value (%ld)\n", - progname, p, max); - else - dbenv->errx(dbenv, - "%s: Greater than maximum value (%ld)", p, max); - return (1); - } - *storep = val; - return (0); -} - -/* - * __db_getulong -- - * Return an unsigned long value inside of basic parameters. 
- * - * PUBLIC: int __db_getulong - * PUBLIC: __P((DB_ENV *, const char *, char *, u_long, u_long, u_long *)); - */ -int -__db_getulong(dbenv, progname, p, min, max, storep) - DB_ENV *dbenv; - const char *progname; - char *p; - u_long min, max, *storep; -{ - u_long val; - char *end; - - __os_set_errno(0); - val = strtoul(p, &end, 10); - if (val == ULONG_MAX && __os_get_errno() == ERANGE) { - if (dbenv == NULL) - fprintf(stderr, - "%s: %s: %s\n", progname, p, strerror(ERANGE)); - else - dbenv->err(dbenv, ERANGE, "%s", p); - return (1); - } - if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) { - if (dbenv == NULL) - fprintf(stderr, - "%s: %s: Invalid numeric argument\n", progname, p); - else - dbenv->errx(dbenv, "%s: Invalid numeric argument", p); - return (1); - } - if (val < min) { - if (dbenv == NULL) - fprintf(stderr, - "%s: %s: Less than minimum value (%lu)\n", - progname, p, min); - else - dbenv->errx(dbenv, - "%s: Less than minimum value (%lu)", p, min); - return (1); - } - - /* - * We allow a 0 to substitute as a max value for ULONG_MAX because - * 1) accepting only a 0 value is unlikely to be necessary, and 2) - * we don't want callers to have to use ULONG_MAX explicitly, as it - * may not exist on all platforms. - */ - if (max != 0 && val > max) { - if (dbenv == NULL) - fprintf(stderr, - "%s: %s: Greater than maximum value (%lu)\n", - progname, p, max); - else - dbenv->errx(dbenv, - "%s: Greater than maximum value (%lu)", p, max); - return (1); - } - *storep = val; - return (0); -} diff --git a/storage/bdb/common/db_idspace.c b/storage/bdb/common/db_idspace.c deleted file mode 100644 index 3932a49ea2e..00000000000 --- a/storage/bdb/common/db_idspace.c +++ /dev/null @@ -1,92 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_idspace.c,v 12.1 2005/06/16 20:20:53 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" - -static int __db_idcmp __P((const void *, const void *)); - -static int -__db_idcmp(a, b) - const void *a; - const void *b; -{ - u_int32_t i, j; - - i = *(u_int32_t *)a; - j = *(u_int32_t *)b; - - if (i < j) - return (-1); - else if (i > j) - return (1); - else - return (0); -} - -/* - * __db_idspace -- - * - * On input, minp and maxp contain the minimum and maximum valid values for - * the name space and on return, they contain the minimum and maximum ids - * available (by finding the biggest gap). The minimum can be an inuse - * value, but the maximum cannot be. - * - * PUBLIC: void __db_idspace __P((u_int32_t *, int, u_int32_t *, u_int32_t *)); - */ -void -__db_idspace(inuse, n, minp, maxp) - u_int32_t *inuse; - int n; - u_int32_t *minp, *maxp; -{ - int i, low; - u_int32_t gap, t; - - /* A single locker ID is a special case. */ - if (n == 1) { - /* - * If the single item in use is the last one in the range, - * then we've got to perform wrap which means that we set - * the min to the minimum ID, which is what we came in with, - * so we don't do anything. - */ - if (inuse[0] != *maxp) - *minp = inuse[0]; - *maxp = inuse[0] - 1; - return; - } - - gap = 0; - low = 0; - qsort(inuse, (size_t)n, sizeof(u_int32_t), __db_idcmp); - for (i = 0; i < n - 1; i++) - if ((t = (inuse[i + 1] - inuse[i])) > gap) { - gap = t; - low = i; - } - - /* Check for largest gap at the end of the space. 
*/ - if ((*maxp - inuse[n - 1]) + (inuse[0] - *minp) > gap) { - /* Do same check as we do in the n == 1 case. */ - if (inuse[n - 1] != *maxp) - *minp = inuse[n - 1]; - *maxp = inuse[0] - 1; - } else { - *minp = inuse[low]; - *maxp = inuse[low + 1] - 1; - } -} diff --git a/storage/bdb/common/db_log2.c b/storage/bdb/common/db_log2.c deleted file mode 100644 index 455340640e9..00000000000 --- a/storage/bdb/common/db_log2.c +++ /dev/null @@ -1,62 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1995, 1996 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Margo Seltzer. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_log2.c,v 12.1 2005/06/16 20:20:53 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" - -/* - * PUBLIC: u_int32_t __db_log2 __P((u_int32_t)); - */ -u_int32_t -__db_log2(num) - u_int32_t num; -{ - u_int32_t i, limit; - - limit = 1; - for (i = 0; limit < num; limit = limit << 1) - ++i; - return (i); -} diff --git a/storage/bdb/common/util_arg.c b/storage/bdb/common/util_arg.c deleted file mode 100644 index 017fda6c312..00000000000 --- a/storage/bdb/common/util_arg.c +++ /dev/null @@ -1,124 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: util_arg.c,v 12.1 2005/06/16 20:20:53 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" - -static char *__db_strsep __P((char **, const char *)); - -/* - * __db_util_arg -- - * Convert a string into an argc/argv pair. 
- * - * PUBLIC: int __db_util_arg __P((char *, char *, int *, char ***)); - */ -int -__db_util_arg(arg0, str, argcp, argvp) - char *arg0, *str, ***argvp; - int *argcp; -{ - int n, ret; - char **ap, **argv; - -#define MAXARGS 25 - if ((ret = - __os_malloc(NULL, (MAXARGS + 1) * sizeof(char **), &argv)) != 0) - return (ret); - - ap = argv; - *ap++ = arg0; - for (n = 1; (*ap = __db_strsep(&str, " \t")) != NULL;) - if (**ap != '\0') { - ++ap; - if (++n == MAXARGS) - break; - } - *ap = NULL; - - *argcp = ap - argv; - *argvp = argv; - - return (0); -} - -/*- - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -/* - * Get next token from string *stringp, where tokens are possibly-empty - * strings separated by characters from delim. - * - * Writes NULs into the string at *stringp to end tokens. - * delim need not remain constant from call to call. - * On return, *stringp points past the last NUL written (if there might - * be further tokens), or is NULL (if there are definitely no more tokens). - * - * If *stringp is NULL, strsep returns NULL. - */ -static char * -__db_strsep(stringp, delim) - char **stringp; - const char *delim; -{ - const char *spanp; - int c, sc; - char *s, *tok; - - if ((s = *stringp) == NULL) - return (NULL); - for (tok = s;;) { - c = *s++; - spanp = delim; - do { - if ((sc = *spanp++) == c) { - if (c == 0) - s = NULL; - else - s[-1] = 0; - *stringp = s; - return (tok); - } - } while (sc != 0); - } - /* NOTREACHED */ -} diff --git a/storage/bdb/common/util_cache.c b/storage/bdb/common/util_cache.c deleted file mode 100644 index 34ff5ff008f..00000000000 --- a/storage/bdb/common/util_cache.c +++ /dev/null @@ -1,56 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. 
- * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: util_cache.c,v 12.1 2005/06/16 20:20:54 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include - -#include -#endif - -#include "db_int.h" - -/* - * __db_util_cache -- - * Compute if we have enough cache. - * - * PUBLIC: int __db_util_cache __P((DB *, u_int32_t *, int *)); - */ -int -__db_util_cache(dbp, cachep, resizep) - DB *dbp; - u_int32_t *cachep; - int *resizep; -{ - u_int32_t pgsize; - int ret; - - /* Get the current page size. */ - if ((ret = dbp->get_pagesize(dbp, &pgsize)) != 0) - return (ret); - - /* - * The current cache size is in cachep. If it's insufficient, set the - * the memory referenced by resizep to 1 and set cachep to the minimum - * size needed. - * - * Make sure our current cache is big enough. We want at least - * DB_MINPAGECACHE pages in the cache. - */ - if ((*cachep / pgsize) < DB_MINPAGECACHE) { - *resizep = 1; - *cachep = pgsize * DB_MINPAGECACHE; - } else - *resizep = 0; - - return (0); -} diff --git a/storage/bdb/common/util_log.c b/storage/bdb/common/util_log.c deleted file mode 100644 index 5c46d6b2d4c..00000000000 --- a/storage/bdb/common/util_log.c +++ /dev/null @@ -1,63 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: util_log.c,v 12.4 2005/10/12 17:47:17 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#if TIME_WITH_SYS_TIME -#include -#include -#else -#if HAVE_SYS_TIME_H -#include -#else -#include -#endif -#endif - -#include -#endif - -#include "db_int.h" - -/* - * __db_util_logset -- - * Log that we're running. - * - * PUBLIC: int __db_util_logset __P((const char *, char *)); - */ -int -__db_util_logset(progname, fname) - const char *progname; - char *fname; -{ - pid_t pid; - db_threadid_t tid; - FILE *fp; - time_t now; - - if ((fp = fopen(fname, "w")) == NULL) - goto err; - - (void)time(&now); - - __os_id(NULL, &pid, &tid); - fprintf(fp, "%s: %lu %s", progname, (u_long)pid, ctime(&now)); - - if (fclose(fp) == EOF) - goto err; - - return (0); - -err: fprintf(stderr, "%s: %s: %s\n", progname, fname, strerror(errno)); - return (1); -} diff --git a/storage/bdb/common/util_sig.c b/storage/bdb/common/util_sig.c deleted file mode 100644 index 3561173166f..00000000000 --- a/storage/bdb/common/util_sig.c +++ /dev/null @@ -1,84 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: util_sig.c,v 12.1 2005/06/16 20:20:55 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" - -static int interrupt; -static void onint __P((int)); - -/* - * onint -- - * Interrupt signal handler. - */ -static void -onint(signo) - int signo; -{ - if ((interrupt = signo) == 0) - interrupt = SIGINT; -} - -/* - * __db_util_siginit -- - * - * PUBLIC: void __db_util_siginit __P((void)); - */ -void -__db_util_siginit() -{ - /* - * Initialize the set of signals for which we want to clean up. - * Generally, we try not to leave the shared regions locked if - * we can. - */ -#ifdef SIGHUP - (void)signal(SIGHUP, onint); -#endif - (void)signal(SIGINT, onint); -#ifdef SIGPIPE - (void)signal(SIGPIPE, onint); -#endif - (void)signal(SIGTERM, onint); -} - -/* - * __db_util_interrupted -- - * Return if interrupted. 
- * - * PUBLIC: int __db_util_interrupted __P((void)); - */ -int -__db_util_interrupted() -{ - return (interrupt != 0); -} - -/* - * __db_util_sigresend -- - * - * PUBLIC: void __db_util_sigresend __P((void)); - */ -void -__db_util_sigresend() -{ - /* Resend any caught signal. */ - if (interrupt != 0) { - (void)signal(interrupt, SIG_DFL); - (void)raise(interrupt); - /* NOTREACHED */ - } -} diff --git a/storage/bdb/crypto/aes_method.c b/storage/bdb/crypto/aes_method.c deleted file mode 100644 index f77616f3c35..00000000000 --- a/storage/bdb/crypto/aes_method.c +++ /dev/null @@ -1,273 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2005 - * Sleepycat Software. All rights reserved. - * - * Some parts of this code originally written by Adam Stubblefield, - * -- astubble@rice.edu. - * - * $Id: aes_method.c,v 12.1 2005/06/16 20:20:55 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/crypto.h" -#include "dbinc/hmac.h" - -static void __aes_err __P((DB_ENV *, int)); -static int __aes_derivekeys __P((DB_ENV *, DB_CIPHER *, u_int8_t *, size_t)); - -/* - * __aes_setup -- - * Setup AES functions. - * - * PUBLIC: int __aes_setup __P((DB_ENV *, DB_CIPHER *)); - */ -int -__aes_setup(dbenv, db_cipher) - DB_ENV *dbenv; - DB_CIPHER *db_cipher; -{ - AES_CIPHER *aes_cipher; - int ret; - - db_cipher->adj_size = __aes_adj_size; - db_cipher->close = __aes_close; - db_cipher->decrypt = __aes_decrypt; - db_cipher->encrypt = __aes_encrypt; - db_cipher->init = __aes_init; - if ((ret = __os_calloc(dbenv, 1, sizeof(AES_CIPHER), &aes_cipher)) != 0) - return (ret); - db_cipher->data = aes_cipher; - return (0); -} - -/* - * __aes_adj_size -- - * Given a size, return an addition amount needed to meet the - * "chunk" needs of the algorithm. - * - * PUBLIC: u_int __aes_adj_size __P((size_t)); - */ -u_int -__aes_adj_size(len) - size_t len; -{ - if (len % DB_AES_CHUNK == 0) - return (0); - return (DB_AES_CHUNK - (u_int)(len % DB_AES_CHUNK)); -} - -/* - * __aes_close -- - * Destroy the AES encryption instantiation. - * - * PUBLIC: int __aes_close __P((DB_ENV *, void *)); - */ -int -__aes_close(dbenv, data) - DB_ENV *dbenv; - void *data; -{ - __os_free(dbenv, data); - return (0); -} - -/* - * __aes_decrypt -- - * Decrypt data with AES. - * - * PUBLIC: int __aes_decrypt __P((DB_ENV *, void *, void *, - * PUBLIC: u_int8_t *, size_t)); - */ -int -__aes_decrypt(dbenv, aes_data, iv, cipher, cipher_len) - DB_ENV *dbenv; - void *aes_data; - void *iv; - u_int8_t *cipher; - size_t cipher_len; -{ - AES_CIPHER *aes; - cipherInstance c; - int ret; - - aes = (AES_CIPHER *)aes_data; - if (iv == NULL || cipher == NULL) - return (EINVAL); - if ((cipher_len % DB_AES_CHUNK) != 0) - return (EINVAL); - /* - * Initialize the cipher - */ - if ((ret = __db_cipherInit(&c, MODE_CBC, iv)) < 0) { - __aes_err(dbenv, ret); - return (EAGAIN); - } - - /* Do the decryption */ - if ((ret = __db_blockDecrypt(&c, &aes->decrypt_ki, cipher, - cipher_len * 8, cipher)) < 0) { - __aes_err(dbenv, ret); - return (EAGAIN); - } - return (0); -} - -/* - * __aes_encrypt -- - * Encrypt data with AES. 
- * - * PUBLIC: int __aes_encrypt __P((DB_ENV *, void *, void *, - * PUBLIC: u_int8_t *, size_t)); - */ -int -__aes_encrypt(dbenv, aes_data, iv, data, data_len) - DB_ENV *dbenv; - void *aes_data; - void *iv; - u_int8_t *data; - size_t data_len; -{ - AES_CIPHER *aes; - cipherInstance c; - u_int32_t tmp_iv[DB_IV_BYTES/4]; - int ret; - - aes = (AES_CIPHER *)aes_data; - if (aes == NULL || data == NULL) - return (EINVAL); - if ((data_len % DB_AES_CHUNK) != 0) - return (EINVAL); - /* - * Generate the IV here. We store it in a tmp IV because - * the IV might be stored within the data we are encrypting - * and so we will copy it over to the given location after - * encryption is done. - * We don't do this outside of there because some encryption - * algorithms someone might add may not use IV's and we always - * want on here. - */ - if ((ret = __db_generate_iv(dbenv, tmp_iv)) != 0) - return (ret); - - /* - * Initialize the cipher - */ - if ((ret = __db_cipherInit(&c, MODE_CBC, (char *)tmp_iv)) < 0) { - __aes_err(dbenv, ret); - return (EAGAIN); - } - - /* Do the encryption */ - if ((ret = __db_blockEncrypt(&c, &aes->encrypt_ki, data, data_len * 8, - data)) < 0) { - __aes_err(dbenv, ret); - return (EAGAIN); - } - memcpy(iv, tmp_iv, DB_IV_BYTES); - return (0); -} - -/* - * __aes_init -- - * Initialize the AES encryption instantiation. - * - * PUBLIC: int __aes_init __P((DB_ENV *, DB_CIPHER *)); - */ -int -__aes_init(dbenv, db_cipher) - DB_ENV *dbenv; - DB_CIPHER *db_cipher; -{ - return (__aes_derivekeys(dbenv, db_cipher, (u_int8_t *)dbenv->passwd, - dbenv->passwd_len)); -} - -static int -__aes_derivekeys(dbenv, db_cipher, passwd, plen) - DB_ENV *dbenv; - DB_CIPHER *db_cipher; - u_int8_t *passwd; - size_t plen; -{ - SHA1_CTX ctx; - AES_CIPHER *aes; - int ret; - u_int32_t temp[DB_MAC_KEY/4]; - - if (passwd == NULL) - return (EINVAL); - - aes = (AES_CIPHER *)db_cipher->data; - - /* Derive the crypto keys */ - __db_SHA1Init(&ctx); - __db_SHA1Update(&ctx, passwd, plen); - __db_SHA1Update(&ctx, (u_int8_t *)DB_ENC_MAGIC, strlen(DB_ENC_MAGIC)); - __db_SHA1Update(&ctx, passwd, plen); - __db_SHA1Final((u_int8_t *)temp, &ctx); - - if ((ret = __db_makeKey(&aes->encrypt_ki, DIR_ENCRYPT, - DB_AES_KEYLEN, (char *)temp)) != TRUE) { - __aes_err(dbenv, ret); - return (EAGAIN); - } - if ((ret = __db_makeKey(&aes->decrypt_ki, DIR_DECRYPT, - DB_AES_KEYLEN, (char *)temp)) != TRUE) { - __aes_err(dbenv, ret); - return (EAGAIN); - } - return (0); -} - -/* - * __aes_err -- - * Handle AES-specific errors. Codes and messages derived from - * rijndael/rijndael-api-fst.h. 
- */ -static void -__aes_err(dbenv, err) - DB_ENV *dbenv; - int err; -{ - char *errstr; - - switch (err) { - case BAD_KEY_DIR: - errstr = "AES key direction is invalid"; - break; - case BAD_KEY_MAT: - errstr = "AES key material not of correct length"; - break; - case BAD_KEY_INSTANCE: - errstr = "AES key passwd not valid"; - break; - case BAD_CIPHER_MODE: - errstr = "AES cipher in wrong state (not initialized)"; - break; - case BAD_BLOCK_LENGTH: - errstr = "AES bad block length"; - break; - case BAD_CIPHER_INSTANCE: - errstr = "AES cipher instance is invalid"; - break; - case BAD_DATA: - errstr = "AES data contents are invalid"; - break; - case BAD_OTHER: - errstr = "AES unknown error"; - break; - default: - errstr = "AES error unrecognized"; - break; - } - __db_err(dbenv, errstr); - return; -} diff --git a/storage/bdb/crypto/crypto.c b/storage/bdb/crypto/crypto.c deleted file mode 100644 index 63dea986fe6..00000000000 --- a/storage/bdb/crypto/crypto.c +++ /dev/null @@ -1,394 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * Some parts of this code originally written by Adam Stubblefield - * -- astubble@rice.edu - * - * $Id: crypto.c,v 12.5 2005/07/20 16:50:56 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/crypto.h" - -/* - * __crypto_region_init -- - * Initialize crypto. - */ -int -__crypto_region_init(dbenv) - DB_ENV *dbenv; -{ - REGENV *renv; - REGINFO *infop; - CIPHER *cipher; - DB_CIPHER *db_cipher; - char *sh_passwd; - int ret; - - db_cipher = dbenv->crypto_handle; - - ret = 0; - infop = dbenv->reginfo; - renv = infop->primary; - if (renv->cipher_off == INVALID_ROFF) { - if (!CRYPTO_ON(dbenv)) - return (0); - if (!F_ISSET(infop, REGION_CREATE)) { - __db_err(dbenv, - "Joining non-encrypted environment with encryption key"); - return (EINVAL); - } - if (F_ISSET(db_cipher, CIPHER_ANY)) { - __db_err(dbenv, "Encryption algorithm not supplied"); - return (EINVAL); - } - /* - * Must create the shared information. We need: Shared cipher - * information that contains the passwd. After we copy the - * passwd, we smash and free the one in the dbenv. - */ - if ((ret = - __db_shalloc(infop, sizeof(CIPHER), 0, &cipher)) != 0) - return (ret); - memset(cipher, 0, sizeof(*cipher)); - if ((ret = __db_shalloc( - infop, dbenv->passwd_len, 0, &sh_passwd)) != 0) { - __db_shalloc_free(infop, cipher); - return (ret); - } - memset(sh_passwd, 0, dbenv->passwd_len); - cipher->passwd = R_OFFSET(infop, sh_passwd); - cipher->passwd_len = dbenv->passwd_len; - cipher->flags = db_cipher->alg; - memcpy(sh_passwd, dbenv->passwd, cipher->passwd_len); - renv->cipher_off = R_OFFSET(infop, cipher); - } else { - if (!CRYPTO_ON(dbenv)) { - __db_err(dbenv, - "Encrypted environment: no encryption key supplied"); - return (EINVAL); - } - cipher = R_ADDR(infop, renv->cipher_off); - sh_passwd = R_ADDR(infop, cipher->passwd); - if ((cipher->passwd_len != dbenv->passwd_len) || - memcmp(dbenv->passwd, sh_passwd, cipher->passwd_len) != 0) { - __db_err(dbenv, "Invalid password"); - return (EPERM); - } - if (!F_ISSET(db_cipher, CIPHER_ANY) && - db_cipher->alg != cipher->flags) { - __db_err(dbenv, - "Environment encrypted using a different algorithm"); - return (EINVAL); - } - if (F_ISSET(db_cipher, CIPHER_ANY)) - /* - * We have CIPHER_ANY and we are joining the existing - * env. 
Setup our cipher structure for whatever - * algorithm this env has. - */ - if ((ret = __crypto_algsetup(dbenv, db_cipher, - cipher->flags, 0)) != 0) - return (ret); - } - ret = db_cipher->init(dbenv, db_cipher); - - /* - * On success, no matter if we allocated it or are using the already - * existing one, we are done with the passwd in the dbenv. We smash - * N-1 bytes so that we don't overwrite the nul. - */ - memset(dbenv->passwd, 0xff, dbenv->passwd_len-1); - __os_free(dbenv, dbenv->passwd); - dbenv->passwd = NULL; - dbenv->passwd_len = 0; - - return (ret); -} - -/* - * __crypto_dbenv_close -- - * Crypto-specific destruction of DB_ENV structure. - * - * PUBLIC: int __crypto_dbenv_close __P((DB_ENV *)); - */ -int -__crypto_dbenv_close(dbenv) - DB_ENV *dbenv; -{ - DB_CIPHER *db_cipher; - int ret; - - ret = 0; - db_cipher = dbenv->crypto_handle; - if (dbenv->passwd != NULL) { - memset(dbenv->passwd, 0xff, dbenv->passwd_len-1); - __os_free(dbenv, dbenv->passwd); - dbenv->passwd = NULL; - } - if (!CRYPTO_ON(dbenv)) - return (0); - if (!F_ISSET(db_cipher, CIPHER_ANY)) - ret = db_cipher->close(dbenv, db_cipher->data); - __os_free(dbenv, db_cipher); - return (ret); -} - -/* - * __crypto_region_destroy -- - * Destroy any system resources allocated in the primary region. - * - * PUBLIC: int __crypto_region_destroy __P((DB_ENV *)); - */ -int -__crypto_region_destroy(dbenv) - DB_ENV *dbenv; -{ - CIPHER *cipher; - REGENV *renv; - REGINFO *infop; - - infop = dbenv->reginfo; - renv = infop->primary; - if (renv->cipher_off != INVALID_ROFF) { - cipher = R_ADDR(infop, renv->cipher_off); - __db_shalloc_free(infop, R_ADDR(infop, cipher->passwd)); - __db_shalloc_free(infop, cipher); - } - return (0); -} - -/* - * __crypto_algsetup -- - * Given a db_cipher structure and a valid algorithm flag, call - * the specific algorithm setup function. - * - * PUBLIC: int __crypto_algsetup __P((DB_ENV *, DB_CIPHER *, u_int32_t, int)); - */ -int -__crypto_algsetup(dbenv, db_cipher, alg, do_init) - DB_ENV *dbenv; - DB_CIPHER *db_cipher; - u_int32_t alg; - int do_init; -{ - int ret; - - ret = 0; - if (!CRYPTO_ON(dbenv)) { - __db_err(dbenv, "No cipher structure given"); - return (EINVAL); - } - F_CLR(db_cipher, CIPHER_ANY); - switch (alg) { - case CIPHER_AES: - db_cipher->alg = CIPHER_AES; - ret = __aes_setup(dbenv, db_cipher); - break; - default: - __db_panic(dbenv, EINVAL); - /* NOTREACHED */ - } - if (do_init) - ret = db_cipher->init(dbenv, db_cipher); - return (ret); -} - -/* - * __crypto_decrypt_meta -- - * Perform decryption on a metapage if needed. - * - * PUBLIC: int __crypto_decrypt_meta __P((DB_ENV *, DB *, u_int8_t *, int)); - */ -int -__crypto_decrypt_meta(dbenv, dbp, mbuf, do_metachk) - DB_ENV *dbenv; - DB *dbp; - u_int8_t *mbuf; - int do_metachk; -{ - DB_CIPHER *db_cipher; - DB dummydb; - DBMETA *meta; - size_t pg_off; - int ret; - u_int8_t *iv; - - /* - * If we weren't given a dbp, we just want to decrypt the page on - * behalf of some internal subsystem, not on behalf of a user with - * a dbp. Therefore, set up a dummy dbp so that the call to - * P_OVERHEAD below works. - */ - if (dbp == NULL) { - memset(&dummydb, 0, sizeof(DB)); - dbp = &dummydb; - } - - ret = 0; - meta = (DBMETA *)mbuf; - - /* - * !!! - * We used an "unused" field in the meta-data page to flag whether or - * not the database is encrypted. Unfortunately, that unused field - * was used in Berkeley DB releases before 3.0 (for example, 2.7.7). 
- * It would have been OK, except encryption doesn't follow the usual - * rules of "upgrade before doing anything else", we check encryption - * before checking for old versions of the database. - * - * We don't have to check Btree databases -- before 3.0, the field of - * interest was the bt_maxkey field (which was never supported and has - * since been removed). - * - * Ugly check to jump out if this format is older than what we support. - * It assumes no encrypted page will have an unencrypted magic number, - * but that seems relatively safe. [#10920] - */ - if (meta->magic == DB_HASHMAGIC && meta->version <= 5) - return (0); - - /* - * Meta-pages may be encrypted for DBMETASIZE bytes. If we have a - * non-zero IV (that is written after encryption) then we decrypt (or - * error if the user isn't set up for security). We guarantee that - * the IV space on non-encrypted pages will be zero and a zero-IV is - * illegal for encryption. Therefore any non-zero IV means an - * encrypted database. This basically checks the passwd on the file - * if we cannot find a good magic number. We walk through all the - * algorithms we know about attempting to decrypt (and possibly - * byteswap). - * - * !!! - * All method meta pages have the IV and checksum at the exact same - * location, but not in DBMETA, use BTMETA. - */ - if (meta->encrypt_alg != 0) { - db_cipher = (DB_CIPHER *)dbenv->crypto_handle; - if (!F_ISSET(dbp, DB_AM_ENCRYPT)) { - if (!CRYPTO_ON(dbenv)) { - __db_err(dbenv, - "Encrypted database: no encryption flag specified"); - return (EINVAL); - } - /* - * User has a correct, secure env, but has encountered - * a database in that env that is secure, but user - * didn't dbp->set_flags. Since it is existing, use - * encryption if it is that way already. - */ - F_SET(dbp, DB_AM_ENCRYPT|DB_AM_CHKSUM); - } - /* - * This was checked in set_flags when DB_AM_ENCRYPT was set. - * So it better still be true here. - */ - DB_ASSERT(CRYPTO_ON(dbenv)); - if (!F_ISSET(db_cipher, CIPHER_ANY) && - meta->encrypt_alg != db_cipher->alg) { - __db_err(dbenv, - "Database encrypted using a different algorithm"); - return (EINVAL); - } - DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM)); - iv = ((BTMETA *)mbuf)->iv; - /* - * For ALL pages, we do not encrypt the beginning of the page - * that contains overhead information. This is true of meta - * and all other pages. - */ - pg_off = P_OVERHEAD(dbp); -alg_retry: - /* - * If they asked for a specific algorithm, then - * use it. Otherwise walk through those we know. - */ - if (!F_ISSET(db_cipher, CIPHER_ANY)) { - if (do_metachk && (ret = db_cipher->decrypt(dbenv, - db_cipher->data, iv, mbuf + pg_off, - DBMETASIZE - pg_off))) - return (ret); - if (((BTMETA *)meta)->crypto_magic != - meta->magic) { - __db_err(dbenv, "Invalid password"); - return (EINVAL); - } - /* - * Success here. The algorithm asked for and the one - * on the file match. We've just decrypted the meta - * page and checked the magic numbers. They match, - * indicating the password is right. All is right - * with the world. - */ - return (0); - } - /* - * If we get here, CIPHER_ANY must be set. - */ - ret = __crypto_algsetup(dbenv, db_cipher, meta->encrypt_alg, 1); - goto alg_retry; - } else if (F_ISSET(dbp, DB_AM_ENCRYPT)) { - /* - * They gave us a passwd, but the database is not encrypted. - * This is an error. We do NOT want to silently allow them - * to write data in the clear when the user set up and expects - * encrypted data. - * - * This covers at least the following scenario. - * 1. 
User creates and sets up an encrypted database. - * 2. Attacker cannot read the actual data in the database - * because it is encrypted, but can remove/replace the file - * with an empty, unencrypted database file. - * 3. User sets encryption and we get to this code now. - * If we allowed the file to be used in the clear since - * it is that way on disk, the user would unsuspectingly - * write sensitive data in the clear. - * 4. Attacker reads data that user thought was encrypted. - * - * Therefore, asking for encryption with a database that - * was not encrypted is an error. - */ - __db_err(dbenv, - "Unencrypted database with a supplied encryption key"); - return (EINVAL); - } - return (ret); -} - -/* - * __crypto_set_passwd -- - * Get the password from the shared region; and set it in a new - * environment handle. Use this to duplicate environment handles. - * - * PUBLIC: int __crypto_set_passwd __P((DB_ENV *, DB_ENV *)); - */ -int -__crypto_set_passwd(dbenv_src, dbenv_dest) - DB_ENV *dbenv_src, *dbenv_dest; -{ - CIPHER *cipher; - REGENV *renv; - REGINFO *infop; - char *sh_passwd; - int ret; - - ret = 0; - infop = dbenv_src->reginfo; - renv = infop->primary; - - DB_ASSERT(CRYPTO_ON(dbenv_src)); - - cipher = R_ADDR(infop, renv->cipher_off); - sh_passwd = R_ADDR(infop, cipher->passwd); - return (__env_set_encrypt(dbenv_dest, sh_passwd, DB_ENCRYPT_AES)); -} diff --git a/storage/bdb/crypto/crypto.html b/storage/bdb/crypto/crypto.html deleted file mode 100644 index 7d2804b43b4..00000000000 --- a/storage/bdb/crypto/crypto.html +++ /dev/null @@ -1,639 +0,0 @@ - - - - - - - - -
-Security Interface for Berkeley DB
-Susan LoVerso
-sue@sleepycat.com
-Rev 1.6
-2002 Feb 26

We provide an interface allowing secure access to Berkeley DB.   -Our goal is to allow users to have encrypted secure databases.  In -this document, the term ciphering means the act of encryption or -decryption.  They are equal but opposite actions and the same issues -apply to both just in the opposite direction. -

-Requirements

-The overriding requirement is to provide a simple mechanism to allow users -to have a secure database.  A secure database means that all of the -pages of a database will be encrypted, and all of the log files will be -encrypted. -

Falling out from this work will be a simple mechanism to allow users -to request that we checksum their data for additional error detection (without -encryption/decryption). -

We expect that data in process memory or stored in shared memory, potentially -backed by disk, is not encrypted or secure. -

-DB Method Interface Modifications

-With a logging environment, all database changes are recorded in the log -files.  Therefore, users requiring secure databases in such environments -also require secure log files. -

A prior thought had been to allow different passwords on the environment and the databases within it. However, such a scheme requires that the password be logged in order for recovery to be able to restore the database, so any application holding the password for the log could recover the password for any database by reading the log. Having a different password on a database therefore gains no additional security, and it makes several things harder and more complex: database and environment passwords would need to be stored and accessed from different places, and it was never resolved how db_checkpoint or db_sync, which flush database pages to disk, would find the passwords of the various databases without any dbps. The feature gained nothing and caused significant pain, so the decision is that there will be a single password protecting an environment and all the logs and some databases within that environment. We do allow users to have a secure environment and clear databases; users that want secure databases within a secure environment must set a flag.

Users wishing to enable encryption on a database in a secure environment -or enable just checksumming on their database pages will use new flags -to DB->set_flags().  -Providing ciphering over an entire environment is accomplished by adding -a single environment method: DBENV->set_encrypt().  -Providing encryption for a database (not part of an environment) is accomplished -by adding a new database method: DB->set_encrypt(). -

Both of the set_encrypt methods must be called before their respective -open calls.  The environment method must be before the environment -open because we must know about security before there is any possibility -of writing any log records out.  The database method must be before -the database open in order to read the root page.  The planned interfaces -for these methods are: -

DBENV->set_encrypt(DBENV *dbenv,        /* DB_ENV structure */
-                  char *passwd          /* Password */
-                  u_int32_t flags);     /* Flags */
- -
DB->set_encrypt(DB *dbp,             /* DB structure */
-               char *passwd          /* Password */
-               u_int32_t flags);     /* Flags */
-The flags accepted by these functions are: -
#define DB_ENCRYPT_AES  0x00000001  /* Use the AES encryption algorithm */
-Passwords are NULL-terminated strings.  NULL or zero length strings -are illegal.  These flags enable the checksumming and encryption using -the particular algorithms we have chosen for this implementation.  -The flags are named such that there is a logical naming pattern if additional -checksum or encryption algorithms are used. If a user gives a flag of zero, -it will behave in a manner similar to DB_UNKNOWN. It will be illegal if -they are creating the environment or database, as an algorithm must be -specified. If they are joining an existing environment or opening an existing -database, they will use whatever algorithm is in force at the time.  -Using DB_ENCRYPT_AES automatically implies SHA1 checksumming. -
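As a minimal sketch of the intended calling sequence (assuming a Berkeley DB 4.1-style C API in which DB->open takes a transaction argument, and assuming the DB->set_flags() encryption flag is named DB_ENCRYPT; the password, file name, and function name below are placeholders, and error handling is abbreviated):

#include <db.h>

/* Sketch: a secure environment with one encrypted database inside it. */
int
open_secure(const char *home, DB_ENV **envp, DB **dbpp)
{
    DB_ENV *dbenv;
    DB *dbp;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    /* Must come before DB_ENV->open so log records are ciphered from the start. */
    if ((ret = dbenv->set_encrypt(dbenv, "placeholder passwd", DB_ENCRYPT_AES)) != 0)
        goto err;
    if ((ret = dbenv->open(dbenv, home, DB_CREATE |
        DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
        goto err;

    if ((ret = db_create(&dbp, dbenv, 0)) != 0)
        goto err;
    /* Databases in a secure environment stay clear unless this flag is set. */
    if ((ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0 ||
        (ret = dbp->open(dbp, NULL, "secure.db", NULL, DB_BTREE, DB_CREATE, 0600)) != 0) {
        (void)dbp->close(dbp, 0);
        goto err;
    }

    *envp = dbenv;
    *dbpp = dbp;
    return (0);

err:
    (void)dbenv->close(dbenv, 0);
    return (ret);
}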

These functions will perform several initialization steps.  We -will allocate crypto_handle for our env handle and set up our function -pointers.  We will allocate space and copy the password into our env -handle password area.  Similar to DB->set_cachesize, calling -DB->set_encrypt -will actually reflect back into the local environment created by DB. -

Lastly, we will add a new flag, DB_OVERWRITE, to the DBENV->remove -method.  The purpose of this flag is to force all of the memory used -by the shared regions to be overwritten before removal.  We will use -rm_overwrite, -a function that overwrites and syncs a file 3 times with varying bit patterns -to really remove a file.  Additionally, this flag will force a sync -of the overwritten regions to disk, if the regions are backed by the file -system.  That way there is no residual information left in the clear -in memory or freed disk blocks.  Although we expect that this flag -will be used by customers using security, primarily, its action is not -dependent on passwords or a secure setup, and so can be used by anyone. -
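A corresponding sketch of the cleanup side, under the same API assumptions (whether the flag is accepted exactly as DB_OVERWRITE depends on the final interface described above): DB_ENV->remove is called on a handle that has not been opened, and the new flag asks that region memory and any file-system backing be overwritten before removal.

#include <db.h>

/* Sketch: remove an environment, overwriting region backing files first. */
int
remove_secure_env(const char *home)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    /* DB_ENV->remove destroys the handle whether or not it succeeds. */
    return (dbenv->remove(dbenv, home, DB_OVERWRITE));
}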

-Initialization of the Environment

-The setup of the security subsystem will be similar to replication initialization -since it is a sort of subsystem, but it does not have its own region.  -When the environment handle is created via db_env_create, we initialize -our set_encrypt method to be the RPC or local version.  Therefore -the DB_ENV structure needs a new pointer: -
    void    *crypto_handle;   /* Security handle */
-The crypto handle will really point to a new __db_cipher structure -that will contain a set of functions and a pointer to the in-memory information -needed by the specific encryption algorithm.  It will look like: -
typedef struct __db_cipher {
-    int      (*init)__P((...));    /* Alg-specific initialization function */
-    int      (*encrypt)__P((...)); /* Alg-specific encryption algorithm */
-    int      (*decrypt)__P((...)); /* Alg-specific decryption function */
-    void      *data;               /* Pointer to alg-specific information (AES_CIPHER) */
-    u_int32_t flags;               /* Cipher flags */
-} DB_CIPHER;

#define DB_MAC_KEY    20    /* Size of the MAC key */

typedef struct __aes_cipher {
    keyInstance    encrypt_ki;   /* Encrypt keyInstance temp. */
    keyInstance    decrypt_ki;   /* Decrypt keyInstance temp. */
    u_int8_t       mac_key[DB_MAC_KEY]; /* MAC key */
    u_int32_t      flags;        /* AES-specific flags */
} AES_CIPHER;
It should be noted that none of these structures have their own mutex.  We hold the environment region locked while we are creating this, but once this is set up, it is read-only forever.

During dbenv->set_encrypt, we set the encryption, decryption and checksumming methods to the appropriate functions based on the flags.  This function will allocate us a crypto handle that we store in the DB_ENV structure just like all the other subsystems.  For now, only AES ciphering functions and SHA1 checksumming functions are supported.  Also we will copy the password into the DB_ENV structure.  We ultimately need to keep the password in the environment's shared memory region, or compare this one against the one that is there if we are joining an existing environment, but we do not have it yet because open has not yet been called.  We will allocate a structure that will be used in initialization and set up the function pointers to point to the algorithm-specific functions.

In the __env_open path, in __db_e_attach, if we are creating the region and the dbenv->passwd field is set, we need to use the length of the password in the initial computation of the environment's size.  This guarantees sufficient space for storing the password in shared memory.  Then we will call a new function, __crypto_region_init, from __env_open to initialize the security region.  If we are the creator, we will allocate space in the shared region to store the password, copy the password into that space, and store its offset in the REGENV structure.  If we are not the creator, we will compare the password stored in the dbenv with the one in shared memory, and additionally compare the ciphering algorithm to the one stored in the shared region; if they do not match, we return an error.  In either case we smash the dbenv password and free it.  Then __crypto_region_init will call the initialization function set up earlier based on the ciphering algorithm specified.  For now we will call __aes_init.  Additionally this function will allocate and set up the per-process state vector for this encryption's IVs.  See Generating the Initialization Vector for a detailed description of the IV and state vector.

In the AES-specific initialization function, __aes_init, we will call __aes_derivekeys in order to fill in the keyInstance and mac_key fields in that structure.  The REGENV structure will have one additional item:

   roff_t         passwd_off;   /* Offset of passwd */

Initializing a Database

During db->set_encrypt, we set the encryption, decryption and checksumming methods to the appropriate functions based on the flags.  Basically, we test that we are not in an existing environment and we haven't called open.  Then we just call through the environment handle to set the password.

Also, we will need to add a flag in the database meta-data page that indicates that the database is encrypted and what its algorithm is.  This will be used when the meta-page is read after reopening a file.  We need this information on the meta-page in order to detect a user opening a secure database without a password.  I propose using the first unused1 byte (renaming it too) in the meta-page for this purpose.

On all pages, the first 64 bytes of data will not be encrypted.  Database meta-pages will be encrypted on the first 512 bytes only.  All meta-page types will have an IV and checksum added within the first 512 bytes, as well as a crypto magic number.  This will expand the size of the meta-page from 256 bytes to 512 bytes.  The page in/out routines, __db_pgin and __db_pgout, know the page type of the page and will apply the 512-byte ciphering to meta-pages.  In __db_pgout, if we have a crypto handle in our (private) environment, we will apply ciphering to either the entire page, or the first 512 bytes if it is a meta-page.  In __db_pgin, we will decrypt the page if we have a crypto handle.
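The page-out decision reduces to a small dispatch through the crypto handle.  The sketch below is only illustrative: the helper name, and the argument list of the encrypt callback, are assumptions layered on the DB_CIPHER structure described above; iv is a fresh 16-byte IV and len is the number of bytes to cipher, already a 16-byte multiple.

static int
pgout_cipher(DB_ENV *dbenv, u_int8_t *data, size_t len, u_int8_t *iv)
{
	DB_CIPHER *db_cipher;

	db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
	if (db_cipher == NULL)
		return (0);	/* Not a secure environment: nothing to do. */

	/* Ciphering is in place; the IV itself is stored unencrypted on the page. */
	return (db_cipher->encrypt(dbenv, db_cipher->data, iv, data, len));
}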

When multiple processes share a database, all must use the same password as the database creator.  Using an existing database requires several conditions to be true.  First, if the creator of the database did not create it with security, then opening it later with security is an error.  Second, if the creator did create it with security, then opening it later without security is an error.  Third, we need to be able to test and check that when another process opens a secure database, the password it provides is the same as the one in use by the creator.

When reading the meta-page, in __db_file_setup, we do not go through the paging functions, but read directly via __os_read.  It is at this point that we will determine if the user is configured correctly.  If the meta-page we read has an IV and checksum, the caller must have a crypto handle.  If the caller has a crypto handle, then the meta-page must have an IV and checksum.  If both of those are true, we test the password.  We compare the unencrypted magic number to the newly-decrypted crypto magic number and, if they are not the same, we report that the user gave us a bad password.
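The check can be pictured as follows; this is only a sketch, the helper name and the -1 error value are placeholders, and the real code would return the usual DB error codes:

static int
check_meta_crypto(DB_ENV *dbenv, int meta_has_iv_chksum,
    u_int32_t magic, u_int32_t crypto_magic)
{
	int have_crypto;

	have_crypto = dbenv->crypto_handle != NULL;

	/* An encrypted database and a crypto handle must come in pairs. */
	if (meta_has_iv_chksum != have_crypto)
		return (-1);		/* Configuration mismatch. */

	if (!have_crypto)
		return (0);		/* Plain database, nothing to verify. */

	/* Wrong password: the decrypted crypto magic won't match the plain magic. */
	return (magic == crypto_magic ? 0 : -1);
}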

On a mostly unrelated topic, even when we go to very large pagesizes, the meta information will still be within a disk sector.  So, after talking it over with Keith and Margo, we determined that unencrypted meta-pages still will not need a checksum.

Encryption and Checksum Routines

These routines are provided to us by Adam Stubblefield at Rice University (astubble@rice.edu).  The functional interfaces are:

__aes_derivekeys(DB_ENV *dbenv,           /* dbenv */
                 u_int8_t *passwd,           /* Password */
                 size_t passwd_len,          /* Length of passwd */
                 u_int8_t *mac_key,          /* 20 byte array to store MAC key */
                 keyInstance *encrypt_key,   /* Encryption key of passwd */
                 keyInstance *decrypt_key);  /* Decryption key of passwd */

This is the only function requiring the textual user password.  From the password, this function generates a key used in the checksum function, __db_chksum.  It also fills in keyInstance structures which are then used in the encryption and decryption routines.  The keyInstance structures must already be allocated.  These will be stored in the AES_CIPHER structure.
__db_chksum(u_int8_t *data,     /* Data to checksum */
            size_t data_len,    /* Length of data */
            u_int8_t *mac_key,  /* 20 byte array from __aes_derivekeys */
            u_int8_t *checksum);  /* 20 byte array to store checksum */

This function generates a checksum on the data given.  This function will do double-duty for users that simply want error detection on their pages.  When users are using encryption, the mac_key will contain the 20-byte key set up in __aes_derivekeys.  If they just want checksumming, then mac_key will be NULL.  According to Adam, we can safely use the first N bytes of the checksum.  So, for seeding the generator for initialization vectors, we'll hash the time and then send in the first 4 bytes for the seed.  I believe we can probably do the same thing for checksumming log records.  We can only use 4 bytes for the checksum in the non-secure case.  So when we want to verify the log checksum we can compute the MAC but just compare the first 4 bytes to the one we read.  All locations where we generate or check log record checksums that currently call __ham_func4 will now call __db_chksum.  I believe there are 5 such locations: __log_put, __log_putr, __log_newfile, __log_rep_put and __txn_force_abort.
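For instance, verifying a log record's checksum could compute the full 20-byte MAC and compare only the stored prefix in the non-secure case.  A sketch, using the __db_chksum interface above; the helper itself is hypothetical:

#include <string.h>

static int
log_chksum_ok(u_int8_t *rec, size_t len, u_int8_t *mac_key,
    u_int8_t *stored, int secure)
{
	u_int8_t mac[DB_MAC_KEY];

	/* mac_key is NULL when only error detection is wanted. */
	__db_chksum(rec, len, secure ? mac_key : NULL, mac);

	/* Non-secure log headers only have room for the first 4 bytes. */
	return (memcmp(mac, stored, secure ? DB_MAC_KEY : 4) == 0);
}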
__aes_encrypt(DB_ENV *dbenv,        /* dbenv */
              keyInstance *key,     /* Password key instance from __aes_derivekeys */
              u_int8_t *iv,         /* Initialization vector */
              u_int8_t *data,       /* Data to encrypt */
              size_t data_len);     /* Length of data to encrypt - 16 byte multiple */

This is the function to encrypt data.  It will be called to encrypt pages and log records.  The key instance is initialized in __aes_derivekeys.  The initialization vector, iv, is the 16 byte random value set up by the Mersenne Twister pseudo-random generator.  Lastly, we pass in a pointer to the data to encrypt and its length in data_len.  The data_len must be a multiple of 16 bytes.  The encryption is done in place, so that when the encryption code returns our encrypted data is in the same location as the original data.
__aes_decrypt(DB_ENV *dbenv,    /* dbenv */
              keyInstance *key, /* Password key instance from __aes_derivekeys */
              u_int8_t *iv,     /* Initialization vector */
              u_int8_t *data,   /* Data to decrypt */
              size_t data_len); /* Length of data to decrypt - 16 byte multiple */

This is the function to decrypt the data.  It is exactly the same as the encryption function except for the action it performs.  All of the arguments and issues are the same.  It also decrypts in place.

Generating the Initialization Vector

Internally, we need to provide a unique initialization vector (IV) of 16 bytes every time we encrypt any data with the same password.  For the IV we are planning on using mt19937, the Mersenne Twister, a random number generator that has a period of 2**19937-1.  This package can be found at http://www.math.keio.ac.jp/~matumoto/emt.html.  Tests show that although it repeats a single integer every once in a while, after several million iterations it doesn't repeat any 4 integers that we'd be stuffing into our 16-byte IV.  We plan on seeding this generator with the time (tv_sec) hashed through SHA1 when we create the environment.  This package uses a global state vector that contains 624 unsigned long integers.  We do not allow a 16-byte IV of zero.  It is simpler just to reject any 4-byte value of 0 and, if we get one, call the generator again and get a different number.  We need to detect holes in files, and if we read an IV of zero that is a simple indication that we need to check for an entire page of zeroes.  The IVs are stored on the page after encryption and are not encrypted themselves, so it is not possible for an entire encrypted page to be read as all zeroes unless it was a hole in a file.  See Holes in Files for more details.

We will not be holding any locks when we need to generate our IV, but we need to protect access to the state vector and the index.  Calls to the MT code will come while encrypting some data in __aes_encrypt.  The MT code will assume that all necessary locks are held by the caller.  We will have per-process state vectors that are set up when a process begins.  That way we minimize contention, and only multi-threaded processes need acquire locks for the IV.  We will keep the state vector and the index in heap memory in the environment handle, with a mutex protecting them for threaded access.  The following will be added to the DB_ENV structure:

    DB_MUTEX    *mt_mutexp;   /* Mersenne Twister mutex */
    int         *mti;         /* MT index */
    u_long      *mt;          /* MT state vector */

This portion of the environment will be initialized at the end of __dbenv_open, right after we initialize the other mutex for the dblist.  When we allocate the space, we will generate our initial state vector.  If we are multi-threaded we'll allocate and initialize our mutex also.

We need to make changes to the MT code to make it work in our namespace and to take a pointer to the location of the state vector and the index.  There will be a wrapper function __db_generate_iv that DB will call, and it will call the appropriate MT function.  I am also going to change the default seed to use a hashed time instead of a hard-coded value.  I have looked at other implementations of the MT code available on the web site.  The C++ version does a hash on the current time.  I will modify our MT code to seed with the hashed time as well.  That way the code to seed is contained within the MT code and we can just write the wrapper to get an IV.  We will not be changing the core computational code of MT.
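A sketch of the wrapper, using the DB_ENV members listed above; the locking macro usage is schematic and __db_genrand stands for the namespaced MT generator:

#define	DB_IV_BYTES	16		/* 16-byte IV, per the AES interface above */

int
__db_generate_iv(DB_ENV *dbenv, u_int32_t *iv)	/* sketch */
{
	int i, n;

	n = DB_IV_BYTES / sizeof(u_int32_t);	/* four 32-bit words */

	/* Only threaded processes actually contend on this mutex. */
	MUTEX_LOCK(dbenv, dbenv->mt_mutexp);
	for (i = 0; i < n; i++)
		do {
			iv[i] = (u_int32_t)__db_genrand(dbenv);
		} while (iv[i] == 0);		/* never allow a zero word */
	MUTEX_UNLOCK(dbenv, dbenv->mt_mutexp);
	return (0);
}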

DB Internal Issues

When do we Cipher?

All of the page ciphering is done in the __db_pgin/__db_pgout functions.  We will encrypt after the method-specific function on page-out and decrypt before the method-specific function on page-in.  We do not hold any locks when entering these functions.  We determine that we need to cipher based on the existence of the encryption flag in the dbp.

For ciphering log records, the encryption will be done as the first thing (or a new wrapper) in __log_put.  See Log Record Encryption for those details.

Page Changes

The checksum and IV values will be stored prior to the first index of the page.  We have a new P_INP macro that replaces use of inp[X] in the code.  This macro takes a dbp as an argument and determines where our first index is based on whether we have DB_AM_CHKSUM and DB_AM_ENCRYPT set.  If neither is set, then our first index is where it always was.  If just checksumming is set, then we reserve a 4-byte checksum.  If encryption is set, then we reserve 36 bytes for our checksum/IV as well as some space to get proper alignment to encrypt on a 16-byte boundary.
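A sketch of what P_INP might compute, with the reserved byte counts taken from the text above; PAGE_HDR_SIZE stands in for the existing page-header size and the alignment padding for the 16-byte encryption boundary is omitted here:

#define	CHKSUM_RESERVE		4	/* checksum only */
#define	CRYPTO_RESERVE		36	/* 20-byte checksum + 16-byte IV */

#define	P_INP(dbp, pg)							\
	((db_indx_t *)((u_int8_t *)(pg) + PAGE_HDR_SIZE +		\
	    (F_ISSET(dbp, DB_AM_ENCRYPT) ? CRYPTO_RESERVE :		\
	    (F_ISSET(dbp, DB_AM_CHKSUM) ? CHKSUM_RESERVE : 0))))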

Since several paging macros use inp[X] in them, those macros must now take a dbp.  There are a lot of changes to make all the necessary paging macros take a dbp, although these changes are trivial in nature.

Also, there is a new function, __db_chk_meta, to perform checksumming and decryption checking on meta-pages specifically.  This function is where we check that the database algorithm matches what the user gave (or, if they set DB_CIPHER_ANY, we set it), and where we perform other encryption-related testing for bad combinations of what is in the file versus what is in the user structures.

Verification

The verification code will also need to be updated to deal with secure pages.  Basically, when the verification code reads in the meta-page it will call __db_chk_meta to perform any checksumming and decryption.

Holes in Files

Holes in files will be dealt with rather simply.  We need to be able to distinguish reading a hole in a file from an encrypted page that happened to encrypt to all zeroes.  If we read a hole in a file, we do not want to send that empty page through the decryption routine.  This can be determined simply, without incurring the performance penalty of comparing every byte on a page on every read until we get a non-zero byte.
In this case, the __db_pgin function is given an invalid page, one whose type is P_INVALID.  So, if the page type, which is always unencrypted, is P_INVALID, then we do not perform any checksum verification or decryption.
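In other words, the page-in path can skip the crypto work with a check of the unencrypted type byte, roughly as below; the helper is illustrative only:

/* Sketch: a hole reads back as a P_INVALID page of zeroes, so skip crypto. */
static int
pgin_needs_crypto(DB_ENV *dbenv, u_int8_t page_type)
{
	if (dbenv->crypto_handle == NULL)
		return (0);	/* Environment is not secure. */
	if (page_type == P_INVALID)
		return (0);	/* Hole in the file: nothing to verify. */
	return (1);
}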

Errors and Recovery

Dealing with a checksum error is tricky.  Ultimately, if a checksum error occurs it is extremely likely that the user must do catastrophic recovery.  There is no failure return other than DB_RUNRECOVERY for indicating that the user should run catastrophic recovery.  We do not want to add a new error return for applications to check, because a lot of applications already look for and deal with DB_RUNRECOVERY as an error condition and we want to fit ourselves into that application model.  We already indicate to the user that when they get that error, they need to run recovery.  If recovery fails, then they need to run catastrophic recovery.  We need to get ourselves to the point where users will run catastrophic recovery.

If we get a checksum error, then we need to log a message stating a checksum error occurred on page N.  In __db_pgin, we can check if logging is on in the environment.  If so, we want to log the message.

When the application gets the DB_RUNRECOVERY error, they'll have to shut down their application and run recovery.  When the recovery encounters the record indicating checksum failure, then normal recovery will fail and the user will have to perform catastrophic recovery.  When catastrophic recovery encounters that record, it will simply ignore it.

Log Record Encryption

Log records will be ciphered.  It might make sense to wrap __log_put to encrypt the DBT we send down.  The __log_put function is where the checksum is computed before acquiring the region lock.  But also this function is where we call __rep_send_message to send the DBT to the replication clients.  Therefore, we need the DBT to be encrypted prior to there.  We also need it encrypted before checksumming.  I think __log_put will become __log_put_internal, and the new __log_put will encrypt if needed and then call __log_put_internal (the function formerly known as __log_put).  Log records are kept in a shared memory region buffer prior to going out to disk.  Records in the buffer will be encrypted.  No locks are held at the time we will need to encrypt.

On reading the log, via log cursors, the log code stores log records in the log buffer.  Records in that buffer will be encrypted, so decryption will occur whether we are returning records from the buffer or returning log records directly from the disk.  Current checksum checking is done in __log_get_c_int.  Decryption will be done after the checksum is checked.

There are currently two nasty issues with encrypted log records.  The first is that __txn_force_abort overwrites a commit record in the log buffer with an abort record.  Well, our log buffer will be encrypted.  Therefore, __txn_force_abort is going to need to do encryption of its new record.  This can be accomplished by sending in the dbenv handle to the function.  It is available to us in __log_flush_commit and we can just pass it in.  I don't like putting log encryption in the txn code, but the layering violation is already there.

The second issue is that the encryption code requires data that is a multiple of 16 bytes and log record lengths are variable.  We will need to pad log records to meet the requirement.  Since the callers of __log_put set up the given DBT, it is a logical place to pad if necessary.  We will modify the gen_rec.awk script to have all of the generated logging functions pad for us if we have a crypto handle.  This padding will also expand the size of log files.  Anyone calling log_put and using security from the application will have to pad on their own or it will return an error.
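The padding itself is a simple round-up to the AES block size.  A sketch of what the generated logging functions would do; the helper name and the zero fill are illustrative:

#include <string.h>

#define	DB_AES_BLOCK	16

/* Round a log record length up to a multiple of the AES block size. */
static size_t
log_crypto_pad(u_int8_t *buf, size_t len, size_t bufsize)
{
	size_t padded;

	padded = (len + DB_AES_BLOCK - 1) & ~((size_t)DB_AES_BLOCK - 1);
	if (padded > bufsize)
		return (0);			/* caller must allocate enough space */
	memset(buf + len, 0, padded - len);	/* zero the pad bytes */
	return (padded);
}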

When ciphering the log file, we will need a different header than the current one.  The current header only has space for a 4 byte checksum.  Our secure header will need space for the 16 byte IV and 20 byte checksum.  This will blow up our log files when running securely since every single log record header will now consume 32 additional bytes.  I believe that the log header does not need to be encrypted.  It contains an offset, a length and our IV and checksum.  Our IV and checksum are never encrypted.  I don't believe there to be any risk in having the offset and length in the clear.

I would prefer not to have two types of log headers that are incompatible with each other.  It is not acceptable to increase the log headers of all users from 12 bytes to 44 bytes.  Such a change would also make log files incompatible with earlier releases.  Even worse, the cksum field of the header is in between the offset and len.  It would be really convenient if we could have just made a bigger cksum portion without affecting the location of the other fields.  Oh well.  Most customers will not be using encryption and we won't make them pay the price of the expanded header.  Keith indicates that the log file format is changing with the next release, so I will move the cksum field so it can at least be overlaid.

One method around this would be to have a single internal header that contains all the information both mechanisms need, but when we write out the header we choose which pieces to write.  By appending the security information to the end of the existing structure, and adding a size field, we can modify a few places to use the size field to write out only the current first 12 bytes, or the entire security header, as needed.
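One way to picture that combined header; the field names, ordering and types here are illustrative, not a final layout:

typedef struct __log_hdr_sketch {
	u_int32_t	prev;		/* offset of the previous record */
	u_int32_t	len;		/* length of the record */
	u_int8_t	chksum[20];	/* only the first 4 bytes written when not secure */
	u_int8_t	iv[16];		/* written only when running securely */
	u_int32_t	size;		/* how many header bytes actually go to disk */
} LOG_HDR_SKETCH;

/* Non-secure environments would write only the first 12 bytes (prev, len,
 * and 4 bytes of chksum); secure environments would write the whole header. */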

Replication

Replication clients are going to need to start all of their individual environment handles with the same password.  The log records are going to be sent to the clients decrypted, and the clients will have to encrypt them on their way to the client log files.  We cannot send encrypted log records to clients.  The reason is that the checksum and IV are stored in the log header and the master only sends the log record itself to the client.  Therefore, the client has no way to decrypt a log record from the master.  Therefore, anyone wanting to use truly secure replication is going to have to have a secure transport mechanism.  By not encrypting records, clients can theoretically have different passwords and DB won't care.

On the master side we must copy the DBT sent in.  We encrypt the original and send the clear record to the clients.  On the client side, support for encryption is added into __log_rep_put.

Sharing the Environment

When multiple processes join the environment, all must use the same password as the creator.

Joining an existing environment requires several conditions to be true.  First, if the creator of the environment did not create it with security, then joining later with security is an error.  Second, if the creator did create it with security, then joining later without security is an error.  Third, we need to be able to test and check that when another process joins a secure environment, the password it provides is the same as the one in use by the creator.

The first two scenarios should be fairly trivial to determine: if we aren't creating the environment, we can compare what is there with what we have.  In the third case, the __crypto_region_init function will see that the environment region has a valid passwd_off and we'll then compare that password to the one we have in our dbenv handle.  In any case we'll smash the dbenv handle's passwd and free that memory before returning whether we have a password match or not.
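A sketch of the joiner-side comparison, assuming the password is stored NUL-terminated at passwd_off in the region; the helper name, the region_base argument and the -1 failure value are placeholders:

#include <stdlib.h>
#include <string.h>

static int
crypto_passwd_check(DB_ENV *dbenv, REGENV *renv, void *region_base)
{
	char *shared_passwd;
	int ret;

	/* R_ADDR-style conversion of the region offset to an address. */
	shared_passwd = (char *)region_base + renv->passwd_off;
	ret = strcmp(dbenv->passwd, shared_passwd) == 0 ? 0 : -1;

	/* Smash and free our copy whether or not it matched. */
	memset(dbenv->passwd, 0, strlen(dbenv->passwd));
	free(dbenv->passwd);	/* the real code would use the __os_* allocator */
	dbenv->passwd = NULL;

	return (ret);
}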

We need to store the passwords themselves in the region because multiple calls to the __aes_derivekeys function with the same password yield different keyInstance contents.  Therefore we don't have any way to check passwords other than retaining and comparing the actual passwords.

Other APIs

All of the other APIs will need interface enhancements to support the new security methods.  The Java and C++ interfaces will likely be done by Michael Cahill, and Sue will implement the Tcl and RPC changes.  Tcl will need the changes for testing purposes, but the interface should be public, not test-only.  RPC should fully support security.  The biggest risk that I can see is that the client will send the password to the server in the clear.  Anyone sniffing the wires or running tcpdump or other packet-grabbing code could grab that.  Someone really interested in using security over RPC probably ought to add authentication and other measures to the RPC server as well.

Utilities

All utilities should take a -P flag to specify a password for the environment or database.  Those that take both an env and a database might need something more to distinguish between env passwds and db passwds.  Here is what we do for each utility:
  • berkeley_db_svc - Needs -P after each -h specified.
  • db_archive - Needs -P if the env is encrypted.
  • db_checkpoint - Needs -P if the env is encrypted.
  • db_deadlock - No changes.
  • db_dump - Needs -P if the env or database is encrypted.
  • db_load - Needs -P if the env or database is encrypted.
  • db_printlog - Needs -P if the env is encrypted.
  • db_recover - Needs -P if the env is encrypted.
  • db_stat - Needs -P if the env or database is encrypted.
  • db_upgrade - Needs -P if the env or database is encrypted.
  • db_verify - Needs -P if the env or database is encrypted.

Testing

All testing should be able to be accomplished via Tcl.  The following tests (and probably others I haven't thought of yet) should be performed:
  • Basic functionality - basically a test001 but encrypted, without an env.
  • Basic functionality, w/ env - like the previous test but with an env.
  • Basic functionality, multiple processes - like the first test, but make sure others can correctly join.
  • Basic functionality, mult. processes - like the above test, but initialize/close the environment/database first so that the next test processes are all joiners of an existing env, but the creator no longer exists and the shared region must be opened.
  • Recovery test - Run recovery over an encrypted environment.
  • Subdb test - Run with subdbs that are encrypted.
  • Utility test - Verify the new options to all the utilities.
  • Error handling - Test the basic setup errors for both env's and databases with multiple processes.  They are:
    1. Attempt to set a NULL or zero-length passwd.
    2. Create Env w/ security and attempt to create database w/ its own password.
    3. Env/DB creates with security.  Proc2 joins without - should get an error.
    4. Env/DB creates without security.  Proc2 joins with - should get an error.
    5. Env/DB creates with security.  Proc2 joins with different password - should get an error.
    6. Env/DB creates with security.  Closes.  Proc2 reopens with different password - should get an error.
    7. Env/DB creates with security.  Closes.  Tcl overwrites a page of the database with garbage.  Proc2 reopens with the correct password.  Code should detect checksum error.
    8. Env/DB creates with security.  Open a 2nd identical DB with a different password.  Put the exact same data into both databases.  Close.  Overwrite the identical page of DB1 with the one from DB2.  Reopen the database with the correct DB1 password.  Code should detect an encryption error on that page.

Risks

There are several holes in this design.  It is important to document them clearly.

The first is that all of the pages are stored in memory and possibly -the file system in the clear.  The password is stored in the shared -data regions in the clear.  Therefore if an attacker can read the -process memory, they can do whatever they want.  If the attacker can -read system memory or swap they can access the data as well.  Since -everything in the shared data regions (with the exception of the buffered -log) will be in the clear, it is important to realize that file backed -regions will be written in the clear, including the portion of the regions -containing passwords.  We recommend to users that they use system -memory instead of file backed shared memory. - - diff --git a/storage/bdb/crypto/mersenne/mt19937db.c b/storage/bdb/crypto/mersenne/mt19937db.c deleted file mode 100644 index 1dad5f6ad12..00000000000 --- a/storage/bdb/crypto/mersenne/mt19937db.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * $Id: mt19937db.c,v 12.1 2005/07/20 16:50:57 bostic Exp $ - */ -#include "db_config.h" - -#include "db_int.h" -#include "dbinc/crypto.h" -#include "dbinc/hmac.h" - -/* A C-program for MT19937: Integer version (1999/10/28) */ -/* genrand() generates one pseudorandom unsigned integer (32bit) */ -/* which is uniformly distributed among 0 to 2^32-1 for each */ -/* call. sgenrand(seed) sets initial values to the working area */ -/* of 624 words. Before genrand(), sgenrand(seed) must be */ -/* called once. (seed is any 32-bit integer.) */ -/* Coded by Takuji Nishimura, considering the suggestions by */ -/* Topher Cooper and Marc Rieffel in July-Aug. 1997. */ - -/* This library is free software under the Artistic license: */ -/* see the file COPYING distributed together with this code. */ -/* For the verification of the code, its output sequence file */ -/* mt19937int.out is attached (2001/4/2) */ - -/* Copyright (C) 1997, 1999 Makoto Matsumoto and Takuji Nishimura. */ -/* Any feedback is very welcome. For any question, comments, */ -/* see http://www.math.keio.ac.jp/matumoto/emt.html or email */ -/* matumoto@math.keio.ac.jp */ - -/* REFERENCE */ -/* M. Matsumoto and T. Nishimura, */ -/* "Mersenne Twister: A 623-Dimensionally Equidistributed Uniform */ -/* Pseudo-Random Number Generator", */ -/* ACM Transactions on Modeling and Computer Simulation, */ -/* Vol. 8, No. 1, January 1998, pp 3--30. 
*/ - -/* Period parameters */ -#define N 624 -#define M 397 -#define MATRIX_A 0x9908b0df /* constant vector a */ -#define UPPER_MASK 0x80000000 /* most significant w-r bits */ -#define LOWER_MASK 0x7fffffff /* least significant r bits */ - -/* Tempering parameters */ -#define TEMPERING_MASK_B 0x9d2c5680 -#define TEMPERING_MASK_C 0xefc60000 -#define TEMPERING_SHIFT_U(y) (y >> 11) -#define TEMPERING_SHIFT_S(y) (y << 7) -#define TEMPERING_SHIFT_T(y) (y << 15) -#define TEMPERING_SHIFT_L(y) (y >> 18) - -static void __db_sgenrand __P((unsigned long, unsigned long *, int *)); -#ifdef NOT_USED -static void __db_lsgenrand __P((unsigned long *, unsigned long *, int *)); -#endif -static unsigned long __db_genrand __P((DB_ENV *)); - -/* - * __db_generate_iv -- - * Generate an initialization vector (IV) - * - * PUBLIC: int __db_generate_iv __P((DB_ENV *, u_int32_t *)); - */ -int -__db_generate_iv(dbenv, iv) - DB_ENV *dbenv; - u_int32_t *iv; -{ - int i, n, ret; - - ret = 0; - n = DB_IV_BYTES / sizeof(u_int32_t); - MUTEX_LOCK(dbenv, dbenv->mtx_mt); - if (dbenv->mt == NULL) { - if ((ret = __os_calloc(dbenv, 1, N*sizeof(unsigned long), - &dbenv->mt)) != 0) - return (ret); - /* mti==N+1 means mt[N] is not initialized */ - dbenv->mti = N + 1; - } - for (i = 0; i < n; i++) { - /* - * We do not allow 0. If we get one just try again. - */ - do { - iv[i] = (u_int32_t)__db_genrand(dbenv); - } while (iv[i] == 0); - } - - MUTEX_UNLOCK(dbenv, dbenv->mtx_mt); - return (0); -} - -/* Initializing the array with a seed */ -static void -__db_sgenrand(seed, mt, mtip) - unsigned long seed; - unsigned long mt[]; - int *mtip; -{ - int i; - - DB_ASSERT(seed != 0); - for (i=0;i> 16; - seed = 69069 * seed + 1; - } - *mtip = N; -} - -#ifdef NOT_USED -/* Initialization by "sgenrand()" is an example. Theoretically, */ -/* there are 2^19937-1 possible states as an intial state. */ -/* This function allows to choose any of 2^19937-1 ones. */ -/* Essential bits in "seed_array[]" is following 19937 bits: */ -/* (seed_array[0]&UPPER_MASK), seed_array[1], ..., seed_array[N-1]. */ -/* (seed_array[0]&LOWER_MASK) is discarded. */ -/* Theoretically, */ -/* (seed_array[0]&UPPER_MASK), seed_array[1], ..., seed_array[N-1] */ -/* can take any values except all zeros. */ -static void -__db_lsgenrand(seed_array, mt, mtip) - unsigned long seed_array[]; - unsigned long mt[]; - int *mtip; - /* the length of seed_array[] must be at least N */ -{ - int i; - - for (i=0;imtx_mt locked. - */ - if (dbenv->mti >= N) { /* generate N words at one time */ - int kk; - - if (dbenv->mti == N+1) { /* if sgenrand() has not been called, */ - /* - * Seed the generator with the hashed time. The __db_mac - * function will return 4 bytes if we don't send in a key. 
- */ - do { - __os_clock(dbenv, &secs, &usecs); - __db_chksum((u_int8_t *)&secs, sizeof(secs), NULL, - (u_int8_t *)&seed); - } while (seed == 0); - __db_sgenrand((long)seed, dbenv->mt, &dbenv->mti); - } - - for (kk=0;kkmt[kk]&UPPER_MASK)|(dbenv->mt[kk+1]&LOWER_MASK); - dbenv->mt[kk] = dbenv->mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1]; - } - for (;kkmt[kk]&UPPER_MASK)|(dbenv->mt[kk+1]&LOWER_MASK); - dbenv->mt[kk] = dbenv->mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1]; - } - y = (dbenv->mt[N-1]&UPPER_MASK)|(dbenv->mt[0]&LOWER_MASK); - dbenv->mt[N-1] = dbenv->mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1]; - - dbenv->mti = 0; - } - - y = dbenv->mt[dbenv->mti++]; - y ^= TEMPERING_SHIFT_U(y); - y ^= TEMPERING_SHIFT_S(y) & TEMPERING_MASK_B; - y ^= TEMPERING_SHIFT_T(y) & TEMPERING_MASK_C; - y ^= TEMPERING_SHIFT_L(y); - - return y; -} diff --git a/storage/bdb/crypto/rijndael/rijndael-alg-fst.c b/storage/bdb/crypto/rijndael/rijndael-alg-fst.c deleted file mode 100644 index 4a251606d28..00000000000 --- a/storage/bdb/crypto/rijndael/rijndael-alg-fst.c +++ /dev/null @@ -1,1466 +0,0 @@ -/** - * rijndael-alg-fst.c - * - * @version 3.0 (December 2000) - * - * Optimised ANSI C code for the Rijndael cipher (now AES) - * - * @author Vincent Rijmen - * @author Antoon Bosselaers - * @author Paulo Barreto - * - * This code is hereby placed in the public domain. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#include "db_config.h" - -#include "db_int.h" -#include "dbinc/crypto.h" - -#include "crypto/rijndael/rijndael-alg-fst.h" - -/* -Te0[x] = S [x].[02, 01, 01, 03]; -Te1[x] = S [x].[03, 02, 01, 01]; -Te2[x] = S [x].[01, 03, 02, 01]; -Te3[x] = S [x].[01, 01, 03, 02]; -Te4[x] = S [x].[01, 01, 01, 01]; - -Td0[x] = Si[x].[0e, 09, 0d, 0b]; -Td1[x] = Si[x].[0b, 0e, 09, 0d]; -Td2[x] = Si[x].[0d, 0b, 0e, 09]; -Td3[x] = Si[x].[09, 0d, 0b, 0e]; -Td4[x] = Si[x].[01, 01, 01, 01]; -*/ - -static const u32 Te0[256] = { - (u_int)0xc66363a5, (u_int)0xf87c7c84, (u_int)0xee777799, (u_int)0xf67b7b8d, - (u_int)0xfff2f20d, (u_int)0xd66b6bbd, (u_int)0xde6f6fb1, (u_int)0x91c5c554, - (u_int)0x60303050, (u_int)0x02010103, (u_int)0xce6767a9, (u_int)0x562b2b7d, - (u_int)0xe7fefe19, (u_int)0xb5d7d762, (u_int)0x4dababe6, (u_int)0xec76769a, - (u_int)0x8fcaca45, (u_int)0x1f82829d, (u_int)0x89c9c940, (u_int)0xfa7d7d87, - (u_int)0xeffafa15, (u_int)0xb25959eb, (u_int)0x8e4747c9, (u_int)0xfbf0f00b, - (u_int)0x41adadec, (u_int)0xb3d4d467, (u_int)0x5fa2a2fd, (u_int)0x45afafea, - (u_int)0x239c9cbf, (u_int)0x53a4a4f7, (u_int)0xe4727296, (u_int)0x9bc0c05b, - (u_int)0x75b7b7c2, (u_int)0xe1fdfd1c, (u_int)0x3d9393ae, (u_int)0x4c26266a, - (u_int)0x6c36365a, (u_int)0x7e3f3f41, (u_int)0xf5f7f702, (u_int)0x83cccc4f, - (u_int)0x6834345c, (u_int)0x51a5a5f4, (u_int)0xd1e5e534, (u_int)0xf9f1f108, - (u_int)0xe2717193, (u_int)0xabd8d873, (u_int)0x62313153, (u_int)0x2a15153f, - (u_int)0x0804040c, (u_int)0x95c7c752, (u_int)0x46232365, (u_int)0x9dc3c35e, - (u_int)0x30181828, (u_int)0x379696a1, (u_int)0x0a05050f, (u_int)0x2f9a9ab5, - (u_int)0x0e070709, (u_int)0x24121236, (u_int)0x1b80809b, (u_int)0xdfe2e23d, - (u_int)0xcdebeb26, (u_int)0x4e272769, (u_int)0x7fb2b2cd, (u_int)0xea75759f, - (u_int)0x1209091b, (u_int)0x1d83839e, (u_int)0x582c2c74, (u_int)0x341a1a2e, - (u_int)0x361b1b2d, (u_int)0xdc6e6eb2, (u_int)0xb45a5aee, (u_int)0x5ba0a0fb, - (u_int)0xa45252f6, (u_int)0x763b3b4d, (u_int)0xb7d6d661, (u_int)0x7db3b3ce, - (u_int)0x5229297b, (u_int)0xdde3e33e, (u_int)0x5e2f2f71, (u_int)0x13848497, - (u_int)0xa65353f5, (u_int)0xb9d1d168, (u_int)0x00000000, (u_int)0xc1eded2c, - (u_int)0x40202060, (u_int)0xe3fcfc1f, (u_int)0x79b1b1c8, (u_int)0xb65b5bed, - (u_int)0xd46a6abe, (u_int)0x8dcbcb46, (u_int)0x67bebed9, (u_int)0x7239394b, - (u_int)0x944a4ade, (u_int)0x984c4cd4, (u_int)0xb05858e8, (u_int)0x85cfcf4a, - (u_int)0xbbd0d06b, (u_int)0xc5efef2a, (u_int)0x4faaaae5, (u_int)0xedfbfb16, - (u_int)0x864343c5, (u_int)0x9a4d4dd7, (u_int)0x66333355, (u_int)0x11858594, - (u_int)0x8a4545cf, (u_int)0xe9f9f910, (u_int)0x04020206, (u_int)0xfe7f7f81, - (u_int)0xa05050f0, (u_int)0x783c3c44, (u_int)0x259f9fba, (u_int)0x4ba8a8e3, - (u_int)0xa25151f3, (u_int)0x5da3a3fe, (u_int)0x804040c0, (u_int)0x058f8f8a, - (u_int)0x3f9292ad, (u_int)0x219d9dbc, (u_int)0x70383848, (u_int)0xf1f5f504, - (u_int)0x63bcbcdf, (u_int)0x77b6b6c1, (u_int)0xafdada75, (u_int)0x42212163, - (u_int)0x20101030, (u_int)0xe5ffff1a, (u_int)0xfdf3f30e, (u_int)0xbfd2d26d, - (u_int)0x81cdcd4c, (u_int)0x180c0c14, (u_int)0x26131335, (u_int)0xc3ecec2f, - (u_int)0xbe5f5fe1, (u_int)0x359797a2, (u_int)0x884444cc, (u_int)0x2e171739, - (u_int)0x93c4c457, (u_int)0x55a7a7f2, (u_int)0xfc7e7e82, (u_int)0x7a3d3d47, - (u_int)0xc86464ac, (u_int)0xba5d5de7, (u_int)0x3219192b, (u_int)0xe6737395, - (u_int)0xc06060a0, (u_int)0x19818198, (u_int)0x9e4f4fd1, (u_int)0xa3dcdc7f, - (u_int)0x44222266, (u_int)0x542a2a7e, (u_int)0x3b9090ab, (u_int)0x0b888883, - (u_int)0x8c4646ca, (u_int)0xc7eeee29, (u_int)0x6bb8b8d3, (u_int)0x2814143c, 
- (u_int)0xa7dede79, (u_int)0xbc5e5ee2, (u_int)0x160b0b1d, (u_int)0xaddbdb76, - (u_int)0xdbe0e03b, (u_int)0x64323256, (u_int)0x743a3a4e, (u_int)0x140a0a1e, - (u_int)0x924949db, (u_int)0x0c06060a, (u_int)0x4824246c, (u_int)0xb85c5ce4, - (u_int)0x9fc2c25d, (u_int)0xbdd3d36e, (u_int)0x43acacef, (u_int)0xc46262a6, - (u_int)0x399191a8, (u_int)0x319595a4, (u_int)0xd3e4e437, (u_int)0xf279798b, - (u_int)0xd5e7e732, (u_int)0x8bc8c843, (u_int)0x6e373759, (u_int)0xda6d6db7, - (u_int)0x018d8d8c, (u_int)0xb1d5d564, (u_int)0x9c4e4ed2, (u_int)0x49a9a9e0, - (u_int)0xd86c6cb4, (u_int)0xac5656fa, (u_int)0xf3f4f407, (u_int)0xcfeaea25, - (u_int)0xca6565af, (u_int)0xf47a7a8e, (u_int)0x47aeaee9, (u_int)0x10080818, - (u_int)0x6fbabad5, (u_int)0xf0787888, (u_int)0x4a25256f, (u_int)0x5c2e2e72, - (u_int)0x381c1c24, (u_int)0x57a6a6f1, (u_int)0x73b4b4c7, (u_int)0x97c6c651, - (u_int)0xcbe8e823, (u_int)0xa1dddd7c, (u_int)0xe874749c, (u_int)0x3e1f1f21, - (u_int)0x964b4bdd, (u_int)0x61bdbddc, (u_int)0x0d8b8b86, (u_int)0x0f8a8a85, - (u_int)0xe0707090, (u_int)0x7c3e3e42, (u_int)0x71b5b5c4, (u_int)0xcc6666aa, - (u_int)0x904848d8, (u_int)0x06030305, (u_int)0xf7f6f601, (u_int)0x1c0e0e12, - (u_int)0xc26161a3, (u_int)0x6a35355f, (u_int)0xae5757f9, (u_int)0x69b9b9d0, - (u_int)0x17868691, (u_int)0x99c1c158, (u_int)0x3a1d1d27, (u_int)0x279e9eb9, - (u_int)0xd9e1e138, (u_int)0xebf8f813, (u_int)0x2b9898b3, (u_int)0x22111133, - (u_int)0xd26969bb, (u_int)0xa9d9d970, (u_int)0x078e8e89, (u_int)0x339494a7, - (u_int)0x2d9b9bb6, (u_int)0x3c1e1e22, (u_int)0x15878792, (u_int)0xc9e9e920, - (u_int)0x87cece49, (u_int)0xaa5555ff, (u_int)0x50282878, (u_int)0xa5dfdf7a, - (u_int)0x038c8c8f, (u_int)0x59a1a1f8, (u_int)0x09898980, (u_int)0x1a0d0d17, - (u_int)0x65bfbfda, (u_int)0xd7e6e631, (u_int)0x844242c6, (u_int)0xd06868b8, - (u_int)0x824141c3, (u_int)0x299999b0, (u_int)0x5a2d2d77, (u_int)0x1e0f0f11, - (u_int)0x7bb0b0cb, (u_int)0xa85454fc, (u_int)0x6dbbbbd6, (u_int)0x2c16163a, -}; -static const u32 Te1[256] = { - (u_int)0xa5c66363, (u_int)0x84f87c7c, (u_int)0x99ee7777, (u_int)0x8df67b7b, - (u_int)0x0dfff2f2, (u_int)0xbdd66b6b, (u_int)0xb1de6f6f, (u_int)0x5491c5c5, - (u_int)0x50603030, (u_int)0x03020101, (u_int)0xa9ce6767, (u_int)0x7d562b2b, - (u_int)0x19e7fefe, (u_int)0x62b5d7d7, (u_int)0xe64dabab, (u_int)0x9aec7676, - (u_int)0x458fcaca, (u_int)0x9d1f8282, (u_int)0x4089c9c9, (u_int)0x87fa7d7d, - (u_int)0x15effafa, (u_int)0xebb25959, (u_int)0xc98e4747, (u_int)0x0bfbf0f0, - (u_int)0xec41adad, (u_int)0x67b3d4d4, (u_int)0xfd5fa2a2, (u_int)0xea45afaf, - (u_int)0xbf239c9c, (u_int)0xf753a4a4, (u_int)0x96e47272, (u_int)0x5b9bc0c0, - (u_int)0xc275b7b7, (u_int)0x1ce1fdfd, (u_int)0xae3d9393, (u_int)0x6a4c2626, - (u_int)0x5a6c3636, (u_int)0x417e3f3f, (u_int)0x02f5f7f7, (u_int)0x4f83cccc, - (u_int)0x5c683434, (u_int)0xf451a5a5, (u_int)0x34d1e5e5, (u_int)0x08f9f1f1, - (u_int)0x93e27171, (u_int)0x73abd8d8, (u_int)0x53623131, (u_int)0x3f2a1515, - (u_int)0x0c080404, (u_int)0x5295c7c7, (u_int)0x65462323, (u_int)0x5e9dc3c3, - (u_int)0x28301818, (u_int)0xa1379696, (u_int)0x0f0a0505, (u_int)0xb52f9a9a, - (u_int)0x090e0707, (u_int)0x36241212, (u_int)0x9b1b8080, (u_int)0x3ddfe2e2, - (u_int)0x26cdebeb, (u_int)0x694e2727, (u_int)0xcd7fb2b2, (u_int)0x9fea7575, - (u_int)0x1b120909, (u_int)0x9e1d8383, (u_int)0x74582c2c, (u_int)0x2e341a1a, - (u_int)0x2d361b1b, (u_int)0xb2dc6e6e, (u_int)0xeeb45a5a, (u_int)0xfb5ba0a0, - (u_int)0xf6a45252, (u_int)0x4d763b3b, (u_int)0x61b7d6d6, (u_int)0xce7db3b3, - (u_int)0x7b522929, (u_int)0x3edde3e3, (u_int)0x715e2f2f, (u_int)0x97138484, - 
(u_int)0xf5a65353, (u_int)0x68b9d1d1, (u_int)0x00000000, (u_int)0x2cc1eded, - (u_int)0x60402020, (u_int)0x1fe3fcfc, (u_int)0xc879b1b1, (u_int)0xedb65b5b, - (u_int)0xbed46a6a, (u_int)0x468dcbcb, (u_int)0xd967bebe, (u_int)0x4b723939, - (u_int)0xde944a4a, (u_int)0xd4984c4c, (u_int)0xe8b05858, (u_int)0x4a85cfcf, - (u_int)0x6bbbd0d0, (u_int)0x2ac5efef, (u_int)0xe54faaaa, (u_int)0x16edfbfb, - (u_int)0xc5864343, (u_int)0xd79a4d4d, (u_int)0x55663333, (u_int)0x94118585, - (u_int)0xcf8a4545, (u_int)0x10e9f9f9, (u_int)0x06040202, (u_int)0x81fe7f7f, - (u_int)0xf0a05050, (u_int)0x44783c3c, (u_int)0xba259f9f, (u_int)0xe34ba8a8, - (u_int)0xf3a25151, (u_int)0xfe5da3a3, (u_int)0xc0804040, (u_int)0x8a058f8f, - (u_int)0xad3f9292, (u_int)0xbc219d9d, (u_int)0x48703838, (u_int)0x04f1f5f5, - (u_int)0xdf63bcbc, (u_int)0xc177b6b6, (u_int)0x75afdada, (u_int)0x63422121, - (u_int)0x30201010, (u_int)0x1ae5ffff, (u_int)0x0efdf3f3, (u_int)0x6dbfd2d2, - (u_int)0x4c81cdcd, (u_int)0x14180c0c, (u_int)0x35261313, (u_int)0x2fc3ecec, - (u_int)0xe1be5f5f, (u_int)0xa2359797, (u_int)0xcc884444, (u_int)0x392e1717, - (u_int)0x5793c4c4, (u_int)0xf255a7a7, (u_int)0x82fc7e7e, (u_int)0x477a3d3d, - (u_int)0xacc86464, (u_int)0xe7ba5d5d, (u_int)0x2b321919, (u_int)0x95e67373, - (u_int)0xa0c06060, (u_int)0x98198181, (u_int)0xd19e4f4f, (u_int)0x7fa3dcdc, - (u_int)0x66442222, (u_int)0x7e542a2a, (u_int)0xab3b9090, (u_int)0x830b8888, - (u_int)0xca8c4646, (u_int)0x29c7eeee, (u_int)0xd36bb8b8, (u_int)0x3c281414, - (u_int)0x79a7dede, (u_int)0xe2bc5e5e, (u_int)0x1d160b0b, (u_int)0x76addbdb, - (u_int)0x3bdbe0e0, (u_int)0x56643232, (u_int)0x4e743a3a, (u_int)0x1e140a0a, - (u_int)0xdb924949, (u_int)0x0a0c0606, (u_int)0x6c482424, (u_int)0xe4b85c5c, - (u_int)0x5d9fc2c2, (u_int)0x6ebdd3d3, (u_int)0xef43acac, (u_int)0xa6c46262, - (u_int)0xa8399191, (u_int)0xa4319595, (u_int)0x37d3e4e4, (u_int)0x8bf27979, - (u_int)0x32d5e7e7, (u_int)0x438bc8c8, (u_int)0x596e3737, (u_int)0xb7da6d6d, - (u_int)0x8c018d8d, (u_int)0x64b1d5d5, (u_int)0xd29c4e4e, (u_int)0xe049a9a9, - (u_int)0xb4d86c6c, (u_int)0xfaac5656, (u_int)0x07f3f4f4, (u_int)0x25cfeaea, - (u_int)0xafca6565, (u_int)0x8ef47a7a, (u_int)0xe947aeae, (u_int)0x18100808, - (u_int)0xd56fbaba, (u_int)0x88f07878, (u_int)0x6f4a2525, (u_int)0x725c2e2e, - (u_int)0x24381c1c, (u_int)0xf157a6a6, (u_int)0xc773b4b4, (u_int)0x5197c6c6, - (u_int)0x23cbe8e8, (u_int)0x7ca1dddd, (u_int)0x9ce87474, (u_int)0x213e1f1f, - (u_int)0xdd964b4b, (u_int)0xdc61bdbd, (u_int)0x860d8b8b, (u_int)0x850f8a8a, - (u_int)0x90e07070, (u_int)0x427c3e3e, (u_int)0xc471b5b5, (u_int)0xaacc6666, - (u_int)0xd8904848, (u_int)0x05060303, (u_int)0x01f7f6f6, (u_int)0x121c0e0e, - (u_int)0xa3c26161, (u_int)0x5f6a3535, (u_int)0xf9ae5757, (u_int)0xd069b9b9, - (u_int)0x91178686, (u_int)0x5899c1c1, (u_int)0x273a1d1d, (u_int)0xb9279e9e, - (u_int)0x38d9e1e1, (u_int)0x13ebf8f8, (u_int)0xb32b9898, (u_int)0x33221111, - (u_int)0xbbd26969, (u_int)0x70a9d9d9, (u_int)0x89078e8e, (u_int)0xa7339494, - (u_int)0xb62d9b9b, (u_int)0x223c1e1e, (u_int)0x92158787, (u_int)0x20c9e9e9, - (u_int)0x4987cece, (u_int)0xffaa5555, (u_int)0x78502828, (u_int)0x7aa5dfdf, - (u_int)0x8f038c8c, (u_int)0xf859a1a1, (u_int)0x80098989, (u_int)0x171a0d0d, - (u_int)0xda65bfbf, (u_int)0x31d7e6e6, (u_int)0xc6844242, (u_int)0xb8d06868, - (u_int)0xc3824141, (u_int)0xb0299999, (u_int)0x775a2d2d, (u_int)0x111e0f0f, - (u_int)0xcb7bb0b0, (u_int)0xfca85454, (u_int)0xd66dbbbb, (u_int)0x3a2c1616, -}; -static const u32 Te2[256] = { - (u_int)0x63a5c663, (u_int)0x7c84f87c, (u_int)0x7799ee77, (u_int)0x7b8df67b, - 
(u_int)0xf20dfff2, (u_int)0x6bbdd66b, (u_int)0x6fb1de6f, (u_int)0xc55491c5, - (u_int)0x30506030, (u_int)0x01030201, (u_int)0x67a9ce67, (u_int)0x2b7d562b, - (u_int)0xfe19e7fe, (u_int)0xd762b5d7, (u_int)0xabe64dab, (u_int)0x769aec76, - (u_int)0xca458fca, (u_int)0x829d1f82, (u_int)0xc94089c9, (u_int)0x7d87fa7d, - (u_int)0xfa15effa, (u_int)0x59ebb259, (u_int)0x47c98e47, (u_int)0xf00bfbf0, - (u_int)0xadec41ad, (u_int)0xd467b3d4, (u_int)0xa2fd5fa2, (u_int)0xafea45af, - (u_int)0x9cbf239c, (u_int)0xa4f753a4, (u_int)0x7296e472, (u_int)0xc05b9bc0, - (u_int)0xb7c275b7, (u_int)0xfd1ce1fd, (u_int)0x93ae3d93, (u_int)0x266a4c26, - (u_int)0x365a6c36, (u_int)0x3f417e3f, (u_int)0xf702f5f7, (u_int)0xcc4f83cc, - (u_int)0x345c6834, (u_int)0xa5f451a5, (u_int)0xe534d1e5, (u_int)0xf108f9f1, - (u_int)0x7193e271, (u_int)0xd873abd8, (u_int)0x31536231, (u_int)0x153f2a15, - (u_int)0x040c0804, (u_int)0xc75295c7, (u_int)0x23654623, (u_int)0xc35e9dc3, - (u_int)0x18283018, (u_int)0x96a13796, (u_int)0x050f0a05, (u_int)0x9ab52f9a, - (u_int)0x07090e07, (u_int)0x12362412, (u_int)0x809b1b80, (u_int)0xe23ddfe2, - (u_int)0xeb26cdeb, (u_int)0x27694e27, (u_int)0xb2cd7fb2, (u_int)0x759fea75, - (u_int)0x091b1209, (u_int)0x839e1d83, (u_int)0x2c74582c, (u_int)0x1a2e341a, - (u_int)0x1b2d361b, (u_int)0x6eb2dc6e, (u_int)0x5aeeb45a, (u_int)0xa0fb5ba0, - (u_int)0x52f6a452, (u_int)0x3b4d763b, (u_int)0xd661b7d6, (u_int)0xb3ce7db3, - (u_int)0x297b5229, (u_int)0xe33edde3, (u_int)0x2f715e2f, (u_int)0x84971384, - (u_int)0x53f5a653, (u_int)0xd168b9d1, (u_int)0x00000000, (u_int)0xed2cc1ed, - (u_int)0x20604020, (u_int)0xfc1fe3fc, (u_int)0xb1c879b1, (u_int)0x5bedb65b, - (u_int)0x6abed46a, (u_int)0xcb468dcb, (u_int)0xbed967be, (u_int)0x394b7239, - (u_int)0x4ade944a, (u_int)0x4cd4984c, (u_int)0x58e8b058, (u_int)0xcf4a85cf, - (u_int)0xd06bbbd0, (u_int)0xef2ac5ef, (u_int)0xaae54faa, (u_int)0xfb16edfb, - (u_int)0x43c58643, (u_int)0x4dd79a4d, (u_int)0x33556633, (u_int)0x85941185, - (u_int)0x45cf8a45, (u_int)0xf910e9f9, (u_int)0x02060402, (u_int)0x7f81fe7f, - (u_int)0x50f0a050, (u_int)0x3c44783c, (u_int)0x9fba259f, (u_int)0xa8e34ba8, - (u_int)0x51f3a251, (u_int)0xa3fe5da3, (u_int)0x40c08040, (u_int)0x8f8a058f, - (u_int)0x92ad3f92, (u_int)0x9dbc219d, (u_int)0x38487038, (u_int)0xf504f1f5, - (u_int)0xbcdf63bc, (u_int)0xb6c177b6, (u_int)0xda75afda, (u_int)0x21634221, - (u_int)0x10302010, (u_int)0xff1ae5ff, (u_int)0xf30efdf3, (u_int)0xd26dbfd2, - (u_int)0xcd4c81cd, (u_int)0x0c14180c, (u_int)0x13352613, (u_int)0xec2fc3ec, - (u_int)0x5fe1be5f, (u_int)0x97a23597, (u_int)0x44cc8844, (u_int)0x17392e17, - (u_int)0xc45793c4, (u_int)0xa7f255a7, (u_int)0x7e82fc7e, (u_int)0x3d477a3d, - (u_int)0x64acc864, (u_int)0x5de7ba5d, (u_int)0x192b3219, (u_int)0x7395e673, - (u_int)0x60a0c060, (u_int)0x81981981, (u_int)0x4fd19e4f, (u_int)0xdc7fa3dc, - (u_int)0x22664422, (u_int)0x2a7e542a, (u_int)0x90ab3b90, (u_int)0x88830b88, - (u_int)0x46ca8c46, (u_int)0xee29c7ee, (u_int)0xb8d36bb8, (u_int)0x143c2814, - (u_int)0xde79a7de, (u_int)0x5ee2bc5e, (u_int)0x0b1d160b, (u_int)0xdb76addb, - (u_int)0xe03bdbe0, (u_int)0x32566432, (u_int)0x3a4e743a, (u_int)0x0a1e140a, - (u_int)0x49db9249, (u_int)0x060a0c06, (u_int)0x246c4824, (u_int)0x5ce4b85c, - (u_int)0xc25d9fc2, (u_int)0xd36ebdd3, (u_int)0xacef43ac, (u_int)0x62a6c462, - (u_int)0x91a83991, (u_int)0x95a43195, (u_int)0xe437d3e4, (u_int)0x798bf279, - (u_int)0xe732d5e7, (u_int)0xc8438bc8, (u_int)0x37596e37, (u_int)0x6db7da6d, - (u_int)0x8d8c018d, (u_int)0xd564b1d5, (u_int)0x4ed29c4e, (u_int)0xa9e049a9, - (u_int)0x6cb4d86c, (u_int)0x56faac56, 
(u_int)0xf407f3f4, (u_int)0xea25cfea, - (u_int)0x65afca65, (u_int)0x7a8ef47a, (u_int)0xaee947ae, (u_int)0x08181008, - (u_int)0xbad56fba, (u_int)0x7888f078, (u_int)0x256f4a25, (u_int)0x2e725c2e, - (u_int)0x1c24381c, (u_int)0xa6f157a6, (u_int)0xb4c773b4, (u_int)0xc65197c6, - (u_int)0xe823cbe8, (u_int)0xdd7ca1dd, (u_int)0x749ce874, (u_int)0x1f213e1f, - (u_int)0x4bdd964b, (u_int)0xbddc61bd, (u_int)0x8b860d8b, (u_int)0x8a850f8a, - (u_int)0x7090e070, (u_int)0x3e427c3e, (u_int)0xb5c471b5, (u_int)0x66aacc66, - (u_int)0x48d89048, (u_int)0x03050603, (u_int)0xf601f7f6, (u_int)0x0e121c0e, - (u_int)0x61a3c261, (u_int)0x355f6a35, (u_int)0x57f9ae57, (u_int)0xb9d069b9, - (u_int)0x86911786, (u_int)0xc15899c1, (u_int)0x1d273a1d, (u_int)0x9eb9279e, - (u_int)0xe138d9e1, (u_int)0xf813ebf8, (u_int)0x98b32b98, (u_int)0x11332211, - (u_int)0x69bbd269, (u_int)0xd970a9d9, (u_int)0x8e89078e, (u_int)0x94a73394, - (u_int)0x9bb62d9b, (u_int)0x1e223c1e, (u_int)0x87921587, (u_int)0xe920c9e9, - (u_int)0xce4987ce, (u_int)0x55ffaa55, (u_int)0x28785028, (u_int)0xdf7aa5df, - (u_int)0x8c8f038c, (u_int)0xa1f859a1, (u_int)0x89800989, (u_int)0x0d171a0d, - (u_int)0xbfda65bf, (u_int)0xe631d7e6, (u_int)0x42c68442, (u_int)0x68b8d068, - (u_int)0x41c38241, (u_int)0x99b02999, (u_int)0x2d775a2d, (u_int)0x0f111e0f, - (u_int)0xb0cb7bb0, (u_int)0x54fca854, (u_int)0xbbd66dbb, (u_int)0x163a2c16, -}; -static const u32 Te3[256] = { - - (u_int)0x6363a5c6, (u_int)0x7c7c84f8, (u_int)0x777799ee, (u_int)0x7b7b8df6, - (u_int)0xf2f20dff, (u_int)0x6b6bbdd6, (u_int)0x6f6fb1de, (u_int)0xc5c55491, - (u_int)0x30305060, (u_int)0x01010302, (u_int)0x6767a9ce, (u_int)0x2b2b7d56, - (u_int)0xfefe19e7, (u_int)0xd7d762b5, (u_int)0xababe64d, (u_int)0x76769aec, - (u_int)0xcaca458f, (u_int)0x82829d1f, (u_int)0xc9c94089, (u_int)0x7d7d87fa, - (u_int)0xfafa15ef, (u_int)0x5959ebb2, (u_int)0x4747c98e, (u_int)0xf0f00bfb, - (u_int)0xadadec41, (u_int)0xd4d467b3, (u_int)0xa2a2fd5f, (u_int)0xafafea45, - (u_int)0x9c9cbf23, (u_int)0xa4a4f753, (u_int)0x727296e4, (u_int)0xc0c05b9b, - (u_int)0xb7b7c275, (u_int)0xfdfd1ce1, (u_int)0x9393ae3d, (u_int)0x26266a4c, - (u_int)0x36365a6c, (u_int)0x3f3f417e, (u_int)0xf7f702f5, (u_int)0xcccc4f83, - (u_int)0x34345c68, (u_int)0xa5a5f451, (u_int)0xe5e534d1, (u_int)0xf1f108f9, - (u_int)0x717193e2, (u_int)0xd8d873ab, (u_int)0x31315362, (u_int)0x15153f2a, - (u_int)0x04040c08, (u_int)0xc7c75295, (u_int)0x23236546, (u_int)0xc3c35e9d, - (u_int)0x18182830, (u_int)0x9696a137, (u_int)0x05050f0a, (u_int)0x9a9ab52f, - (u_int)0x0707090e, (u_int)0x12123624, (u_int)0x80809b1b, (u_int)0xe2e23ddf, - (u_int)0xebeb26cd, (u_int)0x2727694e, (u_int)0xb2b2cd7f, (u_int)0x75759fea, - (u_int)0x09091b12, (u_int)0x83839e1d, (u_int)0x2c2c7458, (u_int)0x1a1a2e34, - (u_int)0x1b1b2d36, (u_int)0x6e6eb2dc, (u_int)0x5a5aeeb4, (u_int)0xa0a0fb5b, - (u_int)0x5252f6a4, (u_int)0x3b3b4d76, (u_int)0xd6d661b7, (u_int)0xb3b3ce7d, - (u_int)0x29297b52, (u_int)0xe3e33edd, (u_int)0x2f2f715e, (u_int)0x84849713, - (u_int)0x5353f5a6, (u_int)0xd1d168b9, (u_int)0x00000000, (u_int)0xeded2cc1, - (u_int)0x20206040, (u_int)0xfcfc1fe3, (u_int)0xb1b1c879, (u_int)0x5b5bedb6, - (u_int)0x6a6abed4, (u_int)0xcbcb468d, (u_int)0xbebed967, (u_int)0x39394b72, - (u_int)0x4a4ade94, (u_int)0x4c4cd498, (u_int)0x5858e8b0, (u_int)0xcfcf4a85, - (u_int)0xd0d06bbb, (u_int)0xefef2ac5, (u_int)0xaaaae54f, (u_int)0xfbfb16ed, - (u_int)0x4343c586, (u_int)0x4d4dd79a, (u_int)0x33335566, (u_int)0x85859411, - (u_int)0x4545cf8a, (u_int)0xf9f910e9, (u_int)0x02020604, (u_int)0x7f7f81fe, - (u_int)0x5050f0a0, (u_int)0x3c3c4478, 
(u_int)0x9f9fba25, (u_int)0xa8a8e34b, - (u_int)0x5151f3a2, (u_int)0xa3a3fe5d, (u_int)0x4040c080, (u_int)0x8f8f8a05, - (u_int)0x9292ad3f, (u_int)0x9d9dbc21, (u_int)0x38384870, (u_int)0xf5f504f1, - (u_int)0xbcbcdf63, (u_int)0xb6b6c177, (u_int)0xdada75af, (u_int)0x21216342, - (u_int)0x10103020, (u_int)0xffff1ae5, (u_int)0xf3f30efd, (u_int)0xd2d26dbf, - (u_int)0xcdcd4c81, (u_int)0x0c0c1418, (u_int)0x13133526, (u_int)0xecec2fc3, - (u_int)0x5f5fe1be, (u_int)0x9797a235, (u_int)0x4444cc88, (u_int)0x1717392e, - (u_int)0xc4c45793, (u_int)0xa7a7f255, (u_int)0x7e7e82fc, (u_int)0x3d3d477a, - (u_int)0x6464acc8, (u_int)0x5d5de7ba, (u_int)0x19192b32, (u_int)0x737395e6, - (u_int)0x6060a0c0, (u_int)0x81819819, (u_int)0x4f4fd19e, (u_int)0xdcdc7fa3, - (u_int)0x22226644, (u_int)0x2a2a7e54, (u_int)0x9090ab3b, (u_int)0x8888830b, - (u_int)0x4646ca8c, (u_int)0xeeee29c7, (u_int)0xb8b8d36b, (u_int)0x14143c28, - (u_int)0xdede79a7, (u_int)0x5e5ee2bc, (u_int)0x0b0b1d16, (u_int)0xdbdb76ad, - (u_int)0xe0e03bdb, (u_int)0x32325664, (u_int)0x3a3a4e74, (u_int)0x0a0a1e14, - (u_int)0x4949db92, (u_int)0x06060a0c, (u_int)0x24246c48, (u_int)0x5c5ce4b8, - (u_int)0xc2c25d9f, (u_int)0xd3d36ebd, (u_int)0xacacef43, (u_int)0x6262a6c4, - (u_int)0x9191a839, (u_int)0x9595a431, (u_int)0xe4e437d3, (u_int)0x79798bf2, - (u_int)0xe7e732d5, (u_int)0xc8c8438b, (u_int)0x3737596e, (u_int)0x6d6db7da, - (u_int)0x8d8d8c01, (u_int)0xd5d564b1, (u_int)0x4e4ed29c, (u_int)0xa9a9e049, - (u_int)0x6c6cb4d8, (u_int)0x5656faac, (u_int)0xf4f407f3, (u_int)0xeaea25cf, - (u_int)0x6565afca, (u_int)0x7a7a8ef4, (u_int)0xaeaee947, (u_int)0x08081810, - (u_int)0xbabad56f, (u_int)0x787888f0, (u_int)0x25256f4a, (u_int)0x2e2e725c, - (u_int)0x1c1c2438, (u_int)0xa6a6f157, (u_int)0xb4b4c773, (u_int)0xc6c65197, - (u_int)0xe8e823cb, (u_int)0xdddd7ca1, (u_int)0x74749ce8, (u_int)0x1f1f213e, - (u_int)0x4b4bdd96, (u_int)0xbdbddc61, (u_int)0x8b8b860d, (u_int)0x8a8a850f, - (u_int)0x707090e0, (u_int)0x3e3e427c, (u_int)0xb5b5c471, (u_int)0x6666aacc, - (u_int)0x4848d890, (u_int)0x03030506, (u_int)0xf6f601f7, (u_int)0x0e0e121c, - (u_int)0x6161a3c2, (u_int)0x35355f6a, (u_int)0x5757f9ae, (u_int)0xb9b9d069, - (u_int)0x86869117, (u_int)0xc1c15899, (u_int)0x1d1d273a, (u_int)0x9e9eb927, - (u_int)0xe1e138d9, (u_int)0xf8f813eb, (u_int)0x9898b32b, (u_int)0x11113322, - (u_int)0x6969bbd2, (u_int)0xd9d970a9, (u_int)0x8e8e8907, (u_int)0x9494a733, - (u_int)0x9b9bb62d, (u_int)0x1e1e223c, (u_int)0x87879215, (u_int)0xe9e920c9, - (u_int)0xcece4987, (u_int)0x5555ffaa, (u_int)0x28287850, (u_int)0xdfdf7aa5, - (u_int)0x8c8c8f03, (u_int)0xa1a1f859, (u_int)0x89898009, (u_int)0x0d0d171a, - (u_int)0xbfbfda65, (u_int)0xe6e631d7, (u_int)0x4242c684, (u_int)0x6868b8d0, - (u_int)0x4141c382, (u_int)0x9999b029, (u_int)0x2d2d775a, (u_int)0x0f0f111e, - (u_int)0xb0b0cb7b, (u_int)0x5454fca8, (u_int)0xbbbbd66d, (u_int)0x16163a2c, -}; -static const u32 Te4[256] = { - (u_int)0x63636363, (u_int)0x7c7c7c7c, (u_int)0x77777777, (u_int)0x7b7b7b7b, - (u_int)0xf2f2f2f2, (u_int)0x6b6b6b6b, (u_int)0x6f6f6f6f, (u_int)0xc5c5c5c5, - (u_int)0x30303030, (u_int)0x01010101, (u_int)0x67676767, (u_int)0x2b2b2b2b, - (u_int)0xfefefefe, (u_int)0xd7d7d7d7, (u_int)0xabababab, (u_int)0x76767676, - (u_int)0xcacacaca, (u_int)0x82828282, (u_int)0xc9c9c9c9, (u_int)0x7d7d7d7d, - (u_int)0xfafafafa, (u_int)0x59595959, (u_int)0x47474747, (u_int)0xf0f0f0f0, - (u_int)0xadadadad, (u_int)0xd4d4d4d4, (u_int)0xa2a2a2a2, (u_int)0xafafafaf, - (u_int)0x9c9c9c9c, (u_int)0xa4a4a4a4, (u_int)0x72727272, (u_int)0xc0c0c0c0, - (u_int)0xb7b7b7b7, (u_int)0xfdfdfdfd, 
(u_int)0x93939393, (u_int)0x26262626, - (u_int)0x36363636, (u_int)0x3f3f3f3f, (u_int)0xf7f7f7f7, (u_int)0xcccccccc, - (u_int)0x34343434, (u_int)0xa5a5a5a5, (u_int)0xe5e5e5e5, (u_int)0xf1f1f1f1, - (u_int)0x71717171, (u_int)0xd8d8d8d8, (u_int)0x31313131, (u_int)0x15151515, - (u_int)0x04040404, (u_int)0xc7c7c7c7, (u_int)0x23232323, (u_int)0xc3c3c3c3, - (u_int)0x18181818, (u_int)0x96969696, (u_int)0x05050505, (u_int)0x9a9a9a9a, - (u_int)0x07070707, (u_int)0x12121212, (u_int)0x80808080, (u_int)0xe2e2e2e2, - (u_int)0xebebebeb, (u_int)0x27272727, (u_int)0xb2b2b2b2, (u_int)0x75757575, - (u_int)0x09090909, (u_int)0x83838383, (u_int)0x2c2c2c2c, (u_int)0x1a1a1a1a, - (u_int)0x1b1b1b1b, (u_int)0x6e6e6e6e, (u_int)0x5a5a5a5a, (u_int)0xa0a0a0a0, - (u_int)0x52525252, (u_int)0x3b3b3b3b, (u_int)0xd6d6d6d6, (u_int)0xb3b3b3b3, - (u_int)0x29292929, (u_int)0xe3e3e3e3, (u_int)0x2f2f2f2f, (u_int)0x84848484, - (u_int)0x53535353, (u_int)0xd1d1d1d1, (u_int)0x00000000, (u_int)0xedededed, - (u_int)0x20202020, (u_int)0xfcfcfcfc, (u_int)0xb1b1b1b1, (u_int)0x5b5b5b5b, - (u_int)0x6a6a6a6a, (u_int)0xcbcbcbcb, (u_int)0xbebebebe, (u_int)0x39393939, - (u_int)0x4a4a4a4a, (u_int)0x4c4c4c4c, (u_int)0x58585858, (u_int)0xcfcfcfcf, - (u_int)0xd0d0d0d0, (u_int)0xefefefef, (u_int)0xaaaaaaaa, (u_int)0xfbfbfbfb, - (u_int)0x43434343, (u_int)0x4d4d4d4d, (u_int)0x33333333, (u_int)0x85858585, - (u_int)0x45454545, (u_int)0xf9f9f9f9, (u_int)0x02020202, (u_int)0x7f7f7f7f, - (u_int)0x50505050, (u_int)0x3c3c3c3c, (u_int)0x9f9f9f9f, (u_int)0xa8a8a8a8, - (u_int)0x51515151, (u_int)0xa3a3a3a3, (u_int)0x40404040, (u_int)0x8f8f8f8f, - (u_int)0x92929292, (u_int)0x9d9d9d9d, (u_int)0x38383838, (u_int)0xf5f5f5f5, - (u_int)0xbcbcbcbc, (u_int)0xb6b6b6b6, (u_int)0xdadadada, (u_int)0x21212121, - (u_int)0x10101010, (u_int)0xffffffff, (u_int)0xf3f3f3f3, (u_int)0xd2d2d2d2, - (u_int)0xcdcdcdcd, (u_int)0x0c0c0c0c, (u_int)0x13131313, (u_int)0xecececec, - (u_int)0x5f5f5f5f, (u_int)0x97979797, (u_int)0x44444444, (u_int)0x17171717, - (u_int)0xc4c4c4c4, (u_int)0xa7a7a7a7, (u_int)0x7e7e7e7e, (u_int)0x3d3d3d3d, - (u_int)0x64646464, (u_int)0x5d5d5d5d, (u_int)0x19191919, (u_int)0x73737373, - (u_int)0x60606060, (u_int)0x81818181, (u_int)0x4f4f4f4f, (u_int)0xdcdcdcdc, - (u_int)0x22222222, (u_int)0x2a2a2a2a, (u_int)0x90909090, (u_int)0x88888888, - (u_int)0x46464646, (u_int)0xeeeeeeee, (u_int)0xb8b8b8b8, (u_int)0x14141414, - (u_int)0xdededede, (u_int)0x5e5e5e5e, (u_int)0x0b0b0b0b, (u_int)0xdbdbdbdb, - (u_int)0xe0e0e0e0, (u_int)0x32323232, (u_int)0x3a3a3a3a, (u_int)0x0a0a0a0a, - (u_int)0x49494949, (u_int)0x06060606, (u_int)0x24242424, (u_int)0x5c5c5c5c, - (u_int)0xc2c2c2c2, (u_int)0xd3d3d3d3, (u_int)0xacacacac, (u_int)0x62626262, - (u_int)0x91919191, (u_int)0x95959595, (u_int)0xe4e4e4e4, (u_int)0x79797979, - (u_int)0xe7e7e7e7, (u_int)0xc8c8c8c8, (u_int)0x37373737, (u_int)0x6d6d6d6d, - (u_int)0x8d8d8d8d, (u_int)0xd5d5d5d5, (u_int)0x4e4e4e4e, (u_int)0xa9a9a9a9, - (u_int)0x6c6c6c6c, (u_int)0x56565656, (u_int)0xf4f4f4f4, (u_int)0xeaeaeaea, - (u_int)0x65656565, (u_int)0x7a7a7a7a, (u_int)0xaeaeaeae, (u_int)0x08080808, - (u_int)0xbabababa, (u_int)0x78787878, (u_int)0x25252525, (u_int)0x2e2e2e2e, - (u_int)0x1c1c1c1c, (u_int)0xa6a6a6a6, (u_int)0xb4b4b4b4, (u_int)0xc6c6c6c6, - (u_int)0xe8e8e8e8, (u_int)0xdddddddd, (u_int)0x74747474, (u_int)0x1f1f1f1f, - (u_int)0x4b4b4b4b, (u_int)0xbdbdbdbd, (u_int)0x8b8b8b8b, (u_int)0x8a8a8a8a, - (u_int)0x70707070, (u_int)0x3e3e3e3e, (u_int)0xb5b5b5b5, (u_int)0x66666666, - (u_int)0x48484848, (u_int)0x03030303, (u_int)0xf6f6f6f6, (u_int)0x0e0e0e0e, - 
(u_int)0x61616161, (u_int)0x35353535, (u_int)0x57575757, (u_int)0xb9b9b9b9, - (u_int)0x86868686, (u_int)0xc1c1c1c1, (u_int)0x1d1d1d1d, (u_int)0x9e9e9e9e, - (u_int)0xe1e1e1e1, (u_int)0xf8f8f8f8, (u_int)0x98989898, (u_int)0x11111111, - (u_int)0x69696969, (u_int)0xd9d9d9d9, (u_int)0x8e8e8e8e, (u_int)0x94949494, - (u_int)0x9b9b9b9b, (u_int)0x1e1e1e1e, (u_int)0x87878787, (u_int)0xe9e9e9e9, - (u_int)0xcececece, (u_int)0x55555555, (u_int)0x28282828, (u_int)0xdfdfdfdf, - (u_int)0x8c8c8c8c, (u_int)0xa1a1a1a1, (u_int)0x89898989, (u_int)0x0d0d0d0d, - (u_int)0xbfbfbfbf, (u_int)0xe6e6e6e6, (u_int)0x42424242, (u_int)0x68686868, - (u_int)0x41414141, (u_int)0x99999999, (u_int)0x2d2d2d2d, (u_int)0x0f0f0f0f, - (u_int)0xb0b0b0b0, (u_int)0x54545454, (u_int)0xbbbbbbbb, (u_int)0x16161616, -}; -static const u32 Td0[256] = { - (u_int)0x51f4a750, (u_int)0x7e416553, (u_int)0x1a17a4c3, (u_int)0x3a275e96, - (u_int)0x3bab6bcb, (u_int)0x1f9d45f1, (u_int)0xacfa58ab, (u_int)0x4be30393, - (u_int)0x2030fa55, (u_int)0xad766df6, (u_int)0x88cc7691, (u_int)0xf5024c25, - (u_int)0x4fe5d7fc, (u_int)0xc52acbd7, (u_int)0x26354480, (u_int)0xb562a38f, - (u_int)0xdeb15a49, (u_int)0x25ba1b67, (u_int)0x45ea0e98, (u_int)0x5dfec0e1, - (u_int)0xc32f7502, (u_int)0x814cf012, (u_int)0x8d4697a3, (u_int)0x6bd3f9c6, - (u_int)0x038f5fe7, (u_int)0x15929c95, (u_int)0xbf6d7aeb, (u_int)0x955259da, - (u_int)0xd4be832d, (u_int)0x587421d3, (u_int)0x49e06929, (u_int)0x8ec9c844, - (u_int)0x75c2896a, (u_int)0xf48e7978, (u_int)0x99583e6b, (u_int)0x27b971dd, - (u_int)0xbee14fb6, (u_int)0xf088ad17, (u_int)0xc920ac66, (u_int)0x7dce3ab4, - (u_int)0x63df4a18, (u_int)0xe51a3182, (u_int)0x97513360, (u_int)0x62537f45, - (u_int)0xb16477e0, (u_int)0xbb6bae84, (u_int)0xfe81a01c, (u_int)0xf9082b94, - (u_int)0x70486858, (u_int)0x8f45fd19, (u_int)0x94de6c87, (u_int)0x527bf8b7, - (u_int)0xab73d323, (u_int)0x724b02e2, (u_int)0xe31f8f57, (u_int)0x6655ab2a, - (u_int)0xb2eb2807, (u_int)0x2fb5c203, (u_int)0x86c57b9a, (u_int)0xd33708a5, - (u_int)0x302887f2, (u_int)0x23bfa5b2, (u_int)0x02036aba, (u_int)0xed16825c, - (u_int)0x8acf1c2b, (u_int)0xa779b492, (u_int)0xf307f2f0, (u_int)0x4e69e2a1, - (u_int)0x65daf4cd, (u_int)0x0605bed5, (u_int)0xd134621f, (u_int)0xc4a6fe8a, - (u_int)0x342e539d, (u_int)0xa2f355a0, (u_int)0x058ae132, (u_int)0xa4f6eb75, - (u_int)0x0b83ec39, (u_int)0x4060efaa, (u_int)0x5e719f06, (u_int)0xbd6e1051, - (u_int)0x3e218af9, (u_int)0x96dd063d, (u_int)0xdd3e05ae, (u_int)0x4de6bd46, - (u_int)0x91548db5, (u_int)0x71c45d05, (u_int)0x0406d46f, (u_int)0x605015ff, - (u_int)0x1998fb24, (u_int)0xd6bde997, (u_int)0x894043cc, (u_int)0x67d99e77, - (u_int)0xb0e842bd, (u_int)0x07898b88, (u_int)0xe7195b38, (u_int)0x79c8eedb, - (u_int)0xa17c0a47, (u_int)0x7c420fe9, (u_int)0xf8841ec9, (u_int)0x00000000, - (u_int)0x09808683, (u_int)0x322bed48, (u_int)0x1e1170ac, (u_int)0x6c5a724e, - (u_int)0xfd0efffb, (u_int)0x0f853856, (u_int)0x3daed51e, (u_int)0x362d3927, - (u_int)0x0a0fd964, (u_int)0x685ca621, (u_int)0x9b5b54d1, (u_int)0x24362e3a, - (u_int)0x0c0a67b1, (u_int)0x9357e70f, (u_int)0xb4ee96d2, (u_int)0x1b9b919e, - (u_int)0x80c0c54f, (u_int)0x61dc20a2, (u_int)0x5a774b69, (u_int)0x1c121a16, - (u_int)0xe293ba0a, (u_int)0xc0a02ae5, (u_int)0x3c22e043, (u_int)0x121b171d, - (u_int)0x0e090d0b, (u_int)0xf28bc7ad, (u_int)0x2db6a8b9, (u_int)0x141ea9c8, - (u_int)0x57f11985, (u_int)0xaf75074c, (u_int)0xee99ddbb, (u_int)0xa37f60fd, - (u_int)0xf701269f, (u_int)0x5c72f5bc, (u_int)0x44663bc5, (u_int)0x5bfb7e34, - (u_int)0x8b432976, (u_int)0xcb23c6dc, (u_int)0xb6edfc68, (u_int)0xb8e4f163, - 
(u_int)0xd731dcca, (u_int)0x42638510, (u_int)0x13972240, (u_int)0x84c61120, - (u_int)0x854a247d, (u_int)0xd2bb3df8, (u_int)0xaef93211, (u_int)0xc729a16d, - (u_int)0x1d9e2f4b, (u_int)0xdcb230f3, (u_int)0x0d8652ec, (u_int)0x77c1e3d0, - (u_int)0x2bb3166c, (u_int)0xa970b999, (u_int)0x119448fa, (u_int)0x47e96422, - (u_int)0xa8fc8cc4, (u_int)0xa0f03f1a, (u_int)0x567d2cd8, (u_int)0x223390ef, - (u_int)0x87494ec7, (u_int)0xd938d1c1, (u_int)0x8ccaa2fe, (u_int)0x98d40b36, - (u_int)0xa6f581cf, (u_int)0xa57ade28, (u_int)0xdab78e26, (u_int)0x3fadbfa4, - (u_int)0x2c3a9de4, (u_int)0x5078920d, (u_int)0x6a5fcc9b, (u_int)0x547e4662, - (u_int)0xf68d13c2, (u_int)0x90d8b8e8, (u_int)0x2e39f75e, (u_int)0x82c3aff5, - (u_int)0x9f5d80be, (u_int)0x69d0937c, (u_int)0x6fd52da9, (u_int)0xcf2512b3, - (u_int)0xc8ac993b, (u_int)0x10187da7, (u_int)0xe89c636e, (u_int)0xdb3bbb7b, - (u_int)0xcd267809, (u_int)0x6e5918f4, (u_int)0xec9ab701, (u_int)0x834f9aa8, - (u_int)0xe6956e65, (u_int)0xaaffe67e, (u_int)0x21bccf08, (u_int)0xef15e8e6, - (u_int)0xbae79bd9, (u_int)0x4a6f36ce, (u_int)0xea9f09d4, (u_int)0x29b07cd6, - (u_int)0x31a4b2af, (u_int)0x2a3f2331, (u_int)0xc6a59430, (u_int)0x35a266c0, - (u_int)0x744ebc37, (u_int)0xfc82caa6, (u_int)0xe090d0b0, (u_int)0x33a7d815, - (u_int)0xf104984a, (u_int)0x41ecdaf7, (u_int)0x7fcd500e, (u_int)0x1791f62f, - (u_int)0x764dd68d, (u_int)0x43efb04d, (u_int)0xccaa4d54, (u_int)0xe49604df, - (u_int)0x9ed1b5e3, (u_int)0x4c6a881b, (u_int)0xc12c1fb8, (u_int)0x4665517f, - (u_int)0x9d5eea04, (u_int)0x018c355d, (u_int)0xfa877473, (u_int)0xfb0b412e, - (u_int)0xb3671d5a, (u_int)0x92dbd252, (u_int)0xe9105633, (u_int)0x6dd64713, - (u_int)0x9ad7618c, (u_int)0x37a10c7a, (u_int)0x59f8148e, (u_int)0xeb133c89, - (u_int)0xcea927ee, (u_int)0xb761c935, (u_int)0xe11ce5ed, (u_int)0x7a47b13c, - (u_int)0x9cd2df59, (u_int)0x55f2733f, (u_int)0x1814ce79, (u_int)0x73c737bf, - (u_int)0x53f7cdea, (u_int)0x5ffdaa5b, (u_int)0xdf3d6f14, (u_int)0x7844db86, - (u_int)0xcaaff381, (u_int)0xb968c43e, (u_int)0x3824342c, (u_int)0xc2a3405f, - (u_int)0x161dc372, (u_int)0xbce2250c, (u_int)0x283c498b, (u_int)0xff0d9541, - (u_int)0x39a80171, (u_int)0x080cb3de, (u_int)0xd8b4e49c, (u_int)0x6456c190, - (u_int)0x7bcb8461, (u_int)0xd532b670, (u_int)0x486c5c74, (u_int)0xd0b85742, -}; -static const u32 Td1[256] = { - (u_int)0x5051f4a7, (u_int)0x537e4165, (u_int)0xc31a17a4, (u_int)0x963a275e, - (u_int)0xcb3bab6b, (u_int)0xf11f9d45, (u_int)0xabacfa58, (u_int)0x934be303, - (u_int)0x552030fa, (u_int)0xf6ad766d, (u_int)0x9188cc76, (u_int)0x25f5024c, - (u_int)0xfc4fe5d7, (u_int)0xd7c52acb, (u_int)0x80263544, (u_int)0x8fb562a3, - (u_int)0x49deb15a, (u_int)0x6725ba1b, (u_int)0x9845ea0e, (u_int)0xe15dfec0, - (u_int)0x02c32f75, (u_int)0x12814cf0, (u_int)0xa38d4697, (u_int)0xc66bd3f9, - (u_int)0xe7038f5f, (u_int)0x9515929c, (u_int)0xebbf6d7a, (u_int)0xda955259, - (u_int)0x2dd4be83, (u_int)0xd3587421, (u_int)0x2949e069, (u_int)0x448ec9c8, - (u_int)0x6a75c289, (u_int)0x78f48e79, (u_int)0x6b99583e, (u_int)0xdd27b971, - (u_int)0xb6bee14f, (u_int)0x17f088ad, (u_int)0x66c920ac, (u_int)0xb47dce3a, - (u_int)0x1863df4a, (u_int)0x82e51a31, (u_int)0x60975133, (u_int)0x4562537f, - (u_int)0xe0b16477, (u_int)0x84bb6bae, (u_int)0x1cfe81a0, (u_int)0x94f9082b, - (u_int)0x58704868, (u_int)0x198f45fd, (u_int)0x8794de6c, (u_int)0xb7527bf8, - (u_int)0x23ab73d3, (u_int)0xe2724b02, (u_int)0x57e31f8f, (u_int)0x2a6655ab, - (u_int)0x07b2eb28, (u_int)0x032fb5c2, (u_int)0x9a86c57b, (u_int)0xa5d33708, - (u_int)0xf2302887, (u_int)0xb223bfa5, (u_int)0xba02036a, (u_int)0x5ced1682, - 
(u_int)0x2b8acf1c, (u_int)0x92a779b4, (u_int)0xf0f307f2, (u_int)0xa14e69e2, - (u_int)0xcd65daf4, (u_int)0xd50605be, (u_int)0x1fd13462, (u_int)0x8ac4a6fe, - (u_int)0x9d342e53, (u_int)0xa0a2f355, (u_int)0x32058ae1, (u_int)0x75a4f6eb, - (u_int)0x390b83ec, (u_int)0xaa4060ef, (u_int)0x065e719f, (u_int)0x51bd6e10, - (u_int)0xf93e218a, (u_int)0x3d96dd06, (u_int)0xaedd3e05, (u_int)0x464de6bd, - (u_int)0xb591548d, (u_int)0x0571c45d, (u_int)0x6f0406d4, (u_int)0xff605015, - (u_int)0x241998fb, (u_int)0x97d6bde9, (u_int)0xcc894043, (u_int)0x7767d99e, - (u_int)0xbdb0e842, (u_int)0x8807898b, (u_int)0x38e7195b, (u_int)0xdb79c8ee, - (u_int)0x47a17c0a, (u_int)0xe97c420f, (u_int)0xc9f8841e, (u_int)0x00000000, - (u_int)0x83098086, (u_int)0x48322bed, (u_int)0xac1e1170, (u_int)0x4e6c5a72, - (u_int)0xfbfd0eff, (u_int)0x560f8538, (u_int)0x1e3daed5, (u_int)0x27362d39, - (u_int)0x640a0fd9, (u_int)0x21685ca6, (u_int)0xd19b5b54, (u_int)0x3a24362e, - (u_int)0xb10c0a67, (u_int)0x0f9357e7, (u_int)0xd2b4ee96, (u_int)0x9e1b9b91, - (u_int)0x4f80c0c5, (u_int)0xa261dc20, (u_int)0x695a774b, (u_int)0x161c121a, - (u_int)0x0ae293ba, (u_int)0xe5c0a02a, (u_int)0x433c22e0, (u_int)0x1d121b17, - (u_int)0x0b0e090d, (u_int)0xadf28bc7, (u_int)0xb92db6a8, (u_int)0xc8141ea9, - (u_int)0x8557f119, (u_int)0x4caf7507, (u_int)0xbbee99dd, (u_int)0xfda37f60, - (u_int)0x9ff70126, (u_int)0xbc5c72f5, (u_int)0xc544663b, (u_int)0x345bfb7e, - (u_int)0x768b4329, (u_int)0xdccb23c6, (u_int)0x68b6edfc, (u_int)0x63b8e4f1, - (u_int)0xcad731dc, (u_int)0x10426385, (u_int)0x40139722, (u_int)0x2084c611, - (u_int)0x7d854a24, (u_int)0xf8d2bb3d, (u_int)0x11aef932, (u_int)0x6dc729a1, - (u_int)0x4b1d9e2f, (u_int)0xf3dcb230, (u_int)0xec0d8652, (u_int)0xd077c1e3, - (u_int)0x6c2bb316, (u_int)0x99a970b9, (u_int)0xfa119448, (u_int)0x2247e964, - (u_int)0xc4a8fc8c, (u_int)0x1aa0f03f, (u_int)0xd8567d2c, (u_int)0xef223390, - (u_int)0xc787494e, (u_int)0xc1d938d1, (u_int)0xfe8ccaa2, (u_int)0x3698d40b, - (u_int)0xcfa6f581, (u_int)0x28a57ade, (u_int)0x26dab78e, (u_int)0xa43fadbf, - (u_int)0xe42c3a9d, (u_int)0x0d507892, (u_int)0x9b6a5fcc, (u_int)0x62547e46, - (u_int)0xc2f68d13, (u_int)0xe890d8b8, (u_int)0x5e2e39f7, (u_int)0xf582c3af, - (u_int)0xbe9f5d80, (u_int)0x7c69d093, (u_int)0xa96fd52d, (u_int)0xb3cf2512, - (u_int)0x3bc8ac99, (u_int)0xa710187d, (u_int)0x6ee89c63, (u_int)0x7bdb3bbb, - (u_int)0x09cd2678, (u_int)0xf46e5918, (u_int)0x01ec9ab7, (u_int)0xa8834f9a, - (u_int)0x65e6956e, (u_int)0x7eaaffe6, (u_int)0x0821bccf, (u_int)0xe6ef15e8, - (u_int)0xd9bae79b, (u_int)0xce4a6f36, (u_int)0xd4ea9f09, (u_int)0xd629b07c, - (u_int)0xaf31a4b2, (u_int)0x312a3f23, (u_int)0x30c6a594, (u_int)0xc035a266, - (u_int)0x37744ebc, (u_int)0xa6fc82ca, (u_int)0xb0e090d0, (u_int)0x1533a7d8, - (u_int)0x4af10498, (u_int)0xf741ecda, (u_int)0x0e7fcd50, (u_int)0x2f1791f6, - (u_int)0x8d764dd6, (u_int)0x4d43efb0, (u_int)0x54ccaa4d, (u_int)0xdfe49604, - (u_int)0xe39ed1b5, (u_int)0x1b4c6a88, (u_int)0xb8c12c1f, (u_int)0x7f466551, - (u_int)0x049d5eea, (u_int)0x5d018c35, (u_int)0x73fa8774, (u_int)0x2efb0b41, - (u_int)0x5ab3671d, (u_int)0x5292dbd2, (u_int)0x33e91056, (u_int)0x136dd647, - (u_int)0x8c9ad761, (u_int)0x7a37a10c, (u_int)0x8e59f814, (u_int)0x89eb133c, - (u_int)0xeecea927, (u_int)0x35b761c9, (u_int)0xede11ce5, (u_int)0x3c7a47b1, - (u_int)0x599cd2df, (u_int)0x3f55f273, (u_int)0x791814ce, (u_int)0xbf73c737, - (u_int)0xea53f7cd, (u_int)0x5b5ffdaa, (u_int)0x14df3d6f, (u_int)0x867844db, - (u_int)0x81caaff3, (u_int)0x3eb968c4, (u_int)0x2c382434, (u_int)0x5fc2a340, - (u_int)0x72161dc3, (u_int)0x0cbce225, 
(u_int)0x8b283c49, (u_int)0x41ff0d95, - (u_int)0x7139a801, (u_int)0xde080cb3, (u_int)0x9cd8b4e4, (u_int)0x906456c1, - (u_int)0x617bcb84, (u_int)0x70d532b6, (u_int)0x74486c5c, (u_int)0x42d0b857, -}; -static const u32 Td2[256] = { - (u_int)0xa75051f4, (u_int)0x65537e41, (u_int)0xa4c31a17, (u_int)0x5e963a27, - (u_int)0x6bcb3bab, (u_int)0x45f11f9d, (u_int)0x58abacfa, (u_int)0x03934be3, - (u_int)0xfa552030, (u_int)0x6df6ad76, (u_int)0x769188cc, (u_int)0x4c25f502, - (u_int)0xd7fc4fe5, (u_int)0xcbd7c52a, (u_int)0x44802635, (u_int)0xa38fb562, - (u_int)0x5a49deb1, (u_int)0x1b6725ba, (u_int)0x0e9845ea, (u_int)0xc0e15dfe, - (u_int)0x7502c32f, (u_int)0xf012814c, (u_int)0x97a38d46, (u_int)0xf9c66bd3, - (u_int)0x5fe7038f, (u_int)0x9c951592, (u_int)0x7aebbf6d, (u_int)0x59da9552, - (u_int)0x832dd4be, (u_int)0x21d35874, (u_int)0x692949e0, (u_int)0xc8448ec9, - (u_int)0x896a75c2, (u_int)0x7978f48e, (u_int)0x3e6b9958, (u_int)0x71dd27b9, - (u_int)0x4fb6bee1, (u_int)0xad17f088, (u_int)0xac66c920, (u_int)0x3ab47dce, - (u_int)0x4a1863df, (u_int)0x3182e51a, (u_int)0x33609751, (u_int)0x7f456253, - (u_int)0x77e0b164, (u_int)0xae84bb6b, (u_int)0xa01cfe81, (u_int)0x2b94f908, - (u_int)0x68587048, (u_int)0xfd198f45, (u_int)0x6c8794de, (u_int)0xf8b7527b, - (u_int)0xd323ab73, (u_int)0x02e2724b, (u_int)0x8f57e31f, (u_int)0xab2a6655, - (u_int)0x2807b2eb, (u_int)0xc2032fb5, (u_int)0x7b9a86c5, (u_int)0x08a5d337, - (u_int)0x87f23028, (u_int)0xa5b223bf, (u_int)0x6aba0203, (u_int)0x825ced16, - (u_int)0x1c2b8acf, (u_int)0xb492a779, (u_int)0xf2f0f307, (u_int)0xe2a14e69, - (u_int)0xf4cd65da, (u_int)0xbed50605, (u_int)0x621fd134, (u_int)0xfe8ac4a6, - (u_int)0x539d342e, (u_int)0x55a0a2f3, (u_int)0xe132058a, (u_int)0xeb75a4f6, - (u_int)0xec390b83, (u_int)0xefaa4060, (u_int)0x9f065e71, (u_int)0x1051bd6e, - - (u_int)0x8af93e21, (u_int)0x063d96dd, (u_int)0x05aedd3e, (u_int)0xbd464de6, - (u_int)0x8db59154, (u_int)0x5d0571c4, (u_int)0xd46f0406, (u_int)0x15ff6050, - (u_int)0xfb241998, (u_int)0xe997d6bd, (u_int)0x43cc8940, (u_int)0x9e7767d9, - (u_int)0x42bdb0e8, (u_int)0x8b880789, (u_int)0x5b38e719, (u_int)0xeedb79c8, - (u_int)0x0a47a17c, (u_int)0x0fe97c42, (u_int)0x1ec9f884, (u_int)0x00000000, - (u_int)0x86830980, (u_int)0xed48322b, (u_int)0x70ac1e11, (u_int)0x724e6c5a, - (u_int)0xfffbfd0e, (u_int)0x38560f85, (u_int)0xd51e3dae, (u_int)0x3927362d, - (u_int)0xd9640a0f, (u_int)0xa621685c, (u_int)0x54d19b5b, (u_int)0x2e3a2436, - (u_int)0x67b10c0a, (u_int)0xe70f9357, (u_int)0x96d2b4ee, (u_int)0x919e1b9b, - (u_int)0xc54f80c0, (u_int)0x20a261dc, (u_int)0x4b695a77, (u_int)0x1a161c12, - (u_int)0xba0ae293, (u_int)0x2ae5c0a0, (u_int)0xe0433c22, (u_int)0x171d121b, - (u_int)0x0d0b0e09, (u_int)0xc7adf28b, (u_int)0xa8b92db6, (u_int)0xa9c8141e, - (u_int)0x198557f1, (u_int)0x074caf75, (u_int)0xddbbee99, (u_int)0x60fda37f, - (u_int)0x269ff701, (u_int)0xf5bc5c72, (u_int)0x3bc54466, (u_int)0x7e345bfb, - (u_int)0x29768b43, (u_int)0xc6dccb23, (u_int)0xfc68b6ed, (u_int)0xf163b8e4, - (u_int)0xdccad731, (u_int)0x85104263, (u_int)0x22401397, (u_int)0x112084c6, - (u_int)0x247d854a, (u_int)0x3df8d2bb, (u_int)0x3211aef9, (u_int)0xa16dc729, - (u_int)0x2f4b1d9e, (u_int)0x30f3dcb2, (u_int)0x52ec0d86, (u_int)0xe3d077c1, - (u_int)0x166c2bb3, (u_int)0xb999a970, (u_int)0x48fa1194, (u_int)0x642247e9, - (u_int)0x8cc4a8fc, (u_int)0x3f1aa0f0, (u_int)0x2cd8567d, (u_int)0x90ef2233, - (u_int)0x4ec78749, (u_int)0xd1c1d938, (u_int)0xa2fe8cca, (u_int)0x0b3698d4, - (u_int)0x81cfa6f5, (u_int)0xde28a57a, (u_int)0x8e26dab7, (u_int)0xbfa43fad, - (u_int)0x9de42c3a, (u_int)0x920d5078, 
(u_int)0xcc9b6a5f, (u_int)0x4662547e, - (u_int)0x13c2f68d, (u_int)0xb8e890d8, (u_int)0xf75e2e39, (u_int)0xaff582c3, - (u_int)0x80be9f5d, (u_int)0x937c69d0, (u_int)0x2da96fd5, (u_int)0x12b3cf25, - (u_int)0x993bc8ac, (u_int)0x7da71018, (u_int)0x636ee89c, (u_int)0xbb7bdb3b, - (u_int)0x7809cd26, (u_int)0x18f46e59, (u_int)0xb701ec9a, (u_int)0x9aa8834f, - (u_int)0x6e65e695, (u_int)0xe67eaaff, (u_int)0xcf0821bc, (u_int)0xe8e6ef15, - (u_int)0x9bd9bae7, (u_int)0x36ce4a6f, (u_int)0x09d4ea9f, (u_int)0x7cd629b0, - (u_int)0xb2af31a4, (u_int)0x23312a3f, (u_int)0x9430c6a5, (u_int)0x66c035a2, - (u_int)0xbc37744e, (u_int)0xcaa6fc82, (u_int)0xd0b0e090, (u_int)0xd81533a7, - (u_int)0x984af104, (u_int)0xdaf741ec, (u_int)0x500e7fcd, (u_int)0xf62f1791, - (u_int)0xd68d764d, (u_int)0xb04d43ef, (u_int)0x4d54ccaa, (u_int)0x04dfe496, - (u_int)0xb5e39ed1, (u_int)0x881b4c6a, (u_int)0x1fb8c12c, (u_int)0x517f4665, - (u_int)0xea049d5e, (u_int)0x355d018c, (u_int)0x7473fa87, (u_int)0x412efb0b, - (u_int)0x1d5ab367, (u_int)0xd25292db, (u_int)0x5633e910, (u_int)0x47136dd6, - (u_int)0x618c9ad7, (u_int)0x0c7a37a1, (u_int)0x148e59f8, (u_int)0x3c89eb13, - (u_int)0x27eecea9, (u_int)0xc935b761, (u_int)0xe5ede11c, (u_int)0xb13c7a47, - (u_int)0xdf599cd2, (u_int)0x733f55f2, (u_int)0xce791814, (u_int)0x37bf73c7, - (u_int)0xcdea53f7, (u_int)0xaa5b5ffd, (u_int)0x6f14df3d, (u_int)0xdb867844, - (u_int)0xf381caaf, (u_int)0xc43eb968, (u_int)0x342c3824, (u_int)0x405fc2a3, - (u_int)0xc372161d, (u_int)0x250cbce2, (u_int)0x498b283c, (u_int)0x9541ff0d, - (u_int)0x017139a8, (u_int)0xb3de080c, (u_int)0xe49cd8b4, (u_int)0xc1906456, - (u_int)0x84617bcb, (u_int)0xb670d532, (u_int)0x5c74486c, (u_int)0x5742d0b8, -}; -static const u32 Td3[256] = { - (u_int)0xf4a75051, (u_int)0x4165537e, (u_int)0x17a4c31a, (u_int)0x275e963a, - (u_int)0xab6bcb3b, (u_int)0x9d45f11f, (u_int)0xfa58abac, (u_int)0xe303934b, - (u_int)0x30fa5520, (u_int)0x766df6ad, (u_int)0xcc769188, (u_int)0x024c25f5, - (u_int)0xe5d7fc4f, (u_int)0x2acbd7c5, (u_int)0x35448026, (u_int)0x62a38fb5, - (u_int)0xb15a49de, (u_int)0xba1b6725, (u_int)0xea0e9845, (u_int)0xfec0e15d, - (u_int)0x2f7502c3, (u_int)0x4cf01281, (u_int)0x4697a38d, (u_int)0xd3f9c66b, - (u_int)0x8f5fe703, (u_int)0x929c9515, (u_int)0x6d7aebbf, (u_int)0x5259da95, - (u_int)0xbe832dd4, (u_int)0x7421d358, (u_int)0xe0692949, (u_int)0xc9c8448e, - (u_int)0xc2896a75, (u_int)0x8e7978f4, (u_int)0x583e6b99, (u_int)0xb971dd27, - (u_int)0xe14fb6be, (u_int)0x88ad17f0, (u_int)0x20ac66c9, (u_int)0xce3ab47d, - (u_int)0xdf4a1863, (u_int)0x1a3182e5, (u_int)0x51336097, (u_int)0x537f4562, - (u_int)0x6477e0b1, (u_int)0x6bae84bb, (u_int)0x81a01cfe, (u_int)0x082b94f9, - (u_int)0x48685870, (u_int)0x45fd198f, (u_int)0xde6c8794, (u_int)0x7bf8b752, - (u_int)0x73d323ab, (u_int)0x4b02e272, (u_int)0x1f8f57e3, (u_int)0x55ab2a66, - (u_int)0xeb2807b2, (u_int)0xb5c2032f, (u_int)0xc57b9a86, (u_int)0x3708a5d3, - (u_int)0x2887f230, (u_int)0xbfa5b223, (u_int)0x036aba02, (u_int)0x16825ced, - (u_int)0xcf1c2b8a, (u_int)0x79b492a7, (u_int)0x07f2f0f3, (u_int)0x69e2a14e, - (u_int)0xdaf4cd65, (u_int)0x05bed506, (u_int)0x34621fd1, (u_int)0xa6fe8ac4, - (u_int)0x2e539d34, (u_int)0xf355a0a2, (u_int)0x8ae13205, (u_int)0xf6eb75a4, - (u_int)0x83ec390b, (u_int)0x60efaa40, (u_int)0x719f065e, (u_int)0x6e1051bd, - (u_int)0x218af93e, (u_int)0xdd063d96, (u_int)0x3e05aedd, (u_int)0xe6bd464d, - (u_int)0x548db591, (u_int)0xc45d0571, (u_int)0x06d46f04, (u_int)0x5015ff60, - (u_int)0x98fb2419, (u_int)0xbde997d6, (u_int)0x4043cc89, (u_int)0xd99e7767, - (u_int)0xe842bdb0, (u_int)0x898b8807, 
(u_int)0x195b38e7, (u_int)0xc8eedb79, - (u_int)0x7c0a47a1, (u_int)0x420fe97c, (u_int)0x841ec9f8, (u_int)0x00000000, - (u_int)0x80868309, (u_int)0x2bed4832, (u_int)0x1170ac1e, (u_int)0x5a724e6c, - (u_int)0x0efffbfd, (u_int)0x8538560f, (u_int)0xaed51e3d, (u_int)0x2d392736, - (u_int)0x0fd9640a, (u_int)0x5ca62168, (u_int)0x5b54d19b, (u_int)0x362e3a24, - (u_int)0x0a67b10c, (u_int)0x57e70f93, (u_int)0xee96d2b4, (u_int)0x9b919e1b, - (u_int)0xc0c54f80, (u_int)0xdc20a261, (u_int)0x774b695a, (u_int)0x121a161c, - (u_int)0x93ba0ae2, (u_int)0xa02ae5c0, (u_int)0x22e0433c, (u_int)0x1b171d12, - (u_int)0x090d0b0e, (u_int)0x8bc7adf2, (u_int)0xb6a8b92d, (u_int)0x1ea9c814, - (u_int)0xf1198557, (u_int)0x75074caf, (u_int)0x99ddbbee, (u_int)0x7f60fda3, - (u_int)0x01269ff7, (u_int)0x72f5bc5c, (u_int)0x663bc544, (u_int)0xfb7e345b, - (u_int)0x4329768b, (u_int)0x23c6dccb, (u_int)0xedfc68b6, (u_int)0xe4f163b8, - (u_int)0x31dccad7, (u_int)0x63851042, (u_int)0x97224013, (u_int)0xc6112084, - (u_int)0x4a247d85, (u_int)0xbb3df8d2, (u_int)0xf93211ae, (u_int)0x29a16dc7, - (u_int)0x9e2f4b1d, (u_int)0xb230f3dc, (u_int)0x8652ec0d, (u_int)0xc1e3d077, - (u_int)0xb3166c2b, (u_int)0x70b999a9, (u_int)0x9448fa11, (u_int)0xe9642247, - (u_int)0xfc8cc4a8, (u_int)0xf03f1aa0, (u_int)0x7d2cd856, (u_int)0x3390ef22, - (u_int)0x494ec787, (u_int)0x38d1c1d9, (u_int)0xcaa2fe8c, (u_int)0xd40b3698, - (u_int)0xf581cfa6, (u_int)0x7ade28a5, (u_int)0xb78e26da, (u_int)0xadbfa43f, - (u_int)0x3a9de42c, (u_int)0x78920d50, (u_int)0x5fcc9b6a, (u_int)0x7e466254, - (u_int)0x8d13c2f6, (u_int)0xd8b8e890, (u_int)0x39f75e2e, (u_int)0xc3aff582, - (u_int)0x5d80be9f, (u_int)0xd0937c69, (u_int)0xd52da96f, (u_int)0x2512b3cf, - (u_int)0xac993bc8, (u_int)0x187da710, (u_int)0x9c636ee8, (u_int)0x3bbb7bdb, - (u_int)0x267809cd, (u_int)0x5918f46e, (u_int)0x9ab701ec, (u_int)0x4f9aa883, - (u_int)0x956e65e6, (u_int)0xffe67eaa, (u_int)0xbccf0821, (u_int)0x15e8e6ef, - (u_int)0xe79bd9ba, (u_int)0x6f36ce4a, (u_int)0x9f09d4ea, (u_int)0xb07cd629, - (u_int)0xa4b2af31, (u_int)0x3f23312a, (u_int)0xa59430c6, (u_int)0xa266c035, - (u_int)0x4ebc3774, (u_int)0x82caa6fc, (u_int)0x90d0b0e0, (u_int)0xa7d81533, - (u_int)0x04984af1, (u_int)0xecdaf741, (u_int)0xcd500e7f, (u_int)0x91f62f17, - (u_int)0x4dd68d76, (u_int)0xefb04d43, (u_int)0xaa4d54cc, (u_int)0x9604dfe4, - (u_int)0xd1b5e39e, (u_int)0x6a881b4c, (u_int)0x2c1fb8c1, (u_int)0x65517f46, - (u_int)0x5eea049d, (u_int)0x8c355d01, (u_int)0x877473fa, (u_int)0x0b412efb, - (u_int)0x671d5ab3, (u_int)0xdbd25292, (u_int)0x105633e9, (u_int)0xd647136d, - (u_int)0xd7618c9a, (u_int)0xa10c7a37, (u_int)0xf8148e59, (u_int)0x133c89eb, - (u_int)0xa927eece, (u_int)0x61c935b7, (u_int)0x1ce5ede1, (u_int)0x47b13c7a, - (u_int)0xd2df599c, (u_int)0xf2733f55, (u_int)0x14ce7918, (u_int)0xc737bf73, - (u_int)0xf7cdea53, (u_int)0xfdaa5b5f, (u_int)0x3d6f14df, (u_int)0x44db8678, - (u_int)0xaff381ca, (u_int)0x68c43eb9, (u_int)0x24342c38, (u_int)0xa3405fc2, - (u_int)0x1dc37216, (u_int)0xe2250cbc, (u_int)0x3c498b28, (u_int)0x0d9541ff, - (u_int)0xa8017139, (u_int)0x0cb3de08, (u_int)0xb4e49cd8, (u_int)0x56c19064, - (u_int)0xcb84617b, (u_int)0x32b670d5, (u_int)0x6c5c7448, (u_int)0xb85742d0, -}; -static const u32 Td4[256] = { - (u_int)0x52525252, (u_int)0x09090909, (u_int)0x6a6a6a6a, (u_int)0xd5d5d5d5, - (u_int)0x30303030, (u_int)0x36363636, (u_int)0xa5a5a5a5, (u_int)0x38383838, - (u_int)0xbfbfbfbf, (u_int)0x40404040, (u_int)0xa3a3a3a3, (u_int)0x9e9e9e9e, - (u_int)0x81818181, (u_int)0xf3f3f3f3, (u_int)0xd7d7d7d7, (u_int)0xfbfbfbfb, - (u_int)0x7c7c7c7c, (u_int)0xe3e3e3e3, 
(u_int)0x39393939, (u_int)0x82828282, - (u_int)0x9b9b9b9b, (u_int)0x2f2f2f2f, (u_int)0xffffffff, (u_int)0x87878787, - (u_int)0x34343434, (u_int)0x8e8e8e8e, (u_int)0x43434343, (u_int)0x44444444, - (u_int)0xc4c4c4c4, (u_int)0xdededede, (u_int)0xe9e9e9e9, (u_int)0xcbcbcbcb, - (u_int)0x54545454, (u_int)0x7b7b7b7b, (u_int)0x94949494, (u_int)0x32323232, - (u_int)0xa6a6a6a6, (u_int)0xc2c2c2c2, (u_int)0x23232323, (u_int)0x3d3d3d3d, - (u_int)0xeeeeeeee, (u_int)0x4c4c4c4c, (u_int)0x95959595, (u_int)0x0b0b0b0b, - (u_int)0x42424242, (u_int)0xfafafafa, (u_int)0xc3c3c3c3, (u_int)0x4e4e4e4e, - (u_int)0x08080808, (u_int)0x2e2e2e2e, (u_int)0xa1a1a1a1, (u_int)0x66666666, - (u_int)0x28282828, (u_int)0xd9d9d9d9, (u_int)0x24242424, (u_int)0xb2b2b2b2, - (u_int)0x76767676, (u_int)0x5b5b5b5b, (u_int)0xa2a2a2a2, (u_int)0x49494949, - (u_int)0x6d6d6d6d, (u_int)0x8b8b8b8b, (u_int)0xd1d1d1d1, (u_int)0x25252525, - (u_int)0x72727272, (u_int)0xf8f8f8f8, (u_int)0xf6f6f6f6, (u_int)0x64646464, - (u_int)0x86868686, (u_int)0x68686868, (u_int)0x98989898, (u_int)0x16161616, - (u_int)0xd4d4d4d4, (u_int)0xa4a4a4a4, (u_int)0x5c5c5c5c, (u_int)0xcccccccc, - (u_int)0x5d5d5d5d, (u_int)0x65656565, (u_int)0xb6b6b6b6, (u_int)0x92929292, - (u_int)0x6c6c6c6c, (u_int)0x70707070, (u_int)0x48484848, (u_int)0x50505050, - (u_int)0xfdfdfdfd, (u_int)0xedededed, (u_int)0xb9b9b9b9, (u_int)0xdadadada, - (u_int)0x5e5e5e5e, (u_int)0x15151515, (u_int)0x46464646, (u_int)0x57575757, - (u_int)0xa7a7a7a7, (u_int)0x8d8d8d8d, (u_int)0x9d9d9d9d, (u_int)0x84848484, - (u_int)0x90909090, (u_int)0xd8d8d8d8, (u_int)0xabababab, (u_int)0x00000000, - (u_int)0x8c8c8c8c, (u_int)0xbcbcbcbc, (u_int)0xd3d3d3d3, (u_int)0x0a0a0a0a, - (u_int)0xf7f7f7f7, (u_int)0xe4e4e4e4, (u_int)0x58585858, (u_int)0x05050505, - (u_int)0xb8b8b8b8, (u_int)0xb3b3b3b3, (u_int)0x45454545, (u_int)0x06060606, - (u_int)0xd0d0d0d0, (u_int)0x2c2c2c2c, (u_int)0x1e1e1e1e, (u_int)0x8f8f8f8f, - (u_int)0xcacacaca, (u_int)0x3f3f3f3f, (u_int)0x0f0f0f0f, (u_int)0x02020202, - (u_int)0xc1c1c1c1, (u_int)0xafafafaf, (u_int)0xbdbdbdbd, (u_int)0x03030303, - (u_int)0x01010101, (u_int)0x13131313, (u_int)0x8a8a8a8a, (u_int)0x6b6b6b6b, - (u_int)0x3a3a3a3a, (u_int)0x91919191, (u_int)0x11111111, (u_int)0x41414141, - (u_int)0x4f4f4f4f, (u_int)0x67676767, (u_int)0xdcdcdcdc, (u_int)0xeaeaeaea, - (u_int)0x97979797, (u_int)0xf2f2f2f2, (u_int)0xcfcfcfcf, (u_int)0xcececece, - (u_int)0xf0f0f0f0, (u_int)0xb4b4b4b4, (u_int)0xe6e6e6e6, (u_int)0x73737373, - (u_int)0x96969696, (u_int)0xacacacac, (u_int)0x74747474, (u_int)0x22222222, - (u_int)0xe7e7e7e7, (u_int)0xadadadad, (u_int)0x35353535, (u_int)0x85858585, - (u_int)0xe2e2e2e2, (u_int)0xf9f9f9f9, (u_int)0x37373737, (u_int)0xe8e8e8e8, - (u_int)0x1c1c1c1c, (u_int)0x75757575, (u_int)0xdfdfdfdf, (u_int)0x6e6e6e6e, - (u_int)0x47474747, (u_int)0xf1f1f1f1, (u_int)0x1a1a1a1a, (u_int)0x71717171, - (u_int)0x1d1d1d1d, (u_int)0x29292929, (u_int)0xc5c5c5c5, (u_int)0x89898989, - (u_int)0x6f6f6f6f, (u_int)0xb7b7b7b7, (u_int)0x62626262, (u_int)0x0e0e0e0e, - (u_int)0xaaaaaaaa, (u_int)0x18181818, (u_int)0xbebebebe, (u_int)0x1b1b1b1b, - (u_int)0xfcfcfcfc, (u_int)0x56565656, (u_int)0x3e3e3e3e, (u_int)0x4b4b4b4b, - (u_int)0xc6c6c6c6, (u_int)0xd2d2d2d2, (u_int)0x79797979, (u_int)0x20202020, - (u_int)0x9a9a9a9a, (u_int)0xdbdbdbdb, (u_int)0xc0c0c0c0, (u_int)0xfefefefe, - (u_int)0x78787878, (u_int)0xcdcdcdcd, (u_int)0x5a5a5a5a, (u_int)0xf4f4f4f4, - (u_int)0x1f1f1f1f, (u_int)0xdddddddd, (u_int)0xa8a8a8a8, (u_int)0x33333333, - (u_int)0x88888888, (u_int)0x07070707, (u_int)0xc7c7c7c7, (u_int)0x31313131, - 
(u_int)0xb1b1b1b1, (u_int)0x12121212, (u_int)0x10101010, (u_int)0x59595959, - (u_int)0x27272727, (u_int)0x80808080, (u_int)0xecececec, (u_int)0x5f5f5f5f, - (u_int)0x60606060, (u_int)0x51515151, (u_int)0x7f7f7f7f, (u_int)0xa9a9a9a9, - (u_int)0x19191919, (u_int)0xb5b5b5b5, (u_int)0x4a4a4a4a, (u_int)0x0d0d0d0d, - (u_int)0x2d2d2d2d, (u_int)0xe5e5e5e5, (u_int)0x7a7a7a7a, (u_int)0x9f9f9f9f, - (u_int)0x93939393, (u_int)0xc9c9c9c9, (u_int)0x9c9c9c9c, (u_int)0xefefefef, - (u_int)0xa0a0a0a0, (u_int)0xe0e0e0e0, (u_int)0x3b3b3b3b, (u_int)0x4d4d4d4d, - (u_int)0xaeaeaeae, (u_int)0x2a2a2a2a, (u_int)0xf5f5f5f5, (u_int)0xb0b0b0b0, - (u_int)0xc8c8c8c8, (u_int)0xebebebeb, (u_int)0xbbbbbbbb, (u_int)0x3c3c3c3c, - (u_int)0x83838383, (u_int)0x53535353, (u_int)0x99999999, (u_int)0x61616161, - (u_int)0x17171717, (u_int)0x2b2b2b2b, (u_int)0x04040404, (u_int)0x7e7e7e7e, - (u_int)0xbabababa, (u_int)0x77777777, (u_int)0xd6d6d6d6, (u_int)0x26262626, - (u_int)0xe1e1e1e1, (u_int)0x69696969, (u_int)0x14141414, (u_int)0x63636363, - (u_int)0x55555555, (u_int)0x21212121, (u_int)0x0c0c0c0c, (u_int)0x7d7d7d7d, -}; -static const u32 rcon[] = { - 0x01000000, 0x02000000, 0x04000000, 0x08000000, - 0x10000000, 0x20000000, 0x40000000, 0x80000000, - 0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ -}; - -#define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00) - -#ifdef _MSC_VER -#define GETU32(p) SWAP(*((u32 *)(p))) -#define PUTU32(ct, st) { *((u32 *)(ct)) = SWAP((st)); } -#else -#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3])) -#define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); } -#endif - -/** - * Expand the cipher key into the encryption key schedule. - * - * @return the number of rounds for the given cipher key size. 
- */ -/* - * __db_rijndaelKeySetupEnc -- - * - * PUBLIC: int __db_rijndaelKeySetupEnc __P((u32 *, const u8 *, int)); - */ -int -__db_rijndaelKeySetupEnc(rk, cipherKey, keyBits) - u32 *rk; /* rk[4*(Nr + 1)] */ - const u8 *cipherKey; - int keyBits; -{ - int i = 0; - u32 temp; - - rk[0] = GETU32(cipherKey ); - rk[1] = GETU32(cipherKey + 4); - rk[2] = GETU32(cipherKey + 8); - rk[3] = GETU32(cipherKey + 12); - if (keyBits == 128) { - for (;;) { - temp = rk[3]; - rk[4] = rk[0] ^ - (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ - (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ - (Te4[(temp ) & 0xff] & 0x0000ff00) ^ - (Te4[(temp >> 24) ] & 0x000000ff) ^ - rcon[i]; - rk[5] = rk[1] ^ rk[4]; - rk[6] = rk[2] ^ rk[5]; - rk[7] = rk[3] ^ rk[6]; - if (++i == 10) { - return 10; - } - rk += 4; - } - } - rk[4] = GETU32(cipherKey + 16); - rk[5] = GETU32(cipherKey + 20); - if (keyBits == 192) { - for (;;) { - temp = rk[ 5]; - rk[ 6] = rk[ 0] ^ - (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ - (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ - (Te4[(temp ) & 0xff] & 0x0000ff00) ^ - (Te4[(temp >> 24) ] & 0x000000ff) ^ - rcon[i]; - rk[ 7] = rk[ 1] ^ rk[ 6]; - rk[ 8] = rk[ 2] ^ rk[ 7]; - rk[ 9] = rk[ 3] ^ rk[ 8]; - if (++i == 8) { - return 12; - } - rk[10] = rk[ 4] ^ rk[ 9]; - rk[11] = rk[ 5] ^ rk[10]; - rk += 6; - } - } - rk[6] = GETU32(cipherKey + 24); - rk[7] = GETU32(cipherKey + 28); - if (keyBits == 256) { - for (;;) { - temp = rk[ 7]; - rk[ 8] = rk[ 0] ^ - (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ - (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ - (Te4[(temp ) & 0xff] & 0x0000ff00) ^ - (Te4[(temp >> 24) ] & 0x000000ff) ^ - rcon[i]; - rk[ 9] = rk[ 1] ^ rk[ 8]; - rk[10] = rk[ 2] ^ rk[ 9]; - rk[11] = rk[ 3] ^ rk[10]; - if (++i == 7) { - return 14; - } - temp = rk[11]; - rk[12] = rk[ 4] ^ - (Te4[(temp >> 24) ] & 0xff000000) ^ - (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(temp ) & 0xff] & 0x000000ff); - rk[13] = rk[ 5] ^ rk[12]; - rk[14] = rk[ 6] ^ rk[13]; - rk[15] = rk[ 7] ^ rk[14]; - - rk += 8; - } - } - return 0; -} - -/** - * Expand the cipher key into the decryption key schedule. - * - * @return the number of rounds for the given cipher key size. 
- */ -/* - * __db_rijndaelKeySetupDec -- - * - * PUBLIC: int __db_rijndaelKeySetupDec __P((u32 *, const u8 *, int)); - */ -int -__db_rijndaelKeySetupDec(rk, cipherKey, keyBits) - u32 *rk; /* rk[4*(Nr + 1)] */ - const u8 *cipherKey; - int keyBits; -{ - int Nr, i, j; - u32 temp; - - /* expand the cipher key: */ - Nr = __db_rijndaelKeySetupEnc(rk, cipherKey, keyBits); - /* invert the order of the round keys: */ - for (i = 0, j = 4*Nr; i < j; i += 4, j -= 4) { - temp = rk[i ]; rk[i ] = rk[j ]; rk[j ] = temp; - temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp; - temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp; - temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp; - } - /* apply the inverse MixColumn transform to all round keys but the first and the last: */ - for (i = 1; i < Nr; i++) { - rk += 4; - rk[0] = - Td0[Te4[(rk[0] >> 24) ] & 0xff] ^ - Td1[Te4[(rk[0] >> 16) & 0xff] & 0xff] ^ - Td2[Te4[(rk[0] >> 8) & 0xff] & 0xff] ^ - Td3[Te4[(rk[0] ) & 0xff] & 0xff]; - rk[1] = - Td0[Te4[(rk[1] >> 24) ] & 0xff] ^ - Td1[Te4[(rk[1] >> 16) & 0xff] & 0xff] ^ - Td2[Te4[(rk[1] >> 8) & 0xff] & 0xff] ^ - Td3[Te4[(rk[1] ) & 0xff] & 0xff]; - rk[2] = - Td0[Te4[(rk[2] >> 24) ] & 0xff] ^ - Td1[Te4[(rk[2] >> 16) & 0xff] & 0xff] ^ - Td2[Te4[(rk[2] >> 8) & 0xff] & 0xff] ^ - Td3[Te4[(rk[2] ) & 0xff] & 0xff]; - rk[3] = - Td0[Te4[(rk[3] >> 24) ] & 0xff] ^ - Td1[Te4[(rk[3] >> 16) & 0xff] & 0xff] ^ - Td2[Te4[(rk[3] >> 8) & 0xff] & 0xff] ^ - Td3[Te4[(rk[3] ) & 0xff] & 0xff]; - } - return Nr; -} - -/* - * __db_rijndaelEncrypt -- - * - * PUBLIC: void __db_rijndaelEncrypt __P((u32 *, int, const u8 *, u8 *)); - */ -void -__db_rijndaelEncrypt(rk, Nr, pt, ct) - u32 *rk; /* rk[4*(Nr + 1)] */ - int Nr; - const u8 *pt; - u8 *ct; -{ - u32 s0, s1, s2, s3, t0, t1, t2, t3; -#ifndef FULL_UNROLL - int r; -#endif /* ?FULL_UNROLL */ - - /* - * map byte array block to cipher state - * and add initial round key: - */ - s0 = GETU32(pt ) ^ rk[0]; - s1 = GETU32(pt + 4) ^ rk[1]; - s2 = GETU32(pt + 8) ^ rk[2]; - s3 = GETU32(pt + 12) ^ rk[3]; -#ifdef FULL_UNROLL - /* round 1: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[ 4]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[ 5]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[ 6]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[ 7]; - /* round 2: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[ 8]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[ 9]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11]; - /* round 3: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15]; - /* round 4: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 
0xff] ^ Te3[t1 & 0xff] ^ rk[18]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19]; - /* round 5: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[21]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[22]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23]; - /* round 6: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27]; - /* round 7: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31]; - /* round 8: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35]; - /* round 9: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39]; - if (Nr > 10) { - /* round 10: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[41]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[42]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43]; - /* round 11: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47]; - if (Nr > 12) { - /* round 12: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51]; - /* round 13: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 
>> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55]; - } - } - rk += Nr << 2; -#else /* !FULL_UNROLL */ - /* - * Nr - 1 full rounds: - */ - r = Nr >> 1; - for (;;) { - t0 = - Te0[(s0 >> 24) ] ^ - Te1[(s1 >> 16) & 0xff] ^ - Te2[(s2 >> 8) & 0xff] ^ - Te3[(s3 ) & 0xff] ^ - rk[4]; - t1 = - Te0[(s1 >> 24) ] ^ - Te1[(s2 >> 16) & 0xff] ^ - Te2[(s3 >> 8) & 0xff] ^ - Te3[(s0 ) & 0xff] ^ - rk[5]; - t2 = - Te0[(s2 >> 24) ] ^ - Te1[(s3 >> 16) & 0xff] ^ - Te2[(s0 >> 8) & 0xff] ^ - Te3[(s1 ) & 0xff] ^ - rk[6]; - t3 = - Te0[(s3 >> 24) ] ^ - Te1[(s0 >> 16) & 0xff] ^ - Te2[(s1 >> 8) & 0xff] ^ - Te3[(s2 ) & 0xff] ^ - rk[7]; - - rk += 8; - if (--r == 0) { - break; - } - - s0 = - Te0[(t0 >> 24) ] ^ - Te1[(t1 >> 16) & 0xff] ^ - Te2[(t2 >> 8) & 0xff] ^ - Te3[(t3 ) & 0xff] ^ - rk[0]; - s1 = - Te0[(t1 >> 24) ] ^ - Te1[(t2 >> 16) & 0xff] ^ - Te2[(t3 >> 8) & 0xff] ^ - Te3[(t0 ) & 0xff] ^ - rk[1]; - s2 = - Te0[(t2 >> 24) ] ^ - Te1[(t3 >> 16) & 0xff] ^ - Te2[(t0 >> 8) & 0xff] ^ - Te3[(t1 ) & 0xff] ^ - rk[2]; - s3 = - Te0[(t3 >> 24) ] ^ - Te1[(t0 >> 16) & 0xff] ^ - Te2[(t1 >> 8) & 0xff] ^ - Te3[(t2 ) & 0xff] ^ - rk[3]; - } -#endif /* ?FULL_UNROLL */ - /* - * apply last round and - * map cipher state to byte array block: - */ - s0 = - (Te4[(t0 >> 24) ] & 0xff000000) ^ - (Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(t3 ) & 0xff] & 0x000000ff) ^ - rk[0]; - PUTU32(ct , s0); - s1 = - (Te4[(t1 >> 24) ] & 0xff000000) ^ - (Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(t0 ) & 0xff] & 0x000000ff) ^ - rk[1]; - PUTU32(ct + 4, s1); - s2 = - (Te4[(t2 >> 24) ] & 0xff000000) ^ - (Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(t1 ) & 0xff] & 0x000000ff) ^ - rk[2]; - PUTU32(ct + 8, s2); - s3 = - (Te4[(t3 >> 24) ] & 0xff000000) ^ - (Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(t2 ) & 0xff] & 0x000000ff) ^ - rk[3]; - PUTU32(ct + 12, s3); -} - -/* - * __db_rijndaelDecrypt -- - * - * PUBLIC: void __db_rijndaelDecrypt __P((u32 *, int, const u8 *, u8 *)); - */ -void -__db_rijndaelDecrypt(rk, Nr, ct, pt) - u32 *rk; /* rk[4*(Nr + 1)] */ - int Nr; - const u8 *ct; - u8 *pt; -{ - u32 s0, s1, s2, s3, t0, t1, t2, t3; -#ifndef FULL_UNROLL - int r; -#endif /* ?FULL_UNROLL */ - - /* - * map byte array block to cipher state - * and add initial round key: - */ - s0 = GETU32(ct ) ^ rk[0]; - s1 = GETU32(ct + 4) ^ rk[1]; - s2 = GETU32(ct + 8) ^ rk[2]; - s3 = GETU32(ct + 12) ^ rk[3]; -#ifdef FULL_UNROLL - /* round 1: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[ 4]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[ 5]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[ 6]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[ 7]; - /* round 2: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[ 8]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[ 9]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[10]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] 
^ rk[11]; - /* round 3: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[13]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[14]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15]; - /* round 4: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[17]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[18]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19]; - /* round 5: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[21]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[22]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23]; - /* round 6: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[25]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[26]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27]; - /* round 7: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[29]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[30]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31]; - /* round 8: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[33]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[34]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35]; - /* round 9: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[37]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[38]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39]; - if (Nr > 10) { - /* round 10: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[41]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[42]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43]; - /* round 11: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[45]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ 
Td3[s3 & 0xff] ^ rk[46]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47]; - if (Nr > 12) { - /* round 12: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[49]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[50]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51]; - /* round 13: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[53]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[54]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[55]; - } - } - rk += Nr << 2; -#else /* !FULL_UNROLL */ - /* - * Nr - 1 full rounds: - */ - r = Nr >> 1; - for (;;) { - t0 = - Td0[(s0 >> 24) ] ^ - Td1[(s3 >> 16) & 0xff] ^ - Td2[(s2 >> 8) & 0xff] ^ - Td3[(s1 ) & 0xff] ^ - rk[4]; - t1 = - Td0[(s1 >> 24) ] ^ - Td1[(s0 >> 16) & 0xff] ^ - Td2[(s3 >> 8) & 0xff] ^ - Td3[(s2 ) & 0xff] ^ - rk[5]; - t2 = - Td0[(s2 >> 24) ] ^ - Td1[(s1 >> 16) & 0xff] ^ - Td2[(s0 >> 8) & 0xff] ^ - Td3[(s3 ) & 0xff] ^ - rk[6]; - t3 = - Td0[(s3 >> 24) ] ^ - Td1[(s2 >> 16) & 0xff] ^ - Td2[(s1 >> 8) & 0xff] ^ - Td3[(s0 ) & 0xff] ^ - rk[7]; - - rk += 8; - if (--r == 0) { - break; - } - - s0 = - Td0[(t0 >> 24) ] ^ - Td1[(t3 >> 16) & 0xff] ^ - Td2[(t2 >> 8) & 0xff] ^ - Td3[(t1 ) & 0xff] ^ - rk[0]; - s1 = - Td0[(t1 >> 24) ] ^ - Td1[(t0 >> 16) & 0xff] ^ - Td2[(t3 >> 8) & 0xff] ^ - Td3[(t2 ) & 0xff] ^ - rk[1]; - s2 = - Td0[(t2 >> 24) ] ^ - Td1[(t1 >> 16) & 0xff] ^ - Td2[(t0 >> 8) & 0xff] ^ - Td3[(t3 ) & 0xff] ^ - rk[2]; - s3 = - Td0[(t3 >> 24) ] ^ - Td1[(t2 >> 16) & 0xff] ^ - Td2[(t1 >> 8) & 0xff] ^ - Td3[(t0 ) & 0xff] ^ - rk[3]; - } -#endif /* ?FULL_UNROLL */ - /* - * apply last round and - * map cipher state to byte array block: - */ - s0 = - (Td4[(t0 >> 24) ] & 0xff000000) ^ - (Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(t1 ) & 0xff] & 0x000000ff) ^ - rk[0]; - PUTU32(pt , s0); - s1 = - (Td4[(t1 >> 24) ] & 0xff000000) ^ - (Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(t2 ) & 0xff] & 0x000000ff) ^ - rk[1]; - PUTU32(pt + 4, s1); - s2 = - (Td4[(t2 >> 24) ] & 0xff000000) ^ - (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(t3 ) & 0xff] & 0x000000ff) ^ - rk[2]; - PUTU32(pt + 8, s2); - s3 = - (Td4[(t3 >> 24) ] & 0xff000000) ^ - (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(t0 ) & 0xff] & 0x000000ff) ^ - rk[3]; - PUTU32(pt + 12, s3); -} - -#ifdef INTERMEDIATE_VALUE_KAT - -/* - * __db_rijndaelEncryptRound -- - * - * PUBLIC: void __db_rijndaelEncryptRound __P((const u32 *, int, u8 *, int)); - */ -void -__db_rijndaelEncryptRound(rk, Nr, pt, ct) - const u32 *rk; /* rk[4*(Nr + 1)] */ - int Nr; - u8 *block; - int rounds; -{ - int r; - u32 s0, s1, s2, s3, t0, t1, t2, t3; - - /* - * map byte array block to cipher state - * and add initial round key: - */ - s0 = GETU32(block ) ^ rk[0]; - s1 = GETU32(block + 4) ^ rk[1]; - s2 = GETU32(block + 8) ^ rk[2]; - s3 = GETU32(block + 12) ^ rk[3]; - rk += 4; - - /* - * Nr - 1 full rounds: - */ - for (r = (rounds < Nr ? 
rounds : Nr - 1); r > 0; r--) { - t0 = - Te0[(s0 >> 24) ] ^ - Te1[(s1 >> 16) & 0xff] ^ - Te2[(s2 >> 8) & 0xff] ^ - Te3[(s3 ) & 0xff] ^ - rk[0]; - t1 = - Te0[(s1 >> 24) ] ^ - Te1[(s2 >> 16) & 0xff] ^ - Te2[(s3 >> 8) & 0xff] ^ - Te3[(s0 ) & 0xff] ^ - rk[1]; - t2 = - Te0[(s2 >> 24) ] ^ - Te1[(s3 >> 16) & 0xff] ^ - Te2[(s0 >> 8) & 0xff] ^ - Te3[(s1 ) & 0xff] ^ - rk[2]; - t3 = - Te0[(s3 >> 24) ] ^ - Te1[(s0 >> 16) & 0xff] ^ - Te2[(s1 >> 8) & 0xff] ^ - Te3[(s2 ) & 0xff] ^ - rk[3]; - - s0 = t0; - s1 = t1; - s2 = t2; - s3 = t3; - rk += 4; - - } - - /* - * apply last round and - * map cipher state to byte array block: - */ - if (rounds == Nr) { - t0 = - (Te4[(s0 >> 24) ] & 0xff000000) ^ - (Te4[(s1 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(s2 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(s3 ) & 0xff] & 0x000000ff) ^ - rk[0]; - t1 = - (Te4[(s1 >> 24) ] & 0xff000000) ^ - (Te4[(s2 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(s3 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(s0 ) & 0xff] & 0x000000ff) ^ - rk[1]; - t2 = - (Te4[(s2 >> 24) ] & 0xff000000) ^ - (Te4[(s3 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(s0 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(s1 ) & 0xff] & 0x000000ff) ^ - rk[2]; - t3 = - (Te4[(s3 >> 24) ] & 0xff000000) ^ - (Te4[(s0 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(s1 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(s2 ) & 0xff] & 0x000000ff) ^ - rk[3]; - - s0 = t0; - s1 = t1; - s2 = t2; - s3 = t3; - } - - PUTU32(block , s0); - PUTU32(block + 4, s1); - PUTU32(block + 8, s2); - PUTU32(block + 12, s3); -} - -/* - * __db_rijndaelDecryptRound -- - * - * PUBLIC: void __db_rijndaelDecryptRound __P((const u32 *, int, u8 *, int)); - */ -void -__db_rijndaelDecryptRound(rk, Nr, pt, ct) - const u32 *rk; /* rk[4*(Nr + 1)] */ - int Nr; - u8 *block; - int rounds; -{ - int r; - u32 s0, s1, s2, s3, t0, t1, t2, t3; - - /* - * map byte array block to cipher state - * and add initial round key: - */ - s0 = GETU32(block ) ^ rk[0]; - s1 = GETU32(block + 4) ^ rk[1]; - s2 = GETU32(block + 8) ^ rk[2]; - s3 = GETU32(block + 12) ^ rk[3]; - rk += 4; - - /* - * Nr - 1 full rounds: - */ - for (r = (rounds < Nr ? 
rounds : Nr) - 1; r > 0; r--) { - t0 = - Td0[(s0 >> 24) ] ^ - Td1[(s3 >> 16) & 0xff] ^ - Td2[(s2 >> 8) & 0xff] ^ - Td3[(s1 ) & 0xff] ^ - rk[0]; - t1 = - Td0[(s1 >> 24) ] ^ - Td1[(s0 >> 16) & 0xff] ^ - Td2[(s3 >> 8) & 0xff] ^ - Td3[(s2 ) & 0xff] ^ - rk[1]; - t2 = - Td0[(s2 >> 24) ] ^ - Td1[(s1 >> 16) & 0xff] ^ - Td2[(s0 >> 8) & 0xff] ^ - Td3[(s3 ) & 0xff] ^ - rk[2]; - t3 = - Td0[(s3 >> 24) ] ^ - Td1[(s2 >> 16) & 0xff] ^ - Td2[(s1 >> 8) & 0xff] ^ - Td3[(s0 ) & 0xff] ^ - rk[3]; - - s0 = t0; - s1 = t1; - s2 = t2; - s3 = t3; - rk += 4; - - } - - /* - * complete the last round and - * map cipher state to byte array block: - */ - t0 = - (Td4[(s0 >> 24) ] & 0xff000000) ^ - (Td4[(s3 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(s2 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(s1 ) & 0xff] & 0x000000ff); - t1 = - (Td4[(s1 >> 24) ] & 0xff000000) ^ - (Td4[(s0 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(s3 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(s2 ) & 0xff] & 0x000000ff); - t2 = - (Td4[(s2 >> 24) ] & 0xff000000) ^ - (Td4[(s1 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(s0 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(s3 ) & 0xff] & 0x000000ff); - t3 = - (Td4[(s3 >> 24) ] & 0xff000000) ^ - (Td4[(s2 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(s1 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(s0 ) & 0xff] & 0x000000ff); - - if (rounds == Nr) { - t0 ^= rk[0]; - t1 ^= rk[1]; - t2 ^= rk[2]; - t3 ^= rk[3]; - } - - PUTU32(block , t0); - PUTU32(block + 4, t1); - PUTU32(block + 8, t2); - PUTU32(block + 12, t3); -} - -#endif /* INTERMEDIATE_VALUE_KAT */ diff --git a/storage/bdb/crypto/rijndael/rijndael-alg-fst.h b/storage/bdb/crypto/rijndael/rijndael-alg-fst.h deleted file mode 100644 index 60c01212764..00000000000 --- a/storage/bdb/crypto/rijndael/rijndael-alg-fst.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * $Id: rijndael-alg-fst.h,v 12.0 2004/11/17 03:43:17 bostic Exp $ - */ -/** - * rijndael-alg-fst.h - * - * @version 3.0 (December 2000) - * - * Optimised ANSI C code for the Rijndael cipher (now AES) - * - * @author Vincent Rijmen - * @author Antoon Bosselaers - * @author Paulo Barreto - * - * This code is hereby placed in the public domain. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#ifndef __RIJNDAEL_ALG_FST_H -#define __RIJNDAEL_ALG_FST_H - -#define MAXKC (256/32) -#define MAXKB (256/8) -#define MAXNR 14 - -typedef u_int8_t u8; -typedef u_int16_t u16; -typedef u_int32_t u32; - -#endif /* __RIJNDAEL_ALG_FST_H */ diff --git a/storage/bdb/crypto/rijndael/rijndael-api-fst.c b/storage/bdb/crypto/rijndael/rijndael-api-fst.c deleted file mode 100644 index 09475370f6b..00000000000 --- a/storage/bdb/crypto/rijndael/rijndael-api-fst.c +++ /dev/null @@ -1,496 +0,0 @@ -/** - * rijndael-api-fst.c - * - * @version 2.9 (December 2000) - * - * Optimised ANSI C code for the Rijndael cipher (now AES) - * - * @author Vincent Rijmen - * @author Antoon Bosselaers - * @author Paulo Barreto - * - * This code is hereby placed in the public domain. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Acknowledgements: - * - * We are deeply indebted to the following people for their bug reports, - * fixes, and improvement suggestions to this implementation. Though we - * tried to list all contributions, we apologise in advance for any - * missing reference. 
- * - * Andrew Bales - * Markus Friedl - * John Skodon - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/crypto.h" - -#include "crypto/rijndael/rijndael-alg-fst.h" -#include "crypto/rijndael/rijndael-api-fst.h" - -/* - * __db_makeKey -- - * - * PUBLIC: int __db_makeKey __P((keyInstance *, int, int, char *)); - */ -int -__db_makeKey(key, direction, keyLen, keyMaterial) - keyInstance *key; - int direction; - int keyLen; - char *keyMaterial; -{ - u8 cipherKey[MAXKB]; - - if (key == NULL) { - return BAD_KEY_INSTANCE; - } - - if ((direction == DIR_ENCRYPT) || (direction == DIR_DECRYPT)) { - key->direction = direction; - } else { - return BAD_KEY_DIR; - } - - if ((keyLen == 128) || (keyLen == 192) || (keyLen == 256)) { - key->keyLen = keyLen; - } else { - return BAD_KEY_MAT; - } - - if (keyMaterial != NULL) { - memcpy(cipherKey, keyMaterial, key->keyLen/8); - } - - if (direction == DIR_ENCRYPT) { - key->Nr = __db_rijndaelKeySetupEnc(key->rk, cipherKey, keyLen); - } else { - key->Nr = __db_rijndaelKeySetupDec(key->rk, cipherKey, keyLen); - } - __db_rijndaelKeySetupEnc(key->ek, cipherKey, keyLen); - return TRUE; -} - -/* - * __db_cipherInit -- - * - * PUBLIC: int __db_cipherInit __P((cipherInstance *, int, char *)); - */ -int -__db_cipherInit(cipher, mode, IV) - cipherInstance *cipher; - int mode; - char *IV; -{ - if ((mode == MODE_ECB) || (mode == MODE_CBC) || (mode == MODE_CFB1)) { - cipher->mode = mode; - } else { - return BAD_CIPHER_MODE; - } - if (IV != NULL) { - memcpy(cipher->IV, IV, MAX_IV_SIZE); - } - return TRUE; -} - -/* - * __db_blockEncrypt -- - * - * PUBLIC: int __db_blockEncrypt __P((cipherInstance *, keyInstance *, u_int8_t *, - * PUBLIC: size_t, u_int8_t *)); - */ -int -__db_blockEncrypt(cipher, key, input, inputLen, outBuffer) - cipherInstance *cipher; - keyInstance *key; - u_int8_t *input; - size_t inputLen; - u_int8_t *outBuffer; -{ - int i, k, t, numBlocks; - u8 block[16], *iv; - u32 tmpiv[4]; - - if (cipher == NULL || - key == NULL || - key->direction == DIR_DECRYPT) { - return BAD_CIPHER_STATE; - } - if (input == NULL || inputLen <= 0) { - return 0; /* nothing to do */ - } - - numBlocks = (int)(inputLen/128); - - switch (cipher->mode) { - case MODE_ECB: - for (i = numBlocks; i > 0; i--) { - __db_rijndaelEncrypt(key->rk, key->Nr, input, outBuffer); - input += 16; - outBuffer += 16; - } - break; - - case MODE_CBC: - iv = cipher->IV; - for (i = numBlocks; i > 0; i--) { - memcpy(tmpiv, iv, MAX_IV_SIZE); - ((u32*)block)[0] = ((u32*)input)[0] ^ tmpiv[0]; - ((u32*)block)[1] = ((u32*)input)[1] ^ tmpiv[1]; - ((u32*)block)[2] = ((u32*)input)[2] ^ tmpiv[2]; - ((u32*)block)[3] = ((u32*)input)[3] ^ tmpiv[3]; - __db_rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); - iv = outBuffer; - input += 16; - outBuffer += 16; - } - break; - - case MODE_CFB1: - iv = cipher->IV; - for (i = numBlocks; i > 0; i--) { - memcpy(outBuffer, input, 16); - for (k = 0; k < 128; k++) { - __db_rijndaelEncrypt(key->ek, key->Nr, iv, block); - outBuffer[k >> 3] ^= (block[0] & (u_int)0x80) >> (k & 7); - for (t = 0; t < 15; t++) { - iv[t] = (iv[t] << 1) | (iv[t + 1] >> 7); - } - iv[15] = (iv[15] << 1) | ((outBuffer[k >> 3] >> (7 - (k & 7))) & 1); - } - outBuffer += 16; - input += 16; - } - break; - - default: - return BAD_CIPHER_STATE; - } - - return 128*numBlocks; -} - -/** - * Encrypt data partitioned in octets, using RFC 2040-like padding. 
- * - * @param input data to be encrypted (octet sequence) - * @param inputOctets input length in octets (not bits) - * @param outBuffer encrypted output data - * - * @return length in octets (not bits) of the encrypted output buffer. - */ -/* - * __db_padEncrypt -- - * - * PUBLIC: int __db_padEncrypt __P((cipherInstance *, keyInstance *, u_int8_t *, - * PUBLIC: int, u_int8_t *)); - */ -int -__db_padEncrypt(cipher, key, input, inputOctets, outBuffer) - cipherInstance *cipher; - keyInstance *key; - u_int8_t *input; - int inputOctets; - u_int8_t *outBuffer; -{ - int i, numBlocks, padLen; - u8 block[16], *iv; - u32 tmpiv[4]; - - if (cipher == NULL || - key == NULL || - key->direction == DIR_DECRYPT) { - return BAD_CIPHER_STATE; - } - if (input == NULL || inputOctets <= 0) { - return 0; /* nothing to do */ - } - - numBlocks = inputOctets/16; - - switch (cipher->mode) { - case MODE_ECB: - for (i = numBlocks; i > 0; i--) { - __db_rijndaelEncrypt(key->rk, key->Nr, input, outBuffer); - input += 16; - outBuffer += 16; - } - padLen = 16 - (inputOctets - 16*numBlocks); - DB_ASSERT(padLen > 0 && padLen <= 16); - memcpy(block, input, 16 - padLen); - memset(block + 16 - padLen, padLen, padLen); - __db_rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); - break; - - case MODE_CBC: - iv = cipher->IV; - for (i = numBlocks; i > 0; i--) { - memcpy(tmpiv, iv, MAX_IV_SIZE); - ((u32*)block)[0] = ((u32*)input)[0] ^ tmpiv[0]; - ((u32*)block)[1] = ((u32*)input)[1] ^ tmpiv[1]; - ((u32*)block)[2] = ((u32*)input)[2] ^ tmpiv[2]; - ((u32*)block)[3] = ((u32*)input)[3] ^ tmpiv[3]; - __db_rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); - iv = outBuffer; - input += 16; - outBuffer += 16; - } - padLen = 16 - (inputOctets - 16*numBlocks); - DB_ASSERT(padLen > 0 && padLen <= 16); - for (i = 0; i < 16 - padLen; i++) { - block[i] = input[i] ^ iv[i]; - } - for (i = 16 - padLen; i < 16; i++) { - block[i] = (u_int8_t)padLen ^ iv[i]; - } - __db_rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); - break; - - default: - return BAD_CIPHER_STATE; - } - - return 16*(numBlocks + 1); -} - -/* - * __db_blockDecrypt -- - * - * PUBLIC: int __db_blockDecrypt __P((cipherInstance *, keyInstance *, u_int8_t *, - * PUBLIC: size_t, u_int8_t *)); - */ -int -__db_blockDecrypt(cipher, key, input, inputLen, outBuffer) - cipherInstance *cipher; - keyInstance *key; - u_int8_t *input; - size_t inputLen; - u_int8_t *outBuffer; -{ - int i, k, t, numBlocks; - u8 block[16], *iv; - u32 tmpiv[4]; - - if (cipher == NULL || - key == NULL || - (cipher->mode != MODE_CFB1 && key->direction == DIR_ENCRYPT)) { - return BAD_CIPHER_STATE; - } - if (input == NULL || inputLen <= 0) { - return 0; /* nothing to do */ - } - - numBlocks = (int)(inputLen/128); - - switch (cipher->mode) { - case MODE_ECB: - for (i = numBlocks; i > 0; i--) { - __db_rijndaelDecrypt(key->rk, key->Nr, input, outBuffer); - input += 16; - outBuffer += 16; - } - break; - - case MODE_CBC: - memcpy(tmpiv, cipher->IV, MAX_IV_SIZE); - for (i = numBlocks; i > 0; i--) { - __db_rijndaelDecrypt(key->rk, key->Nr, input, block); - ((u32*)block)[0] ^= tmpiv[0]; - ((u32*)block)[1] ^= tmpiv[1]; - ((u32*)block)[2] ^= tmpiv[2]; - ((u32*)block)[3] ^= tmpiv[3]; - memcpy(tmpiv, input, 16); - memcpy(outBuffer, block, 16); - input += 16; - outBuffer += 16; - } - break; - - case MODE_CFB1: - iv = cipher->IV; - for (i = numBlocks; i > 0; i--) { - memcpy(outBuffer, input, 16); - for (k = 0; k < 128; k++) { - __db_rijndaelEncrypt(key->ek, key->Nr, iv, block); - for (t = 0; t < 15; t++) { - iv[t] = (iv[t] << 1) | 
(iv[t + 1] >> 7); - } - iv[15] = (iv[15] << 1) | ((input[k >> 3] >> (7 - (k & 7))) & 1); - outBuffer[k >> 3] ^= (block[0] & (u_int)0x80) >> (k & 7); - } - outBuffer += 16; - input += 16; - } - break; - - default: - return BAD_CIPHER_STATE; - } - - return 128*numBlocks; -} - -/* - * __db_padDecrypt -- - * - * PUBLIC: int __db_padDecrypt __P((cipherInstance *, keyInstance *, u_int8_t *, - * PUBLIC: int, u_int8_t *)); - */ -int -__db_padDecrypt(cipher, key, input, inputOctets, outBuffer) - cipherInstance *cipher; - keyInstance *key; - u_int8_t *input; - int inputOctets; - u_int8_t *outBuffer; -{ - int i, numBlocks, padLen; - u8 block[16]; - u32 tmpiv[4]; - - if (cipher == NULL || - key == NULL || - key->direction == DIR_ENCRYPT) { - return BAD_CIPHER_STATE; - } - if (input == NULL || inputOctets <= 0) { - return 0; /* nothing to do */ - } - if (inputOctets % 16 != 0) { - return BAD_DATA; - } - - numBlocks = inputOctets/16; - - switch (cipher->mode) { - case MODE_ECB: - /* all blocks but last */ - for (i = numBlocks - 1; i > 0; i--) { - __db_rijndaelDecrypt(key->rk, key->Nr, input, outBuffer); - input += 16; - outBuffer += 16; - } - /* last block */ - __db_rijndaelDecrypt(key->rk, key->Nr, input, block); - padLen = block[15]; - if (padLen >= 16) { - return BAD_DATA; - } - for (i = 16 - padLen; i < 16; i++) { - if (block[i] != padLen) { - return BAD_DATA; - } - } - memcpy(outBuffer, block, 16 - padLen); - break; - - case MODE_CBC: - /* all blocks but last */ - memcpy(tmpiv, cipher->IV, MAX_IV_SIZE); - for (i = numBlocks - 1; i > 0; i--) { - __db_rijndaelDecrypt(key->rk, key->Nr, input, block); - ((u32*)block)[0] ^= tmpiv[0]; - ((u32*)block)[1] ^= tmpiv[1]; - ((u32*)block)[2] ^= tmpiv[2]; - ((u32*)block)[3] ^= tmpiv[3]; - memcpy(tmpiv, input, 16); - memcpy(outBuffer, block, 16); - input += 16; - outBuffer += 16; - } - /* last block */ - __db_rijndaelDecrypt(key->rk, key->Nr, input, block); - ((u32*)block)[0] ^= tmpiv[0]; - ((u32*)block)[1] ^= tmpiv[1]; - ((u32*)block)[2] ^= tmpiv[2]; - ((u32*)block)[3] ^= tmpiv[3]; - padLen = block[15]; - if (padLen <= 0 || padLen > 16) { - return BAD_DATA; - } - for (i = 16 - padLen; i < 16; i++) { - if (block[i] != padLen) { - return BAD_DATA; - } - } - memcpy(outBuffer, block, 16 - padLen); - break; - - default: - return BAD_CIPHER_STATE; - } - - return 16*numBlocks - padLen; -} - -#ifdef INTERMEDIATE_VALUE_KAT -/** - * cipherUpdateRounds: - * - * Encrypts/Decrypts exactly one full block a specified number of rounds. - * Only used in the Intermediate Value Known Answer Test. 
- * - * Returns: - * TRUE - on success - * BAD_CIPHER_STATE - cipher in bad state (e.g., not initialized) - */ -/* - * __db_cipherUpdateRounds -- - * - * PUBLIC: int __db_cipherUpdateRounds __P((cipherInstance *, keyInstance *, - * PUBLIC: u_int8_t *, int, u_int8_t *, int)); - */ -int -__db_cipherUpdateRounds(cipher, key, input, inputLen, outBuffer, rounds) - cipherInstance *cipher; - keyInstance *key; - u_int8_t *input; - size_t inputLen; - u_int8_t *outBuffer; - int rounds; -{ - u8 block[16]; - - if (cipher == NULL || key == NULL) { - return BAD_CIPHER_STATE; - } - - memcpy(block, input, 16); - - switch (key->direction) { - case DIR_ENCRYPT: - __db_rijndaelEncryptRound(key->rk, key->Nr, block, rounds); - break; - - case DIR_DECRYPT: - __db_rijndaelDecryptRound(key->rk, key->Nr, block, rounds); - break; - - default: - return BAD_KEY_DIR; - } - - memcpy(outBuffer, block, 16); - - return TRUE; -} -#endif /* INTERMEDIATE_VALUE_KAT */ diff --git a/storage/bdb/crypto/rijndael/rijndael-api-fst.h b/storage/bdb/crypto/rijndael/rijndael-api-fst.h deleted file mode 100644 index caf0abc4aa7..00000000000 --- a/storage/bdb/crypto/rijndael/rijndael-api-fst.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * $Id: rijndael-api-fst.h,v 12.0 2004/11/17 03:43:17 bostic Exp $ - */ -/** - * rijndael-api-fst.h - * - * @version 2.9 (December 2000) - * - * Optimised ANSI C code for the Rijndael cipher (now AES) - * - * @author Vincent Rijmen - * @author Antoon Bosselaers - * @author Paulo Barreto - * - * This code is hereby placed in the public domain. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Acknowledgements: - * - * We are deeply indebted to the following people for their bug reports, - * fixes, and improvement suggestions to this implementation. Though we - * tried to list all contributions, we apologise in advance for any - * missing reference. - * - * Andrew Bales - * Markus Friedl - * John Skodon - */ - -#ifndef __RIJNDAEL_API_FST_H -#define __RIJNDAEL_API_FST_H - -#include "crypto/rijndael/rijndael-alg-fst.h" - -/* Generic Defines */ -#define DIR_ENCRYPT 0 /* Are we encrpyting? */ -#define DIR_DECRYPT 1 /* Are we decrpyting? */ -#define MODE_ECB 1 /* Are we ciphering in ECB mode? */ -#define MODE_CBC 2 /* Are we ciphering in CBC mode? */ -#define MODE_CFB1 3 /* Are we ciphering in 1-bit CFB mode? 
*/ -#undef TRUE -#define TRUE 1 -#undef FALSE -#define FALSE 0 -#define BITSPERBLOCK 128 /* Default number of bits in a cipher block */ - -/* Error Codes */ -#define BAD_KEY_DIR -1 /* Key direction is invalid, e.g., unknown value */ -#define BAD_KEY_MAT -2 /* Key material not of correct length */ -#define BAD_KEY_INSTANCE -3 /* Key passed is not valid */ -#define BAD_CIPHER_MODE -4 /* Params struct passed to cipherInit invalid */ -#define BAD_CIPHER_STATE -5 /* Cipher in wrong state (e.g., not initialized) */ -#define BAD_BLOCK_LENGTH -6 -#define BAD_CIPHER_INSTANCE -7 -#define BAD_DATA -8 /* Data contents are invalid, e.g., invalid padding */ -#define BAD_OTHER -9 /* Unknown error */ - -/* Algorithm-specific Defines */ -#define MAX_KEY_SIZE 64 /* # of ASCII char's needed to represent a key */ -#define MAX_IV_SIZE 16 /* # bytes needed to represent an IV */ - -/* Typedefs */ - -/* The structure for key information */ -typedef struct { - u_int8_t direction; /* Key used for encrypting or decrypting? */ - int keyLen; /* Length of the key */ - char keyMaterial[MAX_KEY_SIZE+1]; /* Raw key data in ASCII, e.g., user input or KAT values */ - int Nr; /* key-length-dependent number of rounds */ - u32 rk[4*(MAXNR + 1)]; /* key schedule */ - u32 ek[4*(MAXNR + 1)]; /* CFB1 key schedule (encryption only) */ -} keyInstance; - -/* The structure for cipher information */ -typedef struct { /* changed order of the components */ - u_int8_t mode; /* MODE_ECB, MODE_CBC, or MODE_CFB1 */ - u_int8_t IV[MAX_IV_SIZE]; /* A possible Initialization Vector for ciphering */ -} cipherInstance; - -#endif /* __RIJNDAEL_API_FST_H */ diff --git a/storage/bdb/cxx/cxx_db.cpp b/storage/bdb/cxx/cxx_db.cpp deleted file mode 100644 index 03e07f4d238..00000000000 --- a/storage/bdb/cxx/cxx_db.cpp +++ /dev/null @@ -1,657 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_db.cpp,v 12.4 2005/10/18 14:25:53 mjc Exp $ - */ - -#include "db_config.h" - -#include -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc_auto/db_auto.h" -#include "dbinc_auto/crdel_auto.h" -#include "dbinc/db_dispatch.h" -#include "dbinc_auto/db_ext.h" -#include "dbinc_auto/common_ext.h" - -// Helper macros for simple methods that pass through to the -// underlying C method. It may return an error or raise an exception. 
-// Note this macro expects that input _argspec is an argument -// list element (e.g., "char *arg") and that _arglist is the arguments -// that should be passed through to the C method (e.g., "(db, arg)") -// -#define DB_METHOD(_name, _argspec, _arglist, _retok) \ -int Db::_name _argspec \ -{ \ - int ret; \ - DB *db = unwrap(this); \ - \ - ret = db->_name _arglist; \ - if (!_retok(ret)) \ - DB_ERROR(env_, "Db::" # _name, ret, error_policy()); \ - return (ret); \ -} - -#define DB_DESTRUCTOR(_name, _argspec, _arglist, _retok) \ -int Db::_name _argspec \ -{ \ - int ret; \ - DB *db = unwrap(this); \ - \ - if (!db) { \ - DB_ERROR(env_, "Db::" # _name, EINVAL, error_policy()); \ - return (EINVAL); \ - } \ - cleanup(); \ - ret = db->_name _arglist; \ - if (!_retok(ret)) \ - DB_ERROR(env_, "Db::" # _name, ret, error_policy()); \ - return (ret); \ -} - -#define DB_METHOD_QUIET(_name, _argspec, _arglist) \ -int Db::_name _argspec \ -{ \ - DB *db = unwrap(this); \ - \ - return (db->_name _arglist); \ -} - -#define DB_METHOD_VOID(_name, _argspec, _arglist) \ -void Db::_name _argspec \ -{ \ - DB *db = unwrap(this); \ - \ - db->_name _arglist; \ -} - -// A truism for the Db object is that there is a valid -// DB handle from the constructor until close(). -// After the close, the DB handle is invalid and -// no operations are permitted on the Db (other than -// destructor). Leaving the Db handle open and not -// doing a close is generally considered an error. -// -// We used to allow Db objects to be closed and reopened. -// This implied always keeping a valid DB object, and -// coordinating the open objects between Db/DbEnv turned -// out to be overly complicated. Now we do not allow this. - -Db::Db(DbEnv *env, u_int32_t flags) -: imp_(0) -, env_(env) -, mpf_(0) -, construct_error_(0) -, flags_(0) -, construct_flags_(flags) -, append_recno_callback_(0) -, associate_callback_(0) -, bt_compare_callback_(0) -, bt_prefix_callback_(0) -, dup_compare_callback_(0) -, feedback_callback_(0) -, h_hash_callback_(0) -{ - if (env_ == 0) - flags_ |= DB_CXX_PRIVATE_ENV; - - if ((construct_error_ = initialize()) != 0) - DB_ERROR(env_, "Db::Db", construct_error_, error_policy()); -} - -// If the DB handle is still open, we close it. This is to make stack -// allocation of Db objects easier so that they are cleaned up in the error -// path. If the environment was closed prior to this, it may cause a trap, but -// an error message is generated during the environment close. Applications -// should call close explicitly in normal (non-exceptional) cases to check the -// return value. -// -Db::~Db() -{ - DB *db; - - db = unwrap(this); - if (db != NULL) { - cleanup(); - (void)db->close(db, 0); - } -} - -// private method to initialize during constructor. -// initialize must create a backing DB object, -// and if that creates a new DB_ENV, it must be tied to a new DbEnv. -// -int Db::initialize() -{ - DB *db; - DB_ENV *cenv = unwrap(env_); - int ret; - u_int32_t cxx_flags; - - cxx_flags = construct_flags_ & DB_CXX_NO_EXCEPTIONS; - - // Create a new underlying DB object. - // We rely on the fact that if a NULL DB_ENV* is given, - // one is allocated by DB. - // - if ((ret = db_create(&db, cenv, - construct_flags_ & ~cxx_flags)) != 0) - return (ret); - - // Associate the DB with this object - imp_ = db; - db->api_internal = this; - - // Create a new DbEnv from a DB_ENV* if it was created locally. - // It is deleted in Db::close(). 
- // - if ((flags_ & DB_CXX_PRIVATE_ENV) != 0) - env_ = new DbEnv(db->dbenv, cxx_flags); - - // Create a DbMpoolFile from the DB_MPOOLFILE* in the DB handle. - mpf_ = new DbMpoolFile(); - mpf_->imp_ = db->mpf; - - return (0); -} - -// private method to cleanup after destructor or during close. -// If the environment was created by this Db object, we optionally -// delete it, or return it so the caller can delete it after -// last use. -// -void Db::cleanup() -{ - DB *db = unwrap(this); - - if (db != NULL) { - // extra safety - imp_ = 0; - - // we must dispose of the DbEnv object if - // we created it. This will be the case - // if a NULL DbEnv was passed into the constructor. - // The underlying DB_ENV object will be inaccessible - // after the close, so we must clean it up now. - // - if ((flags_ & DB_CXX_PRIVATE_ENV) != 0) { - env_->cleanup(); - delete env_; - env_ = 0; - } - - delete mpf_; - } -} - -// Return a tristate value corresponding to whether we should -// throw exceptions on errors: -// ON_ERROR_RETURN -// ON_ERROR_THROW -// ON_ERROR_UNKNOWN -// -int Db::error_policy() -{ - if (env_ != NULL) - return (env_->error_policy()); - else { - // If the env_ is null, that means that the user - // did not attach an environment, so the correct error - // policy can be deduced from constructor flags - // for this Db. - // - if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) { - return (ON_ERROR_RETURN); - } - else { - return (ON_ERROR_THROW); - } - } -} - -DB_DESTRUCTOR(close, (u_int32_t flags), (db, flags), DB_RETOK_STD) -DB_METHOD(compact, (DbTxn *txnid, Dbt *start, Dbt *stop, - DB_COMPACT *c_data, u_int32_t flags, Dbt *end), - (db, unwrap(txnid), start, stop, c_data, flags, end), DB_RETOK_STD) - -// The following cast implies that Dbc can be no larger than DBC -DB_METHOD(cursor, (DbTxn *txnid, Dbc **cursorp, u_int32_t flags), - (db, unwrap(txnid), (DBC **)cursorp, flags), - DB_RETOK_STD) - -DB_METHOD(del, (DbTxn *txnid, Dbt *key, u_int32_t flags), - (db, unwrap(txnid), key, flags), - DB_RETOK_DBDEL) - -void Db::err(int error, const char *format, ...) -{ - DB *db = unwrap(this); - - DB_REAL_ERR(db->dbenv, error, 1, 1, format); -} - -void Db::errx(const char *format, ...) -{ - DB *db = unwrap(this); - - DB_REAL_ERR(db->dbenv, 0, 0, 1, format); -} - -DB_METHOD(fd, (int *fdp), (db, fdp), DB_RETOK_STD) - -int Db::get(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags) -{ - DB *db = unwrap(this); - int ret; - - ret = db->get(db, unwrap(txnid), key, value, flags); - - if (!DB_RETOK_DBGET(ret)) { - if (ret == DB_BUFFER_SMALL) - DB_ERROR_DBT(env_, "Db::get", value, error_policy()); - else - DB_ERROR(env_, "Db::get", ret, error_policy()); - } - - return (ret); -} - -int Db::get_byteswapped(int *isswapped) -{ - DB *db = (DB *)unwrapConst(this); - return (db->get_byteswapped(db, isswapped)); -} - -DbEnv *Db::get_env() -{ - DB *db = (DB *)unwrapConst(this); - DB_ENV *dbenv = db->get_env(db); - return (dbenv != NULL ? DbEnv::get_DbEnv(dbenv) : NULL); -} - -DbMpoolFile *Db::get_mpf() -{ - return (mpf_); -} - -DB_METHOD(get_dbname, (const char **filenamep, const char **dbnamep), - (db, filenamep, dbnamep), DB_RETOK_STD) - -DB_METHOD(get_open_flags, (u_int32_t *flagsp), (db, flagsp), DB_RETOK_STD) - -int Db::get_type(DBTYPE *dbtype) -{ - DB *db = (DB *)unwrapConst(this); - return (db->get_type(db, dbtype)); -} - -// Dbc is a "compatible" subclass of DBC - that is, no virtual functions -// or even extra data members, so these casts, although technically -// non-portable, "should" always be okay. 
-DB_METHOD(join, (Dbc **curslist, Dbc **cursorp, u_int32_t flags), - (db, (DBC **)curslist, (DBC **)cursorp, flags), DB_RETOK_STD) - -DB_METHOD(key_range, - (DbTxn *txnid, Dbt *key, DB_KEY_RANGE *results, u_int32_t flags), - (db, unwrap(txnid), key, results, flags), DB_RETOK_STD) - -// If an error occurred during the constructor, report it now. -// Otherwise, call the underlying DB->open method. -// -int Db::open(DbTxn *txnid, const char *file, const char *database, - DBTYPE type, u_int32_t flags, int mode) -{ - int ret; - DB *db = unwrap(this); - - if (construct_error_ != 0) - ret = construct_error_; - else - ret = db->open(db, unwrap(txnid), file, database, type, flags, - mode); - - if (!DB_RETOK_STD(ret)) - DB_ERROR(env_, "Db::open", ret, error_policy()); - - return (ret); -} - -int Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *value, u_int32_t flags) -{ - DB *db = unwrap(this); - int ret; - - ret = db->pget(db, unwrap(txnid), key, pkey, value, flags); - - /* The logic here is identical to Db::get - reuse the macro. */ - if (!DB_RETOK_DBGET(ret)) { - if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(value)) - DB_ERROR_DBT(env_, "Db::pget", value, error_policy()); - else - DB_ERROR(env_, "Db::pget", ret, error_policy()); - } - - return (ret); -} - -DB_METHOD(put, (DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags), - (db, unwrap(txnid), key, value, flags), DB_RETOK_DBPUT) - -DB_DESTRUCTOR(rename, - (const char *file, const char *database, const char *newname, - u_int32_t flags), - (db, file, database, newname, flags), DB_RETOK_STD) - -DB_DESTRUCTOR(remove, (const char *file, const char *database, u_int32_t flags), - (db, file, database, flags), DB_RETOK_STD) - -DB_METHOD(truncate, (DbTxn *txnid, u_int32_t *countp, u_int32_t flags), - (db, unwrap(txnid), countp, flags), DB_RETOK_STD) - -DB_METHOD(stat, (DbTxn *txnid, void *sp, u_int32_t flags), - (db, unwrap(txnid), sp, flags), DB_RETOK_STD) - -DB_METHOD(stat_print, (u_int32_t flags), (db, flags), DB_RETOK_STD) - -DB_METHOD(sync, (u_int32_t flags), (db, flags), DB_RETOK_STD) - -DB_METHOD(upgrade, - (const char *name, u_int32_t flags), (db, name, flags), DB_RETOK_STD) - -//////////////////////////////////////////////////////////////////////// -// -// callbacks -// -// *_intercept_c are 'glue' functions that must be declared -// as extern "C" so to be typesafe. Using a C++ method, even -// a static class method with 'correct' arguments, will not pass -// the test; some picky compilers do not allow mixing of function -// pointers to 'C' functions with function pointers to C++ functions. -// -// One wart with this scheme is that the *_callback_ method pointer -// must be declared public to be accessible by the C intercept. -// It's possible to accomplish the goal without this, and with -// another public transfer method, but it's just too much overhead. -// These callbacks are supposed to be *fast*. -// -// The DBTs we receive in these callbacks from the C layer may be -// manufactured there, but we want to treat them as a Dbts. -// Technically speaking, these DBTs were not constructed as a Dbts, -// but it should be safe to cast them as such given that Dbt is a -// *very* thin extension of the DBT. That is, Dbt has no additional -// data elements, does not use virtual functions, virtual inheritance, -// multiple inheritance, RTI, or any other language feature that -// causes the structure to grow or be displaced. 
Although this may -// sound risky, a design goal of C++ is complete structure -// compatibility with C, and has the philosophy 'if you don't use it, -// you shouldn't incur the overhead'. If the C/C++ compilers you're -// using on a given machine do not have matching struct layouts, then -// a lot more things will be broken than just this. -// -// The alternative, creating a Dbt here in the callback, and populating -// it from the DBT, is just too slow and cumbersome to be very useful. - -// These macros avoid a lot of boilerplate code for callbacks - -#define DB_CALLBACK_C_INTERCEPT(_name, _rettype, _cargspec, \ - _return, _cxxargs) \ -extern "C" _rettype _db_##_name##_intercept_c _cargspec \ -{ \ - Db *cxxthis; \ - \ - DB_ASSERT(cthis != NULL); \ - cxxthis = Db::get_Db(cthis); \ - DB_ASSERT(cxxthis != NULL); \ - DB_ASSERT(cxxthis->_name##_callback_ != 0); \ - \ - _return (*cxxthis->_name##_callback_) _cxxargs; \ -} - -#define DB_SET_CALLBACK(_cxxname, _name, _cxxargspec, _cb) \ -int Db::_cxxname _cxxargspec \ -{ \ - DB *cthis = unwrap(this); \ - \ - _name##_callback_ = _cb; \ - return ((*(cthis->_cxxname))(cthis, \ - (_cb) ? _db_##_name##_intercept_c : NULL)); \ -} - -/* associate callback - doesn't quite fit the pattern because of the flags */ -DB_CALLBACK_C_INTERCEPT(associate, - int, (DB *cthis, const DBT *key, const DBT *data, DBT *retval), - return, (cxxthis, Dbt::get_const_Dbt(key), Dbt::get_const_Dbt(data), - Dbt::get_Dbt(retval))) - -int Db::associate(DbTxn *txn, Db *secondary, int (*callback)(Db *, const Dbt *, - const Dbt *, Dbt *), u_int32_t flags) -{ - DB *cthis = unwrap(this); - - /* Since the secondary Db is used as the first argument - * to the callback, we store the C++ callback on it - * rather than on 'this'. - */ - secondary->associate_callback_ = callback; - return ((*(cthis->associate))(cthis, unwrap(txn), unwrap(secondary), - (callback) ? 
_db_associate_intercept_c : NULL, flags)); -} - -DB_CALLBACK_C_INTERCEPT(feedback, - void, (DB *cthis, int opcode, int pct), - /* no return */ (void), (cxxthis, opcode, pct)) - -DB_SET_CALLBACK(set_feedback, feedback, - (void (*arg)(Db *cxxthis, int opcode, int pct)), arg) - -DB_CALLBACK_C_INTERCEPT(append_recno, - int, (DB *cthis, DBT *data, db_recno_t recno), - return, (cxxthis, Dbt::get_Dbt(data), recno)) - -DB_SET_CALLBACK(set_append_recno, append_recno, - (int (*arg)(Db *cxxthis, Dbt *data, db_recno_t recno)), arg) - -DB_CALLBACK_C_INTERCEPT(bt_compare, - int, (DB *cthis, const DBT *data1, const DBT *data2), - return, - (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2))) - -DB_SET_CALLBACK(set_bt_compare, bt_compare, - (int (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg) - -DB_CALLBACK_C_INTERCEPT(bt_prefix, - size_t, (DB *cthis, const DBT *data1, const DBT *data2), - return, - (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2))) - -DB_SET_CALLBACK(set_bt_prefix, bt_prefix, - (size_t (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg) - -DB_CALLBACK_C_INTERCEPT(dup_compare, - int, (DB *cthis, const DBT *data1, const DBT *data2), - return, - (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2))) - -DB_SET_CALLBACK(set_dup_compare, dup_compare, - (int (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg) - -DB_CALLBACK_C_INTERCEPT(h_hash, - u_int32_t, (DB *cthis, const void *data, u_int32_t len), - return, (cxxthis, data, len)) - -DB_SET_CALLBACK(set_h_hash, h_hash, - (u_int32_t (*arg)(Db *cxxthis, const void *data, u_int32_t len)), arg) - -// This is a 'glue' function declared as extern "C" so it will -// be compatible with picky compilers that do not allow mixing -// of function pointers to 'C' functions with function pointers -// to C++ functions. -// -extern "C" -int _verify_callback_c(void *handle, const void *str_arg) -{ - char *str; - __DB_STD(ostream) *out; - - str = (char *)str_arg; - out = (__DB_STD(ostream) *)handle; - - (*out) << str; - if (out->fail()) - return (EIO); - - return (0); -} - -int Db::verify(const char *name, const char *subdb, - __DB_STD(ostream) *ostr, u_int32_t flags) -{ - DB *db = unwrap(this); - int ret; - - if (!db) - ret = EINVAL; - else { - // after a DB->verify (no matter if success or failure), - // the underlying DB object must not be accessed, - // so we clean up in advance. 
- // - cleanup(); - - ret = __db_verify_internal(db, name, subdb, ostr, - _verify_callback_c, flags); - } - - if (!DB_RETOK_STD(ret)) - DB_ERROR(env_, "Db::verify", ret, error_policy()); - - return (ret); -} - -DB_METHOD(set_bt_compare, (bt_compare_fcn_type func), - (db, func), DB_RETOK_STD) -DB_METHOD(get_bt_minkey, (u_int32_t *bt_minkeyp), - (db, bt_minkeyp), DB_RETOK_STD) -DB_METHOD(set_bt_minkey, (u_int32_t bt_minkey), - (db, bt_minkey), DB_RETOK_STD) -DB_METHOD(set_bt_prefix, (bt_prefix_fcn_type func), - (db, func), DB_RETOK_STD) -DB_METHOD(set_dup_compare, (dup_compare_fcn_type func), - (db, func), DB_RETOK_STD) -DB_METHOD(get_encrypt_flags, (u_int32_t *flagsp), - (db, flagsp), DB_RETOK_STD) -DB_METHOD(set_encrypt, (const char *passwd, u_int32_t flags), - (db, passwd, flags), DB_RETOK_STD) -DB_METHOD_VOID(get_errfile, (FILE **errfilep), (db, errfilep)) -DB_METHOD_VOID(set_errfile, (FILE *errfile), (db, errfile)) -DB_METHOD_VOID(get_errpfx, (const char **errpfx), (db, errpfx)) -DB_METHOD_VOID(set_errpfx, (const char *errpfx), (db, errpfx)) -DB_METHOD(get_flags, (u_int32_t *flagsp), (db, flagsp), - DB_RETOK_STD) -DB_METHOD(set_flags, (u_int32_t flags), (db, flags), - DB_RETOK_STD) -DB_METHOD(get_h_ffactor, (u_int32_t *h_ffactorp), - (db, h_ffactorp), DB_RETOK_STD) -DB_METHOD(set_h_ffactor, (u_int32_t h_ffactor), - (db, h_ffactor), DB_RETOK_STD) -DB_METHOD(set_h_hash, (h_hash_fcn_type func), - (db, func), DB_RETOK_STD) -DB_METHOD(get_h_nelem, (u_int32_t *h_nelemp), - (db, h_nelemp), DB_RETOK_STD) -DB_METHOD(set_h_nelem, (u_int32_t h_nelem), - (db, h_nelem), DB_RETOK_STD) -DB_METHOD(get_lorder, (int *db_lorderp), (db, db_lorderp), - DB_RETOK_STD) -DB_METHOD(set_lorder, (int db_lorder), (db, db_lorder), - DB_RETOK_STD) -DB_METHOD_VOID(get_msgfile, (FILE **msgfilep), (db, msgfilep)) -DB_METHOD_VOID(set_msgfile, (FILE *msgfile), (db, msgfile)) -DB_METHOD(get_pagesize, (u_int32_t *db_pagesizep), - (db, db_pagesizep), DB_RETOK_STD) -DB_METHOD(set_pagesize, (u_int32_t db_pagesize), - (db, db_pagesize), DB_RETOK_STD) -DB_METHOD(get_re_delim, (int *re_delimp), - (db, re_delimp), DB_RETOK_STD) -DB_METHOD(set_re_delim, (int re_delim), - (db, re_delim), DB_RETOK_STD) -DB_METHOD(get_re_len, (u_int32_t *re_lenp), - (db, re_lenp), DB_RETOK_STD) -DB_METHOD(set_re_len, (u_int32_t re_len), - (db, re_len), DB_RETOK_STD) -DB_METHOD(get_re_pad, (int *re_padp), - (db, re_padp), DB_RETOK_STD) -DB_METHOD(set_re_pad, (int re_pad), - (db, re_pad), DB_RETOK_STD) -DB_METHOD(get_re_source, (const char **re_source), - (db, re_source), DB_RETOK_STD) -DB_METHOD(set_re_source, (const char *re_source), - (db, re_source), DB_RETOK_STD) -DB_METHOD(get_q_extentsize, (u_int32_t *extentsizep), - (db, extentsizep), DB_RETOK_STD) -DB_METHOD(set_q_extentsize, (u_int32_t extentsize), - (db, extentsize), DB_RETOK_STD) - -DB_METHOD_QUIET(set_alloc, (db_malloc_fcn_type malloc_fcn, - db_realloc_fcn_type realloc_fcn, db_free_fcn_type free_fcn), - (db, malloc_fcn, realloc_fcn, free_fcn)) - -void Db::set_errcall(void (*arg)(const DbEnv *, const char *, const char *)) -{ - env_->set_errcall(arg); -} - -void Db::set_msgcall(void (*arg)(const DbEnv *, const char *)) -{ - env_->set_msgcall(arg); -} - -void *Db::get_app_private() const -{ - return unwrapConst(this)->app_private; -} - -void Db::set_app_private(void *value) -{ - unwrap(this)->app_private = value; -} - -DB_METHOD(get_cachesize, (u_int32_t *gbytesp, u_int32_t *bytesp, int *ncachep), - (db, gbytesp, bytesp, ncachep), DB_RETOK_STD) -DB_METHOD(set_cachesize, (u_int32_t gbytes, 
u_int32_t bytes, int ncache), - (db, gbytes, bytes, ncache), DB_RETOK_STD) - -int Db::set_paniccall(void (*callback)(DbEnv *, int)) -{ - return (env_->set_paniccall(callback)); -} - -__DB_STD(ostream) *Db::get_error_stream() -{ - return env_->get_error_stream(); -} - -void Db::set_error_stream(__DB_STD(ostream) *error_stream) -{ - env_->set_error_stream(error_stream); -} - -__DB_STD(ostream) *Db::get_message_stream() -{ - return env_->get_message_stream(); -} - -void Db::set_message_stream(__DB_STD(ostream) *message_stream) -{ - env_->set_message_stream(message_stream); -} - -DB_METHOD_QUIET(get_transactional, (), (db)) diff --git a/storage/bdb/cxx/cxx_dbc.cpp b/storage/bdb/cxx/cxx_dbc.cpp deleted file mode 100644 index 8f73557222a..00000000000 --- a/storage/bdb/cxx/cxx_dbc.cpp +++ /dev/null @@ -1,121 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_dbc.cpp,v 12.2 2005/09/30 07:38:25 mjc Exp $ - */ - -#include "db_config.h" - -#include -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc_auto/db_auto.h" -#include "dbinc_auto/crdel_auto.h" -#include "dbinc/db_dispatch.h" -#include "dbinc_auto/db_ext.h" -#include "dbinc_auto/common_ext.h" - -// Helper macro for simple methods that pass through to the -// underlying C method. It may return an error or raise an exception. -// Note this macro expects that input _argspec is an argument -// list element (e.g., "char *arg") and that _arglist is the arguments -// that should be passed through to the C method (e.g., "(db, arg)") -// -#define DBC_METHOD(_name, _argspec, _arglist, _retok) \ -int Dbc::_name _argspec \ -{ \ - int ret; \ - DBC *dbc = this; \ - \ - ret = dbc->c_##_name _arglist; \ - if (!_retok(ret)) \ - DB_ERROR(DbEnv::get_DbEnv(dbc->dbp->dbenv), \ - "Dbc::" # _name, ret, ON_ERROR_UNKNOWN); \ - return (ret); \ -} - -// It's private, and should never be called, but VC4.0 needs it resolved -// -Dbc::~Dbc() -{ -} - -DBC_METHOD(close, (void), (dbc), DB_RETOK_STD) -DBC_METHOD(count, (db_recno_t *countp, u_int32_t _flags), - (dbc, countp, _flags), DB_RETOK_STD) -DBC_METHOD(del, (u_int32_t _flags), - (dbc, _flags), DB_RETOK_DBCDEL) - -int Dbc::dup(Dbc** cursorp, u_int32_t _flags) -{ - int ret; - DBC *dbc = this; - DBC *new_cursor = 0; - - ret = dbc->c_dup(dbc, &new_cursor, _flags); - - if (DB_RETOK_STD(ret)) - // The following cast implies that Dbc can be no larger than DBC - *cursorp = (Dbc*)new_cursor; - else - DB_ERROR(DbEnv::get_DbEnv(dbc->dbp->dbenv), - "Dbc::dup", ret, ON_ERROR_UNKNOWN); - - return (ret); -} - -int Dbc::get(Dbt* key, Dbt *data, u_int32_t _flags) -{ - int ret; - DBC *dbc = this; - - ret = dbc->c_get(dbc, key, data, _flags); - - if (!DB_RETOK_DBCGET(ret)) { - if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(key)) - DB_ERROR_DBT(DbEnv::get_DbEnv(dbc->dbp->dbenv), - "Dbc::get", key, ON_ERROR_UNKNOWN); - else if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(data)) - DB_ERROR_DBT(DbEnv::get_DbEnv(dbc->dbp->dbenv), - "Dbc::get", data, ON_ERROR_UNKNOWN); - else - DB_ERROR(DbEnv::get_DbEnv(dbc->dbp->dbenv), - "Dbc::get", ret, ON_ERROR_UNKNOWN); - } - - return (ret); -} - -int Dbc::pget(Dbt* key, Dbt *pkey, Dbt *data, u_int32_t _flags) -{ - int ret; - DBC *dbc = this; - - ret = dbc->c_pget(dbc, key, pkey, data, _flags); - - /* Logic is the same as for Dbc::get - reusing macro. 
*/ - if (!DB_RETOK_DBCGET(ret)) { - if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(key)) - DB_ERROR_DBT(DbEnv::get_DbEnv(dbc->dbp->dbenv), - "Dbc::pget", key, ON_ERROR_UNKNOWN); - else if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(data)) - DB_ERROR_DBT(DbEnv::get_DbEnv(dbc->dbp->dbenv), - "Dbc::pget", data, ON_ERROR_UNKNOWN); - else - DB_ERROR(DbEnv::get_DbEnv(dbc->dbp->dbenv), - "Dbc::pget", ret, ON_ERROR_UNKNOWN); - } - - return (ret); -} - -DBC_METHOD(put, (Dbt* key, Dbt *data, u_int32_t _flags), - (dbc, key, data, _flags), DB_RETOK_DBCPUT) diff --git a/storage/bdb/cxx/cxx_dbt.cpp b/storage/bdb/cxx/cxx_dbt.cpp deleted file mode 100644 index 8062c255be5..00000000000 --- a/storage/bdb/cxx/cxx_dbt.cpp +++ /dev/null @@ -1,59 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_dbt.cpp,v 12.1 2005/06/16 20:20:58 bostic Exp $ - */ - -#include "db_config.h" - -#include -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc_auto/db_auto.h" -#include "dbinc_auto/crdel_auto.h" -#include "dbinc/db_dispatch.h" -#include "dbinc_auto/db_ext.h" -#include "dbinc_auto/common_ext.h" - -Dbt::Dbt() -{ - DBT *dbt = this; - memset(dbt, 0, sizeof(DBT)); -} - -Dbt::Dbt(void *data_arg, u_int32_t size_arg) -{ - DBT *dbt = this; - memset(dbt, 0, sizeof(DBT)); - set_data(data_arg); - set_size(size_arg); -} - -Dbt::~Dbt() -{ -} - -Dbt::Dbt(const Dbt &that) -{ - const DBT *from = &that; - DBT *to = this; - memcpy(to, from, sizeof(DBT)); -} - -Dbt &Dbt::operator = (const Dbt &that) -{ - if (this != &that) { - const DBT *from = &that; - DBT *to = this; - memcpy(to, from, sizeof(DBT)); - } - return (*this); -} diff --git a/storage/bdb/cxx/cxx_env.cpp b/storage/bdb/cxx/cxx_env.cpp deleted file mode 100644 index 62bbb0de381..00000000000 --- a/storage/bdb/cxx/cxx_env.cpp +++ /dev/null @@ -1,1054 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_env.cpp,v 12.14 2005/10/18 14:49:27 mjc Exp $ - */ - -#include "db_config.h" - -#include -#include // needed for set_error_stream -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" -#include "dbinc/log.h" -#include "dbinc_auto/common_ext.h" -#include "dbinc_auto/log_ext.h" - -#ifdef HAVE_CXX_STDHEADERS -using std::cerr; -#endif - -// Helper macros for simple methods that pass through to the -// underlying C method. They may return an error or raise an exception. 
-// These macros expect that input _argspec is an argument -// list element (e.g., "char *arg") and that _arglist is the arguments -// that should be passed through to the C method (e.g., "(dbenv, arg)") -// -#define DBENV_METHOD_ERR(_name, _argspec, _arglist, _on_err) \ -int DbEnv::_name _argspec \ -{ \ - DB_ENV *dbenv = unwrap(this); \ - int ret; \ - \ - if ((ret = dbenv->_name _arglist) != 0) { \ - _on_err; \ - } \ - return (ret); \ -} - -#define DBENV_METHOD(_name, _argspec, _arglist) \ - DBENV_METHOD_ERR(_name, _argspec, _arglist, \ - DB_ERROR(this, "DbEnv::" # _name, ret, error_policy())) - -#define DBENV_METHOD_QUIET(_name, _argspec, _arglist) \ -int DbEnv::_name _argspec \ -{ \ - DB_ENV *dbenv = unwrap(this); \ - \ - return (dbenv->_name _arglist); \ -} - -#define DBENV_METHOD_VOID(_name, _argspec, _arglist) \ -void DbEnv::_name _argspec \ -{ \ - DB_ENV *dbenv = unwrap(this); \ - \ - dbenv->_name _arglist; \ -} - -// The reason for a static variable is that some structures -// (like Dbts) have no connection to any Db or DbEnv, so when -// errors occur in their methods, we must have some reasonable -// way to determine whether to throw or return errors. -// -// This variable is taken from flags whenever a DbEnv is constructed. -// Normally there is only one DbEnv per program, and even if not, -// there is typically a single policy of throwing or returning. -// -static int last_known_error_policy = ON_ERROR_UNKNOWN; - -// These 'glue' function are declared as extern "C" so they will -// be compatible with picky compilers that do not allow mixing -// of function pointers to 'C' functions with function pointers -// to C++ functions. -// -extern "C" -void _feedback_intercept_c(DB_ENV *env, int opcode, int pct) -{ - DbEnv::_feedback_intercept(env, opcode, pct); -} - -extern "C" -void _paniccall_intercept_c(DB_ENV *env, int errval) -{ - DbEnv::_paniccall_intercept(env, errval); -} - -extern "C" -void _stream_error_function_c(const DB_ENV *env, - const char *prefix, const char *message) -{ - DbEnv::_stream_error_function(env, prefix, message); -} - -extern "C" -void _stream_message_function_c(const DB_ENV *env, const char *message) -{ - DbEnv::_stream_message_function(env, message); -} - -extern "C" -int _app_dispatch_intercept_c(DB_ENV *env, DBT *dbt, DB_LSN *lsn, db_recops op) -{ - return (DbEnv::_app_dispatch_intercept(env, dbt, lsn, op)); -} - -extern "C" -int _rep_send_intercept_c(DB_ENV *env, const DBT *cntrl, const DBT *data, - const DB_LSN *lsn, int id, u_int32_t flags) -{ - return (DbEnv::_rep_send_intercept(env, - cntrl, data, lsn, id, flags)); -} - -extern "C" -int _isalive_intercept_c(DB_ENV *env, pid_t pid, db_threadid_t thrid) -{ - return (DbEnv::_isalive_intercept(env, pid, thrid)); -} - -extern "C" -void _thread_id_intercept_c(DB_ENV *env, pid_t *pidp, db_threadid_t *thridp) -{ - DbEnv::_thread_id_intercept(env, pidp, thridp); -} - -extern "C" -char *_thread_id_string_intercept_c(DB_ENV *env, pid_t pid, - db_threadid_t thrid, char *buf) -{ - return (DbEnv::_thread_id_string_intercept(env, pid, thrid, buf)); -} - -void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct) -{ - DbEnv *cxxenv = DbEnv::get_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(0, - "DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN); - return; - } - if (cxxenv->feedback_callback_ == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::feedback_callback", EINVAL, cxxenv->error_policy()); - return; - } - (*cxxenv->feedback_callback_)(cxxenv, opcode, pct); -} - -void DbEnv::_paniccall_intercept(DB_ENV 
*env, int errval) -{ - DbEnv *cxxenv = DbEnv::get_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(0, - "DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN); - return; - } - if (cxxenv->paniccall_callback_ == 0) { - DB_ERROR(cxxenv, "DbEnv::paniccall_callback", EINVAL, - cxxenv->error_policy()); - return; - } - (*cxxenv->paniccall_callback_)(cxxenv, errval); -} - -int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn, - db_recops op) -{ - DbEnv *cxxenv = DbEnv::get_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::app_dispatch_callback", EINVAL, ON_ERROR_UNKNOWN); - return (EINVAL); - } - if (cxxenv->app_dispatch_callback_ == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::app_dispatch_callback", EINVAL, - cxxenv->error_policy()); - return (EINVAL); - } - Dbt *cxxdbt = (Dbt *)dbt; - DbLsn *cxxlsn = (DbLsn *)lsn; - return ((*cxxenv->app_dispatch_callback_)(cxxenv, cxxdbt, cxxlsn, op)); -} - -int DbEnv::_isalive_intercept(DB_ENV *env, pid_t pid, db_threadid_t thrid) -{ - DbEnv *cxxenv = DbEnv::get_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::isalive_callback", EINVAL, ON_ERROR_UNKNOWN); - return (0); - } - return ((*cxxenv->isalive_callback_)(cxxenv, pid, thrid)); -} - -int DbEnv::_rep_send_intercept(DB_ENV *env, const DBT *cntrl, const DBT *data, - const DB_LSN *lsn, int id, u_int32_t flags) -{ - DbEnv *cxxenv = DbEnv::get_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN); - return (EINVAL); - } - const Dbt *cxxcntrl = (const Dbt *)cntrl; - const DbLsn *cxxlsn = (const DbLsn *)lsn; - Dbt *cxxdata = (Dbt *)data; - return ((*cxxenv->rep_send_callback_)(cxxenv, - cxxcntrl, cxxdata, cxxlsn, id, flags)); -} - -void DbEnv::_thread_id_intercept(DB_ENV *env, pid_t *pidp, db_threadid_t *thridp) -{ - DbEnv *cxxenv = DbEnv::get_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::thread_id_callback", EINVAL, ON_ERROR_UNKNOWN); - } else - cxxenv->thread_id_callback_(cxxenv, pidp, thridp); -} - -char *DbEnv::_thread_id_string_intercept(DB_ENV *env, pid_t pid, - db_threadid_t thrid, char *buf) -{ - DbEnv *cxxenv = DbEnv::get_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(DbEnv::get_DbEnv(env), - "DbEnv::thread_id_string_callback", EINVAL, ON_ERROR_UNKNOWN); - return (NULL); - } - return (cxxenv->thread_id_string_callback_(cxxenv, pid, thrid, buf)); -} - -// A truism for the DbEnv object is that there is a valid -// DB_ENV handle from the constructor until close(). -// After the close, the DB_ENV handle is invalid and -// no operations are permitted on the DbEnv (other than -// destructor). Leaving the DbEnv handle open and not -// doing a close is generally considered an error. -// -// We used to allow DbEnv objects to be closed and reopened. -// This implied always keeping a valid DB_ENV object, and -// coordinating the open objects between Db/DbEnv turned -// out to be overly complicated. Now we do not allow this. 
- -DbEnv::DbEnv(u_int32_t flags) -: imp_(0) -, construct_error_(0) -, construct_flags_(flags) -, error_stream_(0) -, message_stream_(0) -, app_dispatch_callback_(0) -, feedback_callback_(0) -, paniccall_callback_(0) -, rep_send_callback_(0) -{ - if ((construct_error_ = initialize(0)) != 0) - DB_ERROR(this, "DbEnv::DbEnv", construct_error_, - error_policy()); -} - -DbEnv::DbEnv(DB_ENV *env, u_int32_t flags) -: imp_(0) -, construct_error_(0) -, construct_flags_(flags) -, error_stream_(0) -, message_stream_(0) -, app_dispatch_callback_(0) -, feedback_callback_(0) -, paniccall_callback_(0) -, rep_send_callback_(0) -{ - if ((construct_error_ = initialize(env)) != 0) - DB_ERROR(this, "DbEnv::DbEnv", construct_error_, - error_policy()); -} - -// If the DB_ENV handle is still open, we close it. This is to make stack -// allocation of DbEnv objects easier so that they are cleaned up in the error -// path. Note that the C layer catches cases where handles are open in the -// environment at close time and reports an error. Applications should call -// close explicitly in normal (non-exceptional) cases to check the return -// value. -// -DbEnv::~DbEnv() -{ - DB_ENV *env = unwrap(this); - - if (env != NULL) { - cleanup(); - (void)env->close(env, 0); - } -} - -// called by destructors before the DB_ENV is destroyed. -void DbEnv::cleanup() -{ - DB_ENV *env = unwrap(this); - - if (env != NULL) { - env->api1_internal = 0; - imp_ = 0; - } -} - -int DbEnv::close(u_int32_t flags) -{ - int ret; - DB_ENV *env = unwrap(this); - - // after a close (no matter if success or failure), - // the underlying DB_ENV object must not be accessed, - // so we clean up in advance. - // - cleanup(); - - // It's safe to throw an error after the close, - // since our error mechanism does not peer into - // the DB* structures. - // - if ((ret = env->close(env, flags)) != 0) - DB_ERROR(this, "DbEnv::close", ret, error_policy()); - - return (ret); -} - -DBENV_METHOD(dbremove, - (DbTxn *txn, const char *name, const char *subdb, u_int32_t flags), - (dbenv, unwrap(txn), name, subdb, flags)) -DBENV_METHOD(dbrename, (DbTxn *txn, const char *name, const char *subdb, - const char *newname, u_int32_t flags), - (dbenv, unwrap(txn), name, subdb, newname, flags)) - -void DbEnv::err(int error, const char *format, ...) -{ - DB_ENV *env = unwrap(this); - - DB_REAL_ERR(env, error, 1, 1, format); -} - -// Return a tristate value corresponding to whether we should -// throw exceptions on errors: -// ON_ERROR_RETURN -// ON_ERROR_THROW -// ON_ERROR_UNKNOWN -// -int DbEnv::error_policy() -{ - if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) { - return (ON_ERROR_RETURN); - } - else { - return (ON_ERROR_THROW); - } -} - -void DbEnv::errx(const char *format, ...) -{ - DB_ENV *env = unwrap(this); - - DB_REAL_ERR(env, 0, 0, 1, format); -} - -void *DbEnv::get_app_private() const -{ - return unwrapConst(this)->app_private; -} - -DBENV_METHOD(failchk, (u_int32_t flags), (dbenv, flags)) -DBENV_METHOD(fileid_reset, (const char *file, u_int32_t flags), - (dbenv, file, flags)) -DBENV_METHOD(get_home, (const char **homep), (dbenv, homep)) -DBENV_METHOD(get_open_flags, (u_int32_t *flagsp), (dbenv, flagsp)) -DBENV_METHOD(get_data_dirs, (const char ***dirspp), (dbenv, dirspp)) - -bool DbEnv::is_bigendian() -{ - return unwrap(this)->is_bigendian() ? true : false; -} - -DBENV_METHOD(set_thread_count, (u_int32_t count), (dbenv, count)) - -// used internally during constructor -// to associate an existing DB_ENV with this DbEnv, -// or create a new one. 
-// -int DbEnv::initialize(DB_ENV *env) -{ - int ret; - - last_known_error_policy = error_policy(); - - if (env == 0) { - // Create a new DB_ENV environment. - if ((ret = ::db_env_create(&env, - construct_flags_ & ~DB_CXX_NO_EXCEPTIONS)) != 0) - return (ret); - } - imp_ = env; - env->api1_internal = this; // for DB_ENV* to DbEnv* conversion - return (0); -} - -// lock methods -DBENV_METHOD(lock_detect, (u_int32_t flags, u_int32_t atype, int *aborted), - (dbenv, flags, atype, aborted)) -DBENV_METHOD_ERR(lock_get, - (u_int32_t locker, u_int32_t flags, const Dbt *obj, - db_lockmode_t lock_mode, DbLock *lock), - (dbenv, locker, flags, obj, lock_mode, &lock->lock_), - DbEnv::runtime_error_lock_get(this, "DbEnv::lock_get", ret, - DB_LOCK_GET, lock_mode, obj, *lock, - -1, error_policy())) -DBENV_METHOD(lock_id, (u_int32_t *idp), (dbenv, idp)) -DBENV_METHOD(lock_id_free, (u_int32_t id), (dbenv, id)) -DBENV_METHOD(lock_put, (DbLock *lock), (dbenv, &lock->lock_)) -DBENV_METHOD(lock_stat, (DB_LOCK_STAT **statp, u_int32_t flags), - (dbenv, statp, flags)) -DBENV_METHOD(lock_stat_print, (u_int32_t flags), (dbenv, flags)) -DBENV_METHOD_ERR(lock_vec, - (u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[], - int nlist, DB_LOCKREQ **elist_returned), - (dbenv, locker, flags, list, nlist, elist_returned), - DbEnv::runtime_error_lock_get(this, "DbEnv::lock_vec", ret, - (*elist_returned)->op, (*elist_returned)->mode, - Dbt::get_Dbt((*elist_returned)->obj), DbLock((*elist_returned)->lock), - (*elist_returned) - list, error_policy())) -// log methods -DBENV_METHOD(log_archive, (char **list[], u_int32_t flags), - (dbenv, list, flags)) - -int DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1) -{ - return (::log_compare(lsn0, lsn1)); -} - -// The following cast implies that DbLogc can be no larger than DB_LOGC -DBENV_METHOD(log_cursor, (DbLogc **cursorp, u_int32_t flags), - (dbenv, (DB_LOGC **)cursorp, flags)) -DBENV_METHOD(log_file, (DbLsn *lsn, char *namep, size_t len), - (dbenv, lsn, namep, len)) -DBENV_METHOD(log_flush, (const DbLsn *lsn), (dbenv, lsn)) -DBENV_METHOD(log_put, (DbLsn *lsn, const Dbt *data, u_int32_t flags), - (dbenv, lsn, data, flags)) - -int DbEnv::log_printf(DbTxn *txn, const char *fmt, ...) 
-{ - DB_ENV *env = unwrap(this); - va_list ap; - int ret; - - va_start(ap, fmt); - ret = __log_printf_pp(env, unwrap(txn), fmt, ap); - va_end(ap); - - return (ret); -} - -DBENV_METHOD(log_stat, (DB_LOG_STAT **spp, u_int32_t flags), - (dbenv, spp, flags)) -DBENV_METHOD(log_stat_print, (u_int32_t flags), (dbenv, flags)) - -DBENV_METHOD(lsn_reset, (const char *file, u_int32_t flags), - (dbenv, file, flags)) - -int DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags) -{ - DB_ENV *env = unwrap(this); - int ret; - DB_MPOOLFILE *mpf; - - if (env == NULL) - ret = EINVAL; - else - ret = env->memp_fcreate(env, &mpf, flags); - - if (DB_RETOK_STD(ret)) { - *dbmfp = new DbMpoolFile(); - (*dbmfp)->imp_ = mpf; - } else - DB_ERROR(this, "DbMpoolFile::f_create", ret, ON_ERROR_UNKNOWN); - - return (ret); -} - -DBENV_METHOD(memp_register, - (int ftype, pgin_fcn_type pgin_fcn, pgout_fcn_type pgout_fcn), - (dbenv, ftype, pgin_fcn, pgout_fcn)) - -// memory pool methods -DBENV_METHOD(memp_stat, - (DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags), - (dbenv, gsp, fsp, flags)) -DBENV_METHOD(memp_stat_print, (u_int32_t flags), (dbenv, flags)) -DBENV_METHOD(memp_sync, (DbLsn *sn), (dbenv, sn)) -DBENV_METHOD(memp_trickle, (int pct, int *nwrotep), (dbenv, pct, nwrotep)) - -// If an error occurred during the constructor, report it now. -// Otherwise, call the underlying DB->open method. -// -int DbEnv::open(const char *db_home, u_int32_t flags, int mode) -{ - int ret; - DB_ENV *env = unwrap(this); - - if (construct_error_ != 0) - ret = construct_error_; - else - ret = env->open(env, db_home, flags, mode); - - if (!DB_RETOK_STD(ret)) - DB_ERROR(this, "DbEnv::open", ret, error_policy()); - - return (ret); -} - -int DbEnv::remove(const char *db_home, u_int32_t flags) -{ - int ret; - DB_ENV *env = unwrap(this); - - // after a remove (no matter if success or failure), - // the underlying DB_ENV object must not be accessed, - // so we clean up in advance. - // - cleanup(); - - if ((ret = env->remove(env, db_home, flags)) != 0) - DB_ERROR(this, "DbEnv::remove", ret, error_policy()); - - return (ret); -} - -// Report an error associated with the DbEnv. -// error_policy is one of: -// ON_ERROR_THROW throw an error -// ON_ERROR_RETURN do nothing here, the caller will return an error -// ON_ERROR_UNKNOWN defer the policy to policy saved in DbEnv::DbEnv -// -void DbEnv::runtime_error(DbEnv *env, - const char *caller, int error, int error_policy) -{ - if (error_policy == ON_ERROR_UNKNOWN) - error_policy = last_known_error_policy; - if (error_policy == ON_ERROR_THROW) { - // Creating and throwing the object in two separate - // statements seems to be necessary for HP compilers. - switch (error) { - case DB_LOCK_DEADLOCK: - { - DbDeadlockException dl_except(caller); - dl_except.set_env(env); - throw dl_except; - } - break; - case DB_RUNRECOVERY: - { - DbRunRecoveryException rr_except(caller); - rr_except.set_env(env); - throw rr_except; - } - break; - case DB_LOCK_NOTGRANTED: - { - DbLockNotGrantedException lng_except(caller); - lng_except.set_env(env); - throw lng_except; - } - break; - case DB_REP_HANDLE_DEAD: - { - DbRepHandleDeadException dl_except(caller); - dl_except.set_env(env); - throw dl_except; - } - default: - { - DbException except(caller, error); - except.set_env(env); - throw except; - } - break; - } - } -} - -// Like DbEnv::runtime_error, but issue a DbMemoryException -// based on the fact that this Dbt is not large enough. 
-void DbEnv::runtime_error_dbt(DbEnv *env, - const char *caller, Dbt *dbt, int error_policy) -{ - if (error_policy == ON_ERROR_UNKNOWN) - error_policy = last_known_error_policy; - if (error_policy == ON_ERROR_THROW) { - // Creating and throwing the object in two separate - // statements seems to be necessary for HP compilers. - DbMemoryException except(caller, dbt); - except.set_env(env); - throw except; - } -} - -// Like DbEnv::runtime_error, but issue a DbLockNotGrantedException, -// or a regular runtime error. -// call regular runtime_error if it -void DbEnv::runtime_error_lock_get(DbEnv *env, - const char *caller, int error, - db_lockop_t op, db_lockmode_t mode, const Dbt *obj, - DbLock lock, int index, int error_policy) -{ - if (error != DB_LOCK_NOTGRANTED) { - runtime_error(env, caller, error, error_policy); - return; - } - - if (error_policy == ON_ERROR_UNKNOWN) - error_policy = last_known_error_policy; - if (error_policy == ON_ERROR_THROW) { - // Creating and throwing the object in two separate - // statements seems to be necessary for HP compilers. - DbLockNotGrantedException except(caller, op, mode, - obj, lock, index); - except.set_env(env); - throw except; - } -} - -void DbEnv::_stream_error_function( - const DB_ENV *env, const char *prefix, const char *message) -{ - const DbEnv *cxxenv = DbEnv::get_const_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(0, - "DbEnv::stream_error", EINVAL, ON_ERROR_UNKNOWN); - return; - } - - if (cxxenv->error_callback_) - cxxenv->error_callback_(cxxenv, prefix, message); - else if (cxxenv->error_stream_) { - // HP compilers need the extra casts, we don't know why. - if (prefix) { - (*cxxenv->error_stream_) << prefix; - (*cxxenv->error_stream_) << (const char *)": "; - } - if (message) - (*cxxenv->error_stream_) << (const char *)message; - (*cxxenv->error_stream_) << (const char *)"\n"; - } -} - -void DbEnv::_stream_message_function(const DB_ENV *env, const char *message) -{ - const DbEnv *cxxenv = DbEnv::get_const_DbEnv(env); - if (cxxenv == 0) { - DB_ERROR(0, - "DbEnv::stream_message", EINVAL, ON_ERROR_UNKNOWN); - return; - } - - if (cxxenv->message_callback_) - cxxenv->message_callback_(cxxenv, message); - else if (cxxenv->message_stream_) { - // HP compilers need the extra casts, we don't know why. - (*cxxenv->message_stream_) << (const char *)message; - (*cxxenv->message_stream_) << (const char *)"\n"; - } -} - -// static method -char *DbEnv::strerror(int error) -{ - return (db_strerror(error)); -} - -// We keep these alphabetical by field name, -// for comparison with Java's list. 
-// -DBENV_METHOD(set_data_dir, (const char *dir), (dbenv, dir)) -DBENV_METHOD(get_encrypt_flags, (u_int32_t *flagsp), - (dbenv, flagsp)) -DBENV_METHOD(set_encrypt, (const char *passwd, u_int32_t flags), - (dbenv, passwd, flags)) -DBENV_METHOD_VOID(get_errfile, (FILE **errfilep), (dbenv, errfilep)) -DBENV_METHOD_VOID(set_errfile, (FILE *errfile), (dbenv, errfile)) -DBENV_METHOD_VOID(get_errpfx, (const char **errpfxp), (dbenv, errpfxp)) -DBENV_METHOD_VOID(set_errpfx, (const char *errpfx), (dbenv, errpfx)) -DBENV_METHOD(set_intermediate_dir, (int mode, u_int32_t flags), - (dbenv, mode, flags)) -DBENV_METHOD(get_lg_bsize, (u_int32_t *bsizep), (dbenv, bsizep)) -DBENV_METHOD(set_lg_bsize, (u_int32_t bsize), (dbenv, bsize)) -DBENV_METHOD(get_lg_dir, (const char **dirp), (dbenv, dirp)) -DBENV_METHOD(set_lg_dir, (const char *dir), (dbenv, dir)) -DBENV_METHOD(get_lg_filemode, (int *modep), (dbenv, modep)) -DBENV_METHOD(set_lg_filemode, (int mode), (dbenv, mode)) -DBENV_METHOD(get_lg_max, (u_int32_t *maxp), (dbenv, maxp)) -DBENV_METHOD(set_lg_max, (u_int32_t max), (dbenv, max)) -DBENV_METHOD(get_lg_regionmax, (u_int32_t *regionmaxp), (dbenv, regionmaxp)) -DBENV_METHOD(set_lg_regionmax, (u_int32_t regionmax), (dbenv, regionmax)) -DBENV_METHOD(get_lk_conflicts, (const u_int8_t **lk_conflictsp, int *lk_maxp), - (dbenv, lk_conflictsp, lk_maxp)) -DBENV_METHOD(set_lk_conflicts, (u_int8_t *lk_conflicts, int lk_max), - (dbenv, lk_conflicts, lk_max)) -DBENV_METHOD(get_lk_detect, (u_int32_t *detectp), (dbenv, detectp)) -DBENV_METHOD(set_lk_detect, (u_int32_t detect), (dbenv, detect)) -DBENV_METHOD(set_lk_max, (u_int32_t max), (dbenv, max)) -DBENV_METHOD(get_lk_max_lockers, (u_int32_t *max_lockersp), - (dbenv, max_lockersp)) -DBENV_METHOD(set_lk_max_lockers, (u_int32_t max_lockers), (dbenv, max_lockers)) -DBENV_METHOD(get_lk_max_locks, (u_int32_t *max_locksp), (dbenv, max_locksp)) -DBENV_METHOD(set_lk_max_locks, (u_int32_t max_locks), (dbenv, max_locks)) -DBENV_METHOD(get_lk_max_objects, (u_int32_t *max_objectsp), - (dbenv, max_objectsp)) -DBENV_METHOD(set_lk_max_objects, (u_int32_t max_objects), (dbenv, max_objects)) -DBENV_METHOD(get_mp_max_openfd, (int *maxopenfdp), (dbenv, maxopenfdp)) -DBENV_METHOD(set_mp_max_openfd, (int maxopenfd), (dbenv, maxopenfd)) -DBENV_METHOD(get_mp_max_write, (int *maxwritep, int *maxwrite_sleepp), (dbenv, maxwritep, maxwrite_sleepp)) -DBENV_METHOD(set_mp_max_write, (int maxwrite, int maxwrite_sleep), (dbenv, maxwrite, maxwrite_sleep)) -DBENV_METHOD(get_mp_mmapsize, (size_t *mmapsizep), (dbenv, mmapsizep)) -DBENV_METHOD(set_mp_mmapsize, (size_t mmapsize), (dbenv, mmapsize)) -DBENV_METHOD_VOID(get_msgfile, (FILE **msgfilep), (dbenv, msgfilep)) -DBENV_METHOD_VOID(set_msgfile, (FILE *msgfile), (dbenv, msgfile)) -DBENV_METHOD(get_tmp_dir, (const char **tmp_dirp), (dbenv, tmp_dirp)) -DBENV_METHOD(set_tmp_dir, (const char *tmp_dir), (dbenv, tmp_dir)) -DBENV_METHOD(get_tx_max, (u_int32_t *tx_maxp), (dbenv, tx_maxp)) -DBENV_METHOD(set_tx_max, (u_int32_t tx_max), (dbenv, tx_max)) - -DBENV_METHOD(stat_print, (u_int32_t flags), (dbenv, flags)) - -DBENV_METHOD_QUIET(set_alloc, - (db_malloc_fcn_type malloc_fcn, db_realloc_fcn_type realloc_fcn, - db_free_fcn_type free_fcn), - (dbenv, malloc_fcn, realloc_fcn, free_fcn)) - -void DbEnv::set_app_private(void *value) -{ - unwrap(this)->app_private = value; -} - -DBENV_METHOD(get_cachesize, - (u_int32_t *gbytesp, u_int32_t *bytesp, int *ncachep), - (dbenv, gbytesp, bytesp, ncachep)) -DBENV_METHOD(set_cachesize, - (u_int32_t gbytes, u_int32_t bytes, 
int ncache), - (dbenv, gbytes, bytes, ncache)) - -void DbEnv::set_errcall(void (*arg)(const DbEnv *, const char *, const char *)) -{ - DB_ENV *dbenv = unwrap(this); - - error_callback_ = arg; - error_stream_ = 0; - - dbenv->set_errcall(dbenv, (arg == 0) ? 0 : - _stream_error_function_c); -} - -__DB_STD(ostream) *DbEnv::get_error_stream() -{ - return (error_stream_); -} - -void DbEnv::set_error_stream(__DB_STD(ostream) *stream) -{ - DB_ENV *dbenv = unwrap(this); - - error_stream_ = stream; - error_callback_ = 0; - - dbenv->set_errcall(dbenv, (stream == 0) ? 0 : - _stream_error_function_c); -} - -int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int)) -{ - DB_ENV *dbenv = unwrap(this); - - feedback_callback_ = arg; - - return (dbenv->set_feedback(dbenv, - arg == 0 ? 0 : _feedback_intercept_c)); -} - -DBENV_METHOD(get_flags, (u_int32_t *flagsp), (dbenv, flagsp)) -DBENV_METHOD(set_flags, (u_int32_t flags, int onoff), (dbenv, flags, onoff)) - -void DbEnv::set_msgcall(void (*arg)(const DbEnv *, const char *)) -{ - DB_ENV *dbenv = unwrap(this); - - message_callback_ = arg; - message_stream_ = 0; - - dbenv->set_msgcall(dbenv, (arg == 0) ? 0 : - _stream_message_function_c); -} - -__DB_STD(ostream) *DbEnv::get_message_stream() -{ - return (message_stream_); -} - -void DbEnv::set_message_stream(__DB_STD(ostream) *stream) -{ - DB_ENV *dbenv = unwrap(this); - - message_stream_ = stream; - message_callback_ = 0; - - dbenv->set_msgcall(dbenv, (stream == 0) ? 0 : - _stream_message_function_c); -} - -int DbEnv::set_paniccall(void (*arg)(DbEnv *, int)) -{ - DB_ENV *dbenv = unwrap(this); - - paniccall_callback_ = arg; - - return (dbenv->set_paniccall(dbenv, - arg == 0 ? 0 : _paniccall_intercept_c)); -} - -DBENV_METHOD(set_rpc_server, - (void *cl, char *host, long tsec, long ssec, u_int32_t flags), - (dbenv, cl, host, tsec, ssec, flags)) -DBENV_METHOD(get_shm_key, (long *shm_keyp), (dbenv, shm_keyp)) -DBENV_METHOD(set_shm_key, (long shm_key), (dbenv, shm_key)) - -int DbEnv::set_app_dispatch - (int (*arg)(DbEnv *, Dbt *, DbLsn *, db_recops)) -{ - DB_ENV *dbenv = unwrap(this); - int ret; - - app_dispatch_callback_ = arg; - if ((ret = dbenv->set_app_dispatch(dbenv, - arg == 0 ? 0 : _app_dispatch_intercept_c)) != 0) - DB_ERROR(this, "DbEnv::set_app_dispatch", ret, error_policy()); - - return (ret); -} - -int DbEnv::set_isalive - (int (*arg)(DbEnv *, pid_t, db_threadid_t)) -{ - DB_ENV *dbenv = unwrap(this); - int ret; - - isalive_callback_ = arg; - if ((ret = dbenv->set_isalive(dbenv, - arg == 0 ? 
0 : _isalive_intercept_c)) != 0) - DB_ERROR(this, "DbEnv::set_isalive", ret, error_policy()); - - return (ret); -} - -DBENV_METHOD(get_tx_timestamp, (time_t *timestamp), (dbenv, timestamp)) -DBENV_METHOD(set_tx_timestamp, (time_t *timestamp), (dbenv, timestamp)) -DBENV_METHOD(get_verbose, (u_int32_t which, int *onoffp), - (dbenv, which, onoffp)) -DBENV_METHOD(set_verbose, (u_int32_t which, int onoff), (dbenv, which, onoff)) - -DBENV_METHOD(mutex_alloc, - (u_int32_t flags, db_mutex_t *mutexp), (dbenv, flags, mutexp)) -DBENV_METHOD(mutex_free, (db_mutex_t mutex), (dbenv, mutex)) -DBENV_METHOD(mutex_get_align, (u_int32_t *argp), (dbenv, argp)) -DBENV_METHOD(mutex_get_increment, (u_int32_t *argp), (dbenv, argp)) -DBENV_METHOD(mutex_get_max, (u_int32_t *argp), (dbenv, argp)) -DBENV_METHOD(mutex_get_tas_spins, (u_int32_t *argp), (dbenv, argp)) -DBENV_METHOD(mutex_lock, (db_mutex_t mutex), (dbenv, mutex)) -DBENV_METHOD(mutex_set_align, (u_int32_t arg), (dbenv, arg)) -DBENV_METHOD(mutex_set_increment, (u_int32_t arg), (dbenv, arg)) -DBENV_METHOD(mutex_set_max, (u_int32_t arg), (dbenv, arg)) -DBENV_METHOD(mutex_set_tas_spins, (u_int32_t arg), (dbenv, arg)) -DBENV_METHOD(mutex_stat, - (DB_MUTEX_STAT **statp, u_int32_t flags), (dbenv, statp, flags)) -DBENV_METHOD(mutex_stat_print, (u_int32_t flags), (dbenv, flags)) -DBENV_METHOD(mutex_unlock, (db_mutex_t mutex), (dbenv, mutex)) - -int DbEnv::set_thread_id(void (*arg)(DbEnv *, pid_t *, db_threadid_t *)) -{ - DB_ENV *dbenv = unwrap(this); - int ret; - - thread_id_callback_ = arg; - if ((ret = dbenv->set_thread_id(dbenv, - arg == 0 ? 0 : _thread_id_intercept_c)) != 0) - DB_ERROR(this, "DbEnv::set_thread_id", ret, error_policy()); - - return (ret); -} - -int DbEnv::set_thread_id_string( - char *(*arg)(DbEnv *, pid_t, db_threadid_t, char *)) -{ - DB_ENV *dbenv = unwrap(this); - int ret; - - thread_id_string_callback_ = arg; - if ((ret = dbenv->set_thread_id_string(dbenv, - arg == 0 ? 0 : _thread_id_string_intercept_c)) != 0) - DB_ERROR(this, "DbEnv::set_thread_id_string", ret, - error_policy()); - - return (ret); -} - -int DbEnv::txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags) -{ - DB_ENV *env = unwrap(this); - DB_TXN *txn; - int ret; - - ret = env->txn_begin(env, unwrap(pid), &txn, flags); - if (DB_RETOK_STD(ret)) - *tid = new DbTxn(txn); - else - DB_ERROR(this, "DbEnv::txn_begin", ret, error_policy()); - - return (ret); -} - -DBENV_METHOD(txn_checkpoint, (u_int32_t kbyte, u_int32_t min, u_int32_t flags), - (dbenv, kbyte, min, flags)) - -int DbEnv::txn_recover(DbPreplist *preplist, long count, - long *retp, u_int32_t flags) -{ - DB_ENV *dbenv = unwrap(this); - DB_PREPLIST *c_preplist; - long i; - int ret; - - /* - * We need to allocate some local storage for the - * returned preplist, and that requires us to do - * our own argument validation. 
- */ - if (count <= 0) - ret = EINVAL; - else - ret = __os_malloc(dbenv, sizeof(DB_PREPLIST) * count, - &c_preplist); - - if (ret != 0) { - DB_ERROR(this, "DbEnv::txn_recover", ret, error_policy()); - return (ret); - } - - if ((ret = - dbenv->txn_recover(dbenv, c_preplist, count, retp, flags)) != 0) { - __os_free(dbenv, c_preplist); - DB_ERROR(this, "DbEnv::txn_recover", ret, error_policy()); - return (ret); - } - - for (i = 0; i < *retp; i++) { - preplist[i].txn = new DbTxn(); - preplist[i].txn->imp_ = c_preplist[i].txn; - memcpy(preplist[i].gid, c_preplist[i].gid, - sizeof(preplist[i].gid)); - } - - __os_free(dbenv, c_preplist); - - return (0); -} - -DBENV_METHOD(txn_stat, (DB_TXN_STAT **statp, u_int32_t flags), - (dbenv, statp, flags)) -DBENV_METHOD(txn_stat_print, (u_int32_t flags), (dbenv, flags)) - -int DbEnv::set_rep_transport(int myid, - int (*arg)(DbEnv *, const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t)) -{ - DB_ENV *dbenv = unwrap(this); - int ret; - - rep_send_callback_ = arg; - if ((ret = dbenv->set_rep_transport(dbenv, myid, - arg == 0 ? 0 : _rep_send_intercept_c)) != 0) - DB_ERROR(this, "DbEnv::set_rep_transport", ret, error_policy()); - - return (ret); -} - -DBENV_METHOD(rep_elect, - (int nsites, - int nvotes, int priority, u_int32_t timeout, int *eidp, u_int32_t flags), - (dbenv, nsites, nvotes, priority, timeout, eidp, flags)) -DBENV_METHOD(rep_flush, (), (dbenv)) -DBENV_METHOD(rep_get_config, (u_int32_t which, int *onoffp), - (dbenv, which, onoffp)) -DBENV_METHOD(set_rep_request, (u_int32_t min, u_int32_t max), (dbenv, min, max)) - -int DbEnv::rep_process_message(Dbt *control, - Dbt *rec, int *idp, DbLsn *ret_lsnp) -{ - DB_ENV *dbenv = unwrap(this); - int ret; - - ret = dbenv->rep_process_message(dbenv, control, rec, idp, ret_lsnp); - if (!DB_RETOK_REPPMSG(ret)) - DB_ERROR(this, "DbEnv::rep_process_message", ret, - error_policy()); - - return (ret); -} - -DBENV_METHOD(rep_set_config, - (u_int32_t which, int onoff), (dbenv, which, onoff)) -DBENV_METHOD(rep_start, - (Dbt *cookie, u_int32_t flags), - (dbenv, (DBT *)cookie, flags)) - -DBENV_METHOD(rep_stat, (DB_REP_STAT **statp, u_int32_t flags), - (dbenv, statp, flags)) -DBENV_METHOD(rep_stat_print, (u_int32_t flags), (dbenv, flags)) -DBENV_METHOD(rep_sync, (u_int32_t flags), (dbenv, flags)) - -DBENV_METHOD(get_rep_limit, (u_int32_t *gbytesp, u_int32_t *bytesp), - (dbenv, gbytesp, bytesp)) -DBENV_METHOD(set_rep_limit, (u_int32_t gbytes, u_int32_t bytes), - (dbenv, gbytes, bytes)) - -DBENV_METHOD(get_timeout, - (db_timeout_t *timeoutp, u_int32_t flags), - (dbenv, timeoutp, flags)) -DBENV_METHOD(set_timeout, - (db_timeout_t timeout, u_int32_t flags), - (dbenv, timeout, flags)) - -// static method -char *DbEnv::version(int *major, int *minor, int *patch) -{ - return (db_version(major, minor, patch)); -} - -// static method -DbEnv *DbEnv::wrap_DB_ENV(DB_ENV *dbenv) -{ - DbEnv *wrapped_env = get_DbEnv(dbenv); - return (wrapped_env != NULL) ? wrapped_env : new DbEnv(dbenv, 0); -} diff --git a/storage/bdb/cxx/cxx_except.cpp b/storage/bdb/cxx/cxx_except.cpp deleted file mode 100644 index b0bf7c0690e..00000000000 --- a/storage/bdb/cxx/cxx_except.cpp +++ /dev/null @@ -1,354 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: cxx_except.cpp,v 12.2 2005/10/14 12:20:04 mjc Exp $ - */ - -#include "db_config.h" - -#include -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -// Note: would not be needed if we can inherit from exception -// It does not appear to be possible to inherit from exception -// with the current Microsoft library (VC5.0). -// -static char *dupString(const char *s) -{ - char *r = new char[strlen(s)+1]; - strcpy(r, s); - return (r); -} - -//////////////////////////////////////////////////////////////////////// -// // -// DbException // -// // -//////////////////////////////////////////////////////////////////////// - -DbException::~DbException() throw() -{ - delete [] what_; -} - -DbException::DbException(int err) -: err_(err) -, env_(0) -{ - describe(0, 0); -} - -DbException::DbException(const char *description) -: err_(0) -, env_(0) -{ - describe(0, description); -} - -DbException::DbException(const char *description, int err) -: err_(err) -, env_(0) -{ - describe(0, description); -} - -DbException::DbException(const char *prefix, const char *description, int err) -: err_(err) -, env_(0) -{ - describe(prefix, description); -} - -DbException::DbException(const DbException &that) -: __DB_STD(exception)() -, what_(dupString(that.what_)) -, err_(that.err_) -, env_(0) -{ -} - -DbException &DbException::operator = (const DbException &that) -{ - if (this != &that) { - err_ = that.err_; - delete [] what_; - what_ = dupString(that.what_); - } - return (*this); -} - -void DbException::describe(const char *prefix, const char *description) -{ - char msgbuf[1024], *p, *end; - - p = msgbuf; - end = msgbuf + sizeof(msgbuf) - 1; - - if (prefix != NULL) { - strncpy(p, prefix, (p < end) ? end - p: 0); - p += strlen(prefix); - strncpy(p, ": ", (p < end) ? end - p: 0); - p += 2; - } - if (description != NULL) { - strncpy(p, description, (p < end) ? end - p: 0); - p += strlen(description); - if (err_ != 0) { - strncpy(p, ": ", (p < end) ? end - p: 0); - p += 2; - } - } - if (err_ != 0) { - strncpy(p, db_strerror(err_), (p < end) ? end - p: 0); - p += strlen(db_strerror(err_)); - } - - /* - * If the result was too long, the buffer will not be null-terminated, - * so we need to fix that here before duplicating it. 
- */ - if (p >= end) - *end = '\0'; - - what_ = dupString(msgbuf); -} - -int DbException::get_errno() const -{ - return (err_); -} - -const char *DbException::what() const throw() -{ - return (what_); -} - -DbEnv *DbException::get_env() const -{ - return env_; -} - -void DbException::set_env(DbEnv *env) -{ - env_= env; -} - -//////////////////////////////////////////////////////////////////////// -// // -// DbMemoryException // -// // -//////////////////////////////////////////////////////////////////////// - -static const char *memory_err_desc = "Dbt not large enough for available data"; -DbMemoryException::~DbMemoryException() throw() -{ -} - -DbMemoryException::DbMemoryException(Dbt *dbt) -: DbException(memory_err_desc, ENOMEM) -, dbt_(dbt) -{ -} - -DbMemoryException::DbMemoryException(const char *prefix, Dbt *dbt) -: DbException(prefix, memory_err_desc, ENOMEM) -, dbt_(dbt) -{ -} - -DbMemoryException::DbMemoryException(const DbMemoryException &that) -: DbException(that) -, dbt_(that.dbt_) -{ -} - -DbMemoryException -&DbMemoryException::operator =(const DbMemoryException &that) -{ - if (this != &that) { - DbException::operator=(that); - dbt_ = that.dbt_; - } - return (*this); -} - -Dbt *DbMemoryException::get_dbt() const -{ - return (dbt_); -} - -//////////////////////////////////////////////////////////////////////// -// // -// DbDeadlockException // -// // -//////////////////////////////////////////////////////////////////////// - -DbDeadlockException::~DbDeadlockException() throw() -{ -} - -DbDeadlockException::DbDeadlockException(const char *description) -: DbException(description, DB_LOCK_DEADLOCK) -{ -} - -DbDeadlockException::DbDeadlockException(const DbDeadlockException &that) -: DbException(that) -{ -} - -DbDeadlockException -&DbDeadlockException::operator =(const DbDeadlockException &that) -{ - if (this != &that) - DbException::operator=(that); - return (*this); -} - -//////////////////////////////////////////////////////////////////////// -// // -// DbLockNotGrantedException // -// // -//////////////////////////////////////////////////////////////////////// - -DbLockNotGrantedException::~DbLockNotGrantedException() throw() -{ - delete lock_; -} - -DbLockNotGrantedException::DbLockNotGrantedException(const char *prefix, - db_lockop_t op, db_lockmode_t mode, const Dbt *obj, const DbLock lock, - int index) -: DbException(prefix, DbEnv::strerror(DB_LOCK_NOTGRANTED), - DB_LOCK_NOTGRANTED) -, op_(op) -, mode_(mode) -, obj_(obj) -, lock_(new DbLock(lock)) -, index_(index) -{ -} - -DbLockNotGrantedException::DbLockNotGrantedException(const char *description) -: DbException(description, DB_LOCK_NOTGRANTED) -, op_(DB_LOCK_GET) -, mode_(DB_LOCK_NG) -, obj_(NULL) -, lock_(NULL) -, index_(0) -{ -} - -DbLockNotGrantedException::DbLockNotGrantedException - (const DbLockNotGrantedException &that) -: DbException(that) -{ - op_ = that.op_; - mode_ = that.mode_; - obj_ = that.obj_; - lock_ = (that.lock_ != NULL) ? new DbLock(*that.lock_) : NULL; - index_ = that.index_; -} - -DbLockNotGrantedException -&DbLockNotGrantedException::operator =(const DbLockNotGrantedException &that) -{ - if (this != &that) { - DbException::operator=(that); - op_ = that.op_; - mode_ = that.mode_; - obj_ = that.obj_; - lock_ = (that.lock_ != NULL) ? 
new DbLock(*that.lock_) : NULL; - index_ = that.index_; - } - return (*this); -} - -db_lockop_t DbLockNotGrantedException::get_op() const -{ - return op_; -} - -db_lockmode_t DbLockNotGrantedException::get_mode() const -{ - return mode_; -} - -const Dbt* DbLockNotGrantedException::get_obj() const -{ - return obj_; -} - -DbLock* DbLockNotGrantedException::get_lock() const -{ - return lock_; -} - -int DbLockNotGrantedException::get_index() const -{ - return index_; -} - -//////////////////////////////////////////////////////////////////////// -// // -// DbRepHandleDeadException // -// // -//////////////////////////////////////////////////////////////////////// - -DbRepHandleDeadException::~DbRepHandleDeadException() throw() -{ -} - -DbRepHandleDeadException::DbRepHandleDeadException(const char *description) -: DbException(description, DB_REP_HANDLE_DEAD) -{ -} - -DbRepHandleDeadException::DbRepHandleDeadException - (const DbRepHandleDeadException &that) -: DbException(that) -{ -} - -DbRepHandleDeadException -&DbRepHandleDeadException::operator =(const DbRepHandleDeadException &that) -{ - if (this != &that) - DbException::operator=(that); - return (*this); -} - -//////////////////////////////////////////////////////////////////////// -// // -// DbRunRecoveryException // -// // -//////////////////////////////////////////////////////////////////////// - -DbRunRecoveryException::~DbRunRecoveryException() throw() -{ -} - -DbRunRecoveryException::DbRunRecoveryException(const char *description) -: DbException(description, DB_RUNRECOVERY) -{ -} - -DbRunRecoveryException::DbRunRecoveryException - (const DbRunRecoveryException &that) -: DbException(that) -{ -} - -DbRunRecoveryException -&DbRunRecoveryException::operator =(const DbRunRecoveryException &that) -{ - if (this != &that) - DbException::operator=(that); - return (*this); -} diff --git a/storage/bdb/cxx/cxx_lock.cpp b/storage/bdb/cxx/cxx_lock.cpp deleted file mode 100644 index 47f27ae3504..00000000000 --- a/storage/bdb/cxx/cxx_lock.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_lock.cpp,v 12.1 2005/06/16 20:20:59 bostic Exp $ - */ - -#include "db_config.h" - -#include -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -//////////////////////////////////////////////////////////////////////// -// // -// DbLock // -// // -//////////////////////////////////////////////////////////////////////// - -DbLock::DbLock(DB_LOCK value) -: lock_(value) -{ -} - -DbLock::DbLock() -{ - memset(&lock_, 0, sizeof(DB_LOCK)); -} - -DbLock::DbLock(const DbLock &that) -: lock_(that.lock_) -{ -} - -DbLock &DbLock::operator = (const DbLock &that) -{ - lock_ = that.lock_; - return (*this); -} diff --git a/storage/bdb/cxx/cxx_logc.cpp b/storage/bdb/cxx/cxx_logc.cpp deleted file mode 100644 index 63d7fd9fe17..00000000000 --- a/storage/bdb/cxx/cxx_logc.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: cxx_logc.cpp,v 12.1 2005/06/16 20:21:00 bostic Exp $ - */ - -#include "db_config.h" - -#include -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc_auto/db_auto.h" -#include "dbinc_auto/crdel_auto.h" -#include "dbinc/db_dispatch.h" -#include "dbinc_auto/db_ext.h" -#include "dbinc_auto/common_ext.h" - -// It's private, and should never be called, -// but some compilers need it resolved -// -DbLogc::~DbLogc() -{ -} - -// The name _flags prevents a name clash with __db_log_cursor::flags -int DbLogc::close(u_int32_t _flags) -{ - DB_LOGC *logc = this; - int ret; - DbEnv *dbenv2 = DbEnv::get_DbEnv(logc->dbenv); - - ret = logc->close(logc, _flags); - - if (!DB_RETOK_STD(ret)) - DB_ERROR(dbenv2, "DbLogc::close", ret, ON_ERROR_UNKNOWN); - - return (ret); -} - -// The name _flags prevents a name clash with __db_log_cursor::flags -int DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t _flags) -{ - DB_LOGC *logc = this; - int ret; - - ret = logc->get(logc, lsn, data, _flags); - - if (!DB_RETOK_LGGET(ret)) { - if (ret == DB_BUFFER_SMALL) - DB_ERROR_DBT(DbEnv::get_DbEnv(logc->dbenv), - "DbLogc::get", data, ON_ERROR_UNKNOWN); - else - DB_ERROR(DbEnv::get_DbEnv(logc->dbenv), - "DbLogc::get", ret, ON_ERROR_UNKNOWN); - } - - return (ret); -} diff --git a/storage/bdb/cxx/cxx_mpool.cpp b/storage/bdb/cxx/cxx_mpool.cpp deleted file mode 100644 index 475a18b3e3f..00000000000 --- a/storage/bdb/cxx/cxx_mpool.cpp +++ /dev/null @@ -1,129 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_mpool.cpp,v 12.1 2005/06/16 20:21:02 bostic Exp $ - */ - -#include "db_config.h" - -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -#include "db_int.h" - -// Helper macros for simple methods that pass through to the -// underlying C method. It may return an error or raise an exception. -// Note this macro expects that input _argspec is an argument -// list element (e.g., "char *arg") and that _arglist is the arguments -// that should be passed through to the C method (e.g., "(mpf, arg)") -// -#define DB_MPOOLFILE_METHOD(_name, _argspec, _arglist, _retok) \ -int DbMpoolFile::_name _argspec \ -{ \ - int ret; \ - DB_MPOOLFILE *mpf = unwrap(this); \ - \ - if (mpf == NULL) \ - ret = EINVAL; \ - else \ - ret = mpf->_name _arglist; \ - if (!_retok(ret)) \ - DB_ERROR(DbEnv::get_DbEnv(mpf->dbenv), \ - "DbMpoolFile::"#_name, ret, ON_ERROR_UNKNOWN); \ - return (ret); \ -} - -#define DB_MPOOLFILE_METHOD_VOID(_name, _argspec, _arglist) \ -void DbMpoolFile::_name _argspec \ -{ \ - DB_MPOOLFILE *mpf = unwrap(this); \ - \ - mpf->_name _arglist; \ -} - -//////////////////////////////////////////////////////////////////////// -// // -// DbMpoolFile // -// // -//////////////////////////////////////////////////////////////////////// - -DbMpoolFile::DbMpoolFile() -: imp_(0) -{ -} - -DbMpoolFile::~DbMpoolFile() -{ -} - -int DbMpoolFile::close(u_int32_t flags) -{ - DB_MPOOLFILE *mpf = unwrap(this); - int ret; - DbEnv *dbenv = DbEnv::get_DbEnv(mpf->dbenv); - - if (mpf == NULL) - ret = EINVAL; - else - ret = mpf->close(mpf, flags); - - imp_ = 0; // extra safety - - // This may seem weird, but is legal as long as we don't access - // any data before returning. 
- delete this; - - if (!DB_RETOK_STD(ret)) - DB_ERROR(dbenv, "DbMpoolFile::close", ret, ON_ERROR_UNKNOWN); - - return (ret); -} - -DB_MPOOLFILE_METHOD(get, (db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep), - (mpf, pgnoaddr, flags, pagep), DB_RETOK_MPGET) -DB_MPOOLFILE_METHOD(open, - (const char *file, u_int32_t flags, int mode, size_t pagesize), - (mpf, file, flags, mode, pagesize), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(put, (void *pgaddr, u_int32_t flags), - (mpf, pgaddr, flags), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set, (void *pgaddr, u_int32_t flags), - (mpf, pgaddr, flags), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(get_clear_len, (u_int32_t *lenp), - (mpf, lenp), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set_clear_len, (u_int32_t len), - (mpf, len), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(get_fileid, (u_int8_t *fileid), - (mpf, fileid), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set_fileid, (u_int8_t *fileid), - (mpf, fileid), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(get_flags, (u_int32_t *flagsp), - (mpf, flagsp), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set_flags, (u_int32_t flags, int onoff), - (mpf, flags, onoff), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(get_ftype, (int *ftypep), - (mpf, ftypep), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set_ftype, (int ftype), - (mpf, ftype), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(get_lsn_offset, (int32_t *offsetp), - (mpf, offsetp), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set_lsn_offset, (int32_t offset), - (mpf, offset), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(get_maxsize, (u_int32_t *gbytesp, u_int32_t *bytesp), - (mpf, gbytesp, bytesp), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set_maxsize, (u_int32_t gbytes, u_int32_t bytes), - (mpf, gbytes, bytes), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(get_pgcookie, (DBT *dbt), - (mpf, dbt), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set_pgcookie, (DBT *dbt), - (mpf, dbt), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(get_priority, (DB_CACHE_PRIORITY *priorityp), - (mpf, priorityp), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(set_priority, (DB_CACHE_PRIORITY priority), - (mpf, priority), DB_RETOK_STD) -DB_MPOOLFILE_METHOD(sync, (), - (mpf), DB_RETOK_STD) diff --git a/storage/bdb/cxx/cxx_multi.cpp b/storage/bdb/cxx/cxx_multi.cpp deleted file mode 100644 index ca80bbafbc4..00000000000 --- a/storage/bdb/cxx/cxx_multi.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: cxx_multi.cpp,v 12.3 2005/09/30 07:40:20 mjc Exp $ - */ - -#include "db_config.h" - -#include "db_cxx.h" - -DbMultipleIterator::DbMultipleIterator(const Dbt &dbt) - : data_((u_int8_t*)dbt.get_data()), - p_((u_int32_t*)(data_ + dbt.get_size() - sizeof(u_int32_t))) -{ -} - -bool DbMultipleDataIterator::next(Dbt &data) -{ - if (*p_ == (u_int32_t)-1) { - data.set_data(0); - data.set_size(0); - p_ = 0; - } else { - data.set_data(data_ + *p_--); - data.set_size(*p_--); - if (data.get_size() == 0 && data.get_data() == data_) - data.set_data(0); - } - return (p_ != 0); -} - -bool DbMultipleKeyDataIterator::next(Dbt &key, Dbt &data) -{ - if (*p_ == (u_int32_t)-1) { - key.set_data(0); - key.set_size(0); - data.set_data(0); - data.set_size(0); - p_ = 0; - } else { - key.set_data(data_ + *p_--); - key.set_size(*p_--); - data.set_data(data_ + *p_--); - data.set_size(*p_--); - } - return (p_ != 0); -} - -bool DbMultipleRecnoDataIterator::next(db_recno_t &recno, Dbt &data) -{ - if (*p_ == (u_int32_t)0) { - recno = 0; - data.set_data(0); - data.set_size(0); - p_ = 0; - } else { - recno = *p_--; - data.set_data(data_ + *p_--); - data.set_size(*p_--); - } - return (p_ != 0); -} diff --git a/storage/bdb/cxx/cxx_seq.cpp b/storage/bdb/cxx/cxx_seq.cpp deleted file mode 100644 index ed8997f0340..00000000000 --- a/storage/bdb/cxx/cxx_seq.cpp +++ /dev/null @@ -1,113 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_seq.cpp,v 12.2 2005/10/13 20:49:47 bostic Exp $ - */ - -#include "db_config.h" - -#include -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -#include "db_int.h" - -// Helper macro for simple methods that pass through to the -// underlying C method. It may return an error or raise an exception. 
-// Note this macro expects that input _argspec is an argument -// list element (e.g., "char *arg") and that _arglist is the arguments -// that should be passed through to the C method (e.g., "(db, arg)") -// -#define DBSEQ_METHOD(_name, _argspec, _arglist, _destructor) \ -int DbSequence::_name _argspec \ -{ \ - int ret; \ - DB_SEQUENCE *seq = unwrap(this); \ - DbEnv *dbenv = DbEnv::get_DbEnv(seq->seq_dbp->dbenv); \ - \ - ret = seq->_name _arglist; \ - if (_destructor) \ - imp_ = 0; \ - if (!DB_RETOK_STD(ret)) \ - DB_ERROR(dbenv, \ - "DbSequence::" # _name, ret, ON_ERROR_UNKNOWN); \ - return (ret); \ -} - -DbSequence::DbSequence(Db *db, u_int32_t flags) -: imp_(0) -{ - DB_SEQUENCE *seq; - int ret; - - if ((ret = db_sequence_create(&seq, unwrap(db), flags)) != 0) - DB_ERROR(db->get_env(), "DbSequence::DbSequence", ret, - ON_ERROR_UNKNOWN); - else { - imp_ = seq; - seq->api_internal = this; - } -} - -DbSequence::DbSequence(DB_SEQUENCE *seq) -: imp_(seq) -{ - seq->api_internal = this; -} - -DbSequence::~DbSequence() -{ - DB_SEQUENCE *seq; - - seq = unwrap(this); - if (seq != NULL) - (void)seq->close(seq, 0); -} - -DBSEQ_METHOD(open, (DbTxn *txnid, Dbt *key, u_int32_t flags), - (seq, unwrap(txnid), key, flags), 0) -DBSEQ_METHOD(initial_value, (db_seq_t value), (seq, value), 0) -DBSEQ_METHOD(close, (u_int32_t flags), (seq, flags), 1) -DBSEQ_METHOD(remove, (DbTxn *txnid, u_int32_t flags), - (seq, unwrap(txnid), flags), 1) -DBSEQ_METHOD(stat, (DB_SEQUENCE_STAT **sp, u_int32_t flags), - (seq, sp, flags), 0) -DBSEQ_METHOD(stat_print, (u_int32_t flags), (seq, flags), 0) - -DBSEQ_METHOD(get, - (DbTxn *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags), - (seq, unwrap(txnid), delta, retp, flags), 0) -DBSEQ_METHOD(get_cachesize, (int32_t *sizep), (seq, sizep), 0) -DBSEQ_METHOD(set_cachesize, (int32_t size), (seq, size), 0) -DBSEQ_METHOD(get_flags, (u_int32_t *flagsp), (seq, flagsp), 0) -DBSEQ_METHOD(set_flags, (u_int32_t flags), (seq, flags), 0) -DBSEQ_METHOD(get_range, (db_seq_t *minp, db_seq_t *maxp), (seq, minp, maxp), 0) -DBSEQ_METHOD(set_range, (db_seq_t min, db_seq_t max), (seq, min, max), 0) - -Db *DbSequence::get_db() -{ - DB_SEQUENCE *seq = unwrap(this); - DB *db; - (void)seq->get_db(seq, &db); - return Db::get_Db(db); -} - -Dbt *DbSequence::get_key() -{ - DB_SEQUENCE *seq = unwrap(this); - memset(&key_, 0, sizeof (DBT)); - (void)seq->get_key(seq, &key_); - return Dbt::get_Dbt(&key_); -} - -// static method -DbSequence *DbSequence::wrap_DB_SEQUENCE(DB_SEQUENCE *seq) -{ - DbSequence *wrapped_seq = get_DbSequence(seq); - return (wrapped_seq != NULL) ? wrapped_seq : new DbSequence(seq); -} diff --git a/storage/bdb/cxx/cxx_txn.cpp b/storage/bdb/cxx/cxx_txn.cpp deleted file mode 100644 index 64ee3b9fff5..00000000000 --- a/storage/bdb/cxx/cxx_txn.cpp +++ /dev/null @@ -1,81 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_txn.cpp,v 12.2 2005/06/16 20:21:03 bostic Exp $ - */ - -#include "db_config.h" - -#include - -#include "db_cxx.h" -#include "dbinc/cxx_int.h" - -#include "db_int.h" -#include "dbinc/txn.h" - -// Helper macro for simple methods that pass through to the -// underlying C method. It may return an error or raise an exception. 
-// Note this macro expects that input _argspec is an argument -// list element (e.g., "char *arg") and that _arglist is the arguments -// that should be passed through to the C method (e.g., "(db, arg)") -// -#define DBTXN_METHOD(_name, _delete, _argspec, _arglist) \ -int DbTxn::_name _argspec \ -{ \ - int ret; \ - DB_TXN *txn = unwrap(this); \ - DbEnv *dbenv = DbEnv::get_DbEnv(txn->mgrp->dbenv); \ - \ - ret = txn->_name _arglist; \ - /* Weird, but safe if we don't access this again. */ \ - if (_delete) \ - delete this; \ - if (!DB_RETOK_STD(ret)) \ - DB_ERROR(dbenv, "DbTxn::" # _name, ret, ON_ERROR_UNKNOWN); \ - return (ret); \ -} - -// private constructor, never called but needed by some C++ linkers -DbTxn::DbTxn() -: imp_(0) -{ -} - -DbTxn::DbTxn(DB_TXN *txn) -: imp_(txn) -{ - txn->api_internal = this; -} - -DbTxn::~DbTxn() -{ -} - -DBTXN_METHOD(abort, 1, (), (txn)) -DBTXN_METHOD(commit, 1, (u_int32_t flags), (txn, flags)) -DBTXN_METHOD(discard, 1, (u_int32_t flags), (txn, flags)) - -u_int32_t DbTxn::id() -{ - DB_TXN *txn; - - txn = unwrap(this); - return (txn->id(txn)); // no error -} - -DBTXN_METHOD(get_name, 0, (const char **namep), (txn, namep)) -DBTXN_METHOD(prepare, 0, (u_int8_t *gid), (txn, gid)) -DBTXN_METHOD(set_name, 0, (const char *name), (txn, name)) -DBTXN_METHOD(set_timeout, 0, (db_timeout_t timeout, u_int32_t flags), - (txn, timeout, flags)) - -// static method -DbTxn *DbTxn::wrap_DB_TXN(DB_TXN *txn) -{ - DbTxn *wrapped_txn = get_DbTxn(txn); - return (wrapped_txn != NULL) ? wrapped_txn : new DbTxn(txn); -} diff --git a/storage/bdb/db/crdel.src b/storage/bdb/db/crdel.src deleted file mode 100644 index ba03fea9312..00000000000 --- a/storage/bdb/db/crdel.src +++ /dev/null @@ -1,80 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: crdel.src,v 12.2 2005/09/28 17:44:18 margo Exp $ - */ - -PREFIX __crdel -DBPRIVATE - -INCLUDE #ifndef NO_SYSTEM_INCLUDES -INCLUDE #include -INCLUDE -INCLUDE #include -INCLUDE #include -INCLUDE #endif -INCLUDE -INCLUDE #include "db_int.h" -INCLUDE #include "dbinc/crypto.h" -INCLUDE #include "dbinc/db_page.h" -INCLUDE #include "dbinc/db_dispatch.h" -INCLUDE #include "dbinc/db_am.h" -INCLUDE #include "dbinc/log.h" -INCLUDE #include "dbinc/txn.h" -INCLUDE - -/* - * Metasub: log the creation of a subdatabase meta data page. - * - * fileid: identifies the file being acted upon. - * pgno: page number on which to write this meta-data page - * page: the actual meta-data page - * lsn: lsn of the page. - */ -BEGIN metasub 142 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -PGDBT page DBT s -POINTER lsn DB_LSN * lu -END - -/* - * Inmem_create: Log the creation of an in-memory database. - * - * name: Name of the database - * fid: File id of the database - */ -BEGIN inmem_create 138 -ARG fileid int32_t ld -DBT name DBT s -DBT fid DBT s -ARG pgsize u_int32_t lu -END - -/* - * Inmem_rename: Log the renaming of an in-memory only database. - * - * oldname: database's starting name - * newname: database's ending name - * fid: fileid - */ -BEGIN inmem_rename 139 -DBT oldname DBT s -DBT newname DBT s -DBT fid DBT s -END - -/* - * Inmem_remove: Log the removal of an in-memory only database. 
- * - * name: database's ending name - * fid: fileid - */ -BEGIN inmem_remove 140 -DBT name DBT s -DBT fid DBT s -END - diff --git a/storage/bdb/db/crdel_rec.c b/storage/bdb/db/crdel_rec.c deleted file mode 100644 index a94c6cbbc1f..00000000000 --- a/storage/bdb/db/crdel_rec.c +++ /dev/null @@ -1,294 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: crdel_rec.c,v 12.6 2005/10/20 18:57:04 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/fop.h" -#include "dbinc/hash.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/txn.h" - -/* - * __crdel_metasub_recover -- - * Recovery function for metasub. - * - * PUBLIC: int __crdel_metasub_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__crdel_metasub_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __crdel_metasub_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_p, modified, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__crdel_metasub_print); - REC_INTRO(__crdel_metasub_read, 0, 0); - - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - /* If this is an in-memory file, this might be OK. */ - if (F_ISSET(file_dbp, DB_AM_INMEM) && (ret = __memp_fget(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) == 0) - LSN_NOT_LOGGED(LSN(pagep)); - else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } - } - - modified = 0; - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); - - if (cmp_p == 0 && DB_REDO(op)) { - memcpy(pagep, argp->page.data, argp->page.size); - LSN(pagep) = *lsnp; - modified = 1; - - /* - * If this was an in-memory database and we are re-creating - * and this is the meta-data page, then we need to set up a - * bunch of fields in the dbo as well. - */ - if (F_ISSET(file_dbp, DB_AM_INMEM) && - argp->pgno == PGNO_BASE_MD && - (ret = __db_meta_setup(file_dbp->dbenv, - file_dbp, file_dbp->dname, (DBMETA *)pagep, 0, 1)) != 0) - goto out; - } else if (DB_UNDO(op)) { - /* - * We want to undo this page creation. The page creation - * happened in two parts. First, we called __bam_new which - * was logged separately. Then we wrote the meta-data onto - * the page. So long as we restore the LSN, then the recovery - * for __bam_new will do everything else. - * - * Don't bother checking the lsn on the page. If we are - * rolling back the next thing is that this page will get - * freed. Opening the subdb will have reinitialized the - * page, but not the lsn. - */ - LSN(pagep) = argp->lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __crdel_inmem_create_recover -- - * Recovery function for inmem_create. 
- * - * PUBLIC: int __crdel_inmem_create_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__crdel_inmem_create_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - DB *dbp; - __crdel_inmem_create_args *argp; - int do_close, ret, t_ret; - - COMPQUIET(info, NULL); - dbp = NULL; - do_close = 0; - REC_PRINT(__crdel_inmem_create_print); - REC_NOOP_INTRO(__crdel_inmem_create_read); - - /* First, see if the DB handle already exists. */ - if (argp->fileid == DB_LOGFILEID_INVALID) { - if (DB_REDO(op)) - ret = ENOENT; - else - ret = 0; - } else - ret = __dbreg_id_to_db_int(dbenv, - argp->txnid, &dbp, argp->fileid, 0, 0); - - if (DB_REDO(op)) { - /* - * If the dbreg failed, that means that we're creating a - * tmp file. - */ - if (ret != 0) { - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - goto out; - - F_SET(dbp, DB_AM_RECOVER | DB_AM_INMEM); - memcpy(dbp->fileid, argp->fid.data, DB_FILE_ID_LEN); - if (((ret = __os_strdup(dbenv, - argp->name.data, &dbp->dname)) != 0)) - goto out; - - /* - * This DBP is never going to be entered into the - * dbentry table, so if we leave it open here, - * then we're going to lose it. - */ - do_close = 1; - } - - /* Now, set the fileid. */ - memcpy(dbp->fileid, argp->fid.data, argp->fid.size); - if ((ret = __memp_set_fileid(dbp->mpf, dbp->fileid)) != 0) - goto out; - dbp->preserve_fid = 1; - MAKE_INMEM(dbp); - if ((ret = __db_dbenv_setup(dbp, - NULL, NULL, argp->name.data, TXN_INVALID, 0)) != 0) - goto out; - ret = __db_dbenv_mpool(dbp, argp->name.data, 0); - - if (ret == ENOENT) { - dbp->pgsize = argp->pgsize; - if ((ret = __db_dbenv_mpool(dbp, - argp->name.data, DB_CREATE)) != 0) - goto out; - } else if (ret != 0) - goto out; - } - - if (DB_UNDO(op)) { - if (ret == 0) - ret = __memp_nameop(dbenv, argp->fid.data, NULL, - (const char *)argp->name.data, NULL, 1); - - if (ret == ENOENT || ret == DB_DELETED) - ret = 0; - else - goto out; - } - - *lsnp = argp->prev_lsn; - -out: if (dbp != NULL) { - t_ret = 0; - if (DB_UNDO(op)) - t_ret = __db_refresh(dbp, NULL, DB_NOSYNC, NULL, 0); - else if (do_close || ret != 0) - t_ret = __db_close(dbp, NULL, DB_NOSYNC); - if (t_ret != 0 && ret == 0) - ret = t_ret; - } - REC_NOOP_CLOSE; -} - -/* - * __crdel_inmem_rename_recover -- - * Recovery function for inmem_rename. - * - * PUBLIC: int __crdel_inmem_rename_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__crdel_inmem_rename_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __crdel_inmem_rename_args *argp; - u_int8_t *fileid; - int ret; - - COMPQUIET(info, NULL); - REC_PRINT(__crdel_inmem_rename_print); - REC_NOOP_INTRO(__crdel_inmem_rename_read); - fileid = argp->fid.data; - - /* Void out errors because the files may or may not still exist. */ - if (DB_REDO(op)) - (void)__memp_nameop(dbenv, fileid, - (const char *)argp->newname.data, - (const char *)argp->oldname.data, - (const char *)argp->newname.data, 1); - - if (DB_UNDO(op)) - (void)__memp_nameop(dbenv, fileid, - (const char *)argp->oldname.data, - (const char *)argp->newname.data, - (const char *)argp->oldname.data, 1); - - *lsnp = argp->prev_lsn; - ret = 0; - - REC_NOOP_CLOSE; -} - -/* - * __crdel_inmem_remove_recover -- - * Recovery function for inmem_remove. 
- * - * PUBLIC: int __crdel_inmem_remove_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__crdel_inmem_remove_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __crdel_inmem_remove_args *argp; - int ret; - - COMPQUIET(info, NULL); - REC_PRINT(__crdel_inmem_remove_print); - REC_NOOP_INTRO(__crdel_inmem_remove_read); - - /* - * Since removes are delayed; there is no undo for a remove; only redo. - * The remove may fail, which is OK. - */ - if (DB_REDO(op)) { - (void)__memp_nameop(dbenv, - argp->fid.data, NULL, argp->name.data, NULL, 1); - } - - *lsnp = argp->prev_lsn; - ret = 0; - - REC_NOOP_CLOSE; -} diff --git a/storage/bdb/db/db.c b/storage/bdb/db/db.c deleted file mode 100644 index 432919133a2..00000000000 --- a/storage/bdb/db/db.c +++ /dev/null @@ -1,1509 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db.c,v 12.22 2005/11/12 17:41:44 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_swap.h" -#include "dbinc/btree.h" -#include "dbinc/fop.h" -#include "dbinc/hash.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/qam.h" -#include "dbinc/txn.h" - -static int __db_disassociate __P((DB *)); - -#ifdef CONFIG_TEST -static void __db_makecopy __P((DB_ENV *, const char *, const char *)); -static int __db_testdocopy __P((DB_ENV *, const char *)); -static int __qam_testdocopy __P((DB *, const char *)); -#endif - -/* - * DB.C -- - * This file contains the utility functions for the DBP layer. 
- */ - -/* - * __db_master_open -- - * Open up a handle on a master database. - * - * PUBLIC: int __db_master_open __P((DB *, - * PUBLIC: DB_TXN *, const char *, u_int32_t, int, DB **)); - */ -int -__db_master_open(subdbp, txn, name, flags, mode, dbpp) - DB *subdbp; - DB_TXN *txn; - const char *name; - u_int32_t flags; - int mode; - DB **dbpp; -{ - DB *dbp; - int ret; - - *dbpp = NULL; - - /* Open up a handle on the main database. */ - if ((ret = db_create(&dbp, subdbp->dbenv, 0)) != 0) - return (ret); - - /* - * It's always a btree. - * Run in the transaction we've created. - * Set the pagesize in case we're creating a new database. - * Flag that we're creating a database with subdatabases. - */ - dbp->pgsize = subdbp->pgsize; - F_SET(dbp, DB_AM_SUBDB); - F_SET(dbp, F_ISSET(subdbp, - DB_AM_RECOVER | DB_AM_SWAP | - DB_AM_ENCRYPT | DB_AM_CHKSUM | DB_AM_NOT_DURABLE)); - - /* - * If there was a subdb specified, then we only want to apply - * DB_EXCL to the subdb, not the actual file. We only got here - * because there was a subdb specified. - */ - LF_CLR(DB_EXCL); - LF_SET(DB_RDWRMASTER); - if ((ret = __db_open(dbp, - txn, name, NULL, DB_BTREE, flags, mode, PGNO_BASE_MD)) != 0) - goto err; - - /* - * Verify that pagesize is the same on both. The items in dbp were now - * initialized from the meta page. The items in dbp were set in - * __db_dbopen when we either read or created the master file. Other - * items such as checksum and encryption are checked when we read the - * meta-page. So we do not check those here. However, if the - * meta-page caused checksumming to be turned on and it wasn't already, - * set it here. - */ - if (F_ISSET(dbp, DB_AM_CHKSUM)) - F_SET(subdbp, DB_AM_CHKSUM); - if (subdbp->pgsize != 0 && dbp->pgsize != subdbp->pgsize) { - ret = EINVAL; - __db_err(dbp->dbenv, - "Different pagesize specified on existent file"); - goto err; - } -err: - if (ret != 0 && !F_ISSET(dbp, DB_AM_DISCARD)) - (void)__db_close(dbp, txn, 0); - else - *dbpp = dbp; - return (ret); -} - -/* - * __db_master_update -- - * Add/Open/Remove a subdatabase from a master database. - * - * PUBLIC: int __db_master_update __P((DB *, DB *, DB_TXN *, const char *, - * PUBLIC: DBTYPE, mu_action, const char *, u_int32_t)); - */ -int -__db_master_update(mdbp, sdbp, txn, subdb, type, action, newname, flags) - DB *mdbp, *sdbp; - DB_TXN *txn; - const char *subdb; - DBTYPE type; - mu_action action; - const char *newname; - u_int32_t flags; -{ - DB_ENV *dbenv; - DBC *dbc, *ndbc; - DBT key, data, ndata; - PAGE *p, *r; - db_pgno_t t_pgno; - int modify, ret, t_ret; - - dbenv = mdbp->dbenv; - dbc = ndbc = NULL; - p = NULL; - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - - /* Might we modify the master database? If so, we'll need to lock. */ - modify = (action != MU_OPEN || LF_ISSET(DB_CREATE)) ? 1 : 0; - - /* - * Open up a cursor. If this is CDB and we're creating the database, - * make it an update cursor. - */ - if ((ret = __db_cursor(mdbp, txn, &dbc, - (CDB_LOCKING(dbenv) && modify) ? DB_WRITECURSOR : 0)) != 0) - goto err; - - /* - * Point the cursor at the record. - * - * If we're removing or potentially creating an entry, lock the page - * with DB_RMW. - * - * We do multiple cursor operations with the cursor in some cases and - * subsequently access the data DBT information. Set DB_DBT_MALLOC so - * we don't risk modification of the data between our uses of it. - * - * !!! - * We don't include the name's nul termination in the database. 
- */ - key.data = (void *)subdb; - key.size = (u_int32_t)strlen(subdb); - F_SET(&data, DB_DBT_MALLOC); - - ret = __db_c_get(dbc, &key, &data, - DB_SET | ((STD_LOCKING(dbc) && modify) ? DB_RMW : 0)); - - /* - * What we do next--whether or not we found a record for the - * specified subdatabase--depends on what the specified action is. - * Handle ret appropriately as the first statement of each case. - */ - switch (action) { - case MU_REMOVE: - /* - * We should have found something if we're removing it. Note - * that in the common case where the DB we're asking to remove - * doesn't exist, we won't get this far; __db_subdb_remove - * will already have returned an error from __db_open. - */ - if (ret != 0) - goto err; - - /* - * Delete the subdatabase entry first; if this fails, - * we don't want to touch the actual subdb pages. - */ - if ((ret = __db_c_del(dbc, 0)) != 0) - goto err; - - /* - * We're handling actual data, not on-page meta-data, - * so it hasn't been converted to/from opposite - * endian architectures. Do it explicitly, now. - */ - memcpy(&sdbp->meta_pgno, data.data, sizeof(db_pgno_t)); - DB_NTOHL(&sdbp->meta_pgno); - if ((ret = - __memp_fget(mdbp->mpf, &sdbp->meta_pgno, 0, &p)) != 0) - goto err; - - /* Free the root on the master db if it was created. */ - if (TYPE(p) == P_BTREEMETA && - ((BTMETA *)p)->root != PGNO_INVALID) { - if ((ret = __memp_fget(mdbp->mpf, - &((BTMETA *)p)->root, 0, &r)) != 0) - goto err; - - /* Free and put the page. */ - if ((ret = __db_free(dbc, r)) != 0) { - r = NULL; - goto err; - } - } - /* Free and put the page. */ - if ((ret = __db_free(dbc, p)) != 0) { - p = NULL; - goto err; - } - p = NULL; - break; - case MU_RENAME: - /* We should have found something if we're renaming it. */ - if (ret != 0) - goto err; - - /* - * Before we rename, we need to make sure we're not - * overwriting another subdatabase, or else this operation - * won't be undoable. Open a second cursor and check - * for the existence of newname; it shouldn't appear under - * us since we hold the metadata lock. - */ - if ((ret = __db_cursor(mdbp, txn, &ndbc, 0)) != 0) - goto err; - key.data = (void *)newname; - key.size = (u_int32_t)strlen(newname); - - /* - * We don't actually care what the meta page of the potentially- - * overwritten DB is; we just care about existence. - */ - memset(&ndata, 0, sizeof(ndata)); - F_SET(&ndata, DB_DBT_USERMEM | DB_DBT_PARTIAL); - - if ((ret = __db_c_get(ndbc, &key, &ndata, DB_SET)) == 0) { - /* A subdb called newname exists. Bail. */ - ret = EEXIST; - __db_err(dbenv, "rename: database %s exists", newname); - goto err; - } else if (ret != DB_NOTFOUND) - goto err; - - /* - * Now do the put first; we don't want to lose our - * sole reference to the subdb. Use the second cursor - * so that the first one continues to point to the old record. - */ - if ((ret = __db_c_put(ndbc, &key, &data, DB_KEYFIRST)) != 0) - goto err; - if ((ret = __db_c_del(dbc, 0)) != 0) { - /* - * If the delete fails, try to delete the record - * we just put, in case we're not txn-protected. - */ - (void)__db_c_del(ndbc, 0); - goto err; - } - - break; - case MU_OPEN: - /* - * Get the subdatabase information. If it already exists, - * copy out the page number and we're done. 
- */ - switch (ret) { - case 0: - if (LF_ISSET(DB_CREATE) && LF_ISSET(DB_EXCL)) { - ret = EEXIST; - goto err; - } - memcpy(&sdbp->meta_pgno, data.data, sizeof(db_pgno_t)); - DB_NTOHL(&sdbp->meta_pgno); - goto done; - case DB_NOTFOUND: - if (LF_ISSET(DB_CREATE)) - break; - /* - * No db_err, it is reasonable to remove a - * nonexistent db. - */ - ret = ENOENT; - goto err; - default: - goto err; - } - - /* Create a subdatabase. */ - if ((ret = __db_new(dbc, - type == DB_HASH ? P_HASHMETA : P_BTREEMETA, &p)) != 0) - goto err; - sdbp->meta_pgno = PGNO(p); - - /* - * XXX - * We're handling actual data, not on-page meta-data, so it - * hasn't been converted to/from opposite endian architectures. - * Do it explicitly, now. - */ - t_pgno = PGNO(p); - DB_HTONL(&t_pgno); - memset(&ndata, 0, sizeof(ndata)); - ndata.data = &t_pgno; - ndata.size = sizeof(db_pgno_t); - if ((ret = __db_c_put(dbc, &key, &ndata, DB_KEYLAST)) != 0) - goto err; - F_SET(sdbp, DB_AM_CREATED); - break; - } - -err: -done: /* - * If we allocated a page: if we're successful, mark the page dirty - * and return it to the cache, otherwise, discard/free it. - */ - if (p != NULL) { - if (ret == 0) { - if ((t_ret = - __memp_fput(mdbp->mpf, p, DB_MPOOL_DIRTY)) != 0) - ret = t_ret; - } else - (void)__memp_fput(mdbp->mpf, p, 0); - } - - /* Discard the cursor(s) and data. */ - if (data.data != NULL) - __os_ufree(dbenv, data.data); - if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - if (ndbc != NULL && (t_ret = __db_c_close(ndbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_dbenv_setup -- - * Set up the underlying environment during a db_open. - * - * PUBLIC: int __db_dbenv_setup __P((DB *, - * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t, u_int32_t)); - */ -int -__db_dbenv_setup(dbp, txn, fname, dname, id, flags) - DB *dbp; - DB_TXN *txn; - const char *fname, *dname; - u_int32_t id, flags; -{ - DB *ldbp; - DB_ENV *dbenv; - u_int32_t maxid; - int ret; - - dbenv = dbp->dbenv; - - /* If we don't yet have an environment, it's time to create it. */ - if (!F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) { - /* Make sure we have at least DB_MINCACHE pages in our cache. */ - if (dbenv->mp_gbytes == 0 && - dbenv->mp_bytes < dbp->pgsize * DB_MINPAGECACHE && - (ret = __memp_set_cachesize( - dbenv, 0, dbp->pgsize * DB_MINPAGECACHE, 0)) != 0) - return (ret); - - if ((ret = __env_open(dbenv, NULL, DB_CREATE | - DB_INIT_MPOOL | DB_PRIVATE | LF_ISSET(DB_THREAD), 0)) != 0) - return (ret); - } - - /* Join the underlying cache. */ - if ((!F_ISSET(dbp, DB_AM_INMEM) || dname == NULL) && - (ret = __db_dbenv_mpool(dbp, fname, flags)) != 0) - return (ret); - - /* We may need a per-thread mutex. */ - if (LF_ISSET(DB_THREAD) && (ret = __mutex_alloc( - dbenv, MTX_DB_HANDLE, DB_MUTEX_THREAD, &dbp->mutex)) != 0) - return (ret); - - /* - * Set up a bookkeeping entry for this database in the log region, - * if such a region exists. Note that even if we're in recovery - * or a replication client, where we won't log registries, we'll - * still need an FNAME struct, so LOGGING_ON is the correct macro. - */ - if (LOGGING_ON(dbenv) && dbp->log_filename == NULL && - (ret = __dbreg_setup(dbp, - F_ISSET(dbp, DB_AM_INMEM) ? dname : fname, id)) != 0) - return (ret); - - /* - * If we're actively logging and our caller isn't a recovery function - * that already did so, then assign this dbp a log fileid. 
- */ - if (DBENV_LOGGING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER) && -#if !defined(DEBUG_ROP) - !F_ISSET(dbp, DB_AM_RDONLY) && -#endif - (ret = __dbreg_new_id(dbp, txn)) != 0) - return (ret); - - /* - * Insert ourselves into the DB_ENV's dblist. We allocate a - * unique ID to each {fileid, meta page number} pair, and to - * each temporary file (since they all have a zero fileid). - * This ID gives us something to use to tell which DB handles - * go with which databases in all the cursor adjustment - * routines, where we don't want to do a lot of ugly and - * expensive memcmps. - */ - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (maxid = 0, ldbp = LIST_FIRST(&dbenv->dblist); - ldbp != NULL; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - if (!F_ISSET(dbp, DB_AM_INMEM)) { - if (memcmp(ldbp->fileid, dbp->fileid, DB_FILE_ID_LEN) - == 0 && ldbp->meta_pgno == dbp->meta_pgno) - break; - } else if (dname != NULL) { - if (F_ISSET(ldbp, DB_AM_INMEM) && - strcmp(ldbp->dname, dname) == 0) - break; - } - if (ldbp->adj_fileid > maxid) - maxid = ldbp->adj_fileid; - } - - /* - * If ldbp is NULL, we didn't find a match, or we weren't - * really looking because fname is NULL. Assign the dbp an - * adj_fileid one higher than the largest we found, and - * insert it at the head of the master dbp list. - * - * If ldbp is not NULL, it is a match for our dbp. Give dbp - * the same ID that ldbp has, and add it after ldbp so they're - * together in the list. - */ - if (ldbp == NULL) { - dbp->adj_fileid = maxid + 1; - LIST_INSERT_HEAD(&dbenv->dblist, dbp, dblistlinks); - } else { - dbp->adj_fileid = ldbp->adj_fileid; - LIST_INSERT_AFTER(ldbp, dbp, dblistlinks); - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - return (0); -} - -/* - * __db_dbenv_mpool -- - * Set up the underlying environment cache during a db_open. - * - * PUBLIC: int __db_dbenv_mpool __P((DB *, const char *, u_int32_t)); - */ -int -__db_dbenv_mpool(dbp, fname, flags) - DB *dbp; - const char *fname; - u_int32_t flags; -{ - DB_ENV *dbenv; - DBT pgcookie; - DB_MPOOLFILE *mpf; - DB_PGINFO pginfo; - int fidset, ftype, ret; - int32_t lsn_off; - u_int8_t nullfid[DB_FILE_ID_LEN]; - u_int32_t clear_len; - - COMPQUIET(mpf, NULL); - - dbenv = dbp->dbenv; - lsn_off = 0; - - /* It's possible that this database is already open. */ - if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) - return (0); - - /* - * If we need to pre- or post-process a file's pages on I/O, set the - * file type. If it's a hash file, always call the pgin and pgout - * routines. This means that hash files can never be mapped into - * process memory. If it's a btree file and requires swapping, we - * need to page the file in and out. This has to be right -- we can't - * mmap files that are being paged in and out. - */ - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - ftype = F_ISSET(dbp, DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM) - ? DB_FTYPE_SET : DB_FTYPE_NOTSET; - clear_len = CRYPTO_ON(dbenv) ? - (dbp->pgsize != 0 ? dbp->pgsize : DB_CLEARLEN_NOTSET) : - DB_PAGE_DB_LEN; - break; - case DB_HASH: - ftype = DB_FTYPE_SET; - clear_len = CRYPTO_ON(dbenv) ? - (dbp->pgsize != 0 ? dbp->pgsize : DB_CLEARLEN_NOTSET) : - DB_PAGE_DB_LEN; - break; - case DB_QUEUE: - ftype = F_ISSET(dbp, - DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM) ? - DB_FTYPE_SET : DB_FTYPE_NOTSET; - - /* - * If we came in here without a pagesize set, then we need - * to mark the in-memory handle as having clear_len not - * set, because we don't really know the clear length or - * the page size yet (since the file doesn't yet exist). 
- */ - clear_len = dbp->pgsize != 0 ? dbp->pgsize : DB_CLEARLEN_NOTSET; - break; - case DB_UNKNOWN: - /* - * If we're running in the verifier, our database might - * be corrupt and we might not know its type--but we may - * still want to be able to verify and salvage. - * - * If we can't identify the type, it's not going to be safe - * to call __db_pgin--we pretty much have to give up all - * hope of salvaging cross-endianness. Proceed anyway; - * at worst, the database will just appear more corrupt - * than it actually is, but at best, we may be able - * to salvage some data even with no metadata page. - */ - if (F_ISSET(dbp, DB_AM_VERIFYING)) { - ftype = DB_FTYPE_NOTSET; - clear_len = DB_PAGE_DB_LEN; - break; - } - - /* - * This might be an in-memory file and we won't know its - * file type until after we open it and read the meta-data - * page. - */ - if (F_ISSET(dbp, DB_AM_INMEM)) { - clear_len = DB_CLEARLEN_NOTSET; - ftype = DB_FTYPE_NOTSET; - lsn_off = DB_LSN_OFF_NOTSET; - break; - } - /* FALLTHROUGH */ - default: - return (__db_unknown_type(dbenv, "DB->open", dbp->type)); - } - - mpf = dbp->mpf; - - memset(nullfid, 0, DB_FILE_ID_LEN); - fidset = memcmp(nullfid, dbp->fileid, DB_FILE_ID_LEN); - if (fidset) - (void)__memp_set_fileid(mpf, dbp->fileid); - - (void)__memp_set_clear_len(mpf, clear_len); - (void)__memp_set_ftype(mpf, ftype); - (void)__memp_set_lsn_offset(mpf, lsn_off); - - pginfo.db_pagesize = dbp->pgsize; - pginfo.flags = - F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP)); - pginfo.type = dbp->type; - pgcookie.data = &pginfo; - pgcookie.size = sizeof(DB_PGINFO); - (void)__memp_set_pgcookie(mpf, &pgcookie); - - if ((ret = __memp_fopen(mpf, NULL, fname, - LF_ISSET(DB_CREATE | DB_DURABLE_UNKNOWN | - DB_NOMMAP | DB_ODDFILESIZE | DB_RDONLY | DB_TRUNCATE) | - (F_ISSET(dbenv, DB_ENV_DIRECT_DB) ? DB_DIRECT : 0) | - (F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_TXN_NOT_DURABLE : 0), - 0, dbp->pgsize)) != 0) { - /* - * The open didn't work; we need to reset the mpf, - * retaining the in-memory semantics (if any). - */ - (void)__memp_fclose(dbp->mpf, 0); - (void)__memp_fcreate(dbenv, &dbp->mpf); - if (F_ISSET(dbp, DB_AM_INMEM)) - MAKE_INMEM(dbp); - return (ret); - } - - /* - * Set the open flag. We use it to mean that the dbp has gone - * through mpf setup, including dbreg_register. Also, below, - * the underlying access method open functions may want to do - * things like acquire cursors, so the open flag has to be set - * before calling them. - */ - F_SET(dbp, DB_AM_OPEN_CALLED); - if (!fidset && fname != NULL) { - (void)__memp_get_fileid(dbp->mpf, dbp->fileid); - dbp->preserve_fid = 1; - } - - return (0); -} - -/* - * __db_close -- - * DB->close method. - * - * PUBLIC: int __db_close __P((DB *, DB_TXN *, u_int32_t)); - */ -int -__db_close(dbp, txn, flags) - DB *dbp; - DB_TXN *txn; - u_int32_t flags; -{ - DB_ENV *dbenv; - int db_ref, deferred_close, ret, t_ret; - - dbenv = dbp->dbenv; - deferred_close = ret = 0; - - /* - * Validate arguments, but as a DB handle destructor, we can't fail. - * - * Check for consistent transaction usage -- ignore errors. Only - * internal callers specify transactions, so it's a serious problem - * if we get error messages. - */ - if (txn != NULL) - (void)__db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0); - - /* Refresh the structure and close any underlying resources. 
*/ - ret = __db_refresh(dbp, txn, flags, &deferred_close, 0); - - /* - * If we've deferred the close because the logging of the close failed, - * return our failure right away without destroying the handle. - */ - if (deferred_close) - return (ret); - - /* !!! - * This code has an apparent race between the moment we read and - * decrement dbenv->db_ref and the moment we check whether it's 0. - * However, if the environment is DBLOCAL, the user shouldn't have a - * reference to the dbenv handle anyway; the only way we can get - * multiple dbps sharing a local dbenv is if we open them internally - * during something like a subdatabase open. If any such thing is - * going on while the user is closing the original dbp with a local - * dbenv, someone's already badly screwed up, so there's no reason - * to bother engineering around this possibility. - */ - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - db_ref = --dbenv->db_ref; - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - if (F_ISSET(dbenv, DB_ENV_DBLOCAL) && db_ref == 0 && - (t_ret = __env_close(dbenv, 0)) != 0 && ret == 0) - ret = t_ret; - - /* Free the database handle. */ - memset(dbp, CLEAR_BYTE, sizeof(*dbp)); - __os_free(dbenv, dbp); - - return (ret); -} - -/* - * __db_refresh -- - * Refresh the DB structure, releasing any allocated resources. - * This does most of the work of closing files now because refresh - * is what is used during abort processing (since we can't destroy - * the actual handle) and during abort processing, we may have a - * fully opened handle. - * - * PUBLIC: int __db_refresh __P((DB *, DB_TXN *, u_int32_t, int *, int)); - */ -int -__db_refresh(dbp, txn, flags, deferred_closep, reuse) - DB *dbp; - DB_TXN *txn; - u_int32_t flags; - int *deferred_closep, reuse; -{ - DB *sdbp; - DBC *dbc; - DB_ENV *dbenv; - DB_LOCKREQ lreq; - REGENV *renv; - REGINFO *infop; - u_int32_t save_flags; - int resync, ret, t_ret; - - ret = 0; - - dbenv = dbp->dbenv; - infop = dbenv->reginfo; - if (infop != NULL) - renv = infop->primary; - else - renv = NULL; - - /* If never opened, or not currently open, it's easy. */ - if (!F_ISSET(dbp, DB_AM_OPEN_CALLED)) - goto never_opened; - - /* - * If we have any secondary indices, disassociate them from us. - * We don't bother with the mutex here; it only protects some - * of the ops that will make us core-dump mid-close anyway, and - * if you're trying to do something with a secondary *while* you're - * closing the primary, you deserve what you get. The disassociation - * is mostly done just so we can close primaries and secondaries in - * any order--but within one thread of control. - */ - for (sdbp = LIST_FIRST(&dbp->s_secondaries); - sdbp != NULL; sdbp = LIST_NEXT(sdbp, s_links)) { - LIST_REMOVE(sdbp, s_links); - if ((t_ret = __db_disassociate(sdbp)) != 0 && ret == 0) - ret = t_ret; - } - - /* - * Sync the underlying access method. Do before closing the cursors - * because DB->sync allocates cursors in order to write Recno backing - * source text files. - * - * Sync is slow on some systems, notably Solaris filesystems where the - * entire buffer cache is searched. If we're in recovery, don't flush - * the file, it's not necessary. - */ - if (!LF_ISSET(DB_NOSYNC) && - !F_ISSET(dbp, DB_AM_DISCARD | DB_AM_RECOVER) && - (t_ret = __db_sync(dbp)) != 0 && ret == 0) - ret = t_ret; - - /* - * Go through the active cursors and call the cursor recycle routine, - * which resolves pending operations and moves the cursors onto the - * free list. Then, walk the free list and call the cursor destroy - * routine. 
Note that any failure on a close is considered "really - * bad" and we just break out of the loop and force forward. - */ - resync = TAILQ_FIRST(&dbp->active_queue) == NULL ? 0 : 1; - while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL) - if ((t_ret = __db_c_close(dbc)) != 0) { - if (ret == 0) - ret = t_ret; - break; - } - - while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL) - if ((t_ret = __db_c_destroy(dbc)) != 0) { - if (ret == 0) - ret = t_ret; - break; - } - - /* - * Close any outstanding join cursors. Join cursors destroy themselves - * on close and have no separate destroy routine. We don't have to set - * the resync flag here, because join cursors aren't write cursors. - */ - while ((dbc = TAILQ_FIRST(&dbp->join_queue)) != NULL) - if ((t_ret = __db_join_close(dbc)) != 0) { - if (ret == 0) - ret = t_ret; - break; - } - - /* - * Sync the memory pool, even though we've already called DB->sync, - * because closing cursors can dirty pages by deleting items they - * referenced. - * - * Sync is slow on some systems, notably Solaris filesystems where the - * entire buffer cache is searched. If we're in recovery, don't flush - * the file, it's not necessary. - */ - if (resync && !LF_ISSET(DB_NOSYNC) && - !F_ISSET(dbp, DB_AM_DISCARD | DB_AM_RECOVER) && - (t_ret = __memp_fsync(dbp->mpf)) != 0 && ret == 0) - ret = t_ret; - -never_opened: - /* - * At this point, we haven't done anything to render the DB - * handle unusable, at least by a transaction abort. Take the - * opportunity now to log the file close. If this log fails - * and we're in a transaction, we have to bail out of the attempted - * close; we'll need a dbp in order to successfully abort the - * transaction, and we can't conjure a new one up because we haven't - * gotten out the dbreg_register record that represents the close. - * In this case, we put off actually closing the dbp until we've - * performed the abort. - */ - if (!reuse && LOGGING_ON(dbp->dbenv)) { - /* - * Discard the log file id, if any. We want to log the close - * if and only if this is not a recovery dbp or a client dbp, - * or a dead dbp handle. - */ - DB_ASSERT(renv != NULL); - if (F_ISSET(dbp, DB_AM_RECOVER) || IS_REP_CLIENT(dbenv) || - dbp->timestamp != renv->rep_timestamp) - t_ret = __dbreg_revoke_id(dbp, 0, DB_LOGFILEID_INVALID); - else { - if ((t_ret = __dbreg_close_id(dbp, - txn, DBREG_CLOSE)) != 0 && txn != NULL) { - /* - * We're in a txn and the attempt to log the - * close failed; let the txn subsystem know - * that we need to destroy this dbp once we're - * done with the abort, then bail from the - * close. - * - * Note that if the attempt to put off the - * close -also- fails--which it won't unless - * we're out of heap memory--we're really - * screwed. Panic. - */ - if ((ret = - __txn_closeevent(dbenv, txn, dbp)) != 0) - return (__db_panic(dbenv, ret)); - if (deferred_closep != NULL) - *deferred_closep = 1; - return (t_ret); - } - /* - * If dbreg_close_id failed and we were not in a - * transaction, then we need to finish this close - * because the caller can't do anything with the - * handle after we return an error. We rely on - * dbreg_close_id to mark the entry in some manner - * so that we do not do a clean shutdown of this - * environment. If shutdown isn't clean, then the - * application *must* run recovery and that will - * generate the RCLOSE record. - */ - } - - if (ret == 0) - ret = t_ret; - - /* Discard the log FNAME. 
*/ - if ((t_ret = __dbreg_teardown(dbp)) != 0 && ret == 0) - ret = t_ret; - } - - /* Close any handle we've been holding since the open. */ - if (dbp->saved_open_fhp != NULL && - (t_ret = __os_closehandle(dbenv, dbp->saved_open_fhp)) != 0 && - ret == 0) - ret = t_ret; - - /* - * Remove this DB handle from the DB_ENV's dblist, if it's been added. - * - * Close our reference to the underlying cache while locked, we don't - * want to race with a thread searching for our underlying cache link - * while opening a DB handle. - */ - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - if (!reuse && dbp->dblistlinks.le_prev != NULL) { - LIST_REMOVE(dbp, dblistlinks); - dbp->dblistlinks.le_prev = NULL; - } - - /* Close the memory pool file handle. */ - if (dbp->mpf != NULL) { - if ((t_ret = __memp_fclose(dbp->mpf, - F_ISSET(dbp, DB_AM_DISCARD) ? DB_MPOOL_DISCARD : 0)) != 0 && - ret == 0) - ret = t_ret; - dbp->mpf = NULL; - if (reuse && - (t_ret = __memp_fcreate(dbenv, &dbp->mpf)) != 0 && - ret == 0) - ret = t_ret; - } - - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - /* - * Call the access specific close function. - * - * We do this here rather than in __db_close as we need to do this when - * aborting an open so that file descriptors are closed and abort of - * renames can succeed on platforms that lock open files (such as - * Windows). In particular, we need to ensure that all the extents - * associated with a queue are closed so that queue renames can be - * aborted. - * - * It is also important that we do this before releasing the handle - * lock, because dbremove and dbrename assume that once they have the - * handle lock, it is safe to modify the underlying file(s). - * - * !!! - * Because of where these functions are called in the DB handle close - * process, these routines can't do anything that would dirty pages or - * otherwise affect closing down the database. Specifically, we can't - * abort and recover any of the information they control. - */ - if ((t_ret = __bam_db_close(dbp)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __ham_db_close(dbp)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __qam_db_close(dbp, dbp->flags)) != 0 && ret == 0) - ret = t_ret; - - /* - * !!! - * At this point, the access-method specific information has been - * freed. From now on, we can use the dbp, but not touch any - * access-method specific data. - */ - - if (!reuse && dbp->lid != DB_LOCK_INVALIDID) { - /* We may have pending trade operations on this dbp. */ - if (txn != NULL) - __txn_remlock(dbenv, txn, &dbp->handle_lock, dbp->lid); - - /* We may be holding the handle lock; release it. */ - lreq.op = DB_LOCK_PUT_ALL; - lreq.obj = NULL; - if ((t_ret = __lock_vec(dbenv, - dbp->lid, 0, &lreq, 1, NULL)) != 0 && ret == 0) - ret = t_ret; - - if ((t_ret = __lock_id_free(dbenv, dbp->lid)) != 0 && ret == 0) - ret = t_ret; - dbp->lid = DB_LOCK_INVALIDID; - LOCK_INIT(dbp->handle_lock); - } - - /* - * If this is a temporary file (un-named in-memory file), then - * discard the locker ID allocated as the fileid. - */ - if (LOCKING_ON(dbenv) && - F_ISSET(dbp, DB_AM_INMEM) && !dbp->preserve_fid && - *(u_int32_t *)dbp->fileid != DB_LOCK_INVALIDID && - (t_ret = __lock_id_free(dbenv, *(u_int32_t *)dbp->fileid)) != 0 && - ret == 0) - ret = t_ret; - - if (reuse) { - /* - * If we are reusing this dbp, then we're done now. Re-init - * the handle, preserving important flags, and then return. - * This code is borrowed from __db_init, which does more - * than we can do here. 
- */ - save_flags = F_ISSET(dbp, DB_AM_INMEM | DB_AM_TXN); - - /* - * XXX If this is an XA handle, we'll want to specify - * DB_XA_CREATE. - */ - if ((ret = __bam_db_create(dbp)) != 0) - return (ret); - if ((ret = __ham_db_create(dbp)) != 0) - return (ret); - if ((ret = __qam_db_create(dbp)) != 0) - return (ret); - - /* Restore flags */ - dbp->flags = dbp->orig_flags | save_flags; - - if (FLD_ISSET(save_flags, DB_AM_INMEM)) { - /* - * If this is inmem, then it may have a fileid - * even if it was never opened, and we need to - * clear out that fileid. - */ - memset(dbp->fileid, 0, sizeof(dbp->fileid)); - MAKE_INMEM(dbp); - } - return (ret); - } - - dbp->type = DB_UNKNOWN; - - /* Discard the thread mutex. */ - if ((t_ret = __mutex_free(dbenv, &dbp->mutex)) != 0 && ret == 0) - ret = t_ret; - - /* Discard any memory allocated for the file and database names. */ - if (dbp->fname != NULL) { - __os_free(dbp->dbenv, dbp->fname); - dbp->fname = NULL; - } - if (dbp->dname != NULL) { - __os_free(dbp->dbenv, dbp->dname); - dbp->dname = NULL; - } - - /* Discard any memory used to store returned data. */ - if (dbp->my_rskey.data != NULL) - __os_free(dbp->dbenv, dbp->my_rskey.data); - if (dbp->my_rkey.data != NULL) - __os_free(dbp->dbenv, dbp->my_rkey.data); - if (dbp->my_rdata.data != NULL) - __os_free(dbp->dbenv, dbp->my_rdata.data); - - /* For safety's sake; we may refresh twice. */ - memset(&dbp->my_rskey, 0, sizeof(DBT)); - memset(&dbp->my_rkey, 0, sizeof(DBT)); - memset(&dbp->my_rdata, 0, sizeof(DBT)); - - /* Clear out fields that normally get set during open. */ - memset(dbp->fileid, 0, sizeof(dbp->fileid)); - dbp->adj_fileid = 0; - dbp->meta_pgno = 0; - dbp->cur_lid = DB_LOCK_INVALIDID; - dbp->associate_lid = DB_LOCK_INVALIDID; - dbp->cl_id = 0; - dbp->open_flags = 0; - - /* - * If we are being refreshed with a txn specified, then we need - * to make sure that we clear out the lock handle field, because - * releasing all the locks for this transaction will release this - * lock and we don't want close to stumble upon this handle and - * try to close it. - */ - if (txn != NULL) - LOCK_INIT(dbp->handle_lock); - - /* Reset flags to whatever the user configured. */ - dbp->flags = dbp->orig_flags; - - return (ret); -} - -/* - * __db_log_page - * Log a meta-data or root page during a subdatabase create operation. - * - * PUBLIC: int __db_log_page __P((DB *, DB_TXN *, DB_LSN *, db_pgno_t, PAGE *)); - */ -int -__db_log_page(dbp, txn, lsn, pgno, page) - DB *dbp; - DB_TXN *txn; - DB_LSN *lsn; - db_pgno_t pgno; - PAGE *page; -{ - DBT page_dbt; - DB_LSN new_lsn; - int ret; - - if (!LOGGING_ON(dbp->dbenv) || txn == NULL) - return (0); - - memset(&page_dbt, 0, sizeof(page_dbt)); - page_dbt.size = dbp->pgsize; - page_dbt.data = page; - - ret = __crdel_metasub_log(dbp, txn, &new_lsn, 0, pgno, &page_dbt, lsn); - - if (ret == 0) - page->lsn = new_lsn; - return (ret); -} - -/* - * __db_backup_name - * Create the backup file name for a given file. 
- * - * PUBLIC: int __db_backup_name __P((DB_ENV *, - * PUBLIC: const char *, DB_TXN *, char **)); - */ -#undef BACKUP_PREFIX -#define BACKUP_PREFIX "__db" - -#undef MAX_LSN_TO_TEXT -#define MAX_LSN_TO_TEXT 17 - -int -__db_backup_name(dbenv, name, txn, backup) - DB_ENV *dbenv; - const char *name; - DB_TXN *txn; - char **backup; -{ - DB_LSN lsn; - size_t len; - int ret; - char *p, *retp; - - /* - * Part of the name may be a full path, so we need to make sure that - * we allocate enough space for it, even in the case where we don't - * use the entire filename for the backup name. - */ - len = strlen(name) + strlen(BACKUP_PREFIX) + 1 + MAX_LSN_TO_TEXT; - if ((ret = __os_malloc(dbenv, len, &retp)) != 0) - return (ret); - - /* - * Create the name. Backup file names are in one of three forms: - * - * In a transactional env: __db.LSN(8).LSN(8) - * and - * In VXWORKS (where we want 8.3 support) - * and - * in any other non-transactional env: __db.FILENAME - * - * If the transaction doesn't have a current LSN, we write a dummy - * log record to force it, so we ensure all tmp names are unique. - * - * In addition, the name passed may contain an env-relative path. - * In that case, put the __db. in the right place (in the last - * component of the pathname). - * - * There are four cases here: - * 1. simple path w/out transaction - * 2. simple path + transaction - * 3. multi-component path w/out transaction - * 4. multi-component path + transaction - */ - p = __db_rpath(name); - if (txn == NULL) { -#ifdef HAVE_VXWORKS - { int i, n; - /* On VxWorks we must support 8.3 names. */ - if (p == NULL) /* Case 1. */ - n = snprintf(retp, - len, "%s%.4s.tmp", BACKUP_PREFIX, name); - else /* Case 3. */ - n = snprintf(retp, len, "%.*s%s%.4s.tmp", - (int)(p - name) + 1, name, BACKUP_PREFIX, p + 1); - - /* - * Overwrite "." in the characters copied from the name. - * If we backup 8 characters from the end, we're guaranteed - * to a) include the four bytes we copied from the name - * and b) not run off the beginning of the string. - */ - for (i = 0, p = (retp + n) - 8; i < 4; p++, i++) - if (*p == '.') - *p = '_'; - } -#else - if (p == NULL) /* Case 1. */ - snprintf(retp, len, "%s.%s", BACKUP_PREFIX, name); - else /* Case 3. */ - snprintf(retp, len, "%.*s%s.%s", - (int)(p - name) + 1, name, BACKUP_PREFIX, p + 1); -#endif - } else { - lsn = ((TXN_DETAIL *)txn->td)->last_lsn; - if (IS_ZERO_LSN(lsn)) { - /* - * Write dummy log record. The two choices for dummy - * log records are __db_noop_log and __db_debug_log; - * unfortunately __db_noop_log requires a valid dbp, - * and we aren't guaranteed to be able to pass one in - * here. - */ - if ((ret = __db_debug_log(dbenv, - txn, &lsn, 0, NULL, 0, NULL, NULL, 0)) != 0) { - __os_free(dbenv, retp); - return (ret); - } - } - - if (p == NULL) /* Case 2. */ - snprintf(retp, len, - "%s.%x.%x", BACKUP_PREFIX, lsn.file, lsn.offset); - else /* Case 4. */ - snprintf(retp, len, "%.*s%x.%x", - (int)(p - name) + 1, name, lsn.file, lsn.offset); - } - - *backup = retp; - return (0); -} - -/* - * __dblist_get -- - * Get the first element of dbenv->dblist with - * dbp->adj_fileid matching adjid. - * - * PUBLIC: DB *__dblist_get __P((DB_ENV *, u_int32_t)); - */ -DB * -__dblist_get(dbenv, adjid) - DB_ENV *dbenv; - u_int32_t adjid; -{ - DB *dbp; - - for (dbp = LIST_FIRST(&dbenv->dblist); - dbp != NULL && dbp->adj_fileid != adjid; - dbp = LIST_NEXT(dbp, dblistlinks)) - ; - - return (dbp); -} - -/* - * __db_disassociate -- - * Destroy the association between a given secondary and its primary. 
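Editorial aside for reviewers, not part of the removed file: __db_backup_name above documents the temporary-file naming scheme this code relied on — __db.<lsn-file-hex>.<lsn-offset-hex> when a transaction supplies an LSN, and __db.FILENAME otherwise. A minimal sketch of that convention, assuming a hypothetical lsn struct and helper name (backup_name) in place of the internal DB_LSN and __os_malloc machinery:

#include <stdio.h>

/* Hypothetical stand-in for the DB_LSN file/offset pair. */
struct lsn {
	unsigned int file;
	unsigned int offset;
};

/* Format a backup name into a caller-supplied buffer. */
static void
backup_name(const char *name, const struct lsn *lsn, char *buf, size_t len)
{
	if (lsn == NULL)	/* non-transactional environment */
		(void)snprintf(buf, len, "__db.%s", name);
	else			/* transactional: LSN pair keeps the name unique */
		(void)snprintf(buf, len, "__db.%x.%x", lsn->file, lsn->offset);
}

The removed routine additionally handles env-relative paths (inserting the prefix before the last path component) and an 8.3-safe variant for VxWorks, which this sketch omits.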
- */ -static int -__db_disassociate(sdbp) - DB *sdbp; -{ - DBC *dbc; - int ret, t_ret; - - ret = 0; - - sdbp->s_callback = NULL; - sdbp->s_primary = NULL; - sdbp->get = sdbp->stored_get; - sdbp->close = sdbp->stored_close; - - /* - * Complain, but proceed, if we have any active cursors. (We're in - * the middle of a close, so there's really no turning back.) - */ - if (sdbp->s_refcnt != 1 || - TAILQ_FIRST(&sdbp->active_queue) != NULL || - TAILQ_FIRST(&sdbp->join_queue) != NULL) { - __db_err(sdbp->dbenv, - "Closing a primary DB while a secondary DB has active cursors is unsafe"); - ret = EINVAL; - } - sdbp->s_refcnt = 0; - - while ((dbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL) - if ((t_ret = __db_c_destroy(dbc)) != 0 && ret == 0) - ret = t_ret; - - F_CLR(sdbp, DB_AM_SECONDARY); - return (ret); -} - -#ifdef CONFIG_TEST -/* - * __db_testcopy - * Create a copy of all backup files and our "main" DB. - * - * PUBLIC: #ifdef CONFIG_TEST - * PUBLIC: int __db_testcopy __P((DB_ENV *, DB *, const char *)); - * PUBLIC: #endif - */ -int -__db_testcopy(dbenv, dbp, name) - DB_ENV *dbenv; - DB *dbp; - const char *name; -{ - DB_MPOOL *dbmp; - DB_MPOOLFILE *mpf; - - DB_ASSERT(dbp != NULL || name != NULL); - - if (name == NULL) { - dbmp = dbenv->mp_handle; - mpf = dbp->mpf; - name = R_ADDR(dbmp->reginfo, mpf->mfp->path_off); - } - - if (dbp != NULL && dbp->type == DB_QUEUE) - return (__qam_testdocopy(dbp, name)); - else - return (__db_testdocopy(dbenv, name)); -} - -static int -__qam_testdocopy(dbp, name) - DB *dbp; - const char *name; -{ - QUEUE_FILELIST *filelist, *fp; - char buf[256], *dir; - int ret; - - filelist = NULL; - if ((ret = __db_testdocopy(dbp->dbenv, name)) != 0) - return (ret); - if (dbp->mpf != NULL && - (ret = __qam_gen_filelist(dbp, &filelist)) != 0) - return (ret); - - if (filelist == NULL) - return (0); - dir = ((QUEUE *)dbp->q_internal)->dir; - for (fp = filelist; fp->mpf != NULL; fp++) { - snprintf(buf, sizeof(buf), - QUEUE_EXTENT, dir, PATH_SEPARATOR[0], name, fp->id); - if ((ret = __db_testdocopy(dbp->dbenv, buf)) != 0) - return (ret); - } - - __os_free(dbp->dbenv, filelist); - return (0); -} - -/* - * __db_testdocopy - * Create a copy of all backup files and our "main" DB. - * - */ -static int -__db_testdocopy(dbenv, name) - DB_ENV *dbenv; - const char *name; -{ - size_t len; - int dircnt, i, ret; - char *backup, *copy, *dir, **namesp, *p, *real_name; - - dircnt = 0; - copy = backup = NULL; - namesp = NULL; - - /* Get the real backing file name. */ - if ((ret = __db_appname(dbenv, - DB_APP_DATA, name, 0, NULL, &real_name)) != 0) - return (ret); - - /* - * Maximum size of file, including adding a ".afterop". - */ - len = strlen(real_name) + - strlen(BACKUP_PREFIX) + 1 + MAX_LSN_TO_TEXT + 9; - - if ((ret = __os_malloc(dbenv, len, ©)) != 0) - goto err; - - if ((ret = __os_malloc(dbenv, len, &backup)) != 0) - goto err; - - /* - * First copy the file itself. - */ - snprintf(copy, len, "%s.afterop", real_name); - __db_makecopy(dbenv, real_name, copy); - - if ((ret = __os_strdup(dbenv, real_name, &dir)) != 0) - goto err; - __os_free(dbenv, real_name); - real_name = NULL; - - /* - * Create the name. Backup file names are of the form: - * - * __db.name.0x[lsn-file].0x[lsn-offset] - * - * which guarantees uniqueness. We want to look for the - * backup name, followed by a '.0x' (so that if they have - * files named, say, 'a' and 'abc' we won't match 'abc' when - * looking for 'a'. 
- */ - snprintf(backup, len, "%s.%s.0x", BACKUP_PREFIX, name); - - /* - * We need the directory path to do the __os_dirlist. - */ - p = __db_rpath(dir); - if (p != NULL) - *p = '\0'; - ret = __os_dirlist(dbenv, dir, &namesp, &dircnt); -#if DIAGNOSTIC - /* - * XXX - * To get the memory guard code to work because it uses strlen and we - * just moved the end of the string somewhere sooner. This causes the - * guard code to fail because it looks at one byte past the end of the - * string. - */ - *p = '/'; -#endif - __os_free(dbenv, dir); - if (ret != 0) - goto err; - for (i = 0; i < dircnt; i++) { - /* - * Need to check if it is a backup file for this. - * No idea what namesp[i] may be or how long, so - * must use strncmp and not memcmp. We don't want - * to use strcmp either because we are only matching - * the first part of the real file's name. We don't - * know its LSN's. - */ - if (strncmp(namesp[i], backup, strlen(backup)) == 0) { - if ((ret = __db_appname(dbenv, DB_APP_DATA, - namesp[i], 0, NULL, &real_name)) != 0) - goto err; - - /* - * This should not happen. Check that old - * .afterop files aren't around. - * If so, just move on. - */ - if (strstr(real_name, ".afterop") != NULL) { - __os_free(dbenv, real_name); - real_name = NULL; - continue; - } - snprintf(copy, len, "%s.afterop", real_name); - __db_makecopy(dbenv, real_name, copy); - __os_free(dbenv, real_name); - real_name = NULL; - } - } - -err: if (backup != NULL) - __os_free(dbenv, backup); - if (copy != NULL) - __os_free(dbenv, copy); - if (namesp != NULL) - __os_dirfree(dbenv, namesp, dircnt); - if (real_name != NULL) - __os_free(dbenv, real_name); - return (ret); -} - -static void -__db_makecopy(dbenv, src, dest) - DB_ENV *dbenv; - const char *src, *dest; -{ - DB_FH *rfhp, *wfhp; - size_t rcnt, wcnt; - char *buf; - - rfhp = wfhp = NULL; - - if (__os_malloc(dbenv, 1024, &buf) != 0) - return; - - if (__os_open(dbenv, - src, DB_OSO_RDONLY, __db_omode(OWNER_RW), &rfhp) != 0) - goto err; - if (__os_open(dbenv, dest, - DB_OSO_CREATE | DB_OSO_TRUNC, __db_omode(OWNER_RW), &wfhp) != 0) - goto err; - - for (;;) - if (__os_read(dbenv, rfhp, buf, 1024, &rcnt) < 0 || rcnt == 0 || - __os_write(dbenv, wfhp, buf, rcnt, &wcnt) < 0) - break; - -err: if (buf != NULL) - __os_free(dbenv, buf); - if (rfhp != NULL) - (void)__os_closehandle(dbenv, rfhp); - if (wfhp != NULL) - (void)__os_closehandle(dbenv, wfhp); -} -#endif diff --git a/storage/bdb/db/db.src b/storage/bdb/db/db.src deleted file mode 100644 index 21fe754a3f7..00000000000 --- a/storage/bdb/db/db.src +++ /dev/null @@ -1,259 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db.src,v 12.2 2005/08/08 03:37:06 ubell Exp $ - */ - -PREFIX __db -DBPRIVATE - -INCLUDE #ifndef NO_SYSTEM_INCLUDES -INCLUDE #include -INCLUDE -INCLUDE #include -INCLUDE #include -INCLUDE #endif -INCLUDE -INCLUDE #include "db_int.h" -INCLUDE #include "dbinc/crypto.h" -INCLUDE #include "dbinc/db_page.h" -INCLUDE #include "dbinc/db_dispatch.h" -INCLUDE #include "dbinc/db_am.h" -INCLUDE #include "dbinc/log.h" -INCLUDE #include "dbinc/txn.h" -INCLUDE - -/* - * addrem -- Add or remove an entry from a duplicate page. - * - * opcode: identifies if this is an add or delete. - * fileid: file identifier of the file being modified. - * pgno: duplicate page number. - * indx: location at which to insert or delete. - * nbytes: number of bytes added/removed to/from the page. - * hdr: header for the data item. 
- * dbt: data that is deleted or is to be added. - * pagelsn: former lsn of the page. - * - * If the hdr was NULL then, the dbt is a regular B_KEYDATA. - * If the dbt was NULL then the hdr is a complete item to be - * pasted on the page. - */ -BEGIN addrem 41 -ARG opcode u_int32_t lu -DB fileid int32_t ld -ARG pgno db_pgno_t lu -ARG indx u_int32_t lu -ARG nbytes u_int32_t lu -PGDBT hdr DBT s -DBT dbt DBT s -POINTER pagelsn DB_LSN * lu -END - -/* - * big -- Handles addition and deletion of big key/data items. - * - * opcode: identifies get/put. - * fileid: file identifier of the file being modified. - * pgno: page onto which data is being added/removed. - * prev_pgno: the page before the one we are logging. - * next_pgno: the page after the one we are logging. - * dbt: data being written onto the page. - * pagelsn: former lsn of the orig_page. - * prevlsn: former lsn of the prev_pgno. - * nextlsn: former lsn of the next_pgno. This is not currently used, but - * may be used later if we actually do overwrites of big key/ - * data items in place. - */ -BEGIN big 43 -ARG opcode u_int32_t lu -DB fileid int32_t ld -ARG pgno db_pgno_t lu -ARG prev_pgno db_pgno_t lu -ARG next_pgno db_pgno_t lu -DBT dbt DBT s -POINTER pagelsn DB_LSN * lu -POINTER prevlsn DB_LSN * lu -POINTER nextlsn DB_LSN * lu -END - -/* - * ovref -- Handles increment/decrement of overflow page reference count. - * - * fileid: identifies the file being modified. - * pgno: page number whose ref count is being incremented/decremented. - * adjust: the adjustment being made. - * lsn: the page's original lsn. - */ -BEGIN ovref 44 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -ARG adjust int32_t ld -POINTER lsn DB_LSN * lu -END - -/* - * Debug -- log an operation upon entering an access method. - * op: Operation (cursor, c_close, c_get, c_put, c_del, - * get, put, delete). - * fileid: identifies the file being acted upon. - * key: key paramater - * data: data parameter - * flags: flags parameter - */ -BEGIN debug 47 -DBT op DBT s -ARG fileid int32_t ld -DBT key DBT s -DBT data DBT s -ARG arg_flags u_int32_t lu -END - -/* - * noop -- do nothing, but get an LSN. - */ -BEGIN noop 48 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER prevlsn DB_LSN * lu -END - -/* - * pg_alloc: used to record allocating a new page. - * - * meta_lsn: the meta-data page's original lsn. - * meta_pgno the meta-data page number. - * page_lsn: the allocated page's original lsn. - * pgno: the page allocated. - * ptype: the type of the page allocated. - * next: the next page on the free list. - * last_pgno: the last page in the file after this op. - */ -BEGIN pg_alloc 49 -DB fileid int32_t ld -POINTER meta_lsn DB_LSN * lu -ARG meta_pgno db_pgno_t lu -POINTER page_lsn DB_LSN * lu -ARG pgno db_pgno_t lu -ARG ptype u_int32_t lu -ARG next db_pgno_t lu -ARG last_pgno db_pgno_t lu -END - -/* - * pg_free: used to record freeing a page. - * - * pgno: the page being freed. - * meta_lsn: the meta-data page's original lsn. - * meta_pgno: the meta-data page number. - * header: the header from the free'd page. - * next: the previous next pointer on the metadata page. - * last_pgno: the last page in the file before this op. - */ -BEGIN pg_free 50 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER meta_lsn DB_LSN * lu -ARG meta_pgno db_pgno_t lu -PGDBT header DBT s -ARG next db_pgno_t lu -ARG last_pgno db_pgno_t lu -END - -/* - * cksum -- - * This log record is written when we're unable to checksum a page, - * before returning DB_RUNRECOVERY. 
This log record causes normal - * recovery to itself return DB_RUNRECOVERY, as only catastrophic - * recovery can fix things. - */ -BEGIN cksum 51 -END - -/* - * pg_freedata: used to record freeing a page with data on it. - * - * pgno: the page being freed. - * meta_lsn: the meta-data page's original lsn. - * meta_pgno: the meta-data page number. - * header: the header and index entries from the free'd page. - * data: the data from the free'd page. - * next: the previous next pointer on the metadata page. - * last_pgno: the last page in the file before this op. - */ -BEGIN pg_freedata 52 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER meta_lsn DB_LSN * lu -ARG meta_pgno db_pgno_t lu -PGDBT header DBT s -ARG next db_pgno_t lu -ARG last_pgno db_pgno_t lu -PGDBT data DBT s -END - -/* - * pg_prepare: used to record an aborted page in a prepared transaction. - * - * pgno: the page being freed. - */ -BEGIN pg_prepare 53 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -END - -/* - * pg_new: used to record a new page put on the free list. - * - * pgno: the page being freed. - * meta_lsn: the meta-data page's original lsn. - * meta_pgno: the meta-data page number. - * header: the header from the free'd page. - * next: the previous next pointer on the metadata page. - */ -BEGIN pg_new 54 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -POINTER meta_lsn DB_LSN * lu -ARG meta_pgno db_pgno_t lu -PGDBT header DBT s -ARG next db_pgno_t lu -END - -/* - * pg_init: used to reinitialize a page during truncate. - * - * pgno: the page being initialized. - * header: the header from the page. - * data: data that used to be on the page. - */ -BEGIN pg_init 60 -DB fileid int32_t ld -ARG pgno db_pgno_t lu -PGDBT header DBT s -PGDBT data DBT s -END - -/* - * pg_sort: sort the free list - * - * meta: meta page number - * meta_lsn: lsn on meta page. - * last_free: page number of new last free page. - * last_lsn; lsn of last free page. - * last_pgno: current last page number. - * list: list of pages and lsns to sort. - */ -BEGIN pg_sort 61 -DB fileid int32_t ld -ARG meta db_pgno_t lu -POINTER meta_lsn DB_LSN * lu -ARG last_free db_pgno_t lu -POINTER last_lsn DB_LSN * lu -ARG last_pgno db_pgno_t lu -DBT list DBT s -END - diff --git a/storage/bdb/db/db_am.c b/storage/bdb/db/db_am.c deleted file mode 100644 index 966f5f07123..00000000000 --- a/storage/bdb/db/db_am.c +++ /dev/null @@ -1,904 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1998-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_am.c,v 12.12 2005/11/01 00:44:09 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/qam.h" - -static int __db_append_primary __P((DBC *, DBT *, DBT *)); -static int __db_secondary_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); - -/* - * __db_cursor_int -- - * Internal routine to create a cursor. 
- * - * PUBLIC: int __db_cursor_int - * PUBLIC: __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, u_int32_t, DBC **)); - */ -int -__db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp) - DB *dbp; - DB_TXN *txn; - DBTYPE dbtype; - db_pgno_t root; - int is_opd; - u_int32_t lockerid; - DBC **dbcp; -{ - DBC *dbc; - DBC_INTERNAL *cp; - DB_ENV *dbenv; - db_threadid_t tid; - int allocated, ret; - pid_t pid; - - dbenv = dbp->dbenv; - allocated = 0; - - /* - * If dbcp is non-NULL it is assumed to point to an area to initialize - * as a cursor. - * - * Take one from the free list if it's available. Take only the - * right type. With off page dups we may have different kinds - * of cursors on the queue for a single database. - */ - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&dbp->free_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) - if (dbtype == dbc->dbtype) { - TAILQ_REMOVE(&dbp->free_queue, dbc, links); - F_CLR(dbc, ~DBC_OWN_LID); - break; - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - - if (dbc == NULL) { - if ((ret = __os_calloc(dbenv, 1, sizeof(DBC), &dbc)) != 0) - return (ret); - allocated = 1; - dbc->flags = 0; - - dbc->dbp = dbp; - - /* Set up locking information. */ - if (LOCKING_ON(dbenv)) { - /* - * If we are not threaded, we share a locker ID among - * all cursors opened in the environment handle, - * allocating one if this is the first cursor. - * - * This relies on the fact that non-threaded DB handles - * always have non-threaded environment handles, since - * we set DB_THREAD on DB handles created with threaded - * environment handles. - */ - if (!DB_IS_THREADED(dbp)) { - if (dbp->dbenv->env_lref == NULL && - (ret = __lock_id(dbenv, NULL, - (DB_LOCKER **)&dbp->dbenv->env_lref)) != 0) - goto err; - dbc->lref = dbp->dbenv->env_lref; - } else { - if ((ret = __lock_id(dbenv, NULL, - (DB_LOCKER **)&dbc->lref)) != 0) - goto err; - F_SET(dbc, DBC_OWN_LID); - } - - /* - * In CDB, secondary indices should share a lock file - * ID with the primary; otherwise we're susceptible - * to deadlocks. We also use __db_cursor_int rather - * than __db_cursor to create secondary update cursors - * in c_put and c_del; these won't acquire a new lock. - * - * !!! - * Since this is in the one-time cursor allocation - * code, we need to be sure to destroy, not just - * close, all cursors in the secondary when we - * associate. - */ - if (CDB_LOCKING(dbenv) && - F_ISSET(dbp, DB_AM_SECONDARY)) - memcpy(dbc->lock.fileid, - dbp->s_primary->fileid, DB_FILE_ID_LEN); - else - memcpy(dbc->lock.fileid, - dbp->fileid, DB_FILE_ID_LEN); - - if (CDB_LOCKING(dbenv)) { - if (F_ISSET(dbenv, DB_ENV_CDB_ALLDB)) { - /* - * If we are doing a single lock per - * environment, set up the global - * lock object just like we do to - * single thread creates. - */ - DB_ASSERT(sizeof(db_pgno_t) == - sizeof(u_int32_t)); - dbc->lock_dbt.size = sizeof(u_int32_t); - dbc->lock_dbt.data = &dbc->lock.pgno; - dbc->lock.pgno = 0; - } else { - dbc->lock_dbt.size = DB_FILE_ID_LEN; - dbc->lock_dbt.data = dbc->lock.fileid; - } - } else { - dbc->lock.type = DB_PAGE_LOCK; - dbc->lock_dbt.size = sizeof(dbc->lock); - dbc->lock_dbt.data = &dbc->lock; - } - } - /* Init the DBC internal structure. 
*/ - switch (dbtype) { - case DB_BTREE: - case DB_RECNO: - if ((ret = __bam_c_init(dbc, dbtype)) != 0) - goto err; - break; - case DB_HASH: - if ((ret = __ham_c_init(dbc)) != 0) - goto err; - break; - case DB_QUEUE: - if ((ret = __qam_c_init(dbc)) != 0) - goto err; - break; - case DB_UNKNOWN: - default: - ret = __db_unknown_type(dbenv, "DB->cursor", dbtype); - goto err; - } - - cp = dbc->internal; - } - - /* Refresh the DBC structure. */ - dbc->dbtype = dbtype; - RESET_RET_MEM(dbc); - - if ((dbc->txn = txn) != NULL) - dbc->locker = txn->txnid; - else if (LOCKING_ON(dbenv)) { - /* - * There are certain cases in which we want to create a - * new cursor with a particular locker ID that is known - * to be the same as (and thus not conflict with) an - * open cursor. - * - * The most obvious case is cursor duplication; when we - * call DBC->c_dup or __db_c_idup, we want to use the original - * cursor's locker ID. - * - * Another case is when updating secondary indices. Standard - * CDB locking would mean that we might block ourself: we need - * to open an update cursor in the secondary while an update - * cursor in the primary is open, and when the secondary and - * primary are subdatabases or we're using env-wide locking, - * this is disastrous. - * - * In these cases, our caller will pass a nonzero locker - * ID into this function. Use this locker ID instead of - * the default as the locker ID for our new cursor. - */ - if (lockerid != DB_LOCK_INVALIDID) - dbc->locker = lockerid; - else { - /* - * If we are threaded then we need to set the - * proper thread id into the locker. - */ - if (DB_IS_THREADED(dbp)) { - dbenv->thread_id(dbenv, &pid, &tid); - __lock_set_thread_id( - (DB_LOCKER *)dbc->lref, pid, tid); - } - dbc->locker = ((DB_LOCKER *)dbc->lref)->id; - } - } - - /* - * These fields change when we are used as a secondary index, so - * if the DB is a secondary, make sure they're set properly just - * in case we opened some cursors before we were associated. - * - * __db_c_get is used by all access methods, so this should be safe. - */ - if (F_ISSET(dbp, DB_AM_SECONDARY)) - dbc->c_get = __db_c_secondary_get_pp; - - if (is_opd) - F_SET(dbc, DBC_OPD); - if (F_ISSET(dbp, DB_AM_RECOVER)) - F_SET(dbc, DBC_RECOVER); - if (F_ISSET(dbp, DB_AM_COMPENSATE)) - F_SET(dbc, DBC_COMPENSATE); - - /* Refresh the DBC internal structure. */ - cp = dbc->internal; - cp->opd = NULL; - - cp->indx = 0; - cp->page = NULL; - cp->pgno = PGNO_INVALID; - cp->root = root; - - switch (dbtype) { - case DB_BTREE: - case DB_RECNO: - if ((ret = __bam_c_refresh(dbc)) != 0) - goto err; - break; - case DB_HASH: - case DB_QUEUE: - break; - case DB_UNKNOWN: - default: - ret = __db_unknown_type(dbenv, "DB->cursor", dbp->type); - goto err; - } - - /* - * The transaction keeps track of how many cursors were opened within - * it to catch application errors where the cursor isn't closed when - * the transaction is resolved. - */ - if (txn != NULL) - ++txn->cursors; - - MUTEX_LOCK(dbenv, dbp->mutex); - TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links); - F_SET(dbc, DBC_ACTIVE); - MUTEX_UNLOCK(dbenv, dbp->mutex); - - *dbcp = dbc; - return (0); - -err: if (allocated) - __os_free(dbenv, dbc); - return (ret); -} - -/* - * __db_put -- - * Store a key/data pair. 
- * - * PUBLIC: int __db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); - */ -int -__db_put(dbp, txn, key, data, flags) - DB *dbp; - DB_TXN *txn; - DBT *key, *data; - u_int32_t flags; -{ - DBC *dbc; - DBT tdata; - DB_ENV *dbenv; - int ret, t_ret; - - dbenv = dbp->dbenv; - - if ((ret = __db_cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0) - return (ret); - - DEBUG_LWRITE(dbc, txn, "DB->put", key, data, flags); - - SET_RET_MEM(dbc, dbp); - - /* - * See the comment in __db_get(). - * - * Note that the c_get in the DB_NOOVERWRITE case is safe to - * do with this flag set; if it errors in any way other than - * DB_NOTFOUND, we're going to close the cursor without doing - * anything else, and if it returns DB_NOTFOUND then it's safe - * to do a c_put(DB_KEYLAST) even if an access method moved the - * cursor, since that's not position-dependent. - */ - F_SET(dbc, DBC_TRANSIENT); - - switch (flags) { - case DB_APPEND: - /* - * If there is an append callback, the value stored in - * data->data may be replaced and then freed. To avoid - * passing a freed pointer back to the user, just operate - * on a copy of the data DBT. - */ - tdata = *data; - - /* - * Append isn't a normal put operation; call the appropriate - * access method's append function. - */ - switch (dbp->type) { - case DB_QUEUE: - if ((ret = __qam_append(dbc, key, &tdata)) != 0) - goto err; - break; - case DB_RECNO: - if ((ret = __ram_append(dbc, key, &tdata)) != 0) - goto err; - break; - case DB_BTREE: - case DB_HASH: - case DB_UNKNOWN: - default: - /* The interface should prevent this. */ - DB_ASSERT( - dbp->type == DB_QUEUE || dbp->type == DB_RECNO); - - ret = __db_ferr(dbenv, "DB->put", 0); - goto err; - } - - /* - * Secondary indices: since we've returned zero from - * an append function, we've just put a record, and done - * so outside __db_c_put. We know we're not a secondary-- - * the interface prevents puts on them--but we may be a - * primary. If so, update our secondary indices - * appropriately. - */ - DB_ASSERT(!F_ISSET(dbp, DB_AM_SECONDARY)); - - if (LIST_FIRST(&dbp->s_secondaries) != NULL) - ret = __db_append_primary(dbc, key, &tdata); - - /* - * The append callback, if one exists, may have allocated - * a new tdata.data buffer. If so, free it. - */ - FREE_IF_NEEDED(dbp, &tdata); - - /* No need for a cursor put; we're done. */ - goto done; - case DB_NOOVERWRITE: - flags = 0; - /* - * Set DB_DBT_USERMEM, this might be a threaded application and - * the flags checking will catch us. We don't want the actual - * data, so request a partial of length 0. - */ - memset(&tdata, 0, sizeof(tdata)); - F_SET(&tdata, DB_DBT_USERMEM | DB_DBT_PARTIAL); - - /* - * If we're doing page-level locking, set the read-modify-write - * flag, we're going to overwrite immediately. - */ - if ((ret = __db_c_get(dbc, key, &tdata, - DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0))) == 0) - ret = DB_KEYEXIST; - else if (ret == DB_NOTFOUND || ret == DB_KEYEMPTY) - ret = 0; - break; - default: - /* Fall through to normal cursor put. */ - break; - } - - if (ret == 0) - ret = __db_c_put(dbc, - key, data, flags == 0 ? DB_KEYLAST : flags); - -err: -done: /* Close the cursor. */ - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_del -- - * Delete the items referenced by a key. 
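Editorial aside, not part of the removed file: the DB_NOOVERWRITE branch above probes for the key with a zero-length partial DBT (so no data bytes are copied) and reports DB_KEYEXIST if the key is present. From the public Berkeley DB 4.x C API a caller gets the same behavior from a single put; the helper name put_if_absent and the already-opened handle dbp are assumptions of this sketch:

#include <string.h>
#include <db.h>

/* Insert only if the key is not already present. */
static int
put_if_absent(DB *dbp, DB_TXN *txn, char *keystr, char *valstr)
{
	DBT key, data;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = keystr;
	key.size = (u_int32_t)strlen(keystr) + 1;
	data.data = valstr;
	data.size = (u_int32_t)strlen(valstr) + 1;

	/* Returns 0 on insert, DB_KEYEXIST if the key was already there. */
	return (dbp->put(dbp, txn, &key, &data, DB_NOOVERWRITE));
}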
- * - * PUBLIC: int __db_del __P((DB *, DB_TXN *, DBT *, u_int32_t)); - */ -int -__db_del(dbp, txn, key, flags) - DB *dbp; - DB_TXN *txn; - DBT *key; - u_int32_t flags; -{ - DBC *dbc; - DBT data, lkey; - u_int32_t f_init, f_next; - int ret, t_ret; - - /* Allocate a cursor. */ - if ((ret = __db_cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0) - goto err; - - DEBUG_LWRITE(dbc, txn, "DB->del", key, NULL, flags); - COMPQUIET(flags, 0); - - /* - * Walk a cursor through the key/data pairs, deleting as we go. Set - * the DB_DBT_USERMEM flag, as this might be a threaded application - * and the flags checking will catch us. We don't actually want the - * keys or data, so request a partial of length 0. - */ - memset(&lkey, 0, sizeof(lkey)); - F_SET(&lkey, DB_DBT_USERMEM | DB_DBT_PARTIAL); - memset(&data, 0, sizeof(data)); - F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL); - - /* - * If locking (and we haven't already acquired CDB locks), set the - * read-modify-write flag. - */ - f_init = DB_SET; - f_next = DB_NEXT_DUP; - if (STD_LOCKING(dbc)) { - f_init |= DB_RMW; - f_next |= DB_RMW; - } - - /* - * Optimize the simple cases. For all AMs if we don't have secondaries - * and are not a secondary and there are no dups then we can avoid a - * bunch of overhead. For queue we don't need to fetch the record since - * we delete by direct calculation from the record number. - * - * Hash permits an optimization in DB->del: since on-page duplicates are - * stored in a single HKEYDATA structure, it's possible to delete an - * entire set of them at once, and as the HKEYDATA has to be rebuilt - * and re-put each time it changes, this is much faster than deleting - * the duplicates one by one. Thus, if not pointing at an off-page - * duplicate set, and we're not using secondary indices (in which case - * we'd have to examine the items one by one anyway), let hash do this - * "quick delete". - * - * !!! - * Note that this is the only application-executed delete call in - * Berkeley DB that does not go through the __db_c_del function. - * If anything other than the delete itself (like a secondary index - * update) has to happen there in a particular situation, the - * conditions here should be modified not to use these optimizations. - * The ordinary AM-independent alternative will work just fine; - * it'll just be slower. - */ - if (!F_ISSET(dbp, DB_AM_SECONDARY) && - LIST_FIRST(&dbp->s_secondaries) == NULL) { - -#ifdef HAVE_QUEUE - if (dbp->type == DB_QUEUE) { - ret = __qam_delete(dbc, key); - goto done; - } -#endif - - /* Fetch the first record. */ - if ((ret = __db_c_get(dbc, key, &data, f_init)) != 0) - goto err; - -#ifdef HAVE_HASH - if (dbp->type == DB_HASH && dbc->internal->opd == NULL) { - ret = __ham_quick_delete(dbc); - goto done; - } -#endif - - if ((dbp->type == DB_BTREE || dbp->type == DB_RECNO) && - !F_ISSET(dbp, DB_AM_DUP)) { - ret = dbc->c_am_del(dbc); - goto done; - } - } else if ((ret = __db_c_get(dbc, key, &data, f_init)) != 0) - goto err; - - /* Walk through the set of key/data pairs, deleting as we go. */ - for (;;) { - if ((ret = __db_c_del(dbc, 0)) != 0) - break; - if ((ret = __db_c_get(dbc, &lkey, &data, f_next)) != 0) { - if (ret == DB_NOTFOUND) - ret = 0; - break; - } - } - -done: -err: /* Discard the cursor. */ - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_sync -- - * Flush the database cache. 
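Editorial aside, not part of the removed file: __db_del above deletes a key and all of its duplicates by positioning a write cursor with DB_SET and stepping with DB_NEXT_DUP, using zero-length partial DBTs so no key or data bytes are copied. A sketch of the same walk at the public 4.x cursor API (the helper name delete_all_dups and the open handle dbp are assumptions); an application would normally just call dbp->del(), which performs this loop internally:

#include <string.h>
#include <db.h>

/* Delete a key and every duplicate stored under it. */
static int
delete_all_dups(DB *dbp, DB_TXN *txn, DBT *key)
{
	DBC *dbc;
	DBT lkey, data;
	int ret, t_ret;

	if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
		return (ret);

	/* Zero-length partial DBTs: position only, copy nothing. */
	memset(&lkey, 0, sizeof(lkey));
	memset(&data, 0, sizeof(data));
	lkey.flags = data.flags = DB_DBT_USERMEM | DB_DBT_PARTIAL;

	for (ret = dbc->c_get(dbc, key, &data, DB_SET); ret == 0;
	    ret = dbc->c_get(dbc, &lkey, &data, DB_NEXT_DUP))
		if ((ret = dbc->c_del(dbc, 0)) != 0)
			break;
	if (ret == DB_NOTFOUND)		/* ran off the end of the duplicates */
		ret = 0;

	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}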
- * - * PUBLIC: int __db_sync __P((DB *)); - */ -int -__db_sync(dbp) - DB *dbp; -{ - int ret, t_ret; - - ret = 0; - - /* If the database was read-only, we're done. */ - if (F_ISSET(dbp, DB_AM_RDONLY)) - return (0); - - /* If it's a Recno tree, write the backing source text file. */ - if (dbp->type == DB_RECNO) - ret = __ram_writeback(dbp); - - /* If the database was never backed by a database file, we're done. */ - if (F_ISSET(dbp, DB_AM_INMEM)) - return (ret); - - if (dbp->type == DB_QUEUE) - ret = __qam_sync(dbp); - else - /* Flush any dirty pages from the cache to the backing file. */ - if ((t_ret = __memp_fsync(dbp->mpf)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_associate -- - * Associate another database as a secondary index to this one. - * - * PUBLIC: int __db_associate __P((DB *, DB_TXN *, DB *, - * PUBLIC: int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t)); - */ -int -__db_associate(dbp, txn, sdbp, callback, flags) - DB *dbp, *sdbp; - DB_TXN *txn; - int (*callback) __P((DB *, const DBT *, const DBT *, DBT *)); - u_int32_t flags; -{ - DB_ENV *dbenv; - DBC *pdbc, *sdbc; - DBT skey, key, data; - int build, ret, t_ret; - - dbenv = dbp->dbenv; - pdbc = sdbc = NULL; - ret = 0; - - /* - * Check to see if the secondary is empty -- and thus if we should - * build it -- before we link it in and risk making it show up in other - * threads. Do this first so that the databases remain unassociated on - * error. - */ - build = 0; - if (LF_ISSET(DB_CREATE)) { - if ((ret = __db_cursor(sdbp, txn, &sdbc, 0)) != 0) - goto err; - - /* - * We don't care about key or data; we're just doing - * an existence check. - */ - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - F_SET(&key, DB_DBT_PARTIAL | DB_DBT_USERMEM); - F_SET(&data, DB_DBT_PARTIAL | DB_DBT_USERMEM); - if ((ret = __db_c_get(sdbc, &key, &data, - (STD_LOCKING(sdbc) ? DB_RMW : 0) | - DB_FIRST)) == DB_NOTFOUND) { - build = 1; - ret = 0; - } - - if ((t_ret = __db_c_close(sdbc)) != 0 && ret == 0) - ret = t_ret; - - /* Reset for later error check. */ - sdbc = NULL; - - if (ret != 0) - goto err; - } - - /* - * Set up the database handle as a secondary. - */ - sdbp->s_callback = callback; - sdbp->s_primary = dbp; - - sdbp->stored_get = sdbp->get; - sdbp->get = __db_secondary_get; - - sdbp->stored_close = sdbp->close; - sdbp->close = __db_secondary_close_pp; - - F_SET(sdbp, DB_AM_SECONDARY); - - if (LF_ISSET(DB_IMMUTABLE_KEY)) - FLD_SET(sdbp->s_assoc_flags, DB_ASSOC_IMMUTABLE_KEY); - - /* - * Add the secondary to the list on the primary. Do it here - * so that we see any updates that occur while we're walking - * the primary. - */ - MUTEX_LOCK(dbenv, dbp->mutex); - - /* See __db_s_next for an explanation of secondary refcounting. */ - DB_ASSERT(sdbp->s_refcnt == 0); - sdbp->s_refcnt = 1; - LIST_INSERT_HEAD(&dbp->s_secondaries, sdbp, s_links); - MUTEX_UNLOCK(dbenv, dbp->mutex); - - if (build) { - /* - * We loop through the primary, putting each item we - * find into the new secondary. - * - * If we're using CDB, opening these two cursors puts us - * in a bit of a locking tangle: CDB locks are done on the - * primary, so that we stay deadlock-free, but that means - * that updating the secondary while we have a read cursor - * open on the primary will self-block. To get around this, - * we force the primary cursor to use the same locker ID - * as the secondary, so they won't conflict. This should - * be harmless even if we're not using CDB. 
- */ - if ((ret = __db_cursor(sdbp, txn, &sdbc, - CDB_LOCKING(sdbp->dbenv) ? DB_WRITECURSOR : 0)) != 0) - goto err; - if ((ret = __db_cursor_int(dbp, - txn, dbp->type, PGNO_INVALID, 0, sdbc->locker, &pdbc)) != 0) - goto err; - - /* Lock out other threads, now that we have a locker ID. */ - dbp->associate_lid = sdbc->locker; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - while ((ret = __db_c_get(pdbc, &key, &data, DB_NEXT)) == 0) { - memset(&skey, 0, sizeof(DBT)); - if ((ret = callback(sdbp, &key, &data, &skey)) != 0) { - if (ret == DB_DONOTINDEX) - continue; - goto err; - } - SWAP_IF_NEEDED(dbp, sdbp, &key); - if ((ret = __db_c_put(sdbc, - &skey, &key, DB_UPDATE_SECONDARY)) != 0) { - FREE_IF_NEEDED(sdbp, &skey); - goto err; - } - SWAP_IF_NEEDED(dbp, sdbp, &key); - - FREE_IF_NEEDED(sdbp, &skey); - } - if (ret == DB_NOTFOUND) - ret = 0; - } - -err: if (sdbc != NULL && (t_ret = __db_c_close(sdbc)) != 0 && ret == 0) - ret = t_ret; - - if (pdbc != NULL && (t_ret = __db_c_close(pdbc)) != 0 && ret == 0) - ret = t_ret; - - dbp->associate_lid = DB_LOCK_INVALIDID; - - return (ret); -} - -/* - * __db_secondary_get -- - * This wrapper function for DB->pget() is the DB->get() function - * on a database which has been made into a secondary index. - */ -static int -__db_secondary_get(sdbp, txn, skey, data, flags) - DB *sdbp; - DB_TXN *txn; - DBT *skey, *data; - u_int32_t flags; -{ - - DB_ASSERT(F_ISSET(sdbp, DB_AM_SECONDARY)); - return (__db_pget_pp(sdbp, txn, skey, NULL, data, flags)); -} - -/* - * __db_secondary_close -- - * Wrapper function for DB->close() which we use on secondaries to - * manage refcounting and make sure we don't close them underneath - * a primary that is updating. - * - * PUBLIC: int __db_secondary_close __P((DB *, u_int32_t)); - */ -int -__db_secondary_close(sdbp, flags) - DB *sdbp; - u_int32_t flags; -{ - DB *primary; - int doclose; - - doclose = 0; - primary = sdbp->s_primary; - - MUTEX_LOCK(primary->dbenv, primary->mutex); - /* - * Check the refcount--if it was at 1 when we were called, no - * thread is currently updating this secondary through the primary, - * so it's safe to close it for real. - * - * If it's not safe to do the close now, we do nothing; the - * database will actually be closed when the refcount is decremented, - * which can happen in either __db_s_next or __db_s_done. - */ - DB_ASSERT(sdbp->s_refcnt != 0); - if (--sdbp->s_refcnt == 0) { - LIST_REMOVE(sdbp, s_links); - /* We don't want to call close while the mutex is held. */ - doclose = 1; - } - MUTEX_UNLOCK(primary->dbenv, primary->mutex); - - /* - * sdbp->close is this function; call the real one explicitly if - * need be. - */ - return (doclose ? __db_close(sdbp, NULL, flags) : 0); -} - -/* - * __db_append_primary -- - * Perform the secondary index updates necessary to put(DB_APPEND) - * a record to a primary database. - */ -static int -__db_append_primary(dbc, key, data) - DBC *dbc; - DBT *key, *data; -{ - DB *dbp, *sdbp; - DBC *sdbc, *pdbc; - DBT oldpkey, pkey, pdata, skey; - int cmp, ret, t_ret; - - dbp = dbc->dbp; - sdbp = NULL; - ret = 0; - - /* - * Worrying about partial appends seems a little like worrying - * about Linear A character encodings. But we support those - * too if your application understands them. - */ - pdbc = NULL; - if (F_ISSET(data, DB_DBT_PARTIAL) || F_ISSET(key, DB_DBT_PARTIAL)) { - /* - * The dbc we were passed is all set to pass things - * back to the user; we can't safely do a call on it. 
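Editorial aside, not part of the removed file: the build loop above walks every primary record and calls the application's associate callback, which either fills in the secondary key DBT or returns DB_DONOTINDEX to leave that record out of the index. A sketch of a typical callback of that shape; the record layout (struct person) and the function name by_last_name are hypothetical:

#include <string.h>
#include <db.h>

struct person {			/* hypothetical primary data layout */
	char last_name[32];
	int year_of_birth;
};

/* Secondary-key extractor: index records by last name. */
static int
by_last_name(DB *sdbp, const DBT *pkey, const DBT *pdata, DBT *skey)
{
	const struct person *p = pdata->data;

	(void)sdbp;
	(void)pkey;

	if (p->last_name[0] == '\0')		/* nothing to index */
		return (DB_DONOTINDEX);

	memset(skey, 0, sizeof(*skey));
	skey->data = (void *)p->last_name;	/* points into pdata, per the callback contract */
	skey->size = (u_int32_t)strlen(p->last_name) + 1;
	return (0);
}

Wiring it up with primary->associate(primary, txn, secondary, by_last_name, DB_CREATE) is what triggers the emptiness check and, when the secondary is empty, the build loop shown above.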
- * Dup the cursor, grab the real data item (we don't - * care what the key is--we've been passed it directly), - * and use that instead of the data DBT we were passed. - * - * Note that we can get away with this simple get because - * an appended item is by definition new, and the - * correctly-constructed full data item from this partial - * put is on the page waiting for us. - */ - if ((ret = __db_c_idup(dbc, &pdbc, DB_POSITION)) != 0) - return (ret); - memset(&pkey, 0, sizeof(DBT)); - memset(&pdata, 0, sizeof(DBT)); - - if ((ret = __db_c_get(pdbc, &pkey, &pdata, DB_CURRENT)) != 0) - goto err; - - key = &pkey; - data = &pdata; - } - - /* - * Loop through the secondary indices, putting a new item in - * each that points to the appended item. - * - * This is much like the loop in "step 3" in __db_c_put, so - * I'm not commenting heavily here; it was unclean to excerpt - * just that section into a common function, but the basic - * overview is the same here. - */ - if ((ret = __db_s_first(dbp, &sdbp)) != 0) - goto err; - for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) { - memset(&skey, 0, sizeof(DBT)); - if ((ret = sdbp->s_callback(sdbp, key, data, &skey)) != 0) { - if (ret == DB_DONOTINDEX) - continue; - goto err; - } - - if ((ret = __db_cursor_int(sdbp, dbc->txn, sdbp->type, - PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0) { - FREE_IF_NEEDED(sdbp, &skey); - goto err; - } - if (CDB_LOCKING(sdbp->dbenv)) { - DB_ASSERT(sdbc->mylock.off == LOCK_INVALID); - F_SET(sdbc, DBC_WRITER); - } - - /* - * Since we know we have a new primary key, it can't be a - * duplicate duplicate in the secondary. It can be a - * duplicate in a secondary that doesn't support duplicates, - * however, so we need to be careful to avoid an overwrite - * (which would corrupt our index). - */ - if (!F_ISSET(sdbp, DB_AM_DUP)) { - memset(&oldpkey, 0, sizeof(DBT)); - F_SET(&oldpkey, DB_DBT_MALLOC); - ret = __db_c_get(sdbc, &skey, &oldpkey, - DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0)); - if (ret == 0) { - cmp = __bam_defcmp(sdbp, &oldpkey, key); - /* - * XXX - * This needs to use the right free function - * as soon as this is possible. - */ - __os_ufree(sdbp->dbenv, - oldpkey.data); - if (cmp != 0) { - __db_err(sdbp->dbenv, "%s%s", - "Append results in a non-unique secondary key in", - " an index not configured to support duplicates"); - ret = EINVAL; - goto err1; - } - } else if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY) - goto err1; - } - - ret = __db_c_put(sdbc, &skey, key, DB_UPDATE_SECONDARY); - -err1: FREE_IF_NEEDED(sdbp, &skey); - - if ((t_ret = __db_c_close(sdbc)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - } - -err: if (pdbc != NULL && (t_ret = __db_c_close(pdbc)) != 0 && ret == 0) - ret = t_ret; - if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} diff --git a/storage/bdb/db/db_cam.c b/storage/bdb/db/db_cam.c deleted file mode 100644 index f7b93ad36b1..00000000000 --- a/storage/bdb/db/db_cam.c +++ /dev/null @@ -1,2367 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: db_cam.c,v 12.21 2005/10/07 20:21:22 ubell Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" -#include "dbinc/qam.h" - -static int __db_buildpartial __P((DB *, DBT *, DBT *, DBT *)); -static int __db_c_cleanup __P((DBC *, DBC *, int)); -static int __db_c_del_secondary __P((DBC *)); -static int __db_c_pget_recno __P((DBC *, DBT *, DBT *, u_int32_t)); -static int __db_wrlock_err __P((DB_ENV *)); - -#define CDB_LOCKING_INIT(dbp, dbc) \ - /* \ - * If we are running CDB, this had better be either a write \ - * cursor or an immediate writer. If it's a regular writer, \ - * that means we have an IWRITE lock and we need to upgrade \ - * it to a write lock. \ - */ \ - if (CDB_LOCKING((dbp)->dbenv)) { \ - if (!F_ISSET(dbc, DBC_WRITECURSOR | DBC_WRITER)) \ - return (__db_wrlock_err(dbp->dbenv)); \ - \ - if (F_ISSET(dbc, DBC_WRITECURSOR) && \ - (ret = __lock_get((dbp)->dbenv, \ - (dbc)->locker, DB_LOCK_UPGRADE, &(dbc)->lock_dbt, \ - DB_LOCK_WRITE, &(dbc)->mylock)) != 0) \ - return (ret); \ - } -#define CDB_LOCKING_DONE(dbp, dbc) \ - /* Release the upgraded lock. */ \ - if (F_ISSET(dbc, DBC_WRITECURSOR)) \ - (void)__lock_downgrade( \ - (dbp)->dbenv, &(dbc)->mylock, DB_LOCK_IWRITE, 0); - -/* - * __db_c_close -- - * DBC->c_close. - * - * PUBLIC: int __db_c_close __P((DBC *)); - */ -int -__db_c_close(dbc) - DBC *dbc; -{ - DB *dbp; - DBC *opd; - DBC_INTERNAL *cp; - DB_ENV *dbenv; - int ret, t_ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - cp = dbc->internal; - opd = cp->opd; - ret = 0; - - /* - * Remove the cursor(s) from the active queue. We may be closing two - * cursors at once here, a top-level one and a lower-level, off-page - * duplicate one. The access-method specific cursor close routine must - * close both of them in a single call. - * - * !!! - * Cursors must be removed from the active queue before calling the - * access specific cursor close routine, btree depends on having that - * order of operations. - */ - MUTEX_LOCK(dbenv, dbp->mutex); - - if (opd != NULL) { - DB_ASSERT(F_ISSET(opd, DBC_ACTIVE)); - F_CLR(opd, DBC_ACTIVE); - TAILQ_REMOVE(&dbp->active_queue, opd, links); - } - DB_ASSERT(F_ISSET(dbc, DBC_ACTIVE)); - F_CLR(dbc, DBC_ACTIVE); - TAILQ_REMOVE(&dbp->active_queue, dbc, links); - - MUTEX_UNLOCK(dbenv, dbp->mutex); - - /* Call the access specific cursor close routine. */ - if ((t_ret = - dbc->c_am_close(dbc, PGNO_INVALID, NULL)) != 0 && ret == 0) - ret = t_ret; - - /* - * Release the lock after calling the access method specific close - * routine, a Btree cursor may have had pending deletes. - */ - if (CDB_LOCKING(dbenv)) { - /* - * Also, be sure not to free anything if mylock.off is - * INVALID; in some cases, such as idup'ed read cursors - * and secondary update cursors, a cursor in a CDB - * environment may not have a lock at all. - */ - if ((t_ret = __LPUT(dbc, dbc->mylock)) != 0 && ret == 0) - ret = t_ret; - - /* For safety's sake, since this is going on the free queue. */ - memset(&dbc->mylock, 0, sizeof(dbc->mylock)); - if (opd != NULL) - memset(&opd->mylock, 0, sizeof(opd->mylock)); - } - - if (dbc->txn != NULL) - dbc->txn->cursors--; - - /* Move the cursor(s) to the free queue. 
*/ - MUTEX_LOCK(dbenv, dbp->mutex); - if (opd != NULL) { - if (dbc->txn != NULL) - dbc->txn->cursors--; - TAILQ_INSERT_TAIL(&dbp->free_queue, opd, links); - opd = NULL; - } - TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links); - MUTEX_UNLOCK(dbenv, dbp->mutex); - - return (ret); -} - -/* - * __db_c_destroy -- - * Destroy the cursor, called after DBC->c_close. - * - * PUBLIC: int __db_c_destroy __P((DBC *)); - */ -int -__db_c_destroy(dbc) - DBC *dbc; -{ - DB *dbp; - DB_ENV *dbenv; - int ret, t_ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - /* Remove the cursor from the free queue. */ - MUTEX_LOCK(dbenv, dbp->mutex); - TAILQ_REMOVE(&dbp->free_queue, dbc, links); - MUTEX_UNLOCK(dbenv, dbp->mutex); - - /* Free up allocated memory. */ - if (dbc->my_rskey.data != NULL) - __os_free(dbenv, dbc->my_rskey.data); - if (dbc->my_rkey.data != NULL) - __os_free(dbenv, dbc->my_rkey.data); - if (dbc->my_rdata.data != NULL) - __os_free(dbenv, dbc->my_rdata.data); - - /* Call the access specific cursor destroy routine. */ - ret = dbc->c_am_destroy == NULL ? 0 : dbc->c_am_destroy(dbc); - - /* - * Release the lock id for this cursor. - */ - if (LOCKING_ON(dbenv) && - F_ISSET(dbc, DBC_OWN_LID) && - (t_ret = __lock_id_free(dbenv, - ((DB_LOCKER *)dbc->lref)->id)) != 0 && ret == 0) - ret = t_ret; - - __os_free(dbenv, dbc); - - return (ret); -} - -/* - * __db_c_count -- - * Return a count of duplicate data items. - * - * PUBLIC: int __db_c_count __P((DBC *, db_recno_t *)); - */ -int -__db_c_count(dbc, recnop) - DBC *dbc; - db_recno_t *recnop; -{ - DB_ENV *dbenv; - int ret; - - dbenv = dbc->dbp->dbenv; - - /* - * Cursor Cleanup Note: - * All of the cursors passed to the underlying access methods by this - * routine are not duplicated and will not be cleaned up on return. - * So, pages/locks that the cursor references must be resolved by the - * underlying functions. - */ - switch (dbc->dbtype) { - case DB_QUEUE: - case DB_RECNO: - *recnop = 1; - break; - case DB_HASH: - if (dbc->internal->opd == NULL) { - if ((ret = __ham_c_count(dbc, recnop)) != 0) - return (ret); - break; - } - /* FALLTHROUGH */ - case DB_BTREE: - if ((ret = __bam_c_count(dbc, recnop)) != 0) - return (ret); - break; - case DB_UNKNOWN: - default: - return (__db_unknown_type(dbenv, "__db_c_count", dbc->dbtype)); - } - return (0); -} - -/* - * __db_c_del -- - * DBC->c_del. - * - * PUBLIC: int __db_c_del __P((DBC *, u_int32_t)); - */ -int -__db_c_del(dbc, flags) - DBC *dbc; - u_int32_t flags; -{ - DB *dbp; - DBC *opd; - int ret, t_ret; - - dbp = dbc->dbp; - - /* - * Cursor Cleanup Note: - * All of the cursors passed to the underlying access methods by this - * routine are not duplicated and will not be cleaned up on return. - * So, pages/locks that the cursor references must be resolved by the - * underlying functions. - */ - - CDB_LOCKING_INIT(dbp, dbc); - - /* - * If we're a secondary index, and DB_UPDATE_SECONDARY isn't set - * (which it only is if we're being called from a primary update), - * then we need to call through to the primary and delete the item. - * - * Note that this will delete the current item; we don't need to - * delete it ourselves as well, so we can just goto done. - */ - if (flags != DB_UPDATE_SECONDARY && F_ISSET(dbp, DB_AM_SECONDARY)) { - ret = __db_c_del_secondary(dbc); - goto done; - } - - /* - * If we are a primary and have secondary indices, go through - * and delete any secondary keys that point at the current record. 
- */ - if (LIST_FIRST(&dbp->s_secondaries) != NULL && - (ret = __db_c_del_primary(dbc)) != 0) - goto done; - - /* - * Off-page duplicate trees are locked in the primary tree, that is, - * we acquire a write lock in the primary tree and no locks in the - * off-page dup tree. If the del operation is done in an off-page - * duplicate tree, call the primary cursor's upgrade routine first. - */ - opd = dbc->internal->opd; - if (opd == NULL) - ret = dbc->c_am_del(dbc); - else - if ((ret = dbc->c_am_writelock(dbc)) == 0) - ret = opd->c_am_del(opd); - - /* - * If this was an update that is supporting dirty reads - * then we may have just swapped our read for a write lock - * which is held by the surviving cursor. We need - * to explicitly downgrade this lock. The closed cursor - * may only have had a read lock. - */ - if (F_ISSET(dbc->dbp, DB_AM_READ_UNCOMMITTED) && - dbc->internal->lock_mode == DB_LOCK_WRITE) { - if ((t_ret = - __TLPUT(dbc, dbc->internal->lock)) != 0 && ret == 0) - ret = t_ret; - if (t_ret == 0) - dbc->internal->lock_mode = DB_LOCK_WWRITE; - } - -done: CDB_LOCKING_DONE(dbp, dbc); - - return (ret); -} - -/* - * __db_c_dup -- - * Duplicate a cursor - * - * PUBLIC: int __db_c_dup __P((DBC *, DBC **, u_int32_t)); - */ -int -__db_c_dup(dbc_orig, dbcp, flags) - DBC *dbc_orig; - DBC **dbcp; - u_int32_t flags; -{ - DBC *dbc_n, *dbc_nopd; - int ret; - - dbc_n = dbc_nopd = NULL; - - /* Allocate a new cursor and initialize it. */ - if ((ret = __db_c_idup(dbc_orig, &dbc_n, flags)) != 0) - goto err; - *dbcp = dbc_n; - - /* - * If the cursor references an off-page duplicate tree, allocate a - * new cursor for that tree and initialize it. - */ - if (dbc_orig->internal->opd != NULL) { - if ((ret = - __db_c_idup(dbc_orig->internal->opd, &dbc_nopd, flags)) != 0) - goto err; - dbc_n->internal->opd = dbc_nopd; - } - return (0); - -err: if (dbc_n != NULL) - (void)__db_c_close(dbc_n); - if (dbc_nopd != NULL) - (void)__db_c_close(dbc_nopd); - - return (ret); -} - -/* - * __db_c_idup -- - * Internal version of __db_c_dup. - * - * PUBLIC: int __db_c_idup __P((DBC *, DBC **, u_int32_t)); - */ -int -__db_c_idup(dbc_orig, dbcp, flags) - DBC *dbc_orig, **dbcp; - u_int32_t flags; -{ - DB *dbp; - DBC *dbc_n; - DBC_INTERNAL *int_n, *int_orig; - int ret; - - dbp = dbc_orig->dbp; - dbc_n = *dbcp; - - if ((ret = __db_cursor_int(dbp, dbc_orig->txn, dbc_orig->dbtype, - dbc_orig->internal->root, F_ISSET(dbc_orig, DBC_OPD), - dbc_orig->locker, &dbc_n)) != 0) - return (ret); - - /* Position the cursor if requested, acquiring the necessary locks. */ - if (flags == DB_POSITION) { - int_n = dbc_n->internal; - int_orig = dbc_orig->internal; - - dbc_n->flags |= dbc_orig->flags & ~DBC_OWN_LID; - - int_n->indx = int_orig->indx; - int_n->pgno = int_orig->pgno; - int_n->root = int_orig->root; - int_n->lock_mode = int_orig->lock_mode; - - switch (dbc_orig->dbtype) { - case DB_QUEUE: - if ((ret = __qam_c_dup(dbc_orig, dbc_n)) != 0) - goto err; - break; - case DB_BTREE: - case DB_RECNO: - if ((ret = __bam_c_dup(dbc_orig, dbc_n)) != 0) - goto err; - break; - case DB_HASH: - if ((ret = __ham_c_dup(dbc_orig, dbc_n)) != 0) - goto err; - break; - case DB_UNKNOWN: - default: - ret = __db_unknown_type(dbp->dbenv, - "__db_c_idup", dbc_orig->dbtype); - goto err; - } - } - - /* Copy the locking flags to the new cursor. */ - F_SET(dbc_n, F_ISSET(dbc_orig, - DBC_READ_COMMITTED | DBC_READ_UNCOMMITTED | DBC_WRITECURSOR)); - - /* - * If we're in CDB and this isn't an offpage dup cursor, then - * we need to get a lock for the duplicated cursor. 
- */ - if (CDB_LOCKING(dbp->dbenv) && !F_ISSET(dbc_n, DBC_OPD) && - (ret = __lock_get(dbp->dbenv, dbc_n->locker, 0, - &dbc_n->lock_dbt, F_ISSET(dbc_orig, DBC_WRITECURSOR) ? - DB_LOCK_IWRITE : DB_LOCK_READ, &dbc_n->mylock)) != 0) - goto err; - - *dbcp = dbc_n; - return (0); - -err: (void)__db_c_close(dbc_n); - return (ret); -} - -/* - * __db_c_newopd -- - * Create a new off-page duplicate cursor. - * - * PUBLIC: int __db_c_newopd __P((DBC *, db_pgno_t, DBC *, DBC **)); - */ -int -__db_c_newopd(dbc_parent, root, oldopd, dbcp) - DBC *dbc_parent; - db_pgno_t root; - DBC *oldopd; - DBC **dbcp; -{ - DB *dbp; - DBC *opd; - DBTYPE dbtype; - int ret; - - dbp = dbc_parent->dbp; - dbtype = (dbp->dup_compare == NULL) ? DB_RECNO : DB_BTREE; - - /* - * On failure, we want to default to returning the old off-page dup - * cursor, if any; our caller can't be left with a dangling pointer - * to a freed cursor. On error the only allowable behavior is to - * close the cursor (and the old OPD cursor it in turn points to), so - * this should be safe. - */ - *dbcp = oldopd; - - if ((ret = __db_cursor_int(dbp, - dbc_parent->txn, dbtype, root, 1, dbc_parent->locker, &opd)) != 0) - return (ret); - - *dbcp = opd; - - /* - * Check to see if we already have an off-page dup cursor that we've - * passed in. If we do, close it. It'd be nice to use it again - * if it's a cursor belonging to the right tree, but if we're doing - * a cursor-relative operation this might not be safe, so for now - * we'll take the easy way out and always close and reopen. - * - * Note that under no circumstances do we want to close the old - * cursor without returning a valid new one; we don't want to - * leave the main cursor in our caller with a non-NULL pointer - * to a freed off-page dup cursor. - */ - if (oldopd != NULL && (ret = __db_c_close(oldopd)) != 0) - return (ret); - - return (0); -} - -/* - * __db_c_get -- - * Get using a cursor. - * - * PUBLIC: int __db_c_get __P((DBC *, DBT *, DBT *, u_int32_t)); - */ -int -__db_c_get(dbc_arg, key, data, flags) - DBC *dbc_arg; - DBT *key, *data; - u_int32_t flags; -{ - DB *dbp; - DBC *dbc, *dbc_n, *opd; - DBC_INTERNAL *cp, *cp_n; - DB_MPOOLFILE *mpf; - db_pgno_t pgno; - u_int32_t multi, orig_ulen, tmp_flags, tmp_read_uncommitted, tmp_rmw; - u_int8_t type; - int key_small, ret, t_ret; - - COMPQUIET(orig_ulen, 0); - - key_small = 0; - - /* - * Cursor Cleanup Note: - * All of the cursors passed to the underlying access methods by this - * routine are duplicated cursors. On return, any referenced pages - * will be discarded, and, if the cursor is not intended to be used - * again, the close function will be called. So, pages/locks that - * the cursor references do not need to be resolved by the underlying - * functions. - */ - dbp = dbc_arg->dbp; - mpf = dbp->mpf; - dbc_n = NULL; - opd = NULL; - - /* Clear OR'd in additional bits so we can check for flag equality. */ - tmp_rmw = LF_ISSET(DB_RMW); - LF_CLR(DB_RMW); - - tmp_read_uncommitted = - LF_ISSET(DB_READ_UNCOMMITTED) && - !F_ISSET(dbc_arg, DBC_READ_UNCOMMITTED); - LF_CLR(DB_READ_UNCOMMITTED); - - multi = LF_ISSET(DB_MULTIPLE|DB_MULTIPLE_KEY); - LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY); - - /* - * Return a cursor's record number. It has nothing to do with the - * cursor get code except that it was put into the interface. 
- */ - if (flags == DB_GET_RECNO) { - if (tmp_rmw) - F_SET(dbc_arg, DBC_RMW); - if (tmp_read_uncommitted) - F_SET(dbc_arg, DBC_READ_UNCOMMITTED); - ret = __bam_c_rget(dbc_arg, data); - if (tmp_rmw) - F_CLR(dbc_arg, DBC_RMW); - if (tmp_read_uncommitted) - F_CLR(dbc_arg, DBC_READ_UNCOMMITTED); - return (ret); - } - - if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT) - CDB_LOCKING_INIT(dbp, dbc_arg); - - /* - * If we have an off-page duplicates cursor, and the operation applies - * to it, perform the operation. Duplicate the cursor and call the - * underlying function. - * - * Off-page duplicate trees are locked in the primary tree, that is, - * we acquire a write lock in the primary tree and no locks in the - * off-page dup tree. If the DB_RMW flag was specified and the get - * operation is done in an off-page duplicate tree, call the primary - * cursor's upgrade routine first. - */ - cp = dbc_arg->internal; - if (cp->opd != NULL && - (flags == DB_CURRENT || flags == DB_GET_BOTHC || - flags == DB_NEXT || flags == DB_NEXT_DUP || flags == DB_PREV)) { - if (tmp_rmw && (ret = dbc_arg->c_am_writelock(dbc_arg)) != 0) - return (ret); - if ((ret = __db_c_idup(cp->opd, &opd, DB_POSITION)) != 0) - return (ret); - - switch (ret = - opd->c_am_get(opd, key, data, flags, NULL)) { - case 0: - goto done; - case DB_NOTFOUND: - /* - * Translate DB_NOTFOUND failures for the DB_NEXT and - * DB_PREV operations into a subsequent operation on - * the parent cursor. - */ - if (flags == DB_NEXT || flags == DB_PREV) { - if ((ret = __db_c_close(opd)) != 0) - goto err; - opd = NULL; - break; - } - goto err; - default: - goto err; - } - } - - /* - * Perform an operation on the main cursor. Duplicate the cursor, - * upgrade the lock as required, and call the underlying function. - */ - switch (flags) { - case DB_CURRENT: - case DB_GET_BOTHC: - case DB_NEXT: - case DB_NEXT_DUP: - case DB_NEXT_NODUP: - case DB_PREV: - case DB_PREV_NODUP: - tmp_flags = DB_POSITION; - break; - default: - tmp_flags = 0; - break; - } - - if (tmp_read_uncommitted) - F_SET(dbc_arg, DBC_READ_UNCOMMITTED); - - /* - * If this cursor is going to be closed immediately, we don't - * need to take precautions to clean it up on error. - */ - if (F_ISSET(dbc_arg, DBC_TRANSIENT)) - dbc_n = dbc_arg; - else { - ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags); - if (tmp_read_uncommitted) - F_CLR(dbc_arg, DBC_READ_UNCOMMITTED); - - if (ret != 0) - goto err; - COPY_RET_MEM(dbc_arg, dbc_n); - } - - if (tmp_rmw) - F_SET(dbc_n, DBC_RMW); - - switch (multi) { - case DB_MULTIPLE: - F_SET(dbc_n, DBC_MULTIPLE); - break; - case DB_MULTIPLE_KEY: - F_SET(dbc_n, DBC_MULTIPLE_KEY); - break; - case DB_MULTIPLE | DB_MULTIPLE_KEY: - F_SET(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY); - break; - case 0: - default: - break; - } - - pgno = PGNO_INVALID; - ret = dbc_n->c_am_get(dbc_n, key, data, flags, &pgno); - if (tmp_rmw) - F_CLR(dbc_n, DBC_RMW); - if (tmp_read_uncommitted) - F_CLR(dbc_arg, DBC_READ_UNCOMMITTED); - F_CLR(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY); - if (ret != 0) - goto err; - - cp_n = dbc_n->internal; - - /* - * We may be referencing a new off-page duplicates tree. Acquire - * a new cursor and call the underlying function. 
- */ - if (pgno != PGNO_INVALID) { - if ((ret = __db_c_newopd(dbc_arg, - pgno, cp_n->opd, &cp_n->opd)) != 0) - goto err; - - switch (flags) { - case DB_FIRST: - case DB_NEXT: - case DB_NEXT_NODUP: - case DB_SET: - case DB_SET_RECNO: - case DB_SET_RANGE: - tmp_flags = DB_FIRST; - break; - case DB_LAST: - case DB_PREV: - case DB_PREV_NODUP: - tmp_flags = DB_LAST; - break; - case DB_GET_BOTH: - case DB_GET_BOTHC: - case DB_GET_BOTH_RANGE: - tmp_flags = flags; - break; - default: - ret = - __db_unknown_flag(dbp->dbenv, "__db_c_get", flags); - goto err; - } - if ((ret = cp_n->opd->c_am_get( - cp_n->opd, key, data, tmp_flags, NULL)) != 0) - goto err; - } - -done: /* - * Return a key/data item. The only exception is that we don't return - * a key if the user already gave us one, that is, if the DB_SET flag - * was set. The DB_SET flag is necessary. In a Btree, the user's key - * doesn't have to be the same as the key stored the tree, depending on - * the magic performed by the comparison function. As we may not have - * done any key-oriented operation here, the page reference may not be - * valid. Fill it in as necessary. We don't have to worry about any - * locks, the cursor must already be holding appropriate locks. - * - * XXX - * If not a Btree and DB_SET_RANGE is set, we shouldn't return a key - * either, should we? - */ - cp_n = dbc_n == NULL ? dbc_arg->internal : dbc_n->internal; - if (!F_ISSET(key, DB_DBT_ISSET)) { - if (cp_n->page == NULL && (ret = - __memp_fget(mpf, &cp_n->pgno, 0, &cp_n->page)) != 0) - goto err; - - if ((ret = __db_ret(dbp, cp_n->page, cp_n->indx, - key, &dbc_arg->rkey->data, &dbc_arg->rkey->ulen)) != 0) { - /* - * If the key DBT is too small, we still want to return - * the size of the data. Otherwise applications are - * forced to check each one with a separate call. We - * don't want to copy the data, so we set the ulen to - * zero before calling __db_ret. - */ - if (ret == DB_BUFFER_SMALL && - F_ISSET(data, DB_DBT_USERMEM)) { - key_small = 1; - orig_ulen = data->ulen; - data->ulen = 0; - } else - goto err; - } - } - if (multi != 0) { - /* - * Even if fetching from the OPD cursor we need a duplicate - * primary cursor if we are going after multiple keys. - */ - if (dbc_n == NULL) { - /* - * Non-"_KEY" DB_MULTIPLE doesn't move the main cursor, - * so it's safe to just use dbc_arg, unless dbc_arg - * has an open OPD cursor whose state might need to - * be preserved. - */ - if ((!(multi & DB_MULTIPLE_KEY) && - dbc_arg->internal->opd == NULL) || - F_ISSET(dbc_arg, DBC_TRANSIENT)) - dbc_n = dbc_arg; - else { - if ((ret = __db_c_idup(dbc_arg, - &dbc_n, DB_POSITION)) != 0) - goto err; - if ((ret = dbc_n->c_am_get(dbc_n, - key, data, DB_CURRENT, &pgno)) != 0) - goto err; - } - cp_n = dbc_n->internal; - } - - /* - * If opd is set then we dupped the opd that we came in with. - * When we return we may have a new opd if we went to another - * key. - */ - if (opd != NULL) { - DB_ASSERT(cp_n->opd == NULL); - cp_n->opd = opd; - opd = NULL; - } - - /* - * Bulk get doesn't use __db_retcopy, so data.size won't - * get set up unless there is an error. Assume success - * here. This is the only call to c_am_bulk, and it avoids - * setting it exactly the same everywhere. If we have an - * DB_BUFFER_SMALL error, it'll get overwritten with the - * needed value. - */ - data->size = data->ulen; - ret = dbc_n->c_am_bulk(dbc_n, data, flags | multi); - } else if (!F_ISSET(data, DB_DBT_ISSET)) { - dbc = opd != NULL ? opd : cp_n->opd != NULL ? 
cp_n->opd : dbc_n; - type = TYPE(dbc->internal->page); - ret = __db_ret(dbp, dbc->internal->page, dbc->internal->indx + - (type == P_LBTREE || type == P_HASH ? O_INDX : 0), - data, &dbc_arg->rdata->data, &dbc_arg->rdata->ulen); - } - -err: /* Don't pass DB_DBT_ISSET back to application level, error or no. */ - F_CLR(key, DB_DBT_ISSET); - F_CLR(data, DB_DBT_ISSET); - - /* Cleanup and cursor resolution. */ - if (opd != NULL) { - /* - * To support dirty reads we must reget the write lock - * if we have just stepped off a deleted record. - * Since the OPD cursor does not know anything - * about the referencing page or cursor we need - * to peek at the OPD cursor and get the lock here. - */ - if (F_ISSET(dbc_arg->dbp, DB_AM_READ_UNCOMMITTED) && - F_ISSET((BTREE_CURSOR *) - dbc_arg->internal->opd->internal, C_DELETED)) - if ((t_ret = - dbc_arg->c_am_writelock(dbc_arg)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __db_c_cleanup( - dbc_arg->internal->opd, opd, ret)) != 0 && ret == 0) - ret = t_ret; - - } - - if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0) - ret = t_ret; - - if (key_small) { - data->ulen = orig_ulen; - if (ret == 0) - ret = DB_BUFFER_SMALL; - } - - if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT) - CDB_LOCKING_DONE(dbp, dbc_arg); - return (ret); -} - -/* - * __db_c_put -- - * Put using a cursor. - * - * PUBLIC: int __db_c_put __P((DBC *, DBT *, DBT *, u_int32_t)); - */ -int -__db_c_put(dbc_arg, key, data, flags) - DBC *dbc_arg; - DBT *key, *data; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB *dbp, *sdbp; - DBC *dbc_n, *oldopd, *opd, *sdbc, *pdbc; - DBT olddata, oldpkey, oldskey, newdata, pkey, skey, temppkey, tempskey; - db_pgno_t pgno; - int cmp, have_oldrec, ispartial, nodel, re_pad, ret, rmw, t_ret; - u_int32_t re_len, size, tmp_flags; - - /* - * Cursor Cleanup Note: - * All of the cursors passed to the underlying access methods by this - * routine are duplicated cursors. On return, any referenced pages - * will be discarded, and, if the cursor is not intended to be used - * again, the close function will be called. So, pages/locks that - * the cursor references do not need to be resolved by the underlying - * functions. - */ - dbp = dbc_arg->dbp; - dbenv = dbp->dbenv; - sdbp = NULL; - pdbc = dbc_n = NULL; - memset(&newdata, 0, sizeof(DBT)); - ret = 0; - - /* - * We do multiple cursor operations in some cases and subsequently - * access the data DBT information. Set DB_DBT_MALLOC so we don't risk - * modification of the data between our uses of it. - */ - memset(&olddata, 0, sizeof(DBT)); - F_SET(&olddata, DB_DBT_MALLOC); - - /* - * Putting to secondary indices is forbidden; when we need - * to internally update one, we'll call this with a private - * synonym for DB_KEYLAST, DB_UPDATE_SECONDARY, which does - * the right thing but won't return an error from cputchk(). - */ - if (flags == DB_UPDATE_SECONDARY) - flags = DB_KEYLAST; - - CDB_LOCKING_INIT(dbp, dbc_arg); - - /* - * Check to see if we are a primary and have secondary indices. - * If we are not, we save ourselves a good bit of trouble and - * just skip to the "normal" put. - */ - if (LIST_FIRST(&dbp->s_secondaries) == NULL) - goto skip_s_update; - - /* - * We have at least one secondary which we may need to update. - * - * There is a rather vile locking issue here. Secondary gets - * will always involve acquiring a read lock in the secondary, - * then acquiring a read lock in the primary. 
Ideally, we - * would likewise perform puts by updating all the secondaries - * first, then doing the actual put in the primary, to avoid - * deadlock (since having multiple threads doing secondary - * gets and puts simultaneously is probably a common case). - * - * However, if this put is a put-overwrite--and we have no way to - * tell in advance whether it will be--we may need to delete - * an outdated secondary key. In order to find that old - * secondary key, we need to get the record we're overwriting, - * before we overwrite it. - * - * (XXX: It would be nice to avoid this extra get, and have the - * underlying put routines somehow pass us the old record - * since they need to traverse the tree anyway. I'm saving - * this optimization for later, as it's a lot of work, and it - * would be hard to fit into this locking paradigm anyway.) - * - * The simple thing to do would be to go get the old record before - * we do anything else. Unfortunately, though, doing so would - * violate our "secondary, then primary" lock acquisition - * ordering--even in the common case where no old primary record - * exists, we'll still acquire and keep a lock on the page where - * we're about to do the primary insert. - * - * To get around this, we do the following gyrations, which - * hopefully solve this problem in the common case: - * - * 1) If this is a c_put(DB_CURRENT), go ahead and get the - * old record. We already hold the lock on this page in - * the primary, so no harm done, and we'll need the primary - * key (which we weren't passed in this case) to do any - * secondary puts anyway. - * - * 2) If we're doing a partial put, we need to perform the - * get on the primary key right away, since we don't have - * the whole datum that the secondary key is based on. - * We may also need to pad out the record if the primary - * has a fixed record length. - * - * 3) Loop through the secondary indices, putting into each a - * new secondary key that corresponds to the new record. - * - * 4) If we haven't done so in (1) or (2), get the old primary - * key/data pair. If one does not exist--the common case--we're - * done with secondary indices, and can go straight on to the - * primary put. - * - * 5) If we do have an old primary key/data pair, however, we need - * to loop through all the secondaries a second time and delete - * the old secondary in each. - */ - memset(&pkey, 0, sizeof(DBT)); - have_oldrec = nodel = 0; - - /* - * Primary indices can't have duplicates, so only DB_CURRENT, - * DB_KEYFIRST, and DB_KEYLAST make any sense. Other flags - * should have been caught by the checking routine, but - * add a sprinkling of paranoia. - */ - DB_ASSERT(flags == DB_CURRENT || - flags == DB_KEYFIRST || flags == DB_KEYLAST); - - /* - * We'll want to use DB_RMW in a few places, but it's only legal - * when locking is on. - */ - rmw = STD_LOCKING(dbc_arg) ? DB_RMW : 0; - - if (flags == DB_CURRENT) { /* Step 1. */ - /* - * This is safe to do on the cursor we already have; - * error or no, it won't move. - * - * We use DB_RMW for all of these gets because we'll be - * writing soon enough in the "normal" put code. In - * transactional databases we'll hold those write locks - * even if we close the cursor we're reading with. - * - * The DB_KEYEMPTY return needs special handling -- if the - * cursor is on a deleted key, we return DB_NOTFOUND. 
- */ - ret = __db_c_get(dbc_arg, &pkey, &olddata, rmw | DB_CURRENT); - if (ret == DB_KEYEMPTY) - ret = DB_NOTFOUND; - if (ret != 0) - goto err; - - have_oldrec = 1; /* We've looked for the old record. */ - } else { - /* - * Set pkey so we can use &pkey everywhere instead of key. - * If DB_CURRENT is set and there is a key at the current - * location, pkey will be overwritten before it's used. - */ - pkey.data = key->data; - pkey.size = key->size; - } - - /* - * Check for partial puts (step 2). - */ - if (F_ISSET(data, DB_DBT_PARTIAL)) { - if (!have_oldrec && !nodel) { - /* - * We're going to have to search the tree for the - * specified key. Dup a cursor (so we have the same - * locking info) and do a c_get. - */ - if ((ret = __db_c_idup(dbc_arg, &pdbc, 0)) != 0) - goto err; - - /* We should have gotten DB_CURRENT in step 1. */ - DB_ASSERT(flags != DB_CURRENT); - - ret = __db_c_get(pdbc, &pkey, &olddata, rmw | DB_SET); - if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) { - nodel = 1; - ret = 0; - } - if ((t_ret = __db_c_close(pdbc)) != 0) - ret = t_ret; - if (ret != 0) - goto err; - - have_oldrec = 1; - } - - /* - * Now build the new datum from olddata and the partial data we - * were given. It's okay to do this if no record was returned - * above: a partial put on an empty record is allowed, if a - * little strange. The data is zero-padded. - */ - if ((ret = - __db_buildpartial(dbp, &olddata, data, &newdata)) != 0) - goto err; - ispartial = 1; - } else - ispartial = 0; - - /* - * Handle fixed-length records. If the primary database has - * fixed-length records, we need to pad out the datum before - * we pass it into the callback function; we always index the - * "real" record. - */ - if ((dbp->type == DB_RECNO && F_ISSET(dbp, DB_AM_FIXEDLEN)) || - (dbp->type == DB_QUEUE)) { - if (dbp->type == DB_QUEUE) { - re_len = ((QUEUE *)dbp->q_internal)->re_len; - re_pad = ((QUEUE *)dbp->q_internal)->re_pad; - } else { - re_len = ((BTREE *)dbp->bt_internal)->re_len; - re_pad = ((BTREE *)dbp->bt_internal)->re_pad; - } - - size = ispartial ? newdata.size : data->size; - if (size > re_len) { - ret = __db_rec_toobig(dbenv, size, re_len); - goto err; - } else if (size < re_len) { - /* - * If we're not doing a partial put, copy - * data->data into newdata.data, then pad out - * newdata.data. - * - * If we're doing a partial put, the data - * we want are already in newdata.data; we - * just need to pad. - * - * Either way, realloc is safe. - */ - if ((ret = - __os_realloc(dbenv, re_len, &newdata.data)) != 0) - goto err; - if (!ispartial) - memcpy(newdata.data, data->data, size); - memset((u_int8_t *)newdata.data + size, re_pad, - re_len - size); - newdata.size = re_len; - ispartial = 1; - } - } - - /* - * Loop through the secondaries. (Step 3.) - * - * Note that __db_s_first and __db_s_next will take care of - * thread-locking and refcounting issues. - */ - if ((ret = __db_s_first(dbp, &sdbp)) != 0) - goto err; - for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) { - /* - * Don't process this secondary if the key is immutable and we - * know that the old record exists. This optimization can't be - * used if we have not checked for the old record yet. - */ - if (have_oldrec && !nodel && - FLD_ISSET(sdbp->s_assoc_flags, DB_ASSOC_IMMUTABLE_KEY)) - continue; - - /* - * Call the callback for this secondary, to get the - * appropriate secondary key. - */ - memset(&skey, 0, sizeof(DBT)); - if ((ret = sdbp->s_callback(sdbp, - &pkey, ispartial ? 
&newdata : data, &skey)) != 0) { - if (ret == DB_DONOTINDEX) - /* - * The callback returned a null value--don't - * put this key in the secondary. Just - * move on to the next one--we'll handle - * any necessary deletes in step 5. - */ - continue; - goto err; - } - - /* - * Open a cursor in this secondary. - * - * Use the same locker ID as our primary cursor, so that - * we're guaranteed that the locks don't conflict (e.g. in CDB - * or if we're subdatabases that share and want to lock a - * metadata page). - */ - if ((ret = __db_cursor_int(sdbp, dbc_arg->txn, sdbp->type, - PGNO_INVALID, 0, dbc_arg->locker, &sdbc)) != 0) - goto err; - - /* - * If we're in CDB, updates will fail since the new cursor - * isn't a writer. However, we hold the WRITE lock in the - * primary and will for as long as our new cursor lasts, - * and the primary and secondary share a lock file ID, - * so it's safe to consider this a WRITER. The close - * routine won't try to put anything because we don't - * really have a lock. - */ - if (CDB_LOCKING(dbenv)) { - DB_ASSERT(sdbc->mylock.off == LOCK_INVALID); - F_SET(sdbc, DBC_WRITER); - } - - /* - * Swap the primary key to the byte order of this secondary, if - * necessary. By doing this now, we can compare directly - * against the data already in the secondary without having to - * swap it after reading. - */ - SWAP_IF_NEEDED(dbp, sdbp, &pkey); - - /* - * There are three cases here-- - * 1) The secondary supports sorted duplicates. - * If we attempt to put a secondary/primary pair - * that already exists, that's a duplicate duplicate, - * and c_put will return DB_KEYEXIST (see __db_duperr). - * This will leave us with exactly one copy of the - * secondary/primary pair, and this is just right--we'll - * avoid deleting it later, as the old and new secondaries - * will match (since the old secondary is the dup dup - * that's already there). - * 2) The secondary supports duplicates, but they're not - * sorted. We need to avoid putting a duplicate - * duplicate, because the matching old and new secondaries - * will prevent us from deleting anything and we'll - * wind up with two secondary records that point to the - * same primary key. Do a c_get(DB_GET_BOTH); only - * do the put if the secondary doesn't exist. - * 3) The secondary doesn't support duplicates at all. - * In this case, secondary keys must be unique; if - * another primary key already exists for this - * secondary key, we have to either overwrite it or - * not put this one, and in either case we've - * corrupted the secondary index. Do a c_get(DB_SET). - * If the secondary/primary pair already exists, do - * nothing; if the secondary exists with a different - * primary, return an error; and if the secondary - * does not exist, put it. - */ - if (!F_ISSET(sdbp, DB_AM_DUP)) { - /* Case 3. */ - memset(&oldpkey, 0, sizeof(DBT)); - F_SET(&oldpkey, DB_DBT_MALLOC); - ret = __db_c_get(sdbc, - &skey, &oldpkey, rmw | DB_SET); - if (ret == 0) { - cmp = __bam_defcmp(sdbp, &oldpkey, &pkey); - __os_ufree(dbenv, oldpkey.data); - if (cmp != 0) { - __db_err(dbenv, "%s%s", - "Put results in a non-unique secondary key in an ", - "index not configured to support duplicates"); - ret = EINVAL; - } - } - if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY) - goto skipput; - } else if (!F_ISSET(sdbp, DB_AM_DUPSORT)) { - /* Case 2. 
*/ - memset(&tempskey, 0, sizeof(DBT)); - tempskey.data = skey.data; - tempskey.size = skey.size; - memset(&temppkey, 0, sizeof(DBT)); - temppkey.data = pkey.data; - temppkey.size = pkey.size; - ret = __db_c_get(sdbc, &tempskey, &temppkey, - rmw | DB_GET_BOTH); - if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY) - goto skipput; - } - - ret = __db_c_put(sdbc, &skey, &pkey, DB_UPDATE_SECONDARY); - - /* - * We don't know yet whether this was a put-overwrite that - * in fact changed nothing. If it was, we may get DB_KEYEXIST. - * This is not an error. - */ - if (ret == DB_KEYEXIST) - ret = 0; - -skipput: FREE_IF_NEEDED(sdbp, &skey) - - /* Make sure the primary key is back in native byte-order. */ - SWAP_IF_NEEDED(dbp, sdbp, &pkey); - - if ((t_ret = __db_c_close(sdbc)) != 0 && ret == 0) - ret = t_ret; - - if (ret != 0) - goto err; - } - if (ret != 0) - goto err; - - /* If still necessary, go get the old primary key/data. (Step 4.) */ - if (!have_oldrec) { - /* See the comments in step 2. This is real familiar. */ - if ((ret = __db_c_idup(dbc_arg, &pdbc, 0)) != 0) - goto err; - DB_ASSERT(flags != DB_CURRENT); - pkey.data = key->data; - pkey.size = key->size; - ret = __db_c_get(pdbc, &pkey, &olddata, rmw | DB_SET); - if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) { - nodel = 1; - ret = 0; - } - if ((t_ret = __db_c_close(pdbc)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - have_oldrec = 1; - } - - /* - * If we don't follow this goto, we do in fact have an old record - * we may need to go delete. (Step 5). - */ - if (nodel) - goto skip_s_update; - - if ((ret = __db_s_first(dbp, &sdbp)) != 0) - goto err; - for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) { - /* - * Don't process this secondary if the key is immutable. We - * know that the old record exists, so this optimization can - * always be used. - */ - if (FLD_ISSET(sdbp->s_assoc_flags, DB_ASSOC_IMMUTABLE_KEY)) - continue; - - /* - * Call the callback for this secondary to get the - * old secondary key. - */ - memset(&oldskey, 0, sizeof(DBT)); - if ((ret = sdbp->s_callback(sdbp, - &pkey, &olddata, &oldskey)) != 0) { - if (ret == DB_DONOTINDEX) - /* - * The callback returned a null value--there's - * nothing to delete. Go on to the next - * secondary. - */ - continue; - goto err; - } - memset(&skey, 0, sizeof(DBT)); - if ((ret = sdbp->s_callback(sdbp, - &pkey, ispartial ? &newdata : data, &skey)) != 0 && - ret != DB_DONOTINDEX) - goto err; - - /* - * If there is no new secondary key, or if the old secondary - * key is different from the new secondary key, then - * we need to delete the old one. - * - * Note that bt_compare is (and must be) set no matter - * what access method we're in. - */ - sdbc = NULL; - if (ret == DB_DONOTINDEX || - ((BTREE *)sdbp->bt_internal)->bt_compare(sdbp, - &oldskey, &skey) != 0) { - if ((ret = __db_cursor_int( - sdbp, dbc_arg->txn, sdbp->type, - PGNO_INVALID, 0, dbc_arg->locker, &sdbc)) != 0) - goto err; - if (CDB_LOCKING(dbenv)) { - DB_ASSERT(sdbc->mylock.off == LOCK_INVALID); - F_SET(sdbc, DBC_WRITER); - } - - /* - * Don't let c_get(DB_GET_BOTH) stomp on - * our data. Use a temp DBT instead. 
- */ - memset(&tempskey, 0, sizeof(DBT)); - tempskey.data = oldskey.data; - tempskey.size = oldskey.size; - SWAP_IF_NEEDED(dbp, sdbp, &pkey); - memset(&temppkey, 0, sizeof(DBT)); - temppkey.data = pkey.data; - temppkey.size = pkey.size; - if ((ret = __db_c_get(sdbc, - &tempskey, &temppkey, rmw | DB_GET_BOTH)) == 0) - ret = __db_c_del(sdbc, DB_UPDATE_SECONDARY); - else if (ret == DB_NOTFOUND) - ret = __db_secondary_corrupt(dbp); - SWAP_IF_NEEDED(dbp, sdbp, &pkey); - } - - FREE_IF_NEEDED(sdbp, &skey); - FREE_IF_NEEDED(sdbp, &oldskey); - if (sdbc != NULL && (t_ret = __db_c_close(sdbc)) != 0 && - ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - } - - /* Secondary index updates are now done. On to the "real" stuff. */ - -skip_s_update: - /* - * If we have an off-page duplicates cursor, and the operation applies - * to it, perform the operation. Duplicate the cursor and call the - * underlying function. - * - * Off-page duplicate trees are locked in the primary tree, that is, - * we acquire a write lock in the primary tree and no locks in the - * off-page dup tree. If the put operation is done in an off-page - * duplicate tree, call the primary cursor's upgrade routine first. - */ - if (dbc_arg->internal->opd != NULL && - (flags == DB_AFTER || flags == DB_BEFORE || flags == DB_CURRENT)) { - /* - * A special case for hash off-page duplicates. Hash doesn't - * support (and is documented not to support) put operations - * relative to a cursor which references an already deleted - * item. For consistency, apply the same criteria to off-page - * duplicates as well. - */ - if (dbc_arg->dbtype == DB_HASH && F_ISSET( - ((BTREE_CURSOR *)(dbc_arg->internal->opd->internal)), - C_DELETED)) { - ret = DB_NOTFOUND; - goto err; - } - - if ((ret = dbc_arg->c_am_writelock(dbc_arg)) != 0 || - (ret = __db_c_dup(dbc_arg, &dbc_n, DB_POSITION)) != 0) - goto err; - opd = dbc_n->internal->opd; - if ((ret = opd->c_am_put( - opd, key, data, flags, NULL)) != 0) - goto err; - goto done; - } - - /* - * Perform an operation on the main cursor. Duplicate the cursor, - * and call the underlying function. - * - * XXX: MARGO - * - tmp_flags = flags == DB_AFTER || - flags == DB_BEFORE || flags == DB_CURRENT ? DB_POSITION : 0; - */ - tmp_flags = DB_POSITION; - - /* - * If this cursor is going to be closed immediately, we don't - * need to take precautions to clean it up on error. - */ - if (F_ISSET(dbc_arg, DBC_TRANSIENT)) - dbc_n = dbc_arg; - else if ((ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags)) != 0) - goto err; - - pgno = PGNO_INVALID; - if ((ret = dbc_n->c_am_put(dbc_n, key, data, flags, &pgno)) != 0) - goto err; - - /* - * We may be referencing a new off-page duplicates tree. Acquire - * a new cursor and call the underlying function. - */ - if (pgno != PGNO_INVALID) { - oldopd = dbc_n->internal->opd; - if ((ret = __db_c_newopd(dbc_arg, pgno, oldopd, &opd)) != 0) { - dbc_n->internal->opd = opd; - goto err; - } - - dbc_n->internal->opd = opd; - - if ((ret = opd->c_am_put( - opd, key, data, flags, NULL)) != 0) - goto err; - } - -done: -err: /* Cleanup and cursor resolution. */ - if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0) - ret = t_ret; - - /* If newdata or olddata were used, free their buffers. 
*/ - if (newdata.data != NULL) - __os_free(dbenv, newdata.data); - if (olddata.data != NULL) - __os_ufree(dbenv, olddata.data); - - CDB_LOCKING_DONE(dbp, dbc_arg); - - if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_duperr() - * Error message: we don't currently support sorted duplicate duplicates. - * PUBLIC: int __db_duperr __P((DB *, u_int32_t)); - */ -int -__db_duperr(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - - /* - * If we run into this error while updating a secondary index, - * don't yell--there's no clean way to pass DB_NODUPDATA in along - * with DB_UPDATE_SECONDARY, but we may run into this problem - * in a normal, non-error course of events. - * - * !!! - * If and when we ever permit duplicate duplicates in sorted-dup - * databases, we need to either change the secondary index code - * to check for dup dups, or we need to maintain the implicit - * "DB_NODUPDATA" behavior for databases with DB_AM_SECONDARY set. - */ - if (flags != DB_NODUPDATA && !F_ISSET(dbp, DB_AM_SECONDARY)) - __db_err(dbp->dbenv, - "Duplicate data items are not supported with sorted data"); - return (DB_KEYEXIST); -} - -/* - * __db_c_cleanup -- - * Clean up duplicate cursors. - */ -static int -__db_c_cleanup(dbc, dbc_n, failed) - DBC *dbc, *dbc_n; - int failed; -{ - DB *dbp; - DBC *opd; - DBC_INTERNAL *internal; - DB_MPOOLFILE *mpf; - int ret, t_ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - internal = dbc->internal; - ret = 0; - - /* Discard any pages we're holding. */ - if (internal->page != NULL) { - if ((t_ret = - __memp_fput(mpf, internal->page, 0)) != 0 && ret == 0) - ret = t_ret; - internal->page = NULL; - } - opd = internal->opd; - if (opd != NULL && opd->internal->page != NULL) { - if ((t_ret = - __memp_fput(mpf, opd->internal->page, 0)) != 0 && ret == 0) - ret = t_ret; - opd->internal->page = NULL; - } - - /* - * If dbc_n is NULL, there's no internal cursor swapping to be done - * and no dbc_n to close--we probably did the entire operation on an - * offpage duplicate cursor. Just return. - * - * If dbc and dbc_n are the same, we're either inside a DB->{put/get} - * operation, and as an optimization we performed the operation on - * the main cursor rather than on a duplicated one, or we're in a - * bulk get that can't have moved the cursor (DB_MULTIPLE with the - * initial c_get operation on an off-page dup cursor). Just - * return--either we know we didn't move the cursor, or we're going - * to close it before we return to application code, so we're sure - * not to visibly violate the "cursor stays put on error" rule. - */ - if (dbc_n == NULL || dbc == dbc_n) - return (ret); - - if (dbc_n->internal->page != NULL) { - if ((t_ret = __memp_fput( - mpf, dbc_n->internal->page, 0)) != 0 && ret == 0) - ret = t_ret; - dbc_n->internal->page = NULL; - } - opd = dbc_n->internal->opd; - if (opd != NULL && opd->internal->page != NULL) { - if ((t_ret = - __memp_fput(mpf, opd->internal->page, 0)) != 0 && ret == 0) - ret = t_ret; - opd->internal->page = NULL; - } - - /* - * If we didn't fail before entering this routine or just now when - * freeing pages, swap the interesting contents of the old and new - * cursors. - */ - if (!failed && ret == 0) { - dbc->internal = dbc_n->internal; - dbc_n->internal = internal; - } - - /* - * Close the cursor we don't care about anymore. The close can fail, - * but we only expect DB_LOCK_DEADLOCK failures. 
This violates our - * "the cursor is unchanged on error" semantics, but since all you can - * do with a DB_LOCK_DEADLOCK failure is close the cursor, I believe - * that's OK. - * - * XXX - * There's no way to recover from failure to close the old cursor. - * All we can do is move to the new position and return an error. - * - * XXX - * We might want to consider adding a flag to the cursor, so that any - * subsequent operations other than close just return an error? - */ - if ((t_ret = __db_c_close(dbc_n)) != 0 && ret == 0) - ret = t_ret; - - /* - * If this was an update that is supporting dirty reads - * then we may have just swapped our read for a write lock - * which is held by the surviving cursor. We need - * to explicitly downgrade this lock. The closed cursor - * may only have had a read lock. - */ - if (F_ISSET(dbp, DB_AM_READ_UNCOMMITTED) && - dbc->internal->lock_mode == DB_LOCK_WRITE) { - if ((t_ret = - __TLPUT(dbc, dbc->internal->lock)) != 0 && ret == 0) - ret = t_ret; - if (t_ret == 0) - dbc->internal->lock_mode = DB_LOCK_WWRITE; - } - - return (ret); -} - -/* - * __db_c_secondary_get_pp -- - * This wrapper function for DBC->c_pget() is the DBC->c_get() function - * for a secondary index cursor. - * - * PUBLIC: int __db_c_secondary_get_pp __P((DBC *, DBT *, DBT *, u_int32_t)); - */ -int -__db_c_secondary_get_pp(dbc, skey, data, flags) - DBC *dbc; - DBT *skey, *data; - u_int32_t flags; -{ - - DB_ASSERT(F_ISSET(dbc->dbp, DB_AM_SECONDARY)); - return (__db_c_pget_pp(dbc, skey, NULL, data, flags)); -} - -/* - * __db_c_pget -- - * Get a primary key/data pair through a secondary index. - * - * PUBLIC: int __db_c_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t)); - */ -int -__db_c_pget(dbc, skey, pkey, data, flags) - DBC *dbc; - DBT *skey, *pkey, *data; - u_int32_t flags; -{ - DB *pdbp, *sdbp; - DBC *dbc_n, *pdbc; - DBT nullpkey; - u_int32_t save_pkey_flags, tmp_flags, tmp_read_uncommitted, tmp_rmw; - int pkeymalloc, ret, t_ret; - - sdbp = dbc->dbp; - pdbp = sdbp->s_primary; - dbc_n = NULL; - pkeymalloc = t_ret = 0; - - /* - * The challenging part of this function is getting the behavior - * right for all the various permutations of DBT flags. The - * next several blocks handle the various cases we need to - * deal with specially. - */ - - /* - * We may be called with a NULL pkey argument, if we've been - * wrapped by a 2-DBT get call. If so, we need to use our - * own DBT. - */ - if (pkey == NULL) { - memset(&nullpkey, 0, sizeof(DBT)); - pkey = &nullpkey; - } - - /* Clear OR'd in additional bits so we can check for flag equality. */ - tmp_rmw = LF_ISSET(DB_RMW); - LF_CLR(DB_RMW); - - tmp_read_uncommitted = - LF_ISSET(DB_READ_UNCOMMITTED) && - !F_ISSET(dbc, DBC_READ_UNCOMMITTED); - LF_CLR(DB_READ_UNCOMMITTED); - - /* - * DB_GET_RECNO is a special case, because we're interested not in - * the primary key/data pair, but rather in the primary's record - * number. - */ - if (flags == DB_GET_RECNO) { - if (tmp_rmw) - F_SET(dbc, DBC_RMW); - if (tmp_read_uncommitted) - F_SET(dbc, DBC_READ_UNCOMMITTED); - ret = __db_c_pget_recno(dbc, pkey, data, flags); - if (tmp_rmw) - F_CLR(dbc, DBC_RMW); - if (tmp_read_uncommitted) - F_CLR(dbc, DBC_READ_UNCOMMITTED); - return (ret); - } - - /* - * If the DBTs we've been passed don't have any of the - * user-specified memory management flags set, we want to make sure - * we return values using the DBTs dbc->rskey, dbc->rkey, and - * dbc->rdata, respectively. 
- * - * There are two tricky aspects to this: first, we need to pass - * skey and pkey *in* to the initial c_get on the secondary key, - * since either or both may be looked at by it (depending on the - * get flag). Second, we must not use a normal DB->get call - * on the secondary, even though that's what we want to accomplish, - * because the DB handle may be free-threaded. Instead, - * we open a cursor, then take steps to ensure that we actually use - * the rkey/rdata from the *secondary* cursor. - * - * We accomplish all this by passing in the DBTs we started out - * with to the c_get, but swapping the contents of rskey and rkey, - * respectively, into rkey and rdata; __db_ret will treat them like - * the normal key/data pair in a c_get call, and will realloc them as - * need be (this is "step 1"). Then, for "step 2", we swap back - * rskey/rkey/rdata to normal, and do a get on the primary with the - * secondary dbc appointed as the owner of the returned-data memory. - * - * Note that in step 2, we copy the flags field in case we need to - * pass down a DB_DBT_PARTIAL or other flag that is compatible with - * letting DB do the memory management. - */ - - /* - * It is correct, though slightly sick, to attempt a partial get of a - * primary key. However, if we do so here, we'll never find the - * primary record; clear the DB_DBT_PARTIAL field of pkey just for the - * duration of the next call. - */ - save_pkey_flags = pkey->flags; - F_CLR(pkey, DB_DBT_PARTIAL); - - /* - * Now we can go ahead with the meat of this call. First, get the - * primary key from the secondary index. (What exactly we get depends - * on the flags, but the underlying cursor get will take care of the - * dirty work.) Duplicate the cursor, in case the later get on the - * primary fails. - */ - switch (flags) { - case DB_CURRENT: - case DB_GET_BOTHC: - case DB_NEXT: - case DB_NEXT_DUP: - case DB_NEXT_NODUP: - case DB_PREV: - case DB_PREV_NODUP: - tmp_flags = DB_POSITION; - break; - default: - tmp_flags = 0; - break; - } - - if (tmp_read_uncommitted) - F_SET(dbc, DBC_READ_UNCOMMITTED); - - if ((ret = __db_c_dup(dbc, &dbc_n, tmp_flags)) != 0) { - if (tmp_read_uncommitted) - F_CLR(dbc, DBC_READ_UNCOMMITTED); - - return (ret); - } - - F_SET(dbc_n, DBC_TRANSIENT); - - if (tmp_rmw) - F_SET(dbc_n, DBC_RMW); - - /* - * If we've been handed a primary key, it will be in native byte order, - * so we need to swap it before reading from the secondary. - */ - if (flags == DB_GET_BOTH || flags == DB_GET_BOTHC || - flags == DB_GET_BOTH_RANGE) - SWAP_IF_NEEDED(pdbp, sdbp, pkey); - - /* Step 1. */ - dbc_n->rdata = dbc->rkey; - dbc_n->rkey = dbc->rskey; - ret = __db_c_get(dbc_n, skey, pkey, flags); - /* Restore pkey's flags in case we stomped the PARTIAL flag. */ - pkey->flags = save_pkey_flags; - - if (tmp_read_uncommitted) - F_CLR(dbc_n, DBC_READ_UNCOMMITTED); - if (tmp_rmw) - F_CLR(dbc_n, DBC_RMW); - - /* - * We need to swap the primary key to native byte order if we read it - * successfully, or if we swapped it on entry above. We can't return - * with the application's data modified. - */ - if (ret == 0 || flags == DB_GET_BOTH || flags == DB_GET_BOTHC || - flags == DB_GET_BOTH_RANGE) - SWAP_IF_NEEDED(pdbp, sdbp, pkey); - - if (ret != 0) - goto err; - - /* - * Now we're ready for "step 2". 
If either or both of pkey and data do - * not have memory management flags set--that is, if DB is managing - * their memory--we need to swap around the rkey/rdata structures so - * that we don't wind up trying to use memory managed by the primary - * database cursor, which we'll close before we return. - * - * !!! - * If you're carefully following the bouncing ball, you'll note that in - * the DB-managed case, the buffer hanging off of pkey is the same as - * dbc->rkey->data. This is just fine; we may well realloc and stomp - * on it when we return, if we're doing a DB_GET_BOTH and need to - * return a different partial or key (depending on the comparison - * function), but this is safe. - * - * !!! - * We need to use __db_cursor_int here rather than simply calling - * pdbp->cursor, because otherwise, if we're in CDB, we'll allocate a - * new locker ID and leave ourselves open to deadlocks. (Even though - * we're only acquiring read locks, we'll still block if there are any - * waiters.) - */ - if ((ret = __db_cursor_int(pdbp, - dbc->txn, pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0) - goto err; - - if (tmp_read_uncommitted) - F_SET(pdbc, DBC_READ_UNCOMMITTED); - if (tmp_rmw) - F_SET(pdbc, DBC_RMW); - if (F_ISSET(dbc, DBC_READ_COMMITTED)) - F_SET(pdbc, DBC_READ_COMMITTED); - - /* - * We're about to use pkey a second time. If DB_DBT_MALLOC is set on - * it, we'll leak the memory we allocated the first time. Thus, set - * DB_DBT_REALLOC instead so that we reuse that memory instead of - * leaking it. - * - * !!! - * This assumes that the user must always specify a compatible realloc - * function if a malloc function is specified. I think this is a - * reasonable requirement. - */ - if (F_ISSET(pkey, DB_DBT_MALLOC)) { - F_CLR(pkey, DB_DBT_MALLOC); - F_SET(pkey, DB_DBT_REALLOC); - pkeymalloc = 1; - } - - /* - * Do the actual get. Set DBC_TRANSIENT since we don't care about - * preserving the position on error, and it's faster. SET_RET_MEM so - * that the secondary DBC owns any returned-data memory. - */ - F_SET(pdbc, DBC_TRANSIENT); - SET_RET_MEM(pdbc, dbc); - ret = __db_c_get(pdbc, pkey, data, DB_SET); - - /* - * If the item wasn't found in the primary, this is a bug; our - * secondary has somehow gotten corrupted, and contains elements that - * don't correspond to anything in the primary. Complain. - */ - if (ret == DB_NOTFOUND) - ret = __db_secondary_corrupt(pdbp); - - /* Now close the primary cursor. */ - if ((t_ret = __db_c_close(pdbc)) != 0 && ret == 0) - ret = t_ret; - -err: /* Cleanup and cursor resolution. */ - if ((t_ret = __db_c_cleanup(dbc, dbc_n, ret)) != 0 && ret == 0) - ret = t_ret; - if (pkeymalloc) { - /* - * If pkey had a MALLOC flag, we need to restore it; otherwise, - * if the user frees the buffer but reuses the DBT without - * NULL'ing its data field or changing the flags, we may drop - * core. - */ - F_CLR(pkey, DB_DBT_REALLOC); - F_SET(pkey, DB_DBT_MALLOC); - } - - return (ret); -} - -/* - * __db_c_pget_recno -- - * Perform a DB_GET_RECNO c_pget on a secondary index. Returns - * the secondary's record number in the pkey field and the primary's - * in the data field. 
- */ -static int -__db_c_pget_recno(sdbc, pkey, data, flags) - DBC *sdbc; - DBT *pkey, *data; - u_int32_t flags; -{ - DB *pdbp, *sdbp; - DB_ENV *dbenv; - DBC *pdbc; - DBT discardme, primary_key; - db_recno_t oob; - u_int32_t rmw; - int ret, t_ret; - - sdbp = sdbc->dbp; - pdbp = sdbp->s_primary; - dbenv = sdbp->dbenv; - pdbc = NULL; - ret = t_ret = 0; - - rmw = LF_ISSET(DB_RMW); - - memset(&discardme, 0, sizeof(DBT)); - F_SET(&discardme, DB_DBT_USERMEM | DB_DBT_PARTIAL); - - oob = RECNO_OOB; - - /* - * If the primary is an rbtree, we want its record number, whether - * or not the secondary is one too. Fetch the recno into "data". - * - * If it's not an rbtree, return RECNO_OOB in "data". - */ - if (F_ISSET(pdbp, DB_AM_RECNUM)) { - /* - * Get the primary key, so we can find the record number - * in the primary. (We're uninterested in the secondary key.) - */ - memset(&primary_key, 0, sizeof(DBT)); - F_SET(&primary_key, DB_DBT_MALLOC); - if ((ret = __db_c_get(sdbc, - &discardme, &primary_key, rmw | DB_CURRENT)) != 0) - return (ret); - - /* - * Open a cursor on the primary, set it to the right record, - * and fetch its recno into "data". - * - * (See __db_c_pget for comments on the use of __db_cursor_int.) - * - * SET_RET_MEM so that the secondary DBC owns any returned-data - * memory. - */ - if ((ret = __db_cursor_int(pdbp, sdbc->txn, - pdbp->type, PGNO_INVALID, 0, sdbc->locker, &pdbc)) != 0) - goto perr; - SET_RET_MEM(pdbc, sdbc); - if ((ret = __db_c_get(pdbc, - &primary_key, &discardme, rmw | DB_SET)) != 0) - goto perr; - - ret = __db_c_get(pdbc, &discardme, data, rmw | DB_GET_RECNO); - -perr: __os_ufree(sdbp->dbenv, primary_key.data); - if (pdbc != NULL && - (t_ret = __db_c_close(pdbc)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - return (ret); - } else if ((ret = __db_retcopy(dbenv, data, &oob, - sizeof(oob), &sdbc->rkey->data, &sdbc->rkey->ulen)) != 0) - return (ret); - - /* - * If the secondary is an rbtree, we want its record number, whether - * or not the primary is one too. Fetch the recno into "pkey". - * - * If it's not an rbtree, return RECNO_OOB in "pkey". - */ - if (F_ISSET(sdbp, DB_AM_RECNUM)) - return (__db_c_get(sdbc, &discardme, pkey, flags)); - else - return (__db_retcopy(dbenv, pkey, &oob, - sizeof(oob), &sdbc->rdata->data, &sdbc->rdata->ulen)); -} - -/* - * __db_wrlock_err -- do not have a write lock. - */ -static int -__db_wrlock_err(dbenv) - DB_ENV *dbenv; -{ - __db_err(dbenv, "Write attempted on read-only cursor"); - return (EPERM); -} - -/* - * __db_c_del_secondary -- - * Perform a delete operation on a secondary index: call through - * to the primary and delete the primary record that this record - * points to. - * - * Note that deleting the primary record will call c_del on all - * the secondaries, including this one; thus, it is not necessary - * to execute both this function and an actual delete. - */ -static int -__db_c_del_secondary(dbc) - DBC *dbc; -{ - DB *pdbp; - DBC *pdbc; - DBT skey, pkey; - int ret, t_ret; - - memset(&skey, 0, sizeof(DBT)); - memset(&pkey, 0, sizeof(DBT)); - pdbp = dbc->dbp->s_primary; - - /* - * Get the current item that we're pointing at. - * We don't actually care about the secondary key, just - * the primary. - */ - F_SET(&skey, DB_DBT_PARTIAL | DB_DBT_USERMEM); - if ((ret = __db_c_get(dbc, &skey, &pkey, DB_CURRENT)) != 0) - return (ret); - - SWAP_IF_NEEDED(pdbp, dbc->dbp, &pkey); - - /* - * Create a cursor on the primary with our locker ID, - * so that when it calls back, we don't conflict. 
- * - * We create a cursor explicitly because there's no - * way to specify the same locker ID if we're using - * locking but not transactions if we use the DB->del - * interface. This shouldn't be any less efficient - * anyway. - */ - if ((ret = __db_cursor_int(pdbp, dbc->txn, - pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0) - return (ret); - - /* - * See comment in __db_c_put--if we're in CDB, - * we already hold the locks we need, and we need to flag - * the cursor as a WRITER so we don't run into errors - * when we try to delete. - */ - if (CDB_LOCKING(pdbp->dbenv)) { - DB_ASSERT(pdbc->mylock.off == LOCK_INVALID); - F_SET(pdbc, DBC_WRITER); - } - - /* - * Set the new cursor to the correct primary key. Then - * delete it. We don't really care about the datum; - * just reuse our skey DBT. - * - * If the primary get returns DB_NOTFOUND, something is amiss-- - * every record in the secondary should correspond to some record - * in the primary. - */ - if ((ret = __db_c_get(pdbc, &pkey, &skey, - (STD_LOCKING(dbc) ? DB_RMW : 0) | DB_SET)) == 0) - ret = __db_c_del(pdbc, 0); - else if (ret == DB_NOTFOUND) - ret = __db_secondary_corrupt(pdbp); - - if ((t_ret = __db_c_close(pdbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_c_del_primary -- - * Perform a delete operation on a primary index. Loop through - * all the secondary indices which correspond to this primary - * database, and delete any secondary keys that point at the current - * record. - * - * PUBLIC: int __db_c_del_primary __P((DBC *)); - */ -int -__db_c_del_primary(dbc) - DBC *dbc; -{ - DB *dbp, *sdbp; - DBC *sdbc; - DBT data, pkey, skey, temppkey, tempskey; - int ret, t_ret; - - dbp = dbc->dbp; - - /* - * If we're called at all, we have at least one secondary. - * (Unfortunately, we can't assert this without grabbing the mutex.) - * Get the current record so that we can construct appropriate - * secondary keys as needed. - */ - memset(&pkey, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - if ((ret = __db_c_get(dbc, &pkey, &data, DB_CURRENT)) != 0) - return (ret); - - if ((ret = __db_s_first(dbp, &sdbp)) != 0) - goto err; - for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) { - /* - * Get the secondary key for this secondary and the current - * item. - */ - memset(&skey, 0, sizeof(DBT)); - if ((ret = sdbp->s_callback(sdbp, &pkey, &data, &skey)) != 0) { - /* - * If the current item isn't in this index, we - * have no work to do. Proceed. - */ - if (ret == DB_DONOTINDEX) - continue; - - /* We had a substantive error. Bail. */ - FREE_IF_NEEDED(sdbp, &skey); - goto err; - } - - /* Open a secondary cursor. */ - if ((ret = __db_cursor_int(sdbp, dbc->txn, sdbp->type, - PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0) - goto err; - /* See comment above and in __db_c_put. */ - if (CDB_LOCKING(sdbp->dbenv)) { - DB_ASSERT(sdbc->mylock.off == LOCK_INVALID); - F_SET(sdbc, DBC_WRITER); - } - - /* - * Set the secondary cursor to the appropriate item. - * Delete it. - * - * We want to use DB_RMW if locking is on; it's only - * legal then, though. - * - * !!! - * Don't stomp on any callback-allocated buffer in skey - * when we do a c_get(DB_GET_BOTH); use a temp DBT instead. - * Similarly, don't allow pkey to be invalidated when the - * cursor is closed. 
- */ - memset(&tempskey, 0, sizeof(DBT)); - tempskey.data = skey.data; - tempskey.size = skey.size; - SWAP_IF_NEEDED(dbp, sdbp, &pkey); - memset(&temppkey, 0, sizeof(DBT)); - temppkey.data = pkey.data; - temppkey.size = pkey.size; - if ((ret = __db_c_get(sdbc, &tempskey, &temppkey, - (STD_LOCKING(dbc) ? DB_RMW : 0) | DB_GET_BOTH)) == 0) - ret = __db_c_del(sdbc, DB_UPDATE_SECONDARY); - else if (ret == DB_NOTFOUND) - ret = __db_secondary_corrupt(dbp); - SWAP_IF_NEEDED(dbp, sdbp, &pkey); - - FREE_IF_NEEDED(sdbp, &skey); - - if ((t_ret = __db_c_close(sdbc)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - } - -err: if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __db_s_first -- - * Get the first secondary, if any are present, from the primary. - * - * PUBLIC: int __db_s_first __P((DB *, DB **)); - */ -int -__db_s_first(pdbp, sdbpp) - DB *pdbp, **sdbpp; -{ - DB *sdbp; - - MUTEX_LOCK(pdbp->dbenv, pdbp->mutex); - sdbp = LIST_FIRST(&pdbp->s_secondaries); - - /* See __db_s_next. */ - if (sdbp != NULL) - sdbp->s_refcnt++; - MUTEX_UNLOCK(pdbp->dbenv, pdbp->mutex); - - *sdbpp = sdbp; - - return (0); -} - -/* - * __db_s_next -- - * Get the next secondary in the list. - * - * PUBLIC: int __db_s_next __P((DB **)); - */ -int -__db_s_next(sdbpp) - DB **sdbpp; -{ - DB *sdbp, *pdbp, *closeme; - int ret; - - /* - * Secondary indices are kept in a linked list, s_secondaries, - * off each primary DB handle. If a primary is free-threaded, - * this list may only be traversed or modified while the primary's - * thread mutex is held. - * - * The tricky part is that we don't want to hold the thread mutex - * across the full set of secondary puts necessary for each primary - * put, or we'll wind up essentially single-threading all the puts - * to the handle; the secondary puts will each take about as - * long as the primary does, and may require I/O. So we instead - * hold the thread mutex only long enough to follow one link to the - * next secondary, and then we release it before performing the - * actual secondary put. - * - * The only danger here is that we might legitimately close a - * secondary index in one thread while another thread is performing - * a put and trying to update that same secondary index. To - * prevent this from happening, we refcount the secondary handles. - * If close is called on a secondary index handle while we're putting - * to it, it won't really be closed--the refcount will simply drop, - * and we'll be responsible for closing it here. - */ - sdbp = *sdbpp; - pdbp = sdbp->s_primary; - closeme = NULL; - - MUTEX_LOCK(pdbp->dbenv, pdbp->mutex); - DB_ASSERT(sdbp->s_refcnt != 0); - if (--sdbp->s_refcnt == 0) { - LIST_REMOVE(sdbp, s_links); - closeme = sdbp; - } - sdbp = LIST_NEXT(sdbp, s_links); - if (sdbp != NULL) - sdbp->s_refcnt++; - MUTEX_UNLOCK(pdbp->dbenv, pdbp->mutex); - - *sdbpp = sdbp; - - /* - * closeme->close() is a wrapper; call __db_close explicitly. - */ - ret = closeme != NULL ? __db_close(closeme, NULL, 0) : 0; - return (ret); -} - -/* - * __db_s_done -- - * Properly decrement the refcount on a secondary database handle we're - * using, without calling __db_s_next. 
- * - * PUBLIC: int __db_s_done __P((DB *)); - */ -int -__db_s_done(sdbp) - DB *sdbp; -{ - DB *pdbp; - int doclose; - - pdbp = sdbp->s_primary; - doclose = 0; - - MUTEX_LOCK(pdbp->dbenv, pdbp->mutex); - DB_ASSERT(sdbp->s_refcnt != 0); - if (--sdbp->s_refcnt == 0) { - LIST_REMOVE(sdbp, s_links); - doclose = 1; - } - MUTEX_UNLOCK(pdbp->dbenv, pdbp->mutex); - - return (doclose ? __db_close(sdbp, NULL, 0) : 0); -} - -/* - * __db_buildpartial -- - * Build the record that will result after a partial put is applied to - * an existing record. - * - * This should probably be merged with __bam_build, but that requires - * a little trickery if we plan to keep the overflow-record optimization - * in that function. - */ -static int -__db_buildpartial(dbp, oldrec, partial, newrec) - DB *dbp; - DBT *oldrec, *partial, *newrec; -{ - int ret; - u_int8_t *buf; - u_int32_t len, nbytes; - - DB_ASSERT(F_ISSET(partial, DB_DBT_PARTIAL)); - - memset(newrec, 0, sizeof(DBT)); - - nbytes = __db_partsize(oldrec->size, partial); - newrec->size = nbytes; - - if ((ret = __os_malloc(dbp->dbenv, nbytes, &buf)) != 0) - return (ret); - newrec->data = buf; - - /* Nul or pad out the buffer, for any part that isn't specified. */ - memset(buf, - F_ISSET(dbp, DB_AM_FIXEDLEN) ? ((BTREE *)dbp->bt_internal)->re_pad : - 0, nbytes); - - /* Copy in any leading data from the original record. */ - memcpy(buf, oldrec->data, - partial->doff > oldrec->size ? oldrec->size : partial->doff); - - /* Copy the data from partial. */ - memcpy(buf + partial->doff, partial->data, partial->size); - - /* Copy any trailing data from the original record. */ - len = partial->doff + partial->dlen; - if (oldrec->size > len) - memcpy(buf + partial->doff + partial->size, - (u_int8_t *)oldrec->data + len, oldrec->size - len); - - return (0); -} - -/* - * __db_partsize -- - * Given the number of bytes in an existing record and a DBT that - * is about to be partial-put, calculate the size of the record - * after the put. - * - * This code is called from __bam_partsize. - * - * PUBLIC: u_int32_t __db_partsize __P((u_int32_t, DBT *)); - */ -u_int32_t -__db_partsize(nbytes, data) - u_int32_t nbytes; - DBT *data; -{ - - /* - * There are really two cases here: - * - * Case 1: We are replacing some bytes that do not exist (i.e., they - * are past the end of the record). In this case the number of bytes - * we are replacing is irrelevant and all we care about is how many - * bytes we are going to add from offset. So, the new record length - * is going to be the size of the new bytes (size) plus wherever those - * new bytes begin (doff). - * - * Case 2: All the bytes we are replacing exist. Therefore, the new - * size is the oldsize (nbytes) minus the bytes we are replacing (dlen) - * plus the bytes we are adding (size). - */ - if (nbytes < data->doff + data->dlen) /* Case 1 */ - return (data->doff + data->size); - - return (nbytes + data->size - data->dlen); /* Case 2 */ -} diff --git a/storage/bdb/db/db_conv.c b/storage/bdb/db/db_conv.c deleted file mode 100644 index 53f4e638d5c..00000000000 --- a/storage/bdb/db/db_conv.c +++ /dev/null @@ -1,561 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_conv.c,v 12.1 2005/06/16 20:21:09 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/crypto.h" -#include "dbinc/hmac.h" -#include "dbinc/db_page.h" -#include "dbinc/db_swap.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/log.h" -#include "dbinc/qam.h" - -/* - * __db_pgin -- - * Primary page-swap routine. - * - * PUBLIC: int __db_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *)); - */ -int -__db_pgin(dbenv, pg, pp, cookie) - DB_ENV *dbenv; - db_pgno_t pg; - void *pp; - DBT *cookie; -{ - DB dummydb, *dbp; - DB_PGINFO *pginfo; - DB_CIPHER *db_cipher; - DB_LSN not_used; - PAGE *pagep; - size_t pg_off, pg_len, sum_len; - int is_hmac, ret; - u_int8_t *chksum, *iv; - - pginfo = (DB_PGINFO *)cookie->data; - pagep = (PAGE *)pp; - - ret = is_hmac = 0; - chksum = iv = NULL; - memset(&dummydb, 0, sizeof(DB)); - dbp = &dummydb; - dummydb.flags = pginfo->flags; - db_cipher = (DB_CIPHER *)dbenv->crypto_handle; - switch (pagep->type) { - case P_HASHMETA: - case P_BTREEMETA: - case P_QAMMETA: - /* - * If checksumming is set on the meta-page, we must set - * it in the dbp. - */ - if (FLD_ISSET(((DBMETA *)pp)->metaflags, DBMETA_CHKSUM)) - F_SET(dbp, DB_AM_CHKSUM); - else - F_CLR(dbp, DB_AM_CHKSUM); - if (((DBMETA *)pp)->encrypt_alg != 0 || - F_ISSET(dbp, DB_AM_ENCRYPT)) - is_hmac = 1; - /* - * !!! - * For all meta pages it is required that the chksum - * be at the same location. Use BTMETA to get to it - * for any meta type. - */ - chksum = ((BTMETA *)pp)->chksum; - sum_len = DBMETASIZE; - break; - case P_INVALID: - /* - * We assume that we've read a file hole if we have - * a zero LSN, zero page number and P_INVALID. Otherwise - * we have an invalid page that might contain real data. 
- */ - if (IS_ZERO_LSN(LSN(pagep)) && pagep->pgno == PGNO_INVALID) { - sum_len = 0; - break; - } - /* FALLTHROUGH */ - default: - chksum = P_CHKSUM(dbp, pagep); - sum_len = pginfo->db_pagesize; - /* - * If we are reading in a non-meta page, then if we have - * a db_cipher then we are using hmac. - */ - is_hmac = CRYPTO_ON(dbenv) ? 1 : 0; - break; - } - - /* - * We expect a checksum error if there was a configuration problem. - * If there is no configuration problem and we don't get a match, - * it's fatal: panic the system. - */ - if (F_ISSET(dbp, DB_AM_CHKSUM) && sum_len != 0) { - if (F_ISSET(dbp, DB_AM_SWAP) && is_hmac == 0) - P_32_SWAP(chksum); - switch (ret = __db_check_chksum( - dbenv, db_cipher, chksum, pp, sum_len, is_hmac)) { - case 0: - break; - case -1: - if (DBENV_LOGGING(dbenv)) - (void)__db_cksum_log( - dbenv, NULL, ¬_used, DB_FLUSH); - __db_err(dbenv, - "checksum error: page %lu: catastrophic recovery required", - (u_long)pg); - return (__db_panic(dbenv, DB_RUNRECOVERY)); - default: - return (ret); - } - } - if (F_ISSET(dbp, DB_AM_ENCRYPT)) { - DB_ASSERT(db_cipher != NULL); - DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM)); - - pg_off = P_OVERHEAD(dbp); - DB_ASSERT(db_cipher->adj_size(pg_off) == 0); - - switch (pagep->type) { - case P_HASHMETA: - case P_BTREEMETA: - case P_QAMMETA: - /* - * !!! - * For all meta pages it is required that the iv - * be at the same location. Use BTMETA to get to it - * for any meta type. - */ - iv = ((BTMETA *)pp)->iv; - pg_len = DBMETASIZE; - break; - case P_INVALID: - if (IS_ZERO_LSN(LSN(pagep)) && - pagep->pgno == PGNO_INVALID) { - pg_len = 0; - break; - } - /* FALLTHROUGH */ - default: - iv = P_IV(dbp, pagep); - pg_len = pginfo->db_pagesize; - break; - } - if (pg_len != 0 && (ret = db_cipher->decrypt(dbenv, - db_cipher->data, iv, ((u_int8_t *)pagep) + pg_off, - pg_len - pg_off)) != 0) - return (ret); - } - switch (pagep->type) { - case P_INVALID: - if (pginfo->type == DB_QUEUE) - return (__qam_pgin_out(dbenv, pg, pp, cookie)); - else - return (__ham_pgin(dbenv, dbp, pg, pp, cookie)); - case P_HASH: - case P_HASHMETA: - return (__ham_pgin(dbenv, dbp, pg, pp, cookie)); - case P_BTREEMETA: - case P_IBTREE: - case P_IRECNO: - case P_LBTREE: - case P_LDUP: - case P_LRECNO: - case P_OVERFLOW: - return (__bam_pgin(dbenv, dbp, pg, pp, cookie)); - case P_QAMMETA: - case P_QAMDATA: - return (__qam_pgin_out(dbenv, pg, pp, cookie)); - default: - break; - } - return (__db_pgfmt(dbenv, pg)); -} - -/* - * __db_pgout -- - * Primary page-swap routine. 
- * - * PUBLIC: int __db_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *)); - */ -int -__db_pgout(dbenv, pg, pp, cookie) - DB_ENV *dbenv; - db_pgno_t pg; - void *pp; - DBT *cookie; -{ - DB dummydb, *dbp; - DB_CIPHER *db_cipher; - DB_PGINFO *pginfo; - PAGE *pagep; - size_t pg_off, pg_len, sum_len; - int ret; - u_int8_t *chksum, *iv, *key; - - pginfo = (DB_PGINFO *)cookie->data; - pagep = (PAGE *)pp; - - chksum = iv = key = NULL; - memset(&dummydb, 0, sizeof(DB)); - dbp = &dummydb; - dummydb.flags = pginfo->flags; - ret = 0; - switch (pagep->type) { - case P_INVALID: - if (pginfo->type == DB_QUEUE) - ret = __qam_pgin_out(dbenv, pg, pp, cookie); - else - ret = __ham_pgout(dbenv, dbp, pg, pp, cookie); - break; - case P_HASH: - case P_HASHMETA: - ret = __ham_pgout(dbenv, dbp, pg, pp, cookie); - break; - case P_BTREEMETA: - case P_IBTREE: - case P_IRECNO: - case P_LBTREE: - case P_LDUP: - case P_LRECNO: - case P_OVERFLOW: - ret = __bam_pgout(dbenv, dbp, pg, pp, cookie); - break; - case P_QAMMETA: - case P_QAMDATA: - ret = __qam_pgin_out(dbenv, pg, pp, cookie); - break; - default: - return (__db_pgfmt(dbenv, pg)); - } - if (ret) - return (ret); - - db_cipher = (DB_CIPHER *)dbenv->crypto_handle; - if (F_ISSET(dbp, DB_AM_ENCRYPT)) { - - DB_ASSERT(db_cipher != NULL); - DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM)); - - pg_off = P_OVERHEAD(dbp); - DB_ASSERT(db_cipher->adj_size(pg_off) == 0); - - key = db_cipher->mac_key; - - switch (pagep->type) { - case P_HASHMETA: - case P_BTREEMETA: - case P_QAMMETA: - /* - * !!! - * For all meta pages it is required that the iv - * be at the same location. Use BTMETA to get to it - * for any meta type. - */ - iv = ((BTMETA *)pp)->iv; - pg_len = DBMETASIZE; - break; - default: - iv = P_IV(dbp, pagep); - pg_len = pginfo->db_pagesize; - break; - } - if ((ret = db_cipher->encrypt(dbenv, db_cipher->data, - iv, ((u_int8_t *)pagep) + pg_off, pg_len - pg_off)) != 0) - return (ret); - } - if (F_ISSET(dbp, DB_AM_CHKSUM)) { - switch (pagep->type) { - case P_HASHMETA: - case P_BTREEMETA: - case P_QAMMETA: - /* - * !!! - * For all meta pages it is required that the chksum - * be at the same location. Use BTMETA to get to it - * for any meta type. - */ - chksum = ((BTMETA *)pp)->chksum; - sum_len = DBMETASIZE; - break; - default: - chksum = P_CHKSUM(dbp, pagep); - sum_len = pginfo->db_pagesize; - break; - } - __db_chksum(pp, sum_len, key, chksum); - if (F_ISSET(dbp, DB_AM_SWAP) && !F_ISSET(dbp, DB_AM_ENCRYPT)) - P_32_SWAP(chksum); - } - return (0); -} - -/* - * __db_metaswap -- - * Byteswap the common part of the meta-data page. - * - * PUBLIC: void __db_metaswap __P((PAGE *)); - */ -void -__db_metaswap(pg) - PAGE *pg; -{ - u_int8_t *p; - - p = (u_int8_t *)pg; - - /* Swap the meta-data information. */ - SWAP32(p); /* lsn.file */ - SWAP32(p); /* lsn.offset */ - SWAP32(p); /* pgno */ - SWAP32(p); /* magic */ - SWAP32(p); /* version */ - SWAP32(p); /* pagesize */ - p += 4; /* unused, page type, unused, unused */ - SWAP32(p); /* free */ - SWAP32(p); /* alloc_lsn part 1 */ - SWAP32(p); /* alloc_lsn part 2 */ - SWAP32(p); /* cached key count */ - SWAP32(p); /* cached record count */ - SWAP32(p); /* flags */ -} - -/* - * __db_byteswap -- - * Byteswap a page. 
- * - * PUBLIC: int __db_byteswap - * PUBLIC: __P((DB_ENV *, DB *, db_pgno_t, PAGE *, size_t, int)); - */ -int -__db_byteswap(dbenv, dbp, pg, h, pagesize, pgin) - DB_ENV *dbenv; - DB *dbp; - db_pgno_t pg; - PAGE *h; - size_t pagesize; - int pgin; -{ - BINTERNAL *bi; - BKEYDATA *bk; - BOVERFLOW *bo; - RINTERNAL *ri; - db_indx_t i, *inp, len, tmp; - u_int8_t *p, *end; - - COMPQUIET(pg, 0); - - inp = P_INP(dbp, h); - if (pgin) { - M_32_SWAP(h->lsn.file); - M_32_SWAP(h->lsn.offset); - M_32_SWAP(h->pgno); - M_32_SWAP(h->prev_pgno); - M_32_SWAP(h->next_pgno); - M_16_SWAP(h->entries); - M_16_SWAP(h->hf_offset); - } - - switch (h->type) { - case P_HASH: - for (i = 0; i < NUM_ENT(h); i++) { - if (pgin) - M_16_SWAP(inp[i]); - - switch (HPAGE_TYPE(dbp, h, i)) { - case H_KEYDATA: - break; - case H_DUPLICATE: - len = LEN_HKEYDATA(dbp, h, pagesize, i); - p = HKEYDATA_DATA(P_ENTRY(dbp, h, i)); - for (end = p + len; p < end;) { - if (pgin) { - P_16_SWAP(p); - memcpy(&tmp, - p, sizeof(db_indx_t)); - p += sizeof(db_indx_t); - } else { - memcpy(&tmp, - p, sizeof(db_indx_t)); - SWAP16(p); - } - p += tmp; - SWAP16(p); - } - break; - case H_OFFDUP: - p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i)); - SWAP32(p); /* pgno */ - break; - case H_OFFPAGE: - p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i)); - SWAP32(p); /* pgno */ - SWAP32(p); /* tlen */ - break; - default: - return (__db_pgfmt(dbenv, pg)); - } - - } - - /* - * The offsets in the inp array are used to determine - * the size of entries on a page; therefore they - * cannot be converted until we've done all the - * entries. - */ - if (!pgin) - for (i = 0; i < NUM_ENT(h); i++) - M_16_SWAP(inp[i]); - break; - case P_LBTREE: - case P_LDUP: - case P_LRECNO: - for (i = 0; i < NUM_ENT(h); i++) { - if (pgin) - M_16_SWAP(inp[i]); - - /* - * In the case of on-page duplicates, key information - * should only be swapped once. - */ - if (h->type == P_LBTREE && i > 1) { - if (pgin) { - if (inp[i] == inp[i - 2]) - continue; - } else { - M_16_SWAP(inp[i]); - if (inp[i] == inp[i - 2]) - continue; - M_16_SWAP(inp[i]); - } - } - - bk = GET_BKEYDATA(dbp, h, i); - switch (B_TYPE(bk->type)) { - case B_KEYDATA: - M_16_SWAP(bk->len); - break; - case B_DUPLICATE: - case B_OVERFLOW: - bo = (BOVERFLOW *)bk; - M_32_SWAP(bo->pgno); - M_32_SWAP(bo->tlen); - break; - default: - return (__db_pgfmt(dbenv, pg)); - } - - if (!pgin) - M_16_SWAP(inp[i]); - } - break; - case P_IBTREE: - for (i = 0; i < NUM_ENT(h); i++) { - if (pgin) - M_16_SWAP(inp[i]); - - bi = GET_BINTERNAL(dbp, h, i); - M_16_SWAP(bi->len); - M_32_SWAP(bi->pgno); - M_32_SWAP(bi->nrecs); - - switch (B_TYPE(bi->type)) { - case B_KEYDATA: - break; - case B_DUPLICATE: - case B_OVERFLOW: - bo = (BOVERFLOW *)bi->data; - M_32_SWAP(bo->pgno); - M_32_SWAP(bo->tlen); - break; - default: - return (__db_pgfmt(dbenv, pg)); - } - - if (!pgin) - M_16_SWAP(inp[i]); - } - break; - case P_IRECNO: - for (i = 0; i < NUM_ENT(h); i++) { - if (pgin) - M_16_SWAP(inp[i]); - - ri = GET_RINTERNAL(dbp, h, i); - M_32_SWAP(ri->pgno); - M_32_SWAP(ri->nrecs); - - if (!pgin) - M_16_SWAP(inp[i]); - } - break; - case P_OVERFLOW: - case P_INVALID: - /* Nothing to do. */ - break; - default: - return (__db_pgfmt(dbenv, pg)); - } - - if (!pgin) { - /* Swap the header information. 
*/ - M_32_SWAP(h->lsn.file); - M_32_SWAP(h->lsn.offset); - M_32_SWAP(h->pgno); - M_32_SWAP(h->prev_pgno); - M_32_SWAP(h->next_pgno); - M_16_SWAP(h->entries); - M_16_SWAP(h->hf_offset); - } - return (0); -} diff --git a/storage/bdb/db/db_dispatch.c b/storage/bdb/db/db_dispatch.c deleted file mode 100644 index 3c56b556219..00000000000 --- a/storage/bdb/db/db_dispatch.c +++ /dev/null @@ -1,1603 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1995, 1996 - * The President and Fellows of Harvard University. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Margo Seltzer. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_dispatch.c,v 12.12 2005/11/10 21:11:42 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#ifndef HAVE_FTRUNCATE -#include "dbinc/db_shash.h" -#endif -#include "dbinc/hash.h" -#ifndef HAVE_FTRUNCATE -#include "dbinc/lock.h" -#include "dbinc/mp.h" -#endif -#include "dbinc/log.h" -#include "dbinc/fop.h" -#include "dbinc/txn.h" - -#ifndef HAVE_FTRUNCATE -static int __db_limbo_fix __P((DB *, DB_TXN *, - DB_TXNLIST *, db_pgno_t *, DBMETA *, db_limbo_state)); -static int __db_limbo_bucket __P((DB_ENV *, - DB_TXN *, DB_TXNLIST *, db_limbo_state)); -static int __db_limbo_move __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNLIST *)); -static int __db_limbo_prepare __P(( DB *, DB_TXN *, DB_TXNLIST *)); -static int __db_lock_move __P((DB_ENV *, - u_int8_t *, db_pgno_t, db_lockmode_t, DB_TXN *, DB_TXN *)); -static int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *, - int32_t, u_int8_t *, char *, db_pgno_t)); -#endif -static int __db_txnlist_find_internal __P((DB_ENV *, DB_TXNHEAD *, - db_txnlist_type, u_int32_t, u_int8_t *, DB_TXNLIST **, - int, u_int32_t *)); - -/* - * __db_dispatch -- - * - * This is the transaction dispatch function used by the db access methods. 
- * It is designed to handle the record format used by all the access - * methods (the one automatically generated by the db_{h,log,read}.sh - * scripts in the tools directory). An application using a different - * recovery paradigm will supply a different dispatch function to txn_open. - * - * PUBLIC: int __db_dispatch __P((DB_ENV *, - * PUBLIC: int (**)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)), - * PUBLIC: size_t, DBT *, DB_LSN *, db_recops, DB_TXNHEAD *)); - */ -int -__db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info) - DB_ENV *dbenv; /* The environment. */ - int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t dtabsize; /* Size of the dtab. */ - DBT *db; /* The log record upon which to dispatch. */ - DB_LSN *lsnp; /* The lsn of the record being dispatched. */ - db_recops redo; /* Redo this op (or undo it). */ - DB_TXNHEAD *info; /* Transaction list. */ -{ - DB_LSN prev_lsn; - u_int32_t rectype, status, txnid; - int make_call, ret; - - memcpy(&rectype, db->data, sizeof(rectype)); - memcpy(&txnid, (u_int8_t *)db->data + sizeof(rectype), sizeof(txnid)); - make_call = ret = 0; - - /* If we don't have a dispatch table, it's hard to dispatch. */ - DB_ASSERT(dtab != NULL); - - /* - * If we find a record that is in the user's number space and they - * have specified a recovery routine, let them handle it. If they - * didn't specify a recovery routine, then we expect that they've - * followed all our rules and registered new recovery functions. - */ - switch (redo) { - case DB_TXN_ABORT: - case DB_TXN_APPLY: - case DB_TXN_PRINT: - make_call = 1; - break; - case DB_TXN_OPENFILES: - /* - * We collect all the transactions that have - * "begin" records, those with no previous LSN, - * so that we do not abort partial transactions. - * These are known to be undone, otherwise the - * log would not have been freeable. - */ - memcpy(&prev_lsn, (u_int8_t *)db->data + - sizeof(rectype) + sizeof(txnid), sizeof(prev_lsn)); - if (txnid != 0 && prev_lsn.file == 0 && (ret = - __db_txnlist_add(dbenv, info, txnid, TXN_OK, NULL)) != 0) - return (ret); - - /* FALLTHROUGH */ - case DB_TXN_POPENFILES: - if (rectype == DB___dbreg_register || - rectype == DB___txn_child || - rectype == DB___txn_ckp || rectype == DB___txn_recycle) - return (dtab[rectype](dbenv, db, lsnp, redo, info)); - break; - case DB_TXN_BACKWARD_ROLL: - /* - * Running full recovery in the backward pass. In general, - * we only process records during this pass that belong - * to aborted transactions. Unfortunately, there are several - * exceptions: - * 1. If this is a meta-record, one not associated with - * a transaction, then we must always process it. - * 2. If this is a transaction commit/abort, we must - * always process it, so that we know the status of - * every transaction. - * 3. If this is a child commit, we need to process it - * because the outcome of the child transaction depends - * on the outcome of the parent. - * 4. If this is a dbreg_register record, we must always - * process is because they contain non-transactional - * closes that must be properly handled. - * 5. If this is a noop, we must always undo it so that we - * properly handle any aborts before a file was closed. - * 6. If this a file remove, we need to process it to - * determine if the on-disk file is the same as the - * one being described. - */ - switch (rectype) { - /* - * These either do not belong to a transaction or (regop) - * must be processed regardless of the status of the - * transaction. 
- */ - case DB___txn_regop: - case DB___txn_recycle: - case DB___txn_ckp: - make_call = 1; - break; - /* - * These belong to a transaction whose status must be - * checked. - */ - case DB___txn_child: - case DB___db_noop: - case DB___fop_file_remove: - case DB___dbreg_register: - make_call = 1; - - /* FALLTHROUGH */ - default: - if (txnid == 0) - break; - - ret = __db_txnlist_find(dbenv, info, txnid, &status); - - /* If not found, this is an incomplete abort. */ - if (ret == DB_NOTFOUND) - return (__db_txnlist_add(dbenv, - info, txnid, TXN_IGNORE, lsnp)); - if (ret != 0) - return (ret); - - /* - * If we ignore the transaction, ignore the operation - * UNLESS this is a child commit in which case we need - * to make sure that the child also gets marked as - * ignore. - */ - if (status == TXN_IGNORE && rectype != DB___txn_child) { - make_call = 0; - break; - } - if (status == TXN_COMMIT) - break; - - /* Set make_call in case we came through default */ - make_call = 1; - if (status == TXN_OK && - (ret = __db_txnlist_update(dbenv, - info, txnid, rectype == DB___txn_xa_regop ? - TXN_PREPARE : TXN_ABORT, NULL, &status, 0)) != 0) - return (ret); - } - break; - case DB_TXN_FORWARD_ROLL: - /* - * In the forward pass, if we haven't seen the transaction, - * do nothing, else recover it. - * - * We need to always redo DB___db_noop records, so that we - * properly handle any commits after the file was closed. - */ - switch (rectype) { - case DB___txn_recycle: - case DB___txn_ckp: - case DB___db_noop: - make_call = 1; - break; - - default: - if (txnid == 0) - status = 0; - else { - ret = __db_txnlist_find(dbenv, - info, txnid, &status); - - if (ret == DB_NOTFOUND) - /* Break out out of if clause. */ - ; - else if (ret != 0) - return (ret); - else if (status == TXN_COMMIT) { - make_call = 1; - break; - } - } - -#ifndef HAVE_FTRUNCATE - if (status != TXN_IGNORE && - (rectype == DB___ham_metagroup || - rectype == DB___ham_groupalloc || - rectype == DB___db_pg_alloc)) { - /* - * Because we do not have truncate - * all allocation records must be reprocessed - * during rollforward in case the file was - * just created. It may not have been - * present during the backward pass. - */ - make_call = 1; - redo = DB_TXN_BACKWARD_ALLOC; - } else -#endif - if (rectype == DB___dbreg_register) { - /* - * This may be a transaction dbreg_register. - * If it is, we only make the call on a COMMIT, - * which we checked above. If it's not, then we - * should always make the call, because we need - * the file open information. - */ - if (txnid == 0) - make_call = 1; - } - } - break; - case DB_TXN_BACKWARD_ALLOC: - default: - return (__db_unknown_flag( - dbenv, "__db_dispatch", (u_int32_t)redo)); - } - - if (make_call) { - /* - * If the debug flag is set then we are logging - * records for a non-durable update so that they - * may be examined for diagnostic purposes. - * So only make the call if we are printing, - * otherwise we need to extract the previous - * lsn so undo will work properly. - */ - if (rectype & DB_debug_FLAG) { - if (redo == DB_TXN_PRINT) - rectype &= ~DB_debug_FLAG; - else { - memcpy(lsnp, - (u_int8_t *)db->data + - sizeof(rectype) + - sizeof(txnid), sizeof(*lsnp)); - return (0); - } - } - if (rectype >= DB_user_BEGIN && dbenv->app_dispatch != NULL) - return (dbenv->app_dispatch(dbenv, db, lsnp, redo)); - else { - /* - * The size of the dtab table argument is the same as - * the standard table, use the standard table's size - * as our sanity check. 
- */ - if (rectype > dtabsize || dtab[rectype] == NULL) { - __db_err(dbenv, - "Illegal record type %lu in log", - (u_long)rectype); - return (EINVAL); - } - return (dtab[rectype](dbenv, db, lsnp, redo, info)); - } - } - - return (0); -} - -/* - * __db_add_recovery -- - * - * PUBLIC: int __db_add_recovery __P((DB_ENV *, - * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *, - * PUBLIC: int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t)); - */ -int -__db_add_recovery(dbenv, dtab, dtabsize, func, ndx) - DB_ENV *dbenv; - int (***dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t *dtabsize; - int (*func) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - u_int32_t ndx; -{ - size_t i, nsize; - int ret; - - /* Check if we have to grow the table. */ - if (ndx >= *dtabsize) { - nsize = ndx + 40; - if ((ret = - __os_realloc(dbenv, nsize * sizeof((*dtab)[0]), dtab)) != 0) - return (ret); - for (i = *dtabsize; i < nsize; ++i) - (*dtab)[i] = NULL; - *dtabsize = nsize; - } - - (*dtab)[ndx] = func; - return (0); -} - -/* - * __db_txnlist_init -- - * Initialize transaction linked list. - * - * PUBLIC: int __db_txnlist_init __P((DB_ENV *, - * PUBLIC: u_int32_t, u_int32_t, DB_LSN *, DB_TXNHEAD **)); - */ -int -__db_txnlist_init(dbenv, low_txn, hi_txn, trunc_lsn, retp) - DB_ENV *dbenv; - u_int32_t low_txn, hi_txn; - DB_LSN *trunc_lsn; - DB_TXNHEAD **retp; -{ - DB_TXNHEAD *headp; - u_int32_t size, tmp; - int ret; - - /* - * Size a hash table. - * If low is zero then we are being called during rollback - * and we need only one slot. - * Hi maybe lower than low if we have recycled txnid's. - * The numbers here are guesses about txn density, we can afford - * to look at a few entries in each slot. - */ - if (low_txn == 0) - size = 1; - else { - if (hi_txn < low_txn) { - tmp = hi_txn; - hi_txn = low_txn; - low_txn = tmp; - } - tmp = hi_txn - low_txn; - /* See if we wrapped around. */ - if (tmp > (TXN_MAXIMUM - TXN_MINIMUM) / 2) - tmp = (low_txn - TXN_MINIMUM) + (TXN_MAXIMUM - hi_txn); - size = tmp / 5; - if (size < 100) - size = 100; - } - if ((ret = __os_malloc(dbenv, - sizeof(DB_TXNHEAD) + size * sizeof(headp->head), &headp)) != 0) - return (ret); - - memset(headp, 0, sizeof(DB_TXNHEAD) + size * sizeof(headp->head)); - headp->maxid = hi_txn; - headp->generation = 0; - headp->nslots = size; - headp->gen_alloc = 8; - if ((ret = __os_malloc(dbenv, headp->gen_alloc * - sizeof(headp->gen_array[0]), &headp->gen_array)) != 0) { - __os_free(dbenv, headp); - return (ret); - } - headp->gen_array[0].generation = 0; - headp->gen_array[0].txn_min = TXN_MINIMUM; - headp->gen_array[0].txn_max = TXN_MAXIMUM; - if (trunc_lsn != NULL) { - headp->trunc_lsn = *trunc_lsn; - headp->maxlsn = *trunc_lsn; - } else { - ZERO_LSN(headp->trunc_lsn); - ZERO_LSN(headp->maxlsn); - } - ZERO_LSN(headp->ckplsn); - - *retp = headp; - return (0); -} - -/* - * __db_txnlist_add -- - * Add an element to our transaction linked list. 
- * - * PUBLIC: int __db_txnlist_add __P((DB_ENV *, - * PUBLIC: DB_TXNHEAD *, u_int32_t, u_int32_t, DB_LSN *)); - */ -int -__db_txnlist_add(dbenv, hp, txnid, status, lsn) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - u_int32_t txnid, status; - DB_LSN *lsn; -{ - DB_TXNLIST *elp; - int ret; - - if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0) - return (ret); - - LIST_INSERT_HEAD(&hp->head[DB_TXNLIST_MASK(hp, txnid)], elp, links); - - elp->type = TXNLIST_TXNID; - elp->u.t.txnid = txnid; - elp->u.t.status = status; - elp->u.t.generation = hp->generation; - if (txnid > hp->maxid) - hp->maxid = txnid; - if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT) - hp->maxlsn = *lsn; - - DB_ASSERT(lsn == NULL || - status != TXN_COMMIT || log_compare(&hp->maxlsn, lsn) >= 0); - - return (0); -} - -/* - * __db_txnlist_remove -- - * Remove an element from our transaction linked list. - * - * PUBLIC: int __db_txnlist_remove __P((DB_ENV *, DB_TXNHEAD *, u_int32_t)); - */ -int -__db_txnlist_remove(dbenv, hp, txnid) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - u_int32_t txnid; -{ - DB_TXNLIST *entry; - u_int32_t status; - - return (__db_txnlist_find_internal(dbenv, - hp, TXNLIST_TXNID, txnid, NULL, &entry, 1, &status)); -} - -/* - * __db_txnlist_ckp -- - * Used to record the maximum checkpoint that will be retained - * after recovery. Typically this is simply the max checkpoint, but - * if we are doing client replication recovery or timestamp-based - * recovery, we are going to virtually truncate the log and we need - * to retain the last checkpoint before the truncation point. - * - * PUBLIC: void __db_txnlist_ckp __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *)); - */ -void -__db_txnlist_ckp(dbenv, hp, ckp_lsn) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - DB_LSN *ckp_lsn; -{ - - COMPQUIET(dbenv, NULL); - - if (IS_ZERO_LSN(hp->ckplsn) && !IS_ZERO_LSN(hp->maxlsn) && - log_compare(&hp->maxlsn, ckp_lsn) >= 0) - hp->ckplsn = *ckp_lsn; -} - -/* - * __db_txnlist_end -- - * Discard transaction linked list. - * - * PUBLIC: void __db_txnlist_end __P((DB_ENV *, DB_TXNHEAD *)); - */ -void -__db_txnlist_end(dbenv, hp) - DB_ENV *dbenv; - DB_TXNHEAD *hp; -{ - u_int32_t i; - DB_TXNLIST *p; - - if (hp == NULL) - return; - - for (i = 0; i < hp->nslots; i++) - while (hp != NULL && (p = LIST_FIRST(&hp->head[i])) != NULL) { - switch (p->type) { - case TXNLIST_LSN: - __os_free(dbenv, p->u.l.lsn_stack); - break; - case TXNLIST_DELETE: - case TXNLIST_PGNO: - case TXNLIST_TXNID: - default: - /* - * Possibly an incomplete DB_TXNLIST; just - * free it. - */ - break; - } - LIST_REMOVE(p, links); - __os_free(dbenv, p); - } - - if (hp->gen_array != NULL) - __os_free(dbenv, hp->gen_array); - __os_free(dbenv, hp); -} - -/* - * __db_txnlist_find -- - * Checks to see if a txnid with the current generation is in the - * txnid list. This returns DB_NOTFOUND if the item isn't in the - * list otherwise it returns (like __db_txnlist_find_internal) - * the status of the transaction. A txnid of 0 means the record - * was generated while not in a transaction. - * - * PUBLIC: int __db_txnlist_find __P((DB_ENV *, - * PUBLIC: DB_TXNHEAD *, u_int32_t, u_int32_t *)); - */ -int -__db_txnlist_find(dbenv, hp, txnid, statusp) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - u_int32_t txnid, *statusp; -{ - DB_TXNLIST *entry; - - if (txnid == 0) - return (DB_NOTFOUND); - - return (__db_txnlist_find_internal(dbenv, hp, - TXNLIST_TXNID, txnid, NULL, &entry, 0, statusp)); -} - -/* - * __db_txnlist_update -- - * Change the status of an existing transaction entry. 
- * Returns DB_NOTFOUND if no such entry exists. - * - * PUBLIC: int __db_txnlist_update __P((DB_ENV *, DB_TXNHEAD *, - * PUBLIC: u_int32_t, u_int32_t, DB_LSN *, u_int32_t *, int)); - */ -int -__db_txnlist_update(dbenv, hp, txnid, status, lsn, ret_status, add_ok) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - u_int32_t txnid, status; - DB_LSN *lsn; - u_int32_t *ret_status; - int add_ok; -{ - DB_TXNLIST *elp; - int ret; - - if (txnid == 0) - return (DB_NOTFOUND); - - ret = __db_txnlist_find_internal(dbenv, - hp, TXNLIST_TXNID, txnid, NULL, &elp, 0, ret_status); - - if (ret == DB_NOTFOUND && add_ok) { - *ret_status = status; - return (__db_txnlist_add(dbenv, hp, txnid, status, lsn)); - } - if (ret != 0) - return (ret); - - if (*ret_status == TXN_IGNORE) - return (0); - - elp->u.t.status = status; - - if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT) - hp->maxlsn = *lsn; - - return (ret); -} - -/* - * __db_txnlist_find_internal -- - * Find an entry on the transaction list. If the entry is not there or - * the list pointer is not initialized we return DB_NOTFOUND. If the - * item is found, we return the status. Currently we always call this - * with an initialized list pointer but checking for NULL keeps it general. - */ -static int -__db_txnlist_find_internal(dbenv, - hp, type, txnid, uid, txnlistp, delete, statusp) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - db_txnlist_type type; - u_int32_t txnid; - u_int8_t uid[DB_FILE_ID_LEN]; - DB_TXNLIST **txnlistp; - int delete; - u_int32_t *statusp; -{ - struct __db_headlink *head; - DB_TXNLIST *p; - u_int32_t generation, hash, i; - int ret; - - ret = 0; - - if (hp == NULL) - return (DB_NOTFOUND); - - switch (type) { - case TXNLIST_TXNID: - hash = txnid; - /* Find the most recent generation containing this ID */ - for (i = 0; i <= hp->generation; i++) - /* The range may wrap around the end. */ - if (hp->gen_array[i].txn_min < - hp->gen_array[i].txn_max ? - (txnid >= hp->gen_array[i].txn_min && - txnid <= hp->gen_array[i].txn_max) : - (txnid >= hp->gen_array[i].txn_min || - txnid <= hp->gen_array[i].txn_max)) - break; - DB_ASSERT(i <= hp->generation); - generation = hp->gen_array[i].generation; - break; - case TXNLIST_PGNO: - memcpy(&hash, uid, sizeof(hash)); - generation = 0; - break; - case TXNLIST_DELETE: - case TXNLIST_LSN: - default: - return (__db_panic(dbenv, EINVAL)); - } - - head = &hp->head[DB_TXNLIST_MASK(hp, hash)]; - - for (p = LIST_FIRST(head); p != NULL; p = LIST_NEXT(p, links)) { - if (p->type != type) - continue; - switch (type) { - case TXNLIST_TXNID: - if (p->u.t.txnid != txnid || - generation != p->u.t.generation) - continue; - *statusp = p->u.t.status; - break; - - case TXNLIST_PGNO: - if (memcmp(uid, p->u.p.uid, DB_FILE_ID_LEN) != 0) - continue; - break; - case TXNLIST_DELETE: - case TXNLIST_LSN: - default: - return (__db_panic(dbenv, EINVAL)); - } - if (delete == 1) { - LIST_REMOVE(p, links); - __os_free(dbenv, p); - *txnlistp = NULL; - } else if (p != LIST_FIRST(head)) { - /* Move it to head of list. */ - LIST_REMOVE(p, links); - LIST_INSERT_HEAD(head, p, links); - *txnlistp = p; - } else - *txnlistp = p; - return (ret); - } - - return (DB_NOTFOUND); -} - -/* - * __db_txnlist_gen -- - * Change the current generation number. 
- * - * PUBLIC: int __db_txnlist_gen __P((DB_ENV *, - * PUBLIC: DB_TXNHEAD *, int, u_int32_t, u_int32_t)); - */ -int -__db_txnlist_gen(dbenv, hp, incr, min, max) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - int incr; - u_int32_t min, max; -{ - int ret; - - /* - * During recovery generation numbers keep track of "restart" - * checkpoints and recycle records. Restart checkpoints occur - * whenever we take a checkpoint and there are no outstanding - * transactions. When that happens, we can reset transaction IDs - * back to TXNID_MINIMUM. Currently we only do the reset - * at then end of recovery. Recycle records occur when txnids - * are exhausted during runtime. A free range of ids is identified - * and logged. This code maintains a stack of ranges. A txnid - * is given the generation number of the first range it falls into - * in the stack. - */ - if (incr < 0) { - --hp->generation; - memmove(hp->gen_array, &hp->gen_array[1], - (hp->generation + 1) * sizeof(hp->gen_array[0])); - } else { - ++hp->generation; - if (hp->generation >= hp->gen_alloc) { - hp->gen_alloc *= 2; - if ((ret = __os_realloc(dbenv, hp->gen_alloc * - sizeof(hp->gen_array[0]), &hp->gen_array)) != 0) - return (ret); - } - memmove(&hp->gen_array[1], &hp->gen_array[0], - hp->generation * sizeof(hp->gen_array[0])); - hp->gen_array[0].generation = hp->generation; - hp->gen_array[0].txn_min = min; - hp->gen_array[0].txn_max = max; - } - return (0); -} - -/* - * __db_txnlist_lsnadd -- - * Save the prev_lsn from a txn_child record. - * - * PUBLIC: int __db_txnlist_lsnadd __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *)); - */ -int -__db_txnlist_lsnadd(dbenv, hp, lsnp) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - DB_LSN *lsnp; -{ - DB_TXNLIST *elp; - int ret; - - if (IS_ZERO_LSN(*lsnp)) - return (0); - - for (elp = LIST_FIRST(&hp->head[0]); - elp != NULL; elp = LIST_NEXT(elp, links)) - if (elp->type == TXNLIST_LSN) - break; - - if (elp == NULL) { - if ((ret = __db_txnlist_lsninit(dbenv, hp, lsnp)) != 0) - return (ret); - return (DB_SURPRISE_KID); - } - - if (elp->u.l.stack_indx == elp->u.l.stack_size) { - elp->u.l.stack_size <<= 1; - if ((ret = __os_realloc(dbenv, sizeof(DB_LSN) * - elp->u.l.stack_size, &elp->u.l.lsn_stack)) != 0) { - __db_txnlist_end(dbenv, hp); - return (ret); - } - } - elp->u.l.lsn_stack[elp->u.l.stack_indx++] = *lsnp; - - return (0); -} - -/* - * __db_txnlist_lsnget -- - * - * PUBLIC: int __db_txnlist_lsnget __P((DB_ENV *, - * PUBLIC: DB_TXNHEAD *, DB_LSN *, u_int32_t)); - * Get the lsn saved from a txn_child record. - */ -int -__db_txnlist_lsnget(dbenv, hp, lsnp, flags) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - DB_LSN *lsnp; - u_int32_t flags; -{ - DB_TXNLIST *elp; - - COMPQUIET(dbenv, NULL); - COMPQUIET(flags, 0); - - for (elp = LIST_FIRST(&hp->head[0]); - elp != NULL; elp = LIST_NEXT(elp, links)) - if (elp->type == TXNLIST_LSN) - break; - - if (elp == NULL || elp->u.l.stack_indx == 0) { - ZERO_LSN(*lsnp); - return (0); - } - - *lsnp = elp->u.l.lsn_stack[--elp->u.l.stack_indx]; - - return (0); -} - -/* - * __db_txnlist_lsninit -- - * Initialize a transaction list with an lsn array entry. 
- * - * PUBLIC: int __db_txnlist_lsninit __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *)); - */ -int -__db_txnlist_lsninit(dbenv, hp, lsnp) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - DB_LSN *lsnp; -{ - DB_TXNLIST *elp; - int ret; - - elp = NULL; - - if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0) - goto err; - LIST_INSERT_HEAD(&hp->head[0], elp, links); - - elp->type = TXNLIST_LSN; - if ((ret = __os_malloc(dbenv, - sizeof(DB_LSN) * DB_LSN_STACK_SIZE, &elp->u.l.lsn_stack)) != 0) - goto err; - elp->u.l.stack_indx = 1; - elp->u.l.stack_size = DB_LSN_STACK_SIZE; - elp->u.l.lsn_stack[0] = *lsnp; - - return (0); - -err: __db_txnlist_end(dbenv, hp); - return (ret); -} - -#ifndef HAVE_FTRUNCATE -/* - * __db_add_limbo -- add pages to the limbo list. - * Get the file information and call pgnoadd for each page. - * - * PUBLIC: #ifndef HAVE_FTRUNCATE - * PUBLIC: int __db_add_limbo __P((DB_ENV *, - * PUBLIC: DB_TXNHEAD *, int32_t, db_pgno_t, int32_t)); - * PUBLIC: #endif - */ -int -__db_add_limbo(dbenv, hp, fileid, pgno, count) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - int32_t fileid; - db_pgno_t pgno; - int32_t count; -{ - DB_LOG *dblp; - FNAME *fnp; - int ret; - - dblp = dbenv->lg_handle; - if ((ret = __dbreg_id_to_fname(dblp, fileid, 0, &fnp)) != 0) - return (ret); - - do { - if ((ret = - __db_txnlist_pgnoadd(dbenv, hp, fileid, fnp->ufid, - R_ADDR(&dblp->reginfo, fnp->name_off), pgno)) != 0) - return (ret); - pgno++; - } while (--count != 0); - - return (0); -} - -/* - * __db_do_the_limbo -- move pages from limbo to free. - * - * Limbo processing is what ensures that we correctly handle and - * recover from page allocations. During recovery, for each database, - * we process each in-question allocation, link them into the free list - * and then write out the new meta-data page that contains the pointer - * to the new beginning of the free list. On an abort, we use our - * standard __db_free mechanism in a compensating transaction which logs - * the specific modifications to the free list. - * - * If we run out of log space during an abort, then we can't write the - * compensating transaction, so we abandon the idea of a compensating - * transaction, and go back to processing how we do during recovery. - * The reason that this is not the norm is that it's expensive: it requires - * that we flush any database with an in-question allocation. Thus if - * a compensating transaction fails, we never try to restart it. - * - * Since files may be open and closed within transactions (in particular, - * the master database for subdatabases), we must be prepared to open - * files during this process. If there is a compensating transaction, we - * can open the files in that transaction. If this was an abort and there - * is no compensating transaction, then we've got to perform these opens - * in the context of the aborting transaction so that we do not deadlock. - * During recovery, there's no locking, so this isn't an issue. - * - * What you want to keep in mind when reading this is that there are two - * algorithms going on here: ctxn == NULL, then we're either in recovery - * or our compensating transaction has failed and we're doing the - * "create list and write meta-data page" algorithm. Otherwise, we're in - * an abort and doing the "use compensating transaction" algorithm. 
- * - * PUBLIC: #ifndef HAVE_FTRUNCATE - * PUBLIC: int __db_do_the_limbo __P((DB_ENV *, - * PUBLIC: DB_TXN *, DB_TXN *, DB_TXNHEAD *, db_limbo_state)); - * PUBLIC: #endif - */ -int -__db_do_the_limbo(dbenv, ptxn, txn, hp, state) - DB_ENV *dbenv; - DB_TXN *ptxn, *txn; - DB_TXNHEAD *hp; - db_limbo_state state; -{ - DB_TXNLIST *elp; - u_int32_t h; - int ret; - - ret = 0; - /* - * The slots correspond to hash buckets. We've hashed the - * fileids into hash buckets and need to pick up all affected - * files. (There will only be a single slot for an abort.) - */ - for (h = 0; h < hp->nslots; h++) { - if ((elp = LIST_FIRST(&hp->head[h])) == NULL) - continue; - if (ptxn != NULL) { - if ((ret = - __db_limbo_move(dbenv, ptxn, txn, elp)) != 0) - goto err; - } else if ((ret = - __db_limbo_bucket(dbenv, txn, elp, state)) != 0) - goto err; - } - -err: if (ret != 0) { - __db_err(dbenv, "Fatal error in abort of an allocation"); - ret = __db_panic(dbenv, ret); - } - - return (ret); -} - -/* Limbo support routines. */ - -/* - * __db_lock_move -- - * Move a lock from child to parent. - */ -static int -__db_lock_move(dbenv, fileid, pgno, mode, ptxn, txn) - DB_ENV *dbenv; - u_int8_t *fileid; - db_pgno_t pgno; - db_lockmode_t mode; - DB_TXN *ptxn, *txn; -{ - DBT lock_dbt; - DB_LOCK lock; - DB_LOCK_ILOCK lock_obj; - DB_LOCKREQ req; - int ret; - - lock_obj.pgno = pgno; - memcpy(lock_obj.fileid, fileid, DB_FILE_ID_LEN); - lock_obj.type = DB_PAGE_LOCK; - - memset(&lock_dbt, 0, sizeof(lock_dbt)); - lock_dbt.data = &lock_obj; - lock_dbt.size = sizeof(lock_obj); - - if ((ret = __lock_get(dbenv, - txn->txnid, 0, &lock_dbt, mode, &lock)) == 0) { - memset(&req, 0, sizeof(req)); - req.lock = lock; - req.op = DB_LOCK_TRADE; - ret = __lock_vec(dbenv, ptxn->txnid, 0, &req, 1, NULL); - } - return (ret); -} - -/* - * __db_limbo_move - * Move just the metapage lock to the parent. - */ -static int -__db_limbo_move(dbenv, ptxn, txn, elp) - DB_ENV *dbenv; - DB_TXN *ptxn, *txn; - DB_TXNLIST *elp; -{ - int ret; - - for (; elp != NULL; elp = LIST_NEXT(elp, links)) { - if (elp->type != TXNLIST_PGNO || elp->u.p.locked == 1) - continue; - if ((ret = __db_lock_move(dbenv, elp->u.p.uid, - PGNO_BASE_MD, DB_LOCK_WRITE, ptxn, txn)) != 0) - return (ret); - elp->u.p.locked = 1; - } - - return (0); -} -/* - * __db_limbo_bucket - * Perform limbo processing for a single hash bucket in the txnlist. - * txn is the transaction aborting in the case of an abort and ctxn is the - * compensating transaction. - */ - -#define T_RESTORED(txn) ((txn) != NULL && F_ISSET(txn, TXN_RESTORED)) -static int -__db_limbo_bucket(dbenv, txn, elp, state) - DB_ENV *dbenv; - DB_TXN *txn; - DB_TXNLIST *elp; - db_limbo_state state; -{ - DB *dbp; - DB_MPOOLFILE *mpf; - DBMETA *meta; - DB_TXN *ctxn, *t; - FNAME *fname; - db_pgno_t last_pgno, pgno; - int dbp_created, in_retry, ret, t_ret; - - ctxn = NULL; - in_retry = 0; - meta = NULL; - mpf = NULL; - ret = 0; - for (; elp != NULL; elp = LIST_NEXT(elp, links)) { - if (elp->type != TXNLIST_PGNO) - continue; -retry: dbp_created = 0; - - /* - * Pick the transaction in which to potentially - * log compensations. - */ - if (state == LIMBO_PREPARE) - ctxn = txn; - else if (!in_retry && state != LIMBO_RECOVER && - state != LIMBO_TIMESTAMP && !T_RESTORED(txn) && - (ret = __txn_compensate_begin(dbenv, &ctxn)) != 0) - return (ret); - - /* - * Either use the compensating transaction or - * the one passed in, which will be null if recovering. - */ - t = ctxn == NULL ? txn : ctxn; - - /* First try to get a dbp by fileid. 
*/ - ret = __dbreg_id_to_db(dbenv, t, &dbp, elp->u.p.fileid, 0); - - /* - * If the file was closed and reopened its id could change. - * Look it up the hard way. - */ - if (ret == DB_DELETED || ret == ENOENT || - ((ret == 0 && - memcmp(elp->u.p.uid, dbp->fileid, DB_FILE_ID_LEN) != 0))) { - if ((ret = __dbreg_fid_to_fname( - dbenv->lg_handle, elp->u.p.uid, 0, &fname)) == 0) - ret = __dbreg_id_to_db( - dbenv, t, &dbp, fname->id, 0); - } - /* - * File is being destroyed. No need to worry about - * dealing with recovery of allocations. - */ - if (ret == DB_DELETED || - (ret == 0 && F_ISSET(dbp, DB_AM_DISCARD))) - goto next; - - if (ret != 0) { - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - goto err; - - /* - * This tells the system not to lock, which is always - * OK, whether this is an abort or recovery. - */ - F_SET(dbp, DB_AM_COMPENSATE); - dbp_created = 1; - - /* It is ok if the file is nolonger there. */ - ret = __db_open(dbp, t, elp->u.p.fname, NULL, - DB_UNKNOWN, DB_ODDFILESIZE, __db_omode(OWNER_RW), - PGNO_BASE_MD); - if (ret == ENOENT) - goto next; - } - - /* - * Verify that we are opening the same file that we were - * referring to when we wrote this log record. - */ - if (memcmp(elp->u.p.uid, dbp->fileid, DB_FILE_ID_LEN) != 0) - goto next; - - mpf = dbp->mpf; - last_pgno = PGNO_INVALID; - - if (meta == NULL && - (ctxn == NULL || state == LIMBO_COMPENSATE)) { - pgno = PGNO_BASE_MD; - if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) - goto err; - last_pgno = meta->free; - } - - if (state == LIMBO_PREPARE) { - if ((ret = __db_limbo_prepare(dbp, ctxn, elp)) != 0) - goto err; - } else - ret = __db_limbo_fix(dbp, - ctxn, elp, &last_pgno, meta, state); - /* - * If we were doing compensating transactions, then we are - * going to hope this error was due to running out of space. - * We'll change modes (into the sync the file mode) and keep - * trying. If we weren't doing compensating transactions, - * then this is a real error and we're sunk. - */ - if (ret != 0) { - if (ret == DB_RUNRECOVERY || ctxn == NULL) - goto err; - in_retry = 1; - if ((ret = __txn_abort(ctxn)) != 0) - goto err; - ctxn = NULL; - goto retry; - } - - if (state == LIMBO_PREPARE) - ctxn = NULL; - - else if (ctxn != NULL) { - /* - * We only force compensation at the end of recovery. - * We want the txn_commit to be logged so turn - * off the recovery flag briefly. - */ - if (state == LIMBO_COMPENSATE) - F_CLR( - (DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER); - ret = __txn_commit(ctxn, DB_TXN_NOSYNC); - ctxn = NULL; - if (state == LIMBO_COMPENSATE) - F_SET( - (DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER); - if (ret != 0) - goto retry; - } - - /* - * This is where we handle the case where we're explicitly - * putting together a free list. We need to decide whether - * we have to write the meta-data page, and if we do, then - * we need to sync it as well. - */ - else if (last_pgno == meta->free) { - /* No change to page; just put the page back. */ - if ((ret = __memp_fput(mpf, meta, 0)) != 0) - goto err; - meta = NULL; - } else { - /* - * These changes are unlogged so we cannot have the - * metapage pointing at pages that are not on disk. - * Therefore, we flush the new free list, then update - * the metapage. We have to put the meta-data page - * first so that it isn't pinned when we try to sync. 
- */ - if (!IS_RECOVERING(dbenv) && !T_RESTORED(txn)) - __db_err(dbenv, "Flushing free list to disk"); - if ((ret = __memp_fput(mpf, meta, 0)) != 0) - goto err; - meta = NULL; - /* - * If the sync fails then we cannot flush the - * newly allocated pages. That is, the file - * cannot be extended. Don't let the metapage - * point at them. - * We may lose these pages from the file if it - * can be extended later. If there is never - * space for the pages, then things will be ok. - */ - if ((ret = __db_sync(dbp)) == 0) { - pgno = PGNO_BASE_MD; - if ((ret = - __memp_fget(mpf, &pgno, 0, &meta)) != 0) - goto err; - meta->free = last_pgno; - if ((ret = __memp_fput(mpf, - meta, DB_MPOOL_DIRTY)) != 0) - goto err; - meta = NULL; - } else { - __db_err(dbenv, - "%s: %s", dbp->fname, db_strerror(ret)); - __db_err(dbenv, "%s: %s %s", dbp->fname, - "allocation flush failed, some free pages", - "may not appear in the free list"); - ret = 0; - } - } - -next: - /* - * If we get here, either we have processed the list - * or the db file has been deleted or could not be opened. - */ - if (ctxn != NULL && - (t_ret = __txn_abort(ctxn)) != 0 && ret == 0) - ret = t_ret; - - if (dbp_created && - (t_ret = __db_close(dbp, txn, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - dbp = NULL; - if (state != LIMBO_PREPARE && state != LIMBO_TIMESTAMP) { - __os_free(dbenv, elp->u.p.fname); - __os_free(dbenv, elp->u.p.pgno_array); - } - if (ret == ENOENT) - ret = 0; - else if (ret != 0) - goto err; - } - -err: if (meta != NULL) - (void)__memp_fput(mpf, meta, 0); - return (ret); -} - -/* - * __db_limbo_fix -- - * Process a single limbo entry which describes all the page allocations - * for a single file. - */ -static int -__db_limbo_fix(dbp, ctxn, elp, lastp, meta, state) - DB *dbp; - DB_TXN *ctxn; - DB_TXNLIST *elp; - db_pgno_t *lastp; - DBMETA *meta; - db_limbo_state state; -{ - DBC *dbc; - DBT ldbt; - DB_MPOOLFILE *mpf; - PAGE *freep, *pagep; - db_pgno_t next, pgno; - u_int32_t i; - int ret, t_ret; - - /* - * Loop through the entries for this txnlist element and - * either link them into the free list or write a compensating - * record for each. - */ - dbc = NULL; - mpf = dbp->mpf; - pagep = NULL; - ret = 0; - - for (i = 0; i < elp->u.p.nentries; i++) { - pgno = elp->u.p.pgno_array[i]; - - if (pgno == PGNO_INVALID) - continue; - - if ((ret = - __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0) { - if (ret != ENOSPC) - goto err; - continue; - } - - if (state == LIMBO_COMPENSATE || IS_ZERO_LSN(LSN(pagep))) { - if (ctxn == NULL) { - /* - * If this is a fatal recovery which - * spans a previous crash this page may - * be on the free list already. - */ - for (next = *lastp; next != 0; ) { - if (next == pgno) - break; - if ((ret = __memp_fget(mpf, - &next, 0, &freep)) != 0) - goto err; - next = NEXT_PGNO(freep); - if ((ret = - __memp_fput(mpf, freep, 0)) != 0) - goto err; - } - - if (next != pgno) { - P_INIT(pagep, dbp->pgsize, pgno, - PGNO_INVALID, *lastp, 0, P_INVALID); - /* Make the lsn non-zero but generic. */ - INIT_LSN(LSN(pagep)); - *lastp = pgno; - } - } else if (state == LIMBO_COMPENSATE) { - /* - * Generate a log record for what we did on the - * LIMBO_TIMESTAMP pass. All pages here are - * free so P_OVERHEAD is sufficient. 
- */ - ZERO_LSN(pagep->lsn); - memset(&ldbt, 0, sizeof(ldbt)); - ldbt.data = pagep; - ldbt.size = P_OVERHEAD(dbp); - if ((ret = __db_pg_new_log(dbp, ctxn, - &LSN(meta), 0, pagep->pgno, - &LSN(meta), PGNO_BASE_MD, - &ldbt, pagep->next_pgno)) != 0) - goto err; - } else { - if (dbc == NULL && (ret = - __db_cursor(dbp, ctxn, &dbc, 0)) != 0) - goto err; - /* - * If the dbp is compensating (because we - * opened it), the dbc will automatically be - * marked compensating, but in case we didn't - * do the open, we have to mark it explicitly. - */ - F_SET(dbc, DBC_COMPENSATE); - - /* - * If aborting a txn for a different process - * via XA or failchk, DB_AM_RECOVER will be - * set but we need to log the compensating - * transactions. - */ - F_CLR(dbc, DBC_RECOVER); - - ret = __db_free(dbc, pagep); - pagep = NULL; - - /* - * On any error, we hope that the error was - * caused due to running out of space, and we - * switch modes, doing the processing where we - * sync out files instead of doing compensating - * transactions. If this was a real error and - * not out of space, we assume that some other - * call will fail real soon. - */ - if (ret != 0) { - /* Assume that this is out of space. */ - (void)__db_c_close(dbc); - dbc = NULL; - goto err; - } - } - } - else - elp->u.p.pgno_array[i] = PGNO_INVALID; - - if (pagep != NULL) { - ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY); - pagep = NULL; - } - if (ret != 0) - goto err; - } - -err: if (pagep != NULL && - (t_ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -static int -__db_limbo_prepare(dbp, txn, elp) - DB *dbp; - DB_TXN *txn; - DB_TXNLIST *elp; -{ - DB_LSN lsn; - DB_MPOOLFILE *mpf; - PAGE *pagep; - db_pgno_t pgno; - u_int32_t i; - int ret, t_ret; - - /* - * Loop through the entries for this txnlist element and - * output a prepare record for them. - */ - pagep = NULL; - ret = 0; - mpf = dbp->mpf; - - for (i = 0; i < elp->u.p.nentries; i++) { - pgno = elp->u.p.pgno_array[i]; - - if ((ret = - __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0) { - if (ret != ENOSPC) - return (ret); - continue; - } - - if (IS_ZERO_LSN(LSN(pagep))) - ret = __db_pg_prepare_log(dbp, txn, &lsn, 0, pgno); - - if ((t_ret = __memp_fput(mpf, pagep, 0)) != 0 && ret == 0) - ret = t_ret; - - if (ret != 0) - return (ret); - } - - return (0); -} - -#define DB_TXNLIST_MAX_PGNO 8 /* A nice even number. */ - -/* - * __db_txnlist_pgnoadd -- - * Find the txnlist entry for a file and add this pgno, or add the list - * entry for the file and then add the pgno. 
- */ -static int -__db_txnlist_pgnoadd(dbenv, hp, fileid, uid, fname, pgno) - DB_ENV *dbenv; - DB_TXNHEAD *hp; - int32_t fileid; - u_int8_t uid[DB_FILE_ID_LEN]; - char *fname; - db_pgno_t pgno; -{ - DB_TXNLIST *elp; - size_t len; - u_int32_t hash, status; - int ret; - - elp = NULL; - - if ((ret = __db_txnlist_find_internal(dbenv, hp, - TXNLIST_PGNO, 0, uid, &elp, 0, &status)) != 0 && ret != DB_NOTFOUND) - goto err; - - if (ret == DB_NOTFOUND || status != TXN_OK) { - if ((ret = - __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0) - goto err; - memcpy(&hash, uid, sizeof(hash)); - LIST_INSERT_HEAD( - &hp->head[DB_TXNLIST_MASK(hp, hash)], elp, links); - memcpy(elp->u.p.uid, uid, DB_FILE_ID_LEN); - - len = strlen(fname) + 1; - if ((ret = __os_malloc(dbenv, len, &elp->u.p.fname)) != 0) - goto err; - memcpy(elp->u.p.fname, fname, len); - - elp->u.p.maxentry = 0; - elp->u.p.locked = 0; - elp->type = TXNLIST_PGNO; - if ((ret = __os_malloc(dbenv, - 8 * sizeof(db_pgno_t), &elp->u.p.pgno_array)) != 0) - goto err; - elp->u.p.maxentry = DB_TXNLIST_MAX_PGNO; - elp->u.p.nentries = 0; - } else if (elp->u.p.nentries == elp->u.p.maxentry) { - elp->u.p.maxentry <<= 1; - if ((ret = __os_realloc(dbenv, elp->u.p.maxentry * - sizeof(db_pgno_t), &elp->u.p.pgno_array)) != 0) - goto err; - } - - elp->u.p.pgno_array[elp->u.p.nentries++] = pgno; - /* Update to the latest fileid. Limbo will find it faster. */ - elp->u.p.fileid = fileid; - - return (0); - -err: return (ret); -} -#endif - -#ifdef DEBUG -/* - * __db_txnlist_print -- - * Print out the transaction list. - * - * PUBLIC: void __db_txnlist_print __P((DB_TXNHEAD *)); - */ -void -__db_txnlist_print(hp) - DB_TXNHEAD *hp; -{ - DB_TXNLIST *p; - u_int32_t i; - char *txntype; - - printf("Maxid: %lu Generation: %lu\n", - (u_long)hp->maxid, (u_long)hp->generation); - for (i = 0; i < hp->nslots; i++) - for (p = LIST_FIRST(&hp->head[i]); - p != NULL; p = LIST_NEXT(p, links)) { - if (p->type != TXNLIST_TXNID) { - printf("Unrecognized type: %d\n", p->type); - continue; - } - switch (p->u.t.status) { - case TXN_OK: - txntype = "OK"; - break; - case TXN_COMMIT: - txntype = "commit"; - break; - case TXN_PREPARE: - txntype = "prepare"; - break; - case TXN_ABORT: - txntype = "abort"; - break; - case TXN_IGNORE: - txntype = "ignore"; - break; - case TXN_EXPECTED: - txntype = "expected"; - break; - case TXN_UNEXPECTED: - txntype = "unexpected"; - break; - default: - txntype = "UNKNOWN"; - break; - } - printf("TXNID: %lx(%lu): %s\n", - (u_long)p->u.t.txnid, - (u_long)p->u.t.generation, txntype); - } -} -#endif diff --git a/storage/bdb/db/db_dup.c b/storage/bdb/db/db_dup.c deleted file mode 100644 index 2f0732c6b5c..00000000000 --- a/storage/bdb/db/db_dup.c +++ /dev/null @@ -1,164 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_dup.c,v 12.2 2005/06/16 20:21:10 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/mp.h" -#include "dbinc/db_am.h" - -/* - * __db_ditem -- - * Remove an item from a page. 
- * - * PUBLIC: int __db_ditem __P((DBC *, PAGE *, u_int32_t, u_int32_t)); - */ -int -__db_ditem(dbc, pagep, indx, nbytes) - DBC *dbc; - PAGE *pagep; - u_int32_t indx, nbytes; -{ - DB *dbp; - DBT ldbt; - db_indx_t cnt, *inp, offset; - int ret; - u_int8_t *from; - - dbp = dbc->dbp; - if (DBC_LOGGING(dbc)) { - ldbt.data = P_ENTRY(dbp, pagep, indx); - ldbt.size = nbytes; - if ((ret = __db_addrem_log(dbp, dbc->txn, - &LSN(pagep), 0, DB_REM_DUP, PGNO(pagep), - (u_int32_t)indx, nbytes, &ldbt, NULL, &LSN(pagep))) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(pagep)); - - /* - * If there's only a single item on the page, we don't have to - * work hard. - */ - if (NUM_ENT(pagep) == 1) { - NUM_ENT(pagep) = 0; - HOFFSET(pagep) = dbp->pgsize; - return (0); - } - - inp = P_INP(dbp, pagep); - /* - * Pack the remaining key/data items at the end of the page. Use - * memmove(3), the regions may overlap. - */ - from = (u_int8_t *)pagep + HOFFSET(pagep); - DB_ASSERT(inp[indx] >= HOFFSET(pagep)); - memmove(from + nbytes, from, inp[indx] - HOFFSET(pagep)); - HOFFSET(pagep) += nbytes; - - /* Adjust the indices' offsets. */ - offset = inp[indx]; - for (cnt = 0; cnt < NUM_ENT(pagep); ++cnt) - if (inp[cnt] < offset) - inp[cnt] += nbytes; - - /* Shift the indices down. */ - --NUM_ENT(pagep); - if (indx != NUM_ENT(pagep)) - memmove(&inp[indx], &inp[indx + 1], - sizeof(db_indx_t) * (NUM_ENT(pagep) - indx)); - - return (0); -} - -/* - * __db_pitem -- - * Put an item on a page. - * - * PUBLIC: int __db_pitem - * PUBLIC: __P((DBC *, PAGE *, u_int32_t, u_int32_t, DBT *, DBT *)); - */ -int -__db_pitem(dbc, pagep, indx, nbytes, hdr, data) - DBC *dbc; - PAGE *pagep; - u_int32_t indx; - u_int32_t nbytes; - DBT *hdr, *data; -{ - DB *dbp; - BKEYDATA bk; - DBT thdr; - db_indx_t *inp; - int ret; - u_int8_t *p; - - dbp = dbc->dbp; - if (nbytes > P_FREESPACE(dbp, pagep)) { - DB_ASSERT(nbytes <= P_FREESPACE(dbp, pagep)); - return (EINVAL); - } - /* - * Put a single item onto a page. The logic figuring out where to - * insert and whether it fits is handled in the caller. All we do - * here is manage the page shuffling. We cheat a little bit in that - * we don't want to copy the dbt on a normal put twice. If hdr is - * NULL, we create a BKEYDATA structure on the page, otherwise, just - * copy the caller's information onto the page. - * - * This routine is also used to put entries onto the page where the - * entry is pre-built, e.g., during recovery. In this case, the hdr - * will point to the entry, and the data argument will be NULL. - * - * !!! - * There's a tremendous potential for off-by-one errors here, since - * the passed in header sizes must be adjusted for the structure's - * placeholder for the trailing variable-length data field. - */ - if (DBC_LOGGING(dbc)) { - if ((ret = __db_addrem_log(dbp, dbc->txn, - &LSN(pagep), 0, DB_ADD_DUP, PGNO(pagep), - (u_int32_t)indx, nbytes, hdr, data, &LSN(pagep))) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(pagep)); - - if (hdr == NULL) { - B_TSET(bk.type, B_KEYDATA, 0); - bk.len = data == NULL ? 0 : data->size; - - thdr.data = &bk; - thdr.size = SSZA(BKEYDATA, data); - hdr = &thdr; - } - inp = P_INP(dbp, pagep); - - /* Adjust the index table, then put the item on the page. 
*/ - if (indx != NUM_ENT(pagep)) - memmove(&inp[indx + 1], &inp[indx], - sizeof(db_indx_t) * (NUM_ENT(pagep) - indx)); - HOFFSET(pagep) -= nbytes; - inp[indx] = HOFFSET(pagep); - ++NUM_ENT(pagep); - - p = P_ENTRY(dbp, pagep, indx); - memcpy(p, hdr->data, hdr->size); - if (data != NULL) - memcpy(p + hdr->size, data->data, data->size); - - return (0); -} diff --git a/storage/bdb/db/db_iface.c b/storage/bdb/db/db_iface.c deleted file mode 100644 index 37811a17a26..00000000000 --- a/storage/bdb/db/db_iface.c +++ /dev/null @@ -1,2438 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_iface.c,v 12.29 2005/11/08 14:49:44 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#ifndef HAVE_HASH -#include "dbinc/hash.h" /* For __db_no_hash_am(). */ -#endif -#ifndef HAVE_QUEUE -#include "dbinc/qam.h" /* For __db_no_queue_am(). */ -#endif -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/txn.h" - -static int __db_associate_arg __P((DB *, DB *, - int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t)); -static int __db_c_del_arg __P((DBC *, u_int32_t)); -static int __db_c_get_arg __P((DBC *, DBT *, DBT *, u_int32_t)); -static int __db_c_pget_arg __P((DBC *, DBT *, u_int32_t)); -static int __db_c_put_arg __P((DBC *, DBT *, DBT *, u_int32_t)); -static int __db_curinval __P((const DB_ENV *)); -static int __db_cursor_arg __P((DB *, u_int32_t)); -static int __db_del_arg __P((DB *, u_int32_t)); -static int __db_get_arg __P((const DB *, const DBT *, DBT *, u_int32_t)); -static int __db_join_arg __P((DB *, DBC **, u_int32_t)); -static int __db_open_arg __P((DB *, - DB_TXN *, const char *, const char *, DBTYPE, u_int32_t)); -static int __db_pget_arg __P((DB *, DBT *, u_int32_t)); -static int __db_put_arg __P((DB *, DBT *, DBT *, u_int32_t)); -static int __dbt_ferr __P((const DB *, const char *, const DBT *, int)); - -/* - * These functions implement the Berkeley DB API. They are organized in a - * layered fashion. The interface functions (XXX_pp) perform all generic - * error checks (for example, PANIC'd region, replication state change - * in progress, inconsistent transaction usage), call function-specific - * check routines (_arg) to check for proper flag usage, etc., do pre-amble - * processing (incrementing handle counts, handling local transactions), - * call the function and then do post-amble processing (local transactions, - * decrement handle counts). - * - * The basic structure is: - * Check for simple/generic errors (PANIC'd region) - * Check if replication is changing state (increment handle count). - * Call function-specific argument checking routine - * Create internal transaction if necessary - * Call underlying worker function - * Commit/abort internal transaction if necessary - * Decrement handle count - */ - -/* - * __db_associate_pp -- - * DB->associate pre/post processing. 
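The deleted __db_ditem above compacts a slotted page in place: it memmove()s the packed key/data region up over the freed bytes, then walks the index table bumping every offset that pointed below the removed item, and finally closes the gap in the index array. The same idea in a self-contained sketch over a plain byte buffer; the layout and names are simplified stand-ins, not the BDB page structures.

#include <string.h>

/* Simplified slotted "page": items are packed at the tail of buf,
 * inp[] holds their offsets, hoffset is the low end of the packed region. */
struct slot_page {
    unsigned char buf[4096];
    size_t inp[64];                 /* item offsets into buf */
    size_t nent;                    /* number of items */
    size_t hoffset;                 /* start of packed item region */
};

/* Remove item `indx` of size `nbytes`, keeping the remaining items packed. */
static void
slot_page_ditem(struct slot_page *p, size_t indx, size_t nbytes)
{
    size_t i, offset;
    unsigned char *from = p->buf + p->hoffset;

    /* Slide the packed region up over the hole; the regions may overlap. */
    memmove(from + nbytes, from, p->inp[indx] - p->hoffset);
    p->hoffset += nbytes;

    /* Every offset below the removed item moved up by nbytes. */
    offset = p->inp[indx];
    for (i = 0; i < p->nent; ++i)
        if (p->inp[i] < offset)
            p->inp[i] += nbytes;

    /* Shift the index table down over the removed slot. */
    p->nent--;
    if (indx != p->nent)
        memmove(&p->inp[indx], &p->inp[indx + 1],
            sizeof(p->inp[0]) * (p->nent - indx));
}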
- * - * PUBLIC: int __db_associate_pp __P((DB *, DB_TXN *, DB *, - * PUBLIC: int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t)); - */ -int -__db_associate_pp(dbp, txn, sdbp, callback, flags) - DB *dbp, *sdbp; - DB_TXN *txn; - int (*callback) __P((DB *, const DBT *, const DBT *, DBT *)); - u_int32_t flags; -{ - DBC *sdbc; - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret, txn_local; - - dbenv = dbp->dbenv; - txn_local = 0; - - PANIC_CHECK(dbenv); - STRIP_AUTO_COMMIT(flags); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - /* - * Secondary cursors may have the primary's lock file ID, so we need - * to make sure that no older cursors are lying around when we make - * the transition. - */ - if (TAILQ_FIRST(&sdbp->active_queue) != NULL || - TAILQ_FIRST(&sdbp->join_queue) != NULL) { - __db_err(dbenv, - "Databases may not become secondary indices while cursors are open"); - ret = EINVAL; - goto err; - } - - if ((ret = __db_associate_arg(dbp, sdbp, callback, flags)) != 0) - goto err; - - /* - * Create a local transaction as necessary, check for consistent - * transaction usage, and, if we have no transaction but do have - * locking on, acquire a locker id for the handle lock acquisition. - */ - if (IS_DB_AUTO_COMMIT(dbp, txn)) { - if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) - goto err; - txn_local = 1; - } - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0) - goto err; - - while ((sdbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL) - if ((ret = __db_c_destroy(sdbc)) != 0) - goto err; - - ret = __db_associate(dbp, txn, sdbp, callback, flags); - -err: if (txn_local && - (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0) - ret = t_ret; - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_associate_arg -- - * Check DB->associate arguments. 
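The block comment near the top of the deleted db_iface.c describes the layering used throughout the file: each public entry point (the *_pp function) does the generic checks, handles replication and auto-commit bookkeeping, calls a method-specific *_arg checker and then the worker, and unwinds in reverse order. __db_associate_pp above is one concrete instance. The shape of that control flow, reduced to a hedged skeleton in which every identifier is a placeholder rather than a Berkeley DB symbol:

/* Skeleton of the _pp / _arg / worker layering; placeholder names only. */
typedef struct handle HANDLE;
typedef struct txn TXN;

int  panic_check(HANDLE *);              /* 1. generic environment checks  */
int  arg_check(HANDLE *, unsigned);      /* 2. method-specific flag checks */
int  txn_begin(HANDLE *, TXN **);        /* 3. local auto-commit txn       */
int  worker(HANDLE *, TXN *, unsigned);  /* 4. the real operation          */
int  txn_resolve(TXN *, int ret);        /* 5. commit on success, abort on error */

int
method_pp(HANDLE *h, TXN *txn, unsigned flags)
{
    TXN *local = NULL;
    int ret, t_ret;

    if ((ret = panic_check(h)) != 0)
        return (ret);
    if ((ret = arg_check(h, flags)) != 0)
        return (ret);
    if (txn == NULL) {
        if ((ret = txn_begin(h, &local)) != 0)
            return (ret);
        txn = local;
    }

    ret = worker(h, txn, flags);

    if (local != NULL &&
        (t_ret = txn_resolve(local, ret)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}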
- */ -static int -__db_associate_arg(dbp, sdbp, callback, flags) - DB *dbp, *sdbp; - int (*callback) __P((DB *, const DBT *, const DBT *, DBT *)); - u_int32_t flags; -{ - DB_ENV *dbenv; - int ret; - - dbenv = dbp->dbenv; - - if (F_ISSET(sdbp, DB_AM_SECONDARY)) { - __db_err(dbenv, - "Secondary index handles may not be re-associated"); - return (EINVAL); - } - if (F_ISSET(dbp, DB_AM_SECONDARY)) { - __db_err(dbenv, - "Secondary indices may not be used as primary databases"); - return (EINVAL); - } - if (F_ISSET(dbp, DB_AM_DUP)) { - __db_err(dbenv, - "Primary databases may not be configured with duplicates"); - return (EINVAL); - } - if (F_ISSET(dbp, DB_AM_RENUMBER)) { - __db_err(dbenv, - "Renumbering recno databases may not be used as primary databases"); - return (EINVAL); - } - if (dbp->dbenv != sdbp->dbenv && - (!F_ISSET(dbp->dbenv, DB_ENV_DBLOCAL) || - !F_ISSET(sdbp->dbenv, DB_ENV_DBLOCAL))) { - __db_err(dbenv, - "The primary and secondary must be opened in the same environment"); - return (EINVAL); - } - if ((DB_IS_THREADED(dbp) && !DB_IS_THREADED(sdbp)) || - (!DB_IS_THREADED(dbp) && DB_IS_THREADED(sdbp))) { - __db_err(dbenv, - "The DB_THREAD setting must be the same for primary and secondary"); - return (EINVAL); - } - if (callback == NULL && - (!F_ISSET(dbp, DB_AM_RDONLY) || !F_ISSET(sdbp, DB_AM_RDONLY))) { - __db_err(dbenv, - "Callback function may be NULL only when database handles are read-only"); - return (EINVAL); - } - - if ((ret = __db_fchk(dbenv, "DB->associate", flags, DB_CREATE | - DB_IMMUTABLE_KEY)) != 0) - return (ret); - - return (0); -} - -/* - * __db_close_pp -- - * DB->close pre/post processing. - * - * PUBLIC: int __db_close_pp __P((DB *, u_int32_t)); - */ -int -__db_close_pp(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - ret = 0; - - PANIC_CHECK(dbenv); - - /* - * Close a DB handle -- as a handle destructor, we can't fail. - * - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if (flags != 0 && flags != DB_NOSYNC) - ret = __db_ferr(dbenv, "DB->close", 0); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (t_ret = __db_rep_enter(dbp, 0, 0, 0)) != 0) { - handle_check = 0; - if (ret == 0) - ret = t_ret; - } - - if ((t_ret = __db_close(dbp, NULL, flags)) != 0 && ret == 0) - ret = t_ret; - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_cursor_pp -- - * DB->cursor pre/post processing. - * - * PUBLIC: int __db_cursor_pp __P((DB *, DB_TXN *, DBC **, u_int32_t)); - */ -int -__db_cursor_pp(dbp, txn, dbcp, flags) - DB *dbp; - DB_TXN *txn; - DBC **dbcp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->cursor"); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - if (txn == NULL) { - handle_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; - if (handle_check && (ret = __op_rep_enter(dbenv)) != 0) { - handle_check = 0; - goto err; - } - } else - handle_check = 0; - if ((ret = __db_cursor_arg(dbp, flags)) != 0) - goto err; - - /* - * Check for consistent transaction usage. For now, assume this - * cursor might be used for read operations only (in which case - * it may not require a txn). 
We'll check more stringently in - * c_del and c_put. (Note this means the read-op txn tests have - * to be a subset of the write-op ones.) - */ - if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0) - goto err; - - ret = __db_cursor(dbp, txn, dbcp, flags); - -err: /* Release replication block on error. */ - if (ret != 0 && handle_check) - (void)__op_rep_exit(dbenv); - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_cursor -- - * DB->cursor. - * - * PUBLIC: int __db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t)); - */ -int -__db_cursor(dbp, txn, dbcp, flags) - DB *dbp; - DB_TXN *txn; - DBC **dbcp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DBC *dbc; - db_lockmode_t mode; - u_int32_t op; - int ret; - - dbenv = dbp->dbenv; - - if ((ret = __db_cursor_int(dbp, - txn, dbp->type, PGNO_INVALID, 0, DB_LOCK_INVALIDID, &dbc)) != 0) - return (ret); - - /* - * If this is CDB, do all the locking in the interface, which is - * right here. - */ - if (CDB_LOCKING(dbenv)) { - op = LF_ISSET(DB_OPFLAGS_MASK); - mode = (op == DB_WRITELOCK) ? DB_LOCK_WRITE : - ((op == DB_WRITECURSOR) ? DB_LOCK_IWRITE : DB_LOCK_READ); - if ((ret = __lock_get(dbenv, dbc->locker, 0, - &dbc->lock_dbt, mode, &dbc->mylock)) != 0) - goto err; - if (op == DB_WRITECURSOR) - F_SET(dbc, DBC_WRITECURSOR); - if (op == DB_WRITELOCK) - F_SET(dbc, DBC_WRITER); - } - - if (LF_ISSET(DB_READ_UNCOMMITTED) || - (txn != NULL && F_ISSET(txn, TXN_READ_UNCOMMITTED))) - F_SET(dbc, DBC_READ_UNCOMMITTED); - - if (LF_ISSET(DB_READ_COMMITTED) || - (txn != NULL && F_ISSET(txn, TXN_READ_COMMITTED))) - F_SET(dbc, DBC_READ_COMMITTED); - - *dbcp = dbc; - return (0); - -err: (void)__db_c_close(dbc); - return (ret); -} - -/* - * __db_cursor_arg -- - * Check DB->cursor arguments. - */ -static int -__db_cursor_arg(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - - dbenv = dbp->dbenv; - - /* - * DB_READ_COMMITTED and DB_READ_UNCOMMITTED are the only valid - * bit-flags; they require locking. - */ - if (LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED)) { - if (!LOCKING_ON(dbenv)) - return (__db_fnl(dbenv, "DB->cursor")); - LF_CLR(DB_READ_COMMITTED| DB_READ_UNCOMMITTED); - } - - /* Check for invalid function flags. */ - switch (flags) { - case 0: - break; - case DB_WRITECURSOR: - if (DB_IS_READONLY(dbp)) - return (__db_rdonly(dbenv, "DB->cursor")); - if (!CDB_LOCKING(dbenv)) - return (__db_ferr(dbenv, "DB->cursor", 0)); - break; - case DB_WRITELOCK: - if (DB_IS_READONLY(dbp)) - return (__db_rdonly(dbenv, "DB->cursor")); - break; - default: - return (__db_ferr(dbenv, "DB->cursor", 0)); - } - - return (0); -} - -/* - * __db_del_pp -- - * DB->del pre/post processing. - * - * PUBLIC: int __db_del_pp __P((DB *, DB_TXN *, DBT *, u_int32_t)); - */ -int -__db_del_pp(dbp, txn, key, flags) - DB *dbp; - DB_TXN *txn; - DBT *key; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret, txn_local; - - dbenv = dbp->dbenv; - txn_local = 0; - - PANIC_CHECK(dbenv); - STRIP_AUTO_COMMIT(flags); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del"); - -#if CONFIG_TEST - if (IS_REP_MASTER(dbenv)) - DB_TEST_WAIT(dbenv, dbenv->test_check); -#endif - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - if ((ret = __db_del_arg(dbp, flags)) != 0) - goto err; - - /* Create local transaction as necessary. 
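Under CDB locking, the deleted __db_cursor above takes the per-cursor lock in the interface layer itself, choosing the lock mode from the cursor-open flags: DB_WRITELOCK maps to a write lock, DB_WRITECURSOR to an intent-to-write lock, and everything else to a read lock. A small stand-alone restatement of that mapping; the enum values are invented for the sketch.

enum lock_mode { LOCK_READ, LOCK_IWRITE, LOCK_WRITE };   /* illustrative */
enum open_op   { OP_NONE, OP_WRITECURSOR, OP_WRITELOCK };

static enum lock_mode
cursor_lock_mode(enum open_op op)
{
    /* Same decision as the ?: chain in __db_cursor above. */
    return (op == OP_WRITELOCK ? LOCK_WRITE :
        (op == OP_WRITECURSOR ? LOCK_IWRITE : LOCK_READ));
}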
*/ - if (IS_DB_AUTO_COMMIT(dbp, txn)) { - if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) - goto err; - txn_local = 1; - } - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0) - goto err; - - ret = __db_del(dbp, txn, key, flags); - -err: if (txn_local && - (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0) - ret = t_ret; - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_del_arg -- - * Check DB->delete arguments. - */ -static int -__db_del_arg(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - - dbenv = dbp->dbenv; - - /* Check for changes to a read-only tree. */ - if (DB_IS_READONLY(dbp)) - return (__db_rdonly(dbenv, "DB->del")); - - /* Check for invalid function flags. */ - switch (flags) { - case 0: - break; - default: - return (__db_ferr(dbenv, "DB->del", 0)); - } - - return (0); -} - -/* - * db_fd_pp -- - * DB->fd pre/post processing. - * - * PUBLIC: int __db_fd_pp __P((DB *, int *)); - */ -int -__db_fd_pp(dbp, fdp) - DB *dbp; - int *fdp; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - DB_FH *fhp; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->fd"); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) - goto err; - - /* - * !!! - * There's no argument checking to be done. - * - * !!! - * The actual method call is simple, do it inline. - * - * XXX - * Truly spectacular layering violation. - */ - if ((ret = __mp_xxx_fh(dbp->mpf, &fhp)) == 0) { - if (fhp == NULL) { - *fdp = -1; - __db_err(dbenv, - "Database does not have a valid file handle"); - ret = ENOENT; - } else - *fdp = fhp->fd; - } - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_get_pp -- - * DB->get pre/post processing. - * - * PUBLIC: int __db_get_pp __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); - */ -int -__db_get_pp(dbp, txn, key, data, flags) - DB *dbp; - DB_TXN *txn; - DBT *key, *data; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - u_int32_t mode; - int handle_check, ret, t_ret, txn_local; - - dbenv = dbp->dbenv; - mode = 0; - txn_local = 0; - - PANIC_CHECK(dbenv); - STRIP_AUTO_COMMIT(flags); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get"); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - if ((ret = __db_get_arg(dbp, key, data, flags)) != 0) - goto err; - - if (LF_ISSET(DB_READ_UNCOMMITTED)) - mode = DB_READ_UNCOMMITTED; - else if ((flags & DB_OPFLAGS_MASK) == DB_CONSUME || - (flags & DB_OPFLAGS_MASK) == DB_CONSUME_WAIT) { - mode = DB_WRITELOCK; - if (IS_DB_AUTO_COMMIT(dbp, txn)) { - if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) - goto err; - txn_local = 1; - } - } - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, - mode == DB_WRITELOCK || LF_ISSET(DB_RMW) ? 
0 : 1)) != 0) - goto err; - - ret = __db_get(dbp, txn, key, data, flags); - -err: if (txn_local && - (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0) - ret = t_ret; - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_get -- - * DB->get. - * - * PUBLIC: int __db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); - */ -int -__db_get(dbp, txn, key, data, flags) - DB *dbp; - DB_TXN *txn; - DBT *key, *data; - u_int32_t flags; -{ - DBC *dbc; - u_int32_t mode; - int ret, t_ret; - - mode = 0; - if (LF_ISSET(DB_READ_UNCOMMITTED)) { - mode = DB_READ_UNCOMMITTED; - LF_CLR(DB_READ_UNCOMMITTED); - } else if (LF_ISSET(DB_READ_COMMITTED)) { - mode = DB_READ_COMMITTED; - LF_CLR(DB_READ_COMMITTED); - } else if ((flags & DB_OPFLAGS_MASK) == DB_CONSUME || - (flags & DB_OPFLAGS_MASK) == DB_CONSUME_WAIT) - mode = DB_WRITELOCK; - - if ((ret = __db_cursor(dbp, txn, &dbc, mode)) != 0) - return (ret); - - DEBUG_LREAD(dbc, txn, "DB->get", key, NULL, flags); - - /* - * The DBC_TRANSIENT flag indicates that we're just doing a - * single operation with this cursor, and that in case of - * error we don't need to restore it to its old position--we're - * going to close it right away. Thus, we can perform the get - * without duplicating the cursor, saving some cycles in this - * common case. - */ - F_SET(dbc, DBC_TRANSIENT); - - /* - * SET_RET_MEM indicates that if key and/or data have no DBT - * flags set and DB manages the returned-data memory, that memory - * will belong to this handle, not to the underlying cursor. - */ - SET_RET_MEM(dbc, dbp); - - if (LF_ISSET(~(DB_RMW | DB_MULTIPLE)) == 0) - LF_SET(DB_SET); - - ret = __db_c_get(dbc, key, data, flags); - - if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_get_arg -- - * DB->get argument checking, used by both DB->get and DB->pget. - */ -static int -__db_get_arg(dbp, key, data, flags) - const DB *dbp; - const DBT *key; - DBT *data; - u_int32_t flags; -{ - DB_ENV *dbenv; - int check_thread, dirty, multi, ret; - - dbenv = dbp->dbenv; - - /* - * Check for read-modify-write validity. DB_RMW doesn't make sense - * with CDB cursors since if you're going to write the cursor, you - * had to create it with DB_WRITECURSOR. Regardless, we check for - * LOCKING_ON and not STD_LOCKING, as we don't want to disallow it. - * If this changes, confirm that DB does not itself set the DB_RMW - * flag in a path where CDB may have been configured. - */ - check_thread = dirty = 0; - if (LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW)) { - if (!LOCKING_ON(dbenv)) - return (__db_fnl(dbenv, "DB->get")); - if ((ret = __db_fcchk(dbenv, "DB->get", - flags, DB_READ_UNCOMMITTED, DB_READ_COMMITTED)) != 0) - return (ret); - if (LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED)) - dirty = 1; - LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW); - } - - multi = 0; - if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) { - if (LF_ISSET(DB_MULTIPLE_KEY)) - goto multi_err; - multi = LF_ISSET(DB_MULTIPLE) ? 1 : 0; - LF_CLR(DB_MULTIPLE); - } - - /* Check for invalid function flags. 
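The deleted __db_get above shows the standard one-shot pattern: open a cursor, mark it DBC_TRANSIENT so it never needs to be restored on error, perform the single c_get, and close it, folding the close status into the return value. The same open/operate/close-with-merged-error shape as a generic sketch; the handle and function names are placeholders, not the BDB API.

/* One-shot "get via temporary cursor" pattern; placeholder names only. */
struct db;  struct cursor;  struct dbt;

int cursor_open(struct db *, struct cursor **);
int cursor_get(struct cursor *, struct dbt *key, struct dbt *data);
int cursor_close(struct cursor *);

int
db_get_once(struct db *dbp, struct dbt *key, struct dbt *data)
{
    struct cursor *dbc;
    int ret, t_ret;

    if ((ret = cursor_open(dbp, &dbc)) != 0)
        return (ret);

    ret = cursor_get(dbc, key, data);

    /* Always close; report the first error that occurred. */
    if ((t_ret = cursor_close(dbc)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}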
*/ - switch (flags) { - case 0: - case DB_GET_BOTH: - break; - case DB_SET_RECNO: - check_thread = 1; - if (!F_ISSET(dbp, DB_AM_RECNUM)) - goto err; - break; - case DB_CONSUME: - case DB_CONSUME_WAIT: - check_thread = 1; - if (dirty) { - __db_err(dbenv, - "%s is not supported with DB_CONSUME or DB_CONSUME_WAIT", - LF_ISSET(DB_READ_UNCOMMITTED) ? - "DB_READ_UNCOMMITTED" : "DB_READ_COMMITTED"); - return (EINVAL); - } - if (multi) -multi_err: return (__db_ferr(dbenv, "DB->get", 1)); - if (dbp->type == DB_QUEUE) - break; - /* FALLTHROUGH */ - default: -err: return (__db_ferr(dbenv, "DB->get", 0)); - } - - /* - * Check for invalid key/data flags. - * - * XXX: Dave Krinsky - * Remember to modify this when we fix the flag-returning problem. - */ - if ((ret = __dbt_ferr(dbp, "key", key, check_thread)) != 0) - return (ret); - if ((ret = __dbt_ferr(dbp, "data", data, 1)) != 0) - return (ret); - - if (multi) { - if (!F_ISSET(data, DB_DBT_USERMEM)) { - __db_err(dbenv, - "DB_MULTIPLE requires DB_DBT_USERMEM be set"); - return (EINVAL); - } - if (F_ISSET(key, DB_DBT_PARTIAL) || - F_ISSET(data, DB_DBT_PARTIAL)) { - __db_err(dbenv, - "DB_MULTIPLE does not support DB_DBT_PARTIAL"); - return (EINVAL); - } - if (data->ulen < 1024 || - data->ulen < dbp->pgsize || data->ulen % 1024 != 0) { - __db_err(dbenv, "%s%s", - "DB_MULTIPLE buffers must be ", - "aligned, at least page size and multiples of 1KB"); - return (EINVAL); - } - } - - return (0); -} - -/* - * __db_join_pp -- - * DB->join pre/post processing. - * - * PUBLIC: int __db_join_pp __P((DB *, DBC **, DBC **, u_int32_t)); - */ -int -__db_join_pp(primary, curslist, dbcp, flags) - DB *primary; - DBC **curslist, **dbcp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = primary->dbenv; - - PANIC_CHECK(dbenv); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = - __db_rep_enter(primary, 1, 0, curslist[0]->txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - if ((ret = __db_join_arg(primary, curslist, flags)) == 0) - ret = __db_join(primary, curslist, dbcp, flags); - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_join_arg -- - * Check DB->join arguments. - */ -static int -__db_join_arg(primary, curslist, flags) - DB *primary; - DBC **curslist; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_TXN *txn; - int i; - - dbenv = primary->dbenv; - - switch (flags) { - case 0: - case DB_JOIN_NOSORT: - break; - default: - return (__db_ferr(dbenv, "DB->join", 0)); - } - - if (curslist == NULL || curslist[0] == NULL) { - __db_err(dbenv, - "At least one secondary cursor must be specified to DB->join"); - return (EINVAL); - } - - txn = curslist[0]->txn; - for (i = 1; curslist[i] != NULL; i++) - if (curslist[i]->txn != txn) { - __db_err(dbenv, - "All secondary cursors must share the same transaction"); - return (EINVAL); - } - - return (0); -} - -/* - * __db_key_range_pp -- - * DB->key_range pre/post processing. 
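The deleted __db_get_arg above enforces the DB_MULTIPLE buffer rules: the return buffer must be user-allocated (DB_DBT_USERMEM), at least 1KB, at least one page in size, and a multiple of 1KB. The size test alone, expressed as a tiny predicate with illustrative names:

#include <stddef.h>

/* Mirror of the DB_MULTIPLE buffer-size test in __db_get_arg above. */
static int
multiple_buf_ok(size_t ulen, size_t pgsize)
{
    return (ulen >= 1024 && ulen >= pgsize && ulen % 1024 == 0);
}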
- * - * PUBLIC: int __db_key_range_pp - * PUBLIC: __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t)); - */ -int -__db_key_range_pp(dbp, txn, key, kr, flags) - DB *dbp; - DB_TXN *txn; - DBT *key; - DB_KEY_RANGE *kr; - u_int32_t flags; -{ - DBC *dbc; - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbp->dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->key_range"); - - /* - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if (flags != 0) - return (__db_ferr(dbenv, "DB->key_range", 0)); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0) - goto err; - - /* - * !!! - * The actual method call is simple, do it inline. - */ - switch (dbp->type) { - case DB_BTREE: - /* Acquire a cursor. */ - if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) - break; - - DEBUG_LWRITE(dbc, NULL, "bam_key_range", NULL, NULL, 0); - - ret = __bam_key_range(dbc, key, kr, flags); - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - break; - case DB_HASH: - case DB_QUEUE: - case DB_RECNO: - ret = __dbh_am_chk(dbp, DB_OK_BTREE); - break; - case DB_UNKNOWN: - default: - ret = __db_unknown_type(dbenv, "DB->key_range", dbp->type); - break; - } - -err: /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_open_pp -- - * DB->open pre/post processing. - * - * PUBLIC: int __db_open_pp __P((DB *, DB_TXN *, - * PUBLIC: const char *, const char *, DBTYPE, u_int32_t, int)); - */ -int -__db_open_pp(dbp, txn, fname, dname, type, flags, mode) - DB *dbp; - DB_TXN *txn; - const char *fname, *dname; - DBTYPE type; - u_int32_t flags; - int mode; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, nosync, remove_me, ret, t_ret, txn_local; - - dbenv = dbp->dbenv; - nosync = 1; - remove_me = txn_local = 0; - handle_check = 0; - - PANIC_CHECK(dbenv); - - ENV_ENTER(dbenv, ip); - - /* - * Save the file and database names and flags. We do this here - * because we don't pass all of the flags down into the actual - * DB->open method call, we strip DB_AUTO_COMMIT at this layer. - */ - if ((fname != NULL && - (ret = __os_strdup(dbenv, fname, &dbp->fname)) != 0)) - goto err; - if ((dname != NULL && - (ret = __os_strdup(dbenv, dname, &dbp->dname)) != 0)) - goto err; - dbp->open_flags = flags; - - /* Save the current DB handle flags for refresh. */ - dbp->orig_flags = dbp->flags; - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - /* - * Create local transaction as necessary, check for consistent - * transaction usage. - */ - if (IS_ENV_AUTO_COMMIT(dbenv, txn, flags)) { - if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0) - goto err; - txn_local = 1; - } else - if (txn != NULL && !TXN_ON(dbenv)) { - ret = __db_not_txn_env(dbenv); - goto err; - } - LF_CLR(DB_AUTO_COMMIT); - - /* - * We check arguments after possibly creating a local transaction, - * which is unusual -- the reason is some flags are illegal if any - * kind of transaction is in effect. 
- */ - if ((ret = __db_open_arg(dbp, txn, fname, dname, type, flags)) == 0) - if ((ret = __db_open(dbp, txn, fname, dname, type, - flags, mode, PGNO_BASE_MD)) != 0) - goto txnerr; - - /* - * You can open the database that describes the subdatabases in the - * rest of the file read-only. The content of each key's data is - * unspecified and applications should never be adding new records - * or updating existing records. However, during recovery, we need - * to open these databases R/W so we can redo/undo changes in them. - * Likewise, we need to open master databases read/write during - * rename and remove so we can be sure they're fully sync'ed, so - * we provide an override flag for the purpose. - */ - if (dname == NULL && !IS_RECOVERING(dbenv) && !LF_ISSET(DB_RDONLY) && - !LF_ISSET(DB_RDWRMASTER) && F_ISSET(dbp, DB_AM_SUBDB)) { - __db_err(dbenv, - "files containing multiple databases may only be opened read-only"); - ret = EINVAL; - goto txnerr; - } - - /* - * Success: file creations have to be synchronous, otherwise we don't - * care. - */ - if (F_ISSET(dbp, DB_AM_CREATED | DB_AM_CREATED_MSTR)) - nosync = 0; - - /* Success: don't discard the file on close. */ - F_CLR(dbp, DB_AM_DISCARD | DB_AM_CREATED | DB_AM_CREATED_MSTR); - - /* - * If not transactional, remove the databases/subdatabases. If we're - * transactional, the child transaction abort cleans up. - */ -txnerr: if (ret != 0 && txn == NULL) { - remove_me = F_ISSET(dbp, DB_AM_CREATED); - if (F_ISSET(dbp, DB_AM_CREATED_MSTR) || - (dname == NULL && remove_me)) - /* Remove file. */ - (void)__db_remove_int(dbp, txn, fname, NULL, DB_FORCE); - else if (remove_me) - /* Remove subdatabase. */ - (void)__db_remove_int(dbp, txn, fname, dname, DB_FORCE); - } - - if (txn_local && (t_ret = - __db_txn_auto_resolve(dbenv, txn, nosync, ret)) && ret == 0) - ret = t_ret; - -err: /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_open_arg -- - * Check DB->open arguments. - */ -static int -__db_open_arg(dbp, txn, fname, dname, type, flags) - DB *dbp; - DB_TXN *txn; - const char *fname, *dname; - DBTYPE type; - u_int32_t flags; -{ - DB_ENV *dbenv; - u_int32_t ok_flags; - int ret; - - dbenv = dbp->dbenv; - - /* Validate arguments. 
*/ -#undef OKFLAGS -#define OKFLAGS \ - (DB_AUTO_COMMIT | DB_CREATE | DB_EXCL | DB_FCNTL_LOCKING | \ - DB_NOMMAP | DB_NO_AUTO_COMMIT | DB_RDONLY | DB_RDWRMASTER | \ - DB_READ_UNCOMMITTED | DB_THREAD | DB_TRUNCATE | DB_WRITEOPEN) - if ((ret = __db_fchk(dbenv, "DB->open", flags, OKFLAGS)) != 0) - return (ret); - if (LF_ISSET(DB_EXCL) && !LF_ISSET(DB_CREATE)) - return (__db_ferr(dbenv, "DB->open", 1)); - if (LF_ISSET(DB_RDONLY) && LF_ISSET(DB_CREATE)) - return (__db_ferr(dbenv, "DB->open", 1)); - -#ifdef HAVE_VXWORKS - if (LF_ISSET(DB_TRUNCATE)) { - __db_err(dbenv, "DB_TRUNCATE not supported on VxWorks"); - return (DB_OPNOTSUP); - } -#endif - switch (type) { - case DB_UNKNOWN: - if (LF_ISSET(DB_CREATE|DB_TRUNCATE)) { - __db_err(dbenv, - "DB_UNKNOWN type specified with DB_CREATE or DB_TRUNCATE"); - return (EINVAL); - } - ok_flags = 0; - break; - case DB_BTREE: - ok_flags = DB_OK_BTREE; - break; - case DB_HASH: -#ifndef HAVE_HASH - return (__db_no_hash_am(dbenv)); -#endif - ok_flags = DB_OK_HASH; - break; - case DB_QUEUE: -#ifndef HAVE_QUEUE - return (__db_no_queue_am(dbenv)); -#endif - ok_flags = DB_OK_QUEUE; - break; - case DB_RECNO: - ok_flags = DB_OK_RECNO; - break; - default: - __db_err(dbenv, "unknown type: %lu", (u_long)type); - return (EINVAL); - } - if (ok_flags) - DB_ILLEGAL_METHOD(dbp, ok_flags); - - /* The environment may have been created, but never opened. */ - if (!F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_OPEN_CALLED)) { - __db_err(dbenv, "environment not yet opened"); - return (EINVAL); - } - - /* - * Historically, you could pass in an environment that didn't have a - * mpool, and DB would create a private one behind the scenes. This - * no longer works. - */ - if (!F_ISSET(dbenv, DB_ENV_DBLOCAL) && !MPOOL_ON(dbenv)) { - __db_err(dbenv, "environment did not include a memory pool"); - return (EINVAL); - } - - /* - * You can't specify threads during DB->open if subsystems in the - * environment weren't configured with them. - */ - if (LF_ISSET(DB_THREAD) && - !F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_THREAD)) { - __db_err(dbenv, "environment not created using DB_THREAD"); - return (EINVAL); - } - - /* DB_TRUNCATE is neither transaction recoverable nor lockable. */ - if (LF_ISSET(DB_TRUNCATE) && (LOCKING_ON(dbenv) || txn != NULL)) { - __db_err(dbenv, - "DB_TRUNCATE illegal with %s specified", - LOCKING_ON(dbenv) ? "locking" : "transactions"); - return (EINVAL); - } - - /* Subdatabase checks. */ - if (dname != NULL) { - /* QAM can only be done on in-memory subdatabases. */ - if (type == DB_QUEUE && fname != NULL) { - __db_err(dbenv, "Queue databases must be one-per-file"); - return (EINVAL); - } - - /* - * Named in-memory databases can't support certain flags, - * so check here. - */ - if (fname == NULL) - F_CLR(dbp, DB_AM_CHKSUM | DB_AM_ENCRYPT); - } - - return (0); -} - -/* - * __db_pget_pp -- - * DB->pget pre/post processing. - * - * PUBLIC: int __db_pget_pp - * PUBLIC: __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t)); - */ -int -__db_pget_pp(dbp, txn, skey, pkey, data, flags) - DB *dbp; - DB_TXN *txn; - DBT *skey, *pkey, *data; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->pget"); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. 
*/ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - if ((ret = __db_pget_arg(dbp, pkey, flags)) != 0 || - (ret = __db_get_arg(dbp, skey, data, flags)) != 0) - goto err; - - ret = __db_pget(dbp, txn, skey, pkey, data, flags); - -err: /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_pget -- - * DB->pget. - * - * PUBLIC: int __db_pget - * PUBLIC: __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t)); - */ -int -__db_pget(dbp, txn, skey, pkey, data, flags) - DB *dbp; - DB_TXN *txn; - DBT *skey, *pkey, *data; - u_int32_t flags; -{ - DBC *dbc; - u_int32_t mode; - int ret, t_ret; - - if (LF_ISSET(DB_READ_UNCOMMITTED)) { - mode = DB_READ_UNCOMMITTED; - LF_CLR(DB_READ_UNCOMMITTED); - } else if (LF_ISSET(DB_READ_COMMITTED)) { - mode = DB_READ_COMMITTED; - LF_CLR(DB_READ_COMMITTED); - } else - mode = 0; - - if ((ret = __db_cursor(dbp, txn, &dbc, mode)) != 0) - return (ret); - - SET_RET_MEM(dbc, dbp); - - DEBUG_LREAD(dbc, txn, "__db_pget", skey, NULL, flags); - - /* - * !!! - * The actual method call is simple, do it inline. - * - * The underlying cursor pget will fill in a default DBT for null - * pkeys, and use the cursor's returned-key memory internally to - * store any intermediate primary keys. However, we've just set - * the returned-key memory to the DB handle's key memory, which - * is unsafe to use if the DB handle is threaded. If the pkey - * argument is NULL, use the DBC-owned returned-key memory - * instead; it'll go away when we close the cursor before we - * return, but in this case that's just fine, as we're not - * returning the primary key. - */ - if (pkey == NULL) - dbc->rkey = &dbc->my_rkey; - - /* - * The cursor is just a perfectly ordinary secondary database cursor. - * Call its c_pget() method to do the dirty work. - */ - if (flags == 0 || flags == DB_RMW) - flags |= DB_SET; - - ret = __db_c_pget(dbc, skey, pkey, data, flags); - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_pget_arg -- - * Check DB->pget arguments. - */ -static int -__db_pget_arg(dbp, pkey, flags) - DB *dbp; - DBT *pkey; - u_int32_t flags; -{ - DB_ENV *dbenv; - int ret; - - dbenv = dbp->dbenv; - - if (!F_ISSET(dbp, DB_AM_SECONDARY)) { - __db_err(dbenv, - "DB->pget may only be used on secondary indices"); - return (EINVAL); - } - - if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) { - __db_err(dbenv, - "DB_MULTIPLE and DB_MULTIPLE_KEY may not be used on secondary indices"); - return (EINVAL); - } - - /* DB_CONSUME makes no sense on a secondary index. */ - LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW); - switch (flags) { - case DB_CONSUME: - case DB_CONSUME_WAIT: - return (__db_ferr(dbenv, "DB->pget", 0)); - default: - /* __db_get_arg will catch the rest. */ - break; - } - - /* - * We allow the pkey field to be NULL, so that we can make the - * two-DBT get calls into wrappers for the three-DBT ones. - */ - if (pkey != NULL && - (ret = __dbt_ferr(dbp, "primary key", pkey, 1)) != 0) - return (ret); - - /* But the pkey field can't be NULL if we're doing a DB_GET_BOTH. */ - if (pkey == NULL && flags == DB_GET_BOTH) { - __db_err(dbenv, - "DB_GET_BOTH on a secondary index requires a primary key"); - return (EINVAL); - } - - return (0); -} - -/* - * __db_put_pp -- - * DB->put pre/post processing. 
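As the comment in the deleted __db_pget explains, a NULL primary-key DBT is tolerated precisely so the ordinary two-DBT get call can be a thin wrapper around the three-DBT pget. That wrapper idea in isolation, with every name a placeholder:

struct db;  struct dbt;

int pget3(struct db *, struct dbt *skey, struct dbt *pkey, struct dbt *data);

/* Two-DBT get expressed as the three-DBT call with no primary key requested. */
int
get2(struct db *dbp, struct dbt *skey, struct dbt *data)
{
    return (pget3(dbp, skey, NULL, data));
}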
- * - * PUBLIC: int __db_put_pp __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); - */ -int -__db_put_pp(dbp, txn, key, data, flags) - DB *dbp; - DB_TXN *txn; - DBT *key, *data; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, txn_local, t_ret; - - dbenv = dbp->dbenv; - txn_local = 0; - - PANIC_CHECK(dbenv); - STRIP_AUTO_COMMIT(flags); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->put"); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - if ((ret = __db_put_arg(dbp, key, data, flags)) != 0) - goto err; - - /* Create local transaction as necessary. */ - if (IS_DB_AUTO_COMMIT(dbp, txn)) { - if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) - goto err; - txn_local = 1; - } - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0) - goto err; - - ret = __db_put(dbp, txn, key, data, flags); - -err: if (txn_local && - (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0) - ret = t_ret; - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_put_arg -- - * Check DB->put arguments. - */ -static int -__db_put_arg(dbp, key, data, flags) - DB *dbp; - DBT *key, *data; - u_int32_t flags; -{ - DB_ENV *dbenv; - int ret, returnkey; - - dbenv = dbp->dbenv; - returnkey = 0; - - /* Check for changes to a read-only tree. */ - if (DB_IS_READONLY(dbp)) - return (__db_rdonly(dbenv, "DB->put")); - - /* Check for puts on a secondary. */ - if (F_ISSET(dbp, DB_AM_SECONDARY)) { - __db_err(dbenv, "DB->put forbidden on secondary indices"); - return (EINVAL); - } - - /* Check for invalid function flags. */ - switch (flags) { - case 0: - case DB_NOOVERWRITE: - break; - case DB_APPEND: - if (dbp->type != DB_RECNO && dbp->type != DB_QUEUE) - goto err; - returnkey = 1; - break; - case DB_NODUPDATA: - if (F_ISSET(dbp, DB_AM_DUPSORT)) - break; - /* FALLTHROUGH */ - default: -err: return (__db_ferr(dbenv, "DB->put", 0)); - } - - /* Check for invalid key/data flags. */ - if ((ret = __dbt_ferr(dbp, "key", key, returnkey)) != 0) - return (ret); - if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0) - return (ret); - - /* Keys shouldn't have partial flags during a put. */ - if (F_ISSET(key, DB_DBT_PARTIAL)) - return (__db_ferr(dbenv, "key DBT", 0)); - - /* Check for partial puts in the presence of duplicates. */ - if (F_ISSET(data, DB_DBT_PARTIAL) && - (F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))) { - __db_err(dbenv, -"a partial put in the presence of duplicates requires a cursor operation"); - return (EINVAL); - } - - return (0); -} - -/* - * __db_compact_pp -- - * DB->compact pre/post processing. - * - * PUBLIC: int __db_compact_pp __P((DB *, DB_TXN *, - * PUBLIC: DBT *, DBT *, DB_COMPACT *, u_int32_t, DBT *)); - */ -int -__db_compact_pp(dbp, txn, start, stop, c_data, flags, end) - DB *dbp; - DB_TXN *txn; - DBT *start, *stop; - DB_COMPACT *c_data; - u_int32_t flags; - DBT *end; -{ - DB_COMPACT *dp, l_data; - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->compact"); - - /* - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. 
- */ - if ((flags & ~DB_COMPACT_FLAGS) != 0) - return (__db_ferr(dbenv, "DB->compact", 0)); - - /* Check for changes to a read-only database. */ - if (DB_IS_READONLY(dbp)) - return (__db_rdonly(dbenv, "DB->compact")); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) { - handle_check = 0; - goto err; - } - - if (c_data == NULL) { - dp = &l_data; - memset(dp, 0, sizeof(*dp)); - } else - dp = c_data; - - switch (dbp->type) { - case DB_HASH: - if (!LF_ISSET(DB_FREELIST_ONLY)) - goto err; - /* FALLTHROUGH */ - case DB_BTREE: - case DB_RECNO: - ret = __bam_compact(dbp, txn, start, stop, dp, flags, end); - break; - - default: -err: ret = __dbh_am_chk(dbp, DB_OK_BTREE); - break; - } - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_sync_pp -- - * DB->sync pre/post processing. - * - * PUBLIC: int __db_sync_pp __P((DB *, u_int32_t)); - */ -int -__db_sync_pp(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync"); - - /* - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if (flags != 0) - return (__db_ferr(dbenv, "DB->sync", 0)); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) { - handle_check = 0; - goto err; - } - - ret = __db_sync(dbp); - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_close_pp -- - * DBC->c_close pre/post processing. - * - * PUBLIC: int __db_c_close_pp __P((DBC *)); - */ -int -__db_c_close_pp(dbc) - DBC *dbc; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - DB *dbp; - int handle_check, ret, t_ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - ENV_ENTER(dbenv, ip); - - /* - * If the cursor is already closed we have a serious problem, and we - * assume that the cursor isn't on the active queue. Don't do any of - * the remaining cursor close processing. - */ - if (!F_ISSET(dbc, DBC_ACTIVE)) { - if (dbp != NULL) - __db_err(dbenv, "Closing already-closed cursor"); - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - - /* Check for replication block. */ - handle_check = dbc->txn == NULL && IS_ENV_REPLICATED(dbenv); - ret = __db_c_close(dbc); - - /* Release replication block. */ - if (handle_check && - (t_ret = __op_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_count_pp -- - * DBC->c_count pre/post processing. - * - * PUBLIC: int __db_c_count_pp __P((DBC *, db_recno_t *, u_int32_t)); - */ -int -__db_c_count_pp(dbc, recnop, flags) - DBC *dbc; - db_recno_t *recnop; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - DB *dbp; - int ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - - /* - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. - * - * The cursor must be initialized, return EINVAL for an invalid cursor. 
- */ - if (flags != 0) - return (__db_ferr(dbenv, "DBcursor->count", 0)); - - if (!IS_INITIALIZED(dbc)) - return (__db_curinval(dbenv)); - - ENV_ENTER(dbenv, ip); - - ret = __db_c_count(dbc, recnop); - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_del_pp -- - * DBC->c_del pre/post processing. - * - * PUBLIC: int __db_c_del_pp __P((DBC *, u_int32_t)); - */ -int -__db_c_del_pp(dbc, flags) - DBC *dbc; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - - if ((ret = __db_c_del_arg(dbc, flags)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0) - goto err; - - DEBUG_LWRITE(dbc, dbc->txn, "DBcursor->del", NULL, NULL, flags); - ret = __db_c_del(dbc, flags); -err: - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_del_arg -- - * Check DBC->c_del arguments. - */ -static int -__db_c_del_arg(dbc, flags) - DBC *dbc; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - /* Check for changes to a read-only tree. */ - if (DB_IS_READONLY(dbp)) - return (__db_rdonly(dbenv, "DBcursor->del")); - - /* Check for invalid function flags. */ - switch (flags) { - case 0: - break; - case DB_UPDATE_SECONDARY: - DB_ASSERT(F_ISSET(dbp, DB_AM_SECONDARY)); - break; - default: - return (__db_ferr(dbenv, "DBcursor->del", 0)); - } - - /* - * The cursor must be initialized, return EINVAL for an invalid cursor, - * otherwise 0. - */ - if (!IS_INITIALIZED(dbc)) - return (__db_curinval(dbenv)); - - return (0); -} - -/* - * __db_c_dup_pp -- - * DBC->c_dup pre/post processing. - * - * PUBLIC: int __db_c_dup_pp __P((DBC *, DBC **, u_int32_t)); - */ -int -__db_c_dup_pp(dbc, dbcp, flags) - DBC *dbc, **dbcp; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - - /* - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if (flags != 0 && flags != DB_POSITION) - return (__db_ferr(dbenv, "DBcursor->dup", 0)); - - ENV_ENTER(dbenv, ip); - - ret = __db_c_dup(dbc, dbcp, flags); - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_get_pp -- - * DBC->c_get pre/post processing. - * - * PUBLIC: int __db_c_get_pp __P((DBC *, DBT *, DBT *, u_int32_t)); - */ -int -__db_c_get_pp(dbc, key, data, flags) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - int ret; - DB_THREAD_INFO *ip; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - - if ((ret = __db_c_get_arg(dbc, key, data, flags)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - DEBUG_LREAD(dbc, dbc->txn, "DBcursor->get", - flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags); - ret = __db_c_get(dbc, key, data, flags); - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_get_arg -- - * Common DBC->get argument checking, used by both DBC->get and DBC->pget. - */ -static int -__db_c_get_arg(dbc, key, data, flags) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - int dirty, multi, ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - /* - * Typically in checking routines that modify the flags, we have - * to save them and restore them, because the checking routine - * calls the work routine. 
However, this is a pure-checking - * routine which returns to a function that calls the work routine, - * so it's OK that we do not save and restore the flags, even though - * we modify them. - * - * Check for read-modify-write validity. DB_RMW doesn't make sense - * with CDB cursors since if you're going to write the cursor, you - * had to create it with DB_WRITECURSOR. Regardless, we check for - * LOCKING_ON and not STD_LOCKING, as we don't want to disallow it. - * If this changes, confirm that DB does not itself set the DB_RMW - * flag in a path where CDB may have been configured. - */ - dirty = 0; - if (LF_ISSET(DB_READ_UNCOMMITTED | DB_RMW)) { - if (!LOCKING_ON(dbenv)) - return (__db_fnl(dbenv, "DBcursor->get")); - if (LF_ISSET(DB_READ_UNCOMMITTED)) - dirty = 1; - LF_CLR(DB_READ_UNCOMMITTED | DB_RMW); - } - - multi = 0; - if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) { - multi = 1; - if (LF_ISSET(DB_MULTIPLE) && LF_ISSET(DB_MULTIPLE_KEY)) - goto multi_err; - LF_CLR(DB_MULTIPLE | DB_MULTIPLE_KEY); - } - - /* Check for invalid function flags. */ - switch (flags) { - case DB_CONSUME: - case DB_CONSUME_WAIT: - if (dirty) { - __db_err(dbenv, - "DB_READ_UNCOMMITTED is not supported with DB_CONSUME or DB_CONSUME_WAIT"); - return (EINVAL); - } - if (dbp->type != DB_QUEUE) - goto err; - break; - case DB_CURRENT: - case DB_FIRST: - case DB_GET_BOTH: - case DB_GET_BOTH_RANGE: - case DB_NEXT: - case DB_NEXT_DUP: - case DB_NEXT_NODUP: - case DB_SET: - case DB_SET_RANGE: - break; - case DB_LAST: - case DB_PREV: - case DB_PREV_NODUP: - if (multi) -multi_err: return (__db_ferr(dbenv, "DBcursor->get", 1)); - break; - case DB_GET_BOTHC: - if (dbp->type == DB_QUEUE) - goto err; - break; - case DB_GET_RECNO: - /* - * The one situation in which this might be legal with a - * non-RECNUM dbp is if dbp is a secondary and its primary is - * DB_AM_RECNUM. - */ - if (!F_ISSET(dbp, DB_AM_RECNUM) && - (!F_ISSET(dbp, DB_AM_SECONDARY) || - !F_ISSET(dbp->s_primary, DB_AM_RECNUM))) - goto err; - break; - case DB_SET_RECNO: - if (!F_ISSET(dbp, DB_AM_RECNUM)) - goto err; - break; - default: -err: return (__db_ferr(dbenv, "DBcursor->get", 0)); - } - - /* Check for invalid key/data flags. */ - if ((ret = __dbt_ferr(dbp, "key", key, 0)) != 0) - return (ret); - if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0) - return (ret); - - if (multi) { - if (!F_ISSET(data, DB_DBT_USERMEM)) { - __db_err(dbenv, - "DB_MULTIPLE/DB_MULTIPLE_KEY require DB_DBT_USERMEM be set"); - return (EINVAL); - } - if (F_ISSET(key, DB_DBT_PARTIAL) || - F_ISSET(data, DB_DBT_PARTIAL)) { - __db_err(dbenv, - "DB_MULTIPLE/DB_MULTIPLE_KEY do not support DB_DBT_PARTIAL"); - return (EINVAL); - } - if (data->ulen < 1024 || - data->ulen < dbp->pgsize || data->ulen % 1024 != 0) { - __db_err(dbenv, "%s%s", - "DB_MULTIPLE/DB_MULTIPLE_KEY buffers must be ", - "aligned, at least page size and multiples of 1KB"); - return (EINVAL); - } - } - - /* - * The cursor must be initialized for DB_CURRENT, DB_GET_RECNO and - * DB_NEXT_DUP. Return EINVAL for an invalid cursor, otherwise 0. - */ - if (!IS_INITIALIZED(dbc) && (flags == DB_CURRENT || - flags == DB_GET_RECNO || flags == DB_NEXT_DUP)) - return (__db_curinval(dbenv)); - - /* Check for consistent transaction usage. 
*/ - if (LF_ISSET(DB_RMW) && - (ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0) - return (ret); - - return (0); -} - -/* - * __db_secondary_close_pp -- - * DB->close for secondaries - * - * PUBLIC: int __db_secondary_close_pp __P((DB *, u_int32_t)); - */ -int -__db_secondary_close_pp(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - ret = 0; - - PANIC_CHECK(dbenv); - - /* - * As a DB handle destructor, we can't fail. - * - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if (flags != 0 && flags != DB_NOSYNC) - ret = __db_ferr(dbenv, "DB->close", 0); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (t_ret = __db_rep_enter(dbp, 0, 0, 0)) != 0) { - handle_check = 0; - if (ret == 0) - ret = t_ret; - } - - if ((t_ret = __db_secondary_close(dbp, flags)) != 0 && ret == 0) - ret = t_ret; - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_pget_pp -- - * DBC->c_pget pre/post processing. - * - * PUBLIC: int __db_c_pget_pp __P((DBC *, DBT *, DBT *, DBT *, u_int32_t)); - */ -int -__db_c_pget_pp(dbc, skey, pkey, data, flags) - DBC *dbc; - DBT *skey, *pkey, *data; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - - if ((ret = __db_c_pget_arg(dbc, pkey, flags)) != 0 || - (ret = __db_c_get_arg(dbc, skey, data, flags)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - ret = __db_c_pget(dbc, skey, pkey, data, flags); - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_pget_arg -- - * Check DBC->pget arguments. - */ -static int -__db_c_pget_arg(dbc, pkey, flags) - DBC *dbc; - DBT *pkey; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - int ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - if (!F_ISSET(dbp, DB_AM_SECONDARY)) { - __db_err(dbenv, - "DBcursor->pget may only be used on secondary indices"); - return (EINVAL); - } - - if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) { - __db_err(dbenv, - "DB_MULTIPLE and DB_MULTIPLE_KEY may not be used on secondary indices"); - return (EINVAL); - } - - switch (LF_ISSET(~DB_RMW)) { - case DB_CONSUME: - case DB_CONSUME_WAIT: - /* These flags make no sense on a secondary index. */ - return (__db_ferr(dbenv, "DBcursor->pget", 0)); - case DB_GET_BOTH: - /* DB_GET_BOTH is "get both the primary and the secondary". */ - if (pkey == NULL) { - __db_err(dbenv, - "DB_GET_BOTH requires both a secondary and a primary key"); - return (EINVAL); - } - break; - default: - /* __db_c_get_arg will catch the rest. */ - break; - } - - /* - * We allow the pkey field to be NULL, so that we can make the - * two-DBT get calls into wrappers for the three-DBT ones. - */ - if (pkey != NULL && - (ret = __dbt_ferr(dbp, "primary key", pkey, 0)) != 0) - return (ret); - - /* But the pkey field can't be NULL if we're doing a DB_GET_BOTH. */ - if (pkey == NULL && (flags & DB_OPFLAGS_MASK) == DB_GET_BOTH) { - __db_err(dbenv, - "DB_GET_BOTH on a secondary index requires a primary key"); - return (EINVAL); - } - return (0); -} - -/* - * __db_c_put_pp -- - * DBC->put pre/post processing. 
- * - * PUBLIC: int __db_c_put_pp __P((DBC *, DBT *, DBT *, u_int32_t)); - */ -int -__db_c_put_pp(dbc, key, data, flags) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - - if ((ret = __db_c_put_arg(dbc, key, data, flags)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0) - goto err; - - DEBUG_LWRITE(dbc, dbc->txn, "DBcursor->put", - flags == DB_KEYFIRST || flags == DB_KEYLAST || - flags == DB_NODUPDATA || flags == DB_UPDATE_SECONDARY ? - key : NULL, data, flags); - ret =__db_c_put(dbc, key, data, flags); -err: - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_c_put_arg -- - * Check DBC->put arguments. - */ -static int -__db_c_put_arg(dbc, key, data, flags) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - int key_flags, ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - key_flags = 0; - - /* Check for changes to a read-only tree. */ - if (DB_IS_READONLY(dbp)) - return (__db_rdonly(dbenv, "DBcursor->put")); - - /* Check for puts on a secondary. */ - if (F_ISSET(dbp, DB_AM_SECONDARY)) { - if (flags == DB_UPDATE_SECONDARY) - flags = DB_KEYLAST; - else { - __db_err(dbenv, - "DBcursor->put forbidden on secondary indices"); - return (EINVAL); - } - } - - /* Check for invalid function flags. */ - switch (flags) { - case DB_AFTER: - case DB_BEFORE: - switch (dbp->type) { - case DB_BTREE: - case DB_HASH: /* Only with unsorted duplicates. */ - if (!F_ISSET(dbp, DB_AM_DUP)) - goto err; - if (dbp->dup_compare != NULL) - goto err; - break; - case DB_QUEUE: /* Not permitted. */ - goto err; - case DB_RECNO: /* Only with mutable record numbers. */ - if (!F_ISSET(dbp, DB_AM_RENUMBER)) - goto err; - key_flags = 1; - break; - case DB_UNKNOWN: - default: - goto err; - } - break; - case DB_CURRENT: - /* - * If there is a comparison function, doing a DB_CURRENT - * must not change the part of the data item that is used - * for the comparison. - */ - break; - case DB_NODUPDATA: - if (!F_ISSET(dbp, DB_AM_DUPSORT)) - goto err; - /* FALLTHROUGH */ - case DB_KEYFIRST: - case DB_KEYLAST: - key_flags = 1; - break; - default: -err: return (__db_ferr(dbenv, "DBcursor->put", 0)); - } - - /* Check for invalid key/data flags. */ - if (key_flags && (ret = __dbt_ferr(dbp, "key", key, 0)) != 0) - return (ret); - if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0) - return (ret); - - /* Keys shouldn't have partial flags during a put. */ - if (F_ISSET(key, DB_DBT_PARTIAL)) - return (__db_ferr(dbenv, "key DBT", 0)); - - /* - * The cursor must be initialized for anything other than DB_KEYFIRST - * and DB_KEYLAST, return EINVAL for an invalid cursor, otherwise 0. - */ - if (!IS_INITIALIZED(dbc) && flags != DB_KEYFIRST && - flags != DB_KEYLAST && flags != DB_NODUPDATA) - return (__db_curinval(dbenv)); - - return (0); -} - -/* - * __dbt_ferr -- - * Check a DBT for flag errors. - */ -static int -__dbt_ferr(dbp, name, dbt, check_thread) - const DB *dbp; - const char *name; - const DBT *dbt; - int check_thread; -{ - DB_ENV *dbenv; - int ret; - - dbenv = dbp->dbenv; - - /* - * Check for invalid DBT flags. 
We allow any of the flags to be - * specified to any DB or DBcursor call so that applications can - * set DB_DBT_MALLOC when retrieving a data item from a secondary - * database and then specify that same DBT as a key to a primary - * database, without having to clear flags. - */ - if ((ret = __db_fchk(dbenv, name, dbt->flags, DB_DBT_APPMALLOC | - DB_DBT_MALLOC | DB_DBT_DUPOK | DB_DBT_REALLOC | DB_DBT_USERMEM | - DB_DBT_PARTIAL)) != 0) - return (ret); - switch (F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM)) { - case 0: - case DB_DBT_MALLOC: - case DB_DBT_REALLOC: - case DB_DBT_USERMEM: - break; - default: - return (__db_ferr(dbenv, name, 1)); - } - - if (check_thread && DB_IS_THREADED(dbp) && - !F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM)) { - __db_err(dbenv, - "DB_THREAD mandates memory allocation flag on DBT %s", - name); - return (EINVAL); - } - return (0); -} - -/* - * __db_curinval - * Report that a cursor is in an invalid state. - */ -static int -__db_curinval(dbenv) - const DB_ENV *dbenv; -{ - __db_err(dbenv, - "Cursor position must be set before performing this operation"); - return (EINVAL); -} - -/* - * __db_txn_auto_init -- - * Handle DB_AUTO_COMMIT initialization. - * - * PUBLIC: int __db_txn_auto_init __P((DB_ENV *, DB_TXN **)); - */ -int -__db_txn_auto_init(dbenv, txnidp) - DB_ENV *dbenv; - DB_TXN **txnidp; -{ - /* - * Method calls where applications explicitly specify DB_AUTO_COMMIT - * require additional validation: the DB_AUTO_COMMIT flag cannot be - * specified if a transaction cookie is also specified, nor can the - * flag be specified in a non-transactional environment. - */ - if (*txnidp != NULL) { - __db_err(dbenv, - "DB_AUTO_COMMIT may not be specified along with a transaction handle"); - return (EINVAL); - } - - if (!TXN_ON(dbenv)) { - __db_err(dbenv, - "DB_AUTO_COMMIT may not be specified in non-transactional environment"); - return (EINVAL); - } - - /* - * Our caller checked to see if replication is making a state change. - * Don't call the user-level API (which would repeat that check). - */ - return (__txn_begin(dbenv, NULL, txnidp, 0)); -} - -/* - * __db_txn_auto_resolve -- - * Resolve local transactions. - * - * PUBLIC: int __db_txn_auto_resolve __P((DB_ENV *, DB_TXN *, int, int)); - */ -int -__db_txn_auto_resolve(dbenv, txn, nosync, ret) - DB_ENV *dbenv; - DB_TXN *txn; - int nosync, ret; -{ - int t_ret; - - /* - * We're resolving a transaction for the user, and must decrement the - * replication handle count. Call the user-level API. - */ - if (ret == 0) - return (__txn_commit(txn, nosync ? DB_TXN_NOSYNC : 0)); - - if ((t_ret = __txn_abort(txn)) != 0) - return (__db_panic(dbenv, t_ret)); - - return (ret); -} diff --git a/storage/bdb/db/db_join.c b/storage/bdb/db/db_join.c deleted file mode 100644 index 720891ac07e..00000000000 --- a/storage/bdb/db/db_join.c +++ /dev/null @@ -1,942 +0,0 @@ -/* - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1998-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: db_join.c,v 12.6 2005/10/07 20:21:22 ubell Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_join.h" -#include "dbinc/btree.h" - -static int __db_join_close_pp __P((DBC *)); -static int __db_join_cmp __P((const void *, const void *)); -static int __db_join_del __P((DBC *, u_int32_t)); -static int __db_join_get __P((DBC *, DBT *, DBT *, u_int32_t)); -static int __db_join_get_pp __P((DBC *, DBT *, DBT *, u_int32_t)); -static int __db_join_getnext __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t)); -static int __db_join_primget __P((DB *, - DB_TXN *, u_int32_t, DBT *, DBT *, u_int32_t)); -static int __db_join_put __P((DBC *, DBT *, DBT *, u_int32_t)); - -/* - * Check to see if the Nth secondary cursor of join cursor jc is pointing - * to a sorted duplicate set. - */ -#define SORTED_SET(jc, n) ((jc)->j_curslist[(n)]->dbp->dup_compare != NULL) - -/* - * This is the duplicate-assisted join functionality. Right now we're - * going to write it such that we return one item at a time, although - * I think we may need to optimize it to return them all at once. - * It should be easier to get it working this way, and I believe that - * changing it should be fairly straightforward. - * - * We optimize the join by sorting cursors from smallest to largest - * cardinality. In most cases, this is indeed optimal. However, if - * a cursor with large cardinality has very few data in common with the - * first cursor, it is possible that the join will be made faster by - * putting it earlier in the cursor list. Since we have no way to detect - * cases like this, we simply provide a flag, DB_JOIN_NOSORT, which retains - * the sort order specified by the caller, who may know more about the - * structure of the data. - * - * The first cursor moves sequentially through the duplicate set while - * the others search explicitly for the duplicate in question. - * - */ - -/* - * __db_join -- - * This is the interface to the duplicate-assisted join functionality. - * In the same way that cursors mark a position in a database, a cursor - * can mark a position in a join. While most cursors are created by the - * cursor method of a DB, join cursors are created through an explicit - * call to DB->join. - * - * The curslist is an array of existing, initialized cursors and primary - * is the DB of the primary file. The data item that joins all the - * cursors in the curslist is used as the key into the primary and that - * key and data are returned. When no more items are left in the join - * set, the c_next operation off the join cursor will return DB_NOTFOUND. 
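For orientation, here is a minimal sketch of how an application might drive the join interface described above: position a cursor in each secondary on the value of interest, hand the NULL-terminated cursor array to DB->join, and iterate the returned cursor until DB_NOTFOUND. The handles (primary, sec_color, sec_size), the transaction, and the key values are hypothetical, and error handling is abbreviated; this is an illustrative aside, not part of the removed code.

#include <string.h>
#include <db.h>

int
join_example(DB *primary, DB *sec_color, DB *sec_size, DB_TXN *txn)
{
	DBC *curs[3], *join_curs;
	DBT key, data;
	int ret, t_ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	curs[0] = curs[1] = curs[2] = NULL;	/* The list is NULL-terminated. */
	join_curs = NULL;

	/* Position a cursor in each secondary on the duplicate set of interest. */
	if ((ret = sec_color->cursor(sec_color, txn, &curs[0], 0)) != 0)
		goto err;
	key.data = (void *)"red";
	key.size = sizeof("red") - 1;
	if ((ret = curs[0]->c_get(curs[0], &key, &data, DB_SET)) != 0)
		goto err;

	if ((ret = sec_size->cursor(sec_size, txn, &curs[1], 0)) != 0)
		goto err;
	key.data = (void *)"large";
	key.size = sizeof("large") - 1;
	if ((ret = curs[1]->c_get(curs[1], &key, &data, DB_SET)) != 0)
		goto err;

	/* Create the join cursor over the two positioned secondaries. */
	if ((ret = primary->join(primary, curs, &join_curs, 0)) != 0)
		goto err;

	/* Each c_get returns a primary key/data pair in the intersection. */
	while ((ret = join_curs->c_get(join_curs, &key, &data, 0)) == 0)
		;	/* ... process the matching primary record ... */
	if (ret == DB_NOTFOUND)		/* Normal end of the join set. */
		ret = 0;

err:	if (join_curs != NULL &&
	    (t_ret = join_curs->c_close(join_curs)) != 0 && ret == 0)
		ret = t_ret;
	if (curs[0] != NULL &&
	    (t_ret = curs[0]->c_close(curs[0])) != 0 && ret == 0)
		ret = t_ret;
	if (curs[1] != NULL &&
	    (t_ret = curs[1]->c_close(curs[1])) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}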
- * - * PUBLIC: int __db_join __P((DB *, DBC **, DBC **, u_int32_t)); - */ -int -__db_join(primary, curslist, dbcp, flags) - DB *primary; - DBC **curslist, **dbcp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DBC *dbc; - JOIN_CURSOR *jc; - size_t ncurs, nslots; - u_int32_t i; - int ret; - - dbenv = primary->dbenv; - dbc = NULL; - jc = NULL; - - if ((ret = __os_calloc(dbenv, 1, sizeof(DBC), &dbc)) != 0) - goto err; - - if ((ret = __os_calloc(dbenv, 1, sizeof(JOIN_CURSOR), &jc)) != 0) - goto err; - - if ((ret = __os_malloc(dbenv, 256, &jc->j_key.data)) != 0) - goto err; - jc->j_key.ulen = 256; - F_SET(&jc->j_key, DB_DBT_USERMEM); - - F_SET(&jc->j_rdata, DB_DBT_REALLOC); - - for (jc->j_curslist = curslist; - *jc->j_curslist != NULL; jc->j_curslist++) - ; - - /* - * The number of cursor slots we allocate is one greater than - * the number of cursors involved in the join, because the - * list is NULL-terminated. - */ - ncurs = (size_t)(jc->j_curslist - curslist); - nslots = ncurs + 1; - - /* - * !!! -- A note on the various lists hanging off jc. - * - * j_curslist is the initial NULL-terminated list of cursors passed - * into __db_join. The original cursors are not modified; pristine - * copies are required because, in databases with unsorted dups, we - * must reset all of the secondary cursors after the first each - * time the first one is incremented, or else we will lose data - * which happen to be sorted differently in two different cursors. - * - * j_workcurs is where we put those copies that we're planning to - * work with. They're lazily c_dup'ed from j_curslist as we need - * them, and closed when the join cursor is closed or when we need - * to reset them to their original values (in which case we just - * c_dup afresh). - * - * j_fdupcurs is an array of cursors which point to the first - * duplicate in the duplicate set that contains the data value - * we're currently interested in. We need this to make - * __db_join_get correctly return duplicate duplicates; i.e., if a - * given data value occurs twice in the set belonging to cursor #2, - * and thrice in the set belonging to cursor #3, and once in all - * the other cursors, successive calls to __db_join_get need to - * return that data item six times. To make this happen, each time - * cursor N is allowed to advance to a new datum, all cursors M - * such that M > N have to be reset to the first duplicate with - * that datum, so __db_join_get will return all the dup-dups again. - * We could just reset them to the original cursor from j_curslist, - * but that would be a bit slower in the unsorted case and a LOT - * slower in the sorted one. - * - * j_exhausted is a list of boolean values which represent - * whether or not their corresponding cursors are "exhausted", - * i.e. whether the datum under the corresponding cursor has - * been found not to exist in any unreturned combinations of - * later secondary cursors, in which case they are ready to be - * incremented. - */ - - /* We don't want to free regions whose callocs have failed. 
*/ - jc->j_curslist = NULL; - jc->j_workcurs = NULL; - jc->j_fdupcurs = NULL; - jc->j_exhausted = NULL; - - if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *), - &jc->j_curslist)) != 0) - goto err; - if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *), - &jc->j_workcurs)) != 0) - goto err; - if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *), - &jc->j_fdupcurs)) != 0) - goto err; - if ((ret = __os_calloc(dbenv, nslots, sizeof(u_int8_t), - &jc->j_exhausted)) != 0) - goto err; - for (i = 0; curslist[i] != NULL; i++) { - jc->j_curslist[i] = curslist[i]; - jc->j_workcurs[i] = NULL; - jc->j_fdupcurs[i] = NULL; - jc->j_exhausted[i] = 0; - } - jc->j_ncurs = (u_int32_t)ncurs; - - /* - * If DB_JOIN_NOSORT is not set, optimize secondary cursors by - * sorting in order of increasing cardinality. - */ - if (!LF_ISSET(DB_JOIN_NOSORT)) - qsort(jc->j_curslist, ncurs, sizeof(DBC *), __db_join_cmp); - - /* - * We never need to reset the 0th cursor, so there's no - * solid reason to use workcurs[0] rather than curslist[0] in - * join_get. Nonetheless, it feels cleaner to do it for symmetry, - * and this is the most logical place to copy it. - * - * !!! - * There's no need to close the new cursor if we goto err only - * because this is the last thing that can fail. Modifier of this - * function beware! - */ - if ((ret = - __db_c_dup(jc->j_curslist[0], jc->j_workcurs, DB_POSITION)) != 0) - goto err; - - dbc->c_close = __db_join_close_pp; - dbc->c_del = __db_join_del; - dbc->c_get = __db_join_get_pp; - dbc->c_put = __db_join_put; - dbc->internal = (DBC_INTERNAL *)jc; - dbc->dbp = primary; - jc->j_primary = primary; - - /* Stash the first cursor's transaction here for easy access. */ - dbc->txn = curslist[0]->txn; - - *dbcp = dbc; - - MUTEX_LOCK(dbenv, primary->mutex); - TAILQ_INSERT_TAIL(&primary->join_queue, dbc, links); - MUTEX_UNLOCK(dbenv, primary->mutex); - - return (0); - -err: if (jc != NULL) { - if (jc->j_curslist != NULL) - __os_free(dbenv, jc->j_curslist); - if (jc->j_workcurs != NULL) { - if (jc->j_workcurs[0] != NULL) - (void)__db_c_close(jc->j_workcurs[0]); - __os_free(dbenv, jc->j_workcurs); - } - if (jc->j_fdupcurs != NULL) - __os_free(dbenv, jc->j_fdupcurs); - if (jc->j_exhausted != NULL) - __os_free(dbenv, jc->j_exhausted); - __os_free(dbenv, jc); - } - if (dbc != NULL) - __os_free(dbenv, dbc); - return (ret); -} - -/* - * __db_join_close_pp -- - * DBC->c_close pre/post processing for join cursors. - */ -static int -__db_join_close_pp(dbc) - DBC *dbc; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - DB *dbp; - int handle_check, ret, t_ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - - ENV_ENTER(dbenv, ip); - - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - ret = __db_join_close(dbc); - - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -static int -__db_join_put(dbc, key, data, flags) - DBC *dbc; - DBT *key; - DBT *data; - u_int32_t flags; -{ - PANIC_CHECK(dbc->dbp->dbenv); - - COMPQUIET(key, NULL); - COMPQUIET(data, NULL); - COMPQUIET(flags, 0); - return (EINVAL); -} - -static int -__db_join_del(dbc, flags) - DBC *dbc; - u_int32_t flags; -{ - PANIC_CHECK(dbc->dbp->dbenv); - - COMPQUIET(flags, 0); - return (EINVAL); -} - -/* - * __db_join_get_pp -- - * DBjoin->get pre/post processing. 
- */ -static int -__db_join_get_pp(dbc, key, data, flags) - DBC *dbc; - DBT *key, *data; - u_int32_t flags; -{ - DB *dbp; - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - u_int32_t handle_check, save_flags; - int ret, t_ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - /* Save the original flags value. */ - save_flags = flags; - - PANIC_CHECK(dbenv); - - if (LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW)) { - if (!LOCKING_ON(dbp->dbenv)) - return (__db_fnl(dbp->dbenv, "DBcursor->c_get")); - LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW); - } - - switch (flags) { - case 0: - case DB_JOIN_ITEM: - break; - default: - return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0)); - } - - /* - * A partial get of the key of a join cursor don't make much sense; - * the entire key is necessary to query the primary database - * and find the datum, and so regardless of the size of the key - * it would not be a performance improvement. Since it would require - * special handling, we simply disallow it. - * - * A partial get of the data, however, potentially makes sense (if - * all possible data are a predictable large structure, for instance) - * and causes us no headaches, so we permit it. - */ - if (F_ISSET(key, DB_DBT_PARTIAL)) { - __db_err(dbp->dbenv, - "DB_DBT_PARTIAL may not be set on key during join_get"); - return (EINVAL); - } - - ENV_ENTER(dbenv, ip); - - handle_check = IS_ENV_REPLICATED(dbp->dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - /* Restore the original flags value. */ - flags = save_flags; - - ret = __db_join_get(dbc, key, data, flags); - - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -static int -__db_join_get(dbc, key_arg, data_arg, flags) - DBC *dbc; - DBT *key_arg, *data_arg; - u_int32_t flags; -{ - DBT *key_n, key_n_mem; - DB *dbp; - DBC *cp; - JOIN_CURSOR *jc; - int db_manage_data, ret; - u_int32_t i, j, operation, opmods; - - dbp = dbc->dbp; - jc = (JOIN_CURSOR *)dbc->internal; - - operation = LF_ISSET(DB_OPFLAGS_MASK); - - /* !!! - * If the set of flags here changes, check that __db_join_primget - * is updated to handle them properly. - */ - opmods = LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW); - - /* - * Since we are fetching the key as a datum in the secondary indices, - * we must be careful of caller-specified DB_DBT_* memory - * management flags. If necessary, use a stack-allocated DBT; - * we'll appropriately copy and/or allocate the data later. - */ - if (F_ISSET(key_arg, DB_DBT_USERMEM) || - F_ISSET(key_arg, DB_DBT_MALLOC)) { - /* We just use the default buffer; no need to go malloc. */ - key_n = &key_n_mem; - memset(key_n, 0, sizeof(DBT)); - } else { - /* - * Either DB_DBT_REALLOC or the default buffer will work - * fine if we have to reuse it, as we do. - */ - key_n = key_arg; - } - - /* - * If our last attempt to do a get on the primary key failed, - * short-circuit the join and try again with the same key. - */ - if (F_ISSET(jc, JOIN_RETRY)) - goto samekey; - F_CLR(jc, JOIN_RETRY); - -retry: ret = __db_c_get(jc->j_workcurs[0], &jc->j_key, key_n, - opmods | (jc->j_exhausted[0] ? DB_NEXT_DUP : DB_CURRENT)); - - if (ret == DB_BUFFER_SMALL) { - jc->j_key.ulen <<= 1; - if ((ret = __os_realloc(dbp->dbenv, - jc->j_key.ulen, &jc->j_key.data)) != 0) - goto mem_err; - goto retry; - } - - /* - * If ret == DB_NOTFOUND, we're out of elements of the first - * secondary cursor. 
This is how we finally finish the join - * if all goes well. - */ - if (ret != 0) - goto err; - - /* - * If jc->j_exhausted[0] == 1, we've just advanced the first cursor, - * and we're going to want to advance all the cursors that point to - * the first member of a duplicate duplicate set (j_fdupcurs[1..N]). - * Close all the cursors in j_fdupcurs; we'll reopen them the - * first time through the upcoming loop. - */ - for (i = 1; i < jc->j_ncurs; i++) { - if (jc->j_fdupcurs[i] != NULL && - (ret = __db_c_close(jc->j_fdupcurs[i])) != 0) - goto err; - jc->j_fdupcurs[i] = NULL; - } - - /* - * If jc->j_curslist[1] == NULL, we have only one cursor in the join. - * Thus, we can safely increment that one cursor on each call - * to __db_join_get, and we signal this by setting jc->j_exhausted[0] - * right away. - * - * Otherwise, reset jc->j_exhausted[0] to 0, so that we don't - * increment it until we know we're ready to. - */ - if (jc->j_curslist[1] == NULL) - jc->j_exhausted[0] = 1; - else - jc->j_exhausted[0] = 0; - - /* We have the first element; now look for it in the other cursors. */ - for (i = 1; i < jc->j_ncurs; i++) { - DB_ASSERT(jc->j_curslist[i] != NULL); - if (jc->j_workcurs[i] == NULL) - /* If this is NULL, we need to dup curslist into it. */ - if ((ret = __db_c_dup(jc->j_curslist[i], - &jc->j_workcurs[i], DB_POSITION)) != 0) - goto err; - -retry2: cp = jc->j_workcurs[i]; - - if ((ret = __db_join_getnext(cp, &jc->j_key, key_n, - jc->j_exhausted[i], opmods)) == DB_NOTFOUND) { - /* - * jc->j_workcurs[i] has no more of the datum we're - * interested in. Go back one cursor and get - * a new dup. We can't just move to a new - * element of the outer relation, because that way - * we might miss duplicate duplicates in cursor i-1. - * - * If this takes us back to the first cursor, - * -then- we can move to a new element of the outer - * relation. - */ - --i; - jc->j_exhausted[i] = 1; - - if (i == 0) { - for (j = 1; jc->j_workcurs[j] != NULL; j++) { - /* - * We're moving to a new element of - * the first secondary cursor. If - * that cursor is sorted, then any - * other sorted cursors can be safely - * reset to the first duplicate - * duplicate in the current set if we - * have a pointer to it (we can't just - * leave them be, or we'll miss - * duplicate duplicates in the outer - * relation). - * - * If the first cursor is unsorted, or - * if cursor j is unsorted, we can - * make no assumptions about what - * we're looking for next or where it - * will be, so we reset to the very - * beginning (setting workcurs NULL - * will achieve this next go-round). - * - * XXX: This is likely to break - * horribly if any two cursors are - * both sorted, but have different - * specified sort functions. For, - * now, we dismiss this as pathology - * and let strange things happen--we - * can't make rope childproof. - */ - if ((ret = __db_c_close( - jc->j_workcurs[j])) != 0) - goto err; - if (!SORTED_SET(jc, 0) || - !SORTED_SET(jc, j) || - jc->j_fdupcurs[j] == NULL) - /* - * Unsafe conditions; - * reset fully. - */ - jc->j_workcurs[j] = NULL; - else - /* Partial reset suffices. */ - if ((__db_c_dup( - jc->j_fdupcurs[j], - &jc->j_workcurs[j], - DB_POSITION)) != 0) - goto err; - jc->j_exhausted[j] = 0; - } - goto retry; - /* NOTREACHED */ - } - - /* - * We're about to advance the cursor and need to - * reset all of the workcurs[j] where j>i, so that - * we don't miss any duplicate duplicates. 
- */ - for (j = i + 1; - jc->j_workcurs[j] != NULL; - j++) { - if ((ret = - __db_c_close(jc->j_workcurs[j])) != 0) - goto err; - jc->j_exhausted[j] = 0; - if (jc->j_fdupcurs[j] == NULL) - jc->j_workcurs[j] = NULL; - else if ((ret = __db_c_dup(jc->j_fdupcurs[j], - &jc->j_workcurs[j], DB_POSITION)) != 0) - goto err; - } - goto retry2; - /* NOTREACHED */ - } - - if (ret == DB_BUFFER_SMALL) { - jc->j_key.ulen <<= 1; - if ((ret = __os_realloc(dbp->dbenv, jc->j_key.ulen, - &jc->j_key.data)) != 0) { -mem_err: __db_err(dbp->dbenv, - "Allocation failed for join key, len = %lu", - (u_long)jc->j_key.ulen); - goto err; - } - goto retry2; - } - - if (ret != 0) - goto err; - - /* - * If we made it this far, we've found a matching - * datum in cursor i. Mark the current cursor - * unexhausted, so we don't miss any duplicate - * duplicates the next go-round--unless this is the - * very last cursor, in which case there are none to - * miss, and we'll need that exhausted flag to finally - * get a DB_NOTFOUND and move on to the next datum in - * the outermost cursor. - */ - if (i + 1 != jc->j_ncurs) - jc->j_exhausted[i] = 0; - else - jc->j_exhausted[i] = 1; - - /* - * If jc->j_fdupcurs[i] is NULL and the ith cursor's dups are - * sorted, then we're here for the first time since advancing - * cursor 0, and we have a new datum of interest. - * jc->j_workcurs[i] points to the beginning of a set of - * duplicate duplicates; store this into jc->j_fdupcurs[i]. - */ - if (SORTED_SET(jc, i) && jc->j_fdupcurs[i] == NULL && (ret = - __db_c_dup(cp, &jc->j_fdupcurs[i], DB_POSITION)) != 0) - goto err; - } - -err: if (ret != 0) - return (ret); - - if (0) { -samekey: /* - * Get the key we tried and failed to return last time; - * it should be the current datum of all the secondary cursors. - */ - if ((ret = __db_c_get(jc->j_workcurs[0], - &jc->j_key, key_n, DB_CURRENT | opmods)) != 0) - return (ret); - F_CLR(jc, JOIN_RETRY); - } - - /* - * ret == 0; we have a key to return. - * - * If DB_DBT_USERMEM or DB_DBT_MALLOC is set, we need to copy the key - * back into the dbt we were given for the key; call __db_retcopy. - * Otherwise, assert that we do not need to copy anything and proceed. - */ - DB_ASSERT(F_ISSET( - key_arg, DB_DBT_USERMEM | DB_DBT_MALLOC) || key_n == key_arg); - - if (F_ISSET(key_arg, DB_DBT_USERMEM | DB_DBT_MALLOC) && - (ret = __db_retcopy(dbp->dbenv, - key_arg, key_n->data, key_n->size, NULL, NULL)) != 0) { - /* - * The retcopy failed, most commonly because we have a user - * buffer for the key which is too small. Set things up to - * retry next time, and return. - */ - F_SET(jc, JOIN_RETRY); - return (ret); - } - - /* - * If DB_JOIN_ITEM is set, we return it; otherwise we do the lookup - * in the primary and then return. - * - * Note that we use key_arg here; it is safe (and appropriate) - * to do so. - */ - if (operation == DB_JOIN_ITEM) - return (0); - - /* - * If data_arg->flags == 0--that is, if DB is managing the - * data DBT's memory--it's not safe to just pass the DBT - * through to the primary get call, since we don't want that - * memory to belong to the primary DB handle (and if the primary - * is free-threaded, it can't anyway). - * - * Instead, use memory that is managed by the join cursor, in - * jc->j_rdata. - */ - if (!F_ISSET(data_arg, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM)) - db_manage_data = 1; - else - db_manage_data = 0; - if ((ret = __db_join_primget(jc->j_primary, - jc->j_curslist[0]->txn, jc->j_curslist[0]->locker, key_arg, - db_manage_data ? 
&jc->j_rdata : data_arg, opmods)) != 0) { - if (ret == DB_NOTFOUND) - /* - * If ret == DB_NOTFOUND, the primary and secondary - * are out of sync; every item in each secondary - * should correspond to something in the primary, - * or we shouldn't have done the join this way. - * Wail. - */ - ret = __db_secondary_corrupt(jc->j_primary); - else - /* - * The get on the primary failed for some other - * reason, most commonly because we're using a user - * buffer that's not big enough. Flag our failure - * so we can return the same key next time. - */ - F_SET(jc, JOIN_RETRY); - } - if (db_manage_data && ret == 0) { - data_arg->data = jc->j_rdata.data; - data_arg->size = jc->j_rdata.size; - } - - return (ret); -} - -/* - * __db_join_close -- - * DBC->c_close for join cursors. - * - * PUBLIC: int __db_join_close __P((DBC *)); - */ -int -__db_join_close(dbc) - DBC *dbc; -{ - DB *dbp; - DB_ENV *dbenv; - JOIN_CURSOR *jc; - int ret, t_ret; - u_int32_t i; - - jc = (JOIN_CURSOR *)dbc->internal; - dbp = dbc->dbp; - dbenv = dbp->dbenv; - ret = t_ret = 0; - - /* - * Remove from active list of join cursors. Note that this - * must happen before any action that can fail and return, or else - * __db_close may loop indefinitely. - */ - MUTEX_LOCK(dbenv, dbp->mutex); - TAILQ_REMOVE(&dbp->join_queue, dbc, links); - MUTEX_UNLOCK(dbenv, dbp->mutex); - - PANIC_CHECK(dbenv); - - /* - * Close any open scratch cursors. In each case, there may - * not be as many outstanding as there are cursors in - * curslist, but we want to close whatever's there. - * - * If any close fails, there's no reason not to close everything else; - * we'll just return the error code of the last one to fail. There's - * not much the caller can do anyway, since these cursors only exist - * hanging off a db-internal data structure that they shouldn't be - * mucking with. - */ - for (i = 0; i < jc->j_ncurs; i++) { - if (jc->j_workcurs[i] != NULL && - (t_ret = __db_c_close(jc->j_workcurs[i])) != 0) - ret = t_ret; - if (jc->j_fdupcurs[i] != NULL && - (t_ret = __db_c_close(jc->j_fdupcurs[i])) != 0) - ret = t_ret; - } - - __os_free(dbenv, jc->j_exhausted); - __os_free(dbenv, jc->j_curslist); - __os_free(dbenv, jc->j_workcurs); - __os_free(dbenv, jc->j_fdupcurs); - __os_free(dbenv, jc->j_key.data); - if (jc->j_rdata.data != NULL) - __os_ufree(dbenv, jc->j_rdata.data); - __os_free(dbenv, jc); - __os_free(dbenv, dbc); - - return (ret); -} - -/* - * __db_join_getnext -- - * This function replaces the DBC_CONTINUE and DBC_KEYSET - * functionality inside the various cursor get routines. - * - * If exhausted == 0, we're not done with the current datum; - * return it if it matches "matching", otherwise search - * using DB_GET_BOTHC (which is faster than iteratively doing - * DB_NEXT_DUP) forward until we find one that does. - * - * If exhausted == 1, we are done with the current datum, so just - * leap forward to searching NEXT_DUPs. - * - * If no matching datum exists, returns DB_NOTFOUND, else 0. - */ -static int -__db_join_getnext(dbc, key, data, exhausted, opmods) - DBC *dbc; - DBT *key, *data; - u_int32_t exhausted, opmods; -{ - int ret, cmp; - DB *dbp; - DBT ldata; - int (*func) __P((DB *, const DBT *, const DBT *)); - - dbp = dbc->dbp; - func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare; - - switch (exhausted) { - case 0: - /* - * We don't want to step on data->data; use a new - * DBT and malloc so we don't step on dbc's rdata memory. 
- */ - memset(&ldata, 0, sizeof(DBT)); - F_SET(&ldata, DB_DBT_MALLOC); - if ((ret = __db_c_get(dbc, - key, &ldata, opmods | DB_CURRENT)) != 0) - break; - cmp = func(dbp, data, &ldata); - if (cmp == 0) { - /* - * We have to return the real data value. Copy - * it into data, then free the buffer we malloc'ed - * above. - */ - if ((ret = __db_retcopy(dbp->dbenv, data, ldata.data, - ldata.size, &data->data, &data->size)) != 0) - return (ret); - __os_ufree(dbp->dbenv, ldata.data); - return (0); - } - - /* - * Didn't match--we want to fall through and search future - * dups. We just forget about ldata and free - * its buffer--data contains the value we're searching for. - */ - __os_ufree(dbp->dbenv, ldata.data); - /* FALLTHROUGH */ - case 1: - ret = __db_c_get(dbc, key, data, opmods | DB_GET_BOTHC); - break; - default: - ret = EINVAL; - break; - } - - return (ret); -} - -/* - * __db_join_cmp -- - * Comparison function for sorting DBCs in cardinality order. - */ -static int -__db_join_cmp(a, b) - const void *a, *b; -{ - DBC *dbca, *dbcb; - db_recno_t counta, countb; - - dbca = *((DBC * const *)a); - dbcb = *((DBC * const *)b); - - if (__db_c_count(dbca, &counta) != 0 || - __db_c_count(dbcb, &countb) != 0) - return (0); - - return ((long)counta - (long)countb); -} - -/* - * __db_join_primget -- - * Perform a DB->get in the primary, being careful not to use a new - * locker ID if we're doing CDB locking. - */ -static int -__db_join_primget(dbp, txn, lockerid, key, data, flags) - DB *dbp; - DB_TXN *txn; - u_int32_t lockerid; - DBT *key, *data; - u_int32_t flags; -{ - DBC *dbc; - u_int32_t rmw; - int ret, t_ret; - - if ((ret = __db_cursor_int(dbp, - txn, dbp->type, PGNO_INVALID, 0, lockerid, &dbc)) != 0) - return (ret); - - /* - * The only allowable flags here are the two flags copied into "opmods" - * in __db_join_get, DB_RMW and DB_READ_UNCOMMITTED. The former is an - * op on the c_get call, the latter on the cursor call. It's a DB bug - * if we allow any other flags down in here. - */ - rmw = LF_ISSET(DB_RMW); - if (LF_ISSET(DB_READ_UNCOMMITTED) || - (txn != NULL && F_ISSET(txn, TXN_READ_UNCOMMITTED))) - F_SET(dbc, DBC_READ_UNCOMMITTED); - - if (LF_ISSET(DB_READ_COMMITTED) || - (txn != NULL && F_ISSET(txn, TXN_READ_COMMITTED))) - F_SET(dbc, DBC_READ_COMMITTED); - - LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW); - DB_ASSERT(flags == 0); - - F_SET(dbc, DBC_TRANSIENT); - - /* - * This shouldn't be necessary, thanks to the fact that join cursors - * swap in their own DB_DBT_REALLOC'ed buffers, but just for form's - * sake, we mirror what __db_get does. - */ - SET_RET_MEM(dbc, dbp); - - ret = __db_c_get(dbc, key, data, DB_SET | rmw); - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_secondary_corrupt -- - * Report that a secondary index appears corrupt, as it has a record - * that does not correspond to a record in the primary or vice versa. - * - * PUBLIC: int __db_secondary_corrupt __P((DB *)); - */ -int -__db_secondary_corrupt(dbp) - DB *dbp; -{ - __db_err(dbp->dbenv, - "Secondary index corrupt: not consistent with primary"); - return (DB_SECONDARY_BAD); -} diff --git a/storage/bdb/db/db_meta.c b/storage/bdb/db/db_meta.c deleted file mode 100644 index c1264d38fb1..00000000000 --- a/storage/bdb/db/db_meta.c +++ /dev/null @@ -1,1065 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. 
- */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_meta.c,v 12.22 2005/10/27 01:46:34 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" -#include "dbinc/db_am.h" - -static void __db_init_meta __P((DB *, void *, db_pgno_t, u_int32_t)); -#ifdef HAVE_FTRUNCATE -static void __db_freelist_sort __P((struct pglist *, u_int32_t)); -static int __db_pglistcmp __P((const void *, const void *)); -static int __db_truncate_freelist __P((DBC *, DBMETA *, - PAGE *, db_pgno_t *, u_int32_t, u_int32_t)); -#endif - -/* - * __db_init_meta -- - * Helper function for __db_new that initializes the important fields in - * a meta-data page (used instead of P_INIT). We need to make sure that we - * retain the page number and LSN of the existing page. - */ -static void -__db_init_meta(dbp, p, pgno, pgtype) - DB *dbp; - void *p; - db_pgno_t pgno; - u_int32_t pgtype; -{ - DB_LSN save_lsn; - DBMETA *meta; - - meta = (DBMETA *)p; - save_lsn = meta->lsn; - memset(meta, 0, sizeof(DBMETA)); - meta->lsn = save_lsn; - meta->pagesize = dbp->pgsize; - if (F_ISSET(dbp, DB_AM_CHKSUM)) - FLD_SET(meta->metaflags, DBMETA_CHKSUM); - meta->pgno = pgno; - meta->type = (u_int8_t)pgtype; -} - -/* - * __db_new -- - * Get a new page, preferably from the freelist. 
- * - * PUBLIC: int __db_new __P((DBC *, u_int32_t, PAGE **)); - */ -int -__db_new(dbc, type, pagepp) - DBC *dbc; - u_int32_t type; - PAGE **pagepp; -{ - DBMETA *meta; - DB *dbp; - DB_LOCK metalock; - DB_LSN lsn; - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t last, *list, pgno, newnext; - u_int32_t meta_flags; - int extend, ret, t_ret; - - meta = NULL; - meta_flags = 0; - dbp = dbc->dbp; - mpf = dbp->mpf; - h = NULL; - newnext = PGNO_INVALID; - - pgno = PGNO_BASE_MD; - if ((ret = __db_lget(dbc, - LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) - goto err; - last = meta->last_pgno; - if (meta->free == PGNO_INVALID) { - if (FLD_ISSET(type, P_DONTEXTEND)) { - *pagepp = NULL; - goto err; - } - last = pgno = meta->last_pgno + 1; - ZERO_LSN(lsn); - extend = 1; - } else { - pgno = meta->free; - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - goto err; - - /* - * We want to take the first page off the free list and - * then set meta->free to the that page's next_pgno, but - * we need to log the change first. - */ - newnext = h->next_pgno; - lsn = h->lsn; - extend = 0; - } - - FLD_CLR(type, P_DONTEXTEND); - - /* - * Log the allocation before fetching the new page. If we - * don't have room in the log then we don't want to tell - * mpool to extend the file. - */ - if (DBC_LOGGING(dbc)) { - if ((ret = __db_pg_alloc_log(dbp, dbc->txn, &LSN(meta), 0, - &LSN(meta), PGNO_BASE_MD, &lsn, - pgno, (u_int32_t)type, newnext, meta->last_pgno)) != 0) - goto err; - } else - LSN_NOT_LOGGED(LSN(meta)); - - meta_flags = DB_MPOOL_DIRTY; - meta->free = newnext; - - if (extend == 1) { - if ((ret = __memp_fget(mpf, &pgno, DB_MPOOL_NEW, &h)) != 0) - goto err; - DB_ASSERT(last == pgno); - meta->last_pgno = pgno; - ZERO_LSN(h->lsn); - h->pgno = pgno; - } - LSN(h) = LSN(meta); - - DB_ASSERT(TYPE(h) == P_INVALID); - - if (TYPE(h) != P_INVALID) - return (__db_panic(dbp->dbenv, EINVAL)); - - ret = __memp_fput(mpf, (PAGE *)meta, DB_MPOOL_DIRTY); - meta = NULL; - if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - goto err; - - switch (type) { - case P_BTREEMETA: - case P_HASHMETA: - case P_QAMMETA: - __db_init_meta(dbp, h, h->pgno, type); - break; - default: - P_INIT(h, dbp->pgsize, - h->pgno, PGNO_INVALID, PGNO_INVALID, 0, type); - break; - } - - /* Fix up the sorted free list if necessary. */ -#ifdef HAVE_FTRUNCATE - if (extend == 0) { - u_int32_t nelems = 0; - - if ((ret = __memp_get_freelist(dbp->mpf, &nelems, &list)) != 0) - goto err; - if (nelems != 0) { - DB_ASSERT(h->pgno == list[0]); - memmove(list, &list[1], (nelems - 1) * sizeof(*list)); - if ((ret = __memp_extend_freelist( - dbp->mpf, nelems - 1, &list)) != 0) - goto err; - } - } -#else - COMPQUIET(list, NULL); -#endif - - /* - * If dirty reads are enabled and we are in a transaction, we could - * abort this allocation after the page(s) pointing to this - * one have their locks downgraded. This would permit dirty readers - * to access this page which is ok, but they must be off the - * page when we abort. We never lock overflow pages or off page - * duplicate trees. 
- */ - if (type != P_OVERFLOW && !F_ISSET(dbc, DBC_OPD) && - F_ISSET(dbc->dbp, DB_AM_READ_UNCOMMITTED) && dbc->txn != NULL) { - if ((ret = __db_lget(dbc, 0, - h->pgno, DB_LOCK_WWRITE, 0, &metalock)) != 0) - goto err; - } - - *pagepp = h; - return (0); - -err: if (h != NULL) - (void)__memp_fput(mpf, h, 0); - if (meta != NULL) - (void)__memp_fput(mpf, meta, meta_flags); - (void)__TLPUT(dbc, metalock); - return (ret); -} - -/* - * __db_free -- - * Add a page to the head of the freelist. - * - * PUBLIC: int __db_free __P((DBC *, PAGE *)); - */ -int -__db_free(dbc, h) - DBC *dbc; - PAGE *h; -{ - DBMETA *meta; - DB *dbp; - DBT ddbt, ldbt; - DB_LOCK metalock; - DB_MPOOLFILE *mpf; - db_pgno_t last_pgno, *lp, next_pgno, pgno, prev_pgno; - u_int32_t dirty_flag, lflag, nelem; - int do_truncate, ret, t_ret; -#ifdef HAVE_FTRUNCATE - db_pgno_t *list; - u_int32_t position, start; -#endif - - dbp = dbc->dbp; - mpf = dbp->mpf; - prev_pgno = PGNO_INVALID; - nelem = 0; - meta = NULL; - do_truncate = 0; - lp = NULL; - - /* - * Retrieve the metadata page. If we are not keeping a sorted - * free list put the page at the head of the the free list. - * If we are keeping a sorted free list, for truncation, - * then figure out where this page belongs and either - * link it in or truncate the file as much as possible. - * If either the lock get or page get routines - * fail, then we need to put the page with which we were called - * back because our caller assumes we take care of it. - */ - dirty_flag = 0; - pgno = PGNO_BASE_MD; - if ((ret = __db_lget(dbc, - LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) - goto err1; - - last_pgno = meta->last_pgno; - next_pgno = meta->free; - - DB_ASSERT(h->pgno != next_pgno); - -#ifdef HAVE_FTRUNCATE - /* - * If we are maintaining a sorted free list see if we either have a - * new truncation point or the page goes somewhere in the middle of - * the list. If it goes in the middle of the list, we will drop the - * meta page and get the previous page. - */ - if ((ret = __memp_get_freelist(mpf, &nelem, &list)) != 0) - goto err; - if (list == NULL) - goto no_sort; - - if (h->pgno != last_pgno) { - /* - * Put the page number in the sorted list. - * Finds its position and the previous page, - * extend the list, make room and insert. - */ - position = 0; - if (nelem != 0) { - __db_freelist_pos(h->pgno, list, nelem, &position); - - DB_ASSERT(h->pgno != list[position]); - - /* Get the previous page if this is not the smallest. */ - if (position != 0 || h->pgno > list[0]) - prev_pgno = list[position]; - } - - /* Put the page number into the list. */ - if ((ret = __memp_extend_freelist(mpf, nelem + 1, &list)) != 0) - return (ret); - if (prev_pgno != PGNO_INVALID) - lp = &list[position + 1]; - else - lp = list; - if (nelem != 0 && position != nelem) - memmove(lp + 1, lp, - (size_t)((u_int8_t*)&list[nelem] - (u_int8_t*)lp)); - *lp = h->pgno; - } else if (nelem != 0) { - /* Find the truncation point. */ - for (lp = &list[nelem - 1]; lp >= list; lp--) - if (--last_pgno != *lp) - break; - if (lp < list || last_pgno < h->pgno - 1) - do_truncate = 1; - last_pgno = meta->last_pgno; - } - -no_sort: - if (prev_pgno != PGNO_INVALID) { - if ((ret = __memp_fput(mpf, meta, 0)) != 0) - goto err1; - meta = NULL; - pgno = prev_pgno; - if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) - goto err1; - next_pgno = NEXT_PGNO(meta); - } -#endif - - /* Log the change. 
*/ - if (DBC_LOGGING(dbc)) { - memset(&ldbt, 0, sizeof(ldbt)); - ldbt.data = h; - ldbt.size = P_OVERHEAD(dbp); - switch (h->type) { - case P_HASH: - case P_IBTREE: - case P_IRECNO: - case P_LBTREE: - case P_LRECNO: - case P_LDUP: - if (h->entries > 0) { - ldbt.size += h->entries * sizeof(db_indx_t); - ddbt.data = (u_int8_t *)h + HOFFSET(h); - ddbt.size = dbp->pgsize - HOFFSET(h); - if ((ret = __db_pg_freedata_log(dbp, dbc->txn, - &LSN(meta), 0, h->pgno, &LSN(meta), pgno, - &ldbt, next_pgno, last_pgno, &ddbt)) != 0) - goto err1; - goto logged; - } - break; - case P_HASHMETA: - ldbt.size = sizeof(HMETA); - break; - case P_BTREEMETA: - ldbt.size = sizeof(BTMETA); - break; - case P_OVERFLOW: - ldbt.size += OV_LEN(h); - break; - default: - DB_ASSERT(h->type != P_QAMDATA); - } - - /* - * If we are truncating the file, we need to make sure - * the logging happens before the truncation. If we - * are truncating multiple pages we don't need to flush the - * log here as it will be flushed by __db_truncate_freelist. - */ - lflag = 0; -#ifdef HAVE_FTRUNCATE - if (do_truncate == 0 && h->pgno == last_pgno) - lflag = DB_FLUSH; -#endif - if ((ret = __db_pg_free_log(dbp, - dbc->txn, &LSN(meta), lflag, h->pgno, - &LSN(meta), pgno, &ldbt, next_pgno, last_pgno)) != 0) - goto err1; - } else - LSN_NOT_LOGGED(LSN(meta)); -logged: LSN(h) = LSN(meta); - -#ifdef HAVE_FTRUNCATE - if (do_truncate) { - start = (u_int32_t) (lp - list) + 1; - meta->last_pgno--; - ret = __db_truncate_freelist( - dbc, meta, h, list, start, nelem); - h = NULL; - } else if (h->pgno == last_pgno) { - if ((ret = __memp_fput(mpf, h, DB_MPOOL_DISCARD)) != 0) - goto err; - /* Give the page back to the OS. */ - if ((ret = __memp_ftruncate(mpf, last_pgno, 0)) != 0) - goto err; - DB_ASSERT(meta->pgno == PGNO_BASE_MD); - meta->last_pgno--; - h = NULL; - } else -#endif - - { - /* - * If we are not truncating the page then we - * reinitialize it and put it at the head of - * the free list. - */ - P_INIT(h, dbp->pgsize, - h->pgno, PGNO_INVALID, next_pgno, 0, P_INVALID); -#ifdef DIAGNOSTIC - memset((u_int8_t *) h + P_OVERHEAD(dbp), - CLEAR_BYTE, dbp->pgsize - P_OVERHEAD(dbp)); -#endif - if (prev_pgno == PGNO_INVALID) - meta->free = h->pgno; - else - NEXT_PGNO(meta) = h->pgno; - } - - /* Discard the metadata or previous page. */ -err1: if (meta != NULL && (t_ret = - __memp_fput(mpf, (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; - - /* Discard the caller's page reference. */ - dirty_flag = DB_MPOOL_DIRTY; -err: if (h != NULL && - (t_ret = __memp_fput(mpf, h, dirty_flag)) != 0 && ret == 0) - ret = t_ret; - - /* - * XXX - * We have to unlock the caller's page in the caller! - */ - return (ret); -} - -#ifdef HAVE_FTRUNCATE -/* - * __db_freelist_pos -- find the position of a page in the freelist. - * The list is sorted, we do a binary search. 
- * - * PUBLIC: #ifdef HAVE_FTRUNCATE - * PUBLIC: void __db_freelist_pos __P((db_pgno_t, - * PUBLIC: db_pgno_t *, u_int32_t, u_int32_t *)); - * PUBLIC: #endif - */ -void -__db_freelist_pos(pgno, list, nelem, posp) - db_pgno_t pgno; - db_pgno_t *list; - u_int32_t nelem; - u_int32_t *posp; -{ - u_int32_t base, indx, lim; - - indx = 0; - for (base = 0, lim = nelem; lim != 0; lim >>= 1) { - indx = base + (lim >> 1); - if (pgno == list[indx]) { - *posp = indx; - return; - } - if (pgno > list[indx]) { - base = indx + 1; - --lim; - } - } - if (base != 0) - base--; - *posp = base; - return; -} - -static int -__db_pglistcmp(a, b) - const void *a, *b; -{ - struct pglist *ap, *bp; - - ap = (struct pglist *)a; - bp = (struct pglist *)b; - - return ((ap->pgno > bp->pgno) ? 1 : (ap->pgno < bp->pgno) ? -1: 0); -} - -/* - * __db_freelist_sort -- sort a list of free pages. - */ -static void -__db_freelist_sort(list, nelems) - struct pglist *list; - u_int32_t nelems; -{ - qsort(list, (size_t)nelems, sizeof(struct pglist), __db_pglistcmp); -} - -/* - * __db_pg_truncate -- sort the freelist and find the truncation point. - * - * PUBLIC: #ifdef HAVE_FTRUNCATE - * PUBLIC: int __db_pg_truncate __P((DB_MPOOLFILE *, struct pglist *list, - * PUBLIC: DB_COMPACT *, u_int32_t *, db_pgno_t *, DB_LSN *, int)); - * PUBLIC: #endif - */ -int -__db_pg_truncate(mpf, list, c_data, nelemp, last_pgno, lsnp, in_recovery) - DB_MPOOLFILE *mpf; - struct pglist *list; - DB_COMPACT *c_data; - u_int32_t *nelemp; - db_pgno_t *last_pgno; - DB_LSN *lsnp; - int in_recovery; -{ - PAGE *h; - struct pglist *lp; - db_pgno_t pgno; - u_int32_t nelems; - int modified, ret; - - ret = 0; - - nelems = *nelemp; - /* Sort the list */ - __db_freelist_sort(list, nelems); - - /* Find the truncation point. */ - pgno = *last_pgno; - lp = &list[nelems - 1]; - while (nelems != 0) { - if (lp->pgno != pgno) - break; - pgno--; - nelems--; - lp--; - } - - /* - * Figure out what (if any) pages can be truncated immediately and - * record the place from which we can truncate, so we can do the - * memp_ftruncate below. We also use this to avoid ever putting - * these pages on the freelist, which we are about to relink. - */ - for (lp = list; lp < &list[nelems]; lp++) { - if ((ret = __memp_fget(mpf, &lp->pgno, 0, &h)) != 0) { - /* Page may have been truncated later. */ - if (in_recovery && ret == DB_PAGE_NOTFOUND) { - ret = 0; - continue; - } - goto err; - } - modified = 0; - if (!in_recovery || log_compare(&LSN(h), &lp->lsn) == 0) { - if (lp == &list[nelems - 1]) - NEXT_PGNO(h) = PGNO_INVALID; - else - NEXT_PGNO(h) = lp[1].pgno; - DB_ASSERT(NEXT_PGNO(h) < *last_pgno); - - LSN(h) = *lsnp; - modified = 1; - } - if ((ret = __memp_fput(mpf, h, - modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto err; - } - - if (pgno != *last_pgno) { - if ((ret = __memp_ftruncate(mpf, - pgno + 1, in_recovery ? MP_TRUNC_RECOVER : 0)) != 0) - goto err; - if (c_data) - c_data->compact_pages_truncated += *last_pgno - pgno; - *last_pgno = pgno; - } - *nelemp = nelems; - -err: return (ret); -} - -/* - * __db_free_truncate -- - * Truncate free pages at the end of the file. 
- * - * PUBLIC: #ifdef HAVE_FTRUNCATE - * PUBLIC: int __db_free_truncate __P((DB *, DB_TXN *, u_int32_t, - * PUBLIC: DB_COMPACT *, struct pglist **, u_int32_t *, db_pgno_t *)); - * PUBLIC: #endif - */ -int -__db_free_truncate(dbp, txn, flags, c_data, listp, nelemp, last_pgnop) - DB *dbp; - DB_TXN *txn; - u_int32_t flags; - DB_COMPACT *c_data; - struct pglist **listp; - u_int32_t *nelemp; - db_pgno_t *last_pgnop; -{ - DBC *dbc; - DB_ENV *dbenv; - DBMETA *meta; - DBT ddbt; - DB_LOCK metalock; - DB_LSN null_lsn; - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t pgno; - u_int32_t nelems; - struct pglist *list, *lp; - int ret, t_ret; - size_t size; - - COMPQUIET(flags, 0); - list = NULL; - meta = NULL; - dbenv = dbp->dbenv; - mpf = dbp->mpf; - h = NULL; - nelems = 0; - if (listp != NULL) { - *listp = NULL; - DB_ASSERT(nelemp != NULL); - *nelemp = 0; - } - - if ((ret = __db_cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0) - return (ret); - - pgno = PGNO_BASE_MD; - if ((ret = __db_lget(dbc, - LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0) - goto err; - if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) - goto err; - - if (last_pgnop != NULL) - *last_pgnop = meta->last_pgno; - if ((pgno = meta->free) == PGNO_INVALID) - goto done; - - size = 128; - if ((ret = __os_malloc(dbenv, size * sizeof(*list), &list)) != 0) - goto err; - lp = list; - - do { - if (lp == &list[size]) { - size *= 2; - if ((ret = __os_realloc(dbenv, - size * sizeof(*list), &list)) != 0) - goto err; - lp = &list[size / 2]; - } - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - goto err; - - lp->pgno = pgno; - lp->lsn = LSN(h); - pgno = NEXT_PGNO(h); - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; - lp++; - } while (pgno != PGNO_INVALID); - nelems = (u_int32_t)(lp - list); - - /* Log the current state of the free list */ - if (DBC_LOGGING(dbc)) { - ddbt.data = list; - ddbt.size = nelems * sizeof(*lp); - ZERO_LSN(null_lsn); - if ((ret = __db_pg_sort_log(dbp, - dbc->txn, &LSN(meta), DB_FLUSH, PGNO_BASE_MD, &LSN(meta), - PGNO_INVALID, &null_lsn, meta->last_pgno, &ddbt)) != 0) - goto err; - } else - LSN_NOT_LOGGED(LSN(meta)); - - if ((ret = __db_pg_truncate(mpf, list, c_data, - &nelems, &meta->last_pgno, &LSN(meta), 0)) != 0) - goto err; - - if (nelems == 0) - meta->free = PGNO_INVALID; - else - meta->free = list[0].pgno; - -done: if (last_pgnop != NULL) - *last_pgnop = meta->last_pgno; - - /* - * The truncate point is the number of pages in the free - * list back from the last page. The number of pages - * in the free list are the number that we can swap in. 
- */ - if (c_data) - c_data->compact_truncate = (u_int32_t)meta->last_pgno - nelems; - - if (nelems != 0 && listp != NULL) { - *listp = list; - *nelemp = nelems; - list = NULL; - } - -err: if (list != NULL) - __os_free(dbenv, list); - if (meta != NULL && (t_ret = - __memp_fput(mpf, (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -static int -__db_truncate_freelist(dbc, meta, h, list, start, nelem) - DBC *dbc; - DBMETA *meta; - PAGE *h; - db_pgno_t *list; - u_int32_t start, nelem; -{ - DB *dbp; - DB_LSN null_lsn; - DB_MPOOLFILE *mpf; - DBT ddbt; - PAGE *last_free, *pg; - db_pgno_t *lp; - struct pglist *plist, *pp; - int ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - plist = NULL; - last_free = NULL; - - if (start != 0 && - (ret = __memp_fget(mpf, &list[start - 1], 0, &last_free)) != 0) - goto err; - - if (DBC_LOGGING(dbc)) { - if ((ret = __os_malloc(dbp->dbenv, - (nelem - start) * sizeof(*pp), &plist)) != 0) - goto err; - - pp = plist; - for (lp = &list[start]; lp < &list[nelem]; lp++) { - pp->pgno = *lp; - if ((ret = __memp_fget(mpf, lp, 0, &pg)) != 0) - goto err; - pp->lsn = LSN(pg); - if ((ret = __memp_fput(mpf, pg, DB_MPOOL_DISCARD)) != 0) - goto err; - pp++; - } - ddbt.data = plist; - ddbt.size = (nelem - start) * sizeof(*pp); - ZERO_LSN(null_lsn); - if (last_free != NULL) { - if ((ret = __db_pg_sort_log(dbp, dbc->txn, &LSN(meta), - DB_FLUSH, PGNO(meta), &LSN(meta), PGNO(last_free), - &LSN(last_free), meta->last_pgno, &ddbt)) != 0) - goto err; - } else if ((ret = __db_pg_sort_log(dbp, dbc->txn, - &LSN(meta), DB_FLUSH, PGNO(meta), &LSN(meta), - PGNO_INVALID, &null_lsn, meta->last_pgno, &ddbt)) != 0) - goto err; - } else - LSN_NOT_LOGGED(LSN(meta)); - if (last_free != NULL) - LSN(last_free) = LSN(meta); - - if ((ret = __memp_fput(mpf, h, DB_MPOOL_DISCARD)) != 0) - goto err; - h = NULL; - if ((ret = __memp_ftruncate(mpf, list[start], 0)) != 0) - goto err; - meta->last_pgno = list[start] - 1; - - if (start == 0) - meta->free = PGNO_INVALID; - else { - NEXT_PGNO(last_free) = PGNO_INVALID; - if ((ret = __memp_fput(mpf, last_free, DB_MPOOL_DIRTY)) != 0) - goto err; - last_free = NULL; - } - - /* Shrink the number of elements in the list. */ - ret = __memp_extend_freelist(mpf, start, &list); - -err: if (plist != NULL) - __os_free(dbp->dbenv, plist); - - /* We need to put the page on error. */ - if (h != NULL) - (void)__memp_fput(mpf, h, 0); - if (last_free != NULL) - (void)__memp_fput(mpf, last_free, 0); - - return (ret); -} -#endif - -#ifdef DEBUG -/* - * __db_lprint -- - * Print out the list of locks currently held by a cursor. - * - * PUBLIC: int __db_lprint __P((DBC *)); - */ -int -__db_lprint(dbc) - DBC *dbc; -{ - DB_ENV *dbenv; - DB *dbp; - DB_LOCKREQ req; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - - if (LOCKING_ON(dbenv)) { - req.op = DB_LOCK_DUMP; - (void)__lock_vec(dbenv, dbc->locker, 0, &req, 1, NULL); - } - return (0); -} -#endif - -/* - * __db_lget -- - * The standard lock get call. 
- * - * PUBLIC: int __db_lget __P((DBC *, - * PUBLIC: int, db_pgno_t, db_lockmode_t, u_int32_t, DB_LOCK *)); - */ -int -__db_lget(dbc, action, pgno, mode, lkflags, lockp) - DBC *dbc; - int action; - db_pgno_t pgno; - db_lockmode_t mode; - u_int32_t lkflags; - DB_LOCK *lockp; -{ - DB *dbp; - DB_ENV *dbenv; - DB_LOCKREQ couple[3], *reqp; - DB_TXN *txn; - int has_timeout, i, ret; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - txn = dbc->txn; - - /* - * We do not always check if we're configured for locking before - * calling __db_lget to acquire the lock. - */ - if (CDB_LOCKING(dbenv) || - !LOCKING_ON(dbenv) || F_ISSET(dbc, DBC_COMPENSATE) || - (F_ISSET(dbc, DBC_RECOVER) && - (action != LCK_ROLLBACK || IS_REP_CLIENT(dbenv))) || - (action != LCK_ALWAYS && F_ISSET(dbc, DBC_OPD))) { - LOCK_INIT(*lockp); - return (0); - } - - dbc->lock.pgno = pgno; - if (lkflags & DB_LOCK_RECORD) - dbc->lock.type = DB_RECORD_LOCK; - else - dbc->lock.type = DB_PAGE_LOCK; - lkflags &= ~DB_LOCK_RECORD; - if (action == LCK_ROLLBACK) - lkflags |= DB_LOCK_ABORT; - - /* - * If the transaction enclosing this cursor has DB_LOCK_NOWAIT set, - * pass that along to the lock call. - */ - if (DB_NONBLOCK(dbc)) - lkflags |= DB_LOCK_NOWAIT; - - if (F_ISSET(dbc, DBC_READ_UNCOMMITTED) && mode == DB_LOCK_READ) - mode = DB_LOCK_READ_UNCOMMITTED; - - has_timeout = F_ISSET(dbc, DBC_RECOVER) || - (txn != NULL && F_ISSET(txn, TXN_LOCKTIMEOUT)); - - /* - * Transactional locking. - * Hold on to the previous read lock only if we are in full isolation. - * COUPLE_ALWAYS indicates we are holding an interior node which need - * not be isolated. - * Downgrade write locks if we are supporting dirty readers. - */ - if ((action != LCK_COUPLE && action != LCK_COUPLE_ALWAYS) || - !LOCK_ISSET(*lockp)) - action = 0; - else if (dbc->txn == NULL || action == LCK_COUPLE_ALWAYS) - action = LCK_COUPLE; - else if (F_ISSET(dbc, - DBC_READ_COMMITTED) && lockp->mode == DB_LOCK_READ) - action = LCK_COUPLE; - else if (F_ISSET(dbc, - DBC_READ_UNCOMMITTED) && lockp->mode == DB_LOCK_READ_UNCOMMITTED) - action = LCK_COUPLE; - else if (F_ISSET(dbc->dbp, - DB_AM_READ_UNCOMMITTED) && lockp->mode == DB_LOCK_WRITE) - action = LCK_DOWNGRADE; - else - action = 0; - - i = 0; - switch (action) { - default: - if (has_timeout) - goto couple; - ret = __lock_get(dbenv, - dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp); - break; - - case LCK_DOWNGRADE: - couple[0].op = DB_LOCK_GET; - couple[0].obj = NULL; - couple[0].lock = *lockp; - couple[0].mode = DB_LOCK_WWRITE; - UMRW_SET(couple[0].timeout); - i++; - /* FALLTHROUGH */ - case LCK_COUPLE: -couple: couple[i].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET; - couple[i].obj = &dbc->lock_dbt; - couple[i].mode = mode; - UMRW_SET(couple[i].timeout); - i++; - if (has_timeout) - couple[0].timeout = - F_ISSET(dbc, DBC_RECOVER) ? 0 : txn->lock_timeout; - if (action == LCK_COUPLE || action == LCK_DOWNGRADE) { - couple[i].op = DB_LOCK_PUT; - couple[i].lock = *lockp; - i++; - } - - ret = __lock_vec(dbenv, - dbc->locker, lkflags, couple, i, &reqp); - if (ret == 0 || reqp == &couple[i - 1]) - *lockp = i == 1 ? couple[0].lock : couple[i - 2].lock; - break; - } - - if (txn != NULL && ret == DB_LOCK_DEADLOCK) - F_SET(txn, TXN_DEADLOCK); - return ((ret == DB_LOCK_NOTGRANTED && - !F_ISSET(dbenv, DB_ENV_TIME_NOTGRANTED)) ? DB_LOCK_DEADLOCK : ret); -} - -/* - * __db_lput -- - * The standard lock put call. 
- * - * PUBLIC: int __db_lput __P((DBC *, DB_LOCK *)); - */ -int -__db_lput(dbc, lockp) - DBC *dbc; - DB_LOCK *lockp; -{ - DB_ENV *dbenv; - DB_LOCKREQ couple[2], *reqp; - int action, ret; - - /* - * Transactional locking. - * Hold on to the read locks only if we are in full isolation. - * Downgrade write locks if we are supporting dirty readers. - */ - if (F_ISSET(dbc->dbp, - DB_AM_READ_UNCOMMITTED) && lockp->mode == DB_LOCK_WRITE) - action = LCK_DOWNGRADE; - else if (dbc->txn == NULL) - action = LCK_COUPLE; - else if (F_ISSET(dbc, - DBC_READ_COMMITTED) && lockp->mode == DB_LOCK_READ) - action = LCK_COUPLE; - else if (F_ISSET(dbc, - DBC_READ_UNCOMMITTED) && lockp->mode == DB_LOCK_READ_UNCOMMITTED) - action = LCK_COUPLE; - else - action = 0; - - dbenv = dbc->dbp->dbenv; - switch (action) { - case LCK_COUPLE: - ret = __lock_put(dbenv, lockp); - break; - case LCK_DOWNGRADE: - couple[0].op = DB_LOCK_GET; - couple[0].obj = NULL; - couple[0].mode = DB_LOCK_WWRITE; - couple[0].lock = *lockp; - UMRW_SET(couple[0].timeout); - couple[1].op = DB_LOCK_PUT; - couple[1].lock = *lockp; - ret = __lock_vec(dbenv, dbc->locker, 0, couple, 2, &reqp); - if (ret == 0 || reqp == &couple[1]) - *lockp = couple[0].lock; - break; - default: - ret = 0; - break; - } - - return (ret); -} diff --git a/storage/bdb/db/db_method.c b/storage/bdb/db/db_method.c deleted file mode 100644 index 141392148e7..00000000000 --- a/storage/bdb/db/db_method.c +++ /dev/null @@ -1,867 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_method.c,v 12.15 2005/11/08 03:24:58 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#ifdef HAVE_RPC -#include -#endif - -#include -#endif - -#ifdef HAVE_RPC -#include "db_server.h" -#endif - -#include "db_int.h" -#include "dbinc/crypto.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" -#include "dbinc/qam.h" -#include "dbinc/txn.h" - -#ifdef HAVE_RPC -#include "dbinc_auto/rpc_client_ext.h" -#endif - -static int __db_get_byteswapped __P((DB *, int *)); -static int __db_get_dbname __P((DB *, const char **, const char **)); -static DB_ENV *__db_get_env __P((DB *)); -static DB_MPOOLFILE *__db_get_mpf __P((DB *)); -static int __db_get_transactional __P((DB *)); -static int __db_get_type __P((DB *, DBTYPE *dbtype)); -static int __db_init __P((DB_ENV *, DB *, u_int32_t)); -static int __db_set_alloc __P((DB *, void *(*)(size_t), - void *(*)(void *, size_t), void (*)(void *))); -static int __db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t))); -static int __db_get_cachesize __P((DB *, u_int32_t *, u_int32_t *, int *)); -static int __db_set_cachesize __P((DB *, u_int32_t, u_int32_t, int)); -static int __db_set_dup_compare - __P((DB *, int (*)(DB *, const DBT *, const DBT *))); -static int __db_get_encrypt_flags __P((DB *, u_int32_t *)); -static int __db_set_encrypt __P((DB *, const char *, u_int32_t)); -static int __db_set_feedback __P((DB *, void (*)(DB *, int, int))); -static void __db_map_flags __P((DB *, u_int32_t *, u_int32_t *)); -static int __db_get_pagesize __P((DB *, u_int32_t *)); -static int __db_set_paniccall __P((DB *, void (*)(DB_ENV *, int))); -static void __db_set_errcall - __P((DB *, void (*)(const DB_ENV *, const char *, const char *))); -static void __db_get_errfile __P((DB *, FILE **)); -static void __db_set_errfile __P((DB *, 
FILE *)); -static void __db_get_errpfx __P((DB *, const char **)); -static void __db_set_errpfx __P((DB *, const char *)); -static void __db_set_msgcall - __P((DB *, void (*)(const DB_ENV *, const char *))); -static void __db_get_msgfile __P((DB *, FILE **)); -static void __db_set_msgfile __P((DB *, FILE *)); -static void __dbh_err __P((DB *, int, const char *, ...)); -static void __dbh_errx __P((DB *, const char *, ...)); - -/* - * db_create -- - * DB constructor. - * - * EXTERN: int db_create __P((DB **, DB_ENV *, u_int32_t)); - */ -int -db_create(dbpp, dbenv, flags) - DB **dbpp; - DB_ENV *dbenv; - u_int32_t flags; -{ - DB *dbp; - DB_THREAD_INFO *ip; - DB_REP *db_rep; - int ret; - - /* Check for invalid function flags. */ - switch (flags) { - case 0: - break; - case DB_XA_CREATE: - if (dbenv != NULL) { - __db_err(dbenv, - "XA applications may not specify an environment to db_create"); - return (EINVAL); - } - - /* - * If it's an XA database, open it within the XA environment, - * taken from the global list of environments. (When the XA - * transaction manager called our xa_start() routine the - * "current" environment was moved to the start of the list. - */ - dbenv = TAILQ_FIRST(&DB_GLOBAL(db_envq)); - break; - default: - return (__db_ferr(dbenv, "db_create", 0)); - } - - ip = NULL; - if (dbenv != NULL) - ENV_ENTER(dbenv, ip); - /* Allocate the DB. */ - if ((ret = __os_calloc(dbenv, 1, sizeof(*dbp), &dbp)) != 0) { - if (dbenv != NULL) - ENV_LEAVE(dbenv, ip); - return (ret); - } - - if ((ret = __db_init(dbenv, dbp, flags)) != 0) - goto err; - - /* If we don't have an environment yet, allocate a local one. */ - if (dbenv == NULL) { - if ((ret = db_env_create(&dbenv, 0)) != 0) - goto err; - F_SET(dbenv, DB_ENV_DBLOCAL); - ENV_ENTER(dbenv, ip); - } - dbp->dbenv = dbenv; - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - ++dbenv->db_ref; - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - /* - * Set the replication timestamp; it's 0 if we're not in a replicated - * environment. Don't acquire a lock to read the value, even though - * it's opaque: all we check later is value equality, nothing else. - */ - dbp->timestamp = REP_ON(dbenv) ? - ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->rep_timestamp : 0; - /* - * Set the replication generation number for fid management; valid - * replication generations start at 1. Don't acquire a lock to - * read the value. All we check later is value equality. - */ - db_rep = dbenv->rep_handle; - dbp->fid_gen = - (REP_ON(dbenv) && db_rep->region != NULL) ? - ((REP *)db_rep->region)->gen : 0; - - /* If not RPC, open a backing DB_MPOOLFILE handle in the memory pool. */ - if (!RPC_ON(dbenv) && - (ret = __memp_fcreate(dbenv, &dbp->mpf)) != 0) - goto err; - - dbp->type = DB_UNKNOWN; - - *dbpp = dbp; - return (0); - -err: if (dbp->mpf != NULL) - (void)__memp_fclose(dbp->mpf, 0); - if (dbenv != NULL && F_ISSET(dbenv, DB_ENV_DBLOCAL)) - (void)__env_close(dbenv, 0); - __os_free(dbenv, dbp); - *dbpp = NULL; - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_init -- - * Initialize a DB structure. 
- */ -static int -__db_init(dbenv, dbp, flags) - DB_ENV *dbenv; - DB *dbp; - u_int32_t flags; -{ - int ret; - - dbp->lid = DB_LOCK_INVALIDID; - LOCK_INIT(dbp->handle_lock); - - TAILQ_INIT(&dbp->free_queue); - TAILQ_INIT(&dbp->active_queue); - TAILQ_INIT(&dbp->join_queue); - LIST_INIT(&dbp->s_secondaries); - - FLD_SET(dbp->am_ok, - DB_OK_BTREE | DB_OK_HASH | DB_OK_QUEUE | DB_OK_RECNO); - - /* DB PUBLIC HANDLE LIST BEGIN */ - dbp->associate = __db_associate_pp; - dbp->close = __db_close_pp; - dbp->compact = __db_compact_pp; - dbp->cursor = __db_cursor_pp; - dbp->del = __db_del_pp; - dbp->dump = __db_dump_pp; - dbp->err = __dbh_err; - dbp->errx = __dbh_errx; - dbp->fd = __db_fd_pp; - dbp->get = __db_get_pp; - dbp->get_byteswapped = __db_get_byteswapped; - dbp->get_cachesize = __db_get_cachesize; - dbp->get_dbname = __db_get_dbname; - dbp->get_encrypt_flags = __db_get_encrypt_flags; - dbp->get_env = __db_get_env; - dbp->get_errfile = __db_get_errfile; - dbp->get_errpfx = __db_get_errpfx; - dbp->get_flags = __db_get_flags; - dbp->get_lorder = __db_get_lorder; - dbp->get_mpf = __db_get_mpf; - dbp->get_msgfile = __db_get_msgfile; - dbp->get_open_flags = __db_get_open_flags; - dbp->get_pagesize = __db_get_pagesize; - dbp->get_transactional = __db_get_transactional; - dbp->get_type = __db_get_type; - dbp->join = __db_join_pp; - dbp->key_range = __db_key_range_pp; - dbp->open = __db_open_pp; - dbp->pget = __db_pget_pp; - dbp->put = __db_put_pp; - dbp->remove = __db_remove_pp; - dbp->rename = __db_rename_pp; - dbp->set_alloc = __db_set_alloc; - dbp->set_append_recno = __db_set_append_recno; - dbp->set_cachesize = __db_set_cachesize; - dbp->set_dup_compare = __db_set_dup_compare; - dbp->set_encrypt = __db_set_encrypt; - dbp->set_errcall = __db_set_errcall; - dbp->set_errfile = __db_set_errfile; - dbp->set_errpfx = __db_set_errpfx; - dbp->set_feedback = __db_set_feedback; - dbp->set_flags = __db_set_flags; - dbp->set_lorder = __db_set_lorder; - dbp->set_msgcall = __db_set_msgcall; - dbp->set_msgfile = __db_set_msgfile; - dbp->set_pagesize = __db_set_pagesize; - dbp->set_paniccall = __db_set_paniccall; - dbp->stat = __db_stat_pp; - dbp->stat_print = __db_stat_print_pp; - dbp->sync = __db_sync_pp; - dbp->truncate = __db_truncate_pp; - dbp->upgrade = __db_upgrade_pp; - dbp->verify = __db_verify_pp; - /* DB PUBLIC HANDLE LIST END */ - - /* Access method specific. */ - if ((ret = __bam_db_create(dbp)) != 0) - return (ret); - if ((ret = __ham_db_create(dbp)) != 0) - return (ret); - if ((ret = __qam_db_create(dbp)) != 0) - return (ret); - - /* - * XA specific: must be last, as we replace methods set by the - * access methods. - */ - if (LF_ISSET(DB_XA_CREATE) && (ret = __db_xa_create(dbp)) != 0) - return (ret); - -#ifdef HAVE_RPC - /* - * RPC specific: must be last, as we replace methods set by the - * access methods. - */ - if (dbenv != NULL && RPC_ON(dbenv)) { - __dbcl_dbp_init(dbp); - /* - * !!! - * We wrap the DB->open method for RPC, and the rpc.src file - * can't handle that. - */ - dbp->open = __dbcl_db_open_wrap; - if ((ret = __dbcl_db_create(dbp, dbenv, flags)) != 0) - return (ret); - } -#else - COMPQUIET(dbenv, NULL); -#endif - - return (0); -} - -/* - * __dbh_am_chk -- - * Error if an unreasonable method is called. - * - * PUBLIC: int __dbh_am_chk __P((DB *, u_int32_t)); - */ -int -__dbh_am_chk(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - /* - * We start out allowing any access methods to be called, and as the - * application calls the methods the options become restricted. 
The - * idea is to quit as soon as an illegal method combination is called. - */ - if ((LF_ISSET(DB_OK_BTREE) && FLD_ISSET(dbp->am_ok, DB_OK_BTREE)) || - (LF_ISSET(DB_OK_HASH) && FLD_ISSET(dbp->am_ok, DB_OK_HASH)) || - (LF_ISSET(DB_OK_QUEUE) && FLD_ISSET(dbp->am_ok, DB_OK_QUEUE)) || - (LF_ISSET(DB_OK_RECNO) && FLD_ISSET(dbp->am_ok, DB_OK_RECNO))) { - FLD_CLR(dbp->am_ok, ~flags); - return (0); - } - - __db_err(dbp->dbenv, - "call implies an access method which is inconsistent with previous calls"); - return (EINVAL); -} - -/* - * __dbh_err -- - * Error message, including the standard error string. - */ -static void -#ifdef STDC_HEADERS -__dbh_err(DB *dbp, int error, const char *fmt, ...) -#else -__dbh_err(dbp, error, fmt, va_alist) - DB *dbp; - int error; - const char *fmt; - va_dcl -#endif -{ - DB_REAL_ERR(dbp->dbenv, error, 1, 1, fmt); -} - -/* - * __dbh_errx -- - * Error message. - */ -static void -#ifdef STDC_HEADERS -__dbh_errx(DB *dbp, const char *fmt, ...) -#else -__dbh_errx(dbp, fmt, va_alist) - DB *dbp; - const char *fmt; - va_dcl -#endif -{ - DB_REAL_ERR(dbp->dbenv, 0, 0, 1, fmt); -} - -/* - * __db_get_byteswapped -- - * Return if database requires byte swapping. - */ -static int -__db_get_byteswapped(dbp, isswapped) - DB *dbp; - int *isswapped; -{ - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get_byteswapped"); - - *isswapped = F_ISSET(dbp, DB_AM_SWAP) ? 1 : 0; - return (0); -} - -/* - * __db_get_dbname -- - * Get the name of the database as passed to DB->open. - */ -static int -__db_get_dbname(dbp, fnamep, dnamep) - DB *dbp; - const char **fnamep, **dnamep; -{ - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get_dbname"); - - if (fnamep != NULL) - *fnamep = dbp->fname; - if (dnamep != NULL) - *dnamep = dbp->dname; - return (0); -} - -/* - * __db_get_env -- - * Get the DB_ENV handle that was passed to db_create. - */ -static DB_ENV * -__db_get_env(dbp) - DB *dbp; -{ - return (dbp->dbenv); -} - -/* - * __db_get_mpf -- - * Get the underlying DB_MPOOLFILE handle. - */ -static DB_MPOOLFILE * -__db_get_mpf(dbp) - DB *dbp; -{ - return (dbp->mpf); -} - -/* - * get_transactional -- - * Get whether this database was created in a transaction. - */ -static int -__db_get_transactional(dbp) - DB *dbp; -{ - return (F_ISSET(dbp, DB_AM_TXN) ? 1 : 0); -} - -/* - * __db_get_type -- - * Return type of underlying database. - */ -static int -__db_get_type(dbp, dbtype) - DB *dbp; - DBTYPE *dbtype; -{ - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get_type"); - - *dbtype = dbp->type; - return (0); -} - -/* - * __db_set_append_recno -- - * Set record number append routine. - */ -static int -__db_set_append_recno(dbp, func) - DB *dbp; - int (*func) __P((DB *, DBT *, db_recno_t)); -{ - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_append_recno"); - DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO); - - dbp->db_append_recno = func; - - return (0); -} - -/* - * __db_get_cachesize -- - * Get underlying cache size. - */ -static int -__db_get_cachesize(dbp, cache_gbytesp, cache_bytesp, ncachep) - DB *dbp; - u_int32_t *cache_gbytesp, *cache_bytesp; - int *ncachep; -{ - DB_ILLEGAL_IN_ENV(dbp, "DB->get_cachesize"); - - return (__memp_get_cachesize(dbp->dbenv, - cache_gbytesp, cache_bytesp, ncachep)); -} - -/* - * __db_set_cachesize -- - * Set underlying cache size. 
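/*
 * Illustrative sketch, not part of the removed file: the access-method
 * check above (__dbh_am_chk) keeps a bitmask of access methods that are
 * still possible and intersects it with the set each handle call is
 * compatible with, failing as soon as the intersection becomes empty.
 * The OK_* bits below are hypothetical stand-ins for the real DB_OK_*
 * flags; only the shape of the check is the same.
 */
#include <stdio.h>

#define OK_BTREE 0x01
#define OK_HASH  0x02
#define OK_QUEUE 0x04
#define OK_RECNO 0x08

static unsigned am_ok = OK_BTREE | OK_HASH | OK_QUEUE | OK_RECNO;

/* Return 0 if the call is consistent with earlier calls, -1 otherwise. */
static int
am_chk(unsigned callers_methods)
{
	if ((am_ok & callers_methods) == 0)
		return (-1);		/* Illegal combination: reject. */
	am_ok &= callers_methods;	/* Restrict what later calls may imply. */
	return (0);
}

int
main(void)
{
	/* set_dup_compare implies Btree/Hash; set_append_recno implies Queue/Recno. */
	printf("dup_compare:  %d\n", am_chk(OK_BTREE | OK_HASH));	/* 0  */
	printf("append_recno: %d\n", am_chk(OK_QUEUE | OK_RECNO));	/* -1 */
	return (0);
}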
- */ -static int -__db_set_cachesize(dbp, cache_gbytes, cache_bytes, ncache) - DB *dbp; - u_int32_t cache_gbytes, cache_bytes; - int ncache; -{ - DB_ILLEGAL_IN_ENV(dbp, "DB->set_cachesize"); - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_cachesize"); - - return (__memp_set_cachesize( - dbp->dbenv, cache_gbytes, cache_bytes, ncache)); -} - -/* - * __db_set_dup_compare -- - * Set duplicate comparison routine. - */ -static int -__db_set_dup_compare(dbp, func) - DB *dbp; - int (*func) __P((DB *, const DBT *, const DBT *)); -{ - int ret; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->dup_compare"); - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH); - - if ((ret = __db_set_flags(dbp, DB_DUPSORT)) != 0) - return (ret); - - dbp->dup_compare = func; - - return (0); -} - -/* - * __db_get_encrypt_flags -- - */ -static int -__db_get_encrypt_flags(dbp, flagsp) - DB *dbp; - u_int32_t *flagsp; -{ - DB_ILLEGAL_IN_ENV(dbp, "DB->get_encrypt_flags"); - - return (__env_get_encrypt_flags(dbp->dbenv, flagsp)); -} - -/* - * __db_set_encrypt -- - * Set database passwd. - */ -static int -__db_set_encrypt(dbp, passwd, flags) - DB *dbp; - const char *passwd; - u_int32_t flags; -{ - DB_CIPHER *db_cipher; - int ret; - - DB_ILLEGAL_IN_ENV(dbp, "DB->set_encrypt"); - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_encrypt"); - - if ((ret = __env_set_encrypt(dbp->dbenv, passwd, flags)) != 0) - return (ret); - - /* - * In a real env, this gets initialized with the region. In a local - * env, we must do it here. - */ - db_cipher = (DB_CIPHER *)dbp->dbenv->crypto_handle; - if (!F_ISSET(db_cipher, CIPHER_ANY) && - (ret = db_cipher->init(dbp->dbenv, db_cipher)) != 0) - return (ret); - - return (__db_set_flags(dbp, DB_ENCRYPT)); -} - -static void -__db_set_errcall(dbp, errcall) - DB *dbp; - void (*errcall) __P((const DB_ENV *, const char *, const char *)); -{ - __env_set_errcall(dbp->dbenv, errcall); -} - -static void -__db_get_errfile(dbp, errfilep) - DB *dbp; - FILE **errfilep; -{ - __env_get_errfile(dbp->dbenv, errfilep); -} - -static void -__db_set_errfile(dbp, errfile) - DB *dbp; - FILE *errfile; -{ - __env_set_errfile(dbp->dbenv, errfile); -} - -static void -__db_get_errpfx(dbp, errpfxp) - DB *dbp; - const char **errpfxp; -{ - __env_get_errpfx(dbp->dbenv, errpfxp); -} - -static void -__db_set_errpfx(dbp, errpfx) - DB *dbp; - const char *errpfx; -{ - __env_set_errpfx(dbp->dbenv, errpfx); -} - -static int -__db_set_feedback(dbp, feedback) - DB *dbp; - void (*feedback) __P((DB *, int, int)); -{ - dbp->db_feedback = feedback; - return (0); -} - -/* - * __db_map_flags -- - * Maps between public and internal flag values. - * This function doesn't check for validity, so it can't fail. - */ -static void -__db_map_flags(dbp, inflagsp, outflagsp) - DB *dbp; - u_int32_t *inflagsp, *outflagsp; -{ - COMPQUIET(dbp, NULL); - - if (FLD_ISSET(*inflagsp, DB_CHKSUM)) { - FLD_SET(*outflagsp, DB_AM_CHKSUM); - FLD_CLR(*inflagsp, DB_CHKSUM); - } - if (FLD_ISSET(*inflagsp, DB_ENCRYPT)) { - FLD_SET(*outflagsp, DB_AM_ENCRYPT | DB_AM_CHKSUM); - FLD_CLR(*inflagsp, DB_ENCRYPT); - } - if (FLD_ISSET(*inflagsp, DB_TXN_NOT_DURABLE)) { - FLD_SET(*outflagsp, DB_AM_NOT_DURABLE); - FLD_CLR(*inflagsp, DB_TXN_NOT_DURABLE); - } -} - -/* - * __db_get_flags -- - * The DB->get_flags method. 
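/*
 * Illustrative sketch, not part of the removed file: __db_map_flags above
 * translates public flag bits into internal DB_AM_* bits and clears every
 * bit it consumes, so the caller can treat anything left in the input as an
 * unrecognized flag (which is exactly how __db_set_flags reports errors).
 * The PUB_ and AM_ bit values below are hypothetical.
 */
#include <stdio.h>

#define PUB_CHKSUM	0x01
#define PUB_ENCRYPT	0x02

#define AM_CHKSUM	0x10
#define AM_ENCRYPT	0x20

static void
map_flags(unsigned *inp, unsigned *outp)
{
	if (*inp & PUB_CHKSUM) {
		*outp |= AM_CHKSUM;
		*inp &= ~(unsigned)PUB_CHKSUM;
	}
	if (*inp & PUB_ENCRYPT) {
		/* Encryption implies checksumming internally. */
		*outp |= AM_ENCRYPT | AM_CHKSUM;
		*inp &= ~(unsigned)PUB_ENCRYPT;
	}
}

int
main(void)
{
	unsigned in = PUB_ENCRYPT | 0x80;	/* 0x80: bogus, unknown flag */
	unsigned out = 0;

	map_flags(&in, &out);
	/* Prints internal=0x30 leftover=0x80: the leftover bit signals the error. */
	printf("internal=%#x leftover=%#x\n", out, in);
	return (0);
}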
- * - * PUBLIC: int __db_get_flags __P((DB *, u_int32_t *)); - */ -int -__db_get_flags(dbp, flagsp) - DB *dbp; - u_int32_t *flagsp; -{ - static const u_int32_t db_flags[] = { - DB_CHKSUM, - DB_DUP, - DB_DUPSORT, - DB_ENCRYPT, - DB_INORDER, - DB_RECNUM, - DB_RENUMBER, - DB_REVSPLITOFF, - DB_SNAPSHOT, - DB_TXN_NOT_DURABLE, - 0 - }; - u_int32_t f, flags, mapped_flag; - int i; - - flags = 0; - for (i = 0; (f = db_flags[i]) != 0; i++) { - mapped_flag = 0; - __db_map_flags(dbp, &f, &mapped_flag); - __bam_map_flags(dbp, &f, &mapped_flag); - __ram_map_flags(dbp, &f, &mapped_flag); -#ifdef HAVE_QUEUE - __qam_map_flags(dbp, &f, &mapped_flag); -#endif - DB_ASSERT(f == 0); - if (F_ISSET(dbp, mapped_flag) == mapped_flag) - LF_SET(db_flags[i]); - } - - *flagsp = flags; - return (0); -} - -/* - * __db_set_flags -- - * DB->set_flags. - * - * PUBLIC: int __db_set_flags __P((DB *, u_int32_t)); - */ -int -__db_set_flags(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - int ret; - - dbenv = dbp->dbenv; - - if (LF_ISSET(DB_ENCRYPT) && !CRYPTO_ON(dbenv)) { - __db_err(dbenv, - "Database environment not configured for encryption"); - return (EINVAL); - } - if (LF_ISSET(DB_TXN_NOT_DURABLE)) - ENV_REQUIRES_CONFIG(dbenv, - dbenv->tx_handle, "DB_NOT_DURABLE", DB_INIT_TXN); - - __db_map_flags(dbp, &flags, &dbp->flags); - - if ((ret = __bam_set_flags(dbp, &flags)) != 0) - return (ret); - if ((ret = __ram_set_flags(dbp, &flags)) != 0) - return (ret); -#ifdef HAVE_QUEUE - if ((ret = __qam_set_flags(dbp, &flags)) != 0) - return (ret); -#endif - - return (flags == 0 ? 0 : __db_ferr(dbenv, "DB->set_flags", 0)); -} - -/* - * __db_get_lorder -- - * Get whether lorder is swapped or not. - * - * PUBLIC: int __db_get_lorder __P((DB *, int *)); - */ -int -__db_get_lorder(dbp, db_lorderp) - DB *dbp; - int *db_lorderp; -{ - int ret; - - /* Flag if the specified byte order requires swapping. */ - switch (ret = __db_byteorder(dbp->dbenv, 1234)) { - case 0: - *db_lorderp = F_ISSET(dbp, DB_AM_SWAP) ? 4321 : 1234; - break; - case DB_SWAPBYTES: - *db_lorderp = F_ISSET(dbp, DB_AM_SWAP) ? 1234 : 4321; - break; - default: - return (ret); - /* NOTREACHED */ - } - - return (0); -} - -/* - * __db_set_lorder -- - * Set whether lorder is swapped or not. - * - * PUBLIC: int __db_set_lorder __P((DB *, int)); - */ -int -__db_set_lorder(dbp, db_lorder) - DB *dbp; - int db_lorder; -{ - int ret; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_lorder"); - - /* Flag if the specified byte order requires swapping. 
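/*
 * Illustrative sketch, not part of the removed file: the lorder methods
 * above describe byte order as 1234 (little-endian) or 4321 (big-endian)
 * and only record whether the database's order differs from the host's,
 * i.e. whether pages will need byte swapping.  A minimal standalone
 * version of that decision:
 */
#include <stdio.h>
#include <stdint.h>

static int
host_lorder(void)
{
	uint32_t one = 1;

	return (*(unsigned char *)&one == 1 ? 1234 : 4321);
}

int
main(void)
{
	int db_lorder = 4321;	/* byte order requested for / stored in the file */
	int need_swap = db_lorder != host_lorder();

	printf("host=%d db=%d swap=%d\n", host_lorder(), db_lorder, need_swap);
	return (0);
}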
*/ - switch (ret = __db_byteorder(dbp->dbenv, db_lorder)) { - case 0: - F_CLR(dbp, DB_AM_SWAP); - break; - case DB_SWAPBYTES: - F_SET(dbp, DB_AM_SWAP); - break; - default: - return (ret); - /* NOTREACHED */ - } - return (0); -} - -static int -__db_set_alloc(dbp, mal_func, real_func, free_func) - DB *dbp; - void *(*mal_func) __P((size_t)); - void *(*real_func) __P((void *, size_t)); - void (*free_func) __P((void *)); -{ - DB_ILLEGAL_IN_ENV(dbp, "DB->set_alloc"); - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_alloc"); - - return (__env_set_alloc(dbp->dbenv, mal_func, real_func, free_func)); -} - -static void -__db_set_msgcall(dbp, msgcall) - DB *dbp; - void (*msgcall) __P((const DB_ENV *, const char *)); -{ - __env_set_msgcall(dbp->dbenv, msgcall); -} - -static void -__db_get_msgfile(dbp, msgfilep) - DB *dbp; - FILE **msgfilep; -{ - __env_get_msgfile(dbp->dbenv, msgfilep); -} - -static void -__db_set_msgfile(dbp, msgfile) - DB *dbp; - FILE *msgfile; -{ - __env_set_msgfile(dbp->dbenv, msgfile); -} - -static int -__db_get_pagesize(dbp, db_pagesizep) - DB *dbp; - u_int32_t *db_pagesizep; -{ - *db_pagesizep = dbp->pgsize; - return (0); -} - -/* - * __db_set_pagesize -- - * DB->set_pagesize - * - * PUBLIC: int __db_set_pagesize __P((DB *, u_int32_t)); - */ -int -__db_set_pagesize(dbp, db_pagesize) - DB *dbp; - u_int32_t db_pagesize; -{ - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_pagesize"); - - if (db_pagesize < DB_MIN_PGSIZE) { - __db_err(dbp->dbenv, "page sizes may not be smaller than %lu", - (u_long)DB_MIN_PGSIZE); - return (EINVAL); - } - if (db_pagesize > DB_MAX_PGSIZE) { - __db_err(dbp->dbenv, "page sizes may not be larger than %lu", - (u_long)DB_MAX_PGSIZE); - return (EINVAL); - } - - /* - * We don't want anything that's not a power-of-2, as we rely on that - * for alignment of various types on the pages. - */ - if (!POWER_OF_TWO(db_pagesize)) { - __db_err(dbp->dbenv, "page sizes must be a power-of-2"); - return (EINVAL); - } - - /* - * XXX - * Should we be checking for a page size that's not a multiple of 512, - * so that we never try and write less than a disk sector? - */ - dbp->pgsize = db_pagesize; - - return (0); -} - -static int -__db_set_paniccall(dbp, paniccall) - DB *dbp; - void (*paniccall) __P((DB_ENV *, int)); -{ - return (__env_set_paniccall(dbp->dbenv, paniccall)); -} diff --git a/storage/bdb/db/db_open.c b/storage/bdb/db/db_open.c deleted file mode 100644 index a397c92bc53..00000000000 --- a/storage/bdb/db/db_open.c +++ /dev/null @@ -1,616 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_open.c,v 12.13 2005/10/12 17:45:53 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_swap.h" -#include "dbinc/btree.h" -#include "dbinc/crypto.h" -#include "dbinc/hmac.h" -#include "dbinc/fop.h" -#include "dbinc/hash.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/qam.h" -#include "dbinc/txn.h" - -/* - * __db_open -- - * DB->open method. - * - * This routine gets called in three different ways: - * - * 1. It can be called to open a file/database. In this case, subdb will - * be NULL and meta_pgno will be PGNO_BASE_MD. - * 2. It can be called to open a subdatabase during normal operation. 
In - * this case, name and subname will both be non-NULL and meta_pgno will - * be PGNO_BASE_MD (also PGNO_INVALID). - * 3. It can be called to open an in-memory database (name == NULL; - * subname = name). - * 4. It can be called during recovery to open a file/database, in which case - * name will be non-NULL, subname will be NULL, and meta-pgno will be - * PGNO_BASE_MD. - * 5. It can be called during recovery to open a subdatabase, in which case - * name will be non-NULL, subname may be NULL and meta-pgno will be - * a valid pgno (i.e., not PGNO_BASE_MD). - * 6. It can be called during recovery to open an in-memory database. - * - * PUBLIC: int __db_open __P((DB *, DB_TXN *, - * PUBLIC: const char *, const char *, DBTYPE, u_int32_t, int, db_pgno_t)); - */ -int -__db_open(dbp, txn, fname, dname, type, flags, mode, meta_pgno) - DB *dbp; - DB_TXN *txn; - const char *fname, *dname; - DBTYPE type; - u_int32_t flags; - int mode; - db_pgno_t meta_pgno; -{ - DB_ENV *dbenv; - int ret; - u_int32_t id; - - dbenv = dbp->dbenv; - id = TXN_INVALID; - - DB_TEST_RECOVERY(dbp, DB_TEST_PREOPEN, ret, fname); - - /* - * If the environment was configured with threads, the DB handle - * must also be free-threaded, so we force the DB_THREAD flag on. - * (See SR #2033 for why this is a requirement--recovery needs - * to be able to grab a dbp using __db_fileid_to_dbp, and it has - * no way of knowing which dbp goes with which thread, so whichever - * one it finds has to be usable in any of them.) - */ - if (F_ISSET(dbenv, DB_ENV_THREAD)) - LF_SET(DB_THREAD); - - /* Convert any DB->open flags. */ - if (LF_ISSET(DB_RDONLY)) - F_SET(dbp, DB_AM_RDONLY); - if (LF_ISSET(DB_READ_UNCOMMITTED)) - F_SET(dbp, DB_AM_READ_UNCOMMITTED); - - if (txn != NULL) - F_SET(dbp, DB_AM_TXN); - - /* Fill in the type. */ - dbp->type = type; - - /* - * If both fname and subname are NULL, it's always a create, so make - * sure that we have both DB_CREATE and a type specified. It would - * be nice if this checking were done in __db_open where most of the - * interface checking is done, but this interface (__db_dbopen) is - * used by the recovery and limbo system, so we need to safeguard - * this interface as well. - */ - if (fname == NULL) { - if (dname == NULL) { - if (!LF_ISSET(DB_CREATE)) { - __db_err(dbenv, - "DB_CREATE must be specified to create databases."); - return (ENOENT); - } - - F_SET(dbp, DB_AM_INMEM); - F_SET(dbp, DB_AM_CREATED); - - if (dbp->type == DB_UNKNOWN) { - __db_err(dbenv, - "DBTYPE of unknown without existing file"); - return (EINVAL); - } - - if (dbp->pgsize == 0) - dbp->pgsize = DB_DEF_IOSIZE; - - /* - * If the file is a temporary file and we're - * doing locking, then we have to create a - * unique file ID. We can't use our normal - * dev/inode pair (or whatever this OS uses - * in place of dev/inode pairs) because no - * backing file will be created until the - * mpool cache is filled forcing the buffers - * to disk. Grab a random locker ID to use - * as a file ID. The created ID must never - * match a potential real file ID -- we know - * it won't because real file IDs contain a - * time stamp after the dev/inode pair, and - * we're simply storing a 4-byte value. - - * !!! - * Store the locker in the file id structure - * -- we can get it from there as necessary, - * and it saves having two copies. 
- */ - if (LOCKING_ON(dbenv) && (ret = __lock_id(dbenv, - (u_int32_t *)dbp->fileid, NULL)) != 0) - return (ret); - } else - MAKE_INMEM(dbp); - - /* - * Normally we would do handle locking here, however, with - * in-memory files, we cannot do any database manipulation - * until the mpool is open, so it happens later. - */ - } else if (dname == NULL && meta_pgno == PGNO_BASE_MD) { - /* Open/create the underlying file. Acquire locks. */ - if ((ret = - __fop_file_setup(dbp, txn, fname, mode, flags, &id)) != 0) - return (ret); - } else { - if ((ret = __fop_subdb_setup(dbp, - txn, fname, dname, mode, flags)) != 0) - return (ret); - meta_pgno = dbp->meta_pgno; - } - - /* - * If we created the file, set the truncate flag for the mpool. This - * isn't for anything we've done, it's protection against stupid user - * tricks: if the user deleted a file behind Berkeley DB's back, we - * may still have pages in the mpool that match the file's "unique" ID. - * - * Note that if we're opening a subdatabase, we don't want to set - * the TRUNCATE flag even if we just created the file--we already - * opened and updated the master using access method interfaces, - * so we don't want to get rid of any pages that are in the mpool. - * If we created the file when we opened the master, we already hit - * this check in a non-subdatabase context then. - */ - if (dname == NULL && F_ISSET(dbp, DB_AM_CREATED)) - LF_SET(DB_TRUNCATE); - - /* Set up the underlying environment. */ - if ((ret = __db_dbenv_setup(dbp, txn, fname, dname, id, flags)) != 0) - return (ret); - - /* For in-memory databases, we now need to open/create the database. */ - if (F_ISSET(dbp, DB_AM_INMEM)) { - if (dname == NULL) - ret = __db_new_file(dbp, txn, NULL, NULL); - else { - id = TXN_INVALID; - if ((ret = __fop_file_setup(dbp, - txn, dname, mode, flags, &id)) == 0 && - DBENV_LOGGING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER) -#if !defined(DEBUG_ROP) - && !F_ISSET(dbp, DB_AM_RDONLY) -#endif - ) - ret = __dbreg_log_id(dbp, - txn, dbp->log_filename->id, 1); - } - if (ret != 0) - goto err; - } - - switch (dbp->type) { - case DB_BTREE: - ret = __bam_open(dbp, txn, fname, meta_pgno, flags); - break; - case DB_HASH: - ret = __ham_open(dbp, txn, fname, meta_pgno, flags); - break; - case DB_RECNO: - ret = __ram_open(dbp, txn, fname, meta_pgno, flags); - break; - case DB_QUEUE: - ret = __qam_open( - dbp, txn, fname, meta_pgno, mode, flags); - break; - case DB_UNKNOWN: - return ( - __db_unknown_type(dbenv, "__db_dbopen", dbp->type)); - } - if (ret != 0) - goto err; - - DB_TEST_RECOVERY(dbp, DB_TEST_POSTOPEN, ret, fname); - - /* - * Temporary files don't need handle locks, so we only have to check - * for a handle lock downgrade or lockevent in the case of named - * files. - */ - if (!F_ISSET(dbp, DB_AM_RECOVER) && (fname != NULL || dname != NULL) - && LOCK_ISSET(dbp->handle_lock)) { - if (txn != NULL) - ret = __txn_lockevent(dbenv, - txn, dbp, &dbp->handle_lock, dbp->lid); - else if (LOCKING_ON(dbenv)) - /* Trade write handle lock for read handle lock. */ - ret = __lock_downgrade(dbenv, - &dbp->handle_lock, DB_LOCK_READ, 0); - } -DB_TEST_RECOVERY_LABEL -err: - return (ret); -} - -/* - * __db_get_open_flags -- - * Accessor for flags passed into DB->open call - * - * PUBLIC: int __db_get_open_flags __P((DB *, u_int32_t *)); - */ -int -__db_get_open_flags(dbp, flagsp) - DB *dbp; - u_int32_t *flagsp; -{ - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get_open_flags"); - - *flagsp = dbp->open_flags; - return (0); -} - -/* - * __db_new_file -- - * Create a new database file. 
- * - * PUBLIC: int __db_new_file __P((DB *, DB_TXN *, DB_FH *, const char *)); - */ -int -__db_new_file(dbp, txn, fhp, name) - DB *dbp; - DB_TXN *txn; - DB_FH *fhp; - const char *name; -{ - int ret; - - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - ret = __bam_new_file(dbp, txn, fhp, name); - break; - case DB_HASH: - ret = __ham_new_file(dbp, txn, fhp, name); - break; - case DB_QUEUE: - ret = __qam_new_file(dbp, txn, fhp, name); - break; - case DB_UNKNOWN: - default: - __db_err(dbp->dbenv, - "%s: Invalid type %d specified", name, dbp->type); - ret = EINVAL; - break; - } - - DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, name); - /* Sync the file in preparation for moving it into place. */ - if (ret == 0 && fhp != NULL) - ret = __os_fsync(dbp->dbenv, fhp); - - DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name); - -DB_TEST_RECOVERY_LABEL - return (ret); -} - -/* - * __db_init_subdb -- - * Initialize the dbp for a subdb. - * - * PUBLIC: int __db_init_subdb __P((DB *, DB *, const char *, DB_TXN *)); - */ -int -__db_init_subdb(mdbp, dbp, name, txn) - DB *mdbp, *dbp; - const char *name; - DB_TXN *txn; -{ - DBMETA *meta; - DB_MPOOLFILE *mpf; - int ret, t_ret; - - ret = 0; - if (!F_ISSET(dbp, DB_AM_CREATED)) { - /* Subdb exists; read meta-data page and initialize. */ - mpf = mdbp->mpf; - if ((ret = __memp_fget(mpf, &dbp->meta_pgno, 0, &meta)) != 0) - goto err; - ret = __db_meta_setup(mdbp->dbenv, dbp, name, meta, 0, 0); - if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) - ret = t_ret; - /* - * If __db_meta_setup found that the meta-page hadn't - * been written out during recovery, we can just return. - */ - if (ret == ENOENT) - ret = 0; - goto err; - } - - /* Handle the create case here. */ - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - ret = __bam_new_subdb(mdbp, dbp, txn); - break; - case DB_HASH: - ret = __ham_new_subdb(mdbp, dbp, txn); - break; - case DB_QUEUE: - ret = EINVAL; - break; - case DB_UNKNOWN: - default: - __db_err(dbp->dbenv, - "Invalid subdatabase type %d specified", dbp->type); - return (EINVAL); - } - -err: return (ret); -} - -/* - * __db_chk_meta -- - * Take a buffer containing a meta-data page and check it for a valid LSN, - * checksum (and verify the checksum if necessary) and possibly decrypt it. - * - * Return 0 on success, >0 (errno) on error, -1 on checksum mismatch. - * - * PUBLIC: int __db_chk_meta __P((DB_ENV *, DB *, DBMETA *, int)); - */ -int -__db_chk_meta(dbenv, dbp, meta, do_metachk) - DB_ENV *dbenv; - DB *dbp; - DBMETA *meta; - int do_metachk; -{ - DB_LSN cur_lsn, swap_lsn; - int is_hmac, ret, swapped; - u_int32_t magic, orig_chk; - u_int8_t *chksum; - - ret = 0; - swapped = 0; - - if (FLD_ISSET(meta->metaflags, DBMETA_CHKSUM)) { - if (dbp != NULL) - F_SET(dbp, DB_AM_CHKSUM); - - is_hmac = meta->encrypt_alg == 0 ? 0 : 1; - chksum = ((BTMETA *)meta)->chksum; - - /* - * If we need to swap, the checksum function overwrites the - * original checksum with 0, so we need to save a copy of the - * original for swapping later. - */ - orig_chk = *(u_int32_t *)chksum; - - /* - * We cannot add this to __db_metaswap because that gets done - * later after we've verified the checksum or decrypted. 
- */ - if (do_metachk) { - swapped = 0; -chk_retry: if ((ret = __db_check_chksum(dbenv, - (DB_CIPHER *)dbenv->crypto_handle, chksum, meta, - DBMETASIZE, is_hmac)) != 0) { - if (is_hmac || swapped) - return (ret); - - M_32_SWAP(orig_chk); - swapped = 1; - *(u_int32_t *)chksum = orig_chk; - goto chk_retry; - } - } - } else if (dbp != NULL) - F_CLR(dbp, DB_AM_CHKSUM); - -#ifdef HAVE_CRYPTO - ret = __crypto_decrypt_meta(dbenv, dbp, (u_int8_t *)meta, do_metachk); -#endif - - /* Now that we're decrypted, we can check LSN. */ - if (LOGGING_ON(dbenv)) { - /* - * This gets called both before and after swapping, so we - * need to check ourselves. If we already swapped it above, - * we'll know that here. - */ - - swap_lsn = meta->lsn; - magic = meta->magic; -lsn_retry: - if (swapped) { - M_32_SWAP(swap_lsn.file); - M_32_SWAP(swap_lsn.offset); - M_32_SWAP(magic); - } - switch (magic) { - case DB_BTREEMAGIC: - case DB_HASHMAGIC: - case DB_QAMMAGIC: - case DB_RENAMEMAGIC: - break; - default: - if (swapped) - return (EINVAL); - swapped = 1; - goto lsn_retry; - } - if (!IS_REP_CLIENT(dbenv) && - !IS_NOT_LOGGED_LSN(swap_lsn) && !IS_ZERO_LSN(swap_lsn)) { - /* Need to do check. */ - if ((ret = __log_current_lsn(dbenv, - &cur_lsn, NULL, NULL)) != 0) - return (ret); - if (log_compare(&swap_lsn, &cur_lsn) > 0) { - __db_err(dbenv, - "file %s (meta pgno = %lu) has LSN [%lu][%lu].", - dbp->fname == NULL - ? "unknown" : dbp->fname, - (u_long)dbp->meta_pgno, - (u_long)swap_lsn.file, - (u_long)swap_lsn.offset); - __db_err(dbenv, "end of log is [%lu][%lu]", - (u_long)cur_lsn.file, - (u_long)cur_lsn.offset); - return (EINVAL); - } - } - } - return (ret); -} - -/* - * __db_meta_setup -- - * - * Take a buffer containing a meta-data page and figure out if it's - * valid, and if so, initialize the dbp from the meta-data page. - * - * PUBLIC: int __db_meta_setup __P((DB_ENV *, - * PUBLIC: DB *, const char *, DBMETA *, u_int32_t, int)); - */ -int -__db_meta_setup(dbenv, dbp, name, meta, oflags, do_metachk) - DB_ENV *dbenv; - DB *dbp; - const char *name; - DBMETA *meta; - u_int32_t oflags; - int do_metachk; -{ - u_int32_t flags, magic; - int ret; - - ret = 0; - - /* - * Figure out what access method we're dealing with, and then - * call access method specific code to check error conditions - * based on conflicts between the found file and application - * arguments. A found file overrides some user information -- - * we don't consider it an error, for example, if the user set - * an expected byte order and the found file doesn't match it. - */ - F_CLR(dbp, DB_AM_SWAP | DB_AM_IN_RENAME); - magic = meta->magic; - -swap_retry: - switch (magic) { - case DB_BTREEMAGIC: - case DB_HASHMAGIC: - case DB_QAMMAGIC: - case DB_RENAMEMAGIC: - break; - case 0: - /* - * The only time this should be 0 is if we're in the - * midst of opening a subdb during recovery and that - * subdatabase had its meta-data page allocated, but - * not yet initialized. - */ - if (F_ISSET(dbp, DB_AM_SUBDB) && ((IS_RECOVERING(dbenv) && - F_ISSET((DB_LOG *) dbenv->lg_handle, DBLOG_FORCE_OPEN)) || - meta->pgno != PGNO_INVALID)) - return (ENOENT); - - goto bad_format; - default: - if (F_ISSET(dbp, DB_AM_SWAP)) - goto bad_format; - - M_32_SWAP(magic); - F_SET(dbp, DB_AM_SWAP); - goto swap_retry; - } - - /* - * We can only check the meta page if we are sure we have a meta page. - * If it is random data, then this check can fail. So only now can we - * checksum and decrypt. 
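/*
 * Illustrative sketch, not part of the removed file: the chk_retry loop
 * above verifies the meta-page checksum and, on a mismatch, byte-swaps the
 * stored checksum once and tries again, because a file written on a machine
 * of the other endianness stores that field swapped.  Below, a trivial
 * additive checksum stands in for the real one and the stored value is kept
 * outside the page for simplicity.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t
swap32(uint32_t v)
{
	return ((v >> 24) | ((v >> 8) & 0xff00) |
	    ((v << 8) & 0xff0000) | (v << 24));
}

static uint32_t
toy_chksum(const unsigned char *p, size_t len)
{
	uint32_t sum = 0;

	while (len-- > 0)
		sum += *p++;
	return (sum);
}

/* Return 0 on a match, -1 otherwise; *swappedp says whether a swap was needed. */
static int
chk_meta(const unsigned char *page, size_t len, uint32_t stored, int *swappedp)
{
	uint32_t calc = toy_chksum(page, len);

	*swappedp = 0;
	if (calc == stored)
		return (0);
	*swappedp = 1;
	return (calc == swap32(stored) ? 0 : -1);
}

int
main(void)
{
	unsigned char page[64];
	uint32_t stored;
	int swapped;

	memset(page, 0xab, sizeof(page));
	/* Pretend the writer had the other endianness: store the sum swapped. */
	stored = swap32(toy_chksum(page, sizeof(page)));

	printf("ret=%d swapped=%d\n",
	    chk_meta(page, sizeof(page), stored, &swapped), swapped);
	return (0);
}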
Don't distinguish between configuration and - * checksum match errors here, because we haven't opened the database - * and even a checksum error isn't a reason to panic the environment. - */ - if ((ret = __db_chk_meta(dbenv, dbp, meta, do_metachk)) != 0) { - if (ret == -1) - __db_err(dbenv, - "%s: metadata page checksum error", name); - goto bad_format; - } - - switch (magic) { - case DB_BTREEMAGIC: - if (dbp->type != DB_UNKNOWN && - dbp->type != DB_RECNO && dbp->type != DB_BTREE) - goto bad_format; - - flags = meta->flags; - if (F_ISSET(dbp, DB_AM_SWAP)) - M_32_SWAP(flags); - if (LF_ISSET(BTM_RECNO)) - dbp->type = DB_RECNO; - else - dbp->type = DB_BTREE; - if ((oflags & DB_TRUNCATE) == 0 && (ret = - __bam_metachk(dbp, name, (BTMETA *)meta)) != 0) - return (ret); - break; - case DB_HASHMAGIC: - if (dbp->type != DB_UNKNOWN && dbp->type != DB_HASH) - goto bad_format; - - dbp->type = DB_HASH; - if ((oflags & DB_TRUNCATE) == 0 && (ret = - __ham_metachk(dbp, name, (HMETA *)meta)) != 0) - return (ret); - break; - case DB_QAMMAGIC: - if (dbp->type != DB_UNKNOWN && dbp->type != DB_QUEUE) - goto bad_format; - dbp->type = DB_QUEUE; - if ((oflags & DB_TRUNCATE) == 0 && (ret = - __qam_metachk(dbp, name, (QMETA *)meta)) != 0) - return (ret); - break; - case DB_RENAMEMAGIC: - F_SET(dbp, DB_AM_IN_RENAME); - - /* Copy the file's ID. */ - memcpy(dbp->fileid, ((DBMETA *)meta)->uid, DB_FILE_ID_LEN); - - break; - default: - goto bad_format; - } - return (0); - -bad_format: - if (F_ISSET(dbp, DB_AM_RECOVER)) - ret = ENOENT; - else - __db_err(dbenv, "%s: unexpected file type or format", name); - return (ret == 0 ? EINVAL : ret); -} diff --git a/storage/bdb/db/db_overflow.c b/storage/bdb/db/db_overflow.c deleted file mode 100644 index 818ee91a8b2..00000000000 --- a/storage/bdb/db/db_overflow.c +++ /dev/null @@ -1,441 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_overflow.c,v 12.3 2005/08/08 17:30:51 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_am.h" -#include "dbinc/mp.h" - -/* - * Big key/data code. - * - * Big key and data entries are stored on linked lists of pages. The initial - * reference is a structure with the total length of the item and the page - * number where it begins. Each entry in the linked list contains a pointer - * to the next page of data, and so on. - */ - -/* - * __db_goff -- - * Get an offpage item. - * - * PUBLIC: int __db_goff __P((DB *, DBT *, - * PUBLIC: u_int32_t, db_pgno_t, void **, u_int32_t *)); - */ -int -__db_goff(dbp, dbt, tlen, pgno, bpp, bpsz) - DB *dbp; - DBT *dbt; - u_int32_t tlen; - db_pgno_t pgno; - void **bpp; - u_int32_t *bpsz; -{ - DB_ENV *dbenv; - DB_MPOOLFILE *mpf; - PAGE *h; - db_indx_t bytes; - u_int32_t curoff, needed, start; - u_int8_t *p, *src; - int ret; - - dbenv = dbp->dbenv; - mpf = dbp->mpf; - - /* - * Check if the buffer is big enough; if it is not and we are - * allowed to malloc space, then we'll malloc it. If we are - * not (DB_DBT_USERMEM), then we'll set the dbt and return - * appropriately. - */ - if (F_ISSET(dbt, DB_DBT_PARTIAL)) { - start = dbt->doff; - if (start > tlen) - needed = 0; - else if (dbt->dlen > tlen - start) - needed = tlen - start; - else - needed = dbt->dlen; - } else { - start = 0; - needed = tlen; - } - - /* Allocate any necessary memory. */ - if (F_ISSET(dbt, DB_DBT_USERMEM)) { - if (needed > dbt->ulen) { - dbt->size = needed; - return (DB_BUFFER_SMALL); - } - } else if (F_ISSET(dbt, DB_DBT_MALLOC)) { - if ((ret = __os_umalloc(dbenv, needed, &dbt->data)) != 0) - return (ret); - } else if (F_ISSET(dbt, DB_DBT_REALLOC)) { - if ((ret = __os_urealloc(dbenv, needed, &dbt->data)) != 0) - return (ret); - } else if (bpsz != NULL && (*bpsz == 0 || *bpsz < needed)) { - if ((ret = __os_realloc(dbenv, needed, bpp)) != 0) - return (ret); - *bpsz = needed; - dbt->data = *bpp; - } else if (bpp != NULL) - dbt->data = *bpp; - else { - DB_ASSERT( - F_ISSET(dbt, - DB_DBT_USERMEM | DB_DBT_MALLOC | DB_DBT_REALLOC) || - bpsz != NULL || bpp != NULL); - return (DB_BUFFER_SMALL); - } - - /* - * Step through the linked list of pages, copying the data on each - * one into the buffer. Never copy more than the total data length. - */ - dbt->size = needed; - for (curoff = 0, p = dbt->data; pgno != PGNO_INVALID && needed > 0;) { - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - return (ret); - - /* Check if we need any bytes from this page. 
*/ - if (curoff + OV_LEN(h) >= start) { - src = (u_int8_t *)h + P_OVERHEAD(dbp); - bytes = OV_LEN(h); - if (start > curoff) { - src += start - curoff; - bytes -= start - curoff; - } - if (bytes > needed) - bytes = needed; - memcpy(p, src, bytes); - p += bytes; - needed -= bytes; - } - curoff += OV_LEN(h); - pgno = h->next_pgno; - (void)__memp_fput(mpf, h, 0); - } - return (0); -} - -/* - * __db_poff -- - * Put an offpage item. - * - * PUBLIC: int __db_poff __P((DBC *, const DBT *, db_pgno_t *)); - */ -int -__db_poff(dbc, dbt, pgnop) - DBC *dbc; - const DBT *dbt; - db_pgno_t *pgnop; -{ - DB *dbp; - DBT tmp_dbt; - DB_LSN new_lsn, null_lsn; - DB_MPOOLFILE *mpf; - PAGE *pagep, *lastp; - db_indx_t pagespace; - u_int32_t sz; - u_int8_t *p; - int ret, t_ret; - - /* - * Allocate pages and copy the key/data item into them. Calculate the - * number of bytes we get for pages we fill completely with a single - * item. - */ - dbp = dbc->dbp; - mpf = dbp->mpf; - pagespace = P_MAXSPACE(dbp, dbp->pgsize); - - ret = 0; - lastp = NULL; - for (p = dbt->data, - sz = dbt->size; sz > 0; p += pagespace, sz -= pagespace) { - /* - * Reduce pagespace so we terminate the loop correctly and - * don't copy too much data. - */ - if (sz < pagespace) - pagespace = sz; - - /* - * Allocate and initialize a new page and copy all or part of - * the item onto the page. If sz is less than pagespace, we - * have a partial record. - */ - if ((ret = __db_new(dbc, P_OVERFLOW, &pagep)) != 0) - break; - if (DBC_LOGGING(dbc)) { - tmp_dbt.data = p; - tmp_dbt.size = pagespace; - ZERO_LSN(null_lsn); - if ((ret = __db_big_log(dbp, dbc->txn, - &new_lsn, 0, DB_ADD_BIG, PGNO(pagep), - lastp ? PGNO(lastp) : PGNO_INVALID, - PGNO_INVALID, &tmp_dbt, &LSN(pagep), - lastp == NULL ? &null_lsn : &LSN(lastp), - &null_lsn)) != 0) { - if (lastp != NULL) - (void)__memp_fput(mpf, - lastp, DB_MPOOL_DIRTY); - lastp = pagep; - break; - } - } else - LSN_NOT_LOGGED(new_lsn); - - /* Move LSN onto page. */ - if (lastp != NULL) - LSN(lastp) = new_lsn; - LSN(pagep) = new_lsn; - - OV_LEN(pagep) = pagespace; - OV_REF(pagep) = 1; - memcpy((u_int8_t *)pagep + P_OVERHEAD(dbp), p, pagespace); - - /* - * If this is the first entry, update the user's info. - * Otherwise, update the entry on the last page filled - * in and release that page. - */ - if (lastp == NULL) - *pgnop = PGNO(pagep); - else { - lastp->next_pgno = PGNO(pagep); - pagep->prev_pgno = PGNO(lastp); - (void)__memp_fput(mpf, lastp, DB_MPOOL_DIRTY); - } - lastp = pagep; - } - if (lastp != NULL && - (t_ret = __memp_fput(mpf, lastp, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __db_ovref -- - * Increment/decrement the reference count on an overflow page. - * - * PUBLIC: int __db_ovref __P((DBC *, db_pgno_t, int32_t)); - */ -int -__db_ovref(dbc, pgno, adjust) - DBC *dbc; - db_pgno_t pgno; - int32_t adjust; -{ - DB *dbp; - DB_MPOOLFILE *mpf; - PAGE *h; - int ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - return (ret); - - if (DBC_LOGGING(dbc)) { - if ((ret = __db_ovref_log(dbp, - dbc->txn, &LSN(h), 0, h->pgno, adjust, &LSN(h))) != 0) { - (void)__memp_fput(mpf, h, 0); - return (ret); - } - } else - LSN_NOT_LOGGED(LSN(h)); - OV_REF(h) += adjust; - - (void)__memp_fput(mpf, h, DB_MPOOL_DIRTY); - return (0); -} - -/* - * __db_doff -- - * Delete an offpage chain of overflow pages. 
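/*
 * Illustrative sketch, not part of the removed file: an overflow item lives
 * on a singly linked chain of pages, each carrying a chunk of the data and
 * the page number of the next chunk.  Retrieval (__db_goff above) walks the
 * chain, skips bytes before the requested offset and stops once the wanted
 * length has been copied.  A toy in-memory model of that walk -- real pages
 * of course come from the buffer pool:
 */
#include <stdio.h>
#include <string.h>

struct ovpage {
	const char *data;	/* bytes stored on this page */
	size_t	    len;	/* number of bytes used */
	int	    next;	/* index of the next page, -1 ends the chain */
};

/* Copy want bytes starting at item offset off into out; return bytes copied. */
static size_t
read_overflow(const struct ovpage *pages, int pg, size_t off, size_t want,
    char *out)
{
	size_t copied = 0, cur = 0;

	for (; pg != -1 && copied < want; pg = pages[pg].next) {
		const struct ovpage *h = &pages[pg];

		if (cur + h->len > off) {
			size_t skip = off > cur ? off - cur : 0;
			size_t n = h->len - skip;

			if (n > want - copied)
				n = want - copied;
			memcpy(out + copied, h->data + skip, n);
			copied += n;
		}
		cur += h->len;
	}
	return (copied);
}

int
main(void)
{
	/* "overflowed!" split across three linked pages. */
	struct ovpage pages[] = {
		{ "over", 4, 1 }, { "flow", 4, 2 }, { "ed!", 3, -1 }
	};
	char buf[16];
	size_t n;

	n = read_overflow(pages, 0, 2, 6, buf);
	buf[n] = '\0';
	printf("%zu bytes: \"%s\"\n", n, buf);	/* 6 bytes: "erflow" */
	return (0);
}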
- * - * PUBLIC: int __db_doff __P((DBC *, db_pgno_t)); - */ -int -__db_doff(dbc, pgno) - DBC *dbc; - db_pgno_t pgno; -{ - DB *dbp; - PAGE *pagep; - DB_LSN null_lsn; - DB_MPOOLFILE *mpf; - DBT tmp_dbt; - int ret; - - dbp = dbc->dbp; - mpf = dbp->mpf; - - do { - if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0) - return (ret); - - DB_ASSERT(TYPE(pagep) == P_OVERFLOW); - /* - * If it's referenced by more than one key/data item, - * decrement the reference count and return. - */ - if (OV_REF(pagep) > 1) { - (void)__memp_fput(mpf, pagep, 0); - return (__db_ovref(dbc, pgno, -1)); - } - - if (DBC_LOGGING(dbc)) { - tmp_dbt.data = (u_int8_t *)pagep + P_OVERHEAD(dbp); - tmp_dbt.size = OV_LEN(pagep); - ZERO_LSN(null_lsn); - if ((ret = __db_big_log(dbp, dbc->txn, - &LSN(pagep), 0, DB_REM_BIG, - PGNO(pagep), PREV_PGNO(pagep), - NEXT_PGNO(pagep), &tmp_dbt, - &LSN(pagep), &null_lsn, &null_lsn)) != 0) { - (void)__memp_fput(mpf, pagep, 0); - return (ret); - } - } else - LSN_NOT_LOGGED(LSN(pagep)); - pgno = pagep->next_pgno; - OV_LEN(pagep) = 0; - if ((ret = __db_free(dbc, pagep)) != 0) - return (ret); - } while (pgno != PGNO_INVALID); - - return (0); -} - -/* - * __db_moff -- - * Match on overflow pages. - * - * Given a starting page number and a key, return <0, 0, >0 to indicate if the - * key on the page is less than, equal to or greater than the key specified. - * We optimize this by doing chunk at a time comparison unless the user has - * specified a comparison function. In this case, we need to materialize - * the entire object and call their comparison routine. - * - * PUBLIC: int __db_moff __P((DB *, const DBT *, db_pgno_t, u_int32_t, - * PUBLIC: int (*)(DB *, const DBT *, const DBT *), int *)); - */ -int -__db_moff(dbp, dbt, pgno, tlen, cmpfunc, cmpp) - DB *dbp; - const DBT *dbt; - db_pgno_t pgno; - u_int32_t tlen; - int (*cmpfunc) __P((DB *, const DBT *, const DBT *)), *cmpp; -{ - DBT local_dbt; - DB_MPOOLFILE *mpf; - PAGE *pagep; - void *buf; - u_int32_t bufsize, cmp_bytes, key_left; - u_int8_t *p1, *p2; - int ret; - - mpf = dbp->mpf; - - /* - * If there is a user-specified comparison function, build a - * contiguous copy of the key, and call it. - */ - if (cmpfunc != NULL) { - memset(&local_dbt, 0, sizeof(local_dbt)); - buf = NULL; - bufsize = 0; - - if ((ret = __db_goff(dbp, - &local_dbt, tlen, pgno, &buf, &bufsize)) != 0) - return (ret); - /* Pass the key as the first argument */ - *cmpp = cmpfunc(dbp, dbt, &local_dbt); - __os_free(dbp->dbenv, buf); - return (0); - } - - /* While there are both keys to compare. */ - for (*cmpp = 0, p1 = dbt->data, - key_left = dbt->size; key_left > 0 && pgno != PGNO_INVALID;) { - if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0) - return (ret); - - cmp_bytes = OV_LEN(pagep) < key_left ? OV_LEN(pagep) : key_left; - tlen -= cmp_bytes; - key_left -= cmp_bytes; - for (p2 = (u_int8_t *)pagep + P_OVERHEAD(dbp); - cmp_bytes-- > 0; ++p1, ++p2) - if (*p1 != *p2) { - *cmpp = (long)*p1 - (long)*p2; - break; - } - pgno = NEXT_PGNO(pagep); - if ((ret = __memp_fput(mpf, pagep, 0)) != 0) - return (ret); - if (*cmpp != 0) - return (0); - } - if (key_left > 0) /* DBT is longer than the page key. */ - *cmpp = 1; - else if (tlen > 0) /* DBT is shorter than the page key. 
*/ - *cmpp = -1; - else - *cmpp = 0; - - return (0); -} diff --git a/storage/bdb/db/db_ovfl_vrfy.c b/storage/bdb/db/db_ovfl_vrfy.c deleted file mode 100644 index ceff4d2569c..00000000000 --- a/storage/bdb/db/db_ovfl_vrfy.c +++ /dev/null @@ -1,374 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_ovfl_vrfy.c,v 12.1 2005/06/16 20:21:13 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_am.h" -#include "dbinc/db_verify.h" -#include "dbinc/mp.h" - -/* - * __db_vrfy_overflow -- - * Verify overflow page. - * - * PUBLIC: int __db_vrfy_overflow __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, - * PUBLIC: u_int32_t)); - */ -int -__db_vrfy_overflow(dbp, vdp, h, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - u_int32_t flags; -{ - VRFY_PAGEINFO *pip; - int isbad, ret, t_ret; - - isbad = 0; - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - - pip->refcount = OV_REF(h); - if (pip->refcount < 1) { - EPRINT((dbp->dbenv, - "Page %lu: overflow page has zero reference count", - (u_long)pgno)); - isbad = 1; - } - - /* Just store for now. */ - pip->olen = HOFFSET(h); - -err: if ((t_ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) - ret = t_ret; - return ((ret == 0 && isbad == 1) ? 
DB_VERIFY_BAD : ret); -} - -/* - * __db_vrfy_ovfl_structure -- - * Walk a list of overflow pages, avoiding cycles and marking - * pages seen. - * - * PUBLIC: int __db_vrfy_ovfl_structure - * PUBLIC: __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, u_int32_t)); - */ -int -__db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t pgno; - u_int32_t tlen; - u_int32_t flags; -{ - DB *pgset; - VRFY_PAGEINFO *pip; - db_pgno_t next, prev; - int isbad, ret, seen_cnt, t_ret; - u_int32_t refcount; - - pgset = vdp->pgset; - DB_ASSERT(pgset != NULL); - isbad = 0; - - /* This shouldn't happen, but just to be sure. */ - if (!IS_VALID_PGNO(pgno)) - return (DB_VERIFY_BAD); - - /* - * Check the first prev_pgno; it ought to be PGNO_INVALID, - * since there's no prev page. - */ - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - /* The refcount is stored on the first overflow page. */ - refcount = pip->refcount; - - if (pip->type != P_OVERFLOW) { - EPRINT((dbp->dbenv, - "Page %lu: overflow page of invalid type %lu", - (u_long)pgno, (u_long)pip->type)); - ret = DB_VERIFY_BAD; - goto err; /* Unsafe to continue. */ - } - - prev = pip->prev_pgno; - if (prev != PGNO_INVALID) { - EPRINT((dbp->dbenv, - "Page %lu: first page in overflow chain has a prev_pgno %lu", - (u_long)pgno, (u_long)prev)); - isbad = 1; - } - - for (;;) { - /* - * We may have seen this page elsewhere, if the overflow entry - * has been promoted to an internal page; we just want to - * make sure that each overflow page is seen exactly as many - * times as its refcount dictates. - * - * Note that this code also serves to keep us from looping - * infinitely if there's a cycle in an overflow chain. - */ - if ((ret = __db_vrfy_pgset_get(pgset, pgno, &seen_cnt)) != 0) - goto err; - if ((u_int32_t)seen_cnt > refcount) { - EPRINT((dbp->dbenv, - "Page %lu: encountered too many times in overflow traversal", - (u_long)pgno)); - ret = DB_VERIFY_BAD; - goto err; - } - if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0) - goto err; - - /* - * Each overflow page can be referenced multiple times, - * because it's possible for overflow Btree keys to get - * promoted to internal pages. We want to make sure that - * each page is referenced from a Btree leaf (or Hash data - * page, which we consider a "leaf" here) exactly once; if - * the parent was a leaf, set a flag to indicate that we've - * seen this page in a leaf context. - * - * If the parent is not a leaf--in which case it's a Btree - * internal page--we don't need to bother doing any further - * verification, as we'll do it when we hit the leaf (or - * complain that we never saw the leaf). Only the first - * page in an overflow chain should ever have a refcount - * greater than 1, and the combination of the LEAFSEEN check - * and the fact that we bail after the first page for - * non-leaves should ensure this. - * - * Note that each "child" of a page, such as an overflow page, - * is stored and verified in a structure check exactly once, - * so this code does not need to contend with the fact that - * overflow chains used as Btree duplicate keys may be - * referenced multiply from a single Btree leaf page. 
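/*
 * Illustrative sketch, not part of the removed file: the traversal above
 * guards against corrupted overflow chains by counting how often each page
 * has been seen and failing once a page is seen more times than its
 * reference count allows, which also terminates any cycle in the chain.
 * A toy walk over an array-encoded chain where page 2 points back to page 1:
 */
#include <stdio.h>

#define NPAGES 4

int
main(void)
{
	int next[NPAGES] = { 1, 2, 1, -1 };	/* next page per page, -1 ends */
	int refcount[NPAGES] = { 1, 1, 1, 1 };	/* allowed visits per page */
	int seen[NPAGES] = { 0 };
	int pg;

	for (pg = 0; pg != -1; pg = next[pg])
		if (++seen[pg] > refcount[pg]) {
			printf("page %d: encountered too many times (cycle?)\n", pg);
			return (1);
		}
	printf("chain OK\n");
	return (0);
}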
- */ - if (LF_ISSET(ST_OVFL_LEAF)) { - if (F_ISSET(pip, VRFY_OVFL_LEAFSEEN)) { - EPRINT((dbp->dbenv, - "Page %lu: overflow page linked twice from leaf or data page", - (u_long)pgno)); - ret = DB_VERIFY_BAD; - goto err; - } - F_SET(pip, VRFY_OVFL_LEAFSEEN); - } - - /* - * We want to verify each overflow chain only once, and - * although no chain should be linked more than once from a - * leaf page, we can't guarantee that it'll be linked that - * once if it's linked from an internal page and the key - * is gone. - * - * seen_cnt is the number of times we'd encountered this page - * before calling this function. - */ - if (seen_cnt == 0) { - /* - * Keep a running tab on how much of the item we've - * seen. - */ - tlen -= pip->olen; - - /* Send the application feedback about our progress. */ - if (!LF_ISSET(DB_SALVAGE)) - __db_vrfy_struct_feedback(dbp, vdp); - } else - goto done; - - next = pip->next_pgno; - - /* Are we there yet? */ - if (next == PGNO_INVALID) - break; - - /* - * We've already checked this when we saved it, but just - * to be sure... - */ - if (!IS_VALID_PGNO(next)) { - DB_ASSERT(0); - EPRINT((dbp->dbenv, - "Page %lu: bad next_pgno %lu on overflow page", - (u_long)pgno, (u_long)next)); - ret = DB_VERIFY_BAD; - goto err; - } - - if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 || - (ret = __db_vrfy_getpageinfo(vdp, next, &pip)) != 0) - return (ret); - if (pip->prev_pgno != pgno) { - EPRINT((dbp->dbenv, - "Page %lu: bad prev_pgno %lu on overflow page (should be %lu)", - (u_long)next, (u_long)pip->prev_pgno, - (u_long)pgno)); - isbad = 1; - /* - * It's safe to continue because we have separate - * cycle detection. - */ - } - - pgno = next; - } - - if (tlen > 0) { - isbad = 1; - EPRINT((dbp->dbenv, - "Page %lu: overflow item incomplete", (u_long)pgno)); - } - -done: -err: if ((t_ret = - __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -/* - * __db_safe_goff -- - * Get an overflow item, very carefully, from an untrusted database, - * in the context of the salvager. - * - * PUBLIC: int __db_safe_goff __P((DB *, VRFY_DBINFO *, db_pgno_t, - * PUBLIC: DBT *, void *, u_int32_t)); - */ -int -__db_safe_goff(dbp, vdp, pgno, dbt, buf, flags) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t pgno; - DBT *dbt; - void *buf; - u_int32_t flags; -{ - DB_MPOOLFILE *mpf; - PAGE *h; - int ret, t_ret; - u_int32_t bytesgot, bytes; - u_int8_t *src, *dest; - - mpf = dbp->mpf; - h = NULL; - ret = t_ret = 0; - bytesgot = bytes = 0; - - while ((pgno != PGNO_INVALID) && (IS_VALID_PGNO(pgno))) { - /* - * Mark that we're looking at this page; if we've seen it - * already, quit. - */ - if ((ret = __db_salvage_markdone(vdp, pgno)) != 0) - break; - - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - break; - - /* - * Make sure it's really an overflow page, unless we're - * being aggressive, in which case we pretend it is. 
- */ - if (!LF_ISSET(DB_AGGRESSIVE) && TYPE(h) != P_OVERFLOW) { - ret = DB_VERIFY_BAD; - break; - } - - src = (u_int8_t *)h + P_OVERHEAD(dbp); - bytes = OV_LEN(h); - - if (bytes + P_OVERHEAD(dbp) > dbp->pgsize) - bytes = dbp->pgsize - P_OVERHEAD(dbp); - - if ((ret = __os_realloc(dbp->dbenv, - bytesgot + bytes, buf)) != 0) - break; - - dest = *(u_int8_t **)buf + bytesgot; - bytesgot += bytes; - - memcpy(dest, src, bytes); - - pgno = NEXT_PGNO(h); - - if ((ret = __memp_fput(mpf, h, 0)) != 0) - break; - h = NULL; - } - - /* - * If we're being aggressive, salvage a partial datum if there - * was an error somewhere along the way. - */ - if (ret == 0 || LF_ISSET(DB_AGGRESSIVE)) { - dbt->size = bytesgot; - dbt->data = *(void **)buf; - } - - /* If we broke out on error, don't leave pages pinned. */ - if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} diff --git a/storage/bdb/db/db_pr.c b/storage/bdb/db/db_pr.c deleted file mode 100644 index 4618d4f4754..00000000000 --- a/storage/bdb/db/db_pr.c +++ /dev/null @@ -1,1614 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_pr.c,v 12.17 2005/11/08 03:13:30 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/mp.h" -#include "dbinc/qam.h" -#include "dbinc/db_verify.h" - -/* - * __db_loadme -- - * A nice place to put a breakpoint. - * - * PUBLIC: void __db_loadme __P((void)); - */ -void -__db_loadme() -{ - pid_t pid; - db_threadid_t tid; - - __os_id(NULL, &pid, &tid); -} - -#ifdef HAVE_STATISTICS -static int __db_bmeta __P((DB *, BTMETA *, u_int32_t)); -static int __db_hmeta __P((DB *, HMETA *, u_int32_t)); -static void __db_meta __P((DB *, DBMETA *, FN const *, u_int32_t)); -static const char *__db_pagetype_to_string __P((u_int32_t)); -static void __db_prdb __P((DB *, u_int32_t)); -static void __db_proff __P((DB_ENV *, DB_MSGBUF *, void *)); -static int __db_prtree __P((DB *, u_int32_t)); -static int __db_qmeta __P((DB *, QMETA *, u_int32_t)); - -/* - * __db_dumptree -- - * Dump the tree to a file. 
- * - * PUBLIC: int __db_dumptree __P((DB *, char *, char *)); - */ -int -__db_dumptree(dbp, op, name) - DB *dbp; - char *op, *name; -{ - DB_ENV *dbenv; - FILE *fp, *orig_fp; - u_int32_t flags; - int ret; - - dbenv = dbp->dbenv; - - for (flags = 0; *op != '\0'; ++op) - switch (*op) { - case 'a': - LF_SET(DB_PR_PAGE); - break; - case 'h': - break; - case 'r': - LF_SET(DB_PR_RECOVERYTEST); - break; - default: - return (EINVAL); - } - - if (name != NULL) { - if ((fp = fopen(name, "w")) == NULL) - return (__os_get_errno()); - - orig_fp = dbenv->db_msgfile; - dbenv->db_msgfile = fp; - } else - fp = orig_fp = NULL; - - __db_prdb(dbp, flags); - - __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); - - ret = __db_prtree(dbp, flags); - - if (fp != NULL) { - (void)fclose(fp); - dbenv->db_msgfile = orig_fp; - } - - return (ret); -} - -static const FN __db_flags_fn[] = { - { DB_AM_CHKSUM, "checksumming" }, - { DB_AM_CL_WRITER, "client replica writer" }, - { DB_AM_COMPENSATE, "created by compensating transaction" }, - { DB_AM_CREATED, "database created" }, - { DB_AM_CREATED_MSTR, "encompassing file created" }, - { DB_AM_DBM_ERROR, "dbm/ndbm error" }, - { DB_AM_DELIMITER, "variable length" }, - { DB_AM_DISCARD, "discard cached pages" }, - { DB_AM_DUP, "duplicates" }, - { DB_AM_DUPSORT, "sorted duplicates" }, - { DB_AM_ENCRYPT, "encrypted" }, - { DB_AM_FIXEDLEN, "fixed-length records" }, - { DB_AM_INMEM, "in-memory" }, - { DB_AM_IN_RENAME, "file is being renamed" }, - { DB_AM_NOT_DURABLE, "changes not logged" }, - { DB_AM_OPEN_CALLED, "open called" }, - { DB_AM_PAD, "pad value" }, - { DB_AM_PGDEF, "default page size" }, - { DB_AM_RDONLY, "read-only" }, - { DB_AM_READ_UNCOMMITTED, "read-uncommitted" }, - { DB_AM_RECNUM, "Btree record numbers" }, - { DB_AM_RECOVER, "opened for recovery" }, - { DB_AM_RENUMBER, "renumber" }, - { DB_AM_REVSPLITOFF, "no reverse splits" }, - { DB_AM_SECONDARY, "secondary" }, - { DB_AM_SNAPSHOT, "load on open" }, - { DB_AM_SUBDB, "subdatabases" }, - { DB_AM_SWAP, "needswap" }, - { DB_AM_TXN, "transactional" }, - { DB_AM_VERIFYING, "verifier" }, - { 0, NULL } -}; - -/* - * __db_get_flags_fn -- - * Return the __db_flags_fn array. - * - * PUBLIC: const FN * __db_get_flags_fn __P((void)); - */ -const FN * -__db_get_flags_fn() -{ - return (__db_flags_fn); -} - -/* - * __db_prdb -- - * Print out the DB structure information. - */ -static void -__db_prdb(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_MSGBUF mb; - DB_ENV *dbenv; - BTREE *bt; - HASH *h; - QUEUE *q; - - dbenv = dbp->dbenv; - - DB_MSGBUF_INIT(&mb); - __db_msg(dbenv, "In-memory DB structure:"); - __db_msgadd(dbenv, &mb, "%s: %#lx", - __db_dbtype_to_string(dbp->type), (u_long)dbp->flags); - __db_prflags(dbenv, &mb, dbp->flags, __db_flags_fn, " (", ")"); - DB_MSGBUF_FLUSH(dbenv, &mb); - - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - bt = dbp->bt_internal; - __db_msg(dbenv, "bt_meta: %lu bt_root: %lu", - (u_long)bt->bt_meta, (u_long)bt->bt_root); - __db_msg(dbenv, "bt_minkey: %lu", (u_long)bt->bt_minkey); - if (!LF_ISSET(DB_PR_RECOVERYTEST)) - __db_msg(dbenv, "bt_compare: %#lx bt_prefix: %#lx", - P_TO_ULONG(bt->bt_compare), - P_TO_ULONG(bt->bt_prefix)); - __db_msg(dbenv, "bt_lpgno: %lu", (u_long)bt->bt_lpgno); - if (dbp->type == DB_RECNO) { - __db_msg(dbenv, - "re_pad: %#lx re_delim: %#lx re_len: %lu re_source: %s", - (u_long)bt->re_pad, (u_long)bt->re_delim, - (u_long)bt->re_len, - bt->re_source == NULL ? 
"" : bt->re_source); - __db_msg(dbenv, - "re_modified: %d re_eof: %d re_last: %lu", - bt->re_modified, bt->re_eof, (u_long)bt->re_last); - } - break; - case DB_HASH: - h = dbp->h_internal; - __db_msg(dbenv, "meta_pgno: %lu", (u_long)h->meta_pgno); - __db_msg(dbenv, "h_ffactor: %lu", (u_long)h->h_ffactor); - __db_msg(dbenv, "h_nelem: %lu", (u_long)h->h_nelem); - if (!LF_ISSET(DB_PR_RECOVERYTEST)) - __db_msg(dbenv, "h_hash: %#lx", P_TO_ULONG(h->h_hash)); - break; - case DB_QUEUE: - q = dbp->q_internal; - __db_msg(dbenv, "q_meta: %lu", (u_long)q->q_meta); - __db_msg(dbenv, "q_root: %lu", (u_long)q->q_root); - __db_msg(dbenv, "re_pad: %#lx re_len: %lu", - (u_long)q->re_pad, (u_long)q->re_len); - __db_msg(dbenv, "rec_page: %lu", (u_long)q->rec_page); - __db_msg(dbenv, "page_ext: %lu", (u_long)q->page_ext); - break; - case DB_UNKNOWN: - default: - break; - } -} - -/* - * __db_prtree -- - * Print out the entire tree. - */ -static int -__db_prtree(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t i, last; - int ret; - - mpf = dbp->mpf; - - if (dbp->type == DB_QUEUE) - return (__db_prqueue(dbp, flags)); - - /* - * Find out the page number of the last page in the database, then - * dump each page. - */ - if ((ret = __memp_last_pgno(mpf, &last)) != 0) - return (ret); - for (i = 0; i <= last; ++i) { - if ((ret = __memp_fget(mpf, &i, 0, &h)) != 0) - return (ret); - (void)__db_prpage(dbp, h, flags); - if ((ret = __memp_fput(mpf, h, 0)) != 0) - return (ret); - } - - return (0); -} - -/* - * __db_meta -- - * Print out common metadata information. - */ -static void -__db_meta(dbp, dbmeta, fn, flags) - DB *dbp; - DBMETA *dbmeta; - FN const *fn; - u_int32_t flags; -{ - DB_MSGBUF mb; - DB_ENV *dbenv; - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t pgno; - u_int8_t *p; - int cnt, ret; - const char *sep; - - dbenv = dbp->dbenv; - mpf = dbp->mpf; - DB_MSGBUF_INIT(&mb); - - __db_msg(dbenv, "\tmagic: %#lx", (u_long)dbmeta->magic); - __db_msg(dbenv, "\tversion: %lu", (u_long)dbmeta->version); - __db_msg(dbenv, "\tpagesize: %lu", (u_long)dbmeta->pagesize); - __db_msg(dbenv, "\ttype: %lu", (u_long)dbmeta->type); - __db_msg(dbenv, "\tkeys: %lu\trecords: %lu", - (u_long)dbmeta->key_count, (u_long)dbmeta->record_count); - - /* - * If we're doing recovery testing, don't display the free list, - * it may have changed and that makes the dump diff not work. 
- */ - if (!LF_ISSET(DB_PR_RECOVERYTEST)) { - __db_msgadd( - dbenv, &mb, "\tfree list: %lu", (u_long)dbmeta->free); - for (pgno = dbmeta->free, - cnt = 0, sep = ", "; pgno != PGNO_INVALID;) { - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) { - DB_MSGBUF_FLUSH(dbenv, &mb); - __db_msg(dbenv, - "Unable to retrieve free-list page: %lu: %s", - (u_long)pgno, db_strerror(ret)); - break; - } - pgno = h->next_pgno; - (void)__memp_fput(mpf, h, 0); - __db_msgadd(dbenv, &mb, "%s%lu", sep, (u_long)pgno); - if (++cnt % 10 == 0) { - DB_MSGBUF_FLUSH(dbenv, &mb); - cnt = 0; - sep = "\t"; - } else - sep = ", "; - } - DB_MSGBUF_FLUSH(dbenv, &mb); - __db_msg(dbenv, "\tlast_pgno: %lu", (u_long)dbmeta->last_pgno); - } - - if (fn != NULL) { - DB_MSGBUF_FLUSH(dbenv, &mb); - __db_msgadd(dbenv, &mb, "\tflags: %#lx", (u_long)dbmeta->flags); - __db_prflags(dbenv, &mb, dbmeta->flags, fn, " (", ")"); - } - - DB_MSGBUF_FLUSH(dbenv, &mb); - __db_msgadd(dbenv, &mb, "\tuid: "); - for (p = (u_int8_t *)dbmeta->uid, - cnt = 0; cnt < DB_FILE_ID_LEN; ++cnt) { - __db_msgadd(dbenv, &mb, "%x", *p++); - if (cnt < DB_FILE_ID_LEN - 1) - __db_msgadd(dbenv, &mb, " "); - } - DB_MSGBUF_FLUSH(dbenv, &mb); -} - -/* - * __db_bmeta -- - * Print out the btree meta-data page. - */ -static int -__db_bmeta(dbp, h, flags) - DB *dbp; - BTMETA *h; - u_int32_t flags; -{ - static const FN fn[] = { - { BTM_DUP, "duplicates" }, - { BTM_RECNO, "recno" }, - { BTM_RECNUM, "btree:recnum" }, - { BTM_FIXEDLEN, "recno:fixed-length" }, - { BTM_RENUMBER, "recno:renumber" }, - { BTM_SUBDB, "multiple-databases" }, - { BTM_DUPSORT, "sorted duplicates" }, - { 0, NULL } - }; - DB_ENV *dbenv; - - dbenv = dbp->dbenv; - - __db_meta(dbp, (DBMETA *)h, fn, flags); - - __db_msg(dbenv, "\tminkey: %lu", (u_long)h->minkey); - if (dbp->type == DB_RECNO) - __db_msg(dbenv, "\tre_len: %#lx re_pad: %#lx", - (u_long)h->re_len, (u_long)h->re_pad); - __db_msg(dbenv, "\troot: %lu", (u_long)h->root); - - return (0); -} - -/* - * __db_hmeta -- - * Print out the hash meta-data page. - */ -static int -__db_hmeta(dbp, h, flags) - DB *dbp; - HMETA *h; - u_int32_t flags; -{ - DB_MSGBUF mb; - static const FN fn[] = { - { DB_HASH_DUP, "duplicates" }, - { DB_HASH_SUBDB, "multiple-databases" }, - { DB_HASH_DUPSORT, "sorted duplicates" }, - { 0, NULL } - }; - DB_ENV *dbenv; - int i; - - dbenv = dbp->dbenv; - DB_MSGBUF_INIT(&mb); - - __db_meta(dbp, (DBMETA *)h, fn, flags); - - __db_msg(dbenv, "\tmax_bucket: %lu", (u_long)h->max_bucket); - __db_msg(dbenv, "\thigh_mask: %#lx", (u_long)h->high_mask); - __db_msg(dbenv, "\tlow_mask: %#lx", (u_long)h->low_mask); - __db_msg(dbenv, "\tffactor: %lu", (u_long)h->ffactor); - __db_msg(dbenv, "\tnelem: %lu", (u_long)h->nelem); - __db_msg(dbenv, "\th_charkey: %#lx", (u_long)h->h_charkey); - __db_msgadd(dbenv, &mb, "\tspare points: "); - for (i = 0; i < NCACHED; i++) - __db_msgadd(dbenv, &mb, "%lu ", (u_long)h->spares[i]); - DB_MSGBUF_FLUSH(dbenv, &mb); - - return (0); -} - -/* - * __db_qmeta -- - * Print out the queue meta-data page. 
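/*
 * Illustrative sketch, not from the removed source: __db_meta() above
 * prints the free list by chasing each free page's next_pgno field and
 * breaking the output line after every ten entries.  The next[] array and
 * toy_print_free_list() name are stand-ins for fetching pages from the
 * memory pool; like the removed code, the terminating 0 is printed.
 */
#include <stdio.h>

static void
toy_print_free_list(const unsigned *next, unsigned free_head)
{
	unsigned pgno;
	int cnt;

	printf("\tfree list: %u", free_head);
	for (pgno = free_head, cnt = 0; pgno != 0;) {
		pgno = next[pgno];	/* follow the on-page link */
		printf(", %u", pgno);
		if (++cnt % 10 == 0)	/* start a new output line */
			printf("\n\t");
	}
	printf("\n");
}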
- */ -static int -__db_qmeta(dbp, h, flags) - DB *dbp; - QMETA *h; - u_int32_t flags; -{ - DB_ENV *dbenv; - - dbenv = dbp->dbenv; - - __db_meta(dbp, (DBMETA *)h, NULL, flags); - - __db_msg(dbenv, "\tfirst_recno: %lu", (u_long)h->first_recno); - __db_msg(dbenv, "\tcur_recno: %lu", (u_long)h->cur_recno); - __db_msg(dbenv, "\tre_len: %#lx re_pad: %lu", - (u_long)h->re_len, (u_long)h->re_pad); - __db_msg(dbenv, "\trec_page: %lu", (u_long)h->rec_page); - __db_msg(dbenv, "\tpage_ext: %lu", (u_long)h->page_ext); - - return (0); -} - -/* - * __db_prnpage - * -- Print out a specific page. - * - * PUBLIC: int __db_prnpage __P((DB *, db_pgno_t)); - */ -int -__db_prnpage(dbp, pgno) - DB *dbp; - db_pgno_t pgno; -{ - DB_MPOOLFILE *mpf; - PAGE *h; - int ret, t_ret; - - mpf = dbp->mpf; - - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - return (ret); - - ret = __db_prpage(dbp, h, DB_PR_PAGE); - - if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_prpage - * -- Print out a page. - * - * PUBLIC: int __db_prpage __P((DB *, PAGE *, u_int32_t)); - */ -int -__db_prpage(dbp, h, flags) - DB *dbp; - PAGE *h; - u_int32_t flags; -{ - BINTERNAL *bi; - BKEYDATA *bk; - DB_ENV *dbenv; - DB_MSGBUF mb; - HOFFPAGE a_hkd; - QAMDATA *qp, *qep; - RINTERNAL *ri; - db_indx_t dlen, len, i, *inp; - db_pgno_t pgno; - db_recno_t recno; - u_int32_t pagesize, qlen; - u_int8_t *ep, *hk, *p; - int deleted, ret; - const char *s; - void *sp; - - dbenv = dbp->dbenv; - DB_MSGBUF_INIT(&mb); - - /* - * If we're doing recovery testing and this page is P_INVALID, - * assume it's a page that's on the free list, and don't display it. - */ - if (LF_ISSET(DB_PR_RECOVERYTEST) && TYPE(h) == P_INVALID) - return (0); - - if ((s = __db_pagetype_to_string(TYPE(h))) == NULL) { - __db_msg(dbenv, "ILLEGAL PAGE TYPE: page: %lu type: %lu", - (u_long)h->pgno, (u_long)TYPE(h)); - return (1); - } - - /* - * !!! - * Find out the page size. We don't want to do it the "right" way, - * by reading the value from the meta-data page, that's going to be - * slow. Reach down into the mpool region. - */ - pagesize = (u_int32_t)dbp->mpf->mfp->stat.st_pagesize; - - /* Page number, page type. */ - __db_msgadd(dbenv, &mb, "page %lu: %s:", (u_long)h->pgno, s); - - /* - * LSNs on a metadata page will be different from the original after an - * abort, in some cases. Don't display them if we're testing recovery. - */ - if (!LF_ISSET(DB_PR_RECOVERYTEST) || - (TYPE(h) != P_BTREEMETA && TYPE(h) != P_HASHMETA && - TYPE(h) != P_QAMMETA && TYPE(h) != P_QAMDATA)) - __db_msgadd(dbenv, &mb, " LSN [%lu][%lu]:", - (u_long)LSN(h).file, (u_long)LSN(h).offset); - - /* - * Page level (only applicable for Btree/Recno, but we always display - * it, for no particular reason. - */ - __db_msgadd(dbenv, &mb, " level %lu", (u_long)h->level); - - /* Record count. */ - if (TYPE(h) == P_IBTREE || - TYPE(h) == P_IRECNO || (TYPE(h) == P_LRECNO && - h->pgno == ((BTREE *)dbp->bt_internal)->bt_root)) - __db_msgadd(dbenv, &mb, " records: %lu", (u_long)RE_NREC(h)); - DB_MSGBUF_FLUSH(dbenv, &mb); - - switch (TYPE(h)) { - case P_BTREEMETA: - return (__db_bmeta(dbp, (BTMETA *)h, flags)); - case P_HASHMETA: - return (__db_hmeta(dbp, (HMETA *)h, flags)); - case P_QAMMETA: - return (__db_qmeta(dbp, (QMETA *)h, flags)); - case P_QAMDATA: /* Should be meta->start. 
*/ - if (!LF_ISSET(DB_PR_PAGE)) - return (0); - - qlen = ((QUEUE *)dbp->q_internal)->re_len; - recno = (h->pgno - 1) * QAM_RECNO_PER_PAGE(dbp) + 1; - i = 0; - qep = (QAMDATA *)((u_int8_t *)h + pagesize - qlen); - for (qp = QAM_GET_RECORD(dbp, h, i); qp < qep; - recno++, i++, qp = QAM_GET_RECORD(dbp, h, i)) { - if (!F_ISSET(qp, QAM_SET)) - continue; - - __db_msgadd(dbenv, &mb, "%s", - F_ISSET(qp, QAM_VALID) ? "\t" : " D"); - __db_msgadd(dbenv, &mb, "[%03lu] %4lu ", (u_long)recno, - (u_long)((u_int8_t *)qp - (u_int8_t *)h)); - __db_pr(dbenv, &mb, qp->data, qlen); - } - return (0); - default: - break; - } - - s = "\t"; - if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) { - __db_msgadd(dbenv, &mb, "%sprev: %4lu next: %4lu", - s, (u_long)PREV_PGNO(h), (u_long)NEXT_PGNO(h)); - s = " "; - } - if (TYPE(h) == P_OVERFLOW) { - __db_msgadd(dbenv, &mb, - "%sref cnt: %4lu ", s, (u_long)OV_REF(h)); - __db_pr(dbenv, &mb, (u_int8_t *)h + P_OVERHEAD(dbp), OV_LEN(h)); - return (0); - } - __db_msgadd(dbenv, &mb, "%sentries: %4lu", s, (u_long)NUM_ENT(h)); - __db_msgadd(dbenv, &mb, " offset: %4lu", (u_long)HOFFSET(h)); - DB_MSGBUF_FLUSH(dbenv, &mb); - - if (TYPE(h) == P_INVALID || !LF_ISSET(DB_PR_PAGE)) - return (0); - - ret = 0; - inp = P_INP(dbp, h); - for (i = 0; i < NUM_ENT(h); i++) { - if ((uintptr_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) < - (uintptr_t)(P_OVERHEAD(dbp)) || - (size_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) >= pagesize) { - __db_msg(dbenv, - "ILLEGAL PAGE OFFSET: indx: %lu of %lu", - (u_long)i, (u_long)inp[i]); - ret = EINVAL; - continue; - } - deleted = 0; - switch (TYPE(h)) { - case P_HASH: - case P_IBTREE: - case P_IRECNO: - sp = P_ENTRY(dbp, h, i); - break; - case P_LBTREE: - sp = P_ENTRY(dbp, h, i); - deleted = i % 2 == 0 && - B_DISSET(GET_BKEYDATA(dbp, h, i + O_INDX)->type); - break; - case P_LDUP: - case P_LRECNO: - sp = P_ENTRY(dbp, h, i); - deleted = B_DISSET(GET_BKEYDATA(dbp, h, i)->type); - break; - default: - goto type_err; - } - __db_msgadd(dbenv, &mb, "%s", deleted ? " D" : "\t"); - __db_msgadd( - dbenv, &mb, "[%03lu] %4lu ", (u_long)i, (u_long)inp[i]); - switch (TYPE(h)) { - case P_HASH: - hk = sp; - switch (HPAGE_PTYPE(hk)) { - case H_OFFDUP: - memcpy(&pgno, - HOFFDUP_PGNO(hk), sizeof(db_pgno_t)); - __db_msgadd(dbenv, &mb, - "%4lu [offpage dups]", (u_long)pgno); - DB_MSGBUF_FLUSH(dbenv, &mb); - break; - case H_DUPLICATE: - /* - * If this is the first item on a page, then - * we cannot figure out how long it is, so - * we only print the first one in the duplicate - * set. - */ - if (i != 0) - len = LEN_HKEYDATA(dbp, h, 0, i); - else - len = 1; - - __db_msgadd(dbenv, &mb, "Duplicates:"); - DB_MSGBUF_FLUSH(dbenv, &mb); - for (p = HKEYDATA_DATA(hk), - ep = p + len; p < ep;) { - memcpy(&dlen, p, sizeof(db_indx_t)); - p += sizeof(db_indx_t); - __db_msgadd(dbenv, &mb, "\t\t"); - __db_pr(dbenv, &mb, p, dlen); - p += sizeof(db_indx_t) + dlen; - } - break; - case H_KEYDATA: - __db_pr(dbenv, &mb, HKEYDATA_DATA(hk), - LEN_HKEYDATA(dbp, h, i == 0 ? 
- pagesize : 0, i)); - break; - case H_OFFPAGE: - memcpy(&a_hkd, hk, HOFFPAGE_SIZE); - __db_msgadd(dbenv, &mb, - "overflow: total len: %4lu page: %4lu", - (u_long)a_hkd.tlen, (u_long)a_hkd.pgno); - DB_MSGBUF_FLUSH(dbenv, &mb); - break; - default: - DB_MSGBUF_FLUSH(dbenv, &mb); - __db_msg(dbenv, "ILLEGAL HASH PAGE TYPE: %lu", - (u_long)HPAGE_PTYPE(hk)); - ret = EINVAL; - break; - } - break; - case P_IBTREE: - bi = sp; - __db_msgadd(dbenv, &mb, - "count: %4lu pgno: %4lu type: %lu ", - (u_long)bi->nrecs, (u_long)bi->pgno, - (u_long)bi->type); - switch (B_TYPE(bi->type)) { - case B_KEYDATA: - __db_pr(dbenv, &mb, bi->data, bi->len); - break; - case B_DUPLICATE: - case B_OVERFLOW: - __db_proff(dbenv, &mb, bi->data); - break; - default: - DB_MSGBUF_FLUSH(dbenv, &mb); - __db_msg(dbenv, "ILLEGAL BINTERNAL TYPE: %lu", - (u_long)B_TYPE(bi->type)); - ret = EINVAL; - break; - } - break; - case P_IRECNO: - ri = sp; - __db_msgadd(dbenv, &mb, "entries %4lu pgno %4lu", - (u_long)ri->nrecs, (u_long)ri->pgno); - DB_MSGBUF_FLUSH(dbenv, &mb); - break; - case P_LBTREE: - case P_LDUP: - case P_LRECNO: - bk = sp; - switch (B_TYPE(bk->type)) { - case B_KEYDATA: - __db_pr(dbenv, &mb, bk->data, bk->len); - break; - case B_DUPLICATE: - case B_OVERFLOW: - __db_proff(dbenv, &mb, bk); - break; - default: - DB_MSGBUF_FLUSH(dbenv, &mb); - __db_msg(dbenv, - "ILLEGAL DUPLICATE/LBTREE/LRECNO TYPE: %lu", - (u_long)B_TYPE(bk->type)); - ret = EINVAL; - break; - } - break; - default: -type_err: DB_MSGBUF_FLUSH(dbenv, &mb); - __db_msg(dbenv, - "ILLEGAL PAGE TYPE: %lu", (u_long)TYPE(h)); - ret = EINVAL; - continue; - } - } - return (ret); -} - -/* - * __db_pr -- - * Print out a data element. - * - * PUBLIC: void __db_pr __P((DB_ENV *, DB_MSGBUF *, u_int8_t *, u_int32_t)); - */ -void -__db_pr(dbenv, mbp, p, len) - DB_ENV *dbenv; - DB_MSGBUF *mbp; - u_int8_t *p; - u_int32_t len; -{ - u_int32_t i; - - __db_msgadd(dbenv, mbp, "len: %3lu", (u_long)len); - if (len != 0) { - __db_msgadd(dbenv, mbp, " data: "); - for (i = len <= 20 ? len : 20; i > 0; --i, ++p) { - if (isprint((int)*p) || *p == '\n') - __db_msgadd(dbenv, mbp, "%c", *p); - else - __db_msgadd(dbenv, mbp, "%#.2x", (u_int)*p); - } - if (len > 20) - __db_msgadd(dbenv, mbp, "..."); - } - DB_MSGBUF_FLUSH(dbenv, mbp); -} - -/* - * __db_proff -- - * Print out an off-page element. - */ -static void -__db_proff(dbenv, mbp, vp) - DB_ENV *dbenv; - DB_MSGBUF *mbp; - void *vp; -{ - BOVERFLOW *bo; - - bo = vp; - switch (B_TYPE(bo->type)) { - case B_OVERFLOW: - __db_msgadd(dbenv, mbp, "overflow: total len: %4lu page: %4lu", - (u_long)bo->tlen, (u_long)bo->pgno); - break; - case B_DUPLICATE: - __db_msgadd( - dbenv, mbp, "duplicate: page: %4lu", (u_long)bo->pgno); - break; - default: - /* NOTREACHED */ - break; - } - DB_MSGBUF_FLUSH(dbenv, mbp); -} - -/* - * __db_prflags -- - * Print out flags values. - * - * PUBLIC: void __db_prflags __P((DB_ENV *, DB_MSGBUF *, - * PUBLIC: u_int32_t, const FN *, const char *, const char *)); - */ -void -__db_prflags(dbenv, mbp, flags, fn, prefix, suffix) - DB_ENV *dbenv; - DB_MSGBUF *mbp; - u_int32_t flags; - FN const *fn; - const char *prefix, *suffix; -{ - DB_MSGBUF mb; - const FN *fnp; - int found, standalone; - const char *sep; - - /* - * If it's a standalone message, output the suffix (which will be the - * label), regardless of whether we found anything or not, and flush - * the line. - */ - if (mbp == NULL) { - standalone = 1; - mbp = &mb; - DB_MSGBUF_INIT(mbp); - } else - standalone = 0; - - sep = prefix == NULL ? 
"" : prefix; - for (found = 0, fnp = fn; fnp->mask != 0; ++fnp) - if (LF_ISSET(fnp->mask)) { - __db_msgadd(dbenv, mbp, "%s%s", sep, fnp->name); - sep = ", "; - found = 1; - } - - if ((standalone || found) && suffix != NULL) - __db_msgadd(dbenv, mbp, "%s", suffix); - if (standalone) - DB_MSGBUF_FLUSH(dbenv, mbp); -} - -/* - * __db_lockmode_to_string -- - * Return the name of the lock mode. - * - * PUBLIC: const char * __db_lockmode_to_string __P((db_lockmode_t)); - */ -const char * -__db_lockmode_to_string(mode) - db_lockmode_t mode; -{ - switch (mode) { - case DB_LOCK_NG: - return ("Not granted"); - case DB_LOCK_READ: - return ("Shared/read"); - case DB_LOCK_WRITE: - return ("Exclusive/write"); - case DB_LOCK_WAIT: - return ("Wait for event"); - case DB_LOCK_IWRITE: - return ("Intent exclusive/write"); - case DB_LOCK_IREAD: - return ("Intent shared/read"); - case DB_LOCK_IWR: - return ("Intent to read/write"); - case DB_LOCK_READ_UNCOMMITTED: - return ("Read uncommitted"); - case DB_LOCK_WWRITE: - return ("Was written"); - default: - break; - } - return ("UNKNOWN LOCK MODE"); -} - -/* - * __db_pagetype_to_string -- - * Return the name of the specified page type. - */ -static const char * -__db_pagetype_to_string(type) - u_int32_t type; -{ - char *s; - - s = NULL; - switch (type) { - case P_BTREEMETA: - s = "btree metadata"; - break; - case P_LDUP: - s = "duplicate"; - break; - case P_HASH: - s = "hash"; - break; - case P_HASHMETA: - s = "hash metadata"; - break; - case P_IBTREE: - s = "btree internal"; - break; - case P_INVALID: - s = "invalid"; - break; - case P_IRECNO: - s = "recno internal"; - break; - case P_LBTREE: - s = "btree leaf"; - break; - case P_LRECNO: - s = "recno leaf"; - break; - case P_OVERFLOW: - s = "overflow"; - break; - case P_QAMMETA: - s = "queue metadata"; - break; - case P_QAMDATA: - s = "queue"; - break; - default: - /* Just return a NULL. */ - break; - } - return (s); -} - -#else /* !HAVE_STATISTICS */ - -/* - * __db_dumptree -- - * Dump the tree to a file. - * - * PUBLIC: int __db_dumptree __P((DB *, char *, char *)); - */ -int -__db_dumptree(dbp, op, name) - DB *dbp; - char *op, *name; -{ - COMPQUIET(op, NULL); - COMPQUIET(name, NULL); - - return (__db_stat_not_built(dbp->dbenv)); -} - -/* - * __db_get_flags_fn -- - * Return the __db_flags_fn array. - * - * PUBLIC: const FN * __db_get_flags_fn __P((void)); - */ -const FN * -__db_get_flags_fn() -{ - static const FN __db_flags_fn[] = { - { 0, NULL } - }; - - /* - * !!! - * The Tcl API uses this interface, stub it off. - */ - return (__db_flags_fn); -} -#endif - -/* - * __db_dump_pp -- - * DB->dump pre/post processing. - * - * PUBLIC: int __db_dump_pp __P((DB *, const char *, - * PUBLIC: int (*)(void *, const void *), void *, int, int)); - */ -int -__db_dump_pp(dbp, subname, callback, handle, pflag, keyflag) - DB *dbp; - const char *subname; - int (*callback) __P((void *, const void *)); - void *handle; - int pflag, keyflag; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->dump"); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 1)) != 0) { - handle_check = 0; - goto err; - } - - ret = __db_dump(dbp, subname, callback, handle, pflag, keyflag); - - /* Release replication block. 
*/ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_dump -- - * DB->dump. - * - * PUBLIC: int __db_dump __P((DB *, const char *, - * PUBLIC: int (*)(void *, const void *), void *, int, int)); - */ -int -__db_dump(dbp, subname, callback, handle, pflag, keyflag) - DB *dbp; - const char *subname; - int (*callback) __P((void *, const void *)); - void *handle; - int pflag, keyflag; -{ - DB_ENV *dbenv; - DBC *dbcp; - DBT key, data; - DBT keyret, dataret; - db_recno_t recno; - int is_recno, ret, t_ret; - void *pointer; - - dbenv = dbp->dbenv; - - if ((ret = __db_prheader( - dbp, subname, pflag, keyflag, handle, callback, NULL, 0)) != 0) - return (ret); - - /* - * Get a cursor and step through the database, printing out each - * key/data pair. - */ - if ((ret = __db_cursor(dbp, NULL, &dbcp, 0)) != 0) - return (ret); - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - if ((ret = __os_malloc(dbenv, 1024 * 1024, &data.data)) != 0) - goto err; - data.ulen = 1024 * 1024; - data.flags = DB_DBT_USERMEM; - is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE); - keyflag = is_recno ? keyflag : 1; - if (is_recno) { - keyret.data = &recno; - keyret.size = sizeof(recno); - } - -retry: while ((ret = - __db_c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) { - DB_MULTIPLE_INIT(pointer, &data); - for (;;) { - if (is_recno) - DB_MULTIPLE_RECNO_NEXT(pointer, &data, - recno, dataret.data, dataret.size); - else - DB_MULTIPLE_KEY_NEXT(pointer, - &data, keyret.data, - keyret.size, dataret.data, dataret.size); - - if (dataret.data == NULL) - break; - - if ((keyflag && - (ret = __db_prdbt(&keyret, pflag, " ", - handle, callback, is_recno)) != 0) || - (ret = __db_prdbt(&dataret, pflag, " ", - handle, callback, 0)) != 0) - goto err; - } - } - if (ret == DB_BUFFER_SMALL) { - data.size = (u_int32_t)DB_ALIGN(data.size, 1024); - if ((ret = __os_realloc(dbenv, data.size, &data.data)) != 0) - goto err; - data.ulen = data.size; - goto retry; - } - if (ret == DB_NOTFOUND) - ret = 0; - - if ((t_ret = __db_prfooter(handle, callback)) != 0 && ret == 0) - ret = t_ret; - -err: if ((t_ret = __db_c_close(dbcp)) != 0 && ret == 0) - ret = t_ret; - if (data.data != NULL) - __os_free(dbenv, data.data); - - return (ret); -} - -/* - * __db_prdbt -- - * Print out a DBT data element. - * - * PUBLIC: int __db_prdbt __P((DBT *, int, const char *, void *, - * PUBLIC: int (*)(void *, const void *), int)); - */ -int -__db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno) - DBT *dbtp; - int checkprint; - const char *prefix; - void *handle; - int (*callback) __P((void *, const void *)); - int is_recno; -{ - static const u_char hex[] = "0123456789abcdef"; - db_recno_t recno; - size_t len; - int ret; -#define DBTBUFLEN 100 - u_int8_t *p, *hp; - char buf[DBTBUFLEN], hbuf[DBTBUFLEN]; - - /* - * !!! - * This routine is the routine that dumps out items in the format - * used by db_dump(1) and db_load(1). This means that the format - * cannot change. - */ - if (prefix != NULL && (ret = callback(handle, prefix)) != 0) - return (ret); - if (is_recno) { - /* - * We're printing a record number, and this has to be done - * in a platform-independent way. So we use the numeral in - * straight ASCII. - */ - (void)__ua_memcpy(&recno, dbtp->data, sizeof(recno)); - snprintf(buf, DBTBUFLEN, "%lu", (u_long)recno); - - /* If we're printing data as hex, print keys as hex too. 
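/*
 * Illustrative sketch, not from the removed source: __db_dump() above
 * reads records in bulk into a caller-owned buffer and, when the access
 * method reports the buffer is too small, rounds the required size up to
 * a 1KB boundary, reallocates, and retries.  toy_bulk_read(), the TOY_*
 * names, and the error value are hypothetical stand-ins for DBC->get()
 * with DB_MULTIPLE_KEY, DB_BUFFER_SMALL, DB_ALIGN, and __os_realloc.
 */
#include <stdlib.h>
#include <string.h>

#define	TOY_BUFFER_SMALL	(-30999)
#define	TOY_ALIGN(v, b)		(((v) + (b) - 1) & ~((size_t)(b) - 1))

/* Pretend bulk read: needs `need` bytes and fails until the buffer fits. */
static int
toy_bulk_read(void *buf, size_t ulen, size_t need, size_t *sizep)
{
	*sizep = need;
	if (ulen < need)
		return (TOY_BUFFER_SMALL);
	memset(buf, 0, need);		/* "fill" the buffer */
	return (0);
}

static int
toy_dump(size_t need)
{
	void *data, *p;
	size_t size, ulen;
	int ret;

	ulen = 1024 * 1024;
	if ((data = malloc(ulen)) == NULL)
		return (-1);

retry:	if ((ret = toy_bulk_read(data, ulen, need, &size)) ==
	    TOY_BUFFER_SMALL) {
		ulen = TOY_ALIGN(size, 1024);	/* grow and try again */
		if ((p = realloc(data, ulen)) == NULL) {
			free(data);
			return (-1);
		}
		data = p;
		goto retry;
	}
	free(data);
	return (ret);
}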
*/ - if (!checkprint) { - for (len = strlen(buf), p = (u_int8_t *)buf, - hp = (u_int8_t *)hbuf; len-- > 0; ++p) { - *hp++ = hex[(u_int8_t)(*p & 0xf0) >> 4]; - *hp++ = hex[*p & 0x0f]; - } - *hp = '\0'; - ret = callback(handle, hbuf); - } else - ret = callback(handle, buf); - - if (ret != 0) - return (ret); - } else if (checkprint) { - for (len = dbtp->size, p = dbtp->data; len--; ++p) - if (isprint((int)*p)) { - if (*p == '\\' && - (ret = callback(handle, "\\")) != 0) - return (ret); - snprintf(buf, DBTBUFLEN, "%c", *p); - if ((ret = callback(handle, buf)) != 0) - return (ret); - } else { - snprintf(buf, DBTBUFLEN, "\\%c%c", - hex[(u_int8_t)(*p & 0xf0) >> 4], - hex[*p & 0x0f]); - if ((ret = callback(handle, buf)) != 0) - return (ret); - } - } else - for (len = dbtp->size, p = dbtp->data; len--; ++p) { - snprintf(buf, DBTBUFLEN, "%c%c", - hex[(u_int8_t)(*p & 0xf0) >> 4], - hex[*p & 0x0f]); - if ((ret = callback(handle, buf)) != 0) - return (ret); - } - - return (callback(handle, "\n")); -} - -/* - * __db_prheader -- - * Write out header information in the format expected by db_load. - * - * PUBLIC: int __db_prheader __P((DB *, const char *, int, int, void *, - * PUBLIC: int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t)); - */ -int -__db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) - DB *dbp; - const char *subname; - int pflag, keyflag; - void *handle; - int (*callback) __P((void *, const void *)); - VRFY_DBINFO *vdp; - db_pgno_t meta_pgno; -{ - DBT dbt; - DB_ENV *dbenv; - DBTYPE dbtype; - VRFY_PAGEINFO *pip; - u_int32_t flags, tmp_u_int32; - size_t buflen; - char *buf; - int using_vdp, ret, t_ret, tmp_int; - - ret = 0; - buf = NULL; - COMPQUIET(buflen, 0); - - /* - * If dbp is NULL, then pip is guaranteed to be non-NULL; we only ever - * call __db_prheader with a NULL dbp from one case inside __db_prdbt, - * and this is a special subdatabase for "lost" items. In this case - * we have a vdp (from which we'll get a pip). In all other cases, we - * will have a non-NULL dbp (and vdp may or may not be NULL depending - * on whether we're salvaging). - */ - DB_ASSERT(dbp != NULL || vdp != NULL); - - if (dbp == NULL) - dbenv = NULL; - else - dbenv = dbp->dbenv; - - /* - * If we've been passed a verifier statistics object, use that; we're - * being called in a context where dbp->stat is unsafe. - * - * Also, the verifier may set the pflag on a per-salvage basis. If so, - * respect that. - */ - if (vdp != NULL) { - if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0) - return (ret); - - if (F_ISSET(vdp, SALVAGE_PRINTABLE)) - pflag = 1; - using_vdp = 1; - } else { - pip = NULL; - using_vdp = 0; - } - - /* - * If dbp is NULL, make it a btree. Otherwise, set dbtype to whatever - * appropriate type for the specified meta page, or the type of the dbp. - */ - if (dbp == NULL) - dbtype = DB_BTREE; - else if (using_vdp) - switch (pip->type) { - case P_BTREEMETA: - if (F_ISSET(pip, VRFY_IS_RECNO)) - dbtype = DB_RECNO; - else - dbtype = DB_BTREE; - break; - case P_HASHMETA: - dbtype = DB_HASH; - break; - case P_QAMMETA: - dbtype = DB_QUEUE; - break; - default: - /* - * If the meta page is of a bogus type, it's because - * we have a badly corrupt database. (We must be in - * the verifier for pip to be non-NULL.) Pretend we're - * a Btree and salvage what we can. 
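/*
 * Illustrative sketch, not from the removed source: __db_prdbt() above
 * emits each DBT in the db_dump/db_load text format -- in "print" mode a
 * printable byte passes through, a backslash is doubled, and any other
 * byte becomes a backslash followed by two hex digits; in "bytevalue"
 * mode every byte becomes two hex digits.  toy_prdbt() is a hypothetical
 * standalone version that writes to a stdio stream instead of a callback.
 */
#include <ctype.h>
#include <stdio.h>

static void
toy_prdbt(FILE *fp, const unsigned char *p, size_t len, int checkprint)
{
	static const char hex[] = "0123456789abcdef";

	for (; len-- > 0; ++p)
		if (checkprint && *p == '\\')
			fputs("\\\\", fp);		/* escape backslash */
		else if (checkprint && isprint(*p))
			putc(*p, fp);			/* printable as-is */
		else if (checkprint)
			fprintf(fp, "\\%c%c",		/* \xx escape */
			    hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]);
		else
			fprintf(fp, "%c%c",		/* raw hex pairs */
			    hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]);
	putc('\n', fp);
}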
- */ - DB_ASSERT(F_ISSET(dbp, DB_AM_VERIFYING)); - dbtype = DB_BTREE; - break; - } - else - dbtype = dbp->type; - - if ((ret = callback(handle, "VERSION=3\n")) != 0) - goto err; - if (pflag) { - if ((ret = callback(handle, "format=print\n")) != 0) - goto err; - } else if ((ret = callback(handle, "format=bytevalue\n")) != 0) - goto err; - - /* - * 64 bytes is long enough, as a minimum bound, for any of the - * fields besides subname. Subname uses __db_prdbt and therefore - * does not need buffer space here. - */ - buflen = 64; - if ((ret = __os_malloc(dbenv, buflen, &buf)) != 0) - goto err; - if (subname != NULL) { - snprintf(buf, buflen, "database="); - if ((ret = callback(handle, buf)) != 0) - goto err; - memset(&dbt, 0, sizeof(dbt)); - dbt.data = (char *)subname; - dbt.size = (u_int32_t)strlen(subname); - if ((ret = __db_prdbt(&dbt, 1, NULL, handle, callback, 0)) != 0) - goto err; - } - switch (dbtype) { - case DB_BTREE: - if ((ret = callback(handle, "type=btree\n")) != 0) - goto err; - if (using_vdp) - tmp_int = F_ISSET(pip, VRFY_HAS_RECNUMS) ? 1 : 0; - else { - if ((ret = __db_get_flags(dbp, &flags)) != 0) { - __db_err(dbenv, - "DB->get_flags: %s", db_strerror(ret)); - goto err; - } - tmp_int = F_ISSET(dbp, DB_AM_RECNUM) ? 1 : 0; - } - if (tmp_int && (ret = callback(handle, "recnum=1\n")) != 0) - goto err; - - if (using_vdp) - tmp_u_int32 = pip->bt_minkey; - else - if ((ret = - __bam_get_bt_minkey(dbp, &tmp_u_int32)) != 0) { - __db_err(dbenv, - "DB->get_bt_minkey: %s", db_strerror(ret)); - goto err; - } - if (tmp_u_int32 != 0 && tmp_u_int32 != DEFMINKEYPAGE) { - snprintf(buf, buflen, - "bt_minkey=%lu\n", (u_long)tmp_u_int32); - if ((ret = callback(handle, buf)) != 0) - goto err; - } - break; - case DB_HASH: -#ifdef HAVE_HASH - if ((ret = callback(handle, "type=hash\n")) != 0) - goto err; - if (using_vdp) - tmp_u_int32 = pip->h_ffactor; - else - if ((ret = - __ham_get_h_ffactor(dbp, &tmp_u_int32)) != 0) { - __db_err(dbenv, - "DB->get_h_ffactor: %s", db_strerror(ret)); - goto err; - } - if (tmp_u_int32 != 0) { - snprintf(buf, buflen, - "h_ffactor=%lu\n", (u_long)tmp_u_int32); - if ((ret = callback(handle, buf)) != 0) - goto err; - } - - if (using_vdp) - tmp_u_int32 = pip->h_nelem; - else - if ((ret = __ham_get_h_nelem(dbp, &tmp_u_int32)) != 0) { - __db_err(dbenv, - "DB->get_h_nelem: %s", db_strerror(ret)); - goto err; - } - /* - * Hash databases have an h_nelem field of 0 or 1, neither - * of those values is interesting. 
- */ - if (tmp_u_int32 > 1) { - snprintf(buf, buflen, - "h_nelem=%lu\n", (u_long)tmp_u_int32); - if ((ret = callback(handle, buf)) != 0) - goto err; - } - break; -#else - ret = __db_no_hash_am(dbenv); - goto err; -#endif - case DB_QUEUE: -#ifdef HAVE_QUEUE - if ((ret = callback(handle, "type=queue\n")) != 0) - goto err; - if (using_vdp) - tmp_u_int32 = vdp->re_len; - else - if ((ret = __ram_get_re_len(dbp, &tmp_u_int32)) != 0) { - __db_err(dbenv, - "DB->get_re_len: %s", db_strerror(ret)); - goto err; - } - snprintf(buf, buflen, "re_len=%lu\n", (u_long)tmp_u_int32); - if ((ret = callback(handle, buf)) != 0) - goto err; - - if (using_vdp) - tmp_int = (int)vdp->re_pad; - else - if ((ret = __ram_get_re_pad(dbp, &tmp_int)) != 0) { - __db_err(dbenv, - "DB->get_re_pad: %s", db_strerror(ret)); - goto err; - } - if (tmp_int != 0 && tmp_int != ' ') { - snprintf(buf, buflen, "re_pad=%#x\n", tmp_int); - if ((ret = callback(handle, buf)) != 0) - goto err; - } - - if (using_vdp) - tmp_u_int32 = vdp->page_ext; - else - if ((ret = - __qam_get_extentsize(dbp, &tmp_u_int32)) != 0) { - __db_err(dbenv, "DB->get_q_extentsize: %s", - db_strerror(ret)); - goto err; - } - if (tmp_u_int32 != 0) { - snprintf(buf, buflen, - "extentsize=%lu\n", (u_long)tmp_u_int32); - if ((ret = callback(handle, buf)) != 0) - goto err; - } - break; -#else - ret = __db_no_queue_am(dbenv); - goto err; -#endif - case DB_RECNO: - if ((ret = callback(handle, "type=recno\n")) != 0) - goto err; - if (using_vdp) - tmp_int = F_ISSET(pip, VRFY_IS_RRECNO) ? 1 : 0; - else - tmp_int = F_ISSET(dbp, DB_AM_RENUMBER) ? 1 : 0; - if (tmp_int != 0 && - (ret = callback(handle, "renumber=1\n")) != 0) - goto err; - - if (using_vdp) - tmp_int = F_ISSET(pip, VRFY_IS_FIXEDLEN) ? 1 : 0; - else - tmp_int = F_ISSET(dbp, DB_AM_FIXEDLEN) ? 1 : 0; - if (tmp_int) { - if (using_vdp) - tmp_u_int32 = pip->re_len; - else - if ((ret = - __ram_get_re_len(dbp, &tmp_u_int32)) != 0) { - __db_err(dbenv, "DB->get_re_len: %s", - db_strerror(ret)); - goto err; - } - snprintf(buf, buflen, - "re_len=%lu\n", (u_long)tmp_u_int32); - if ((ret = callback(handle, buf)) != 0) - goto err; - - if (using_vdp) - tmp_int = (int)pip->re_pad; - else - if ((ret = - __ram_get_re_pad(dbp, &tmp_int)) != 0) { - __db_err(dbenv, "DB->get_re_pad: %s", - db_strerror(ret)); - goto err; - } - if (tmp_int != 0 && tmp_int != ' ') { - snprintf(buf, - buflen, "re_pad=%#x\n", (u_int)tmp_int); - if ((ret = callback(handle, buf)) != 0) - goto err; - } - } - break; - case DB_UNKNOWN: - DB_ASSERT(0); /* Impossible. */ - __db_err(dbenv, - "Unknown or unsupported DB type in __db_prheader"); - ret = EINVAL; - goto err; - } - - if (using_vdp) { - if (F_ISSET(pip, VRFY_HAS_CHKSUM)) - if ((ret = callback(handle, "chksum=1\n")) != 0) - goto err; - if (F_ISSET(pip, VRFY_HAS_DUPS)) - if ((ret = callback(handle, "duplicates=1\n")) != 0) - goto err; - if (F_ISSET(pip, VRFY_HAS_DUPSORT)) - if ((ret = callback(handle, "dupsort=1\n")) != 0) - goto err; - /* - * !!! - * We don't know if the page size was the default if we're - * salvaging. It doesn't seem that interesting to have, so - * we ignore it for now. 
- */ - } else { - if (F_ISSET(dbp, DB_AM_CHKSUM)) - if ((ret = callback(handle, "chksum=1\n")) != 0) - goto err; - if (F_ISSET(dbp, DB_AM_DUP)) - if ((ret = callback(handle, "duplicates=1\n")) != 0) - goto err; - if (F_ISSET(dbp, DB_AM_DUPSORT)) - if ((ret = callback(handle, "dupsort=1\n")) != 0) - goto err; - if (!F_ISSET(dbp, DB_AM_PGDEF)) { - snprintf(buf, buflen, - "db_pagesize=%lu\n", (u_long)dbp->pgsize); - if ((ret = callback(handle, buf)) != 0) - goto err; - } - } - - if (keyflag && (ret = callback(handle, "keys=1\n")) != 0) - goto err; - - ret = callback(handle, "HEADER=END\n"); - -err: if (using_vdp && - (t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - if (buf != NULL) - __os_free(dbenv, buf); - - return (ret); -} - -/* - * __db_prfooter -- - * Print the footer that marks the end of a DB dump. This is trivial, - * but for consistency's sake we don't want to put its literal contents - * in multiple places. - * - * PUBLIC: int __db_prfooter __P((void *, int (*)(void *, const void *))); - */ -int -__db_prfooter(handle, callback) - void *handle; - int (*callback) __P((void *, const void *)); -{ - return (callback(handle, "DATA=END\n")); -} - -/* - * __db_pr_callback -- - * Callback function for using pr_* functions from C. - * - * PUBLIC: int __db_pr_callback __P((void *, const void *)); - */ -int -__db_pr_callback(handle, str_arg) - void *handle; - const void *str_arg; -{ - char *str; - FILE *f; - - str = (char *)str_arg; - f = (FILE *)handle; - - if (fprintf(f, "%s", str) != (int)strlen(str)) - return (EIO); - - return (0); -} - -/* - * __db_dbtype_to_string -- - * Return the name of the database type. - * - * PUBLIC: const char * __db_dbtype_to_string __P((DBTYPE)); - */ -const char * -__db_dbtype_to_string(type) - DBTYPE type; -{ - switch (type) { - case DB_BTREE: - return ("btree"); - case DB_HASH: - return ("hash"); - case DB_RECNO: - return ("recno"); - case DB_QUEUE: - return ("queue"); - case DB_UNKNOWN: - default: - break; - } - return ("UNKNOWN TYPE"); -} diff --git a/storage/bdb/db/db_rec.c b/storage/bdb/db/db_rec.c deleted file mode 100644 index e0c13f255c1..00000000000 --- a/storage/bdb/db/db_rec.c +++ /dev/null @@ -1,1266 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_rec.c,v 12.12 2005/10/27 01:03:01 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/hash.h" - -static int __db_pg_free_recover_int __P((DB_ENV *, - __db_pg_freedata_args *, DB *, DB_LSN *, DB_MPOOLFILE *, db_recops, int)); - -/* - * PUBLIC: int __db_addrem_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - * - * This log message is generated whenever we add or remove a duplicate - * to/from a duplicate page. On recover, we just do the opposite. 
- */ -int -__db_addrem_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_addrem_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - u_int32_t change; - int cmp_n, cmp_p, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__db_addrem_print); - REC_INTRO(__db_addrem_read, 1, 1); - - REC_FGET(mpf, argp->pgno, &pagep, done); - - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->pagelsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn); - change = 0; - if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_DUP) || - (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_DUP)) { - - /* Need to redo an add, or undo a delete. */ - if ((ret = __db_pitem(dbc, pagep, argp->indx, argp->nbytes, - argp->hdr.size == 0 ? NULL : &argp->hdr, - argp->dbt.size == 0 ? NULL : &argp->dbt)) != 0) - goto out; - - change = DB_MPOOL_DIRTY; - - } else if ((cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_ADD_DUP) || - (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_DUP)) { - /* Need to undo an add, or redo a delete. */ - if ((ret = __db_ditem(dbc, - pagep, argp->indx, argp->nbytes)) != 0) - goto out; - change = DB_MPOOL_DIRTY; - } - - if (change) { - if (DB_REDO(op)) - LSN(pagep) = *lsnp; - else - LSN(pagep) = argp->pagelsn; - } - - if ((ret = __memp_fput(mpf, pagep, change)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * PUBLIC: int __db_big_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_big_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_big_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - u_int32_t change; - int cmp_n, cmp_p, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__db_big_print); - REC_INTRO(__db_big_read, 1, 0); - - REC_FGET(mpf, argp->pgno, &pagep, ppage); - - /* - * There are three pages we need to check. The one on which we are - * adding data, the previous one whose next_pointer may have - * been updated, and the next one whose prev_pointer may have - * been updated. - */ - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->pagelsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn); - change = 0; - if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) || - (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_BIG)) { - /* We are either redo-ing an add, or undoing a delete. */ - P_INIT(pagep, file_dbp->pgsize, argp->pgno, argp->prev_pgno, - argp->next_pgno, 0, P_OVERFLOW); - OV_LEN(pagep) = argp->dbt.size; - OV_REF(pagep) = 1; - memcpy((u_int8_t *)pagep + P_OVERHEAD(file_dbp), argp->dbt.data, - argp->dbt.size); - PREV_PGNO(pagep) = argp->prev_pgno; - change = DB_MPOOL_DIRTY; - } else if ((cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_ADD_BIG) || - (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_BIG)) { - /* - * We are either undo-ing an add or redo-ing a delete. - * The page is about to be reclaimed in either case, so - * there really isn't anything to do here. - */ - change = DB_MPOOL_DIRTY; - } - if (change) - LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn; - - if ((ret = __memp_fput(mpf, pagep, change)) != 0) - goto out; - pagep = NULL; - - /* - * We only delete a whole chain of overflow. 
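/*
 * Illustrative sketch, not from the removed source: the deleted recovery
 * functions below (__db_addrem_recover, __db_big_recover, ...) all follow
 * the same idempotent pattern -- compare the page's LSN with the LSNs the
 * log record remembers, redo only if the page still carries the
 * pre-change LSN, undo only if it carries this record's LSN, and restamp
 * the page either way.  The toy_* structures and names are simplified
 * stand-ins for PAGE, DB_LSN, and the generated *_args records.
 */
struct toy_lsn { unsigned long file, offset; };

struct toy_pg {
	struct toy_lsn lsn;	/* LSN of the last change applied */
	int value;		/* the "data" the log record describes */
};

static int
toy_lsn_cmp(const struct toy_lsn *a, const struct toy_lsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}

/*
 * rec_lsn:  LSN of this log record.
 * page_lsn: LSN the page carried before the logged change (saved in the record).
 */
static void
toy_recover(struct toy_pg *p, const struct toy_lsn *rec_lsn,
    const struct toy_lsn *page_lsn, int old_value, int new_value, int redo)
{
	if (redo && toy_lsn_cmp(&p->lsn, page_lsn) == 0) {
		p->value = new_value;	/* reapply the change */
		p->lsn = *rec_lsn;
	} else if (!redo && toy_lsn_cmp(rec_lsn, &p->lsn) == 0) {
		p->value = old_value;	/* roll the change back */
		p->lsn = *page_lsn;
	}
	/* Any other combination: the page is already in the right state. */
}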
- * Each page is handled individually - */ - if (argp->opcode == DB_REM_BIG) - goto done; - - /* Now check the previous page. */ -ppage: if (argp->prev_pgno != PGNO_INVALID) { - change = 0; - REC_FGET(mpf, argp->prev_pgno, &pagep, npage); - - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->prevlsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->prevlsn); - - if (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) { - /* Redo add, undo delete. */ - NEXT_PGNO(pagep) = argp->pgno; - change = DB_MPOOL_DIRTY; - } else if (cmp_n == 0 && - DB_UNDO(op) && argp->opcode == DB_ADD_BIG) { - /* Redo delete, undo add. */ - NEXT_PGNO(pagep) = argp->next_pgno; - change = DB_MPOOL_DIRTY; - } - if (change) - LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn; - if ((ret = __memp_fput(mpf, pagep, change)) != 0) - goto out; - } - pagep = NULL; - - /* Now check the next page. Can only be set on a delete. */ -npage: if (argp->next_pgno != PGNO_INVALID) { - change = 0; - REC_FGET(mpf, argp->next_pgno, &pagep, done); - - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->nextlsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->nextlsn); - if (cmp_p == 0 && DB_REDO(op)) { - PREV_PGNO(pagep) = PGNO_INVALID; - change = DB_MPOOL_DIRTY; - } else if (cmp_n == 0 && DB_UNDO(op)) { - PREV_PGNO(pagep) = argp->pgno; - change = DB_MPOOL_DIRTY; - } - if (change) - LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn; - if ((ret = __memp_fput(mpf, pagep, change)) != 0) - goto out; - } - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __db_ovref_recover -- - * Recovery function for __db_ovref(). - * - * PUBLIC: int __db_ovref_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_ovref_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_ovref_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp, modified, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__db_ovref_print); - REC_INTRO(__db_ovref_read, 1, 0); - - REC_FGET(mpf, argp->pgno, &pagep, done); - - modified = 0; - cmp = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(dbenv, op, cmp, &LSN(pagep), &argp->lsn); - if (cmp == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - OV_REF(pagep) += argp->adjust; - - pagep->lsn = *lsnp; - modified = 1; - } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) { - /* Need to undo update described. */ - OV_REF(pagep) -= argp->adjust; - - pagep->lsn = argp->lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __db_debug_recover -- - * Recovery function for debug. 
- * - * PUBLIC: int __db_debug_recover __P((DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_debug_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_debug_args *argp; - int ret; - - COMPQUIET(dbenv, NULL); - COMPQUIET(op, DB_TXN_ABORT); - COMPQUIET(info, NULL); - - REC_PRINT(__db_debug_print); - REC_NOOP_INTRO(__db_debug_read); - - *lsnp = argp->prev_lsn; - ret = 0; - - REC_NOOP_CLOSE; -} - -/* - * __db_noop_recover -- - * Recovery function for noop. - * - * PUBLIC: int __db_noop_recover __P((DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_noop_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_noop_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - u_int32_t change; - int cmp_n, cmp_p, ret; - - pagep = NULL; - COMPQUIET(info, NULL); - REC_PRINT(__db_noop_print); - REC_INTRO(__db_noop_read, 0, 0); - - REC_FGET(mpf, argp->pgno, &pagep, done); - - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->prevlsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->prevlsn); - change = 0; - if (cmp_p == 0 && DB_REDO(op)) { - LSN(pagep) = *lsnp; - change = DB_MPOOL_DIRTY; - } else if (cmp_n == 0 && DB_UNDO(op)) { - LSN(pagep) = argp->prevlsn; - change = DB_MPOOL_DIRTY; - } - ret = __memp_fput(mpf, pagep, change); - pagep = NULL; - -done: *lsnp = argp->prev_lsn; -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - REC_CLOSE; -} - -/* - * __db_pg_alloc_recover -- - * Recovery function for pg_alloc. - * - * PUBLIC: int __db_pg_alloc_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_pg_alloc_args *argp; - DB *file_dbp; - DBC *dbc; - DBMETA *meta; - DB_MPOOLFILE *mpf; - PAGE *pagep; - db_pgno_t pgno; - int cmp_n, cmp_p, created, level, meta_modified, modified, ret; - - meta = NULL; - pagep = NULL; - created = meta_modified = modified = 0; - REC_PRINT(__db_pg_alloc_print); - REC_INTRO(__db_pg_alloc_read, 0, 0); - - /* - * Fix up the metadata page. If we're redoing the operation, we have - * to get the metadata page and update its LSN and its free pointer. - * If we're undoing the operation and the page was ever created, we put - * it on the freelist. - */ - pgno = PGNO_BASE_MD; - if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) { - /* The metadata page must always exist on redo. */ - if (DB_REDO(op)) { - ret = __db_pgerr(file_dbp, pgno, ret); - goto out; - } else - goto done; - } - cmp_n = log_compare(lsnp, &LSN(meta)); - cmp_p = log_compare(&LSN(meta), &argp->meta_lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(meta), &argp->meta_lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - LSN(meta) = *lsnp; - meta->free = argp->next; - meta_modified = 1; - if (argp->pgno > meta->last_pgno) - meta->last_pgno = argp->pgno; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo update described. */ - LSN(meta) = argp->meta_lsn; - /* - * If the page has a zero LSN then its newly created - * and will be truncated or go into limbo rather than - * directly on the free list. - */ - if (!IS_ZERO_LSN(argp->page_lsn)) - meta->free = argp->pgno; -#ifdef HAVE_FTRUNCATE - /* - * With truncate we will restore the file to - * its original length. 
Without truncate - * the last_pgno never goes backward. - */ - meta->last_pgno = argp->last_pgno; -#endif - meta_modified = 1; - } - -#ifdef HAVE_FTRUNCATE - /* - * Check to see if we are keeping a sorted - * freelist, if so put this back in the in - * memory list. It must be the first element. - */ - if (op == DB_TXN_ABORT && !IS_ZERO_LSN(argp->page_lsn)) { - db_pgno_t *list; - u_int32_t nelem; - - if ((ret = __memp_get_freelist(mpf, &nelem, &list)) != 0) - goto out; - if (list != NULL) { - if ((ret = - __memp_extend_freelist(mpf, nelem + 1, &list)) != 0) - goto out; - if (nelem != 0) - memmove(list + 1, list, nelem * sizeof(list)); - *list = argp->pgno; - } - } -#endif - - /* - * Fix up the allocated page. If the page does not exist - * and we can truncate it then don't create it. - * Otherwise if we're redoing the operation, we have - * to get the page (creating it if it doesn't exist), and update its - * LSN. If we're undoing the operation, we have to reset the page's - * LSN and put it on the free list, or into limbo.. - */ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - /* - * We have to be able to identify if a page was newly - * created so we can recover it properly. We cannot simply - * look for an empty header, because hash uses a pgin - * function that will set the header. Instead, we explicitly - * try for the page without CREATE and if that fails, then - * create it. - */ -#ifdef HAVE_FTRUNCATE - if (DB_UNDO(op)) - goto do_truncate; -#endif - if ((ret = __memp_fget( - mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) { - if (DB_UNDO(op) && ret == ENOSPC) - goto do_truncate; - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } - created = modified = 1; - } - - /* Fix up the allocated page. */ - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &argp->page_lsn); - - /* - * If an initial allocation is aborted and then reallocated during - * an archival restore the log record will have an LSN for the page - * but the page will be empty. - * If we we rolled back this allocation previously during an - * archive restore, the page may have INIT_LSN from the limbo list. - */ - if (IS_ZERO_LSN(LSN(pagep)) || - (IS_ZERO_LSN(argp->page_lsn) && IS_INIT_LSN(LSN(pagep)))) - cmp_p = 0; - - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->page_lsn); - /* - * Another special case we have to handle is if we ended up with a - * page of all 0's which can happen if we abort between allocating a - * page in mpool and initializing it. In that case, even if we're - * undoing, we need to re-initialize the page. - */ - if (DB_REDO(op) && cmp_p == 0) { - /* Need to redo update described. */ - switch (argp->ptype) { - case P_LBTREE: - case P_LRECNO: - case P_LDUP: - level = LEAFLEVEL; - break; - default: - level = 0; - break; - } - P_INIT(pagep, file_dbp->pgsize, - argp->pgno, PGNO_INVALID, PGNO_INVALID, level, argp->ptype); - - pagep->lsn = *lsnp; - modified = 1; - } else if (DB_UNDO(op) && (cmp_n == 0 || created)) { - /* - * This is where we handle the case of a 0'd page (pagep->pgno - * is equal to PGNO_INVALID). - * Undo the allocation, reinitialize the page and - * link its next pointer to the free list. - */ - P_INIT(pagep, file_dbp->pgsize, - argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID); - - pagep->lsn = argp->page_lsn; - modified = 1; - } - -do_truncate: - /* - * If the page was newly created, give it back, if - * possible. Otherwise put it into limbo. 
- */ - if ((pagep == NULL || IS_ZERO_LSN(LSN(pagep))) && - IS_ZERO_LSN(argp->page_lsn) && DB_UNDO(op)) { -#ifdef HAVE_FTRUNCATE - COMPQUIET(info, NULL); - /* Discard the page. */ - if (pagep != NULL) { - if ((ret = - __memp_fput(mpf, pagep, DB_MPOOL_DISCARD)) != 0) - goto out; - pagep = NULL; - /* Give the page back to the OS. */ - if (meta->last_pgno <= argp->pgno && - (ret = __memp_ftruncate(mpf, argp->pgno, 0)) != 0) - goto out; - } -#else - /* Put the page in limbo.*/ - if ((ret = __db_add_limbo(dbenv, - info, argp->fileid, argp->pgno, 1)) != 0) - goto out; - /* The last_pgno grows if this was a new page. */ - if (argp->pgno > meta->last_pgno) { - meta->last_pgno = argp->pgno; - meta_modified = 1; - } -#endif - } - - if (pagep != NULL && - (ret = __memp_fput(mpf, - pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - - if ((ret = __memp_fput(mpf, - meta, meta_modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - meta = NULL; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - if (meta != NULL) - (void)__memp_fput(mpf, meta, 0); - if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC) - ret = 0; - REC_CLOSE; -} - -/* - * __db_pg_free_recover_int -- - */ -static int -__db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data) - DB_ENV *dbenv; - __db_pg_freedata_args *argp; - DB *file_dbp; - DB_LSN *lsnp; - DB_MPOOLFILE *mpf; - db_recops op; - int data; -{ - DBMETA *meta; - DB_LSN copy_lsn; - PAGE *pagep, *prevp; - int cmp_n, cmp_p, is_meta, meta_modified, modified, ret; - - meta = NULL; - pagep = NULL; - prevp = NULL; - meta_modified = modified = 0; - - /* - * Get the "metapage". This will either be the metapage - * or the previous page in the free list if we are doing - * sorted allocations. If its a previous page then - * we will not be truncating. - */ - is_meta = argp->meta_pgno == PGNO_BASE_MD; - - REC_FGET(mpf, argp->meta_pgno, &meta, check_meta); - - if (argp->meta_pgno != PGNO_BASE_MD) - prevp = (PAGE *)meta; - - cmp_n = log_compare(lsnp, &LSN(meta)); - cmp_p = log_compare(&LSN(meta), &argp->meta_lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(meta), &argp->meta_lsn); - - /* - * Fix up the metadata page. If we're redoing or undoing the operation - * we get the page and update its LSN, last and free pointer. - */ - if (cmp_p == 0 && DB_REDO(op)) { -#ifdef HAVE_FTRUNCATE - /* - * If we are at the end of the file truncate, otherwise - * put on the free list. - */ - if (argp->pgno == argp->last_pgno) - meta->last_pgno = argp->pgno - 1; - else if (prevp == NULL) - meta->free = argp->pgno; - else - NEXT_PGNO(prevp) = argp->pgno; -#else - /* Need to redo the deallocation. */ - if (prevp == NULL) - meta->free = argp->pgno; - else - NEXT_PGNO(prevp) = argp->pgno; - /* - * If this was a compensating transaction and - * we are a replica, then we never executed the - * original allocation which incremented meta->free. - */ - if (prevp == NULL && meta->last_pgno < meta->free) - meta->last_pgno = meta->free; -#endif - LSN(meta) = *lsnp; - meta_modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to undo the deallocation. */ - if (prevp == NULL) - meta->free = argp->next; - else - NEXT_PGNO(prevp) = argp->next; - LSN(meta) = argp->meta_lsn; - if (prevp == NULL && meta->last_pgno < argp->pgno) - meta->last_pgno = argp->pgno; - meta_modified = 1; - } - -check_meta: - if (ret != 0 && is_meta) { - /* The metadata page must always exist. 
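/*
 * Illustrative sketch, not from the removed source: with HAVE_FTRUNCATE
 * the deleted __db_pg_alloc_recover()/__db_pg_free_recover_int() mirror
 * the on-disk free list in a sorted in-memory array, sliding entries with
 * memmove() to put a page back at the head when an allocation is rolled
 * back and to drop an entry when a free is rolled back.  The toy_*
 * helpers below show only that array surgery, under the assumption the
 * caller sized the array and located `pos` itself.
 */
#include <string.h>

/* Insert pgno at the front of list[0..nelem-1]; list must have room. */
static void
toy_freelist_insert_head(unsigned *list, unsigned nelem, unsigned pgno)
{
	if (nelem != 0)
		memmove(list + 1, list, nelem * sizeof(*list));
	list[0] = pgno;
}

/* Remove the entry at position pos from list[0..nelem-1]. */
static void
toy_freelist_remove(unsigned *list, unsigned nelem, unsigned pos)
{
	if (pos + 1 < nelem)
		memmove(&list[pos], &list[pos + 1],
		    (nelem - pos - 1) * sizeof(*list));
}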
*/ - ret = __db_pgerr(file_dbp, argp->meta_pgno, ret); - goto out; - } - - /* - * Get the freed page. If we support truncate then don't - * create the page if we are going to free it. If we're - * redoing the operation we get the page and explicitly discard - * its contents, then update its LSN. If we're undoing the - * operation, we get the page and restore its header. - * If we don't support truncate, then we must create the page - * and roll it back. - */ -#ifdef HAVE_FTRUNCATE - if (DB_REDO(op) || (is_meta && meta->last_pgno < argp->pgno)) { - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (ret == DB_PAGE_NOTFOUND) - goto done; - goto out; - } - } else -#endif - if ((ret = - __memp_fget(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - - (void)__ua_memcpy(©_lsn, &LSN(argp->header.data), sizeof(DB_LSN)); - cmp_n = IS_ZERO_LSN(LSN(pagep)) ? 0 : log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), ©_lsn); - -#ifdef HAVE_FTRUNCATE - /* - * This page got extended by a later allocation, - * but its allocation was not in the scope of this - * recovery pass. - */ - if (IS_ZERO_LSN(LSN(pagep))) - cmp_p = 0; -#endif - - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), ©_lsn); - if (DB_REDO(op) && - (cmp_p == 0 || - (IS_ZERO_LSN(copy_lsn) && - log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) { - /* Need to redo the deallocation. */ -#ifdef HAVE_FTRUNCATE - /* - * The page can be truncated if it was truncated at runtime - * and the current metapage reflects the truncation. - */ - if (is_meta && meta->last_pgno <= argp->pgno && - argp->last_pgno <= argp->pgno) { - if ((ret = - __memp_fput(mpf, pagep, DB_MPOOL_DISCARD)) != 0) - goto out; - pagep = NULL; - if ((ret = __memp_ftruncate(mpf, argp->pgno, 0)) != 0) - goto out; - } else if (argp->last_pgno == argp->pgno) { - /* The page was truncated at runtime, zero it out. */ - P_INIT(pagep, 0, PGNO_INVALID, - PGNO_INVALID, PGNO_INVALID, 0, P_INVALID); - ZERO_LSN(pagep->lsn); - modified = 1; - } else -#endif - { - P_INIT(pagep, file_dbp->pgsize, - argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID); - pagep->lsn = *lsnp; - - modified = 1; - } - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Need to reallocate the page. */ - memcpy(pagep, argp->header.data, argp->header.size); - if (data) - memcpy((u_int8_t*)pagep + HOFFSET(pagep), - argp->data.data, argp->data.size); - - modified = 1; - } - if (pagep != NULL && - (ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - - pagep = NULL; -#ifdef HAVE_FTRUNCATE - /* - * If we are keeping an in memory free list remove this - * element from the list. - */ - if (op == DB_TXN_ABORT && argp->pgno != argp->last_pgno) { - db_pgno_t *lp; - u_int32_t nelem, pos; - - if ((ret = __memp_get_freelist(mpf, &nelem, &lp)) != 0) - goto out; - if (lp != NULL) { - pos = 0; - if (!is_meta && nelem != 0) { - __db_freelist_pos(argp->pgno, lp, nelem, &pos); - - DB_ASSERT(argp->pgno == lp[pos]); - DB_ASSERT(argp->meta_pgno == lp[pos - 1]); - } - - if (nelem != 0 && pos != nelem) - memmove(&lp[pos], &lp[pos + 1], - (nelem - pos) * sizeof(*lp)); - - /* Shrink the list */ - if ((ret = - __memp_extend_freelist(mpf, nelem - 1, &lp)) != 0) - goto out; - } - } -done: -#endif - if (meta != NULL && (ret = __memp_fput(mpf, - meta, meta_modified ? 
DB_MPOOL_DIRTY : 0)) != 0) - goto out; - meta = NULL; - - ret = 0; - -out: if (pagep != NULL) - (void)__memp_fput(mpf, pagep, 0); - if (meta != NULL) - (void)__memp_fput(mpf, meta, 0); - - return (ret); -} - -/* - * __db_pg_free_recover -- - * Recovery function for pg_free. - * - * PUBLIC: int __db_pg_free_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_pg_free_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - __db_pg_free_args *argp; - int ret; - - COMPQUIET(info, NULL); - REC_PRINT(__db_pg_free_print); - REC_INTRO(__db_pg_free_read, 1, 0); - - ret = __db_pg_free_recover_int(dbenv, - (__db_pg_freedata_args *)argp, file_dbp, lsnp, mpf, op, 0); - -done: *lsnp = argp->prev_lsn; -out: - REC_CLOSE; -} - -/* - * __db_pg_new_recover -- - * A new page from the file was put on the free list. - * This record is only generated during a LIMBO_COMPENSATE. - * - * PUBLIC: int __db_pg_new_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_pg_new_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ -#ifndef HAVE_FTRUNCATE - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - __db_pg_free_args *argp; - int ret; - - REC_PRINT(__db_pg_free_print); - REC_INTRO(__db_pg_free_read, 1, 0); - COMPQUIET(op, DB_TXN_ABORT); - - if ((ret = - __db_add_limbo(dbenv, info, argp->fileid, argp->pgno, 1)) == 0) - *lsnp = argp->prev_lsn; - -done: -out: - REC_CLOSE; -#else - COMPQUIET(dbenv, NULL); - COMPQUIET(dbtp, NULL); - COMPQUIET(lsnp, NULL); - COMPQUIET(op, DB_TXN_PRINT); - COMPQUIET(info, NULL); - return (0); -#endif -} - -/* - * __db_pg_freedata_recover -- - * Recovery function for pg_freedata. - * - * PUBLIC: int __db_pg_freedata_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_pg_freedata_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - __db_pg_freedata_args *argp; - int ret; - - COMPQUIET(info, NULL); - REC_PRINT(__db_pg_freedata_print); - REC_INTRO(__db_pg_freedata_read, 1, 0); - - ret = __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, 1); - -done: *lsnp = argp->prev_lsn; -out: - REC_CLOSE; -} - -/* - * __db_cksum_recover -- - * Recovery function for checksum failure log record. - * - * PUBLIC: int __db_cksum_recover __P((DB_ENV *, - * PUBLIC: DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_cksum_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_cksum_args *argp; - - int ret; - - COMPQUIET(info, NULL); - COMPQUIET(lsnp, NULL); - COMPQUIET(op, DB_TXN_ABORT); - - REC_PRINT(__db_cksum_print); - - if ((ret = __db_cksum_read(dbenv, dbtp->data, &argp)) != 0) - return (ret); - - /* - * We had a checksum failure -- the only option is to run catastrophic - * recovery. - */ - if (F_ISSET(dbenv, DB_ENV_FATAL)) - ret = 0; - else { - __db_err(dbenv, - "Checksum failure requires catastrophic recovery"); - ret = __db_panic(dbenv, DB_RUNRECOVERY); - } - - __os_free(dbenv, argp); - return (ret); -} - -/* - * __db_pg_prepare_recover -- - * Recovery function for pg_prepare. 
- * - * PUBLIC: int __db_pg_prepare_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_pg_prepare_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ -#ifndef HAVE_FTRUNCATE - __db_pg_prepare_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int ret, t_ret; - - REC_PRINT(__db_pg_prepare_print); - REC_INTRO(__db_pg_prepare_read, 1, 0); - - mpf = file_dbp->mpf; - - /* - * If this made it into the limbo list at prepare time then - * it was a new free page allocated by an aborted subtransaction. - * Only that subtransaction could have toched the page. - * All other pages in the free list at this point are - * either of the same nature or were put there by this subtransactions - * other subtransactions that followed this one. If - * they were put there by this subtransaction the log records - * of the following allocations will reflect that. - * Note that only one transaction could have had the - * metapage locked at the point of the crash. - * All this is to say that we can P_INIT this page without - * loosing other pages on the free list because they - * will be linked in by records earlier in the log for - * this transaction which we will roll back. - */ - if (op == DB_TXN_ABORT) { - if ((ret = __memp_fget( - mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - P_INIT(pagep, file_dbp->pgsize, - argp->pgno, PGNO_INVALID, PGNO_INVALID, 0, P_INVALID); - ZERO_LSN(pagep->lsn); - ret = __db_add_limbo(dbenv, info, argp->fileid, argp->pgno, 1); - if ((t_ret = - __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0) - ret = t_ret; - } - -done: if (ret == 0) - *lsnp = argp->prev_lsn; -out: REC_CLOSE; -#else - COMPQUIET(dbenv, NULL); - COMPQUIET(dbtp, NULL); - COMPQUIET(lsnp, NULL); - COMPQUIET(op, DB_TXN_PRINT); - COMPQUIET(info, NULL); - return (0); -#endif - -} - -/* - * __db_pg_init_recover -- - * Recovery function to reinit pages for truncate. - * - * PUBLIC: int __db_pg_init_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_pg_init_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - __db_pg_init_args *argp; - DB *file_dbp; - DBC *dbc; - DB_LSN copy_lsn; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret, type; - - COMPQUIET(info, NULL); - REC_PRINT(__db_pg_init_print); - REC_INTRO(__db_pg_init_read, 1, 0); - - mpf = file_dbp->mpf; - REC_FGET(mpf, argp->pgno, &pagep, done); - - modified = 0; - (void)__ua_memcpy(&copy_lsn, &LSN(argp->header.data), sizeof(DB_LSN)); - cmp_n = log_compare(lsnp, &LSN(pagep)); - cmp_p = log_compare(&LSN(pagep), &copy_lsn); - CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &copy_lsn); - - if (cmp_p == 0 && DB_REDO(op)) { - if (TYPE(pagep) == P_HASH) - type = P_HASH; - else - type = file_dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE; - P_INIT(pagep, file_dbp->pgsize, PGNO(pagep), PGNO_INVALID, - PGNO_INVALID, TYPE(pagep) == P_HASH ? 0 : 1, type); - pagep->lsn = *lsnp; - modified = 1; - } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Put the data back on the page. */ - memcpy(pagep, argp->header.data, argp->header.size); - if (argp->data.size > 0) - memcpy((u_int8_t*)pagep + HOFFSET(pagep), - argp->data.data, argp->data.size); - - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ?
DB_MPOOL_DIRTY : 0)) != 0) - goto out; - -done: *lsnp = argp->prev_lsn; -out: - REC_CLOSE; -} - -/* - * __db_pg_sort_recover -- - * Recovery function for pg_sort. - * - * PUBLIC: int __db_pg_sort_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__db_pg_sort_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ -#ifdef HAVE_FTRUNCATE - __db_pg_sort_args *argp; - DB *file_dbp; - DBC *dbc; - DBMETA *meta; - DB_MPOOLFILE *mpf; - PAGE *pagep; - db_pgno_t pgno, *list; - u_int32_t felem, nelem; - struct pglist *pglist, *lp; - int modified, ret; - - COMPQUIET(info, NULL); - - REC_PRINT(__db_pg_sort_print); - REC_INTRO(__db_pg_sort_read, 1, 1); - - modified = 0; - - pglist = (struct pglist *) argp->list.data; - nelem = argp->list.size / sizeof(struct pglist); - if (DB_REDO(op)) { - pgno = argp->last_pgno; - if ((ret = __db_pg_truncate(mpf, - pglist, NULL, &nelem, &pgno, lsnp, 1)) != 0) - goto out; - - if (argp->last_free != PGNO_INVALID) { - if ((ret = __memp_fget(mpf, - &argp->last_free, 0, &meta)) == 0) { - if (log_compare(&LSN(meta), - &argp->last_lsn) == 0) { - NEXT_PGNO(meta) = PGNO_INVALID; - LSN(meta) = *lsnp; - modified = 1; - } - if ((ret = __memp_fput(mpf, - meta, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - meta = NULL; - modified = 0; - } else if (ret != DB_PAGE_NOTFOUND) - goto out; - } - if ((ret = __memp_fget(mpf, &argp->meta, 0, &meta)) != 0) - goto out; - if (log_compare(&LSN(meta), &argp->meta_lsn) == 0) { - if (argp->last_free == PGNO_INVALID) { - if (nelem == 0) - meta->free = PGNO_INVALID; - else - meta->free = pglist->pgno; - } - meta->last_pgno = pgno; - LSN(meta) = *lsnp; - modified = 1; - } - } else { - /* Put the free list back in its original order. */ - for (lp = pglist; lp < &pglist[nelem]; lp++) { - if ((ret = __memp_fget(mpf, - &lp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - if (IS_ZERO_LSN(LSN(pagep)) || - log_compare(&LSN(pagep), lsnp) == 0) { - if (lp == &pglist[nelem - 1]) - pgno = PGNO_INVALID; - else - pgno = lp[1].pgno; - - P_INIT(pagep, file_dbp->pgsize, - lp->pgno, PGNO_INVALID, pgno, 0, P_INVALID); - LSN(pagep) = lp->lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, - pagep, modified ? DB_MPOOL_DIRTY: 0)) != 0) - goto out; - } - if (argp->last_free != PGNO_INVALID) { - if ((ret = __memp_fget(mpf, - &argp->last_free, 0, &meta)) == 0) { - if (log_compare(&LSN(meta), lsnp) == 0) { - NEXT_PGNO(meta) = pglist->pgno; - LSN(meta) = argp->last_lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, - meta, modified ? DB_MPOOL_DIRTY : 0)) != 0) - goto out; - } else if (ret != DB_PAGE_NOTFOUND) - goto out; - modified = 0; - meta = NULL; - } - if ((ret = __memp_fget(mpf, &argp->meta, 0, &meta)) != 0) - goto out; - if (log_compare(&LSN(meta), lsnp) == 0) { - meta->last_pgno = argp->last_pgno; - if (argp->last_pgno == PGNO_INVALID) - meta->free = pglist->pgno; - LSN(meta) = argp->meta_lsn; - modified = 1; - } - } - if (op == DB_TXN_ABORT) { - if ((ret = __memp_get_freelist(mpf, &felem, &list)) != 0) - goto out; - if (list != NULL) { - DB_ASSERT(felem == 0 || - argp->last_free == list[felem - 1]); - if ((ret = __memp_extend_freelist( - mpf, felem + nelem, &list)) != 0) - goto out; - for (lp = pglist; lp < &pglist[nelem]; lp++) - list[felem++] = lp->pgno; - } - } - - if ((ret = __memp_fput(mpf, meta, modified ? 
DB_MPOOL_DIRTY : 0)) != 0) - goto out; - -done: *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -#else - /* - * If HAVE_FTRUNCATE is not defined, we'll never see pg_sort records - * to recover. - */ - COMPQUIET(dbenv, NULL); - COMPQUIET(dbtp, NULL); - COMPQUIET(lsnp, NULL); - COMPQUIET(op, DB_TXN_ABORT); - COMPQUIET(info, NULL); - return (EINVAL); -#endif -} diff --git a/storage/bdb/db/db_reclaim.c b/storage/bdb/db/db_reclaim.c deleted file mode 100644 index ed68bc6eae7..00000000000 --- a/storage/bdb/db/db_reclaim.c +++ /dev/null @@ -1,239 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_reclaim.c,v 12.2 2005/06/16 20:21:14 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/mp.h" - -/* - * __db_traverse_big - * Traverse a chain of overflow pages and call the callback routine - * on each one. The calling convention for the callback is: - * callback(dbp, page, cookie, did_put), - * where did_put is a return value indicating if the page in question has - * already been returned to the mpool. - * - * PUBLIC: int __db_traverse_big __P((DB *, - * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *)); - */ -int -__db_traverse_big(dbp, pgno, callback, cookie) - DB *dbp; - db_pgno_t pgno; - int (*callback) __P((DB *, PAGE *, void *, int *)); - void *cookie; -{ - DB_MPOOLFILE *mpf; - PAGE *p; - int did_put, ret; - - mpf = dbp->mpf; - - do { - did_put = 0; - if ((ret = __memp_fget(mpf, &pgno, 0, &p)) != 0) - return (ret); - /* - * If we are freeing pages only process the overflow - * chain if the head of the chain has a refcount of 1. - */ - pgno = NEXT_PGNO(p); - if (callback == __db_truncate_callback && OV_REF(p) != 1) - pgno = PGNO_INVALID; - if ((ret = callback(dbp, p, cookie, &did_put)) == 0 && - !did_put) - ret = __memp_fput(mpf, p, 0); - } while (ret == 0 && pgno != PGNO_INVALID); - - return (ret); -} - -/* - * __db_reclaim_callback - * This is the callback routine used during a delete of a subdatabase. - * we are traversing a btree or hash table and trying to free all the - * pages. Since they share common code for duplicates and overflow - * items, we traverse them identically and use this routine to do the - * actual free. The reason that this is callback is because hash uses - * the same traversal code for statistics gathering. - * - * PUBLIC: int __db_reclaim_callback __P((DB *, PAGE *, void *, int *)); - */ -int -__db_reclaim_callback(dbp, p, cookie, putp) - DB *dbp; - PAGE *p; - void *cookie; - int *putp; -{ - int ret; - - /* - * We don't want to log the free of the root with the subdb. - * If we abort then the subdb may not be openable to undo - * the free. - */ - - if ((dbp->type == DB_BTREE || dbp->type == DB_RECNO) && - PGNO(p) == ((BTREE *)dbp->bt_internal)->bt_root) - return (0); - if ((ret = __db_free(cookie, p)) != 0) - return (ret); - *putp = 1; - - return (0); -} - -/* - * __db_truncate_callback - * This is the callback routine used during a truncate. - * we are traversing a btree or hash table and trying to free all the - * pages. 
- * - * PUBLIC: int __db_truncate_callback __P((DB *, PAGE *, void *, int *)); - */ -int -__db_truncate_callback(dbp, p, cookie, putp) - DB *dbp; - PAGE *p; - void *cookie; - int *putp; -{ - DB_MPOOLFILE *mpf; - DBT ddbt, ldbt; - db_indx_t indx, len, off, tlen, top; - db_trunc_param *param; - u_int8_t *hk, type; - int ret; - - top = NUM_ENT(p); - mpf = dbp->mpf; - param = cookie; - *putp = 1; - - switch (TYPE(p)) { - case P_LBTREE: - /* Skip for off-page duplicates and deleted items. */ - for (indx = 0; indx < top; indx += P_INDX) { - type = GET_BKEYDATA(dbp, p, indx + O_INDX)->type; - if (!B_DISSET(type) && B_TYPE(type) != B_DUPLICATE) - ++param->count; - } - /* FALLTHROUGH */ - case P_IBTREE: - case P_IRECNO: - case P_INVALID: - if (dbp->type != DB_HASH && - ((BTREE *)dbp->bt_internal)->bt_root == PGNO(p)) { - type = dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE; - goto reinit; - } - break; - case P_OVERFLOW: - if (DBC_LOGGING(param->dbc)) { - if ((ret = __db_ovref_log(dbp, param->dbc->txn, - &LSN(p), 0, p->pgno, -1, &LSN(p))) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(p)); - if (--OV_REF(p) != 0) - *putp = 0; - break; - case P_LRECNO: - for (indx = 0; indx < top; indx += O_INDX) { - type = GET_BKEYDATA(dbp, p, indx)->type; - if (!B_DISSET(type)) - ++param->count; - } - - if (((BTREE *)dbp->bt_internal)->bt_root == PGNO(p)) { - type = P_LRECNO; - goto reinit; - } - break; - case P_LDUP: - /* Correct for deleted items. */ - for (indx = 0; indx < top; indx += O_INDX) - if (!B_DISSET(GET_BKEYDATA(dbp, p, indx)->type)) - ++param->count; - - break; - case P_HASH: - /* Correct for on-page duplicates and deleted items. */ - for (indx = 0; indx < top; indx += P_INDX) { - switch (*H_PAIRDATA(dbp, p, indx)) { - case H_OFFDUP: - break; - case H_OFFPAGE: - case H_KEYDATA: - ++param->count; - break; - case H_DUPLICATE: - tlen = LEN_HDATA(dbp, p, 0, indx); - hk = H_PAIRDATA(dbp, p, indx); - for (off = 0; off < tlen; - off += len + 2 * sizeof(db_indx_t)) { - ++param->count; - memcpy(&len, - HKEYDATA_DATA(hk) - + off, sizeof(db_indx_t)); - } - break; - default: - return (__db_pgfmt(dbp->dbenv, p->pgno)); - } - } - /* Don't free the head of the bucket. */ - if (PREV_PGNO(p) == PGNO_INVALID) { - type = P_HASH; - -reinit: *putp = 0; - if (DBC_LOGGING(param->dbc)) { - memset(&ldbt, 0, sizeof(ldbt)); - memset(&ddbt, 0, sizeof(ddbt)); - ldbt.data = p; - ldbt.size = P_OVERHEAD(dbp); - ldbt.size += p->entries * sizeof(db_indx_t); - ddbt.data = (u_int8_t *)p + HOFFSET(p); - ddbt.size = dbp->pgsize - HOFFSET(p); - if ((ret = __db_pg_init_log(dbp, - param->dbc->txn, &LSN(p), 0, - p->pgno, &ldbt, &ddbt)) != 0) - return (ret); - } else - LSN_NOT_LOGGED(LSN(p)); - - P_INIT(p, dbp->pgsize, PGNO(p), PGNO_INVALID, - PGNO_INVALID, type == P_HASH ? 0 : 1, type); - } - break; - default: - return (__db_pgfmt(dbp->dbenv, p->pgno)); - } - - if (*putp == 1) { - if ((ret = __db_free(param->dbc, p)) != 0) - return (ret); - } else { - if ((ret = __memp_fput(mpf, p, DB_MPOOL_DIRTY)) != 0) - return (ret); - *putp = 1; - } - - return (0); -} diff --git a/storage/bdb/db/db_remove.c b/storage/bdb/db/db_remove.c deleted file mode 100644 index c37c1876dd7..00000000000 --- a/storage/bdb/db/db_remove.c +++ /dev/null @@ -1,490 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: db_remove.c,v 12.16 2005/10/27 01:25:53 mjc Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/fop.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/db_shash.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" -#include "dbinc/txn.h" - -static int __db_dbtxn_remove __P((DB *, DB_TXN *, const char *, const char *)); -static int __db_subdb_remove __P((DB *, DB_TXN *, const char *, const char *)); - -/* - * __env_dbremove_pp - * DB_ENV->dbremove pre/post processing. - * - * PUBLIC: int __env_dbremove_pp __P((DB_ENV *, - * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t)); - */ -int -__env_dbremove_pp(dbenv, txn, name, subdb, flags) - DB_ENV *dbenv; - DB_TXN *txn; - const char *name, *subdb; - u_int32_t flags; -{ - DB *dbp; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret, txn_local; - - dbp = NULL; - txn_local = 0; - - PANIC_CHECK(dbenv); - ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbremove"); - - /* - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if ((ret = __db_fchk(dbenv, "DB->remove", flags, DB_AUTO_COMMIT)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __env_rep_enter(dbenv, 1)) != 0) { - handle_check = 0; - goto err; - } - - /* - * Create local transaction as necessary, check for consistent - * transaction usage. - */ - if (IS_ENV_AUTO_COMMIT(dbenv, txn, flags)) { - if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0) - goto err; - txn_local = 1; - } else - if (txn != NULL && !TXN_ON(dbenv)) { - ret = __db_not_txn_env(dbenv); - goto err; - } - LF_CLR(DB_AUTO_COMMIT); - - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - goto err; - - ret = __db_remove_int(dbp, txn, name, subdb, flags); - - if (txn_local) { - /* - * We created the DBP here and when we commit/abort, we'll - * release all the transactional locks, including the handle - * lock; mark the handle cleared explicitly. - */ - LOCK_INIT(dbp->handle_lock); - dbp->lid = DB_LOCK_INVALIDID; - } else if (txn != NULL) { - /* - * We created this handle locally so we need to close it - * and clean it up. Unfortunately, it's holding transactional - * locks that need to persist until the end of transaction. - * If we invalidate the locker id (dbp->lid), then the close - * won't free these locks prematurely. - */ - dbp->lid = DB_LOCK_INVALIDID; - } - -err: if (txn_local && (t_ret = - __db_txn_auto_resolve(dbenv, txn, 0, ret)) != 0 && ret == 0) - ret = t_ret; - - /* - * We never opened this dbp for real, so don't include a transaction - * handle, and use NOSYNC to avoid calling into mpool. - * - * !!! - * Note we're reversing the order of operations: we started the txn and - * then opened the DB handle; we're resolving the txn and then closing - * closing the DB handle -- it's safer. - */ - if (dbp != NULL && - (t_ret = __db_close(dbp, NULL, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_remove_pp - * DB->remove pre/post processing. 
- * - * PUBLIC: int __db_remove_pp - * PUBLIC: __P((DB *, const char *, const char *, u_int32_t)); - */ -int -__db_remove_pp(dbp, name, subdb, flags) - DB *dbp; - const char *name, *subdb; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - - /* - * Validate arguments, continuing to destroy the handle on failure. - * - * Cannot use DB_ILLEGAL_AFTER_OPEN directly because it returns. - * - * !!! - * We have a serious problem if we're here with a handle used to open - * a database -- we'll destroy the handle, and the application won't - * ever be able to close the database. - */ - if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) { - ret = __db_mi_open(dbenv, "DB->remove", 1); - return (ret); - } - - /* Validate arguments. */ - if ((ret = __db_fchk(dbenv, "DB->remove", flags, 0)) != 0) - return (ret); - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, 0)) != 0) { - handle_check = 0; - goto err; - } - - /* Remove the file. */ - ret = __db_remove(dbp, NULL, name, subdb, flags); - - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_remove - * DB->remove method. - * - * PUBLIC: int __db_remove - * PUBLIC: __P((DB *, DB_TXN *, const char *, const char *, u_int32_t)); - */ -int -__db_remove(dbp, txn, name, subdb, flags) - DB *dbp; - DB_TXN *txn; - const char *name, *subdb; - u_int32_t flags; -{ - int ret, t_ret; - - ret = __db_remove_int(dbp, txn, name, subdb, flags); - - if ((t_ret = __db_close(dbp, txn, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_remove_int - * Worker function for the DB->remove method. - * - * PUBLIC: int __db_remove_int __P((DB *, - * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t)); - */ -int -__db_remove_int(dbp, txn, name, subdb, flags) - DB *dbp; - DB_TXN *txn; - const char *name, *subdb; - u_int32_t flags; -{ - DB_ENV *dbenv; - int ret; - char *real_name, *tmpname; - - dbenv = dbp->dbenv; - real_name = tmpname = NULL; - - if (name == NULL && subdb == NULL) { - __db_err(dbenv, "Remove on temporary files invalid"); - ret = EINVAL; - goto err; - } - - if (name == NULL) { - MAKE_INMEM(dbp); - real_name = (char *)subdb; - } else if (subdb != NULL) { - ret = __db_subdb_remove(dbp, txn, name, subdb); - goto err; - } - - /* Handle transactional file removes separately. */ - if (txn != NULL) { - ret = __db_dbtxn_remove(dbp, txn, name, subdb); - goto err; - } - - /* - * The remaining case is a non-transactional file remove. - * - * Find the real name of the file. - */ - if (!F_ISSET(dbp, DB_AM_INMEM) && (ret = - __db_appname(dbenv, DB_APP_DATA, name, 0, NULL, &real_name)) != 0) - goto err; - - /* - * If this is a file and force is set, remove the temporary file, which - * may have been left around. Ignore errors because the temporary file - * might not exist. - */ - if (!F_ISSET(dbp, DB_AM_INMEM) && LF_ISSET(DB_FORCE) && - (ret = __db_backup_name(dbenv, real_name, NULL, &tmpname)) == 0) - (void)__os_unlink(dbenv, tmpname); - - if ((ret = __fop_remove_setup(dbp, NULL, real_name, 0)) != 0) - goto err; - - if (dbp->db_am_remove != NULL && - (ret = dbp->db_am_remove(dbp, NULL, name, subdb)) != 0) - goto err; - - ret = F_ISSET(dbp, DB_AM_INMEM) ? 
- __db_inmem_remove(dbp, NULL, real_name) : - __fop_remove(dbenv, NULL, dbp->fileid, name, DB_APP_DATA, - F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0); - -err: if (!F_ISSET(dbp, DB_AM_INMEM) && real_name != NULL) - __os_free(dbenv, real_name); - if (tmpname != NULL) - __os_free(dbenv, tmpname); - - return (ret); -} - -/* - * __db_inmem_remove -- - * Removal of a named in-memory database. - * PUBLIC: int __db_inmem_remove __P((DB *, DB_TXN *, const char *)); - */ -int -__db_inmem_remove(dbp, txn, name) - DB *dbp; - DB_TXN *txn; - const char *name; -{ - DB_ENV *dbenv; - DB_LSN lsn; - DBT fid_dbt, name_dbt; - u_int32_t locker; - int ret; - - dbenv = dbp->dbenv; - locker = DB_LOCK_INVALIDID; - - DB_ASSERT(name != NULL); - - /* This had better exist if we are trying to do a remove. */ - (void)__memp_set_flags(dbp->mpf, DB_MPOOL_NOFILE, 1); - if ((ret = __memp_fopen(dbp->mpf, NULL, name, 0, 0, 0)) != 0) - return (ret); - if ((ret = __memp_get_fileid(dbp->mpf, dbp->fileid)) != 0) - goto err; - dbp->preserve_fid = 1; - - if (LOCKING_ON(dbenv)) { - if (dbp->lid == DB_LOCK_INVALIDID && - (ret = __lock_id(dbenv, &dbp->lid, NULL)) != 0) - goto err; - locker = txn == NULL ? dbp->lid : txn->txnid; - } - - /* - * In a transactional environment, we'll play the same game - * that we play for databases in the file system -- create a - * temporary database and put it in with the current name - * and then rename this one to another name. We'll then use - * a commit-time event to remove the entry. - */ - - if ((ret = __fop_lock_handle(dbenv, - dbp, locker, DB_LOCK_WRITE, NULL, 0)) != 0) - goto err; - - if (LOGGING_ON(dbenv)) { - memset(&fid_dbt, 0, sizeof(fid_dbt)); - fid_dbt.data = dbp->fileid; - fid_dbt.size = DB_FILE_ID_LEN; - memset(&name_dbt, 0, sizeof(name_dbt)); - name_dbt.data = (void *)name; - name_dbt.size = (u_int32_t)strlen(name) + 1; - - if (txn != NULL && (ret = - __txn_remevent(dbenv, txn, name, dbp->fileid, 1)) != 0) - goto err; - - if ((ret = __crdel_inmem_remove_log(dbenv, - txn, &lsn, 0, &name_dbt, &fid_dbt)) != 0) - goto err; - } - - if (txn == NULL) - ret = __memp_nameop(dbenv, dbp->fileid, NULL, name, NULL, 1); - -err: return (ret); -} - -/* - * __db_subdb_remove -- - * Remove a subdatabase. - */ -static int -__db_subdb_remove(dbp, txn, name, subdb) - DB *dbp; - DB_TXN *txn; - const char *name, *subdb; -{ - DB *mdbp, *sdbp; - int ret, t_ret; - - mdbp = sdbp = NULL; - - /* Open the subdatabase. */ - if ((ret = db_create(&sdbp, dbp->dbenv, 0)) != 0) - goto err; - if ((ret = __db_open(sdbp, - txn, name, subdb, DB_UNKNOWN, DB_WRITEOPEN, 0, PGNO_BASE_MD)) != 0) - goto err; - - DB_TEST_RECOVERY(sdbp, DB_TEST_PREDESTROY, ret, name); - - /* Free up the pages in the subdatabase. */ - switch (sdbp->type) { - case DB_BTREE: - case DB_RECNO: - if ((ret = __bam_reclaim(sdbp, txn)) != 0) - goto err; - break; - case DB_HASH: - if ((ret = __ham_reclaim(sdbp, txn)) != 0) - goto err; - break; - case DB_QUEUE: - case DB_UNKNOWN: - default: - ret = __db_unknown_type( - sdbp->dbenv, "__db_subdb_remove", sdbp->type); - goto err; - } - - /* - * Remove the entry from the main database and free the subdatabase - * metadata page. - */ - if ((ret = __db_master_open(sdbp, txn, name, 0, 0, &mdbp)) != 0) - goto err; - - if ((ret = __db_master_update( - mdbp, sdbp, txn, subdb, sdbp->type, MU_REMOVE, NULL, 0)) != 0) - goto err; - - DB_TEST_RECOVERY(sdbp, DB_TEST_POSTDESTROY, ret, name); - -DB_TEST_RECOVERY_LABEL -err: - /* Close the main and subdatabases. 
*/ - if ((t_ret = __db_close(sdbp, txn, 0)) != 0 && ret == 0) - ret = t_ret; - - if (mdbp != NULL && - (t_ret = __db_close(mdbp, txn, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -static int -__db_dbtxn_remove(dbp, txn, name, subdb) - DB *dbp; - DB_TXN *txn; - const char *name, *subdb; -{ - DB_ENV *dbenv; - int ret; - char *tmpname; - - dbenv = dbp->dbenv; - tmpname = NULL; - - /* - * This is a transactional remove, so we have to keep the name - * of the file locked until the transaction commits. As a result, - * we implement remove by renaming the file to some other name - * (which creates a dummy named file as a placeholder for the - * file being rename/dremoved) and then deleting that file as - * a delayed remove at commit. - */ - if ((ret = __db_backup_name(dbenv, - F_ISSET(dbp, DB_AM_INMEM) ? subdb : name, txn, &tmpname)) != 0) - return (ret); - - DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name); - - if ((ret = __db_rename_int(dbp, txn, name, subdb, tmpname)) != 0) - goto err; - - /* - * The internal removes will also translate into delayed removes. - */ - if (dbp->db_am_remove != NULL && - (ret = dbp->db_am_remove(dbp, txn, tmpname, NULL)) != 0) - goto err; - - ret = F_ISSET(dbp, DB_AM_INMEM) ? - __db_inmem_remove(dbp, txn, tmpname) : - __fop_remove(dbenv, txn, dbp->fileid, tmpname, DB_APP_DATA, - F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0); - - DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name); - -err: -DB_TEST_RECOVERY_LABEL - if (tmpname != NULL) - __os_free(dbenv, tmpname); - - return (ret); -} diff --git a/storage/bdb/db/db_rename.c b/storage/bdb/db/db_rename.c deleted file mode 100644 index 827d772751d..00000000000 --- a/storage/bdb/db/db_rename.c +++ /dev/null @@ -1,373 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_rename.c,v 12.11 2005/10/07 20:21:22 ubell Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_am.h" -#include "dbinc/fop.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" - -static int __db_subdb_rename __P((DB *, - DB_TXN *, const char *, const char *, const char *)); - -/* - * __env_dbrename_pp - * DB_ENV->dbrename pre/post processing. - * - * PUBLIC: int __env_dbrename_pp __P((DB_ENV *, DB_TXN *, - * PUBLIC: const char *, const char *, const char *, u_int32_t)); - */ -int -__env_dbrename_pp(dbenv, txn, name, subdb, newname, flags) - DB_ENV *dbenv; - DB_TXN *txn; - const char *name, *subdb, *newname; - u_int32_t flags; -{ - DB *dbp; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret, txn_local; - - dbp = NULL; - txn_local = 0; - - PANIC_CHECK(dbenv); - ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbrename"); - - /* - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if ((ret = __db_fchk(dbenv, "DB->rename", flags, DB_AUTO_COMMIT)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __env_rep_enter(dbenv, 1)) != 0) { - handle_check = 0; - goto err; - } - - /* - * Create local transaction as necessary, check for consistent - * transaction usage. 
- */ - if (IS_ENV_AUTO_COMMIT(dbenv, txn, flags)) { - if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0) - goto err; - txn_local = 1; - } else - if (txn != NULL && !TXN_ON(dbenv)) { - ret = __db_not_txn_env(dbenv); - goto err; - } - - LF_CLR(DB_AUTO_COMMIT); - - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - goto err; - - ret = __db_rename_int(dbp, txn, name, subdb, newname); - - if (txn_local) { - /* - * We created the DBP here and when we commit/abort, we'll - * release all the transactional locks, including the handle - * lock; mark the handle cleared explicitly. - */ - LOCK_INIT(dbp->handle_lock); - dbp->lid = DB_LOCK_INVALIDID; - } else if (txn != NULL) { - /* - * We created this handle locally so we need to close it - * and clean it up. Unfortunately, it's holding transactional - * locks that need to persist until the end of transaction. - * If we invalidate the locker id (dbp->lid), then the close - * won't free these locks prematurely. - */ - dbp->lid = DB_LOCK_INVALIDID; - } - -err: if (txn_local && (t_ret = - __db_txn_auto_resolve(dbenv, txn, 0, ret)) != 0 && ret == 0) - ret = t_ret; - - /* - * We never opened this dbp for real, so don't include a transaction - * handle, and use NOSYNC to avoid calling into mpool. - * - * !!! - * Note we're reversing the order of operations: we started the txn and - * then opened the DB handle; we're resolving the txn and then closing - * closing the DB handle -- it's safer. - */ - if (dbp != NULL && - (t_ret = __db_close(dbp, NULL, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_rename_pp - * DB->rename pre/post processing. - * - * PUBLIC: int __db_rename_pp __P((DB *, - * PUBLIC: const char *, const char *, const char *, u_int32_t)); - */ -int -__db_rename_pp(dbp, name, subdb, newname, flags) - DB *dbp; - const char *name, *subdb, *newname; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - handle_check = 0; - - PANIC_CHECK(dbenv); - - /* - * Validate arguments, continuing to destroy the handle on failure. - * - * Cannot use DB_ILLEGAL_AFTER_OPEN directly because it returns. - * - * !!! - * We have a serious problem if we're here with a handle used to open - * a database -- we'll destroy the handle, and the application won't - * ever be able to close the database. - */ - if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) - return (__db_mi_open(dbenv, "DB->rename", 1)); - - /* Validate arguments. */ - if ((ret = __db_fchk(dbenv, "DB->rename", flags, 0)) != 0) - return (ret); - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, 0)) != 0) { - handle_check = 0; - goto err; - } - - /* Rename the file. */ - ret = __db_rename(dbp, NULL, name, subdb, newname); - - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_rename - * DB->rename method. 
- * - * PUBLIC: int __db_rename - * PUBLIC: __P((DB *, DB_TXN *, const char *, const char *, const char *)); - */ -int -__db_rename(dbp, txn, name, subdb, newname) - DB *dbp; - DB_TXN *txn; - const char *name, *subdb, *newname; -{ - int ret, t_ret; - - ret = __db_rename_int(dbp, txn, name, subdb, newname); - - if ((t_ret = __db_close(dbp, txn, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_rename_int - * Worker function for DB->rename method; the close of the dbp is - * left in the wrapper routine. - * - * PUBLIC: int __db_rename_int - * PUBLIC: __P((DB *, DB_TXN *, const char *, const char *, const char *)); - */ -int -__db_rename_int(dbp, txn, name, subdb, newname) - DB *dbp; - DB_TXN *txn; - const char *name, *subdb, *newname; -{ - DB_ENV *dbenv; - int ret; - char *old, *real_name; - - dbenv = dbp->dbenv; - real_name = NULL; - - DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name); - - if (name == NULL && subdb == NULL) { - __db_err(dbenv, "Rename on temporary files invalid"); - ret = EINVAL; - goto err; - } - - if (name == NULL) - MAKE_INMEM(dbp); - else if (subdb != NULL) { - ret = __db_subdb_rename(dbp, txn, name, subdb, newname); - goto err; - } - - /* - * From here on down, this pertains to files or in-memory databases. - * - * Find the real name of the file. - */ - if (F_ISSET(dbp, DB_AM_INMEM)) { - old = (char *)subdb; - real_name = (char *)subdb; - } else { - if ((ret = __db_appname(dbenv, - DB_APP_DATA, name, 0, NULL, &real_name)) != 0) - goto err; - old = (char *)name; - } - - if ((ret = __fop_remove_setup(dbp, txn, real_name, 0)) != 0) - goto err; - - if (dbp->db_am_rename != NULL && - (ret = dbp->db_am_rename(dbp, txn, name, subdb, newname)) != 0) - goto err; - - /* - * The transactional case and non-transactional case are - * quite different. In the non-transactional case, we simply - * do the rename. In the transactional case, since we need - * the ability to back out and maintain locking, we have to - * create a temporary object as a placeholder. This is all - * taken care of in the fop layer. - */ - if (txn != NULL) { - if ((ret = __fop_dummy(dbp, txn, old, newname, 0)) != 0) - goto err; - } else { - if ((ret = __fop_dbrename(dbp, old, newname)) != 0) - goto err; - } - - /* - * I am pretty sure that we haven't gotten a dbreg id, so calling - * dbreg_filelist_update is not necessary. - */ - DB_ASSERT(dbp->log_filename == NULL || - dbp->log_filename->id == DB_LOGFILEID_INVALID); - - DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, newname); - -DB_TEST_RECOVERY_LABEL -err: if (!F_ISSET(dbp, DB_AM_INMEM) && real_name != NULL) - __os_free(dbenv, real_name); - - return (ret); -} - -/* - * __db_subdb_rename -- - * Rename a subdatabase. - */ -static int -__db_subdb_rename(dbp, txn, name, subdb, newname) - DB *dbp; - DB_TXN *txn; - const char *name, *subdb, *newname; -{ - DB *mdbp; - DB_ENV *dbenv; - PAGE *meta; - int ret, t_ret; - - mdbp = NULL; - meta = NULL; - dbenv = dbp->dbenv; - - /* - * We have not opened this dbp so it isn't marked as a subdb, - * but it ought to be. - */ - F_SET(dbp, DB_AM_SUBDB); - - /* - * Rename the entry in the main database. We need to first - * get the meta-data page number (via MU_OPEN) so that we can - * read the meta-data page and obtain a handle lock. Once we've - * done that, we can proceed to do the rename in the master. 
- */ - if ((ret = __db_master_open(dbp, txn, name, 0, 0, &mdbp)) != 0) - goto err; - - if ((ret = __db_master_update(mdbp, dbp, txn, subdb, dbp->type, - MU_OPEN, NULL, 0)) != 0) - goto err; - - if ((ret = __memp_fget(mdbp->mpf, &dbp->meta_pgno, 0, &meta)) != 0) - goto err; - memcpy(dbp->fileid, ((DBMETA *)meta)->uid, DB_FILE_ID_LEN); - if ((ret = __fop_lock_handle(dbenv, - dbp, mdbp->lid, DB_LOCK_WRITE, NULL, NOWAIT_FLAG(txn))) != 0) - goto err; - - ret = __memp_fput(mdbp->mpf, meta, 0); - meta = NULL; - if (ret != 0) - goto err; - - if ((ret = __db_master_update(mdbp, dbp, txn, - subdb, dbp->type, MU_RENAME, newname, 0)) != 0) - goto err; - - DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name); - -DB_TEST_RECOVERY_LABEL -err: - if (meta != NULL && - (t_ret = __memp_fput(mdbp->mpf, meta, 0)) != 0 && ret == 0) - ret = t_ret; - - if (mdbp != NULL && - (t_ret = __db_close(mdbp, txn, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} diff --git a/storage/bdb/db/db_ret.c b/storage/bdb/db/db_ret.c deleted file mode 100644 index 39446ea443c..00000000000 --- a/storage/bdb/db/db_ret.c +++ /dev/null @@ -1,154 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_ret.c,v 12.1 2005/06/16 20:21:14 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" - -/* - * __db_ret -- - * Build return DBT. - * - * PUBLIC: int __db_ret __P((DB *, - * PUBLIC: PAGE *, u_int32_t, DBT *, void **, u_int32_t *)); - */ -int -__db_ret(dbp, h, indx, dbt, memp, memsize) - DB *dbp; - PAGE *h; - u_int32_t indx; - DBT *dbt; - void **memp; - u_int32_t *memsize; -{ - BKEYDATA *bk; - HOFFPAGE ho; - BOVERFLOW *bo; - u_int32_t len; - u_int8_t *hk; - void *data; - - switch (TYPE(h)) { - case P_HASH: - hk = P_ENTRY(dbp, h, indx); - if (HPAGE_PTYPE(hk) == H_OFFPAGE) { - memcpy(&ho, hk, sizeof(HOFFPAGE)); - return (__db_goff(dbp, dbt, - ho.tlen, ho.pgno, memp, memsize)); - } - len = LEN_HKEYDATA(dbp, h, dbp->pgsize, indx); - data = HKEYDATA_DATA(hk); - break; - case P_LBTREE: - case P_LDUP: - case P_LRECNO: - bk = GET_BKEYDATA(dbp, h, indx); - if (B_TYPE(bk->type) == B_OVERFLOW) { - bo = (BOVERFLOW *)bk; - return (__db_goff(dbp, dbt, - bo->tlen, bo->pgno, memp, memsize)); - } - len = bk->len; - data = bk->data; - break; - default: - return (__db_pgfmt(dbp->dbenv, h->pgno)); - } - - return (__db_retcopy(dbp->dbenv, dbt, data, len, memp, memsize)); -} - -/* - * __db_retcopy -- - * Copy the returned data into the user's DBT, handling special flags. - * - * PUBLIC: int __db_retcopy __P((DB_ENV *, DBT *, - * PUBLIC: void *, u_int32_t, void **, u_int32_t *)); - */ -int -__db_retcopy(dbenv, dbt, data, len, memp, memsize) - DB_ENV *dbenv; - DBT *dbt; - void *data; - u_int32_t len; - void **memp; - u_int32_t *memsize; -{ - int ret; - - ret = 0; - - /* If returning a partial record, reset the length. */ - if (F_ISSET(dbt, DB_DBT_PARTIAL)) { - data = (u_int8_t *)data + dbt->doff; - if (len > dbt->doff) { - len -= dbt->doff; - if (len > dbt->dlen) - len = dbt->dlen; - } else - len = 0; - } - - /* - * Allocate memory to be owned by the application: DB_DBT_MALLOC, - * DB_DBT_REALLOC. - * - * !!! - * We always allocate memory, even if we're copying out 0 bytes. 
This - * guarantees consistency, i.e., the application can always free memory - * without concern as to how many bytes of the record were requested. - * - * Use the memory specified by the application: DB_DBT_USERMEM. - * - * !!! - * If the length we're going to copy is 0, the application-supplied - * memory pointer is allowed to be NULL. - */ - if (F_ISSET(dbt, DB_DBT_MALLOC)) { - ret = __os_umalloc(dbenv, len, &dbt->data); - } else if (F_ISSET(dbt, DB_DBT_REALLOC)) { - if (dbt->data == NULL || dbt->size == 0 || dbt->size < len) - ret = __os_urealloc(dbenv, len, &dbt->data); - } else if (F_ISSET(dbt, DB_DBT_USERMEM)) { - if (len != 0 && (dbt->data == NULL || dbt->ulen < len)) - ret = DB_BUFFER_SMALL; - } else if (memp == NULL || memsize == NULL) { - ret = EINVAL; - } else { - if (len != 0 && (*memsize == 0 || *memsize < len)) { - if ((ret = __os_realloc(dbenv, len, memp)) == 0) - *memsize = len; - else - *memsize = 0; - } - if (ret == 0) - dbt->data = *memp; - } - - if (ret == 0 && len != 0) - memcpy(dbt->data, data, len); - - /* - * Return the length of the returned record in the DBT size field. - * This satisfies the requirement that if we're using user memory - * and insufficient memory was provided, return the amount necessary - * in the size field. - */ - dbt->size = len; - - return (ret); -} diff --git a/storage/bdb/db/db_setid.c b/storage/bdb/db/db_setid.c deleted file mode 100644 index 4ba3ae9b4d2..00000000000 --- a/storage/bdb/db/db_setid.c +++ /dev/null @@ -1,169 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_setid.c,v 12.8 2005/10/18 14:17:08 mjc Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_swap.h" -#include "dbinc/db_am.h" -#include "dbinc/mp.h" - -static int __env_fileid_reset __P((DB_ENV *, const char *, int)); - -/* - * __env_fileid_reset_pp -- - * DB_ENV->fileid_reset pre/post processing. - * - * PUBLIC: int __env_fileid_reset_pp __P((DB_ENV *, const char *, u_int32_t)); - */ -int -__env_fileid_reset_pp(dbenv, name, flags) - DB_ENV *dbenv; - const char *name; - u_int32_t flags; -{ - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - PANIC_CHECK(dbenv); - ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->fileid_reset"); - - /* - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if (flags != 0 && flags != DB_ENCRYPT) - return (__db_ferr(dbenv, "DB_ENV->fileid_reset", 0)); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __env_rep_enter(dbenv, 1)) != 0) - goto err; - - ret = __env_fileid_reset(dbenv, name, LF_ISSET(DB_ENCRYPT) ? 1 : 0); - - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __env_fileid_reset -- - * Reset the file IDs for every database in the file. - */ -static int -__env_fileid_reset(dbenv, name, encrypted) - DB_ENV *dbenv; - const char *name; - int encrypted; -{ - DB *dbp; - DBC *dbcp; - DBT key, data; - DB_MPOOLFILE *mpf; - db_pgno_t pgno; - int t_ret, ret; - void *pagep; - char *real_name; - u_int8_t fileid[DB_FILE_ID_LEN]; - - dbp = NULL; - dbcp = NULL; - real_name = NULL; - - /* Get the real backing file name. 
*/ - if ((ret = - __db_appname(dbenv, DB_APP_DATA, name, 0, NULL, &real_name)) != 0) - return (ret); - - /* Get a new file ID. */ - if ((ret = __os_fileid(dbenv, real_name, 1, fileid)) != 0) - goto err; - - /* Create the DB object. */ - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - goto err; - - /* If configured with a password, the databases are encrypted. */ - if (encrypted && (ret = __db_set_flags(dbp, DB_ENCRYPT)) != 0) - goto err; - - /* - * Open the DB file. - * - * !!! - * Note DB_RDWRMASTER flag, we need to open the master database file - * for writing in this case. - */ - if ((ret = __db_open(dbp, NULL, - name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0, PGNO_BASE_MD)) != 0) - goto err; - - mpf = dbp->mpf; - - pgno = PGNO_BASE_MD; - if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0) - goto err; - memcpy(((DBMETA *)pagep)->uid, fileid, DB_FILE_ID_LEN); - if ((ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0) - goto err; - - /* - * If the database file doesn't support subdatabases, we only have - * to update a single metadata page. Otherwise, we have to open a - * cursor and step through the master database, and update all of - * the subdatabases' metadata pages. - */ - if (!F_ISSET(dbp, DB_AM_SUBDB)) - goto err; - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - if ((ret = __db_cursor(dbp, NULL, &dbcp, 0)) != 0) - goto err; - while ((ret = __db_c_get(dbcp, &key, &data, DB_NEXT)) == 0) { - /* - * XXX - * We're handling actual data, not on-page meta-data, so it - * hasn't been converted to/from opposite endian architectures. - * Do it explicitly, now. - */ - memcpy(&pgno, data.data, sizeof(db_pgno_t)); - DB_NTOHL(&pgno); - if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0) - goto err; - memcpy(((DBMETA *)pagep)->uid, fileid, DB_FILE_ID_LEN); - if ((ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0) - goto err; - } - if (ret == DB_NOTFOUND) - ret = 0; - -err: if (dbcp != NULL && (t_ret = __db_c_close(dbcp)) != 0 && ret == 0) - ret = t_ret; - if (dbp != NULL && (t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - if (real_name != NULL) - __os_free(dbenv, real_name); - - return (ret); -} diff --git a/storage/bdb/db/db_setlsn.c b/storage/bdb/db/db_setlsn.c deleted file mode 100644 index ef07fc49925..00000000000 --- a/storage/bdb/db/db_setlsn.c +++ /dev/null @@ -1,116 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_setlsn.c,v 12.8 2005/10/21 19:17:40 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_am.h" -#include "dbinc/mp.h" - -static int __env_lsn_reset __P((DB_ENV *, const char *, int)); - -/* - * __env_lsn_reset_pp -- - * DB_ENV->lsn_reset pre/post processing. - * - * PUBLIC: int __env_lsn_reset_pp __P((DB_ENV *, const char *, u_int32_t)); - */ -int -__env_lsn_reset_pp(dbenv, name, flags) - DB_ENV *dbenv; - const char *name; - u_int32_t flags; -{ - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - PANIC_CHECK(dbenv); - ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->lsn_reset"); - - /* - * !!! - * The actual argument checking is simple, do it inline, outside of - * the replication block. - */ - if (flags != 0 && flags != DB_ENCRYPT) - return (__db_ferr(dbenv, "DB_ENV->lsn_reset", 0)); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. 
*/ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __env_rep_enter(dbenv, 1)) != 0) - goto err; - - ret = __env_lsn_reset(dbenv, name, LF_ISSET(DB_ENCRYPT) ? 1 : 0); - - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __env_lsn_reset -- - * Reset the LSNs for every page in the file. - */ -static int -__env_lsn_reset(dbenv, name, encrypted) - DB_ENV *dbenv; - const char *name; - int encrypted; -{ - DB *dbp; - DB_MPOOLFILE *mpf; - PAGE *pagep; - db_pgno_t pgno; - int t_ret, ret; - - /* Create the DB object. */ - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - return (ret); - - /* If configured with a password, the databases are encrypted. */ - if (encrypted && (ret = __db_set_flags(dbp, DB_ENCRYPT)) != 0) - goto err; - - /* - * Open the DB file. - * - * !!! - * Note DB_RDWRMASTER flag, we need to open the master database file - * for writing in this case. - */ - if ((ret = __db_open(dbp, NULL, - name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0, PGNO_BASE_MD)) != 0) - goto err; - - /* Reset the LSN on every page of the database file. */ - mpf = dbp->mpf; - for (pgno = 0; - (ret = __memp_fget(mpf, &pgno, 0, &pagep)) == 0; ++pgno) { - LSN_NOT_LOGGED(pagep->lsn); - if ((ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0) - goto err; - } - - if (ret == DB_PAGE_NOTFOUND) - ret = 0; - -err: if ((t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} diff --git a/storage/bdb/db/db_stati.c b/storage/bdb/db/db_stati.c deleted file mode 100644 index fb0bf4bee4f..00000000000 --- a/storage/bdb/db/db_stati.c +++ /dev/null @@ -1,514 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_stati.c,v 12.10 2005/11/08 03:13:31 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#if TIME_WITH_SYS_TIME -#include -#include -#else -#if HAVE_SYS_TIME_H -#include -#else -#include -#endif -#endif - -#include -#endif - -#include "db_int.h" - -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/qam.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" - -#ifdef HAVE_STATISTICS -static int __db_print_all __P((DB *, u_int32_t)); -static int __db_print_citem __P((DBC *)); -static int __db_print_cursor __P((DB *)); -static int __db_print_stats __P((DB *, u_int32_t)); -static int __db_stat_arg __P((DB *, u_int32_t)); - -/* - * __db_stat_pp -- - * DB->stat pre/post processing. - * - * PUBLIC: int __db_stat_pp __P((DB *, DB_TXN *, void *, u_int32_t)); - */ -int -__db_stat_pp(dbp, txn, spp, flags) - DB *dbp; - DB_TXN *txn; - void *spp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbp->dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat"); - - if ((ret = __db_stat_arg(dbp, flags)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) { - handle_check = 0; - goto err; - } - - ret = __db_stat(dbp, txn, spp, flags); - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_stat -- - * DB->stat. 
- * - * PUBLIC: int __db_stat __P((DB *, DB_TXN *, void *, u_int32_t)); - */ -int -__db_stat(dbp, txn, spp, flags) - DB *dbp; - DB_TXN *txn; - void *spp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DBC *dbc; - int ret, t_ret; - - dbenv = dbp->dbenv; - - /* Acquire a cursor. */ - if ((ret = __db_cursor(dbp, txn, - &dbc, LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED))) != 0) - return (ret); - - DEBUG_LWRITE(dbc, NULL, "DB->stat", NULL, NULL, flags); - LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED); - - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - ret = __bam_stat(dbc, spp, flags); - break; - case DB_HASH: - ret = __ham_stat(dbc, spp, flags); - break; - case DB_QUEUE: - ret = __qam_stat(dbc, spp, flags); - break; - case DB_UNKNOWN: - default: - ret = (__db_unknown_type(dbenv, "DB->stat", dbp->type)); - break; - } - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_stat_arg -- - * Check DB->stat arguments. - */ -static int -__db_stat_arg(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - - dbenv = dbp->dbenv; - - /* Check for invalid function flags. */ - LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED); - switch (flags) { - case 0: - case DB_FAST_STAT: - case DB_CACHED_COUNTS: /* Deprecated and undocumented. */ - break; - case DB_RECORDCOUNT: /* Deprecated and undocumented. */ - if (dbp->type == DB_RECNO) - break; - if (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM)) - break; - /* FALLTHROUGH */ - default: - return (__db_ferr(dbenv, "DB->stat", 0)); - } - - return (0); -} - -/* - * __db_stat_print_pp -- - * DB->stat_print pre/post processing. - * - * PUBLIC: int __db_stat_print_pp __P((DB *, u_int32_t)); - */ -int -__db_stat_print_pp(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat"); - - /* - * !!! - * The actual argument checking is simple, do it inline. - */ - if ((ret = __db_fchk(dbenv, - "DB->stat_print", flags, DB_FAST_STAT | DB_STAT_ALL)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) { - handle_check = 0; - goto err; - } - - ret = __db_stat_print(dbp, flags); - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - -err: ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_stat_print -- - * DB->stat_print. - * - * PUBLIC: int __db_stat_print __P((DB *, u_int32_t)); - */ -int -__db_stat_print(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - int ret; - time_t now; - - (void)time(&now); - __db_msg(dbp->dbenv, "%.24s\tLocal time", ctime(&now)); - - if (LF_ISSET(DB_STAT_ALL) && (ret = __db_print_all(dbp, flags)) != 0) - return (ret); - - if ((ret = __db_print_stats(dbp, flags)) != 0) - return (ret); - - return (0); -} - -/* - * __db_print_stats -- - * Display default DB handle statistics. - */ -static int -__db_print_stats(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - DBC *dbc; - DB_ENV *dbenv; - int ret, t_ret; - - dbenv = dbp->dbenv; - - /* Acquire a cursor. 
*/ - if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) - return (ret); - - DEBUG_LWRITE(dbc, NULL, "DB->stat_print", NULL, NULL, 0); - - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - ret = __bam_stat_print(dbc, flags); - break; - case DB_HASH: - ret = __ham_stat_print(dbc, flags); - break; - case DB_QUEUE: - ret = __qam_stat_print(dbc, flags); - break; - case DB_UNKNOWN: - default: - ret = (__db_unknown_type(dbenv, "DB->stat_print", dbp->type)); - break; - } - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_print_all -- - * Display debugging DB handle statistics. - */ -static int -__db_print_all(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - static const FN fn[] = { - { DB_AM_CHKSUM, "DB_AM_CHKSUM" }, - { DB_AM_CL_WRITER, "DB_AM_CL_WRITER" }, - { DB_AM_COMPENSATE, "DB_AM_COMPENSATE" }, - { DB_AM_CREATED, "DB_AM_CREATED" }, - { DB_AM_CREATED_MSTR, "DB_AM_CREATED_MSTR" }, - { DB_AM_DBM_ERROR, "DB_AM_DBM_ERROR" }, - { DB_AM_DELIMITER, "DB_AM_DELIMITER" }, - { DB_AM_DISCARD, "DB_AM_DISCARD" }, - { DB_AM_DUP, "DB_AM_DUP" }, - { DB_AM_DUPSORT, "DB_AM_DUPSORT" }, - { DB_AM_ENCRYPT, "DB_AM_ENCRYPT" }, - { DB_AM_FIXEDLEN, "DB_AM_FIXEDLEN" }, - { DB_AM_INMEM, "DB_AM_INMEM" }, - { DB_AM_IN_RENAME, "DB_AM_IN_RENAME" }, - { DB_AM_NOT_DURABLE, "DB_AM_NOT_DURABLE" }, - { DB_AM_OPEN_CALLED, "DB_AM_OPEN_CALLED" }, - { DB_AM_PAD, "DB_AM_PAD" }, - { DB_AM_PGDEF, "DB_AM_PGDEF" }, - { DB_AM_RDONLY, "DB_AM_RDONLY" }, - { DB_AM_READ_UNCOMMITTED, "DB_AM_READ_UNCOMMITTED" }, - { DB_AM_RECNUM, "DB_AM_RECNUM" }, - { DB_AM_RECOVER, "DB_AM_RECOVER" }, - { DB_AM_RENUMBER, "DB_AM_RENUMBER" }, - { DB_AM_REVSPLITOFF, "DB_AM_REVSPLITOFF" }, - { DB_AM_SECONDARY, "DB_AM_SECONDARY" }, - { DB_AM_SNAPSHOT, "DB_AM_SNAPSHOT" }, - { DB_AM_SUBDB, "DB_AM_SUBDB" }, - { DB_AM_SWAP, "DB_AM_SWAP" }, - { DB_AM_TXN, "DB_AM_TXN" }, - { DB_AM_VERIFYING, "DB_AM_VERIFYING" }, - { 0, NULL } - }; - DB_ENV *dbenv; - - dbenv = dbp->dbenv; - - __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); - __db_msg(dbenv, "DB handle information:"); - STAT_ULONG("Page size", dbp->pgsize); - STAT_ISSET("Append recno", dbp->db_append_recno); - STAT_ISSET("Feedback", dbp->db_feedback); - STAT_ISSET("Dup compare", dbp->dup_compare); - STAT_ISSET("App private", dbp->app_private); - STAT_ISSET("DbEnv", dbp->dbenv); - STAT_STRING("Type", __db_dbtype_to_string(dbp->type)); - - __mutex_print_debug_single(dbenv, "Thread mutex", dbp->mutex, flags); - - STAT_STRING("File", dbp->fname); - STAT_STRING("Database", dbp->dname); - STAT_HEX("Open flags", dbp->open_flags); - - __db_print_fileid(dbenv, dbp->fileid, "\tFile ID"); - - STAT_ULONG("Cursor adjust ID", dbp->adj_fileid); - STAT_ULONG("Meta pgno", dbp->meta_pgno); - STAT_ULONG("Locker ID", dbp->lid); - STAT_ULONG("Handle lock", dbp->cur_lid); - STAT_ULONG("Associate lock", dbp->associate_lid); - STAT_ULONG("RPC remote ID", dbp->cl_id); - - __db_msg(dbenv, - "%.24s\tReplication handle timestamp", - dbp->timestamp == 0 ? 
"0" : ctime(&dbp->timestamp)); - - STAT_ISSET("Secondary callback", dbp->s_callback); - STAT_ISSET("Primary handle", dbp->s_primary); - - STAT_ISSET("api internal", dbp->api_internal); - STAT_ISSET("Btree/Recno internal", dbp->bt_internal); - STAT_ISSET("Hash internal", dbp->h_internal); - STAT_ISSET("Queue internal", dbp->q_internal); - STAT_ISSET("XA internal", dbp->xa_internal); - - __db_prflags(dbenv, NULL, dbp->flags, fn, NULL, "\tFlags"); - - if (dbp->log_filename == NULL) - STAT_ISSET("File naming information", dbp->log_filename); - else - __dbreg_print_fname(dbenv, dbp->log_filename); - - (void)__db_print_cursor(dbp); - - return (0); -} - -/* - * __db_print_cursor -- - * Display the cursor active and free queues. - */ -static int -__db_print_cursor(dbp) - DB *dbp; -{ - DB_ENV *dbenv; - DBC *dbc; - int ret, t_ret; - - dbenv = dbp->dbenv; - - __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); - __db_msg(dbenv, "DB handle cursors:"); - - ret = 0; - MUTEX_LOCK(dbp->dbenv, dbp->mutex); - __db_msg(dbenv, "Active queue:"); - for (dbc = TAILQ_FIRST(&dbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) - if ((t_ret = __db_print_citem(dbc)) != 0 && ret == 0) - ret = t_ret; - __db_msg(dbenv, "Join queue:"); - for (dbc = TAILQ_FIRST(&dbp->join_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) - if ((t_ret = __db_print_citem(dbc)) != 0 && ret == 0) - ret = t_ret; - __db_msg(dbenv, "Free queue:"); - for (dbc = TAILQ_FIRST(&dbp->free_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) - if ((t_ret = __db_print_citem(dbc)) != 0 && ret == 0) - ret = t_ret; - MUTEX_UNLOCK(dbp->dbenv, dbp->mutex); - - return (ret); -} - -static -int __db_print_citem(dbc) - DBC *dbc; -{ - static const FN fn[] = { - { DBC_ACTIVE, "DBC_ACTIVE" }, - { DBC_COMPENSATE, "DBC_COMPENSATE" }, - { DBC_MULTIPLE, "DBC_MULTIPLE" }, - { DBC_MULTIPLE_KEY, "DBC_MULTIPLE_KEY" }, - { DBC_OPD, "DBC_OPD" }, - { DBC_OWN_LID, "DBC_OWN_LID" }, - { DBC_READ_COMMITTED, "DBC_READ_COMMITTED" }, - { DBC_READ_UNCOMMITTED, "DBC_READ_UNCOMMITTED" }, - { DBC_RECOVER, "DBC_RECOVER" }, - { DBC_RMW, "DBC_RMW" }, - { DBC_TRANSIENT, "DBC_TRANSIENT" }, - { DBC_WRITECURSOR, "DBC_WRITECURSOR" }, - { DBC_WRITER, "DBC_WRITER" }, - { 0, NULL } - }; - DB *dbp; - DBC_INTERNAL *cp; - DB_ENV *dbenv; - - dbp = dbc->dbp; - dbenv = dbp->dbenv; - cp = dbc->internal; - - STAT_POINTER("DBC", dbc); - STAT_POINTER("Associated dbp", dbc->dbp); - STAT_POINTER("Associated txn", dbc->txn); - STAT_POINTER("Internal", cp); - STAT_HEX("Default locker ID", - dbc->lref == NULL ? 
0 : ((DB_LOCKER *)dbc->lref)->id); - STAT_HEX("Locker", dbc->locker); - STAT_STRING("Type", __db_dbtype_to_string(dbc->dbtype)); - - STAT_POINTER("Off-page duplicate cursor", cp->opd); - STAT_POINTER("Referenced page", cp->page); - STAT_ULONG("Root", cp->root); - STAT_ULONG("Page number", cp->pgno); - STAT_ULONG("Page index", cp->indx); - STAT_STRING("Lock mode", __db_lockmode_to_string(cp->lock_mode)); - __db_prflags(dbenv, NULL, dbc->flags, fn, NULL, "\tFlags"); - - switch (dbc->dbtype) { - case DB_BTREE: - case DB_RECNO: - __bam_print_cursor(dbc); - break; - case DB_HASH: - __ham_print_cursor(dbc); - break; - case DB_UNKNOWN: - DB_ASSERT(dbp->type != DB_UNKNOWN); - /* FALLTHROUGH */ - case DB_QUEUE: - default: - break; - } - return (0); -} - -#else /* !HAVE_STATISTICS */ - -int -__db_stat_pp(dbp, txn, spp, flags) - DB *dbp; - DB_TXN *txn; - void *spp; - u_int32_t flags; -{ - COMPQUIET(spp, NULL); - COMPQUIET(txn, NULL); - COMPQUIET(flags, 0); - - return (__db_stat_not_built(dbp->dbenv)); -} - -int -__db_stat_print_pp(dbp, flags) - DB *dbp; - u_int32_t flags; -{ - COMPQUIET(flags, 0); - - return (__db_stat_not_built(dbp->dbenv)); -} -#endif diff --git a/storage/bdb/db/db_truncate.c b/storage/bdb/db/db_truncate.c deleted file mode 100644 index c6b740969fb..00000000000 --- a/storage/bdb/db/db_truncate.c +++ /dev/null @@ -1,233 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_truncate.c,v 12.10 2005/10/21 19:22:59 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/qam.h" -#include "dbinc/lock.h" -#include "dbinc/log.h" -#include "dbinc/txn.h" - -static int __db_cursor_check __P((DB *)); - -/* - * __db_truncate_pp - * DB->truncate pre/post processing. - * - * PUBLIC: int __db_truncate_pp __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); - */ -int -__db_truncate_pp(dbp, txn, countp, flags) - DB *dbp; - DB_TXN *txn; - u_int32_t *countp, flags; -{ - DB_ENV *dbenv; - DB_THREAD_INFO *ip; - int handle_check, ret, t_ret, txn_local; - - dbenv = dbp->dbenv; - txn_local = 0; - handle_check = 0; - - PANIC_CHECK(dbenv); - STRIP_AUTO_COMMIT(flags); - - /* Check for invalid flags. */ - if (F_ISSET(dbp, DB_AM_SECONDARY)) { - __db_err(dbenv, - "DB->truncate forbidden on secondary indices"); - return (EINVAL); - } - if ((ret = __db_fchk(dbenv, "DB->truncate", flags, 0)) != 0) - return (ret); - - ENV_ENTER(dbenv, ip); - - /* - * Make sure there are no active cursors on this db. Since we drop - * pages we cannot really adjust cursors. - */ - if (__db_cursor_check(dbp) != 0) { - __db_err(dbenv, - "DB->truncate not permitted with active cursors"); - goto err; - } - -#if CONFIG_TEST - if (IS_REP_MASTER(dbenv)) - DB_TEST_WAIT(dbenv, dbenv->test_check); -#endif - /* Check for replication block. */ - handle_check = IS_ENV_REPLICATED(dbenv); - if (handle_check && - (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { - handle_check = 0; - goto err; - } - - /* - * Check for changes to a read-only database. - * This must be after the replication block so that we - * cannot race master/client state changes. - */ - if (DB_IS_READONLY(dbp)) { - ret = __db_rdonly(dbenv, "DB->truncate"); - goto err; - } - - /* - * Create local transaction as necessary, check for consistent - * transaction usage. 
- */ - if (IS_DB_AUTO_COMMIT(dbp, txn)) { - if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) - goto err; - txn_local = 1; - } - - /* Check for consistent transaction usage. */ - if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0) - goto err; - - ret = __db_truncate(dbp, txn, countp); - -err: if (txn_local && - (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0) - ret = t_ret; - - /* Release replication block. */ - if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) - ret = t_ret; - - ENV_LEAVE(dbenv, ip); - return (ret); -} - -/* - * __db_truncate - * DB->truncate. - * - * PUBLIC: int __db_truncate __P((DB *, DB_TXN *, u_int32_t *)); - */ -int -__db_truncate(dbp, txn, countp) - DB *dbp; - DB_TXN *txn; - u_int32_t *countp; -{ - DB *sdbp; - DBC *dbc; - DB_ENV *dbenv; - u_int32_t scount; - int ret, t_ret; - - dbenv = dbp->dbenv; - dbc = NULL; - ret = 0; - - /* - * Run through all secondaries and truncate them first. The count - * returned is the count of the primary only. QUEUE uses normal - * processing to truncate so it will update the secondaries normally. - */ - if (dbp->type != DB_QUEUE && LIST_FIRST(&dbp->s_secondaries) != NULL) { - if ((ret = __db_s_first(dbp, &sdbp)) != 0) - return (ret); - for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) - if ((ret = __db_truncate(sdbp, txn, &scount)) != 0) - break; - if (sdbp != NULL) - (void)__db_s_done(sdbp); - if (ret != 0) - return (ret); - } - - DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, NULL); - - /* Acquire a cursor. */ - if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) - return (ret); - - DEBUG_LWRITE(dbc, txn, "DB->truncate", NULL, NULL, 0); - - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - ret = __bam_truncate(dbc, countp); - break; - case DB_HASH: - ret = __ham_truncate(dbc, countp); - break; - case DB_QUEUE: - ret = __qam_truncate(dbc, countp); - break; - case DB_UNKNOWN: - default: - ret = __db_unknown_type(dbenv, "DB->truncate", dbp->type); - break; - } - - /* Discard the cursor. */ - if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, NULL); - -DB_TEST_RECOVERY_LABEL - - return (ret); -} - -/* - * __db_cursor_check -- - * See if there are any active cursors on this db. - */ -static int -__db_cursor_check(dbp) - DB *dbp; -{ - DB *ldbp; - DBC *dbc; - DB_ENV *dbenv; - int found; - - dbenv = dbp->dbenv; - - MUTEX_LOCK(dbenv, dbenv->mtx_dblist); - for (found = 0, ldbp = __dblist_get(dbenv, dbp->adj_fileid); - ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; - ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_LOCK(dbenv, dbp->mutex); - for (dbc = TAILQ_FIRST(&ldbp->active_queue); - dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { - if (IS_INITIALIZED(dbc)) { - found = 1; - break; - } - } - MUTEX_UNLOCK(dbenv, dbp->mutex); - if (found == 1) - break; - } - MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - - return (found); -} diff --git a/storage/bdb/db/db_upg.c b/storage/bdb/db/db_upg.c deleted file mode 100644 index 674202d5bb7..00000000000 --- a/storage/bdb/db/db_upg.c +++ /dev/null @@ -1,370 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: db_upg.c,v 12.1 2005/06/16 20:21:15 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_swap.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/qam.h" - -static int (* const func_31_list[P_PAGETYPE_MAX]) - __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)) = { - NULL, /* P_INVALID */ - NULL, /* __P_DUPLICATE */ - __ham_31_hash, /* P_HASH */ - NULL, /* P_IBTREE */ - NULL, /* P_IRECNO */ - __bam_31_lbtree, /* P_LBTREE */ - NULL, /* P_LRECNO */ - NULL, /* P_OVERFLOW */ - __ham_31_hashmeta, /* P_HASHMETA */ - __bam_31_btreemeta, /* P_BTREEMETA */ - NULL, /* P_QAMMETA */ - NULL, /* P_QAMDATA */ - NULL, /* P_LDUP */ -}; - -static int __db_page_pass __P((DB *, char *, u_int32_t, int (* const []) - (DB *, char *, u_int32_t, DB_FH *, PAGE *, int *), DB_FH *)); - -/* - * __db_upgrade_pp -- - * DB->upgrade pre/post processing. - * - * PUBLIC: int __db_upgrade_pp __P((DB *, const char *, u_int32_t)); - */ -int -__db_upgrade_pp(dbp, fname, flags) - DB *dbp; - const char *fname; - u_int32_t flags; -{ - DB_ENV *dbenv; - int ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbp->dbenv); - - /* - * !!! - * The actual argument checking is simple, do it inline. - */ - if ((ret = __db_fchk(dbenv, "DB->upgrade", flags, DB_DUPSORT)) != 0) - return (ret); - - return (__db_upgrade(dbp, fname, flags)); -} - -/* - * __db_upgrade -- - * Upgrade an existing database. - * - * PUBLIC: int __db_upgrade __P((DB *, const char *, u_int32_t)); - */ -int -__db_upgrade(dbp, fname, flags) - DB *dbp; - const char *fname; - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_FH *fhp; - size_t n; - int ret, t_ret; - u_int8_t mbuf[256]; - char *real_name; - - dbenv = dbp->dbenv; - fhp = NULL; - - /* Get the real backing file name. */ - if ((ret = __db_appname(dbenv, - DB_APP_DATA, fname, 0, NULL, &real_name)) != 0) - return (ret); - - /* Open the file. */ - if ((ret = __os_open(dbenv, real_name, 0, 0, &fhp)) != 0) { - __db_err(dbenv, "%s: %s", real_name, db_strerror(ret)); - return (ret); - } - - /* Initialize the feedback. */ - if (dbp->db_feedback != NULL) - dbp->db_feedback(dbp, DB_UPGRADE, 0); - - /* - * Read the metadata page. We read 256 bytes, which is larger than - * any access method's metadata page and smaller than any disk sector. - */ - if ((ret = __os_read(dbenv, fhp, mbuf, sizeof(mbuf), &n)) != 0) - goto err; - - switch (((DBMETA *)mbuf)->magic) { - case DB_BTREEMAGIC: - switch (((DBMETA *)mbuf)->version) { - case 6: - /* - * Before V7 not all pages had page types, so we do the - * single meta-data page by hand. - */ - if ((ret = - __bam_30_btreemeta(dbp, real_name, mbuf)) != 0) - goto err; - if ((ret = __os_seek(dbenv, - fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0) - goto err; - if ((ret = __os_write(dbenv, fhp, mbuf, 256, &n)) != 0) - goto err; - /* FALLTHROUGH */ - case 7: - /* - * We need the page size to do more. Rip it out of - * the meta-data page. 
- */ - memcpy(&dbp->pgsize, mbuf + 20, sizeof(u_int32_t)); - - if ((ret = __db_page_pass( - dbp, real_name, flags, func_31_list, fhp)) != 0) - goto err; - /* FALLTHROUGH */ - case 8: - case 9: - break; - default: - __db_err(dbenv, "%s: unsupported btree version: %lu", - real_name, (u_long)((DBMETA *)mbuf)->version); - ret = DB_OLD_VERSION; - goto err; - } - break; - case DB_HASHMAGIC: - switch (((DBMETA *)mbuf)->version) { - case 4: - case 5: - /* - * Before V6 not all pages had page types, so we do the - * single meta-data page by hand. - */ - if ((ret = - __ham_30_hashmeta(dbp, real_name, mbuf)) != 0) - goto err; - if ((ret = __os_seek(dbenv, - fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0) - goto err; - if ((ret = __os_write(dbenv, fhp, mbuf, 256, &n)) != 0) - goto err; - - /* - * Before V6, we created hash pages one by one as they - * were needed, using hashhdr.ovfl_point to reserve - * a block of page numbers for them. A consequence - * of this was that, if no overflow pages had been - * created, the current doubling might extend past - * the end of the database file. - * - * In DB 3.X, we now create all the hash pages - * belonging to a doubling atomically; it's not - * safe to just save them for later, because when - * we create an overflow page we'll just create - * a new last page (whatever that may be). Grow - * the database to the end of the current doubling. - */ - if ((ret = - __ham_30_sizefix(dbp, fhp, real_name, mbuf)) != 0) - goto err; - /* FALLTHROUGH */ - case 6: - /* - * We need the page size to do more. Rip it out of - * the meta-data page. - */ - memcpy(&dbp->pgsize, mbuf + 20, sizeof(u_int32_t)); - - if ((ret = __db_page_pass( - dbp, real_name, flags, func_31_list, fhp)) != 0) - goto err; - /* FALLTHROUGH */ - case 7: - case 8: - break; - default: - __db_err(dbenv, "%s: unsupported hash version: %lu", - real_name, (u_long)((DBMETA *)mbuf)->version); - ret = DB_OLD_VERSION; - goto err; - } - break; - case DB_QAMMAGIC: - switch (((DBMETA *)mbuf)->version) { - case 1: - /* - * If we're in a Queue database, the only page that - * needs upgrading is the meta-database page, don't - * bother with a full pass. - */ - if ((ret = __qam_31_qammeta(dbp, real_name, mbuf)) != 0) - return (ret); - /* FALLTHROUGH */ - case 2: - if ((ret = __qam_32_qammeta(dbp, real_name, mbuf)) != 0) - return (ret); - if ((ret = __os_seek(dbenv, - fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0) - goto err; - if ((ret = __os_write(dbenv, fhp, mbuf, 256, &n)) != 0) - goto err; - /* FALLTHROUGH */ - case 3: - case 4: - break; - default: - __db_err(dbenv, "%s: unsupported queue version: %lu", - real_name, (u_long)((DBMETA *)mbuf)->version); - ret = DB_OLD_VERSION; - goto err; - } - break; - default: - M_32_SWAP(((DBMETA *)mbuf)->magic); - switch (((DBMETA *)mbuf)->magic) { - case DB_BTREEMAGIC: - case DB_HASHMAGIC: - case DB_QAMMAGIC: - __db_err(dbenv, - "%s: DB->upgrade only supported on native byte-order systems", - real_name); - break; - default: - __db_err(dbenv, - "%s: unrecognized file type", real_name); - break; - } - ret = EINVAL; - goto err; - } - - ret = __os_fsync(dbenv, fhp); - -err: if (fhp != NULL && - (t_ret = __os_closehandle(dbenv, fhp)) != 0 && ret == 0) - ret = t_ret; - __os_free(dbenv, real_name); - - /* We're done. */ - if (dbp->db_feedback != NULL) - dbp->db_feedback(dbp, DB_UPGRADE, 100); - - return (ret); -} - -/* - * __db_page_pass -- - * Walk the pages of the database, upgrading whatever needs it. 
- */ -static int -__db_page_pass(dbp, real_name, flags, fl, fhp) - DB *dbp; - char *real_name; - u_int32_t flags; - int (* const fl[P_PAGETYPE_MAX]) - __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)); - DB_FH *fhp; -{ - DB_ENV *dbenv; - PAGE *page; - db_pgno_t i, pgno_last; - size_t n; - int dirty, ret; - - dbenv = dbp->dbenv; - - /* Determine the last page of the file. */ - if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0) - return (ret); - - /* Allocate memory for a single page. */ - if ((ret = __os_malloc(dbenv, dbp->pgsize, &page)) != 0) - return (ret); - - /* Walk the file, calling the underlying conversion functions. */ - for (i = 0; i < pgno_last; ++i) { - if (dbp->db_feedback != NULL) - dbp->db_feedback( - dbp, DB_UPGRADE, (int)((i * 100)/pgno_last)); - if ((ret = __os_seek(dbenv, - fhp, dbp->pgsize, i, 0, 0, DB_OS_SEEK_SET)) != 0) - break; - if ((ret = __os_read(dbenv, fhp, page, dbp->pgsize, &n)) != 0) - break; - dirty = 0; - if (fl[TYPE(page)] != NULL && (ret = fl[TYPE(page)] - (dbp, real_name, flags, fhp, page, &dirty)) != 0) - break; - if (dirty) { - if ((ret = __os_seek(dbenv, - fhp, dbp->pgsize, i, 0, 0, DB_OS_SEEK_SET)) != 0) - break; - if ((ret = __os_write(dbenv, - fhp, page, dbp->pgsize, &n)) != 0) - break; - } - } - - __os_free(dbp->dbenv, page); - return (ret); -} - -/* - * __db_lastpgno -- - * Return the current last page number of the file. - * - * PUBLIC: int __db_lastpgno __P((DB *, char *, DB_FH *, db_pgno_t *)); - */ -int -__db_lastpgno(dbp, real_name, fhp, pgno_lastp) - DB *dbp; - char *real_name; - DB_FH *fhp; - db_pgno_t *pgno_lastp; -{ - DB_ENV *dbenv; - db_pgno_t pgno_last; - u_int32_t mbytes, bytes; - int ret; - - dbenv = dbp->dbenv; - - if ((ret = __os_ioinfo(dbenv, - real_name, fhp, &mbytes, &bytes, NULL)) != 0) { - __db_err(dbenv, "%s: %s", real_name, db_strerror(ret)); - return (ret); - } - - /* Page sizes have to be a power-of-two. */ - if (bytes % dbp->pgsize != 0) { - __db_err(dbenv, - "%s: file size not a multiple of the pagesize", real_name); - return (EINVAL); - } - pgno_last = mbytes * (MEGABYTE / dbp->pgsize); - pgno_last += bytes / dbp->pgsize; - - *pgno_lastp = pgno_last; - return (0); -} diff --git a/storage/bdb/db/db_upg_opd.c b/storage/bdb/db/db_upg_opd.c deleted file mode 100644 index 23838be9ca8..00000000000 --- a/storage/bdb/db/db_upg_opd.c +++ /dev/null @@ -1,350 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: db_upg_opd.c,v 12.1 2005/06/16 20:21:15 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/btree.h" - -static int __db_build_bi __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *)); -static int __db_build_ri __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *)); -static int __db_up_ovref __P((DB *, DB_FH *, db_pgno_t)); - -#define GET_PAGE(dbp, fhp, pgno, page) { \ - if ((ret = __os_seek(dbp->dbenv, \ - fhp, (dbp)->pgsize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0) \ - goto err; \ - if ((ret = __os_read(dbp->dbenv, \ - fhp, page, (dbp)->pgsize, &n)) != 0) \ - goto err; \ -} -#define PUT_PAGE(dbp, fhp, pgno, page) { \ - if ((ret = __os_seek(dbp->dbenv, \ - fhp, (dbp)->pgsize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0) \ - goto err; \ - if ((ret = __os_write(dbp->dbenv, \ - fhp, page, (dbp)->pgsize, &n)) != 0) \ - goto err; \ -} - -/* - * __db_31_offdup -- - * Convert 3.0 off-page duplicates to 3.1 off-page duplicates. - * - * PUBLIC: int __db_31_offdup __P((DB *, char *, DB_FH *, int, db_pgno_t *)); - */ -int -__db_31_offdup(dbp, real_name, fhp, sorted, pgnop) - DB *dbp; - char *real_name; - DB_FH *fhp; - int sorted; - db_pgno_t *pgnop; -{ - PAGE *ipage, *page; - db_indx_t indx; - db_pgno_t cur_cnt, i, next_cnt, pgno, *pgno_cur, pgno_last; - db_pgno_t *pgno_next, pgno_max, *tmp; - db_recno_t nrecs; - size_t n; - int level, nomem, ret; - - ipage = page = NULL; - pgno_cur = pgno_next = NULL; - - /* Allocate room to hold a page. */ - if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &page)) != 0) - goto err; - - /* - * Walk the chain of 3.0 off-page duplicates. Each one is converted - * in place to a 3.1 off-page duplicate page. If the duplicates are - * sorted, they are converted to a Btree leaf page, otherwise to a - * Recno leaf page. - */ - for (nrecs = 0, cur_cnt = pgno_max = 0, - pgno = *pgnop; pgno != PGNO_INVALID;) { - if (pgno_max == cur_cnt) { - pgno_max += 20; - if ((ret = __os_realloc(dbp->dbenv, pgno_max * - sizeof(db_pgno_t), &pgno_cur)) != 0) - goto err; - } - pgno_cur[cur_cnt++] = pgno; - - GET_PAGE(dbp, fhp, pgno, page); - nrecs += NUM_ENT(page); - LEVEL(page) = LEAFLEVEL; - TYPE(page) = sorted ? P_LDUP : P_LRECNO; - /* - * !!! - * DB didn't zero the LSNs on off-page duplicates pages. - */ - ZERO_LSN(LSN(page)); - PUT_PAGE(dbp, fhp, pgno, page); - - pgno = NEXT_PGNO(page); - } - - /* If we only have a single page, it's easy. */ - if (cur_cnt <= 1) - goto done; - - /* - * pgno_cur is the list of pages we just converted. We're - * going to walk that list, but we'll need to create a new - * list while we do so. - */ - if ((ret = __os_malloc(dbp->dbenv, - cur_cnt * sizeof(db_pgno_t), &pgno_next)) != 0) - goto err; - - /* Figure out where we can start allocating new pages. */ - if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0) - goto err; - - /* Allocate room for an internal page. */ - if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &ipage)) != 0) - goto err; - PGNO(ipage) = PGNO_INVALID; - - /* - * Repeatedly walk the list of pages, building internal pages, until - * there's only one page at a level. - */ - for (level = LEAFLEVEL + 1; cur_cnt > 1; ++level) { - for (indx = 0, i = next_cnt = 0; i < cur_cnt;) { - if (indx == 0) { - P_INIT(ipage, dbp->pgsize, pgno_last, - PGNO_INVALID, PGNO_INVALID, - level, sorted ? 
P_IBTREE : P_IRECNO); - ZERO_LSN(LSN(ipage)); - - pgno_next[next_cnt++] = pgno_last++; - } - - GET_PAGE(dbp, fhp, pgno_cur[i], page); - - /* - * If the duplicates are sorted, put the first item on - * the lower-level page onto a Btree internal page. If - * the duplicates are not sorted, create an internal - * Recno structure on the page. If either case doesn't - * fit, push out the current page and start a new one. - */ - nomem = 0; - if (sorted) { - if ((ret = __db_build_bi( - dbp, fhp, ipage, page, indx, &nomem)) != 0) - goto err; - } else - if ((ret = __db_build_ri( - dbp, fhp, ipage, page, indx, &nomem)) != 0) - goto err; - if (nomem) { - indx = 0; - PUT_PAGE(dbp, fhp, PGNO(ipage), ipage); - } else { - ++indx; - ++NUM_ENT(ipage); - ++i; - } - } - - /* - * Push out the last internal page. Set the top-level record - * count if we've reached the top. - */ - if (next_cnt == 1) - RE_NREC_SET(ipage, nrecs); - PUT_PAGE(dbp, fhp, PGNO(ipage), ipage); - - /* Swap the current and next page number arrays. */ - cur_cnt = next_cnt; - tmp = pgno_cur; - pgno_cur = pgno_next; - pgno_next = tmp; - } - -done: *pgnop = pgno_cur[0]; - -err: if (pgno_cur != NULL) - __os_free(dbp->dbenv, pgno_cur); - if (pgno_next != NULL) - __os_free(dbp->dbenv, pgno_next); - if (ipage != NULL) - __os_free(dbp->dbenv, ipage); - if (page != NULL) - __os_free(dbp->dbenv, page); - - return (ret); -} - -/* - * __db_build_bi -- - * Build a BINTERNAL entry for a parent page. - */ -static int -__db_build_bi(dbp, fhp, ipage, page, indx, nomemp) - DB *dbp; - DB_FH *fhp; - PAGE *ipage, *page; - u_int32_t indx; - int *nomemp; -{ - BINTERNAL bi, *child_bi; - BKEYDATA *child_bk; - u_int8_t *p; - int ret; - db_indx_t *inp; - - inp = P_INP(dbp, ipage); - switch (TYPE(page)) { - case P_IBTREE: - child_bi = GET_BINTERNAL(dbp, page, 0); - if (P_FREESPACE(dbp, ipage) < BINTERNAL_PSIZE(child_bi->len)) { - *nomemp = 1; - return (0); - } - inp[indx] = - HOFFSET(ipage) -= BINTERNAL_SIZE(child_bi->len); - p = P_ENTRY(dbp, ipage, indx); - - bi.len = child_bi->len; - B_TSET(bi.type, child_bi->type, 0); - bi.pgno = PGNO(page); - bi.nrecs = __bam_total(dbp, page); - memcpy(p, &bi, SSZA(BINTERNAL, data)); - p += SSZA(BINTERNAL, data); - memcpy(p, child_bi->data, child_bi->len); - - /* Increment the overflow ref count. */ - if (B_TYPE(child_bi->type) == B_OVERFLOW) - if ((ret = __db_up_ovref(dbp, fhp, - ((BOVERFLOW *)(child_bi->data))->pgno)) != 0) - return (ret); - break; - case P_LDUP: - child_bk = GET_BKEYDATA(dbp, page, 0); - switch (B_TYPE(child_bk->type)) { - case B_KEYDATA: - if (P_FREESPACE(dbp, ipage) < - BINTERNAL_PSIZE(child_bk->len)) { - *nomemp = 1; - return (0); - } - inp[indx] = - HOFFSET(ipage) -= BINTERNAL_SIZE(child_bk->len); - p = P_ENTRY(dbp, ipage, indx); - - bi.len = child_bk->len; - B_TSET(bi.type, child_bk->type, 0); - bi.pgno = PGNO(page); - bi.nrecs = __bam_total(dbp, page); - memcpy(p, &bi, SSZA(BINTERNAL, data)); - p += SSZA(BINTERNAL, data); - memcpy(p, child_bk->data, child_bk->len); - break; - case B_OVERFLOW: - if (P_FREESPACE(dbp, ipage) < - BINTERNAL_PSIZE(BOVERFLOW_SIZE)) { - *nomemp = 1; - return (0); - } - inp[indx] = - HOFFSET(ipage) -= BINTERNAL_SIZE(BOVERFLOW_SIZE); - p = P_ENTRY(dbp, ipage, indx); - - bi.len = BOVERFLOW_SIZE; - B_TSET(bi.type, child_bk->type, 0); - bi.pgno = PGNO(page); - bi.nrecs = __bam_total(dbp, page); - memcpy(p, &bi, SSZA(BINTERNAL, data)); - p += SSZA(BINTERNAL, data); - memcpy(p, child_bk, BOVERFLOW_SIZE); - - /* Increment the overflow ref count. 
*/ - if ((ret = __db_up_ovref(dbp, fhp, - ((BOVERFLOW *)child_bk)->pgno)) != 0) - return (ret); - break; - default: - return (__db_pgfmt(dbp->dbenv, PGNO(page))); - } - break; - default: - return (__db_pgfmt(dbp->dbenv, PGNO(page))); - } - - return (0); -} - -/* - * __db_build_ri -- - * Build a RINTERNAL entry for an internal parent page. - */ -static int -__db_build_ri(dbp, fhp, ipage, page, indx, nomemp) - DB *dbp; - DB_FH *fhp; - PAGE *ipage, *page; - u_int32_t indx; - int *nomemp; -{ - RINTERNAL ri; - db_indx_t *inp; - - COMPQUIET(fhp, NULL); - inp = P_INP(dbp, ipage); - if (P_FREESPACE(dbp, ipage) < RINTERNAL_PSIZE) { - *nomemp = 1; - return (0); - } - - ri.pgno = PGNO(page); - ri.nrecs = __bam_total(dbp, page); - inp[indx] = HOFFSET(ipage) -= RINTERNAL_SIZE; - memcpy(P_ENTRY(dbp, ipage, indx), &ri, RINTERNAL_SIZE); - - return (0); -} - -/* - * __db_up_ovref -- - * Increment/decrement the reference count on an overflow page. - */ -static int -__db_up_ovref(dbp, fhp, pgno) - DB *dbp; - DB_FH *fhp; - db_pgno_t pgno; -{ - PAGE *page; - size_t n; - int ret; - - /* Allocate room to hold a page. */ - if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &page)) != 0) - return (ret); - - GET_PAGE(dbp, fhp, pgno, page); - ++OV_REF(page); - PUT_PAGE(dbp, fhp, pgno, page); - -err: __os_free(dbp->dbenv, page); - - return (ret); -} diff --git a/storage/bdb/db/db_vrfy.c b/storage/bdb/db/db_vrfy.c deleted file mode 100644 index 4f33e451099..00000000000 --- a/storage/bdb/db/db_vrfy.c +++ /dev/null @@ -1,2592 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_vrfy.c,v 12.14 2005/10/07 16:49:47 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_swap.h" -#include "dbinc/db_verify.h" -#include "dbinc/btree.h" -#include "dbinc/hash.h" -#include "dbinc/lock.h" -#include "dbinc/mp.h" -#include "dbinc/qam.h" -#include "dbinc/txn.h" - -/* - * This is the code for DB->verify, the DB database consistency checker. - * For now, it checks all subdatabases in a database, and verifies - * everything it knows how to (i.e. it's all-or-nothing, and one can't - * check only for a subset of possible problems). 
- */ - -static u_int __db_guesspgsize __P((DB_ENV *, DB_FH *)); -static int __db_is_valid_magicno __P((u_int32_t, DBTYPE *)); -static int __db_is_valid_pagetype __P((u_int32_t)); -static int __db_meta2pgset - __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, DB *)); -static int __db_salvage_subdbpg __P((DB *, VRFY_DBINFO *, - PAGE *, void *, int (*)(void *, const void *), u_int32_t)); -static int __db_salvage_subdbs __P((DB *, VRFY_DBINFO *, void *, - int(*)(void *, const void *), u_int32_t, int *)); -static int __db_salvage_unknowns __P((DB *, VRFY_DBINFO *, void *, - int (*)(void *, const void *), u_int32_t)); -static int __db_verify __P((DB *, const char *, const char *, - void *, int (*)(void *, const void *), u_int32_t)); -static int __db_verify_arg __P((DB *, const char *, void *, u_int32_t)); -static int __db_vrfy_freelist - __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t)); -static int __db_vrfy_invalid - __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t)); -static int __db_vrfy_orderchkonly __P((DB *, - VRFY_DBINFO *, const char *, const char *, u_int32_t)); -static int __db_vrfy_pagezero __P((DB *, VRFY_DBINFO *, DB_FH *, u_int32_t)); -static int __db_vrfy_subdbs - __P((DB *, VRFY_DBINFO *, const char *, u_int32_t)); -static int __db_vrfy_structure - __P((DB *, VRFY_DBINFO *, const char *, db_pgno_t, u_int32_t)); -static int __db_vrfy_walkpages __P((DB *, VRFY_DBINFO *, - void *, int (*)(void *, const void *), u_int32_t)); - -#define VERIFY_FLAGS \ - (DB_AGGRESSIVE | \ - DB_NOORDERCHK | DB_ORDERCHKONLY | DB_PRINTABLE | DB_SALVAGE | DB_UNREF) - -/* - * __db_verify_pp -- - * DB->verify public interface. - * - * PUBLIC: int __db_verify_pp - * PUBLIC: __P((DB *, const char *, const char *, FILE *, u_int32_t)); - */ -int -__db_verify_pp(dbp, file, database, outfile, flags) - DB *dbp; - const char *file, *database; - FILE *outfile; - u_int32_t flags; -{ - /* - * __db_verify_pp is a wrapper to __db_verify_internal, which lets - * us pass appropriate equivalents to FILE * in from the non-C APIs. - */ - return (__db_verify_internal(dbp, - file, database, outfile, __db_pr_callback, flags)); -} - -/* - * __db_verify_internal -- - * - * PUBLIC: int __db_verify_internal __P((DB *, const char *, - * PUBLIC: const char *, void *, int (*)(void *, const void *), u_int32_t)); - */ -int -__db_verify_internal(dbp, fname, dname, handle, callback, flags) - DB *dbp; - const char *fname, *dname; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - DB_ENV *dbenv; - int ret, t_ret; - - dbenv = dbp->dbenv; - - PANIC_CHECK(dbenv); - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->verify"); - -#ifdef HAVE_FTRUNCATE - /* - * If we're using ftruncate to abort page-allocation functions, there - * should never be unreferenced pages. Always check for unreferenced - * pages on those systems. - */ - if (!LF_ISSET(DB_SALVAGE)) - LF_SET(DB_UNREF); -#endif - - if ((ret = __db_verify_arg(dbp, dname, handle, flags)) == 0) - ret = __db_verify(dbp, fname, dname, handle, callback, flags); - - /* Db.verify is a DB handle destructor. */ - if ((t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_verify_arg -- - * Check DB->verify arguments. 
- */ -static int -__db_verify_arg(dbp, dname, handle, flags) - DB *dbp; - const char *dname; - void *handle; - u_int32_t flags; -{ - DB_ENV *dbenv; - int ret; - - dbenv = dbp->dbenv; - - if ((ret = __db_fchk(dbenv, "DB->verify", flags, VERIFY_FLAGS)) != 0) - return (ret); - - /* - * DB_SALVAGE is mutually exclusive with the other flags except - * DB_AGGRESSIVE, DB_PRINTABLE. - * - * DB_AGGRESSIVE and DB_PRINTABLE are only meaningful when salvaging. - * - * DB_SALVAGE requires an output stream. - */ - if (LF_ISSET(DB_SALVAGE)) { - if (LF_ISSET(~(DB_AGGRESSIVE | DB_PRINTABLE | DB_SALVAGE))) - return (__db_ferr(dbenv, "DB->verify", 1)); - if (handle == NULL) { - __db_err(dbenv, - "DB_SALVAGE requires a an output handle"); - return (EINVAL); - } - } else - if (LF_ISSET(DB_AGGRESSIVE | DB_PRINTABLE)) - return (__db_ferr(dbenv, "DB->verify", 1)); - - /* - * DB_ORDERCHKONLY is mutually exclusive with DB_SALVAGE and - * DB_NOORDERCHK, and requires a database name. - */ - if ((ret = __db_fcchk(dbenv, "DB->verify", flags, - DB_ORDERCHKONLY, DB_SALVAGE | DB_NOORDERCHK)) != 0) - return (ret); - if (LF_ISSET(DB_ORDERCHKONLY) && dname == NULL) { - __db_err(dbenv, "DB_ORDERCHKONLY requires a database name"); - return (EINVAL); - } - return (0); -} - -/* - * __db_verify -- - * Walk the entire file page-by-page, either verifying with or without - * dumping in db_dump -d format, or DB_SALVAGE-ing whatever key/data - * pairs can be found and dumping them in standard (db_load-ready) - * dump format. - * - * (Salvaging isn't really a verification operation, but we put it - * here anyway because it requires essentially identical top-level - * code.) - * - * flags may be 0, DB_NOORDERCHK, DB_ORDERCHKONLY, or DB_SALVAGE - * (and optionally DB_AGGRESSIVE). - */ -static int -__db_verify(dbp, name, subdb, handle, callback, flags) - DB *dbp; - const char *name, *subdb; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_FH *fhp; - VRFY_DBINFO *vdp; - int has_subdbs, isbad, ret, t_ret; - char *real_name; - - dbenv = dbp->dbenv; - fhp = NULL; - vdp = NULL; - real_name = NULL; - has_subdbs = isbad = ret = 0; - - F_SET(dbp, DB_AM_VERIFYING); - - /* Initialize any feedback function. */ - if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL) - dbp->db_feedback(dbp, DB_VERIFY, 0); - - /* - * We don't know how large the cache is, and if the database - * in question uses a small page size--which we don't know - * yet!--it may be uncomfortably small for the default page - * size [#2143]. However, the things we need temporary - * databases for in dbinfo are largely tiny, so using a - * 1024-byte pagesize is probably not going to be a big hit, - * and will make us fit better into small spaces. - */ - if ((ret = __db_vrfy_dbinfo_create(dbenv, 1024, &vdp)) != 0) - goto err; - - /* - * Note whether the user has requested that we use printable - * chars where possible. We won't get here with this flag if - * we're not salvaging. - */ - if (LF_ISSET(DB_PRINTABLE)) - F_SET(vdp, SALVAGE_PRINTABLE); - - /* Find the real name of the file. */ - if ((ret = __db_appname(dbenv, - DB_APP_DATA, name, 0, NULL, &real_name)) != 0) - goto err; - - /* - * Our first order of business is to verify page 0, which is - * the metadata page for the master database of subdatabases - * or of the only database in the file. We want to do this by hand - * rather than just calling __db_open in case it's corrupt--various - * things in __db_open might act funny. 
- * - * Once we know the metadata page is healthy, I believe that it's - * safe to open the database normally and then use the page swapping - * code, which makes life easier. - */ - if ((ret = __os_open(dbenv, real_name, DB_OSO_RDONLY, 0, &fhp)) != 0) - goto err; - - /* Verify the metadata page 0; set pagesize and type. */ - if ((ret = __db_vrfy_pagezero(dbp, vdp, fhp, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - - /* - * We can assume at this point that dbp->pagesize and dbp->type are - * set correctly, or at least as well as they can be, and that - * locking, logging, and txns are not in use. Thus we can trust - * the memp code not to look at the page, and thus to be safe - * enough to use. - * - * The dbp is not open, but the file is open in the fhp, and we - * cannot assume that __db_open is safe. Call __db_dbenv_setup, - * the [safe] part of __db_open that initializes the environment-- - * and the mpool--manually. - */ - if ((ret = __db_dbenv_setup(dbp, NULL, - name, subdb, TXN_INVALID, DB_ODDFILESIZE | DB_RDONLY)) != 0) - goto err; - - /* - * Set our name in the Queue subsystem; we may need it later - * to deal with extents. - */ - if (dbp->type == DB_QUEUE && - (ret = __qam_set_ext_data(dbp, name)) != 0) - goto err; - - /* Mark the dbp as opened, so that we correctly handle its close. */ - F_SET(dbp, DB_AM_OPEN_CALLED); - - /* Find out the page number of the last page in the database. */ - if ((ret = __memp_last_pgno(dbp->mpf, &vdp->last_pgno)) != 0) - goto err; - - /* - * DB_ORDERCHKONLY is a special case; our file consists of - * several subdatabases, which use different hash, bt_compare, - * and/or dup_compare functions. Consequently, we couldn't verify - * sorting and hashing simply by calling DB->verify() on the file. - * DB_ORDERCHKONLY allows us to come back and check those things; it - * requires a subdatabase, and assumes that everything but that - * database's sorting/hashing is correct. - */ - if (LF_ISSET(DB_ORDERCHKONLY)) { - ret = __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags); - goto done; - } - - /* - * When salvaging, we use a db to keep track of whether we've seen a - * given overflow or dup page in the course of traversing normal data. - * If in the end we have not, we assume its key got lost and print it - * with key "UNKNOWN". - */ - if (LF_ISSET(DB_SALVAGE)) { - if ((ret = __db_salvage_init(vdp)) != 0) - goto err; - - /* - * If we're not being aggressive, attempt to crack subdatabases. - * "has_subdbs" will indicate whether the attempt has succeeded - * (even in part), meaning that we have some semblance of - * subdatabases; on the walkpages pass, we print out whichever - * data pages we have not seen. - */ - if (!LF_ISSET(DB_AGGRESSIVE) && __db_salvage_subdbs( - dbp, vdp, handle, callback, flags, &has_subdbs) != 0) - isbad = 1; - - /* - * If we have subdatabases, flag if any keys are found that - * don't belong to a subdatabase -- they'll need to have an - * "__OTHER__" subdatabase header printed first. - */ - if (has_subdbs) - F_SET(vdp, SALVAGE_PRINTHEADER); - } - - if ((ret = - __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - - /* If we're verifying, verify inter-page structure. 
*/ - if (!LF_ISSET(DB_SALVAGE) && isbad == 0) - if ((ret = - __db_vrfy_structure(dbp, vdp, name, 0, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - - /* - * If we're salvaging, output with key UNKNOWN any overflow or dup pages - * we haven't been able to put in context. Then destroy the salvager's - * state-saving database. - */ - if (LF_ISSET(DB_SALVAGE)) { - if ((ret = __db_salvage_unknowns(dbp, - vdp, handle, callback, flags)) != 0) - isbad = 1; - /* No return value, since there's little we can do. */ - __db_salvage_destroy(vdp); - } - - /* Don't display a footer for a database holding other databases. */ - if (LF_ISSET(DB_SALVAGE) && - (!has_subdbs || F_ISSET(vdp, SALVAGE_PRINTFOOTER))) - (void)__db_prfooter(handle, callback); - -done: err: - /* Send feedback that we're done. */ - if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL) - dbp->db_feedback(dbp, DB_VERIFY, 100); - - if (fhp != NULL && - (t_ret = __os_closehandle(dbenv, fhp)) != 0 && ret == 0) - ret = t_ret; - if (vdp != NULL && - (t_ret = __db_vrfy_dbinfo_destroy(dbenv, vdp)) != 0 && ret == 0) - ret = t_ret; - if (real_name != NULL) - __os_free(dbenv, real_name); - - /* - * DB_VERIFY_FATAL is a private error, translate to a public one. - * - * If we didn't find a page, it's probably a page number was corrupted. - * Return the standard corruption error. - * - * Otherwise, if we found corruption along the way, set the return. - */ - if (ret == DB_VERIFY_FATAL || - ret == DB_PAGE_NOTFOUND || (ret == 0 && isbad == 1)) - ret = DB_VERIFY_BAD; - - /* Make sure there's a public complaint if we found corruption. */ - if (ret != 0) - __db_err(dbenv, "%s: %s", name, db_strerror(ret)); - - return (ret); -} - -/* - * __db_vrfy_pagezero -- - * Verify the master metadata page. Use seek, read, and a local buffer - * rather than the DB paging code, for safety. - * - * Must correctly (or best-guess) set dbp->type and dbp->pagesize. - */ -static int -__db_vrfy_pagezero(dbp, vdp, fhp, flags) - DB *dbp; - VRFY_DBINFO *vdp; - DB_FH *fhp; - u_int32_t flags; -{ - DBMETA *meta; - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - db_pgno_t freelist; - size_t nr; - int isbad, ret, swapped; - u_int8_t mbuf[DBMETASIZE]; - - isbad = ret = swapped = 0; - freelist = 0; - dbenv = dbp->dbenv; - meta = (DBMETA *)mbuf; - dbp->type = DB_UNKNOWN; - - if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0) - return (ret); - - /* - * Seek to the metadata page. - * Note that if we're just starting a verification, dbp->pgsize - * may be zero; this is okay, as we want page zero anyway and - * 0*0 == 0. - */ - if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0 || - (ret = __os_read(dbenv, fhp, mbuf, DBMETASIZE, &nr)) != 0) { - __db_err(dbenv, - "Metadata page %lu cannot be read: %s", - (u_long)PGNO_BASE_MD, db_strerror(ret)); - return (ret); - } - - if (nr != DBMETASIZE) { - EPRINT((dbenv, - "Page %lu: Incomplete metadata page", - (u_long)PGNO_BASE_MD)); - return (DB_VERIFY_FATAL); - } - - if ((ret = __db_chk_meta(dbenv, dbp, meta, 1)) != 0) { - EPRINT((dbenv, - "Page %lu: metadata page corrupted", (u_long)PGNO_BASE_MD)); - isbad = 1; - if (ret != -1) { - EPRINT((dbenv, - "Page %lu: could not check metadata page", - (u_long)PGNO_BASE_MD)); - return (DB_VERIFY_FATAL); - } - } - - /* - * Check all of the fields that we can. - * - * 08-11: Current page number. Must == pgno. - * Note that endianness doesn't matter--it's zero. 
- */ - if (meta->pgno != PGNO_BASE_MD) { - isbad = 1; - EPRINT((dbenv, "Page %lu: pgno incorrectly set to %lu", - (u_long)PGNO_BASE_MD, (u_long)meta->pgno)); - } - - /* 12-15: Magic number. Must be one of valid set. */ - if (__db_is_valid_magicno(meta->magic, &dbp->type)) - swapped = 0; - else { - M_32_SWAP(meta->magic); - if (__db_is_valid_magicno(meta->magic, - &dbp->type)) - swapped = 1; - else { - isbad = 1; - EPRINT((dbenv, - "Page %lu: bad magic number %lu", - (u_long)PGNO_BASE_MD, (u_long)meta->magic)); - } - } - - /* - * 16-19: Version. Must be current; for now, we - * don't support verification of old versions. - */ - if (swapped) - M_32_SWAP(meta->version); - if ((dbp->type == DB_BTREE && - (meta->version > DB_BTREEVERSION || - meta->version < DB_BTREEOLDVER)) || - (dbp->type == DB_HASH && - (meta->version > DB_HASHVERSION || - meta->version < DB_HASHOLDVER)) || - (dbp->type == DB_QUEUE && - (meta->version > DB_QAMVERSION || - meta->version < DB_QAMOLDVER))) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: unsupported DB version %lu; extraneous errors may result", - (u_long)PGNO_BASE_MD, (u_long)meta->version)); - } - - /* - * 20-23: Pagesize. Must be power of two, - * greater than 512, and less than 64K. - */ - if (swapped) - M_32_SWAP(meta->pagesize); - if (IS_VALID_PAGESIZE(meta->pagesize)) - dbp->pgsize = meta->pagesize; - else { - isbad = 1; - EPRINT((dbenv, "Page %lu: bad page size %lu", - (u_long)PGNO_BASE_MD, (u_long)meta->pagesize)); - - /* - * Now try to settle on a pagesize to use. - * If the user-supplied one is reasonable, - * use it; else, guess. - */ - if (!IS_VALID_PAGESIZE(dbp->pgsize)) - dbp->pgsize = __db_guesspgsize(dbenv, fhp); - } - - /* - * 25: Page type. Must be correct for dbp->type, - * which is by now set as well as it can be. - */ - /* Needs no swapping--only one byte! */ - if ((dbp->type == DB_BTREE && meta->type != P_BTREEMETA) || - (dbp->type == DB_HASH && meta->type != P_HASHMETA) || - (dbp->type == DB_QUEUE && meta->type != P_QAMMETA)) { - isbad = 1; - EPRINT((dbenv, "Page %lu: bad page type %lu", - (u_long)PGNO_BASE_MD, (u_long)meta->type)); - } - - /* - * 26: Meta-flags. - */ - if (meta->metaflags != 0) { - if (meta->metaflags == DBMETA_CHKSUM) - F_SET(pip, VRFY_HAS_CHKSUM); - else { - isbad = 1; - EPRINT((dbenv, - "Page %lu: bad meta-data flags value %#lx", - (u_long)PGNO_BASE_MD, (u_long)meta->metaflags)); - } - } - - /* - * 28-31: Free list page number. - * We'll verify its sensibility when we do inter-page - * verification later; for now, just store it. - */ - if (swapped) - M_32_SWAP(meta->free); - freelist = meta->free; - - /* - * Initialize vdp->pages to fit a single pageinfo structure for - * this one page. We'll realloc later when we know how many - * pages there are. - */ - pip->pgno = PGNO_BASE_MD; - pip->type = meta->type; - - /* - * Signal that we still have to check the info specific to - * a given type of meta page. - */ - F_SET(pip, VRFY_INCOMPLETE); - - pip->free = freelist; - - if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) - return (ret); - - /* Set up the dbp's fileid. We don't use the regular open path. */ - memcpy(dbp->fileid, meta->uid, DB_FILE_ID_LEN); - - if (swapped == 1) - F_SET(dbp, DB_AM_SWAP); - - return (isbad ? DB_VERIFY_BAD : 0); -} - -/* - * __db_vrfy_walkpages -- - * Main loop of the verifier/salvager. Walks through, - * page by page, and verifies all pages and/or prints all data pages. 
- */ -static int -__db_vrfy_walkpages(dbp, vdp, handle, callback, flags) - DB *dbp; - VRFY_DBINFO *vdp; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - DB_ENV *dbenv; - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t i; - int ret, t_ret, isbad; - - dbenv = dbp->dbenv; - mpf = dbp->mpf; - h = NULL; - ret = isbad = t_ret = 0; - - for (i = 0; i <= vdp->last_pgno; i++) { - /* - * If DB_SALVAGE is set, we inspect our database of completed - * pages, and skip any we've already printed in the subdb pass. - */ - if (LF_ISSET(DB_SALVAGE) && (__db_salvage_isdone(vdp, i) != 0)) - continue; - - /* - * If an individual page get fails, keep going if and only - * if we're salvaging. - */ - if ((t_ret = __memp_fget(mpf, &i, 0, &h)) != 0) { - if (ret == 0) - ret = t_ret; - if (LF_ISSET(DB_SALVAGE)) - continue; - return (ret); - } - - if (LF_ISSET(DB_SALVAGE)) { - /* - * We pretty much don't want to quit unless a - * bomb hits. May as well return that something - * was screwy, however. - */ - if ((t_ret = __db_salvage(dbp, - vdp, i, h, handle, callback, flags)) != 0) { - if (ret == 0) - ret = t_ret; - isbad = 1; - } - } else { - /* - * If we are not salvaging, and we get any error - * other than DB_VERIFY_BAD, return immediately; - * it may not be safe to proceed. If we get - * DB_VERIFY_BAD, keep going; listing more errors - * may make it easier to diagnose problems and - * determine the magnitude of the corruption. - * - * Verify info common to all page types. - */ - if (i != PGNO_BASE_MD) { - ret = __db_vrfy_common(dbp, vdp, h, i, flags); - if (ret == DB_VERIFY_BAD) - isbad = 1; - else if (ret != 0) - goto err; - } - - switch (TYPE(h)) { - case P_INVALID: - ret = __db_vrfy_invalid(dbp, vdp, h, i, flags); - break; - case __P_DUPLICATE: - isbad = 1; - EPRINT((dbenv, - "Page %lu: old-style duplicate page", - (u_long)i)); - break; - case P_HASH: - ret = __ham_vrfy(dbp, vdp, h, i, flags); - break; - case P_IBTREE: - case P_IRECNO: - case P_LBTREE: - case P_LDUP: - ret = __bam_vrfy(dbp, vdp, h, i, flags); - break; - case P_LRECNO: - ret = __ram_vrfy_leaf(dbp, vdp, h, i, flags); - break; - case P_OVERFLOW: - ret = __db_vrfy_overflow(dbp, vdp, h, i, flags); - break; - case P_HASHMETA: - ret = __ham_vrfy_meta(dbp, - vdp, (HMETA *)h, i, flags); - break; - case P_BTREEMETA: - ret = __bam_vrfy_meta(dbp, - vdp, (BTMETA *)h, i, flags); - break; - case P_QAMMETA: - ret = __qam_vrfy_meta(dbp, - vdp, (QMETA *)h, i, flags); - break; - case P_QAMDATA: - ret = __qam_vrfy_data(dbp, - vdp, (QPAGE *)h, i, flags); - break; - default: - EPRINT((dbenv, - "Page %lu: unknown page type %lu", - (u_long)i, (u_long)TYPE(h))); - isbad = 1; - break; - } - - /* - * Set up error return. - */ - if (ret == DB_VERIFY_BAD) - isbad = 1; - else if (ret != 0) - goto err; - - /* - * Provide feedback to the application about our - * progress. The range 0-50% comes from the fact - * that this is the first of two passes through the - * database (front-to-back, then top-to-bottom). - */ - if (dbp->db_feedback != NULL) - dbp->db_feedback(dbp, DB_VERIFY, - (int)((i + 1) * 50 / (vdp->last_pgno + 1))); - } - - /* - * Just as with the page get, bail if and only if we're - * not salvaging. - */ - if ((t_ret = __memp_fput(mpf, h, 0)) != 0) { - if (ret == 0) - ret = t_ret; - if (!LF_ISSET(DB_SALVAGE)) - return (ret); - } - } - - /* - * If we've seen a Queue metadata page, we may need to walk Queue - * extent pages that won't show up between 0 and vdp->last_pgno. 
- */ - if (F_ISSET(vdp, VRFY_QMETA_SET) && (t_ret = - __qam_vrfy_walkqueue(dbp, vdp, handle, callback, flags)) != 0) { - if (ret == 0) - ret = t_ret; - if (t_ret == DB_VERIFY_BAD) - isbad = 1; - else if (!LF_ISSET(DB_SALVAGE)) - return (ret); - } - - if (0) { -err: if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0) - return (ret == 0 ? t_ret : ret); - } - - return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret); -} - -/* - * __db_vrfy_structure-- - * After a beginning-to-end walk through the database has been - * completed, put together the information that has been collected - * to verify the overall database structure. - * - * Should only be called if we want to do a database verification, - * i.e. if DB_SALVAGE is not set. - */ -static int -__db_vrfy_structure(dbp, vdp, dbname, meta_pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - const char *dbname; - db_pgno_t meta_pgno; - u_int32_t flags; -{ - DB *pgset; - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - db_pgno_t i; - int ret, isbad, hassubs, p; - - isbad = 0; - pip = NULL; - dbenv = dbp->dbenv; - pgset = vdp->pgset; - - /* - * Providing feedback here is tricky; in most situations, - * we fetch each page one more time, but we do so in a top-down - * order that depends on the access method. Worse, we do this - * recursively in btree, such that on any call where we're traversing - * a subtree we don't know where that subtree is in the whole database; - * worse still, any given database may be one of several subdbs. - * - * The solution is to decrement a counter vdp->pgs_remaining each time - * we verify (and call feedback on) a page. We may over- or - * under-count, but the structure feedback function will ensure that we - * never give a percentage under 50 or over 100. (The first pass - * covered the range 0-50%.) - */ - if (dbp->db_feedback != NULL) - vdp->pgs_remaining = vdp->last_pgno + 1; - - /* - * Call the appropriate function to downwards-traverse the db type. - */ - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - if ((ret = __bam_vrfy_structure(dbp, vdp, 0, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - - /* - * If we have subdatabases and we know that the database is, - * thus far, sound, it's safe to walk the tree of subdatabases. - * Do so, and verify the structure of the databases within. - */ - if ((ret = __db_vrfy_getpageinfo(vdp, 0, &pip)) != 0) - goto err; - hassubs = F_ISSET(pip, VRFY_HAS_SUBDBS) ? 1 : 0; - if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) - goto err; - pip = NULL; - - if (isbad == 0 && hassubs) - if ((ret = - __db_vrfy_subdbs(dbp, vdp, dbname, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - break; - case DB_HASH: - if ((ret = __ham_vrfy_structure(dbp, vdp, 0, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - break; - case DB_QUEUE: - if ((ret = __qam_vrfy_structure(dbp, vdp, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - } - - /* - * Queue pages may be unreferenced and totally zeroed, if - * they're empty; queue doesn't have much structure, so - * this is unlikely to be wrong in any troublesome sense. - * Skip to "err". - */ - goto err; - case DB_UNKNOWN: - default: - /* This should only happen if the verifier is somehow broken. */ - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - - /* Walk free list. 
*/ - if ((ret = - __db_vrfy_freelist(dbp, vdp, meta_pgno, flags)) == DB_VERIFY_BAD) - isbad = 1; - - /* - * If structure checks up until now have failed, it's likely that - * checking what pages have been missed will result in oodles of - * extraneous error messages being EPRINTed. Skip to the end - * if this is the case; we're going to be printing at least one - * error anyway, and probably all the more salient ones. - */ - if (ret != 0 || isbad == 1) - goto err; - - /* - * Make sure no page has been missed and that no page is still marked - * "all zeroes" (only certain hash pages can be, and they're unmarked - * in __ham_vrfy_structure). - */ - for (i = 0; i < vdp->last_pgno + 1; i++) { - if ((ret = __db_vrfy_getpageinfo(vdp, i, &pip)) != 0) - goto err; - if ((ret = __db_vrfy_pgset_get(pgset, i, &p)) != 0) - goto err; - if (pip->type == P_OVERFLOW) { - if ((u_int32_t)p != pip->refcount) { - EPRINT((dbenv, - "Page %lu: overflow refcount %lu, referenced %lu times", - (u_long)i, - (u_long)pip->refcount, (u_long)p)); - isbad = 1; - } - } else if (p == 0 && LF_ISSET(DB_UNREF)) { - EPRINT((dbenv, - "Page %lu: unreferenced page", (u_long)i)); - isbad = 1; - } - - if (F_ISSET(pip, VRFY_IS_ALLZEROES)) { - EPRINT((dbenv, - "Page %lu: totally zeroed page", (u_long)i)); - isbad = 1; - } - if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) - goto err; - pip = NULL; - } - -err: if (pip != NULL) - (void)__db_vrfy_putpageinfo(dbenv, vdp, pip); - - return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret); -} - -/* - * __db_is_valid_pagetype - */ -static int -__db_is_valid_pagetype(type) - u_int32_t type; -{ - switch (type) { - case P_INVALID: /* Order matches ordinal value. */ - case P_HASH: - case P_IBTREE: - case P_IRECNO: - case P_LBTREE: - case P_LRECNO: - case P_OVERFLOW: - case P_HASHMETA: - case P_BTREEMETA: - case P_QAMMETA: - case P_QAMDATA: - case P_LDUP: - return (1); - default: - break; - } - return (0); -} - -/* - * __db_is_valid_magicno - */ -static int -__db_is_valid_magicno(magic, typep) - u_int32_t magic; - DBTYPE *typep; -{ - switch (magic) { - case DB_BTREEMAGIC: - *typep = DB_BTREE; - return (1); - case DB_HASHMAGIC: - *typep = DB_HASH; - return (1); - case DB_QAMMAGIC: - *typep = DB_QUEUE; - return (1); - default: - break; - } - *typep = DB_UNKNOWN; - return (0); -} - -/* - * __db_vrfy_common -- - * Verify info common to all page types. - * - * PUBLIC: int __db_vrfy_common - * PUBLIC: __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t)); - */ -int -__db_vrfy_common(dbp, vdp, h, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - u_int32_t flags; -{ - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - int ret, t_ret; - u_int8_t *p; - - dbenv = dbp->dbenv; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - pip->pgno = pgno; - F_CLR(pip, VRFY_IS_ALLZEROES); - - /* - * Hash expands the table by leaving some pages between the - * old last and the new last totally zeroed. Its pgin function - * should fix things, but we might not be using that (e.g. if - * we're a subdatabase). - * - * Queue will create sparse files if sparse record numbers are used. - */ - if (pgno != 0 && PGNO(h) == 0) { - for (p = (u_int8_t *)h; p < (u_int8_t *)h + dbp->pgsize; p++) - if (*p != 0) { - EPRINT((dbenv, - "Page %lu: partially zeroed page", - (u_long)pgno)); - ret = DB_VERIFY_BAD; - goto err; - } - /* - * It's totally zeroed; mark it as a hash, and we'll - * check that that makes sense structurally later. 
- * (The queue verification doesn't care, since queues - * don't really have much in the way of structure.) - */ - pip->type = P_HASH; - F_SET(pip, VRFY_IS_ALLZEROES); - ret = 0; - goto err; /* well, not really an err. */ - } - - if (PGNO(h) != pgno) { - EPRINT((dbenv, "Page %lu: bad page number %lu", - (u_long)pgno, (u_long)h->pgno)); - ret = DB_VERIFY_BAD; - } - - if (!__db_is_valid_pagetype(h->type)) { - EPRINT((dbenv, "Page %lu: bad page type %lu", - (u_long)pgno, (u_long)h->type)); - ret = DB_VERIFY_BAD; - } - pip->type = h->type; - -err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - - return (ret); -} - -/* - * __db_vrfy_invalid -- - * Verify P_INVALID page. - * (Yes, there's not much to do here.) - */ -static int -__db_vrfy_invalid(dbp, vdp, h, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - u_int32_t flags; -{ - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - int ret, t_ret; - - dbenv = dbp->dbenv; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - pip->next_pgno = pip->prev_pgno = 0; - - if (!IS_VALID_PGNO(NEXT_PGNO(h))) { - EPRINT((dbenv, "Page %lu: invalid next_pgno %lu", - (u_long)pgno, (u_long)NEXT_PGNO(h))); - ret = DB_VERIFY_BAD; - } else - pip->next_pgno = NEXT_PGNO(h); - - if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __db_vrfy_datapage -- - * Verify elements common to data pages (P_HASH, P_LBTREE, - * P_IBTREE, P_IRECNO, P_LRECNO, P_OVERFLOW, P_DUPLICATE)--i.e., - * those defined in the PAGE structure. - * - * Called from each of the per-page routines, after the - * all-page-type-common elements of pip have been verified and filled - * in. - * - * PUBLIC: int __db_vrfy_datapage - * PUBLIC: __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t)); - */ -int -__db_vrfy_datapage(dbp, vdp, h, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *h; - db_pgno_t pgno; - u_int32_t flags; -{ - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - int isbad, ret, t_ret; - - dbenv = dbp->dbenv; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - isbad = 0; - - /* - * prev_pgno and next_pgno: store for inter-page checks, - * verify that they point to actual pages and not to self. - * - * !!! - * Internal btree pages do not maintain these fields (indeed, - * they overload them). Skip. - */ - if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) { - if (!IS_VALID_PGNO(PREV_PGNO(h)) || PREV_PGNO(h) == pip->pgno) { - isbad = 1; - EPRINT((dbenv, "Page %lu: invalid prev_pgno %lu", - (u_long)pip->pgno, (u_long)PREV_PGNO(h))); - } - if (!IS_VALID_PGNO(NEXT_PGNO(h)) || NEXT_PGNO(h) == pip->pgno) { - isbad = 1; - EPRINT((dbenv, "Page %lu: invalid next_pgno %lu", - (u_long)pip->pgno, (u_long)NEXT_PGNO(h))); - } - pip->prev_pgno = PREV_PGNO(h); - pip->next_pgno = NEXT_PGNO(h); - } - - /* - * Verify the number of entries on the page. - * There is no good way to determine if this is accurate; the - * best we can do is verify that it's not more than can, in theory, - * fit on the page. Then, we make sure there are at least - * this many valid elements in inp[], and hope that this catches - * most cases. - */ - if (TYPE(h) != P_OVERFLOW) { - if (BKEYDATA_PSIZE(0) * NUM_ENT(h) > dbp->pgsize) { - isbad = 1; - EPRINT((dbenv, "Page %lu: too many entries: %lu", - (u_long)pgno, (u_long)NUM_ENT(h))); - } - pip->entries = NUM_ENT(h); - } - - /* - * btree level. 
Should be zero unless we're a btree; - * if we are a btree, should be between LEAFLEVEL and MAXBTREELEVEL, - * and we need to save it off. - */ - switch (TYPE(h)) { - case P_IBTREE: - case P_IRECNO: - if (LEVEL(h) < LEAFLEVEL + 1) { - isbad = 1; - EPRINT((dbenv, "Page %lu: bad btree level %lu", - (u_long)pgno, (u_long)LEVEL(h))); - } - pip->bt_level = LEVEL(h); - break; - case P_LBTREE: - case P_LDUP: - case P_LRECNO: - if (LEVEL(h) != LEAFLEVEL) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: btree leaf page has incorrect level %lu", - (u_long)pgno, (u_long)LEVEL(h))); - } - break; - default: - if (LEVEL(h) != 0) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: nonzero level %lu in non-btree database", - (u_long)pgno, (u_long)LEVEL(h))); - } - break; - } - - /* - * Even though inp[] occurs in all PAGEs, we look at it in the - * access-method-specific code, since btree and hash treat - * item lengths very differently, and one of the most important - * things we want to verify is that the data--as specified - * by offset and length--cover the right part of the page - * without overlaps, gaps, or violations of the page boundary. - */ - if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -/* - * __db_vrfy_meta-- - * Verify the access-method common parts of a meta page, using - * normal mpool routines. - * - * PUBLIC: int __db_vrfy_meta - * PUBLIC: __P((DB *, VRFY_DBINFO *, DBMETA *, db_pgno_t, u_int32_t)); - */ -int -__db_vrfy_meta(dbp, vdp, meta, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - DBMETA *meta; - db_pgno_t pgno; - u_int32_t flags; -{ - DB_ENV *dbenv; - DBTYPE dbtype, magtype; - VRFY_PAGEINFO *pip; - int isbad, ret, t_ret; - - isbad = 0; - dbenv = dbp->dbenv; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - /* type plausible for a meta page */ - switch (meta->type) { - case P_BTREEMETA: - dbtype = DB_BTREE; - break; - case P_HASHMETA: - dbtype = DB_HASH; - break; - case P_QAMMETA: - dbtype = DB_QUEUE; - break; - default: - /* The verifier should never let us get here. */ - DB_ASSERT(0); - ret = EINVAL; - goto err; - } - - /* magic number valid */ - if (!__db_is_valid_magicno(meta->magic, &magtype)) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: invalid magic number", (u_long)pgno)); - } - if (magtype != dbtype) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: magic number does not match database type", - (u_long)pgno)); - } - - /* version */ - if ((dbtype == DB_BTREE && - (meta->version > DB_BTREEVERSION || - meta->version < DB_BTREEOLDVER)) || - (dbtype == DB_HASH && - (meta->version > DB_HASHVERSION || - meta->version < DB_HASHOLDVER)) || - (dbtype == DB_QUEUE && - (meta->version > DB_QAMVERSION || - meta->version < DB_QAMOLDVER))) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: unsupported database version %lu; extraneous errors may result", - (u_long)pgno, (u_long)meta->version)); - } - - /* pagesize */ - if (meta->pagesize != dbp->pgsize) { - isbad = 1; - EPRINT((dbenv, "Page %lu: invalid pagesize %lu", - (u_long)pgno, (u_long)meta->pagesize)); - } - - /* Flags */ - if (meta->metaflags != 0) { - if (meta->metaflags == DBMETA_CHKSUM) - F_SET(pip, VRFY_HAS_CHKSUM); - else { - isbad = 1; - EPRINT((dbenv, - "Page %lu: bad meta-data flags value %#lx", - (u_long)PGNO_BASE_MD, (u_long)meta->metaflags)); - } - } - - /* - * Free list. - * - * If this is not the main, master-database meta page, it - * should not have a free list. 
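The btree level rules applied above come down to three cases: internal pages must sit strictly above the leaf level, every leaf flavour must be exactly at the leaf level, and non-btree pages carry no level at all. A compact sketch of the same rule, with hypothetical constants standing in for LEAFLEVEL and the P_* page types:

/* Hypothetical stand-ins for the page-type constants and LEAFLEVEL. */
enum pagetype { PT_IBTREE, PT_IRECNO, PT_LBTREE, PT_LDUP, PT_LRECNO, PT_OTHER };
#define LEAF_LEVEL 1

/* Return 0 if "level" is consistent with the page type, -1 otherwise. */
static int
check_level(enum pagetype type, unsigned level)
{
        switch (type) {
        case PT_IBTREE:
        case PT_IRECNO:         /* internal pages sit above the leaves */
                return (level >= LEAF_LEVEL + 1 ? 0 : -1);
        case PT_LBTREE:
        case PT_LDUP:
        case PT_LRECNO:         /* all leaf flavours are exactly leaf level */
                return (level == LEAF_LEVEL ? 0 : -1);
        default:                /* non-btree pages carry no level */
                return (level == 0 ? 0 : -1);
        }
}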
- */ - if (pgno != PGNO_BASE_MD && meta->free != PGNO_INVALID) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: nonempty free list on subdatabase metadata page", - (u_long)pgno)); - } - - /* Can correctly be PGNO_INVALID--that's just the end of the list. */ - if (meta->free != PGNO_INVALID && IS_VALID_PGNO(meta->free)) - pip->free = meta->free; - else if (!IS_VALID_PGNO(meta->free)) { - isbad = 1; - EPRINT((dbenv, - "Page %lu: nonsensical free list pgno %lu", - (u_long)pgno, (u_long)meta->free)); - } - - /* - * We have now verified the common fields of the metadata page. - * Clear the flag that told us they had been incompletely checked. - */ - F_CLR(pip, VRFY_INCOMPLETE); - -err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -/* - * __db_vrfy_freelist -- - * Walk free list, checking off pages and verifying absence of - * loops. - */ -static int -__db_vrfy_freelist(dbp, vdp, meta, flags) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t meta; - u_int32_t flags; -{ - DB *pgset; - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - db_pgno_t cur_pgno, next_pgno; - int p, ret, t_ret; - - pgset = vdp->pgset; - DB_ASSERT(pgset != NULL); - dbenv = dbp->dbenv; - - if ((ret = __db_vrfy_getpageinfo(vdp, meta, &pip)) != 0) - return (ret); - for (next_pgno = pip->free; - next_pgno != PGNO_INVALID; next_pgno = pip->next_pgno) { - cur_pgno = pip->pgno; - if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) - return (ret); - - /* This shouldn't happen, but just in case. */ - if (!IS_VALID_PGNO(next_pgno)) { - EPRINT((dbenv, - "Page %lu: invalid next_pgno %lu on free list page", - (u_long)cur_pgno, (u_long)next_pgno)); - return (DB_VERIFY_BAD); - } - - /* Detect cycles. */ - if ((ret = __db_vrfy_pgset_get(pgset, next_pgno, &p)) != 0) - return (ret); - if (p != 0) { - EPRINT((dbenv, - "Page %lu: page %lu encountered a second time on free list", - (u_long)cur_pgno, (u_long)next_pgno)); - return (DB_VERIFY_BAD); - } - if ((ret = __db_vrfy_pgset_inc(pgset, next_pgno)) != 0) - return (ret); - - if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0) - return (ret); - - if (pip->type != P_INVALID) { - EPRINT((dbenv, - "Page %lu: non-invalid page %lu on free list", - (u_long)cur_pgno, (u_long)next_pgno)); - ret = DB_VERIFY_BAD; /* unsafe to continue */ - break; - } - } - - if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) - ret = t_ret; - return (ret); -} - -/* - * __db_vrfy_subdbs -- - * Walk the known-safe master database of subdbs with a cursor, - * verifying the structure of each subdatabase we encounter. 
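The free-list walker above needs to catch exactly two failures: a link that points outside the file and a page that turns up twice (a cycle or a doubly-freed page), and it does so by checking pages off in a visited set as it goes. A self-contained sketch of that walk over an in-memory next-pointer array, with PGNO_END standing in for the end-of-list marker:

#include <stdlib.h>

#define PGNO_END ((unsigned)-1)         /* stand-in for PGNO_INVALID */

/*
 * Walk a free list expressed as next[] links starting at "head".
 * Return 0 for a clean list, -1 if a link points outside the file or a
 * page is encountered a second time -- the two cases reported above.
 */
static int
check_freelist(const unsigned *next, unsigned npages, unsigned head)
{
        unsigned char *seen;
        unsigned pg;
        int ret = 0;

        if ((seen = calloc(npages, 1)) == NULL)
                return (-1);
        for (pg = head; pg != PGNO_END; pg = next[pg]) {
                if (pg >= npages || seen[pg]) {   /* bad link or revisit */
                        ret = -1;
                        break;
                }
                seen[pg] = 1;
        }
        free(seen);
        return (ret);
}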
- */ -static int -__db_vrfy_subdbs(dbp, vdp, dbname, flags) - DB *dbp; - VRFY_DBINFO *vdp; - const char *dbname; - u_int32_t flags; -{ - DB *mdbp; - DBC *dbc; - DBT key, data; - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - db_pgno_t meta_pgno; - int ret, t_ret, isbad; - u_int8_t type; - - isbad = 0; - dbc = NULL; - dbenv = dbp->dbenv; - - if ((ret = - __db_master_open(dbp, NULL, dbname, DB_RDONLY, 0, &mdbp)) != 0) - return (ret); - - if ((ret = __db_cursor_int(mdbp, - NULL, DB_BTREE, PGNO_INVALID, 0, DB_LOCK_INVALIDID, &dbc)) != 0) - goto err; - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - while ((ret = __db_c_get(dbc, &key, &data, DB_NEXT)) == 0) { - if (data.size != sizeof(db_pgno_t)) { - EPRINT((dbenv, - "Subdatabase entry not page-number size")); - isbad = 1; - goto err; - } - memcpy(&meta_pgno, data.data, data.size); - /* - * Subdatabase meta pgnos are stored in network byte - * order for cross-endian compatibility. Swap if appropriate. - */ - DB_NTOHL(&meta_pgno); - if (meta_pgno == PGNO_INVALID || meta_pgno > vdp->last_pgno) { - EPRINT((dbenv, - "Subdatabase entry references invalid page %lu", - (u_long)meta_pgno)); - isbad = 1; - goto err; - } - if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0) - goto err; - type = pip->type; - if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) - goto err; - switch (type) { - case P_BTREEMETA: - if ((ret = __bam_vrfy_structure( - dbp, vdp, meta_pgno, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - break; - case P_HASHMETA: - if ((ret = __ham_vrfy_structure( - dbp, vdp, meta_pgno, flags)) != 0) { - if (ret == DB_VERIFY_BAD) - isbad = 1; - else - goto err; - } - break; - case P_QAMMETA: - default: - EPRINT((dbenv, - "Subdatabase entry references page %lu of invalid type %lu", - (u_long)meta_pgno, (u_long)type)); - ret = DB_VERIFY_BAD; - goto err; - } - } - - if (ret == DB_NOTFOUND) - ret = 0; - -err: if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - if ((t_ret = __db_close(mdbp, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - - return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret); -} - -/* - * __db_vrfy_struct_feedback -- - * Provide feedback during top-down database structure traversal. - * (See comment at the beginning of __db_vrfy_structure.) - * - * PUBLIC: void __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *)); - */ -void -__db_vrfy_struct_feedback(dbp, vdp) - DB *dbp; - VRFY_DBINFO *vdp; -{ - int progress; - - if (dbp->db_feedback == NULL) - return; - - if (vdp->pgs_remaining > 0) - vdp->pgs_remaining--; - - /* Don't allow a feedback call of 100 until we're really done. */ - progress = 100 - (int)(vdp->pgs_remaining * 50 / (vdp->last_pgno + 1)); - dbp->db_feedback(dbp, DB_VERIFY, progress == 100 ? 99 : progress); -} - -/* - * __db_vrfy_orderchkonly -- - * Do an sort-order/hashing check on a known-otherwise-good subdb. - */ -static int -__db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags) - DB *dbp; - VRFY_DBINFO *vdp; - const char *name, *subdb; - u_int32_t flags; -{ - BTMETA *btmeta; - DB *mdbp, *pgset; - DBC *pgsc; - DBT key, data; - DB_ENV *dbenv; - DB_MPOOLFILE *mpf; - HASH *h_internal; - HMETA *hmeta; - PAGE *h, *currpg; - db_pgno_t meta_pgno, p, pgno; - u_int32_t bucket; - int t_ret, ret; - - pgset = NULL; - pgsc = NULL; - dbenv = dbp->dbenv; - mpf = dbp->mpf; - currpg = h = NULL; - - LF_CLR(DB_NOORDERCHK); - - /* Open the master database and get the meta_pgno for the subdb. 
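Both the subdatabase walk above and the order-check pass that follows pull the subdatabase's meta page number out of a master-database entry and byte-swap it, because the value is stored in network (big-endian) order for cross-endian portability. A self-contained sketch of that decode step, using the standard ntohl() to perform the conversion the DB_NTOHL macro does in place; the function name is illustrative:

#include <arpa/inet.h>          /* ntohl() */
#include <stdint.h>
#include <string.h>

/*
 * Decode a meta page number from the raw bytes of a master-database
 * entry.  The entry must be exactly page-number sized; the stored value
 * is big-endian and is converted to host order before use.
 */
static int
decode_meta_pgno(const void *data, size_t size, uint32_t *pgnop)
{
        uint32_t raw;

        if (size != sizeof(raw))
                return (-1);            /* not a plausible subdatabase entry */
        memcpy(&raw, data, sizeof(raw));
        *pgnop = ntohl(raw);
        return (0);
}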
*/ - if ((ret = __db_master_open(dbp, NULL, name, DB_RDONLY, 0, &mdbp)) != 0) - goto err; - - memset(&key, 0, sizeof(key)); - key.data = (void *)subdb; - key.size = (u_int32_t)strlen(subdb); - memset(&data, 0, sizeof(data)); - if ((ret = __db_get(mdbp, NULL, &key, &data, 0)) != 0) - goto err; - - if (data.size != sizeof(db_pgno_t)) { - EPRINT((dbenv, "Subdatabase entry of invalid size")); - ret = DB_VERIFY_BAD; - goto err; - } - - memcpy(&meta_pgno, data.data, data.size); - - /* - * Subdatabase meta pgnos are stored in network byte - * order for cross-endian compatibility. Swap if appropriate. - */ - DB_NTOHL(&meta_pgno); - - if ((ret = __memp_fget(mpf, &meta_pgno, 0, &h)) != 0) - goto err; - - if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0) - goto err; - - switch (TYPE(h)) { - case P_BTREEMETA: - btmeta = (BTMETA *)h; - if (F_ISSET(&btmeta->dbmeta, BTM_RECNO)) { - /* Recnos have no order to check. */ - ret = 0; - goto err; - } - if ((ret = - __db_meta2pgset(dbp, vdp, meta_pgno, flags, pgset)) != 0) - goto err; - if ((ret = __db_cursor_int(pgset, NULL, dbp->type, - PGNO_INVALID, 0, DB_LOCK_INVALIDID, &pgsc)) != 0) - goto err; - while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) { - if ((ret = __memp_fget(mpf, &p, 0, &currpg)) != 0) - goto err; - if ((ret = __bam_vrfy_itemorder(dbp, - NULL, currpg, p, NUM_ENT(currpg), 1, - F_ISSET(&btmeta->dbmeta, BTM_DUP), flags)) != 0) - goto err; - if ((ret = __memp_fput(mpf, currpg, 0)) != 0) - goto err; - currpg = NULL; - } - - /* - * The normal exit condition for the loop above is DB_NOTFOUND. - * If we see that, zero it and continue on to cleanup. - * Otherwise, it's a real error and will be returned. - */ - if (ret == DB_NOTFOUND) - ret = 0; - break; - case P_HASHMETA: - hmeta = (HMETA *)h; - h_internal = (HASH *)dbp->h_internal; - /* - * Make sure h_charkey is right. - */ - if (h_internal == NULL) { - EPRINT((dbenv, - "Page %lu: DB->h_internal field is NULL", - (u_long)meta_pgno)); - ret = DB_VERIFY_BAD; - goto err; - } - if (h_internal->h_hash == NULL) - h_internal->h_hash = hmeta->dbmeta.version < 5 - ? __ham_func4 : __ham_func5; - if (hmeta->h_charkey != - h_internal->h_hash(dbp, CHARKEY, sizeof(CHARKEY))) { - EPRINT((dbenv, - "Page %lu: incorrect hash function for database", - (u_long)meta_pgno)); - ret = DB_VERIFY_BAD; - goto err; - } - - /* - * Foreach bucket, verify hashing on each page in the - * corresponding chain of pages. 
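The per-bucket pass described here amounts to a membership test: every key stored on a page that belongs to bucket b must be mapped back to b by the configured hash function. A simplified sketch of that test over an in-memory bucket -- simplified in that it uses a plain modulo, whereas the real linear-hashing code derives the bucket from low-order hash bits via the meta page's max_bucket and spares tables:

#include <stddef.h>
#include <stdint.h>

/* Hash callback, analogous in spirit to the h_hash method used above. */
typedef uint32_t (*hash_fn)(const void *key, size_t len);

/*
 * Check that every key in keys[]/lens[] hashes into "bucket" out of
 * "nbuckets".  Return the index of the first offending key, or -1 if
 * all keys belong in this chain.
 */
static long
check_bucket(hash_fn h, uint32_t nbuckets, uint32_t bucket,
    const void *const *keys, const size_t *lens, size_t nkeys)
{
        size_t i;

        for (i = 0; i < nkeys; i++)
                if (h(keys[i], lens[i]) % nbuckets != bucket)
                        return ((long)i);       /* key is in the wrong chain */
        return (-1);
}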
- */ - for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) { - pgno = BS_TO_PAGE(bucket, hmeta->spares); - while (pgno != PGNO_INVALID) { - if ((ret = __memp_fget(mpf, - &pgno, 0, &currpg)) != 0) - goto err; - if ((ret = __ham_vrfy_hashing(dbp, - NUM_ENT(currpg), hmeta, bucket, pgno, - flags, h_internal->h_hash)) != 0) - goto err; - pgno = NEXT_PGNO(currpg); - if ((ret = __memp_fput(mpf, currpg, 0)) != 0) - goto err; - currpg = NULL; - } - } - break; - default: - EPRINT((dbenv, "Page %lu: database metapage of bad type %lu", - (u_long)meta_pgno, (u_long)TYPE(h))); - ret = DB_VERIFY_BAD; - break; - } - -err: if (pgsc != NULL && (t_ret = __db_c_close(pgsc)) != 0 && ret == 0) - ret = t_ret; - if (pgset != NULL && - (t_ret = __db_close(pgset, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0) - ret = t_ret; - if (currpg != NULL && (t_ret = __memp_fput(mpf, currpg, 0)) != 0) - ret = t_ret; - if ((t_ret = __db_close(mdbp, NULL, 0)) != 0) - ret = t_ret; - return (ret); -} - -/* - * __db_salvage -- - * Walk through a page, salvaging all likely or plausible (w/ - * DB_AGGRESSIVE) key/data pairs. - * - * PUBLIC: int __db_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, - * PUBLIC: PAGE *, void *, int (*)(void *, const void *), u_int32_t)); - */ -int -__db_salvage(dbp, vdp, pgno, h, handle, callback, flags) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t pgno; - PAGE *h; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - int keyflag, ret, t_ret; - - DB_ASSERT(LF_ISSET(DB_SALVAGE)); - - dbenv = dbp->dbenv; - - /* - * !!! - * We dump record numbers when salvaging Queue databases, but not for - * immutable Recno databases. The problem is we can't figure out the - * record number from the database page in the Recno case, while the - * offset in the file is sufficient for Queue. - */ - keyflag = 0; - - /* If we got this page in the subdb pass, we can safely skip it. */ - if (__db_salvage_isdone(vdp, pgno)) - return (0); - - switch (TYPE(h)) { - case P_HASHMETA: - ret = __ham_vrfy_meta(dbp, vdp, (HMETA *)h, pgno, flags); - break; - case P_BTREEMETA: - ret = __bam_vrfy_meta(dbp, vdp, (BTMETA *)h, pgno, flags); - break; - case P_QAMMETA: - keyflag = 1; - ret = __qam_vrfy_meta(dbp, vdp, (QMETA *)h, pgno, flags); - break; - case P_HASH: - return (__ham_salvage( - dbp, vdp, pgno, h, handle, callback, flags)); - case P_LBTREE: - return (__bam_salvage(dbp, - vdp, pgno, P_LBTREE, h, handle, callback, NULL, flags)); - case P_LDUP: - return (__db_salvage_markneeded(vdp, pgno, SALVAGE_LDUP)); - case P_OVERFLOW: - return (__db_salvage_markneeded(vdp, pgno, SALVAGE_OVERFLOW)); - case P_LRECNO: - /* - * Recnos are tricky -- they may represent dup pages, or - * they may be subdatabase/regular database pages in their - * own right. If the former, they need to be printed with a - * key, preferably when we hit the corresponding datum in - * a btree/hash page. If the latter, there is no key. - * - * If a database is sufficiently frotzed, we're not going - * to be able to get this right, so we best-guess: just - * mark it needed now, and if we're really a normal recno - * database page, the "unknowns" pass will pick us up. - */ - return (__db_salvage_markneeded(vdp, pgno, SALVAGE_LRECNO)); - case P_QAMDATA: - return (__qam_salvage(dbp, - vdp, pgno, h, handle, callback, flags)); - case P_IBTREE: - case P_INVALID: - case P_IRECNO: - case __P_DUPLICATE: - default: - /* XXX: Should we be more aggressive here? 
*/ - return (0); - } - if (ret != 0) - return (ret); - - /* - * We have to display the dump header if it's a metadata page. It's - * our last chance as the page was marked "seen" in the vrfy routine, - * and we won't see the page again. We don't display headers for - * the first database in a multi-database file, that database simply - * contains a list of subdatabases. - */ - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - if (!F_ISSET(pip, VRFY_HAS_SUBDBS)) - ret = __db_prheader( - dbp, NULL, 0, keyflag, handle, callback, vdp, pgno); - if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __db_salvage_unknowns -- - * Walk through the salvager database, printing with key "UNKNOWN" - * any pages we haven't dealt with. - */ -static int -__db_salvage_unknowns(dbp, vdp, handle, callback, flags) - DB *dbp; - VRFY_DBINFO *vdp; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - DBC *dbc; - DBT unkdbt, key, *dbt; - DB_ENV *dbenv; - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t pgno; - u_int32_t pgtype; - int ret, t_ret; - void *ovflbuf; - - dbc = NULL; - dbenv = dbp->dbenv; - mpf = dbp->mpf; - - memset(&unkdbt, 0, sizeof(DBT)); - unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1; - unkdbt.data = "UNKNOWN"; - - if ((ret = __os_malloc(dbenv, dbp->pgsize, &ovflbuf)) != 0) - return (ret); - - /* - * We make two passes -- in the first pass, skip SALVAGE_OVERFLOW - * pages, because they may be referenced by the standard database - * pages that we're resolving. - */ - while ((t_ret = - __db_salvage_getnext(vdp, &dbc, &pgno, &pgtype, 1)) == 0) { - if ((t_ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) { - if (ret == 0) - ret = t_ret; - continue; - } - - dbt = NULL; - switch (pgtype) { - case SALVAGE_LDUP: - case SALVAGE_LRECNODUP: - dbt = &unkdbt; - /* FALLTHROUGH */ - case SALVAGE_LBTREE: - case SALVAGE_LRECNO: - if ((t_ret = __bam_salvage(dbp, vdp, pgno, pgtype, - h, handle, callback, dbt, flags)) != 0 && ret == 0) - ret = t_ret; - break; - case SALVAGE_OVERFLOW: - DB_ASSERT(0); /* Shouldn't ever happen. */ - break; - case SALVAGE_HASH: - if ((t_ret = __ham_salvage(dbp, vdp, - pgno, h, handle, callback, flags)) != 0 && ret == 0) - ret = t_ret; - break; - case SALVAGE_INVALID: - case SALVAGE_IGNORE: - default: - /* - * Shouldn't happen, but if it does, just do what the - * nice man says. - */ - DB_ASSERT(0); - break; - } - if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - } - - /* We should have reached the end of the database. */ - if (t_ret == DB_NOTFOUND) - t_ret = 0; - if (t_ret != 0 && ret == 0) - ret = t_ret; - - /* Re-open the cursor so we traverse the database again. */ - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - dbc = NULL; - - /* Now, deal with any remaining overflow pages. */ - while ((t_ret = - __db_salvage_getnext(vdp, &dbc, &pgno, &pgtype, 0)) == 0) { - if ((t_ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) { - if (ret == 0) - ret = t_ret; - continue; - } - - switch (pgtype) { - case SALVAGE_OVERFLOW: - /* - * XXX: - * This may generate multiple "UNKNOWN" keys in - * a database with no dups. What to do? 
- */ - if ((t_ret = __db_safe_goff(dbp, - vdp, pgno, &key, &ovflbuf, flags)) != 0 || - ((vdp->type == DB_BTREE || vdp->type == DB_HASH) && - (t_ret = __db_vrfy_prdbt(&unkdbt, - 0, " ", handle, callback, 0, vdp)) != 0) || - (t_ret = __db_vrfy_prdbt( - &key, 0, " ", handle, callback, 0, vdp)) != 0) - if (ret == 0) - ret = t_ret; - break; - default: - DB_ASSERT(0); /* Shouldn't ever happen. */ - break; - } - if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - } - - /* We should have reached the end of the database. */ - if (t_ret == DB_NOTFOUND) - t_ret = 0; - if (t_ret != 0 && ret == 0) - ret = t_ret; - - if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) - ret = t_ret; - - __os_free(dbenv, ovflbuf); - - return (ret); -} - -/* - * Offset of the ith inp array entry, which we can compare to the offset - * the entry stores. - */ -#define INP_OFFSET(dbp, h, i) \ - ((db_indx_t)((u_int8_t *)((P_INP(dbp,(h))) + (i)) - (u_int8_t *)(h))) - -/* - * __db_vrfy_inpitem -- - * Verify that a single entry in the inp array is sane, and update - * the high water mark and current item offset. (The former of these is - * used for state information between calls, and is required; it must - * be initialized to the pagesize before the first call.) - * - * Returns DB_VERIFY_FATAL if inp has collided with the data, - * since verification can't continue from there; returns DB_VERIFY_BAD - * if anything else is wrong. - * - * PUBLIC: int __db_vrfy_inpitem __P((DB *, PAGE *, - * PUBLIC: db_pgno_t, u_int32_t, int, u_int32_t, u_int32_t *, u_int32_t *)); - */ -int -__db_vrfy_inpitem(dbp, h, pgno, i, is_btree, flags, himarkp, offsetp) - DB *dbp; - PAGE *h; - db_pgno_t pgno; - u_int32_t i; - int is_btree; - u_int32_t flags, *himarkp, *offsetp; -{ - BKEYDATA *bk; - DB_ENV *dbenv; - db_indx_t *inp, offset, len; - - dbenv = dbp->dbenv; - - DB_ASSERT(himarkp != NULL); - inp = P_INP(dbp, h); - - /* - * Check that the inp array, which grows from the beginning of the - * page forward, has not collided with the data, which grow from the - * end of the page backward. - */ - if (inp + i >= (db_indx_t *)((u_int8_t *)h + *himarkp)) { - /* We've collided with the data. We need to bail. */ - EPRINT((dbenv, "Page %lu: entries listing %lu overlaps data", - (u_long)pgno, (u_long)i)); - return (DB_VERIFY_FATAL); - } - - offset = inp[i]; - - /* - * Check that the item offset is reasonable: it points somewhere - * after the inp array and before the end of the page. - */ - if (offset <= INP_OFFSET(dbp, h, i) || offset > dbp->pgsize) { - EPRINT((dbenv, "Page %lu: bad offset %lu at page index %lu", - (u_long)pgno, (u_long)offset, (u_long)i)); - return (DB_VERIFY_BAD); - } - - /* Update the high-water mark (what HOFFSET should be) */ - if (offset < *himarkp) - *himarkp = offset; - - if (is_btree) { - /* - * Check alignment; if it's unaligned, it's unsafe to - * manipulate this item. - */ - if (offset != DB_ALIGN(offset, sizeof(u_int32_t))) { - EPRINT((dbenv, - "Page %lu: unaligned offset %lu at page index %lu", - (u_long)pgno, (u_long)offset, (u_long)i)); - return (DB_VERIFY_BAD); - } - - /* - * Check that the item length remains on-page. - */ - bk = GET_BKEYDATA(dbp, h, i); - - /* - * We need to verify the type of the item here; - * we can't simply assume that it will be one of the - * expected three. If it's not a recognizable type, - * it can't be considered to have a verifiable - * length, so it's not possible to certify it as safe. 
- */ - switch (B_TYPE(bk->type)) { - case B_KEYDATA: - len = bk->len; - break; - case B_DUPLICATE: - case B_OVERFLOW: - len = BOVERFLOW_SIZE; - break; - default: - EPRINT((dbenv, - "Page %lu: item %lu of unrecognizable type", - (u_long)pgno, (u_long)i)); - return (DB_VERIFY_BAD); - } - - if ((size_t)(offset + len) > dbp->pgsize) { - EPRINT((dbenv, - "Page %lu: item %lu extends past page boundary", - (u_long)pgno, (u_long)i)); - return (DB_VERIFY_BAD); - } - } - - if (offsetp != NULL) - *offsetp = offset; - return (0); -} - -/* - * __db_vrfy_duptype-- - * Given a page number and a set of flags to __bam_vrfy_subtree, - * verify that the dup tree type is correct--i.e., it's a recno - * if DUPSORT is not set and a btree if it is. - * - * PUBLIC: int __db_vrfy_duptype - * PUBLIC: __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t)); - */ -int -__db_vrfy_duptype(dbp, vdp, pgno, flags) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t pgno; - u_int32_t flags; -{ - DB_ENV *dbenv; - VRFY_PAGEINFO *pip; - int ret, isbad; - - dbenv = dbp->dbenv; - isbad = 0; - - if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0) - return (ret); - - switch (pip->type) { - case P_IBTREE: - case P_LDUP: - if (!LF_ISSET(ST_DUPSORT)) { - EPRINT((dbenv, - "Page %lu: sorted duplicate set in unsorted-dup database", - (u_long)pgno)); - isbad = 1; - } - break; - case P_IRECNO: - case P_LRECNO: - if (LF_ISSET(ST_DUPSORT)) { - EPRINT((dbenv, - "Page %lu: unsorted duplicate set in sorted-dup database", - (u_long)pgno)); - isbad = 1; - } - break; - default: - /* - * If the page is entirely zeroed, its pip->type will be a lie - * (we assumed it was a hash page, as they're allowed to be - * zeroed); handle this case specially. - */ - if (F_ISSET(pip, VRFY_IS_ALLZEROES)) - ZEROPG_ERR_PRINT(dbenv, pgno, "duplicate page"); - else - EPRINT((dbenv, - "Page %lu: duplicate page of inappropriate type %lu", - (u_long)pgno, (u_long)pip->type)); - isbad = 1; - break; - } - - if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0) - return (ret); - return (isbad == 1 ? DB_VERIFY_BAD : 0); -} - -/* - * __db_salvage_duptree -- - * Attempt to salvage a given duplicate tree, given its alleged root. - * - * The key that corresponds to this dup set has been passed to us - * in DBT *key. Because data items follow keys, though, it has been - * printed once already. - * - * The basic idea here is that pgno ought to be a P_LDUP, a P_LRECNO, a - * P_IBTREE, or a P_IRECNO. If it's an internal page, use the verifier - * functions to make sure it's safe; if it's not, we simply bail and the - * data will have to be printed with no key later on. if it is safe, - * recurse on each of its children. - * - * Whether or not it's safe, if it's a leaf page, __bam_salvage it. - * - * At all times, use the DB hanging off vdp to mark and check what we've - * done, so each page gets printed exactly once and we don't get caught - * in any cycles. - * - * PUBLIC: int __db_salvage_duptree __P((DB *, VRFY_DBINFO *, db_pgno_t, - * PUBLIC: DBT *, void *, int (*)(void *, const void *), u_int32_t)); - */ -int -__db_salvage_duptree(dbp, vdp, pgno, key, handle, callback, flags) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t pgno; - DBT *key; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - DB_MPOOLFILE *mpf; - PAGE *h; - int ret, t_ret; - - mpf = dbp->mpf; - - if (pgno == PGNO_INVALID || !IS_VALID_PGNO(pgno)) - return (DB_VERIFY_BAD); - - /* We have a plausible page. Try it. 
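The index-entry checks above reduce to a handful of arithmetic invariants: the index array growing up from the front of the page must not have reached the item data growing down from the back, the stored offset must land between the entry's own position and the end of the page, and for btree items the offset must be word-aligned and the item must fit on the page. A condensed sketch of those invariants over a bare offset array, with illustrative names in place of the inp/himark machinery:

#include <stddef.h>
#include <stdint.h>

/*
 * Validate entry i of an index array that starts at byte offset
 * "index_start" on a page of "pgsize" bytes.  *himarkp is the lowest
 * item offset seen so far (the caller initializes it to pgsize).
 * Return 0 if the entry is sane, -1 otherwise.
 */
static int
check_index_entry(const uint16_t *inp, size_t index_start, size_t pgsize,
    size_t i, size_t item_len, int is_btree, size_t *himarkp)
{
        size_t entry_pos, offset;

        entry_pos = index_start + i * sizeof(inp[0]);
        if (entry_pos >= *himarkp)
                return (-1);            /* index array collided with item data */

        offset = inp[i];
        if (offset <= entry_pos || offset > pgsize)
                return (-1);            /* offset outside the legal window */

        if (offset < *himarkp)          /* track the new high-water mark */
                *himarkp = offset;

        if (is_btree) {
                if (offset % sizeof(uint32_t) != 0)
                        return (-1);    /* unaligned: unsafe to manipulate */
                if (offset + item_len > pgsize)
                        return (-1);    /* item extends past the page boundary */
        }
        return (0);
}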
*/ - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - return (ret); - - switch (TYPE(h)) { - case P_IBTREE: - case P_IRECNO: - if ((ret = __db_vrfy_common(dbp, vdp, h, pgno, flags)) != 0) - goto err; - if ((ret = __bam_vrfy(dbp, - vdp, h, pgno, flags | DB_NOORDERCHK)) != 0 || - (ret = __db_salvage_markdone(vdp, pgno)) != 0) - goto err; - /* - * We have a known-healthy internal page. Walk it. - */ - if ((ret = __bam_salvage_walkdupint(dbp, vdp, h, key, - handle, callback, flags)) != 0) - goto err; - break; - case P_LRECNO: - case P_LDUP: - if ((ret = __bam_salvage(dbp, - vdp, pgno, TYPE(h), h, handle, callback, key, flags)) != 0) - goto err; - break; - default: - ret = DB_VERIFY_BAD; - goto err; - } - -err: if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __db_salvage_subdbs -- - * Check and see if this database has subdbs; if so, try to salvage - * them independently. - */ -static int -__db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp) - DB *dbp; - VRFY_DBINFO *vdp; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; - int *hassubsp; -{ - DB *pgset; - DBC *pgsc; - DB_ENV *dbenv; - DB_MPOOLFILE *mpf; - PAGE *h; - VRFY_PAGEINFO *pip; - db_pgno_t p, meta_pgno; - int ret, t_ret; - - *hassubsp = 0; - - dbenv = dbp->dbenv; - pgset = NULL; - pgsc = NULL; - mpf = dbp->mpf; - h = NULL; - pip = NULL; - ret = 0; - - /* - * Check to make sure the page is OK and find out if it contains - * subdatabases. - */ - meta_pgno = PGNO_BASE_MD; - if ((t_ret = __memp_fget(mpf, &meta_pgno, 0, &h)) == 0 && - (t_ret = __db_vrfy_common(dbp, vdp, h, PGNO_BASE_MD, flags)) == 0 && - (t_ret = __db_salvage( - dbp, vdp, PGNO_BASE_MD, h, handle, callback, flags)) == 0 && - (t_ret = __db_vrfy_getpageinfo(vdp, 0, &pip)) == 0) - if (F_ISSET(pip, VRFY_HAS_SUBDBS)) - *hassubsp = 1; - if (pip != NULL && - (t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0) - ret = t_ret; - if (h != NULL) { - if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - h = NULL; - } - if (ret != 0 || *hassubsp == 0) - return (ret); - - /* - * We have subdbs. Try to crack them. - * - * To do so, get a set of leaf pages in the master database, and then - * walk each of the valid ones, salvaging subdbs as we go. If any - * prove invalid, just drop them; we'll pick them up on a later pass. 
- */ - if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0) - goto err; - if ((ret = __db_meta2pgset(dbp, vdp, PGNO_BASE_MD, flags, pgset)) != 0) - goto err; - if ((ret = __db_cursor(pgset, NULL, &pgsc, 0)) != 0) - goto err; - while ((t_ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) { - if ((t_ret = __memp_fget(mpf, &p, 0, &h)) == 0 && - (t_ret = __db_vrfy_common(dbp, vdp, h, p, flags)) == 0 && - (t_ret = - __bam_vrfy(dbp, vdp, h, p, flags | DB_NOORDERCHK)) == 0) - t_ret = __db_salvage_subdbpg( - dbp, vdp, h, handle, callback, flags); - if (t_ret != 0 && ret == 0) - ret = t_ret; - if (h != NULL) { - if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - h = NULL; - } - } - - if (t_ret != DB_NOTFOUND && ret == 0) - ret = t_ret; - -err: if (pgsc != NULL && (t_ret = __db_c_close(pgsc)) != 0 && ret == 0) - ret = t_ret; - if (pgset != NULL && - (t_ret = __db_close(pgset, NULL, 0)) != 0 && ret ==0) - ret = t_ret; - if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) - ret = t_ret; - return (ret); -} - -/* - * __db_salvage_subdbpg -- - * Given a known-good leaf page in the master database, salvage all - * leaf pages corresponding to each subdb. - */ -static int -__db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags) - DB *dbp; - VRFY_DBINFO *vdp; - PAGE *master; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - BKEYDATA *bkkey, *bkdata; - BOVERFLOW *bo; - DB *pgset; - DBC *pgsc; - DBT key; - DB_ENV *dbenv; - DB_MPOOLFILE *mpf; - PAGE *subpg; - db_indx_t i; - db_pgno_t meta_pgno, p; - int ret, err_ret, t_ret; - char *subdbname; - - dbenv = dbp->dbenv; - mpf = dbp->mpf; - ret = err_ret = 0; - subdbname = NULL; - - if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0) - return (ret); - - /* - * For each entry, get and salvage the set of pages - * corresponding to that entry. - */ - for (i = 0; i < NUM_ENT(master); i += P_INDX) { - bkkey = GET_BKEYDATA(dbp, master, i); - bkdata = GET_BKEYDATA(dbp, master, i + O_INDX); - - /* Get the subdatabase name. */ - if (B_TYPE(bkkey->type) == B_OVERFLOW) { - /* - * We can, in principle anyway, have a subdb - * name so long it overflows. Ick. - */ - bo = (BOVERFLOW *)bkkey; - if ((ret = __db_safe_goff(dbp, vdp, - bo->pgno, &key, &subdbname, flags)) != 0) { - err_ret = DB_VERIFY_BAD; - continue; - } - - /* Nul-terminate it. */ - if ((ret = __os_realloc(dbenv, - key.size + 1, &subdbname)) != 0) - goto err; - subdbname[key.size] = '\0'; - } else if (B_TYPE(bkkey->type) == B_KEYDATA) { - if ((ret = __os_realloc(dbenv, - bkkey->len + 1, &subdbname)) != 0) - goto err; - memcpy(subdbname, bkkey->data, bkkey->len); - subdbname[bkkey->len] = '\0'; - } - - /* Get the corresponding pgno. */ - if (bkdata->len != sizeof(db_pgno_t)) { - err_ret = DB_VERIFY_BAD; - continue; - } - memcpy(&meta_pgno, - (db_pgno_t *)bkdata->data, sizeof(db_pgno_t)); - - /* - * Subdatabase meta pgnos are stored in network byte - * order for cross-endian compatibility. Swap if appropriate. - */ - DB_NTOHL(&meta_pgno); - - /* If we can't get the subdb meta page, just skip the subdb. */ - if (!IS_VALID_PGNO(meta_pgno) || - (ret = __memp_fget(mpf, &meta_pgno, 0, &subpg)) != 0) { - err_ret = ret; - continue; - } - - /* - * Verify the subdatabase meta page. This has two functions. - * First, if it's bad, we have no choice but to skip the subdb - * and let the pages just get printed on a later pass. 
Second, - * the access-method-specific meta verification routines record - * the various state info (such as the presence of dups) - * that we need for __db_prheader(). - */ - if ((ret = - __db_vrfy_common(dbp, vdp, subpg, meta_pgno, flags)) != 0) { - err_ret = ret; - (void)__memp_fput(mpf, subpg, 0); - continue; - } - switch (TYPE(subpg)) { - case P_BTREEMETA: - if ((ret = __bam_vrfy_meta(dbp, - vdp, (BTMETA *)subpg, meta_pgno, flags)) != 0) { - err_ret = ret; - (void)__memp_fput(mpf, subpg, 0); - continue; - } - break; - case P_HASHMETA: - if ((ret = __ham_vrfy_meta(dbp, - vdp, (HMETA *)subpg, meta_pgno, flags)) != 0) { - err_ret = ret; - (void)__memp_fput(mpf, subpg, 0); - continue; - } - break; - default: - /* This isn't an appropriate page; skip this subdb. */ - err_ret = DB_VERIFY_BAD; - continue; - } - - if ((ret = __memp_fput(mpf, subpg, 0)) != 0) { - err_ret = ret; - continue; - } - - /* Print a subdatabase header. */ - if ((ret = __db_prheader(dbp, - subdbname, 0, 0, handle, callback, vdp, meta_pgno)) != 0) - goto err; - - if ((ret = __db_meta2pgset(dbp, vdp, meta_pgno, - flags, pgset)) != 0) { - err_ret = ret; - continue; - } - - if ((ret = __db_cursor(pgset, NULL, &pgsc, 0)) != 0) - goto err; - while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) { - if ((ret = __memp_fget(mpf, &p, 0, &subpg)) != 0) { - err_ret = ret; - continue; - } - if ((ret = __db_salvage(dbp, vdp, p, subpg, - handle, callback, flags)) != 0) - err_ret = ret; - if ((ret = __memp_fput(mpf, subpg, 0)) != 0) - err_ret = ret; - } - - if (ret != DB_NOTFOUND) - goto err; - - if ((ret = __db_c_close(pgsc)) != 0) - goto err; - if ((ret = __db_prfooter(handle, callback)) != 0) - goto err; - } -err: if (subdbname) - __os_free(dbenv, subdbname); - - if ((t_ret = __db_close(pgset, NULL, 0)) != 0) - ret = t_ret; - - if ((t_ret = __db_salvage_markdone(vdp, PGNO(master))) != 0) - return (t_ret); - - return ((err_ret != 0) ? err_ret : ret); -} - -/* - * __db_meta2pgset -- - * Given a known-safe meta page number, return the set of pages - * corresponding to the database it represents. Return DB_VERIFY_BAD if - * it's not a suitable meta page or is invalid. - */ -static int -__db_meta2pgset(dbp, vdp, pgno, flags, pgset) - DB *dbp; - VRFY_DBINFO *vdp; - db_pgno_t pgno; - u_int32_t flags; - DB *pgset; -{ - DB_MPOOLFILE *mpf; - PAGE *h; - int ret, t_ret; - - mpf = dbp->mpf; - - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - return (ret); - - switch (TYPE(h)) { - case P_BTREEMETA: - ret = __bam_meta2pgset(dbp, vdp, (BTMETA *)h, flags, pgset); - break; - case P_HASHMETA: - ret = __ham_meta2pgset(dbp, vdp, (HMETA *)h, flags, pgset); - break; - default: - ret = DB_VERIFY_BAD; - break; - } - - if ((t_ret = __memp_fput(mpf, h, 0)) != 0) - return (t_ret); - return (ret); -} - -/* - * __db_guesspgsize -- - * Try to guess what the pagesize is if the one on the meta page - * and the one in the db are invalid. - */ -static u_int -__db_guesspgsize(dbenv, fhp) - DB_ENV *dbenv; - DB_FH *fhp; -{ - db_pgno_t i; - size_t nr; - u_int32_t guess; - u_int8_t type; - - for (guess = DB_MAX_PGSIZE; guess >= DB_MIN_PGSIZE; guess >>= 1) { - /* - * We try to read three pages ahead after the first one - * and make sure we have plausible types for all of them. - * If the seeks fail, continue with a smaller size; - * we're probably just looking past the end of the database. - * If they succeed and the types are reasonable, also continue - * with a size smaller; we may be looking at pages N, - * 2N, and 3N for some N > 1. 
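The probing heuristic described here can be exercised without a real file handle: walk the candidate sizes from largest to smallest, peek at the page-type byte one, two and three candidate-sized strides into the image, and as soon as an implausible type turns up report the previous (doubled) candidate as the likely page size. A toy version over an in-memory buffer, with invented constants for the type-byte offset and the number of valid page types:

#include <stddef.h>

#define MIN_PGSZ        512U
#define MAX_PGSZ        65536U
#define DEFAULT_PGSZ    8192U
#define TYPE_OFFSET     25U     /* illustrative offset of the type byte   */
#define TYPE_MAX        13U     /* illustrative count of valid page types */

/*
 * Guess the page size of a database image held in buf[0..len).  A type
 * byte of 0 stands in for P_INVALID; running past the end of the buffer
 * just means the candidate is too large, so try a smaller one.
 */
static unsigned
guess_page_size(const unsigned char *buf, size_t len)
{
        unsigned guess, i;
        size_t pos;

        for (guess = MAX_PGSZ; guess >= MIN_PGSZ; guess >>= 1)
                for (i = 1; i <= 3; i++) {
                        pos = (size_t)guess * i + TYPE_OFFSET;
                        if (pos >= len)
                                break;  /* past EOF: try a smaller size */
                        if (buf[pos] == 0 || buf[pos] >= TYPE_MAX)
                                return (guess << 1);
                }
        return (DEFAULT_PGSZ);          /* nothing conclusive: use a default */
}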
- * - * As soon as we hit an invalid type, we stop and return - * our previous guess; that last one was probably the page size. - */ - for (i = 1; i <= 3; i++) { - if (__os_seek(dbenv, fhp, guess, - i, SSZ(DBMETA, type), 0, DB_OS_SEEK_SET) != 0) - break; - if (__os_read(dbenv, - fhp, &type, 1, &nr) != 0 || nr == 0) - break; - if (type == P_INVALID || type >= P_PAGETYPE_MAX) - return (guess << 1); - } - } - - /* - * If we're just totally confused--the corruption takes up most of the - * beginning pages of the database--go with the default size. - */ - return (DB_DEF_IOSIZE); -} diff --git a/storage/bdb/db/db_vrfy_stub.c b/storage/bdb/db/db_vrfy_stub.c deleted file mode 100644 index 46f0b1134e1..00000000000 --- a/storage/bdb/db/db_vrfy_stub.c +++ /dev/null @@ -1,103 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_vrfy_stub.c,v 12.1 2005/06/16 20:21:15 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef HAVE_VERIFY -#ifndef NO_SYSTEM_INCLUDES -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" -#include "dbinc/db_verify.h" - -/* - * If the library wasn't compiled with the verification support, various - * routines aren't available. Stub them here, returning an appropriate - * error. - */ - -static int __db_novrfy __P((DB_ENV *)); - -/* - * __db_novrfy -- - * Error when a Berkeley DB build doesn't include the access method. - */ -static int -__db_novrfy(dbenv) - DB_ENV *dbenv; -{ - __db_err(dbenv, - "library build did not include support for database verification"); - return (DB_OPNOTSUP); -} - -int -__db_verify_pp(dbp, file, database, outfile, flags) - DB *dbp; - const char *file, *database; - FILE *outfile; - u_int32_t flags; -{ - int ret; - - COMPQUIET(file, NULL); - COMPQUIET(database, NULL); - COMPQUIET(outfile, NULL); - COMPQUIET(flags, 0); - - ret = __db_novrfy(dbp->dbenv); - - /* The verify method is a destructor. */ - (void)__db_close(dbp, NULL, 0); - - return (ret); -} - -int -__db_verify_internal(dbp, name, subdb, handle, callback, flags) - DB *dbp; - const char *name, *subdb; - void *handle; - int (*callback) __P((void *, const void *)); - u_int32_t flags; -{ - COMPQUIET(dbp, NULL); - COMPQUIET(name, NULL); - COMPQUIET(subdb, NULL); - COMPQUIET(handle, NULL); - COMPQUIET(callback, NULL); - COMPQUIET(flags, 0); - return (0); -} - -int -__db_vrfy_getpageinfo(vdp, pgno, pipp) - VRFY_DBINFO *vdp; - db_pgno_t pgno; - VRFY_PAGEINFO **pipp; -{ - COMPQUIET(pgno, 0); - COMPQUIET(pipp, NULL); - return (__db_novrfy(vdp->pgdbp->dbenv)); -} - -int -__db_vrfy_putpageinfo(dbenv, vdp, pip) - DB_ENV *dbenv; - VRFY_DBINFO *vdp; - VRFY_PAGEINFO *pip; -{ - COMPQUIET(vdp, NULL); - COMPQUIET(pip, NULL); - return (__db_novrfy(dbenv)); -} -#endif /* !HAVE_VERIFY */ diff --git a/storage/bdb/db/db_vrfyutil.c b/storage/bdb/db/db_vrfyutil.c deleted file mode 100644 index f1508872238..00000000000 --- a/storage/bdb/db/db_vrfyutil.c +++ /dev/null @@ -1,898 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: db_vrfyutil.c,v 12.5 2005/06/16 20:21:15 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_verify.h" -#include "dbinc/db_am.h" - -static int __db_vrfy_childinc __P((DBC *, VRFY_CHILDINFO *)); -static int __db_vrfy_pageinfo_create __P((DB_ENV *, VRFY_PAGEINFO **)); - -/* - * __db_vrfy_dbinfo_create -- - * Allocate and initialize a VRFY_DBINFO structure. - * - * PUBLIC: int __db_vrfy_dbinfo_create - * PUBLIC: __P((DB_ENV *, u_int32_t, VRFY_DBINFO **)); - */ -int -__db_vrfy_dbinfo_create(dbenv, pgsize, vdpp) - DB_ENV *dbenv; - u_int32_t pgsize; - VRFY_DBINFO **vdpp; -{ - DB *cdbp, *pgdbp, *pgset; - VRFY_DBINFO *vdp; - int ret; - - vdp = NULL; - cdbp = pgdbp = pgset = NULL; - - if ((ret = __os_calloc(NULL, 1, sizeof(VRFY_DBINFO), &vdp)) != 0) - goto err; - - if ((ret = db_create(&cdbp, dbenv, 0)) != 0) - goto err; - - if ((ret = __db_set_flags(cdbp, DB_DUP)) != 0) - goto err; - - if ((ret = __db_set_pagesize(cdbp, pgsize)) != 0) - goto err; - - if ((ret = __db_open(cdbp, - NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600, PGNO_BASE_MD)) != 0) - goto err; - - if ((ret = db_create(&pgdbp, dbenv, 0)) != 0) - goto err; - - if ((ret = __db_set_pagesize(pgdbp, pgsize)) != 0) - goto err; - - if ((ret = __db_open(pgdbp, - NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600, PGNO_BASE_MD)) != 0) - goto err; - - if ((ret = __db_vrfy_pgset(dbenv, pgsize, &pgset)) != 0) - goto err; - - LIST_INIT(&vdp->subdbs); - LIST_INIT(&vdp->activepips); - - vdp->cdbp = cdbp; - vdp->pgdbp = pgdbp; - vdp->pgset = pgset; - *vdpp = vdp; - return (0); - -err: if (cdbp != NULL) - (void)__db_close(cdbp, NULL, 0); - if (pgdbp != NULL) - (void)__db_close(pgdbp, NULL, 0); - if (vdp != NULL) - __os_free(dbenv, vdp); - return (ret); -} - -/* - * __db_vrfy_dbinfo_destroy -- - * Destructor for VRFY_DBINFO. Destroys VRFY_PAGEINFOs and deallocates - * structure. - * - * PUBLIC: int __db_vrfy_dbinfo_destroy __P((DB_ENV *, VRFY_DBINFO *)); - */ -int -__db_vrfy_dbinfo_destroy(dbenv, vdp) - DB_ENV *dbenv; - VRFY_DBINFO *vdp; -{ - VRFY_CHILDINFO *c; - int t_ret, ret; - - ret = 0; - - /* - * Discard active page structures. Ideally there wouldn't be any, - * but in some error cases we may not have cleared them all out. - */ - while (LIST_FIRST(&vdp->activepips) != NULL) - if ((t_ret = __db_vrfy_putpageinfo( - dbenv, vdp, LIST_FIRST(&vdp->activepips))) != 0) { - if (ret == 0) - ret = t_ret; - break; - } - - /* Discard subdatabase list structures. */ - while ((c = LIST_FIRST(&vdp->subdbs)) != NULL) { - LIST_REMOVE(c, links); - __os_free(NULL, c); - } - - if ((t_ret = __db_close(vdp->pgdbp, NULL, 0)) != 0) - ret = t_ret; - - if ((t_ret = __db_close(vdp->cdbp, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - - if ((t_ret = __db_close(vdp->pgset, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - - if (vdp->extents != NULL) - __os_free(dbenv, vdp->extents); - __os_free(dbenv, vdp); - return (ret); -} - -/* - * __db_vrfy_getpageinfo -- - * Get a PAGEINFO structure for a given page, creating it if necessary. - * - * PUBLIC: int __db_vrfy_getpageinfo - * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, VRFY_PAGEINFO **)); - */ -int -__db_vrfy_getpageinfo(vdp, pgno, pipp) - VRFY_DBINFO *vdp; - db_pgno_t pgno; - VRFY_PAGEINFO **pipp; -{ - DBT key, data; - DB *pgdbp; - VRFY_PAGEINFO *pip; - int ret; - - /* - * We want a page info struct. There are three places to get it from, - * in decreasing order of preference: - * - * 1. vdp->activepips. 
If it's already "checked out", we're - * already using it, we return the same exact structure with a - * bumped refcount. This is necessary because this code is - * replacing array accesses, and it's common for f() to make some - * changes to a pip, and then call g() and h() which each make - * changes to the same pip. vdps are never shared between threads - * (they're never returned to the application), so this is safe. - * 2. The pgdbp. It's not in memory, but it's in the database, so - * get it, give it a refcount of 1, and stick it on activepips. - * 3. malloc. It doesn't exist yet; create it, then stick it on - * activepips. We'll put it in the database when we putpageinfo - * later. - */ - - /* Case 1. */ - for (pip = LIST_FIRST(&vdp->activepips); pip != NULL; - pip = LIST_NEXT(pip, links)) - if (pip->pgno == pgno) - /* Found it. */ - goto found; - - /* Case 2. */ - pgdbp = vdp->pgdbp; - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - F_SET(&data, DB_DBT_MALLOC); - key.data = &pgno; - key.size = sizeof(db_pgno_t); - - if ((ret = __db_get(pgdbp, NULL, &key, &data, 0)) == 0) { - /* Found it. */ - DB_ASSERT(data.size == sizeof(VRFY_PAGEINFO)); - pip = data.data; - LIST_INSERT_HEAD(&vdp->activepips, pip, links); - goto found; - } else if (ret != DB_NOTFOUND) /* Something nasty happened. */ - return (ret); - - /* Case 3 */ - if ((ret = __db_vrfy_pageinfo_create(pgdbp->dbenv, &pip)) != 0) - return (ret); - - LIST_INSERT_HEAD(&vdp->activepips, pip, links); -found: pip->pi_refcount++; - - *pipp = pip; - return (0); -} - -/* - * __db_vrfy_putpageinfo -- - * Put back a VRFY_PAGEINFO that we're done with. - * - * PUBLIC: int __db_vrfy_putpageinfo __P((DB_ENV *, - * PUBLIC: VRFY_DBINFO *, VRFY_PAGEINFO *)); - */ -int -__db_vrfy_putpageinfo(dbenv, vdp, pip) - DB_ENV *dbenv; - VRFY_DBINFO *vdp; - VRFY_PAGEINFO *pip; -{ - DBT key, data; - DB *pgdbp; - VRFY_PAGEINFO *p; - int ret; - - if (--pip->pi_refcount > 0) - return (0); - - pgdbp = vdp->pgdbp; - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - key.data = &pip->pgno; - key.size = sizeof(db_pgno_t); - data.data = pip; - data.size = sizeof(VRFY_PAGEINFO); - - if ((ret = __db_put(pgdbp, NULL, &key, &data, 0)) != 0) - return (ret); - - for (p = - LIST_FIRST(&vdp->activepips); p != NULL; p = LIST_NEXT(p, links)) - if (p == pip) - break; - if (p != NULL) - LIST_REMOVE(p, links); - - __os_ufree(dbenv, p); - return (0); -} - -/* - * __db_vrfy_pgset -- - * Create a temporary database for the storing of sets of page numbers. - * (A mapping from page number to int, used by the *_meta2pgset functions, - * as well as for keeping track of which pages the verifier has seen.) - * - * PUBLIC: int __db_vrfy_pgset __P((DB_ENV *, u_int32_t, DB **)); - */ -int -__db_vrfy_pgset(dbenv, pgsize, dbpp) - DB_ENV *dbenv; - u_int32_t pgsize; - DB **dbpp; -{ - DB *dbp; - int ret; - - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - return (ret); - if ((ret = __db_set_pagesize(dbp, pgsize)) != 0) - goto err; - if ((ret = __db_open(dbp, - NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600, PGNO_BASE_MD)) == 0) - *dbpp = dbp; - else -err: (void)__db_close(dbp, NULL, 0); - - return (ret); -} - -/* - * __db_vrfy_pgset_get -- - * Get the value associated in a page set with a given pgno. Return - * a 0 value (and succeed) if we've never heard of this page. 
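The lookup order spelled out above -- live list first, then the backing database, then a fresh allocation -- is a refcounted lookaside cache: nested calls asking for the same page get the same structure back with its reference count bumped, and the structure is only written back and released when the count drops to zero. A stripped-down sketch of that pattern with a singly linked active list and no backing store:

#include <stdlib.h>

/* Minimal stand-in for VRFY_PAGEINFO: a page number plus a refcount. */
struct pageinfo {
        unsigned long pgno;
        int refcount;
        struct pageinfo *next;          /* active-list link */
};

/* Check out a pageinfo: reuse a live one if present, else allocate. */
static struct pageinfo *
pageinfo_get(struct pageinfo **active, unsigned long pgno)
{
        struct pageinfo *p;

        for (p = *active; p != NULL; p = p->next)
                if (p->pgno == pgno) {
                        p->refcount++;  /* same struct, bumped count */
                        return (p);
                }
        if ((p = calloc(1, sizeof(*p))) == NULL)
                return (NULL);
        p->pgno = pgno;
        p->refcount = 1;
        p->next = *active;              /* push onto the active list */
        *active = p;
        return (p);
}

/* Check a pageinfo back in; free it once the last reference is gone. */
static void
pageinfo_put(struct pageinfo **active, struct pageinfo *pip)
{
        struct pageinfo **pp;

        if (--pip->refcount > 0)
                return;
        for (pp = active; *pp != NULL; pp = &(*pp)->next)
                if (*pp == pip) {
                        *pp = pip->next;        /* unlink before freeing */
                        break;
                }
        free(pip);
}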
- * - * PUBLIC: int __db_vrfy_pgset_get __P((DB *, db_pgno_t, int *)); - */ -int -__db_vrfy_pgset_get(dbp, pgno, valp) - DB *dbp; - db_pgno_t pgno; - int *valp; -{ - DBT key, data; - int ret, val; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - key.data = &pgno; - key.size = sizeof(db_pgno_t); - data.data = &val; - data.ulen = sizeof(int); - F_SET(&data, DB_DBT_USERMEM); - - if ((ret = __db_get(dbp, NULL, &key, &data, 0)) == 0) { - DB_ASSERT(data.size == sizeof(int)); - } else if (ret == DB_NOTFOUND) - val = 0; - else - return (ret); - - *valp = val; - return (0); -} - -/* - * __db_vrfy_pgset_inc -- - * Increment the value associated with a pgno by 1. - * - * PUBLIC: int __db_vrfy_pgset_inc __P((DB *, db_pgno_t)); - */ -int -__db_vrfy_pgset_inc(dbp, pgno) - DB *dbp; - db_pgno_t pgno; -{ - DBT key, data; - int ret; - int val; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - val = 0; - - key.data = &pgno; - key.size = sizeof(db_pgno_t); - data.data = &val; - data.ulen = sizeof(int); - F_SET(&data, DB_DBT_USERMEM); - - if ((ret = __db_get(dbp, NULL, &key, &data, 0)) == 0) { - DB_ASSERT(data.size == sizeof(int)); - } else if (ret != DB_NOTFOUND) - return (ret); - - data.size = sizeof(int); - ++val; - - return (__db_put(dbp, NULL, &key, &data, 0)); -} - -/* - * __db_vrfy_pgset_next -- - * Given a cursor open in a pgset database, get the next page in the - * set. - * - * PUBLIC: int __db_vrfy_pgset_next __P((DBC *, db_pgno_t *)); - */ -int -__db_vrfy_pgset_next(dbc, pgnop) - DBC *dbc; - db_pgno_t *pgnop; -{ - DBT key, data; - db_pgno_t pgno; - int ret; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - /* We don't care about the data, just the keys. */ - F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL); - F_SET(&key, DB_DBT_USERMEM); - key.data = &pgno; - key.ulen = sizeof(db_pgno_t); - - if ((ret = __db_c_get(dbc, &key, &data, DB_NEXT)) != 0) - return (ret); - - DB_ASSERT(key.size == sizeof(db_pgno_t)); - *pgnop = pgno; - - return (0); -} - -/* - * __db_vrfy_childcursor -- - * Create a cursor to walk the child list with. Returns with a nonzero - * final argument if the specified page has no children. - * - * PUBLIC: int __db_vrfy_childcursor __P((VRFY_DBINFO *, DBC **)); - */ -int -__db_vrfy_childcursor(vdp, dbcp) - VRFY_DBINFO *vdp; - DBC **dbcp; -{ - DB *cdbp; - DBC *dbc; - int ret; - - cdbp = vdp->cdbp; - - if ((ret = __db_cursor(cdbp, NULL, &dbc, 0)) == 0) - *dbcp = dbc; - - return (ret); -} - -/* - * __db_vrfy_childput -- - * Add a child structure to the set for a given page. - * - * PUBLIC: int __db_vrfy_childput - * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, VRFY_CHILDINFO *)); - */ -int -__db_vrfy_childput(vdp, pgno, cip) - VRFY_DBINFO *vdp; - db_pgno_t pgno; - VRFY_CHILDINFO *cip; -{ - DB *cdbp; - DBC *cc; - DBT key, data; - VRFY_CHILDINFO *oldcip; - int ret; - - cdbp = vdp->cdbp; - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - key.data = &pgno; - key.size = sizeof(db_pgno_t); - - /* - * We want to avoid adding multiple entries for a single child page; - * we only need to verify each child once, even if a child (such - * as an overflow key) is multiply referenced. - * - * However, we also need to make sure that when walking the list - * of children, we encounter them in the order they're referenced - * on a page. (This permits us, for example, to verify the - * prev_pgno/next_pgno chain of Btree leaf pages.) 
- * - * Check the child database to make sure that this page isn't - * already a child of the specified page number. If it's not, - * put it at the end of the duplicate set. - */ - if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0) - return (ret); - for (ret = __db_vrfy_ccset(cc, pgno, &oldcip); ret == 0; - ret = __db_vrfy_ccnext(cc, &oldcip)) - if (oldcip->pgno == cip->pgno) { - /* - * Found a matching child. Increment its reference - * count--we've run into it again--but don't put it - * again. - */ - if ((ret = __db_vrfy_childinc(cc, oldcip)) != 0 || - (ret = __db_vrfy_ccclose(cc)) != 0) - return (ret); - return (0); - } - if (ret != DB_NOTFOUND) { - (void)__db_vrfy_ccclose(cc); - return (ret); - } - if ((ret = __db_vrfy_ccclose(cc)) != 0) - return (ret); - - cip->refcnt = 1; - data.data = cip; - data.size = sizeof(VRFY_CHILDINFO); - - return (__db_put(cdbp, NULL, &key, &data, 0)); -} - -/* - * __db_vrfy_childinc -- - * Increment the refcount of the VRFY_CHILDINFO struct that the child - * cursor is pointing to. (The caller has just retrieved this struct, and - * passes it in as cip to save us a get.) - */ -static int -__db_vrfy_childinc(dbc, cip) - DBC *dbc; - VRFY_CHILDINFO *cip; -{ - DBT key, data; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - cip->refcnt++; - data.data = cip; - data.size = sizeof(VRFY_CHILDINFO); - - return (__db_c_put(dbc, &key, &data, DB_CURRENT)); -} - -/* - * __db_vrfy_ccset -- - * Sets a cursor created with __db_vrfy_childcursor to the first - * child of the given pgno, and returns it in the third arg. - * - * PUBLIC: int __db_vrfy_ccset __P((DBC *, db_pgno_t, VRFY_CHILDINFO **)); - */ -int -__db_vrfy_ccset(dbc, pgno, cipp) - DBC *dbc; - db_pgno_t pgno; - VRFY_CHILDINFO **cipp; -{ - DBT key, data; - int ret; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - key.data = &pgno; - key.size = sizeof(db_pgno_t); - - if ((ret = __db_c_get(dbc, &key, &data, DB_SET)) != 0) - return (ret); - - DB_ASSERT(data.size == sizeof(VRFY_CHILDINFO)); - *cipp = (VRFY_CHILDINFO *)data.data; - - return (0); -} - -/* - * __db_vrfy_ccnext -- - * Gets the next child of the given cursor created with - * __db_vrfy_childcursor, and returns it in the memory provided in the - * second arg. - * - * PUBLIC: int __db_vrfy_ccnext __P((DBC *, VRFY_CHILDINFO **)); - */ -int -__db_vrfy_ccnext(dbc, cipp) - DBC *dbc; - VRFY_CHILDINFO **cipp; -{ - DBT key, data; - int ret; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - if ((ret = __db_c_get(dbc, &key, &data, DB_NEXT_DUP)) != 0) - return (ret); - - DB_ASSERT(data.size == sizeof(VRFY_CHILDINFO)); - *cipp = (VRFY_CHILDINFO *)data.data; - - return (0); -} - -/* - * __db_vrfy_ccclose -- - * Closes the cursor created with __db_vrfy_childcursor. - * - * This doesn't actually do anything interesting now, but it's - * not inconceivable that we might change the internal database usage - * and keep the interfaces the same, and a function call here or there - * seldom hurts anyone. - * - * PUBLIC: int __db_vrfy_ccclose __P((DBC *)); - */ -int -__db_vrfy_ccclose(dbc) - DBC *dbc; -{ - - return (__db_c_close(dbc)); -} - -/* - * __db_vrfy_pageinfo_create -- - * Constructor for VRFY_PAGEINFO; allocates and initializes. - */ -static int -__db_vrfy_pageinfo_create(dbenv, pipp) - DB_ENV *dbenv; - VRFY_PAGEINFO **pipp; -{ - VRFY_PAGEINFO *pip; - int ret; - - /* - * pageinfo structs are sometimes allocated here and sometimes - * allocated by fetching them from a database with DB_DBT_MALLOC. 
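The child bookkeeping above wants two properties at once: each child page is recorded only once (a repeat merely bumps its reference count), and the recorded children keep the order in which they were first seen on the parent page. A minimal array-backed sketch of that append-if-absent operation, with an invented childinfo structure in place of VRFY_CHILDINFO and the duplicate-cursor machinery:

#include <stdlib.h>

struct childinfo {
        unsigned long pgno;
        unsigned refcnt;
};

struct childlist {
        struct childinfo *v;
        size_t len, cap;
};

/*
 * Record a child page.  If it is already present only its reference
 * count is bumped; otherwise it is appended, preserving first-seen
 * order.  Return 0 on success, -1 on allocation failure.
 */
static int
child_put(struct childlist *cl, unsigned long pgno)
{
        struct childinfo *nv;
        size_t i, ncap;

        for (i = 0; i < cl->len; i++)
                if (cl->v[i].pgno == pgno) {
                        cl->v[i].refcnt++;      /* seen again: just count it */
                        return (0);
                }
        if (cl->len == cl->cap) {               /* grow the array as needed */
                ncap = cl->cap == 0 ? 8 : cl->cap * 2;
                if ((nv = realloc(cl->v, ncap * sizeof(*nv))) == NULL)
                        return (-1);
                cl->v = nv;
                cl->cap = ncap;
        }
        cl->v[cl->len].pgno = pgno;
        cl->v[cl->len].refcnt = 1;
        cl->len++;
        return (0);
}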
- * There's no easy way for the destructor to tell which was - * used, and so we always allocate with __os_umalloc so we can free - * with __os_ufree. - */ - if ((ret = __os_umalloc(dbenv, sizeof(VRFY_PAGEINFO), &pip)) != 0) - return (ret); - memset(pip, 0, sizeof(VRFY_PAGEINFO)); - - *pipp = pip; - return (0); -} - -/* - * __db_salvage_init -- - * Set up salvager database. - * - * PUBLIC: int __db_salvage_init __P((VRFY_DBINFO *)); - */ -int -__db_salvage_init(vdp) - VRFY_DBINFO *vdp; -{ - DB *dbp; - int ret; - - if ((ret = db_create(&dbp, NULL, 0)) != 0) - return (ret); - - if ((ret = __db_set_pagesize(dbp, 1024)) != 0) - goto err; - - if ((ret = __db_open(dbp, - NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0, PGNO_BASE_MD)) != 0) - goto err; - - vdp->salvage_pages = dbp; - return (0); - -err: (void)__db_close(dbp, NULL, 0); - return (ret); -} - -/* - * __db_salvage_destroy -- - * Close salvager database. - * PUBLIC: void __db_salvage_destroy __P((VRFY_DBINFO *)); - */ -void -__db_salvage_destroy(vdp) - VRFY_DBINFO *vdp; -{ - (void)__db_close(vdp->salvage_pages, NULL, 0); -} - -/* - * __db_salvage_getnext -- - * Get the next (first) unprinted page in the database of pages we need to - * print still. Delete entries for any already-printed pages we encounter - * in this search, as well as the page we're returning. - * - * PUBLIC: int __db_salvage_getnext - * PUBLIC: __P((VRFY_DBINFO *, DBC **, db_pgno_t *, u_int32_t *, int)); - */ -int -__db_salvage_getnext(vdp, dbcp, pgnop, pgtypep, skip_overflow) - VRFY_DBINFO *vdp; - DBC **dbcp; - db_pgno_t *pgnop; - u_int32_t *pgtypep; - int skip_overflow; -{ - DB *dbp; - DBT key, data; - int ret; - u_int32_t pgtype; - - dbp = vdp->salvage_pages; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - if (*dbcp == NULL && - (ret = __db_cursor(dbp, NULL, dbcp, 0)) != 0) - return (ret); - - while ((ret = __db_c_get(*dbcp, &key, &data, DB_NEXT)) == 0) { - DB_ASSERT(data.size == sizeof(u_int32_t)); - memcpy(&pgtype, data.data, sizeof(pgtype)); - - if (skip_overflow && pgtype == SALVAGE_OVERFLOW) - continue; - - if ((ret = __db_c_del(*dbcp, 0)) != 0) - return (ret); - if (pgtype != SALVAGE_IGNORE) { - DB_ASSERT(key.size == sizeof(db_pgno_t)); - DB_ASSERT(data.size == sizeof(u_int32_t)); - - *pgnop = *(db_pgno_t *)key.data; - *pgtypep = *(u_int32_t *)data.data; - break; - } - } - - return (ret); -} - -/* - * __db_salvage_isdone -- - * Return whether or not the given pgno is already marked - * SALVAGE_IGNORE (meaning that we don't need to print it again). - * - * Returns DB_KEYEXIST if it is marked, 0 if not, or another error on - * error. - * - * PUBLIC: int __db_salvage_isdone __P((VRFY_DBINFO *, db_pgno_t)); - */ -int -__db_salvage_isdone(vdp, pgno) - VRFY_DBINFO *vdp; - db_pgno_t pgno; -{ - DBT key, data; - DB *dbp; - int ret; - u_int32_t currtype; - - dbp = vdp->salvage_pages; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - currtype = SALVAGE_INVALID; - data.data = &currtype; - data.ulen = sizeof(u_int32_t); - data.flags = DB_DBT_USERMEM; - - key.data = &pgno; - key.size = sizeof(db_pgno_t); - - /* - * Put an entry for this page, with pgno as key and type as data, - * unless it's already there and is marked done. - * If it's there and is marked anything else, that's fine--we - * want to mark it done. - */ - if ((ret = __db_get(dbp, NULL, &key, &data, 0)) == 0) { - /* - * The key's already here. Check and see if it's already - * marked done. If it is, return DB_KEYEXIST. If it's not, - * return 0. 
- */ - if (currtype == SALVAGE_IGNORE) - return (DB_KEYEXIST); - else - return (0); - } else if (ret != DB_NOTFOUND) - return (ret); - - /* The pgno is not yet marked anything; return 0. */ - return (0); -} - -/* - * __db_salvage_markdone -- - * Mark as done a given page. - * - * PUBLIC: int __db_salvage_markdone __P((VRFY_DBINFO *, db_pgno_t)); - */ -int -__db_salvage_markdone(vdp, pgno) - VRFY_DBINFO *vdp; - db_pgno_t pgno; -{ - DBT key, data; - DB *dbp; - int pgtype, ret; - u_int32_t currtype; - - pgtype = SALVAGE_IGNORE; - dbp = vdp->salvage_pages; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - currtype = SALVAGE_INVALID; - data.data = &currtype; - data.ulen = sizeof(u_int32_t); - data.flags = DB_DBT_USERMEM; - - key.data = &pgno; - key.size = sizeof(db_pgno_t); - - /* - * Put an entry for this page, with pgno as key and type as data, - * unless it's already there and is marked done. - * If it's there and is marked anything else, that's fine--we - * want to mark it done, but db_salvage_isdone only lets - * us know if it's marked IGNORE. - * - * We don't want to return DB_KEYEXIST, though; this will - * likely get passed up all the way and make no sense to the - * application. Instead, use DB_VERIFY_BAD to indicate that - * we've seen this page already--it probably indicates a - * multiply-linked page. - */ - if ((ret = __db_salvage_isdone(vdp, pgno)) != 0) - return (ret == DB_KEYEXIST ? DB_VERIFY_BAD : ret); - - data.size = sizeof(u_int32_t); - data.data = &pgtype; - - return (__db_put(dbp, NULL, &key, &data, 0)); -} - -/* - * __db_salvage_markneeded -- - * If it has not yet been printed, make note of the fact that a page - * must be dealt with later. - * - * PUBLIC: int __db_salvage_markneeded - * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, u_int32_t)); - */ -int -__db_salvage_markneeded(vdp, pgno, pgtype) - VRFY_DBINFO *vdp; - db_pgno_t pgno; - u_int32_t pgtype; -{ - DB *dbp; - DBT key, data; - int ret; - - dbp = vdp->salvage_pages; - - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - - key.data = &pgno; - key.size = sizeof(db_pgno_t); - - data.data = &pgtype; - data.size = sizeof(u_int32_t); - - /* - * Put an entry for this page, with pgno as key and type as data, - * unless it's already there, in which case it's presumably - * already been marked done. - */ - ret = __db_put(dbp, NULL, &key, &data, DB_NOOVERWRITE); - return (ret == DB_KEYEXIST ? 0 : ret); -} - -/* - * __db_vrfy_prdbt -- - * Print out a DBT data element from a verification routine. - * - * PUBLIC: int __db_vrfy_prdbt __P((DBT *, int, const char *, void *, - * PUBLIC: int (*)(void *, const void *), int, VRFY_DBINFO *)); - */ -int -__db_vrfy_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp) - DBT *dbtp; - int checkprint; - const char *prefix; - void *handle; - int (*callback) __P((void *, const void *)); - int is_recno; - VRFY_DBINFO *vdp; -{ - if (vdp != NULL) { - /* - * If vdp is non-NULL, we might be the first key in the - * "fake" subdatabase used for key/data pairs we can't - * associate with a known subdb. - * - * Check and clear the SALVAGE_PRINTHEADER flag; if - * it was set, print a subdatabase header. - */ - if (F_ISSET(vdp, SALVAGE_PRINTHEADER)) { - (void)__db_prheader( - NULL, "__OTHER__", 0, 0, handle, callback, vdp, 0); - F_CLR(vdp, SALVAGE_PRINTHEADER); - F_SET(vdp, SALVAGE_PRINTFOOTER); - } - - /* - * Even if the printable flag wasn't set by our immediate - * caller, it may be set on a salvage-wide basis. 
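Taken together, the salvage bookkeeping helpers above form a small per-page state machine: markneeded records a page type only if nothing is recorded yet, isdone tests for the ignore marker, and markdone overwrites the entry with that marker, reporting a revisit (the likely multiply-linked-page case) rather than silently printing the page twice. A compact in-memory sketch of the same three operations over an array indexed by page number, with invented state names:

enum salvage_state {
        SV_UNSEEN = 0,          /* nothing recorded for this page yet   */
        SV_NEEDED,              /* must still be printed later          */
        SV_DONE                 /* already printed: ignore from now on  */
};

/* Record that a page still needs salvaging, unless already recorded. */
static void
mark_needed(enum salvage_state *st, unsigned long pgno)
{
        if (st[pgno] == SV_UNSEEN)
                st[pgno] = SV_NEEDED;
}

/* Has this page already been dealt with? */
static int
is_done(const enum salvage_state *st, unsigned long pgno)
{
        return (st[pgno] == SV_DONE);
}

/*
 * Mark a page done.  Return -1 if it was already done -- the analogue
 * of the DB_VERIFY_BAD result above, usually a multiply-linked page --
 * and 0 otherwise.
 */
static int
mark_done(enum salvage_state *st, unsigned long pgno)
{
        if (is_done(st, pgno))
                return (-1);
        st[pgno] = SV_DONE;
        return (0);
}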
- */ - if (F_ISSET(vdp, SALVAGE_PRINTABLE)) - checkprint = 1; - } - return ( - __db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno)); -} diff --git a/storage/bdb/db185/db185.c b/storage/bdb/db185/db185.c deleted file mode 100644 index 59b3260e4f1..00000000000 --- a/storage/bdb/db185/db185.c +++ /dev/null @@ -1,583 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db185.c,v 12.2 2005/10/06 14:36:51 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#endif - -#include "db_int.h" -#include "db185_int.h" - -static int db185_close __P((DB185 *)); -static int db185_compare __P((DB *, const DBT *, const DBT *)); -static int db185_del __P((const DB185 *, const DBT185 *, u_int)); -static int db185_fd __P((const DB185 *)); -static int db185_get __P((const DB185 *, const DBT185 *, DBT185 *, u_int)); -static u_int32_t - db185_hash __P((DB *, const void *, u_int32_t)); -static size_t db185_prefix __P((DB *, const DBT *, const DBT *)); -static int db185_put __P((const DB185 *, DBT185 *, const DBT185 *, u_int)); -static int db185_seq __P((const DB185 *, DBT185 *, DBT185 *, u_int)); -static int db185_sync __P((const DB185 *, u_int)); - -/* - * EXTERN: #ifdef _DB185_INT_H_ - * EXTERN: DB185 *__db185_open - * EXTERN: __P((const char *, int, int, DBTYPE, const void *)); - * EXTERN: #else - * EXTERN: DB *__db185_open - * EXTERN: __P((const char *, int, int, DBTYPE, const void *)); - * EXTERN: #endif - */ -DB185 * -__db185_open(file, oflags, mode, type, openinfo) - const char *file; - int oflags, mode; - DBTYPE type; - const void *openinfo; -{ - const BTREEINFO *bi; - const HASHINFO *hi; - const RECNOINFO *ri; - DB *dbp; - DB185 *db185p; - DB_FH *fhp; - int ret; - - dbp = NULL; - db185p = NULL; - - if ((ret = db_create(&dbp, NULL, 0)) != 0) - goto err; - - if ((ret = __os_calloc(NULL, 1, sizeof(DB185), &db185p)) != 0) - goto err; - - /* - * !!! - * The DBTYPE enum wasn't initialized in DB 185, so it's off-by-one - * from DB 2.0. 
- */ - switch (type) { - case 0: /* DB_BTREE */ - type = DB_BTREE; - if ((bi = openinfo) != NULL) { - if (bi->flags & ~R_DUP) - goto einval; - if (bi->flags & R_DUP) - (void)dbp->set_flags(dbp, DB_DUP); - if (bi->cachesize != 0) - (void)dbp->set_cachesize - (dbp, 0, bi->cachesize, 0); - if (bi->minkeypage != 0) - (void)dbp->set_bt_minkey(dbp, bi->minkeypage); - if (bi->psize != 0) - (void)dbp->set_pagesize(dbp, bi->psize); - if (bi->prefix != NULL) { - db185p->prefix = bi->prefix; - dbp->set_bt_prefix(dbp, db185_prefix); - } - if (bi->compare != NULL) { - db185p->compare = bi->compare; - dbp->set_bt_compare(dbp, db185_compare); - } - if (bi->lorder != 0) - dbp->set_lorder(dbp, bi->lorder); - } - break; - case 1: /* DB_HASH */ - type = DB_HASH; - if ((hi = openinfo) != NULL) { - if (hi->bsize != 0) - (void)dbp->set_pagesize(dbp, hi->bsize); - if (hi->ffactor != 0) - (void)dbp->set_h_ffactor(dbp, hi->ffactor); - if (hi->nelem != 0) - (void)dbp->set_h_nelem(dbp, hi->nelem); - if (hi->cachesize != 0) - (void)dbp->set_cachesize - (dbp, 0, hi->cachesize, 0); - if (hi->hash != NULL) { - db185p->hash = hi->hash; - (void)dbp->set_h_hash(dbp, db185_hash); - } - if (hi->lorder != 0) - dbp->set_lorder(dbp, hi->lorder); - } - - break; - case 2: /* DB_RECNO */ - type = DB_RECNO; - - /* DB 1.85 did renumbering by default. */ - (void)dbp->set_flags(dbp, DB_RENUMBER); - - /* - * !!! - * The file name given to DB 1.85 recno is the name of the DB - * 2.0 backing file. If the file doesn't exist, create it if - * the user has the O_CREAT flag set, DB 1.85 did it for you, - * and DB 2.0 doesn't. - * - * !!! - * Setting the file name to NULL specifies that we're creating - * a temporary backing file, in DB 2.X. If we're opening the - * DB file read-only, change the flags to read-write, because - * temporary backing files cannot be opened read-only, and DB - * 2.X will return an error. We are cheating here -- if the - * application does a put on the database, it will succeed -- - * although that would be a stupid thing for the application - * to do. - * - * !!! - * Note, the file name in DB 1.85 was a const -- we don't do - * that in DB 2.0, so do that cast. - */ - if (file != NULL) { - if (oflags & O_CREAT && __os_exists(file, NULL) != 0) - if (__os_openhandle(NULL, file, - oflags, mode, &fhp) == 0) - (void)__os_closehandle(NULL, fhp); - (void)dbp->set_re_source(dbp, file); - - if (O_RDONLY) - oflags &= ~O_RDONLY; - oflags |= O_RDWR; - file = NULL; - } - - if ((ri = openinfo) != NULL) { - /* - * !!! - * We can't support the bfname field. - */ -#define BFMSG \ - "Berkeley DB: DB 1.85's recno bfname field is not supported.\n" - if (ri->bfname != NULL) { - dbp->errx(dbp, "%s", BFMSG); - goto einval; - } - - if (ri->flags & ~(R_FIXEDLEN | R_NOKEY | R_SNAPSHOT)) - goto einval; - if (ri->flags & R_FIXEDLEN) { - if (ri->bval != 0) - (void)dbp->set_re_pad(dbp, ri->bval); - if (ri->reclen != 0) - (void)dbp->set_re_len(dbp, ri->reclen); - } else - if (ri->bval != 0) - (void)dbp->set_re_delim(dbp, ri->bval); - - /* - * !!! - * We ignore the R_NOKEY flag, but that's okay, it was - * only an optimization that was never implemented. 
- */ - if (ri->flags & R_SNAPSHOT) - (void)dbp->set_flags(dbp, DB_SNAPSHOT); - - if (ri->cachesize != 0) - (void)dbp->set_cachesize - (dbp, 0, ri->cachesize, 0); - if (ri->psize != 0) - (void)dbp->set_pagesize(dbp, ri->psize); - if (ri->lorder != 0) - dbp->set_lorder(dbp, ri->lorder); - } - break; - default: - goto einval; - } - - db185p->close = db185_close; - db185p->del = db185_del; - db185p->fd = db185_fd; - db185p->get = db185_get; - db185p->put = db185_put; - db185p->seq = db185_seq; - db185p->sync = db185_sync; - - /* - * Store a reference so we can indirect from the DB 1.85 structure - * to the underlying DB structure, and vice-versa. This has to be - * done BEFORE the DB::open method call because the hash callback - * is exercised as part of hash database initialization. - */ - db185p->dbp = dbp; - dbp->api_internal = db185p; - - /* Open the database. */ - if ((ret = dbp->open(dbp, NULL, - file, NULL, type, __db_oflags(oflags), mode)) != 0) - goto err; - - /* Create the cursor used for sequential ops. */ - if ((ret = dbp->cursor(dbp, NULL, &((DB185 *)db185p)->dbc, 0)) != 0) - goto err; - - return (db185p); - -einval: ret = EINVAL; - -err: if (db185p != NULL) - __os_free(NULL, db185p); - if (dbp != NULL) - (void)dbp->close(dbp, 0); - - __os_set_errno(ret); - return (NULL); -} - -static int -db185_close(db185p) - DB185 *db185p; -{ - DB *dbp; - int ret; - - dbp = db185p->dbp; - - ret = dbp->close(dbp, 0); - - __os_free(NULL, db185p); - - if (ret == 0) - return (0); - - __os_set_errno(ret); - return (-1); -} - -static int -db185_del(db185p, key185, flags) - const DB185 *db185p; - const DBT185 *key185; - u_int flags; -{ - DB *dbp; - DBT key; - int ret; - - dbp = db185p->dbp; - - memset(&key, 0, sizeof(key)); - key.data = key185->data; - key.size = key185->size; - - if (flags & ~R_CURSOR) - goto einval; - if (flags & R_CURSOR) - ret = db185p->dbc->c_del(db185p->dbc, 0); - else - ret = dbp->del(dbp, NULL, &key, 0); - - switch (ret) { - case 0: - return (0); - case DB_NOTFOUND: - return (1); - } - - if (0) { -einval: ret = EINVAL; - } - __os_set_errno(ret); - return (-1); -} - -static int -db185_fd(db185p) - const DB185 *db185p; -{ - DB *dbp; - int fd, ret; - - dbp = db185p->dbp; - - if ((ret = dbp->fd(dbp, &fd)) == 0) - return (fd); - - __os_set_errno(ret); - return (-1); -} - -static int -db185_get(db185p, key185, data185, flags) - const DB185 *db185p; - const DBT185 *key185; - DBT185 *data185; - u_int flags; -{ - DB *dbp; - DBT key, data; - int ret; - - dbp = db185p->dbp; - - memset(&key, 0, sizeof(key)); - key.data = key185->data; - key.size = key185->size; - memset(&data, 0, sizeof(data)); - data.data = data185->data; - data.size = data185->size; - - if (flags) - goto einval; - - switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) { - case 0: - data185->data = data.data; - data185->size = data.size; - return (0); - case DB_NOTFOUND: - return (1); - } - - if (0) { -einval: ret = EINVAL; - } - __os_set_errno(ret); - return (-1); -} - -static int -db185_put(db185p, key185, data185, flags) - const DB185 *db185p; - DBT185 *key185; - const DBT185 *data185; - u_int flags; -{ - DB *dbp; - DBC *dbcp_put; - DBT key, data; - int ret, t_ret; - - dbp = db185p->dbp; - - memset(&key, 0, sizeof(key)); - key.data = key185->data; - key.size = key185->size; - memset(&data, 0, sizeof(data)); - data.data = data185->data; - data.size = data185->size; - - switch (flags) { - case 0: - ret = dbp->put(dbp, NULL, &key, &data, 0); - break; - case R_CURSOR: - ret = db185p->dbc->c_put(db185p->dbc, &key, &data, 
DB_CURRENT); - break; - case R_IAFTER: - case R_IBEFORE: - if (dbp->type != DB_RECNO) - goto einval; - - if ((ret = dbp->cursor(dbp, NULL, &dbcp_put, 0)) != 0) - break; - if ((ret = - dbcp_put->c_get(dbcp_put, &key, &data, DB_SET)) == 0) { - memset(&data, 0, sizeof(data)); - data.data = data185->data; - data.size = data185->size; - ret = dbcp_put->c_put(dbcp_put, &key, &data, - flags == R_IAFTER ? DB_AFTER : DB_BEFORE); - } - if ((t_ret = dbcp_put->c_close(dbcp_put)) != 0 && ret == 0) - ret = t_ret; - break; - case R_NOOVERWRITE: - ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE); - break; - case R_SETCURSOR: - if (dbp->type != DB_BTREE && dbp->type != DB_RECNO) - goto einval; - - if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0) - break; - ret = - db185p->dbc->c_get(db185p->dbc, &key, &data, DB_SET_RANGE); - break; - default: - goto einval; - } - - switch (ret) { - case 0: - key185->data = key.data; - key185->size = key.size; - return (0); - case DB_KEYEXIST: - return (1); - } - - if (0) { -einval: ret = EINVAL; - } - __os_set_errno(ret); - return (-1); -} - -static int -db185_seq(db185p, key185, data185, flags) - const DB185 *db185p; - DBT185 *key185, *data185; - u_int flags; -{ - DB *dbp; - DBT key, data; - int ret; - - dbp = db185p->dbp; - - memset(&key, 0, sizeof(key)); - key.data = key185->data; - key.size = key185->size; - memset(&data, 0, sizeof(data)); - data.data = data185->data; - data.size = data185->size; - - switch (flags) { - case R_CURSOR: - flags = DB_SET_RANGE; - break; - case R_FIRST: - flags = DB_FIRST; - break; - case R_LAST: - if (dbp->type != DB_BTREE && dbp->type != DB_RECNO) - goto einval; - flags = DB_LAST; - break; - case R_NEXT: - flags = DB_NEXT; - break; - case R_PREV: - if (dbp->type != DB_BTREE && dbp->type != DB_RECNO) - goto einval; - flags = DB_PREV; - break; - default: - goto einval; - } - switch (ret = db185p->dbc->c_get(db185p->dbc, &key, &data, flags)) { - case 0: - key185->data = key.data; - key185->size = key.size; - data185->data = data.data; - data185->size = data.size; - return (0); - case DB_NOTFOUND: - return (1); - } - - if (0) { -einval: ret = EINVAL; - } - __os_set_errno(ret); - return (-1); -} - -static int -db185_sync(db185p, flags) - const DB185 *db185p; - u_int flags; -{ - DB *dbp; - int ret; - - dbp = db185p->dbp; - - switch (flags) { - case 0: - break; - case R_RECNOSYNC: - /* - * !!! - * We can't support the R_RECNOSYNC flag. - */ -#define RSMSG \ - "Berkeley DB: DB 1.85's R_RECNOSYNC sync flag is not supported.\n" - dbp->errx(dbp, "%s", RSMSG); - /* FALLTHROUGH */ - default: - goto einval; - } - - if ((ret = dbp->sync(dbp, 0)) == 0) - return (0); - - if (0) { -einval: ret = EINVAL; - } - __os_set_errno(ret); - return (-1); -} - -/* - * db185_compare -- - * Cutout routine to call the user's Btree comparison function. - */ -static int -db185_compare(dbp, a, b) - DB *dbp; - const DBT *a, *b; -{ - DBT185 a185, b185; - - a185.data = a->data; - a185.size = a->size; - b185.data = b->data; - b185.size = b->size; - - return (((DB185 *)dbp->api_internal)->compare(&a185, &b185)); -} - -/* - * db185_prefix -- - * Cutout routine to call the user's Btree prefix function. - */ -static size_t -db185_prefix(dbp, a, b) - DB *dbp; - const DBT *a, *b; -{ - DBT185 a185, b185; - - a185.data = a->data; - a185.size = a->size; - b185.data = b->data; - b185.size = b->size; - - return (((DB185 *)dbp->api_internal)->prefix(&a185, &b185)); -} - -/* - * db185_hash -- - * Cutout routine to call the user's hash function. 
- */ -static u_int32_t -db185_hash(dbp, key, len) - DB *dbp; - const void *key; - u_int32_t len; -{ - return (((DB185 *)dbp->api_internal)->hash(key, (size_t)len)); -} diff --git a/storage/bdb/db185/db185_int.in b/storage/bdb/db185/db185_int.in deleted file mode 100644 index eba4fdb002d..00000000000 --- a/storage/bdb/db185/db185_int.in +++ /dev/null @@ -1,129 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db185_int.in,v 12.2 2005/10/06 14:36:52 bostic Exp $ - */ - -#ifndef _DB185_INT_H_ -#define _DB185_INT_H_ - -/* Routine flags. */ -#define R_CURSOR 1 /* del, put, seq */ -#define __R_UNUSED 2 /* UNUSED */ -#define R_FIRST 3 /* seq */ -#define R_IAFTER 4 /* put (RECNO) */ -#define R_IBEFORE 5 /* put (RECNO) */ -#define R_LAST 6 /* seq (BTREE, RECNO) */ -#define R_NEXT 7 /* seq */ -#define R_NOOVERWRITE 8 /* put */ -#define R_PREV 9 /* seq (BTREE, RECNO) */ -#define R_SETCURSOR 10 /* put (RECNO) */ -#define R_RECNOSYNC 11 /* sync (RECNO) */ - -typedef struct { - void *data; /* data */ - size_t size; /* data length */ -} DBT185; - -/* Access method description structure. */ -typedef struct __db185 { - DBTYPE type; /* Underlying db type. */ - int (*close) __P((struct __db185 *)); - int (*del) __P((const struct __db185 *, const DBT185 *, u_int)); - int (*get) - __P((const struct __db185 *, const DBT185 *, DBT185 *, u_int)); - int (*put) - __P((const struct __db185 *, DBT185 *, const DBT185 *, u_int)); - int (*seq) - __P((const struct __db185 *, DBT185 *, DBT185 *, u_int)); - int (*sync) __P((const struct __db185 *, u_int)); - DB *dbp; /* DB structure. Was void *internal. */ - int (*fd) __P((const struct __db185 *)); - - /* - * !!! - * The following elements added to the end of the DB 1.85 DB - * structure. 
- */ - DBC *dbc; /* DB cursor. */ - /* Various callback functions. */ - int (*compare) __P((const DBT185 *, const DBT185 *)); - size_t (*prefix) __P((const DBT185 *, const DBT185 *)); - u_int32_t (*hash) __P((const void *, size_t)); -} DB185; - -/* Structure used to pass parameters to the btree routines. */ -typedef struct { -#define R_DUP 0x01 /* duplicate keys */ - u_int32_t flags; - u_int32_t cachesize; /* bytes to cache */ - u_int32_t maxkeypage; /* maximum keys per page */ - u_int32_t minkeypage; /* minimum keys per page */ - u_int32_t psize; /* page size */ - int (*compare) /* comparison function */ - __P((const DBT185 *, const DBT185 *)); - size_t (*prefix) /* prefix function */ - __P((const DBT185 *, const DBT185 *)); - int lorder; /* byte order */ -} BTREEINFO; - -/* Structure used to pass parameters to the hashing routines. */ -typedef struct { - u_int32_t bsize; /* bucket size */ - u_int32_t ffactor; /* fill factor */ - u_int32_t nelem; /* number of elements */ - u_int32_t cachesize; /* bytes to cache */ - u_int32_t /* hash function */ - (*hash) __P((const void *, size_t)); - int lorder; /* byte order */ -} HASHINFO; - -/* Structure used to pass parameters to the record routines. */ -typedef struct { -#define R_FIXEDLEN 0x01 /* fixed-length records */ -#define R_NOKEY 0x02 /* key not required */ -#define R_SNAPSHOT 0x04 /* snapshot the input */ - u_int32_t flags; - u_int32_t cachesize; /* bytes to cache */ - u_int32_t psize; /* page size */ - int lorder; /* byte order */ - size_t reclen; /* record length (fixed-length records) */ - u_char bval; /* delimiting byte (variable-length records */ - char *bfname; /* btree file name */ -} RECNOINFO; -#endif /* !_DB185_INT_H_ */ diff --git a/storage/bdb/db_archive/db_archive.c b/storage/bdb/db_archive/db_archive.c deleted file mode 100644 index d00fc1bf9ea..00000000000 --- a/storage/bdb/db_archive/db_archive.c +++ /dev/null @@ -1,188 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_archive.c,v 12.4 2005/09/09 12:38:30 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#endif - -#include "db_int.h" - -int main __P((int, char *[])); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB_ENV *dbenv; - u_int32_t flags; - int ch, exitval, ret, verbose; - char **file, *home, **list, *passwd; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - dbenv = NULL; - flags = 0; - exitval = verbose = 0; - home = passwd = NULL; - file = list = NULL; - while ((ch = getopt(argc, argv, "adh:lP:sVv")) != EOF) - switch (ch) { - case 'a': - LF_SET(DB_ARCH_ABS); - break; - case 'd': - LF_SET(DB_ARCH_REMOVE); - break; - case 'h': - home = optarg; - break; - case 'l': - LF_SET(DB_ARCH_LOG); - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 's': - LF_SET(DB_ARCH_DATA); - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'v': - verbose = 1; - break; - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc != 0) - return (usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * Create an environment object and initialize it for error - * reporting. - */ - if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto shutdown; - } - - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, - passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; - } - /* - * If attaching to a pre-existing environment fails, create a - * private one and try again. - */ - if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0 && - (ret == DB_VERSION_MISMATCH || - (ret = dbenv->open(dbenv, home, DB_CREATE | - DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)) { - dbenv->err(dbenv, ret, "DB_ENV->open"); - goto shutdown; - } - - /* Get the list of names. */ - if ((ret = dbenv->log_archive(dbenv, &list, flags)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->log_archive"); - goto shutdown; - } - - /* Print the list of names. */ - if (list != NULL) { - for (file = list; *file != NULL; ++file) - printf("%s\n", *file); - free(list); - } - - if (0) { -shutdown: exitval = 1; - } - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -int -usage() -{ - (void)fprintf(stderr, - "usage: %s [-adlsVv] [-h home] [-P password]\n", progname); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. 
*/ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_checkpoint/db_checkpoint.c b/storage/bdb/db_checkpoint/db_checkpoint.c deleted file mode 100644 index c1ee7b50596..00000000000 --- a/storage/bdb/db_checkpoint/db_checkpoint.c +++ /dev/null @@ -1,243 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_checkpoint.c,v 12.6 2005/09/09 12:38:30 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#if TIME_WITH_SYS_TIME -#include -#include -#else -#if HAVE_SYS_TIME_H -#include -#else -#include -#endif -#endif - -#include -#include -#include -#include -#endif - -#include "db_int.h" - -int main __P((int, char *[])); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB_ENV *dbenv; - time_t now; - long argval; - u_int32_t flags, kbytes, minutes, seconds; - int ch, exitval, once, ret, verbose; - char *home, *logfile, *passwd; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - /* - * !!! - * Don't allow a fully unsigned 32-bit number, some compilers get - * upset and require it to be specified in hexadecimal and so on. - */ -#define MAX_UINT32_T 2147483647 - - dbenv = NULL; - kbytes = minutes = 0; - exitval = once = verbose = 0; - flags = 0; - home = logfile = passwd = NULL; - while ((ch = getopt(argc, argv, "1h:k:L:P:p:Vv")) != EOF) - switch (ch) { - case '1': - once = 1; - flags = DB_FORCE; - break; - case 'h': - home = optarg; - break; - case 'k': - if (__db_getlong(NULL, progname, - optarg, 1, (long)MAX_UINT32_T, &argval)) - return (EXIT_FAILURE); - kbytes = (u_int32_t)argval; - break; - case 'L': - logfile = optarg; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'p': - if (__db_getlong(NULL, progname, - optarg, 1, (long)MAX_UINT32_T, &argval)) - return (EXIT_FAILURE); - minutes = (u_int32_t)argval; - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'v': - verbose = 1; - break; - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc != 0) - return (usage()); - - if (once == 0 && kbytes == 0 && minutes == 0) { - (void)fprintf(stderr, - "%s: at least one of -1, -k and -p must be specified\n", - progname); - return (EXIT_FAILURE); - } - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* Log our process ID. */ - if (logfile != NULL && __db_util_logset(progname, logfile)) - goto shutdown; - - /* - * Create an environment object and initialize it for error - * reporting. 
- */ - if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto shutdown; - } - - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, - passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; - } - /* Initialize the environment. */ - if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); - goto shutdown; - } - - /* - * If we have only a time delay, then we'll sleep the right amount - * to wake up when a checkpoint is necessary. If we have a "kbytes" - * field set, then we'll check every 30 seconds. - */ - seconds = kbytes != 0 ? 30 : minutes * 60; - while (!__db_util_interrupted()) { - if (verbose) { - (void)time(&now); - dbenv->errx(dbenv, "checkpoint begin: %s", ctime(&now)); - } - - if ((ret = dbenv->txn_checkpoint(dbenv, - kbytes, minutes, flags)) != 0) { - dbenv->err(dbenv, ret, "txn_checkpoint"); - goto shutdown; - } - - if (verbose) { - (void)time(&now); - dbenv->errx(dbenv, - "checkpoint complete: %s", ctime(&now)); - } - - if (once) - break; - - __os_sleep(dbenv, seconds, 0); - } - - if (0) { -shutdown: exitval = 1; - } - - /* Clean up the logfile. */ - if (logfile != NULL) - (void)remove(logfile); - - /* Clean up the environment. */ - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -int -usage() -{ - (void)fprintf(stderr, "usage: %s [-1Vv]\n\t%s\n", progname, - "[-h home] [-k kbytes] [-L file] [-P password] [-p min]"); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_deadlock/db_deadlock.c b/storage/bdb/db_deadlock/db_deadlock.c deleted file mode 100644 index 67078a6937a..00000000000 --- a/storage/bdb/db_deadlock/db_deadlock.c +++ /dev/null @@ -1,257 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_deadlock.c,v 12.4 2005/10/03 16:00:16 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#if TIME_WITH_SYS_TIME -#include -#include -#else -#if HAVE_SYS_TIME_H -#include -#else -#include -#endif -#endif - -#include -#include -#include -#include -#include -#endif - -#include "db_int.h" - -int main __P((int, char *[])); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB_ENV *dbenv; - u_int32_t atype; - time_t now; - u_long secs, usecs; - int ch, exitval, ret, verbose; - char *home, *logfile, *passwd, *str; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - dbenv = NULL; - atype = DB_LOCK_DEFAULT; - home = logfile = passwd = NULL; - secs = usecs = 0; - exitval = verbose = 0; - while ((ch = getopt(argc, argv, "a:h:L:P:t:Vvw")) != EOF) - switch (ch) { - case 'a': - switch (optarg[0]) { - case 'e': - atype = DB_LOCK_EXPIRE; - break; - case 'm': - atype = DB_LOCK_MAXLOCKS; - break; - case 'n': - atype = DB_LOCK_MINLOCKS; - break; - case 'o': - atype = DB_LOCK_OLDEST; - break; - case 'W': - atype = DB_LOCK_MAXWRITE; - break; - case 'w': - atype = DB_LOCK_MINWRITE; - break; - case 'y': - atype = DB_LOCK_YOUNGEST; - break; - default: - return (usage()); - /* NOTREACHED */ - } - if (optarg[1] != '\0') - return (usage()); - break; - case 'h': - home = optarg; - break; - case 'L': - logfile = optarg; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - case 't': - if ((str = strchr(optarg, '.')) != NULL) { - *str++ = '\0'; - if (*str != '\0' && __db_getulong( - NULL, progname, str, 0, LONG_MAX, &usecs)) - return (EXIT_FAILURE); - } - if (*optarg != '\0' && __db_getulong( - NULL, progname, optarg, 0, LONG_MAX, &secs)) - return (EXIT_FAILURE); - if (secs == 0 && usecs == 0) - return (usage()); - - break; - - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'v': - verbose = 1; - break; - case 'w': /* Undocumented. */ - /* Detect every 100ms (100000 us) when polling. */ - secs = 0; - usecs = 100000; - break; - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc != 0) - return (usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* Log our process ID. */ - if (logfile != NULL && __db_util_logset(progname, logfile)) - goto shutdown; - - /* - * Create an environment object and initialize it for error - * reporting. - */ - if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto shutdown; - } - - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, - passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; - } - - if (verbose) { - (void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1); - (void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1); - } - - /* An environment is required. 
*/ - if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); - goto shutdown; - } - - while (!__db_util_interrupted()) { - if (verbose) { - (void)time(&now); - dbenv->errx(dbenv, "running at %.24s", ctime(&now)); - } - - if ((ret = dbenv->lock_detect(dbenv, 0, atype, NULL)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->lock_detect"); - goto shutdown; - } - - /* Make a pass every "secs" secs and "usecs" usecs. */ - if (secs == 0 && usecs == 0) - break; - __os_sleep(dbenv, secs, usecs); - } - - if (0) { -shutdown: exitval = 1; - } - - /* Clean up the logfile. */ - if (logfile != NULL) - (void)remove(logfile); - - /* Clean up the environment. */ - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -int -usage() -{ - (void)fprintf(stderr, - "usage: %s [-Vv] [-a e | m | n | o | W | w | y]\n\t%s\n", progname, - "[-h home] [-L file] [-P password] [-t sec.usec]"); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_dump/db_dump.c b/storage/bdb/db_dump/db_dump.c deleted file mode 100644 index fbae7373004..00000000000 --- a/storage/bdb/db_dump/db_dump.c +++ /dev/null @@ -1,520 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_dump.c,v 12.4 2005/09/09 12:38:30 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" - -int db_init __P((DB_ENV *, char *, int, u_int32_t, int *)); -int dump_sub __P((DB_ENV *, DB *, char *, int, int)); -int is_sub __P((DB *, int *)); -int main __P((int, char *[])); -int show_subs __P((DB *)); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB_ENV *dbenv; - DB *dbp; - u_int32_t cache; - int ch; - int exitval, keyflag, lflag, nflag, pflag, private; - int ret, Rflag, rflag, resize, subs; - char *dopt, *home, *passwd, *subname; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - dbenv = NULL; - dbp = NULL; - exitval = lflag = nflag = pflag = rflag = Rflag = 0; - keyflag = 0; - cache = MEGABYTE; - private = 0; - dopt = home = passwd = subname = NULL; - while ((ch = getopt(argc, argv, "d:f:h:klNpP:rRs:V")) != EOF) - switch (ch) { - case 'd': - dopt = optarg; - break; - case 'f': - if (freopen(optarg, "w", stdout) == NULL) { - fprintf(stderr, "%s: %s: reopen: %s\n", - progname, optarg, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'h': - home = optarg; - break; - case 'k': - keyflag = 1; - break; - case 'l': - lflag = 1; - break; - case 'N': - nflag = 1; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'p': - pflag = 1; - break; - case 's': - subname = optarg; - break; - case 'R': - Rflag = 1; - /* DB_AGGRESSIVE requires DB_SALVAGE */ - /* FALLTHROUGH */ - case 'r': - rflag = 1; - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc != 1) - return (usage()); - - if (dopt != NULL && pflag) { - fprintf(stderr, - "%s: the -d and -p options may not both be specified\n", - progname); - return (EXIT_FAILURE); - } - if (lflag && subname != NULL) { - fprintf(stderr, - "%s: the -l and -s options may not both be specified\n", - progname); - return (EXIT_FAILURE); - } - - if (keyflag && rflag) { - fprintf(stderr, "%s: %s", - "the -k and -r or -R options may not both be specified\n", - progname); - return (EXIT_FAILURE); - } - - if (subname != NULL && rflag) { - fprintf(stderr, "%s: %s", - "the -s and -r or R options may not both be specified\n", - progname); - return (EXIT_FAILURE); - } - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * Create an environment object and initialize it for error - * reporting. 
- */ -retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto err; - } - - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - if (nflag) { - if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING"); - goto err; - } - if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC"); - goto err; - } - } - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, - passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto err; - } - - /* Initialize the environment. */ - if (db_init(dbenv, home, rflag, cache, &private) != 0) - goto err; - - /* Create the DB object and open the file. */ - if ((ret = db_create(&dbp, dbenv, 0)) != 0) { - dbenv->err(dbenv, ret, "db_create"); - goto err; - } - - /* - * If we're salvaging, don't do an open; it might not be safe. - * Dispatch now into the salvager. - */ - if (rflag) { - /* The verify method is a destructor. */ - ret = dbp->verify(dbp, argv[0], NULL, stdout, - DB_SALVAGE | - (Rflag ? DB_AGGRESSIVE : 0) | - (pflag ? DB_PRINTABLE : 0)); - dbp = NULL; - if (ret != 0) - goto err; - goto done; - } - - if ((ret = dbp->open(dbp, NULL, - argv[0], subname, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - dbp->err(dbp, ret, "open: %s", argv[0]); - goto err; - } - if (private != 0) { - if ((ret = __db_util_cache(dbp, &cache, &resize)) != 0) - goto err; - if (resize) { - (void)dbp->close(dbp, 0); - dbp = NULL; - - (void)dbenv->close(dbenv, 0); - dbenv = NULL; - goto retry; - } - } - - if (dopt != NULL) { - if ((ret = __db_dumptree(dbp, dopt, NULL)) != 0) { - dbp->err(dbp, ret, "__db_dumptree: %s", argv[0]); - goto err; - } - } else if (lflag) { - if (is_sub(dbp, &subs)) - goto err; - if (subs == 0) { - dbp->errx(dbp, - "%s: does not contain multiple databases", argv[0]); - goto err; - } - if (show_subs(dbp)) - goto err; - } else { - subs = 0; - if (subname == NULL && is_sub(dbp, &subs)) - goto err; - if (subs) { - if (dump_sub(dbenv, dbp, argv[0], pflag, keyflag)) - goto err; - } else - if (dbp->dump(dbp, NULL, - __db_pr_callback, stdout, pflag, keyflag)) - goto err; - } - - if (0) { -err: exitval = 1; - } -done: if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) { - exitval = 1; - dbenv->err(dbenv, ret, "close"); - } - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -/* - * db_init -- - * Initialize the environment. - */ -int -db_init(dbenv, home, is_salvage, cache, is_privatep) - DB_ENV *dbenv; - char *home; - int is_salvage; - u_int32_t cache; - int *is_privatep; -{ - int ret; - - /* - * Try and use the underlying environment when opening a database. - * We wish to use the buffer pool so our information is as up-to-date - * as possible, even if the mpool cache hasn't been flushed. - * - * If we are not doing a salvage, we want to join the environment; - * if a locking system is present, this will let us use it and be - * safe to run concurrently with other threads of control. (We never - * need to use transactions explicitly, as we're read-only.) 
Note - * that in CDB, too, this will configure our environment - * appropriately, and our cursors will (correctly) do locking as CDB - * read cursors. - * - * If we are doing a salvage, the verification code will protest - * if we initialize transactions, logging, or locking; do an - * explicit DB_INIT_MPOOL to try to join any existing environment - * before we create our own. - */ - *is_privatep = 0; - if ((ret = dbenv->open(dbenv, home, - DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : 0), 0)) == 0) - return (0); - if (ret == DB_VERSION_MISMATCH) - goto err; - - /* - * An environment is required because we may be trying to look at - * databases in directories other than the current one. We could - * avoid using an environment iff the -h option wasn't specified, - * but that seems like more work than it's worth. - * - * No environment exists (or, at least no environment that includes - * an mpool region exists). Create one, but make it private so that - * no files are actually created. - */ - *is_privatep = 1; - if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) == 0 && - (ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0) - return (0); - - /* An environment is required. */ -err: dbenv->err(dbenv, ret, "DB_ENV->open"); - return (1); -} - -/* - * is_sub -- - * Return if the database contains subdatabases. - */ -int -is_sub(dbp, yesno) - DB *dbp; - int *yesno; -{ - DB_BTREE_STAT *btsp; - DB_HASH_STAT *hsp; - int ret; - - switch (dbp->type) { - case DB_BTREE: - case DB_RECNO: - if ((ret = dbp->stat(dbp, NULL, &btsp, DB_FAST_STAT)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - return (ret); - } - *yesno = btsp->bt_metaflags & BTM_SUBDB ? 1 : 0; - free(btsp); - break; - case DB_HASH: - if ((ret = dbp->stat(dbp, NULL, &hsp, DB_FAST_STAT)) != 0) { - dbp->err(dbp, ret, "DB->stat"); - return (ret); - } - *yesno = hsp->hash_metaflags & DB_HASH_SUBDB ? 1 : 0; - free(hsp); - break; - case DB_QUEUE: - break; - case DB_UNKNOWN: - default: - dbp->errx(dbp, "unknown database type"); - return (1); - } - return (0); -} - -/* - * dump_sub -- - * Dump out the records for a DB containing subdatabases. - */ -int -dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag) - DB_ENV *dbenv; - DB *parent_dbp; - char *parent_name; - int pflag, keyflag; -{ - DB *dbp; - DBC *dbcp; - DBT key, data; - int ret; - char *subdb; - - /* - * Get a cursor and step through the database, dumping out each - * subdatabase. - */ - if ((ret = parent_dbp->cursor(parent_dbp, NULL, &dbcp, 0)) != 0) { - dbenv->err(dbenv, ret, "DB->cursor"); - return (1); - } - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) { - /* Nul terminate the subdatabase name. */ - if ((subdb = malloc(key.size + 1)) == NULL) { - dbenv->err(dbenv, ENOMEM, NULL); - return (1); - } - memcpy(subdb, key.data, key.size); - subdb[key.size] = '\0'; - - /* Create the DB object and open the file. 
*/ - if ((ret = db_create(&dbp, dbenv, 0)) != 0) { - dbenv->err(dbenv, ret, "db_create"); - free(subdb); - return (1); - } - if ((ret = dbp->open(dbp, NULL, - parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) - dbp->err(dbp, ret, - "DB->open: %s:%s", parent_name, subdb); - if (ret == 0 && dbp->dump( - dbp, subdb, __db_pr_callback, stdout, pflag, keyflag)) - ret = 1; - (void)dbp->close(dbp, 0); - free(subdb); - if (ret != 0) - return (1); - } - if (ret != DB_NOTFOUND) { - parent_dbp->err(parent_dbp, ret, "DBcursor->get"); - return (1); - } - - if ((ret = dbcp->c_close(dbcp)) != 0) { - parent_dbp->err(parent_dbp, ret, "DBcursor->close"); - return (1); - } - - return (0); -} - -/* - * show_subs -- - * Display the subdatabases for a database. - */ -int -show_subs(dbp) - DB *dbp; -{ - DBC *dbcp; - DBT key, data; - int ret; - - /* - * Get a cursor and step through the database, printing out the key - * of each key/data pair. - */ - if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) { - dbp->err(dbp, ret, "DB->cursor"); - return (1); - } - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) { - if ((ret = dbp->dbenv->prdbt( - &key, 1, NULL, stdout, __db_pr_callback, 0)) != 0) { - dbp->errx(dbp, NULL); - return (1); - } - } - if (ret != DB_NOTFOUND) { - dbp->err(dbp, ret, "DBcursor->get"); - return (1); - } - - if ((ret = dbcp->c_close(dbcp)) != 0) { - dbp->err(dbp, ret, "DBcursor->close"); - return (1); - } - return (0); -} - -/* - * usage -- - * Display the usage message. - */ -int -usage() -{ - (void)fprintf(stderr, "usage: %s [-klNprRV]\n\t%s\n", - progname, - "[-d ahr] [-f output] [-h home] [-P password] [-s database] db_file"); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_dump185/db_dump185.c b/storage/bdb/db_dump185/db_dump185.c deleted file mode 100644 index 0e39c913dd6..00000000000 --- a/storage/bdb/db_dump185/db_dump185.c +++ /dev/null @@ -1,355 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_dump185.c,v 12.1 2005/06/16 20:21:22 bostic Exp $ - */ - -#ifndef lint -static char copyright[] = - "Copyright (c) 1996-2004\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#include - -#include -#include -#include -#include -#include -#include - -#include - -/* Hash Table Information */ -typedef struct hashhdr185 { /* Disk resident portion */ - int magic; /* Magic NO for hash tables */ - int version; /* Version ID */ - u_int32_t lorder; /* Byte Order */ - int bsize; /* Bucket/Page Size */ - int bshift; /* Bucket shift */ - int dsize; /* Directory Size */ - int ssize; /* Segment Size */ - int sshift; /* Segment shift */ - int ovfl_point; /* Where overflow pages are being - * allocated */ - int last_freed; /* Last overflow page freed */ - int max_bucket; /* ID of Maximum bucket in use */ - int high_mask; /* Mask to modulo into entire table */ - int low_mask; /* Mask to modulo into lower half of - * table */ - int ffactor; /* Fill factor */ - int nkeys; /* Number of keys in hash table */ -} HASHHDR185; -typedef struct htab185 { /* Memory resident data structure */ - HASHHDR185 hdr; /* Header */ -} HTAB185; - -/* Hash Table Information */ -typedef struct hashhdr186 { /* Disk resident portion */ - int32_t magic; /* Magic NO for hash tables */ - int32_t version; /* Version ID */ - int32_t lorder; /* Byte Order */ - int32_t bsize; /* Bucket/Page Size */ - int32_t bshift; /* Bucket shift */ - int32_t ovfl_point; /* Where overflow pages are being allocated */ - int32_t last_freed; /* Last overflow page freed */ - int32_t max_bucket; /* ID of Maximum bucket in use */ - int32_t high_mask; /* Mask to modulo into entire table */ - int32_t low_mask; /* Mask to modulo into lower half of table */ - int32_t ffactor; /* Fill factor */ - int32_t nkeys; /* Number of keys in hash table */ - int32_t hdrpages; /* Size of table header */ - int32_t h_charkey; /* value of hash(CHARKEY) */ -#define NCACHED 32 /* number of bit maps and spare points */ - int32_t spares[NCACHED];/* spare pages for overflow */ - /* address of overflow page bitmaps */ - u_int16_t bitmaps[NCACHED]; -} HASHHDR186; -typedef struct htab186 { /* Memory resident data structure */ - void *unused[2]; - HASHHDR186 hdr; /* Header */ -} HTAB186; - -typedef struct _epgno { - u_int32_t pgno; /* the page number */ - u_int16_t index; /* the index on the page */ -} EPGNO; - -typedef struct _epg { - void *page; /* the (pinned) page */ - u_int16_t index; /* the index on the page */ -} EPG; - -typedef struct _cursor { - EPGNO pg; /* B: Saved tree reference. */ - DBT key; /* B: Saved key, or key.data == NULL. */ - u_int32_t rcursor; /* R: recno cursor (1-based) */ - -#define CURS_ACQUIRE 0x01 /* B: Cursor needs to be reacquired. */ -#define CURS_AFTER 0x02 /* B: Unreturned cursor after key. */ -#define CURS_BEFORE 0x04 /* B: Unreturned cursor before key. */ -#define CURS_INIT 0x08 /* RB: Cursor initialized. */ - u_int8_t flags; -} CURSOR; - -/* The in-memory btree/recno data structure. 
*/ -typedef struct _btree { - void *bt_mp; /* memory pool cookie */ - - void *bt_dbp; /* pointer to enclosing DB */ - - EPG bt_cur; /* current (pinned) page */ - void *bt_pinned; /* page pinned across calls */ - - CURSOR bt_cursor; /* cursor */ - - EPGNO bt_stack[50]; /* stack of parent pages */ - EPGNO *bt_sp; /* current stack pointer */ - - DBT bt_rkey; /* returned key */ - DBT bt_rdata; /* returned data */ - - int bt_fd; /* tree file descriptor */ - - u_int32_t bt_free; /* next free page */ - u_int32_t bt_psize; /* page size */ - u_int16_t bt_ovflsize; /* cut-off for key/data overflow */ - int bt_lorder; /* byte order */ - /* sorted order */ - enum { NOT, BACK, FORWARD } bt_order; - EPGNO bt_last; /* last insert */ - - /* B: key comparison function */ - int (*bt_cmp) __P((DBT *, DBT *)); - /* B: prefix comparison function */ - size_t (*bt_pfx) __P((DBT *, DBT *)); - /* R: recno input function */ - int (*bt_irec) __P((struct _btree *, u_int32_t)); - - FILE *bt_rfp; /* R: record FILE pointer */ - int bt_rfd; /* R: record file descriptor */ - - void *bt_cmap; /* R: current point in mapped space */ - void *bt_smap; /* R: start of mapped space */ - void *bt_emap; /* R: end of mapped space */ - size_t bt_msize; /* R: size of mapped region. */ - - u_int32_t bt_nrecs; /* R: number of records */ - size_t bt_reclen; /* R: fixed record length */ - u_char bt_bval; /* R: delimiting byte/pad character */ - -/* - * NB: - * B_NODUPS and R_RECNO are stored on disk, and may not be changed. - */ -#define B_INMEM 0x00001 /* in-memory tree */ -#define B_METADIRTY 0x00002 /* need to write metadata */ -#define B_MODIFIED 0x00004 /* tree modified */ -#define B_NEEDSWAP 0x00008 /* if byte order requires swapping */ -#define B_RDONLY 0x00010 /* read-only tree */ - -#define B_NODUPS 0x00020 /* no duplicate keys permitted */ -#define R_RECNO 0x00080 /* record oriented tree */ - -#define R_CLOSEFP 0x00040 /* opened a file pointer */ -#define R_EOF 0x00100 /* end of input file reached. */ -#define R_FIXLEN 0x00200 /* fixed length records */ -#define R_MEMMAPPED 0x00400 /* memory mapped file. */ -#define R_INMEM 0x00800 /* in-memory file */ -#define R_MODIFIED 0x01000 /* modified file */ -#define R_RDONLY 0x02000 /* read-only file */ - -#define B_DB_LOCK 0x04000 /* DB_LOCK specified. */ -#define B_DB_SHMEM 0x08000 /* DB_SHMEM specified. */ -#define B_DB_TXN 0x10000 /* DB_TXN specified. */ - u_int32_t flags; -} BTREE; - -void db_btree __P((DB *, int)); -void db_hash __P((DB *, int)); -void dbt_dump __P((DBT *)); -void dbt_print __P((DBT *)); -int main __P((int, char *[])); -int usage __P((void)); - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB *dbp; - DBT key, data; - int ch, pflag, rval; - - pflag = 0; - while ((ch = getopt(argc, argv, "f:p")) != EOF) - switch (ch) { - case 'f': - if (freopen(optarg, "w", stdout) == NULL) { - fprintf(stderr, "db_dump185: %s: %s\n", - optarg, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'p': - pflag = 1; - break; - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc != 1) - return (usage()); - - if ((dbp = dbopen(argv[0], O_RDONLY, 0, DB_BTREE, NULL)) == NULL) { - if ((dbp = - dbopen(argv[0], O_RDONLY, 0, DB_HASH, NULL)) == NULL) { - fprintf(stderr, - "db_dump185: %s: %s\n", argv[0], strerror(errno)); - return (EXIT_FAILURE); - } - db_hash(dbp, pflag); - } else - db_btree(dbp, pflag); - - /* - * !!! 
- * DB 1.85 DBTs are a subset of DB 2.0 DBTs, so we just use the - * new dump/print routines. - */ - if (pflag) - while (!(rval = dbp->seq(dbp, &key, &data, R_NEXT))) { - dbt_print(&key); - dbt_print(&data); - } - else - while (!(rval = dbp->seq(dbp, &key, &data, R_NEXT))) { - dbt_dump(&key); - dbt_dump(&data); - } - - if (rval == -1) { - fprintf(stderr, "db_dump185: seq: %s\n", strerror(errno)); - return (EXIT_FAILURE); - } - return (EXIT_SUCCESS); -} - -/* - * db_hash -- - * Dump out hash header information. - */ -void -db_hash(dbp, pflag) - DB *dbp; - int pflag; -{ - HTAB185 *hash185p; - HTAB186 *hash186p; - - printf("format=%s\n", pflag ? "print" : "bytevalue"); - printf("type=hash\n"); - - /* DB 1.85 was version 2, DB 1.86 was version 3. */ - hash185p = dbp->internal; - if (hash185p->hdr.version > 2) { - hash186p = dbp->internal; - printf("h_ffactor=%lu\n", (u_long)hash186p->hdr.ffactor); - if (hash186p->hdr.lorder != 0) - printf("db_lorder=%lu\n", (u_long)hash186p->hdr.lorder); - printf("db_pagesize=%lu\n", (u_long)hash186p->hdr.bsize); - } else { - printf("h_ffactor=%lu\n", (u_long)hash185p->hdr.ffactor); - if (hash185p->hdr.lorder != 0) - printf("db_lorder=%lu\n", (u_long)hash185p->hdr.lorder); - printf("db_pagesize=%lu\n", (u_long)hash185p->hdr.bsize); - } - printf("HEADER=END\n"); -} - -/* - * db_btree -- - * Dump out btree header information. - */ -void -db_btree(dbp, pflag) - DB *dbp; - int pflag; -{ - BTREE *btp; - - btp = dbp->internal; - - printf("format=%s\n", pflag ? "print" : "bytevalue"); - printf("type=btree\n"); -#ifdef NOT_AVAILABLE_IN_185 - printf("bt_minkey=%lu\n", (u_long)XXX); - printf("bt_maxkey=%lu\n", (u_long)XXX); -#endif - if (btp->bt_lorder != 0) - printf("db_lorder=%lu\n", (u_long)btp->bt_lorder); - printf("db_pagesize=%lu\n", (u_long)btp->bt_psize); - if (!(btp->flags & B_NODUPS)) - printf("duplicates=1\n"); - printf("HEADER=END\n"); -} - -static char hex[] = "0123456789abcdef"; - -/* - * dbt_dump -- - * Write out a key or data item using byte values. - */ -void -dbt_dump(dbtp) - DBT *dbtp; -{ - size_t len; - u_int8_t *p; - - for (len = dbtp->size, p = dbtp->data; len--; ++p) - (void)printf("%c%c", - hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]); - printf("\n"); -} - -/* - * dbt_print -- - * Write out a key or data item using printable characters. - */ -void -dbt_print(dbtp) - DBT *dbtp; -{ - size_t len; - u_int8_t *p; - - for (len = dbtp->size, p = dbtp->data; len--; ++p) - if (isprint((int)*p)) { - if (*p == '\\') - (void)printf("\\"); - (void)printf("%c", *p); - } else - (void)printf("\\%c%c", - hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]); - printf("\n"); -} - -/* - * usage -- - * Display the usage message. - */ -int -usage() -{ - (void)fprintf(stderr, "usage: db_dump185 [-p] [-f file] db_file\n"); - return (EXIT_FAILURE); -} diff --git a/storage/bdb/db_hotbackup/db_hotbackup.c b/storage/bdb/db_hotbackup/db_hotbackup.c deleted file mode 100644 index b96de7a4165..00000000000 --- a/storage/bdb/db_hotbackup/db_hotbackup.c +++ /dev/null @@ -1,708 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_hotbackup.c,v 1.16 2005/10/27 01:25:54 mjc Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include -#include - -#include -#include -#include -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/log.h" - -enum which_open { OPEN_ORIGINAL, OPEN_HOT_BACKUP }; - -int backup_dir_clean __P((DB_ENV *, char *, int *, int, int)); -int data_copy __P((DB_ENV *, char *, char *, char *, int)); -int env_init __P((DB_ENV **, char *, char *, char *, enum which_open)); -int main __P((int, char *[])); -int read_data_dir __P((DB_ENV *, char *, char *, int)); -int read_log_dir __P((DB_ENV *, char *, char *, int *, int, int)); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - time_t now; - DB_ENV *dbenv; - u_int data_cnt, data_next; - int ch, checkpoint, copy_min, exitval, remove_max, ret, update, verbose; - char *backup_dir, **data_dir, **dir, *home, *log_dir, *passwd; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - checkpoint = data_cnt = data_next = exitval = update = verbose = 0; - data_dir = NULL; - backup_dir = home = log_dir = passwd = NULL; - copy_min = remove_max = 0; - while ((ch = getopt(argc, argv, "b:cd:h:l:P:uVv")) != EOF) - switch (ch) { - case 'b': - backup_dir = optarg; - break; - case 'c': - checkpoint = 1; - break; - case 'd': - /* - * User can specify a list of directories -- keep an - * array, leaving room for the trailing NULL. - */ - if (data_dir == NULL || data_next >= data_cnt - 2) { - data_cnt = data_cnt == 0 ? 20 : data_cnt * 2; - if ((data_dir = realloc(data_dir, - data_cnt * sizeof(*data_dir))) == NULL) { - fprintf(stderr, "%s: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - } - data_dir[data_next++] = optarg; - break; - case 'h': - home = optarg; - break; - case 'l': - log_dir = optarg; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'u': - update = 1; - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'v': - verbose = 1; - break; - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc != 0) - return (usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * The home directory defaults to the environment variable DB_HOME. - * The log directory defaults to the home directory. - * - * We require a source database environment directory and a target - * backup directory. - */ - if (home == NULL) - home = getenv("DB_HOME"); - if (home == NULL) { - fprintf(stderr, - "%s: no source database environment specified\n", progname); - return (usage()); - } - if (log_dir == NULL) - log_dir = home; - if (backup_dir == NULL) { - fprintf(stderr, - "%s: no target backup directory specified\n", progname); - return (usage()); - } - - /* NULL-terminate any list of data directories. */ - if (data_dir != NULL) - data_dir[data_next] = NULL; - - if (verbose) { - (void)time(&now); - printf("%s: hot backup started at %s", progname, ctime(&now)); - } - - /* Open the source environment. 
*/ - if ((ret = env_init(&dbenv, home, log_dir, passwd, OPEN_ORIGINAL)) != 0) - goto shutdown; - - /* - * If the -c option is specified, checkpoint the source home - * database environment, and remove any unnecessary log files. - */ - if (checkpoint) { - if (verbose) - printf("%s: %s: force checkpoint\n", progname, home); - if ((ret = - dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->txn_checkpoint"); - goto shutdown; - } - if (!update) { - if (verbose) - printf("%s: %s: remove unnecessary log files\n", - progname, home); - if ((ret = dbenv->log_archive(dbenv, - NULL, DB_ARCH_REMOVE)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->log_archive"); - goto shutdown; - } - } - } - - /* - * If the target directory for the backup does not exist, create it - * with mode read-write-execute for the owner. Ignore errors here, - * it's simpler and more portable to just always try the create. If - * there's a problem, we'll fail with reasonable errors later. - */ - (void)__os_mkdir(NULL, backup_dir, __db_omode("rwx------")); - - /* - * If the target directory for the backup does exist and the -u option - * was specified, all log files in the target directory are removed; - * if the -u option was not specified, all files in the target directory - * are removed. - */ - if ((ret = backup_dir_clean( - dbenv, backup_dir, &remove_max, update, verbose)) != 0) - goto shutdown; - - /* - * If the -u option was not specified, copy all database files found in - * the database environment home directory, or any directory specified - * using the -d option, into the target directory for the backup. - */ - if (!update) { - if (read_data_dir(dbenv, backup_dir, home, verbose) != 0) - goto shutdown; - if (data_dir != NULL) - for (dir = &data_dir[0]; *dir != NULL; ++dir) - if (read_data_dir( - dbenv, backup_dir, *dir, verbose) != 0) - goto shutdown; - } - - /* - * Copy all log files found in the directory specified by the -l option - * (or in the database environment home directory, if no -l option was - * specified), into the target directory for the backup. - * - * The log directory defaults to the home directory. - */ - if (read_log_dir(dbenv, - backup_dir, log_dir, &copy_min, update, verbose) != 0) - goto shutdown; - - /* - * If we're updating a snapshot, the lowest-numbered log file copied - * into the backup directory should be less than, or equal to, the - * highest-numbered log file removed from the backup directory during - * cleanup. - */ - if (update && remove_max < copy_min && - !(remove_max == 0 && copy_min == 1)) { - fprintf(stderr, - "%s: the largest log file removed (%d) must be greater\n", - progname, remove_max); - fprintf(stderr, - "%s: than or equal the smallest log file copied (%d)\n", - progname, copy_min); - goto shutdown; - } - - /* Close the source environment. */ - if ((ret = dbenv->close(dbenv, 0)) != 0) { - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - dbenv = NULL; - goto shutdown; - } - /* Perform catastrophic recovery on the hot backup. */ - if (verbose) - printf("%s: %s: run catastrophic recovery\n", - progname, backup_dir); - if ((ret = env_init( - &dbenv, backup_dir, NULL, passwd, OPEN_HOT_BACKUP)) != 0) - goto shutdown; - - /* - * Remove any unnecessary log files from the hot backup.
- */ - if (verbose) - printf("%s: %s: remove unnecessary log files\n", - progname, backup_dir); - if ((ret = - dbenv->log_archive(dbenv, NULL, DB_ARCH_REMOVE)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->log_archive"); - goto shutdown; - } - - if (0) { -shutdown: exitval = 1; - } - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (data_dir != NULL) - free(data_dir); - if (passwd != NULL) - free(passwd); - - if (exitval == 0) { - if (verbose) { - (void)time(&now); - printf("%s: hot backup completed at %s", - progname, ctime(&now)); - } - } else { - fprintf(stderr, "%s: HOT BACKUP FAILED!\n", progname); - } - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); - -} - -/* - * env_init -- - * Open a database environment. - */ -int -env_init(dbenvp, home, log_dir, passwd, which) - DB_ENV **dbenvp; - char *home, *log_dir, *passwd; - enum which_open which; -{ - DB_ENV *dbenv; - int ret; - - *dbenvp = NULL; - - /* - * Create an environment object and initialize it for error reporting. - */ - if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - return (1); - } - - dbenv->set_errfile(dbenv, stderr); - setbuf(stderr, NULL); - dbenv->set_errpfx(dbenv, progname); - setvbuf(stdout, NULL, _IOLBF, 0); - - /* - * If a log directory has been specified, and it's not the same as the - * home directory, set it for the environment. - */ - if (log_dir != NULL && log_dir != home && - (ret = dbenv->set_lg_dir(dbenv, log_dir)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->set_lg_dir: %s", log_dir); - return (1); - } - - /* Optionally set the password. */ - if (passwd != NULL && - (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->set_encrypt"); - return (1); - } - - switch (which) { - case OPEN_ORIGINAL: - /* - * Opening the database environment we're trying to back up. - * We try to attach to a pre-existing environment; if that - * fails, we create a private environment and try again. - */ - if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0 && - (ret == DB_VERSION_MISMATCH || - (ret = dbenv->open(dbenv, home, DB_CREATE | - DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, - 0)) != 0)) { - dbenv->err(dbenv, ret, "DB_ENV->open: %s", home); - return (1); - } - break; - case OPEN_HOT_BACKUP: - /* - * Opening the backup copy of the database environment. We - * better be the only user, we're running recovery. - */ - if ((ret = dbenv->open(dbenv, home, DB_CREATE | - DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_PRIVATE | - DB_RECOVER_FATAL | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->open: %s", home); - return (1); - } - break; - } - - *dbenvp = dbenv; - return (0); -} - -/* - * backup_dir_clean -- - * Clean out the backup directory. - */ -int -backup_dir_clean(dbenv, backup_dir, remove_maxp, update, verbose) - DB_ENV *dbenv; - char *backup_dir; - int *remove_maxp, update, verbose; -{ - int cnt, fcnt, ret, v; - char **names; - char buf[2048]; /* MAXPATHLEN is too hard to find. */ - - /* Get a list of file names. */ - if ((ret = __os_dirlist(dbenv, backup_dir, &names, &fcnt)) != 0) { - dbenv->err(dbenv, ret, "%s: directory read", backup_dir); - return (1); - } - for (cnt = fcnt; --cnt >= 0;) { - /* - * Skip ".", ".." and log files (if update wasn't specified). 
- */ - if (!strcmp(names[cnt], ".") || !strcmp(names[cnt], "..")) - continue; - if (strncmp(names[cnt], LFPREFIX, sizeof(LFPREFIX) - 1)) { - if (update) - continue; - } else { - /* Track the highest-numbered log file removed. */ - v = atoi(names[cnt] + sizeof(LFPREFIX) - 1); - if (*remove_maxp < v) - *remove_maxp = v; - } - if ((size_t)snprintf(buf, sizeof(buf), - "%s/%s", backup_dir, names[cnt]) == sizeof(buf)) { - dbenv->err(dbenv, ret, - "%s/%s: path too long", backup_dir, names[cnt]); - return (1); - } - if (verbose) - printf("%s: removing %s\n", progname, buf); - if ((ret = remove(buf)) != 0) { - dbenv->err(dbenv, ret, "%s: remove", buf); - return (1); - } - } - - __os_dirfree(dbenv, names, fcnt); - - if (verbose && *remove_maxp != 0) - printf("%s: highest numbered log file removed: %d\n", - progname, *remove_maxp); - - return (0); -} - -/* - * read_data_dir -- - * Read a directory looking for databases to copy. - */ -int -read_data_dir(dbenv, backup_dir, dir, verbose) - DB_ENV *dbenv; - char *backup_dir, *dir; - int verbose; -{ - int cnt, fcnt, ret; - char **names; - char buf[2048]; /* MAXPATHLEN is too hard to find. */ - - /* Get a list of file names. */ - if ((ret = __os_dirlist(dbenv, dir, &names, &fcnt)) != 0) { - dbenv->err(dbenv, ret, "%s: directory read", dir); - return (1); - } - for (cnt = fcnt; --cnt >= 0;) { - /* - * Skip ".", ".." and files in DB's name space (but not Queue - * extent files, we need them). - */ - if (!strcmp(names[cnt], ".") || !strcmp(names[cnt], "..")) - continue; - if (!strncmp(names[cnt], LFPREFIX, sizeof(LFPREFIX) - 1)) - continue; - if (!strncmp(names[cnt], - DB_REGION_PREFIX, sizeof(DB_REGION_PREFIX) - 1)) - continue; - - /* Build a path name to the source. */ - if ((size_t)snprintf(buf, sizeof(buf), - "%s/%s", dir, names[cnt]) == sizeof(buf)) { - dbenv->errx(dbenv, - "%s/%s: path too long", dir, names[cnt]); - return (1); - } - - /* Copy the file. */ - if ((ret = data_copy( - dbenv, buf, backup_dir, names[cnt], verbose)) != 0) - return (1); - } - - __os_dirfree(dbenv, names, fcnt); - - return (0); -} - -/* - * read_log_dir -- - * Read a directory looking for log files to copy. - */ -int -read_log_dir(dbenv, backup_dir, log_dir, copy_minp, update, verbose) - DB_ENV *dbenv; - char *backup_dir, *log_dir; - int *copy_minp, update, verbose; -{ - int aflag, ret, v; - char **begin, **names; - char from[2048], to[2048]; /* MAXPATHLEN is too hard to find. */ - -again: aflag = DB_ARCH_LOG; - - /* - * If this is an update and we are deleting files, first process - * those files that can be removed, then repeat with the rest. - */ - if (update) - aflag = 0; - /* Get a list of file names to be copied. */ - if ((ret = dbenv->log_archive(dbenv, &names, aflag)) != 0) { - dbenv->err(dbenv, ret, "%s: log_archive", log_dir); - return (1); - } - if (names == NULL) - goto done; - begin = names; - for (; *names != NULL; names++) { - /* Track the lowest-numbered log file copied. */ - v = atoi(*names + sizeof(LFPREFIX) - 1); - if (*copy_minp == 0 || *copy_minp > v) - *copy_minp = v; - - /* Build a path name to the source. */ - if ((size_t)snprintf(from, sizeof(from), - "%s/%s", log_dir, *names) == sizeof(from)) { - dbenv->errx(dbenv, - "%s/%s: path too long", log_dir, *names); - return (1); - } - - /* - * If we're going to remove the file, attempt to rename the - * instead of copying and then removing. The likely failure - * is EXDEV (source and destination are on different volumes). - * Fall back to a copy, regardless of the error. 
We don't - * worry about partial contents, the copy truncates the file - * on open. - */ - if (update) { - if ((size_t)snprintf(to, sizeof(to), - "%s/%s", backup_dir, *names) == sizeof(to)) { - dbenv->errx(dbenv, - "%s/%s: path too long", backup_dir, *names); - return (1); - } - if (rename(from, to) == 0) { - if (verbose) - printf("%s: moving %s to %s\n", - progname, from, to); - continue; - } - } - - /* Copy the file. */ - if ((ret = data_copy(dbenv, - from, backup_dir, *names, verbose)) != 0) - return (1); - - if (update) { - if (verbose) - printf("%s: removing %s\n", progname, from); - if ((ret = __os_unlink(dbenv, from)) != 0) { - dbenv->err(dbenv, ret, - "unlink of %s failed", from); - return (1); - } - } - - } - - free(begin); -done: if (update) { - update = 0; - goto again; - } - - if (verbose && *copy_minp != 0) - printf("%s: lowest numbered log file copied: %d\n", - progname, *copy_minp); - - return (0); -} - -/* - * data_copy -- - * Copy a file into the backup directory. - */ -int -data_copy(dbenv, from, to_dir, to_file, verbose) - DB_ENV *dbenv; - char *from, *to_dir, *to_file; - int verbose; -{ - ssize_t nr, nw; - size_t offset; - int ret, rfd, wfd; - char *buf, *taddr; - - ret = 0; - rfd = wfd = -1; - - if (verbose) - printf("%s: copying %s to %s/%s\n", - progname, from, to_dir, to_file); - - /* - * We MUST copy multiples of the page size, atomically, to ensure a - * database page is not updated by another thread of control during - * the copy. - * - * !!! - * The current maximum page size for Berkeley DB is 64KB; we will have - * to increase this value if the maximum page size is ever more than a - * megabyte - */ - if ((buf = malloc(MEGABYTE)) == NULL) { - dbenv->err(dbenv, - errno, "%lu buffer allocation", (u_long)MEGABYTE); - return (1); - } - - /* Open the input file. */ - if ((rfd = open(from, O_RDONLY, 0)) == -1) { - dbenv->err(dbenv, errno, "%s", from); - goto err; - } - - /* Open the output file. */ - if ((u_int32_t)snprintf( - buf, MEGABYTE, "%s/%s", to_dir, to_file) == MEGABYTE) { - dbenv->errx(dbenv, "%s/%s: path too long", to_dir, to_file); - goto err; - } - if ((wfd = open( - buf, O_CREAT | O_TRUNC | O_WRONLY, __db_omode(OWNER_RW))) == -1) - goto err; - - /* Copy the data. */ - while ((nr = read(rfd, buf, MEGABYTE)) > 0) - for (taddr = buf, offset = 0; - offset < (size_t)nr; taddr += nw, offset += (size_t)nw) { - RETRY_CHK(((nw = write(wfd, - taddr, (u_int)(nr - offset))) < 0 ? 1 : 0), ret); - if (ret != 0) - break; - } - if (nr == -1) { - dbenv->err(dbenv, errno, "%s: read", from); - goto err; - } - - if (ret != 0) { - dbenv->err(dbenv, errno, "%s: write %s/%s", to_dir, to_file); - goto err; - } - - if (0) { -err: ret = 1; - } - if (buf != NULL) - free(buf); - - if (rfd != -1) - (void)close(rfd); - - /* We may be running on a remote filesystem; force the flush. */ - if (wfd != -1 && (fsync(wfd) != 0 || close(wfd) != 0)) { - dbenv->err(dbenv, - errno, "%s: fsync %s/%s", to_dir, to_file); - ret = 1; - } - return (ret); -} - -int -usage() -{ - (void)fprintf(stderr, "usage: %s [-cuVv]\n\t%s\n", progname, - "[-d data_dir ...] [-h home] [-l log_dir] [-P password] -b backup_dir"); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. 
*/ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_load/db_load.c b/storage/bdb/db_load/db_load.c deleted file mode 100644 index c47bd585452..00000000000 --- a/storage/bdb/db_load/db_load.c +++ /dev/null @@ -1,1321 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_load.c,v 12.8 2005/06/16 20:21:23 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" - -typedef struct { /* XXX: Globals. */ - const char *progname; /* Program name. */ - char *hdrbuf; /* Input file header. */ - u_long lineno; /* Input file line number. */ - u_long origline; /* Original file line number. */ - int endodata; /* Reached the end of a database. */ - int endofile; /* Reached the end of the input. */ - int version; /* Input version. */ - char *home; /* Env home. */ - char *passwd; /* Env passwd. */ - int private; /* Private env. */ - u_int32_t cache; /* Env cache size. */ -} LDG; - -void badend __P((DB_ENV *)); -void badnum __P((DB_ENV *)); -int configure __P((DB_ENV *, DB *, char **, char **, int *)); -int convprintable __P((DB_ENV *, char *, char **)); -int db_init __P((DB_ENV *, char *, u_int32_t, int *)); -int dbt_rdump __P((DB_ENV *, DBT *)); -int dbt_rprint __P((DB_ENV *, DBT *)); -int dbt_rrecno __P((DB_ENV *, DBT *, int)); -int dbt_to_recno __P((DB_ENV *, DBT *, db_recno_t *)); -int digitize __P((DB_ENV *, int, int *)); -int env_create __P((DB_ENV **, LDG *)); -int load __P((DB_ENV *, char *, DBTYPE, char **, u_int, LDG *, int *)); -int main __P((int, char *[])); -int rheader __P((DB_ENV *, DB *, DBTYPE *, char **, int *, int *)); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -#define G(f) ((LDG *)dbenv->app_private)->f - - /* Flags to the load function. */ -#define LDF_NOHEADER 0x01 /* No dump header. */ -#define LDF_NOOVERWRITE 0x02 /* Don't overwrite existing rows. */ -#define LDF_PASSWORD 0x04 /* Encrypt created databases. */ - -int -main(argc, argv) - int argc; - char *argv[]; -{ - enum { NOTSET, FILEID_RESET, LSN_RESET, STANDARD_LOAD } mode; - extern char *optarg; - extern int optind; - DBTYPE dbtype; - DB_ENV *dbenv; - LDG ldg; - u_int ldf; - int ch, existed, exitval, ret; - char **clist, **clp; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - ldg.progname = progname; - ldg.lineno = 0; - ldg.endodata = ldg.endofile = 0; - ldg.version = 1; - ldg.cache = MEGABYTE; - ldg.hdrbuf = NULL; - ldg.home = NULL; - ldg.passwd = NULL; - - mode = NOTSET; - ldf = 0; - exitval = existed = 0; - dbtype = DB_UNKNOWN; - - /* Allocate enough room for configuration arguments. 
*/ - if ((clp = clist = - (char **)calloc((size_t)argc + 1, sizeof(char *))) == NULL) { - fprintf(stderr, "%s: %s\n", ldg.progname, strerror(ENOMEM)); - return (EXIT_FAILURE); - } - - /* - * There are two modes for db_load: -r and everything else. The -r - * option zeroes out the database LSN's or resets the file ID, it - * doesn't really "load" a new database. The functionality is in - * db_load because we don't have a better place to put it, and we - * don't want to create a new utility for just that functionality. - */ - while ((ch = getopt(argc, argv, "c:f:h:nP:r:Tt:V")) != EOF) - switch (ch) { - case 'c': - if (mode != NOTSET && mode != STANDARD_LOAD) - return (usage()); - mode = STANDARD_LOAD; - - *clp++ = optarg; - break; - case 'f': - if (mode != NOTSET && mode != STANDARD_LOAD) - return (usage()); - mode = STANDARD_LOAD; - - if (freopen(optarg, "r", stdin) == NULL) { - fprintf(stderr, "%s: %s: reopen: %s\n", - ldg.progname, optarg, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'h': - ldg.home = optarg; - break; - case 'n': - if (mode != NOTSET && mode != STANDARD_LOAD) - return (usage()); - mode = STANDARD_LOAD; - - ldf |= LDF_NOOVERWRITE; - break; - case 'P': - ldg.passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (ldg.passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - ldg.progname, strerror(errno)); - return (EXIT_FAILURE); - } - ldf |= LDF_PASSWORD; - break; - case 'r': - if (mode == STANDARD_LOAD) - return (usage()); - if (strcmp(optarg, "lsn") == 0) - mode = LSN_RESET; - else if (strcmp(optarg, "fileid") == 0) - mode = FILEID_RESET; - else - return (usage()); - break; - case 'T': - if (mode != NOTSET && mode != STANDARD_LOAD) - return (usage()); - mode = STANDARD_LOAD; - - ldf |= LDF_NOHEADER; - break; - case 't': - if (mode != NOTSET && mode != STANDARD_LOAD) - return (usage()); - mode = STANDARD_LOAD; - - if (strcmp(optarg, "btree") == 0) { - dbtype = DB_BTREE; - break; - } - if (strcmp(optarg, "hash") == 0) { - dbtype = DB_HASH; - break; - } - if (strcmp(optarg, "recno") == 0) { - dbtype = DB_RECNO; - break; - } - if (strcmp(optarg, "queue") == 0) { - dbtype = DB_QUEUE; - break; - } - return (usage()); - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc != 1) - return (usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * Create an environment object initialized for error reporting, and - * then open it. - */ - if (env_create(&dbenv, &ldg) != 0) - goto shutdown; - - /* If we're resetting the LSNs, that's an entirely separate path. */ - switch (mode) { - case FILEID_RESET: - exitval = dbenv->fileid_reset( - dbenv, argv[0], ldf & LDF_PASSWORD ? DB_ENCRYPT : 0); - break; - case LSN_RESET: - exitval = dbenv->lsn_reset( - dbenv, argv[0], ldf & LDF_PASSWORD ? DB_ENCRYPT : 0); - break; - case NOTSET: - case STANDARD_LOAD: - while (!ldg.endofile) - if (load(dbenv, argv[0], dbtype, clist, ldf, - &ldg, &existed) != 0) - goto shutdown; - break; - } - - if (0) { -shutdown: exitval = 1; - } - if ((ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", ldg.progname, db_strerror(ret)); - } - - /* Resend any caught signal. */ - __db_util_sigresend(); - free(clist); - if (ldg.passwd != NULL) - free(ldg.passwd); - - /* - * Return 0 on success, 1 if keys existed already, and 2 on failure. 
- * - * Technically, this is wrong, because exit of anything other than - * 0 is implementation-defined by the ANSI C standard. I don't see - * any good solutions that don't involve API changes. - */ - return (exitval == 0 ? (existed == 0 ? 0 : 1) : 2); -} - -/* - * load -- - * Load a database. - */ -int -load(dbenv, name, argtype, clist, flags, ldg, existedp) - DB_ENV *dbenv; - char *name, **clist; - DBTYPE argtype; - u_int flags; - LDG *ldg; - int *existedp; -{ - DB *dbp; - DBT key, rkey, data, *readp, *writep; - DBTYPE dbtype; - DB_TXN *ctxn, *txn; - db_recno_t recno, datarecno; - u_int32_t put_flags; - int ascii_recno, checkprint, hexkeys, keyflag, keys, resize, ret, rval; - char *subdb; - - put_flags = LF_ISSET(LDF_NOOVERWRITE) ? DB_NOOVERWRITE : 0; - G(endodata) = 0; - - subdb = NULL; - ctxn = txn = NULL; - memset(&key, 0, sizeof(DBT)); - memset(&data, 0, sizeof(DBT)); - memset(&rkey, 0, sizeof(DBT)); - -retry_db: - dbtype = DB_UNKNOWN; - keys = -1; - hexkeys = -1; - keyflag = -1; - - /* Create the DB object. */ - if ((ret = db_create(&dbp, dbenv, 0)) != 0) { - dbenv->err(dbenv, ret, "db_create"); - goto err; - } - - /* Read the header -- if there's no header, we expect flat text. */ - if (LF_ISSET(LDF_NOHEADER)) { - checkprint = 1; - dbtype = argtype; - } else { - if (rheader(dbenv, - dbp, &dbtype, &subdb, &checkprint, &keys) != 0) - goto err; - if (G(endofile)) - goto done; - } - - /* - * Apply command-line configuration changes. (We apply command-line - * configuration changes to all databases that are loaded, e.g., all - * subdatabases.) - */ - if (configure(dbenv, dbp, clist, &subdb, &keyflag)) - goto err; - - if (keys != 1) { - if (keyflag == 1) { - dbp->err(dbp, EINVAL, "No keys specified in file"); - goto err; - } - } - else if (keyflag == 0) { - dbp->err(dbp, EINVAL, "Keys specified in file"); - goto err; - } - else - keyflag = 1; - - if (dbtype == DB_BTREE || dbtype == DB_HASH) { - if (keyflag == 0) - dbp->err(dbp, - EINVAL, "Btree and Hash must specify keys"); - else - keyflag = 1; - } - - if (argtype != DB_UNKNOWN) { - - if (dbtype == DB_RECNO || dbtype == DB_QUEUE) - if (keyflag != 1 && argtype != DB_RECNO && - argtype != DB_QUEUE) { - dbenv->errx(dbenv, - "improper database type conversion specified"); - goto err; - } - dbtype = argtype; - } - - if (dbtype == DB_UNKNOWN) { - dbenv->errx(dbenv, "no database type specified"); - goto err; - } - - if (keyflag == -1) - keyflag = 0; - - /* - * Recno keys have only been printed in hexadecimal starting - * with db_dump format version 3 (DB 3.2). - * - * !!! - * Note that version is set in rheader(), which must be called before - * this assignment. - */ - hexkeys = (G(version) >= 3 && keyflag == 1 && checkprint == 0); - - if (keyflag == 1 && (dbtype == DB_RECNO || dbtype == DB_QUEUE)) - ascii_recno = 1; - else - ascii_recno = 0; - - /* If configured with a password, encrypt databases we create. */ - if (LF_ISSET(LDF_PASSWORD) && - (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) { - dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT"); - goto err; - } - -#if 0 - Set application-specific btree comparison or hash functions here. - For example: - - if ((ret = dbp->set_bt_compare(dbp, local_comparison_func)) != 0) { - dbp->err(dbp, ret, "DB->set_bt_compare"); - goto err; - } - if ((ret = dbp->set_h_hash(dbp, local_hash_func)) != 0) { - dbp->err(dbp, ret, "DB->set_h_hash"); - goto err; - } -#endif - - /* Open the DB file. */ - if ((ret = dbp->open(dbp, NULL, name, subdb, dbtype, - DB_CREATE | (TXN_ON(dbenv) ? 
DB_AUTO_COMMIT : 0), - __db_omode("rw-rw-rw-"))) != 0) { - dbp->err(dbp, ret, "DB->open: %s", name); - goto err; - } - if (ldg->private != 0) { - if ((ret = __db_util_cache(dbp, &ldg->cache, &resize)) != 0) - goto err; - if (resize) { - if ((ret = dbp->close(dbp, 0)) != 0) - goto err; - dbp = NULL; - if ((ret = dbenv->close(dbenv, 0)) != 0) - goto err; - if ((ret = env_create(&dbenv, ldg)) != 0) - goto err; - goto retry_db; - } - } - - /* Initialize the key/data pair. */ - readp = writep = &key; - if (dbtype == DB_RECNO || dbtype == DB_QUEUE) { - key.size = sizeof(recno); - if (keyflag) { - key.data = &datarecno; - if (checkprint) { - readp = &rkey; - goto key_data; - } - } else - key.data = &recno; - } else -key_data: if ((readp->data = malloc(readp->ulen = 1024)) == NULL) { - dbenv->err(dbenv, ENOMEM, NULL); - goto err; - } - if ((data.data = malloc(data.ulen = 1024)) == NULL) { - dbenv->err(dbenv, ENOMEM, NULL); - goto err; - } - - if (TXN_ON(dbenv) && - (ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0) - goto err; - - /* Get each key/data pair and add them to the database. */ - for (recno = 1; !__db_util_interrupted(); ++recno) { - if (!keyflag) { - if (checkprint) { - if (dbt_rprint(dbenv, &data)) - goto err; - } else { - if (dbt_rdump(dbenv, &data)) - goto err; - } - } else { - if (checkprint) { - if (dbt_rprint(dbenv, readp)) - goto err; - if (ascii_recno && - dbt_to_recno(dbenv, readp, &datarecno) != 0) - goto err; - - if (!G(endodata) && dbt_rprint(dbenv, &data)) - goto odd_count; - } else { - if (ascii_recno) { - if (dbt_rrecno(dbenv, readp, hexkeys)) - goto err; - } else - if (dbt_rdump(dbenv, readp)) - goto err; - - if (!G(endodata) && dbt_rdump(dbenv, &data)) { -odd_count: dbenv->errx(dbenv, - "odd number of key/data pairs"); - goto err; - } - } - } - if (G(endodata)) - break; -retry: if (txn != NULL) - if ((ret = dbenv->txn_begin(dbenv, txn, &ctxn, 0)) != 0) - goto err; - switch (ret = dbp->put(dbp, ctxn, writep, &data, put_flags)) { - case 0: - if (ctxn != NULL) { - if ((ret = - ctxn->commit(ctxn, DB_TXN_NOSYNC)) != 0) - goto err; - ctxn = NULL; - } - break; - case DB_KEYEXIST: - *existedp = 1; - dbenv->errx(dbenv, - "%s: line %d: key already exists, not loaded:", - name, - !keyflag ? recno : recno * 2 - 1); - - (void)dbenv->prdbt(&key, - checkprint, 0, stderr, __db_pr_callback, 0); - break; - case DB_LOCK_DEADLOCK: - /* If we have a child txn, retry--else it's fatal. */ - if (ctxn != NULL) { - if ((ret = ctxn->abort(ctxn)) != 0) - goto err; - ctxn = NULL; - goto retry; - } - /* FALLTHROUGH */ - default: - dbenv->err(dbenv, ret, NULL); - if (ctxn != NULL) { - (void)ctxn->abort(ctxn); - ctxn = NULL; - } - goto err; - } - if (ctxn != NULL) { - if ((ret = ctxn->abort(ctxn)) != 0) - goto err; - ctxn = NULL; - } - } -done: rval = 0; - DB_ASSERT(ctxn == NULL); - if (txn != NULL && (ret = txn->commit(txn, 0)) != 0) { - txn = NULL; - goto err; - } - - if (0) { -err: rval = 1; - DB_ASSERT(ctxn == NULL); - if (txn != NULL) - (void)txn->abort(txn); - } - - /* Close the database. */ - if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) { - dbenv->err(dbenv, ret, "DB->close"); - rval = 1; - } - - if (G(hdrbuf) != NULL) - free(G(hdrbuf)); - G(hdrbuf) = NULL; - /* Free allocated memory. */ - if (subdb != NULL) - free(subdb); - if (dbtype != DB_RECNO && dbtype != DB_QUEUE && key.data != NULL) - free(key.data); - if (rkey.data != NULL) - free(rkey.data); - free(data.data); - - return (rval); -} - -/* - * env_create -- - * Create the environment and initialize it for error reporting. 
- */ -int -env_create(dbenvp, ldg) - DB_ENV **dbenvp; - LDG *ldg; -{ - DB_ENV *dbenv; - int ret; - - if ((ret = db_env_create(dbenvp, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", ldg->progname, db_strerror(ret)); - return (ret); - } - dbenv = *dbenvp; - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, ldg->progname); - if (ldg->passwd != NULL && (ret = dbenv->set_encrypt(dbenv, - ldg->passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - return (ret); - } - if ((ret = db_init(dbenv, ldg->home, ldg->cache, &ldg->private)) != 0) - return (ret); - dbenv->app_private = ldg; - - return (0); -} - -/* - * db_init -- - * Initialize the environment. - */ -int -db_init(dbenv, home, cache, is_private) - DB_ENV *dbenv; - char *home; - u_int32_t cache; - int *is_private; -{ - u_int32_t flags; - int ret; - - *is_private = 0; - /* We may be loading into a live environment. Try and join. */ - flags = DB_USE_ENVIRON | - DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN; - if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0) - return (0); - if (ret == DB_VERSION_MISMATCH) - goto err; - - /* - * We're trying to load a database. - * - * An environment is required because we may be trying to look at - * databases in directories other than the current one. We could - * avoid using an environment iff the -h option wasn't specified, - * but that seems like more work than it's worth. - * - * No environment exists (or, at least no environment that includes - * an mpool region exists). Create one, but make it private so that - * no files are actually created. - */ - LF_CLR(DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN); - LF_SET(DB_CREATE | DB_PRIVATE); - *is_private = 1; - if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) { - dbenv->err(dbenv, ret, "set_cachesize"); - return (1); - } - if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0) - return (0); - - /* An environment is required. */ -err: dbenv->err(dbenv, ret, "DB_ENV->open"); - return (1); -} - -#define FLAG(name, value, keyword, flag) \ - if (strcmp(name, keyword) == 0) { \ - switch (*value) { \ - case '1': \ - if ((ret = dbp->set_flags(dbp, flag)) != 0) { \ - dbp->err(dbp, ret, "%s: set_flags: %s", \ - G(progname), name); \ - goto err; \ - } \ - break; \ - case '0': \ - break; \ - default: \ - badnum(dbenv); \ - goto err; \ - } \ - continue; \ - } -#define NUMBER(name, value, keyword, func, t) \ - if (strcmp(name, keyword) == 0) { \ - if ((ret = __db_getlong(dbenv, \ - NULL, value, 0, LONG_MAX, &val)) != 0 || \ - (ret = dbp->func(dbp, (t)val)) != 0) \ - goto nameerr; \ - continue; \ - } -#define STRING(name, value, keyword, func) \ - if (strcmp(name, keyword) == 0) { \ - if ((ret = dbp->func(dbp, value[0])) != 0) \ - goto nameerr; \ - continue; \ - } - -/* - * configure -- - * Handle command-line configuration options. 
- */ -int -configure(dbenv, dbp, clp, subdbp, keysp) - DB_ENV *dbenv; - DB *dbp; - char **clp, **subdbp; - int *keysp; -{ - long val; - int ret, savech; - char *name, *value; - - for (; (name = *clp) != NULL; *--value = savech, ++clp) { - if ((value = strchr(name, '=')) == NULL) { - dbp->errx(dbp, - "command-line configuration uses name=value format"); - return (1); - } - savech = *value; - *value++ = '\0'; - - if (strcmp(name, "database") == 0 || - strcmp(name, "subdatabase") == 0) { - if (*subdbp != NULL) - free(*subdbp); - if ((*subdbp = strdup(value)) == NULL) { - dbp->err(dbp, ENOMEM, NULL); - return (1); - } - continue; - } - if (strcmp(name, "keys") == 0) { - if (strcmp(value, "1") == 0) - *keysp = 1; - else if (strcmp(value, "0") == 0) - *keysp = 0; - else { - badnum(dbenv); - return (1); - } - continue; - } - - NUMBER(name, value, "bt_minkey", set_bt_minkey, u_int32_t); - NUMBER(name, value, "db_lorder", set_lorder, int); - NUMBER(name, value, "db_pagesize", set_pagesize, u_int32_t); - FLAG(name, value, "chksum", DB_CHKSUM); - FLAG(name, value, "duplicates", DB_DUP); - FLAG(name, value, "dupsort", DB_DUPSORT); - NUMBER(name, value, "h_ffactor", set_h_ffactor, u_int32_t); - NUMBER(name, value, "h_nelem", set_h_nelem, u_int32_t); - NUMBER(name, value, "re_len", set_re_len, u_int32_t); - STRING(name, value, "re_pad", set_re_pad); - FLAG(name, value, "recnum", DB_RECNUM); - FLAG(name, value, "renumber", DB_RENUMBER); - - dbp->errx(dbp, - "unknown command-line configuration keyword \"%s\"", name); - return (1); - } - return (0); - -nameerr: - dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value); -err: return (1); -} - -/* - * rheader -- - * Read the header message. - */ -int -rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp) - DB_ENV *dbenv; - DB *dbp; - DBTYPE *dbtypep; - char **subdbp; - int *checkprintp, *keysp; -{ - size_t buflen, linelen, start; - long val; - int ch, first, hdr, ret; - char *buf, *name, *p, *value; - - *dbtypep = DB_UNKNOWN; - *checkprintp = 0; - name = NULL; - - /* - * We start with a smallish buffer; most headers are small. - * We may need to realloc it for a large subdatabase name. - */ - buflen = 4096; - if (G(hdrbuf) == NULL) { - hdr = 0; - if ((buf = malloc(buflen)) == NULL) - goto memerr; - G(hdrbuf) = buf; - G(origline) = G(lineno); - } else { - hdr = 1; - buf = G(hdrbuf); - G(lineno) = G(origline); - } - - start = 0; - for (first = 1;; first = 0) { - ++G(lineno); - - /* Read a line, which may be of arbitrary length, into buf. */ - linelen = 0; - buf = &G(hdrbuf)[start]; - if (hdr == 0) { - for (;;) { - if ((ch = getchar()) == EOF) { - if (!first || ferror(stdin)) - goto badfmt; - G(endofile) = 1; - break; - } - - /* - * If the buffer is too small, double it. - */ - if (linelen + start == buflen) { - G(hdrbuf) = - realloc(G(hdrbuf), buflen *= 2); - if (G(hdrbuf) == NULL) - goto memerr; - buf = &G(hdrbuf)[start]; - } - - if (ch == '\n') - break; - - buf[linelen++] = ch; - } - if (G(endofile) == 1) - break; - buf[linelen++] = '\0'; - } else - linelen = strlen(buf) + 1; - start += linelen; - - if (name != NULL) { - free(name); - name = NULL; - } - /* If we don't see the expected information, it's an error. */ - if ((name = strdup(buf)) == NULL) - goto memerr; - if ((p = strchr(name, '=')) == NULL) - goto badfmt; - *p++ = '\0'; - - value = p--; - - if (name[0] == '\0' || value[0] == '\0') - goto badfmt; - - if (strcmp(name, "HEADER") == 0) - break; - if (strcmp(name, "VERSION") == 0) { - /* - * Version 1 didn't have a "VERSION" header line. 
We - * only support versions 1, 2, and 3 of the dump format. - */ - G(version) = atoi(value); - - if (G(version) > 3) { - dbp->errx(dbp, - "line %lu: VERSION %d is unsupported", - G(lineno), G(version)); - goto err; - } - continue; - } - if (strcmp(name, "format") == 0) { - if (strcmp(value, "bytevalue") == 0) { - *checkprintp = 0; - continue; - } - if (strcmp(value, "print") == 0) { - *checkprintp = 1; - continue; - } - goto badfmt; - } - if (strcmp(name, "type") == 0) { - if (strcmp(value, "btree") == 0) { - *dbtypep = DB_BTREE; - continue; - } - if (strcmp(value, "hash") == 0) { - *dbtypep = DB_HASH; - continue; - } - if (strcmp(value, "recno") == 0) { - *dbtypep = DB_RECNO; - continue; - } - if (strcmp(value, "queue") == 0) { - *dbtypep = DB_QUEUE; - continue; - } - dbp->errx(dbp, "line %lu: unknown type", G(lineno)); - goto err; - } - if (strcmp(name, "database") == 0 || - strcmp(name, "subdatabase") == 0) { - if ((ret = convprintable(dbenv, value, subdbp)) != 0) { - dbp->err(dbp, ret, "error reading db name"); - goto err; - } - continue; - } - if (strcmp(name, "keys") == 0) { - if (strcmp(value, "1") == 0) - *keysp = 1; - else if (strcmp(value, "0") == 0) - *keysp = 0; - else { - badnum(dbenv); - goto err; - } - continue; - } - -#ifdef notyet - NUMBER(name, value, "bt_maxkey", set_bt_maxkey, u_int32_t); -#endif - NUMBER(name, value, "bt_minkey", set_bt_minkey, u_int32_t); - NUMBER(name, value, "db_lorder", set_lorder, int); - NUMBER(name, value, "db_pagesize", set_pagesize, u_int32_t); - NUMBER(name, value, "extentsize", set_q_extentsize, u_int32_t); - FLAG(name, value, "chksum", DB_CHKSUM); - FLAG(name, value, "duplicates", DB_DUP); - FLAG(name, value, "dupsort", DB_DUPSORT); - NUMBER(name, value, "h_ffactor", set_h_ffactor, u_int32_t); - NUMBER(name, value, "h_nelem", set_h_nelem, u_int32_t); - NUMBER(name, value, "re_len", set_re_len, u_int32_t); - STRING(name, value, "re_pad", set_re_pad); - FLAG(name, value, "recnum", DB_RECNUM); - FLAG(name, value, "renumber", DB_RENUMBER); - - dbp->errx(dbp, - "unknown input-file header configuration keyword \"%s\"", - name); - goto err; - } - ret = 0; - - if (0) { -nameerr: dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value); - ret = 1; - } - if (0) { -badfmt: dbp->errx(dbp, "line %lu: unexpected format", G(lineno)); - ret = 1; - } - if (0) { -memerr: dbp->errx(dbp, "unable to allocate memory"); -err: ret = 1; - } - if (name != NULL) - free(name); - return (ret); -} - -/* - * convprintable -- - * Convert a printable-encoded string into a newly allocated string. - * - * In an ideal world, this would probably share code with dbt_rprint, but - * that's set up to read character-by-character (to avoid large memory - * allocations that aren't likely to be a problem here), and this has fewer - * special cases to deal with. - * - * Note that despite the printable encoding, the char * interface to this - * function (which is, not coincidentally, also used for database naming) - * means that outstr cannot contain any nuls. - */ -int -convprintable(dbenv, instr, outstrp) - DB_ENV *dbenv; - char *instr, **outstrp; -{ - char c, *outstr; - int e1, e2; - - /* - * Just malloc a string big enough for the whole input string; - * the output string will be smaller (or of equal length). 
- */ - if ((outstr = malloc(strlen(instr) + 1)) == NULL) - return (ENOMEM); - - *outstrp = outstr; - - e1 = e2 = 0; - for ( ; *instr != '\0'; instr++) - if (*instr == '\\') { - if (*++instr == '\\') { - *outstr++ = '\\'; - continue; - } - c = digitize(dbenv, *instr, &e1) << 4; - c |= digitize(dbenv, *++instr, &e2); - if (e1 || e2) { - badend(dbenv); - return (EINVAL); - } - - *outstr++ = c; - } else - *outstr++ = *instr; - - *outstr = '\0'; - - return (0); -} - -/* - * dbt_rprint -- - * Read a printable line into a DBT structure. - */ -int -dbt_rprint(dbenv, dbtp) - DB_ENV *dbenv; - DBT *dbtp; -{ - u_int32_t len; - u_int8_t *p; - int c1, c2, e, escape, first; - char buf[32]; - - ++G(lineno); - - first = 1; - e = escape = 0; - for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) { - if (c1 == EOF) { - if (len == 0) { - G(endofile) = G(endodata) = 1; - return (0); - } - badend(dbenv); - return (1); - } - if (first) { - first = 0; - if (G(version) > 1) { - if (c1 != ' ') { - buf[0] = c1; - if (fgets(buf + 1, - sizeof(buf) - 1, stdin) == NULL || - strcmp(buf, "DATA=END\n") != 0) { - badend(dbenv); - return (1); - } - G(endodata) = 1; - return (0); - } - continue; - } - } - if (escape) { - if (c1 != '\\') { - if ((c2 = getchar()) == EOF) { - badend(dbenv); - return (1); - } - c1 = digitize(dbenv, - c1, &e) << 4 | digitize(dbenv, c2, &e); - if (e) - return (1); - } - escape = 0; - } else - if (c1 == '\\') { - escape = 1; - continue; - } - if (len >= dbtp->ulen - 10) { - dbtp->ulen *= 2; - if ((dbtp->data = - realloc(dbtp->data, dbtp->ulen)) == NULL) { - dbenv->err(dbenv, ENOMEM, NULL); - return (1); - } - p = (u_int8_t *)dbtp->data + len; - } - ++len; - *p++ = c1; - } - dbtp->size = len; - - return (0); -} - -/* - * dbt_rdump -- - * Read a byte dump line into a DBT structure. - */ -int -dbt_rdump(dbenv, dbtp) - DB_ENV *dbenv; - DBT *dbtp; -{ - u_int32_t len; - u_int8_t *p; - int c1, c2, e, first; - char buf[32]; - - ++G(lineno); - - first = 1; - e = 0; - for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) { - if (c1 == EOF) { - if (len == 0) { - G(endofile) = G(endodata) = 1; - return (0); - } - badend(dbenv); - return (1); - } - if (first) { - first = 0; - if (G(version) > 1) { - if (c1 != ' ') { - buf[0] = c1; - if (fgets(buf + 1, - sizeof(buf) - 1, stdin) == NULL || - strcmp(buf, "DATA=END\n") != 0) { - badend(dbenv); - return (1); - } - G(endodata) = 1; - return (0); - } - continue; - } - } - if ((c2 = getchar()) == EOF) { - badend(dbenv); - return (1); - } - if (len >= dbtp->ulen - 10) { - dbtp->ulen *= 2; - if ((dbtp->data = - realloc(dbtp->data, dbtp->ulen)) == NULL) { - dbenv->err(dbenv, ENOMEM, NULL); - return (1); - } - p = (u_int8_t *)dbtp->data + len; - } - ++len; - *p++ = digitize(dbenv, c1, &e) << 4 | digitize(dbenv, c2, &e); - if (e) - return (1); - } - dbtp->size = len; - - return (0); -} - -/* - * dbt_rrecno -- - * Read a record number dump line into a DBT structure. - */ -int -dbt_rrecno(dbenv, dbtp, ishex) - DB_ENV *dbenv; - DBT *dbtp; - int ishex; -{ - char buf[32], *p, *q; - u_long recno; - - ++G(lineno); - - if (fgets(buf, sizeof(buf), stdin) == NULL) { - G(endofile) = G(endodata) = 1; - return (0); - } - - if (strcmp(buf, "DATA=END\n") == 0) { - G(endodata) = 1; - return (0); - } - - if (buf[0] != ' ') - goto bad; - - /* - * If we're expecting a hex key, do an in-place conversion - * of hex to straight ASCII before calling __db_getulong(). - */ - if (ishex) { - for (p = q = buf + 1; *q != '\0' && *q != '\n';) { - /* - * 0-9 in hex are 0x30-0x39, so this is easy. 
- * We should alternate between 3's and [0-9], and - * if the [0-9] are something unexpected, - * __db_getulong will fail, so we only need to catch - * end-of-string conditions. - */ - if (*q++ != '3') - goto bad; - if (*q == '\n' || *q == '\0') - goto bad; - *p++ = *q++; - } - *p = '\0'; - } - - if (__db_getulong(dbenv, G(progname), buf + 1, 0, 0, &recno)) { -bad: badend(dbenv); - return (1); - } - - *((db_recno_t *)dbtp->data) = recno; - dbtp->size = sizeof(db_recno_t); - return (0); -} - -int -dbt_to_recno(dbenv, dbt, recnop) - DB_ENV *dbenv; - DBT *dbt; - db_recno_t *recnop; -{ - char buf[32]; /* Large enough for 2^64. */ - - memcpy(buf, dbt->data, dbt->size); - buf[dbt->size] = '\0'; - - return (__db_getulong(dbenv, G(progname), buf, 0, 0, (u_long *)recnop)); -} - -/* - * digitize -- - * Convert a character to an integer. - */ -int -digitize(dbenv, c, errorp) - DB_ENV *dbenv; - int c, *errorp; -{ - switch (c) { /* Don't depend on ASCII ordering. */ - case '0': return (0); - case '1': return (1); - case '2': return (2); - case '3': return (3); - case '4': return (4); - case '5': return (5); - case '6': return (6); - case '7': return (7); - case '8': return (8); - case '9': return (9); - case 'a': return (10); - case 'b': return (11); - case 'c': return (12); - case 'd': return (13); - case 'e': return (14); - case 'f': return (15); - default: /* Not possible. */ - break; - } - - dbenv->errx(dbenv, "unexpected hexadecimal value"); - *errorp = 1; - - return (0); -} - -/* - * badnum -- - * Display the bad number message. - */ -void -badnum(dbenv) - DB_ENV *dbenv; -{ - dbenv->errx(dbenv, - "boolean name=value pairs require a value of 0 or 1"); -} - -/* - * badend -- - * Display the bad end to input message. - */ -void -badend(dbenv) - DB_ENV *dbenv; -{ - dbenv->errx(dbenv, "unexpected end of input data or key/data pair"); -} - -/* - * usage -- - * Display the usage message. - */ -int -usage() -{ - (void)fprintf(stderr, "usage: %s %s\n\t%s\n", progname, - "[-nTV] [-c name=value] [-f file]", - "[-h home] [-P password] [-t btree | hash | recno | queue] db_file"); - (void)fprintf(stderr, "usage: %s %s\n", - progname, "-r lsn | fileid [-h home] [-P password] db_file"); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_printlog/README b/storage/bdb/db_printlog/README deleted file mode 100644 index eca5383cb58..00000000000 --- a/storage/bdb/db_printlog/README +++ /dev/null @@ -1,34 +0,0 @@ -# $Id: README,v 12.0 2004/11/17 03:43:23 bostic Exp $ - -Berkeley DB log dump utility. This utility dumps out a DB log in human -readable form, a record at a time, to assist in recovery and transaction -abort debugging. - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -commit.awk Output transaction ID of committed transactions. - -count.awk Print out the number of log records for transactions - that we encountered. - -dbname.awk Take a comma-separated list of database names and spit - out all the log records that affect those databases. - -fileid.awk Take a comma-separated list of file numbers and spit out - all the log records that affect those file numbers. 
- -logstat.awk Display log record count/size statistics. - -pgno.awk Take a comma-separated list of page numbers and spit - out all the log records that affect those page numbers. - -range.awk Print out a range of the log. - -rectype.awk Print out a range of the log -- command line should - set RECTYPE to the a comma separated list of the - rectypes (or partial strings of rectypes) sought. - -status.awk Read through db_printlog output and list the transactions - encountered, and whether they committed or aborted. - -txn.awk Print out all the records for a comma-separated list of - transaction IDs. diff --git a/storage/bdb/db_printlog/commit.awk b/storage/bdb/db_printlog/commit.awk deleted file mode 100644 index 4f03fd2ce50..00000000000 --- a/storage/bdb/db_printlog/commit.awk +++ /dev/null @@ -1,7 +0,0 @@ -# $Id: commit.awk,v 12.0 2004/11/17 03:43:24 bostic Exp $ -# -# Output tid of committed transactions. - -/txn_regop/ { - print $5 -} diff --git a/storage/bdb/db_printlog/count.awk b/storage/bdb/db_printlog/count.awk deleted file mode 100644 index 6a80cbe1b60..00000000000 --- a/storage/bdb/db_printlog/count.awk +++ /dev/null @@ -1,9 +0,0 @@ -# $Id: count.awk,v 12.0 2004/11/17 03:43:24 bostic Exp $ -# -# Print out the number of log records for transactions that we -# encountered. - -/^\[/{ - if ($5 != 0) - print $5 -} diff --git a/storage/bdb/db_printlog/db_printlog.c b/storage/bdb/db_printlog/db_printlog.c deleted file mode 100644 index 4a96efd9073..00000000000 --- a/storage/bdb/db_printlog/db_printlog.c +++ /dev/null @@ -1,434 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_printlog.c,v 12.5 2005/09/09 12:38:33 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/btree.h" -#include "dbinc/fop.h" -#include "dbinc/hash.h" -#include "dbinc/log.h" -#include "dbinc/qam.h" -#include "dbinc/txn.h" - -int lsn_arg __P((char *, DB_LSN *)); -int main __P((int, char *[])); -int open_rep_db __P((DB_ENV *, DB **, DBC **)); -int print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops)); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB *dbp; - DBC *dbc; - DBT data, keydbt; - DB_ENV *dbenv; - DB_LOGC *logc; - DB_LSN key, start, stop; - size_t dtabsize; - u_int32_t logcflag; - int ch, cmp, exitval, nflag, rflag, ret, repflag; - int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - char *home, *passwd; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - dbp = NULL; - dbc = NULL; - dbenv = NULL; - logc = NULL; - ZERO_LSN(start); - ZERO_LSN(stop); - dtabsize = 0; - exitval = nflag = rflag = repflag = 0; - dtab = NULL; - home = passwd = NULL; - - while ((ch = getopt(argc, argv, "b:e:h:NP:rRV")) != EOF) - switch (ch) { - case 'b': - if (lsn_arg(optarg, &start)) - return (usage()); - break; - case 'e': - if (lsn_arg(optarg, &stop)) - return (usage()); - break; - case 'h': - home = optarg; - break; - case 'N': - nflag = 1; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'r': - rflag = 1; - break; - case 'R': /* Undocumented */ - repflag = 1; - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc > 0) - return (usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * Create an environment object and initialize it for error - * reporting. - */ - if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto shutdown; - } - - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - - if (nflag) { - if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING"); - goto shutdown; - } - if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC"); - goto shutdown; - } - } - - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, - passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; - } - - /* - * Set up an app-specific dispatch function so that we can gracefully - * handle app-specific log records. - */ - if ((ret = dbenv->set_app_dispatch(dbenv, print_app_record)) != 0) { - dbenv->err(dbenv, ret, "app_dispatch"); - goto shutdown; - } - - /* - * An environment is required, but as all we're doing is reading log - * files, we create one if it doesn't already exist. If we create - * it, create it private so it automatically goes away when we're done. - * If we are reading the replication database, do not open the env - * with logging, because we don't want to log the opens. 
- */ - if (repflag) { - if ((ret = dbenv->open(dbenv, home, - DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 && - (ret == DB_VERSION_MISMATCH || - (ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) - != 0)) { - dbenv->err(dbenv, ret, "DB_ENV->open"); - goto shutdown; - } - } else if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0 && - (ret == DB_VERSION_MISMATCH || - (ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)) { - dbenv->err(dbenv, ret, "DB_ENV->open"); - goto shutdown; - } - - /* Initialize print callbacks. */ - if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 || - (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 || - (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 || - (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 || - (ret = __fop_init_print(dbenv, &dtab, &dtabsize)) != 0 || -#ifdef HAVE_HASH - (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 || -#endif -#ifdef HAVE_QUEUE - (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 || -#endif - (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) { - dbenv->err(dbenv, ret, "callback: initialization"); - goto shutdown; - } - - /* Allocate a log cursor. */ - if (repflag) { - if ((ret = open_rep_db(dbenv, &dbp, &dbc)) != 0) - goto shutdown; - } else if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->log_cursor"); - goto shutdown; - } - - if (IS_ZERO_LSN(start)) { - memset(&keydbt, 0, sizeof(keydbt)); - logcflag = rflag ? DB_PREV : DB_NEXT; - } else { - key = start; - logcflag = DB_SET; - } - memset(&data, 0, sizeof(data)); - - for (; !__db_util_interrupted(); logcflag = rflag ? DB_PREV : DB_NEXT) { - if (repflag) { - ret = dbc->c_get(dbc, &keydbt, &data, logcflag); - if (ret == 0) - key = ((REP_CONTROL *)keydbt.data)->lsn; - } else - ret = logc->get(logc, &key, &data, logcflag); - if (ret != 0) { - if (ret == DB_NOTFOUND) - break; - dbenv->err(dbenv, - ret, repflag ? "DB_LOGC->get" : "DBC->get"); - goto shutdown; - } - - /* - * We may have reached the end of the range we're displaying. - */ - if (!IS_ZERO_LSN(stop)) { - cmp = log_compare(&key, &stop); - if ((rflag && cmp < 0) || (!rflag && cmp > 0)) - break; - } - - ret = __db_dispatch(dbenv, - dtab, dtabsize, &data, &key, DB_TXN_PRINT, NULL); - - /* - * XXX - * Just in case the underlying routines don't flush. - */ - (void)fflush(stdout); - - if (ret != 0) { - dbenv->err(dbenv, ret, "tx: dispatch"); - goto shutdown; - } - } - - if (0) { -shutdown: exitval = 1; - } - if (logc != NULL && (ret = logc->close(logc, 0)) != 0) - exitval = 1; - - if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0) - exitval = 1; - - if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) - exitval = 1; - - /* - * The dtab is allocated by __db_add_recovery (called by *_init_print) - * using the library malloc function (__os_malloc). It thus needs to be - * freed using the corresponding free (__os_free). - */ - if (dtab != NULL) - __os_free(dbenv, dtab); - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE); -} - -int -usage() -{ - fprintf(stderr, "usage: %s %s\n", progname, - "[-NrV] [-b file/offset] [-e file/offset] [-h home] [-P password]"); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} - -/* Print an unknown, application-specific log record as best we can. */ -int -print_app_record(dbenv, dbt, lsnp, op) - DB_ENV *dbenv; - DBT *dbt; - DB_LSN *lsnp; - db_recops op; -{ - int ch; - u_int32_t i, rectype; - - DB_ASSERT(op == DB_TXN_PRINT); - - COMPQUIET(dbenv, NULL); - COMPQUIET(op, DB_TXN_PRINT); - - /* - * Fetch the rectype, which always must be at the beginning of the - * record (if dispatching is to work at all). - */ - memcpy(&rectype, dbt->data, sizeof(rectype)); - - /* - * Applications may wish to customize the output here based on the - * rectype. We just print the entire log record in the generic - * mixed-hex-and-printable format we use for binary data. - */ - printf("[%lu][%lu]application specific record: rec: %lu\n", - (u_long)lsnp->file, (u_long)lsnp->offset, (u_long)rectype); - printf("\tdata: "); - for (i = 0; i < dbt->size; i++) { - ch = ((u_int8_t *)dbt->data)[i]; - printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch); - } - printf("\n\n"); - - return (0); -} - -int -open_rep_db(dbenv, dbpp, dbcp) - DB_ENV *dbenv; - DB **dbpp; - DBC **dbcp; -{ - int ret; - - DB *dbp; - *dbpp = NULL; - *dbcp = NULL; - - if ((ret = db_create(dbpp, dbenv, 0)) != 0) { - dbenv->err(dbenv, ret, "db_create"); - return (ret); - } - - dbp = *dbpp; - if ((ret = - dbp->open(dbp, NULL, "__db.rep.db", NULL, DB_BTREE, 0, 0)) != 0) { - dbenv->err(dbenv, ret, "DB->open"); - goto err; - } - - if ((ret = dbp->cursor(dbp, NULL, dbcp, 0)) != 0) { - dbenv->err(dbenv, ret, "DB->cursor"); - goto err; - } - - return (0); - -err: if (*dbpp != NULL) - (void)(*dbpp)->close(*dbpp, 0); - return (ret); -} - -/* - * lsn_arg -- - * Parse a LSN argument. - */ -int -lsn_arg(arg, lsnp) - char *arg; - DB_LSN *lsnp; -{ - char *p; - u_long uval; - - /* - * Expected format is: lsn.file/lsn.offset. - * - * Don't use getsubopt(3), some systems don't have it. - */ - if ((p = strchr(arg, '/')) == NULL) - return (1); - *p = '\0'; - - if (__db_getulong(NULL, progname, arg, 0, 0, &uval)) - return (1); - if (uval > UINT32_MAX) - return (1); - lsnp->file = uval; - if (__db_getulong(NULL, progname, p + 1, 0, 0, &uval)) - return (1); - if (uval > UINT32_MAX) - return (1); - lsnp->offset = uval; - return (0); -} diff --git a/storage/bdb/db_printlog/dbname.awk b/storage/bdb/db_printlog/dbname.awk deleted file mode 100644 index a864c95dd53..00000000000 --- a/storage/bdb/db_printlog/dbname.awk +++ /dev/null @@ -1,82 +0,0 @@ -# $Id: dbname.awk,v 12.1 2005/03/23 04:56:51 ubell Exp $ -# -# Take a comma-separated list of database names and spit out all the -# log records that affect those databases. 
- -NR == 1 { - nfiles = 0 - while ((ndx = index(DBNAME, ",")) != 0) { - filenames[nfiles] = substr(DBNAME, 1, ndx - 1) 0; - DBNAME = substr(DBNAME, ndx + 1, length(DBNAME) - ndx); - files[nfiles] = -1 - nfiles++ - } - filenames[nfiles] = DBNAME 0; - files[nfiles] = -1 - myfile = -1; - nreg = 0; -} - -/^\[.*dbreg_register/ { - register = 1; -} -/opcode:/ { - if (register == 1) { - if ($2 == 1) - register = 3; - else - register = $2; - } -} -/name:/ { - if (register >= 2) { - myfile = -2; - for (i = 0; i <= nfiles; i++) { - if ($2 == filenames[i]) { - if (register == 2) { - printme = 0; - myfile = -2; - } else { - myfile = i; - } - break; - } - } - } - register = 0; -} -/fileid:/{ - if (myfile == -2) - files[$2] = 0; - else if (myfile != -1) { - files[$2] = 1; - if ($2 > nreg) - nreg = $2; - printme = 1; - register = 0; - myfile = -1; - } else if ($2 <= nreg && files[$2] == 1) { - printme = 1 - } - myfile = -1; -} - -/^\[/{ - if (printme == 1) { - printf("%s\n", rec); - printme = 0 - } - rec = ""; - - rec = $0 -} - -TXN == 1 && /txn_regop/ {printme = 1} -/^ /{ - rec = sprintf("%s\n%s", rec, $0); -} - -END { - if (printme == 1) - printf("%s\n", rec); -} diff --git a/storage/bdb/db_printlog/fileid.awk b/storage/bdb/db_printlog/fileid.awk deleted file mode 100644 index 853ba866c99..00000000000 --- a/storage/bdb/db_printlog/fileid.awk +++ /dev/null @@ -1,37 +0,0 @@ -# $Id: fileid.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $ -# -# Take a comma-separated list of file numbers and spit out all the -# log records that affect those file numbers. - -NR == 1 { - nfiles = 0 - while ((ndx = index(FILEID, ",")) != 0) { - files[nfiles] = substr(FILEID, 1, ndx - 1); - FILEID = substr(FILEID, ndx + 1, length(FILEID) - ndx); - nfiles++ - } - files[nfiles] = FILEID; -} - -/^\[/{ - if (printme == 1) { - printf("%s\n", rec); - printme = 0 - } - rec = ""; - - rec = $0 -} -/^ /{ - rec = sprintf("%s\n%s", rec, $0); -} -/fileid/{ - for (i = 0; i <= nfiles; i++) - if ($2 == files[i]) - printme = 1 -} - -END { - if (printme == 1) - printf("%s\n", rec); -} diff --git a/storage/bdb/db_printlog/logstat.awk b/storage/bdb/db_printlog/logstat.awk deleted file mode 100644 index 83386465375..00000000000 --- a/storage/bdb/db_printlog/logstat.awk +++ /dev/null @@ -1,36 +0,0 @@ -# $Id: logstat.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $ -# -# Output accumulated log record count/size statistics. -BEGIN { - l_file = 0; - l_offset = 0; -} - -/^\[/{ - gsub("[][: ]", " ", $1) - split($1, a) - - if (a[1] == l_file) { - l[a[3]] += a[2] - l_offset - ++n[a[3]] - } else - ++s[a[3]] - - l_file = a[1] - l_offset = a[2] -} - -END { - # We can't figure out the size of the first record in each log file, - # use the average for other records we found as an estimate. - for (i in s) - if (s[i] != 0 && n[i] != 0) { - l[i] += s[i] * (l[i]/n[i]) - n[i] += s[i] - delete s[i] - } - for (i in l) - printf "%s: %d (n: %d, avg: %.2f)\n", i, l[i], n[i], l[i]/n[i] - for (i in s) - printf "%s: unknown (n: %d, unknown)\n", i, s[i] -} diff --git a/storage/bdb/db_printlog/pgno.awk b/storage/bdb/db_printlog/pgno.awk deleted file mode 100644 index f58713523f1..00000000000 --- a/storage/bdb/db_printlog/pgno.awk +++ /dev/null @@ -1,47 +0,0 @@ -# $Id: pgno.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $ -# -# Take a comma-separated list of page numbers and spit out all the -# log records that affect those page numbers. 
- -NR == 1 { - npages = 0 - while ((ndx = index(PGNO, ",")) != 0) { - pgno[npages] = substr(PGNO, 1, ndx - 1); - PGNO = substr(PGNO, ndx + 1, length(PGNO) - ndx); - npages++ - } - pgno[npages] = PGNO; -} - -/^\[/{ - if (printme == 1) { - printf("%s\n", rec); - printme = 0 - } - rec = ""; - - rec = $0 -} -/^ /{ - rec = sprintf("%s\n%s", rec, $0); -} -/pgno/{ - for (i = 0; i <= npages; i++) - if ($2 == pgno[i]) - printme = 1 -} -/right/{ - for (i = 0; i <= npages; i++) - if ($2 == pgno[i]) - printme = 1 -} -/left/{ - for (i = 0; i <= npages; i++) - if ($2 == pgno[i]) - printme = 1 -} - -END { - if (printme == 1) - printf("%s\n", rec); -} diff --git a/storage/bdb/db_printlog/range.awk b/storage/bdb/db_printlog/range.awk deleted file mode 100644 index 045c7fb2070..00000000000 --- a/storage/bdb/db_printlog/range.awk +++ /dev/null @@ -1,27 +0,0 @@ -# $Id: range.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $ -# -# Print out a range of the log - -/^\[/{ - l = length($1) - 1; - i = index($1, "]"); - file = substr($1, 2, i - 2); - file += 0; - start = i + 2; - offset = substr($1, start, l - start + 1); - i = index(offset, "]"); - offset = substr($1, start, i - 1); - offset += 0; - - if ((file == START_FILE && offset >= START_OFFSET || file > START_FILE)\ - && (file < END_FILE || (file == END_FILE && offset < END_OFFSET))) - printme = 1 - else if (file == END_FILE && offset > END_OFFSET || file > END_FILE) - exit - else - printme = 0 -} -{ - if (printme == 1) - print $0 -} diff --git a/storage/bdb/db_printlog/rectype.awk b/storage/bdb/db_printlog/rectype.awk deleted file mode 100644 index 25b28008561..00000000000 --- a/storage/bdb/db_printlog/rectype.awk +++ /dev/null @@ -1,27 +0,0 @@ -# $Id: rectype.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $ -# -# Print out a range of the log. -# Command line should set RECTYPE to a comma separated list -# of the rectypes (or partial strings of rectypes) sought. -NR == 1 { - ntypes = 0 - while ((ndx = index(RECTYPE, ",")) != 0) { - types[ntypes] = substr(RECTYPE, 1, ndx - 1); - RECTYPE = substr(RECTYPE, ndx + 1, length(RECTYPE) - ndx); - ntypes++ - } - types[ntypes] = RECTYPE; -} - -/^\[/{ - printme = 0 - for (i = 0; i <= ntypes; i++) - if (index($1, types[i]) != 0) { - printme = 1 - break; - } -} -{ - if (printme == 1) - print $0 -} diff --git a/storage/bdb/db_printlog/status.awk b/storage/bdb/db_printlog/status.awk deleted file mode 100644 index 0433312debf..00000000000 --- a/storage/bdb/db_printlog/status.awk +++ /dev/null @@ -1,50 +0,0 @@ -# $Id: status.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $ -# -# Read through db_printlog output and list all the transactions encountered -# and whether they committed or aborted. 
-# -# 1 = started -# 2 = committed -# 3 = explicitly aborted -# 4 = other -BEGIN { - cur_txn = 0 -} -/^\[/{ - in_regop = 0 - if (status[$5] == 0) { - status[$5] = 1; - txns[cur_txn] = $5; - cur_txn++; - } -} -/ child:/ { - txnid = substr($2, 3); - status[txnid] = 2; -} -/txn_regop/ { - txnid = $5 - in_regop = 1 -} -/opcode:/ { - if (in_regop == 1) { - if ($2 == 1) - status[txnid] = 2 - else if ($2 == 3) - status[txnid] = 3 - else - status[txnid] = 4 - } -} -END { - for (i = 0; i < cur_txn; i++) { - if (status[txns[i]] == 1) - printf("%s\tABORT\n", txns[i]); - else if (status[txns[i]] == 2) - printf("%s\tCOMMIT\n", txns[i]); - else if (status[txns[i]] == 3) - printf("%s\tABORT\n", txns[i]); - else if (status[txns[i]] == 4) - printf("%s\tOTHER\n", txns[i]); - } -} diff --git a/storage/bdb/db_printlog/txn.awk b/storage/bdb/db_printlog/txn.awk deleted file mode 100644 index 12f283ebf79..00000000000 --- a/storage/bdb/db_printlog/txn.awk +++ /dev/null @@ -1,34 +0,0 @@ -# $Id: txn.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $ -# -# Print out all the records for a comma-separated list of transaction ids. -NR == 1 { - ntxns = 0 - while ((ndx = index(TXN, ",")) != 0) { - txn[ntxns] = substr(TXN, 1, ndx - 1); - TXN = substr(TXN, ndx + 1, length(TXN) - ndx); - ntxns++ - } - txn[ntxns] = TXN; -} - -/^\[/{ - if (printme == 1) { - printf("%s\n", rec); - printme = 0 - } - rec = ""; - - for (i = 0; i <= ntxns; i++) - if (txn[i] == $5) { - rec = $0 - printme = 1 - } -} -/^ /{ - rec = sprintf("%s\n%s", rec, $0); -} - -END { - if (printme == 1) - printf("%s\n", rec); -} diff --git a/storage/bdb/db_recover/db_recover.c b/storage/bdb/db_recover/db_recover.c deleted file mode 100644 index 5d9b5886b81..00000000000 --- a/storage/bdb/db_recover/db_recover.c +++ /dev/null @@ -1,303 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_recover.c,v 12.5 2005/06/16 20:21:29 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#if TIME_WITH_SYS_TIME -#include -#include -#else -#if HAVE_SYS_TIME_H -#include -#else -#include -#endif -#endif - -#include -#include -#include -#endif - -#include "db_int.h" - -int main __P((int, char *[])); -int read_timestamp __P((char *, time_t *)); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB_ENV *dbenv; - time_t timestamp; - u_int32_t flags; - int ch, exitval, fatal_recover, ret, retain_env, verbose; - char *home, *passwd; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - home = passwd = NULL; - timestamp = 0; - exitval = fatal_recover = retain_env = verbose = 0; - while ((ch = getopt(argc, argv, "ceh:P:t:Vv")) != EOF) - switch (ch) { - case 'c': - fatal_recover = 1; - break; - case 'e': - retain_env = 1; - break; - case 'h': - home = optarg; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 't': - if ((ret = read_timestamp(optarg, ×tamp)) != 0) - return (ret); - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'v': - verbose = 1; - break; - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc != 0) - return (usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * Create an environment object and initialize it for error - * reporting. - */ - if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - return (EXIT_FAILURE); - } - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - if (verbose) - (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1); - if (timestamp && - (ret = dbenv->set_tx_timestamp(dbenv, ×tamp)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->set_timestamp"); - goto shutdown; - } - - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, - passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; - } - - /* - * Initialize the environment -- we don't actually do anything - * else, that all that's needed to run recovery. - * - * Note that unless the caller specified the -e option, we use a - * private environment, as we're about to create a region, and we - * don't want to to leave it around. If we leave the region around, - * the application that should create it will simply join it instead, - * and will then be running with incorrectly sized (and probably - * terribly small) caches. Applications that use -e should almost - * certainly use DB_CONFIG files in the directory. - */ - flags = 0; - LF_SET(DB_CREATE | DB_INIT_LOG | - DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON); - LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER); - LF_SET(retain_env ? DB_INIT_LOCK : DB_PRIVATE); - if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->open"); - goto shutdown; - } - - if (0) { -shutdown: exitval = 1; - } - - /* Clean up the environment. 
*/ - if ((ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -#define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2; - -/* - * read_timestamp -- - * Convert a time argument to Epoch seconds. - * - * Copyright (c) 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -int -read_timestamp(arg, timep) - char *arg; - time_t *timep; -{ - struct tm *t; - time_t now; - int yearset; - char *p; - /* Start with the current time. */ - (void)time(&now); - if ((t = localtime(&now)) == NULL) { - fprintf(stderr, - "%s: localtime: %s\n", progname, strerror(errno)); - return (EXIT_FAILURE); - } - /* [[CC]YY]MMDDhhmm[.SS] */ - if ((p = strchr(arg, '.')) == NULL) - t->tm_sec = 0; /* Seconds defaults to 0. */ - else { - if (strlen(p + 1) != 2) - goto terr; - *p++ = '\0'; - t->tm_sec = ATOI2(p); - } - - yearset = 0; - switch (strlen(arg)) { - case 12: /* CCYYMMDDhhmm */ - t->tm_year = ATOI2(arg); - t->tm_year *= 100; - yearset = 1; - /* FALLTHROUGH */ - case 10: /* YYMMDDhhmm */ - if (yearset) { - yearset = ATOI2(arg); - t->tm_year += yearset; - } else { - yearset = ATOI2(arg); - if (yearset < 69) - t->tm_year = yearset + 2000; - else - t->tm_year = yearset + 1900; - } - t->tm_year -= 1900; /* Convert to UNIX time. */ - /* FALLTHROUGH */ - case 8: /* MMDDhhmm */ - t->tm_mon = ATOI2(arg); - --t->tm_mon; /* Convert from 01-12 to 00-11 */ - t->tm_mday = ATOI2(arg); - t->tm_hour = ATOI2(arg); - t->tm_min = ATOI2(arg); - break; - default: - goto terr; - } - - t->tm_isdst = -1; /* Figure out DST. 
*/ - - *timep = mktime(t); - if (*timep == -1) { -terr: fprintf(stderr, - "%s: out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]", - progname); - return (EXIT_FAILURE); - } - return (0); -} - -int -usage() -{ - (void)fprintf(stderr, "usage: %s %s\n", progname, - "[-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]"); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_stat/db_stat.c b/storage/bdb/db_stat/db_stat.c deleted file mode 100644 index 9b6fff88f6c..00000000000 --- a/storage/bdb/db_stat/db_stat.c +++ /dev/null @@ -1,506 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_stat.c,v 12.6 2005/10/05 22:27:27 ubell Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#if TIME_WITH_SYS_TIME -#include -#include -#else -#if HAVE_SYS_TIME_H -#include -#else -#include -#endif -#endif - -#include -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" - -typedef enum { T_NOTSET, - T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_MUTEX, T_REP, T_TXN } test_t; - -int db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *)); -int main __P((int, char *[])); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB_ENV *dbenv; - DB_BTREE_STAT *sp; - DB *alt_dbp, *dbp; - test_t ttype; - u_int32_t cache, env_flags, fast, flags; - int ch, exitval; - int nflag, private, resize, ret; - char *db, *home, *p, *passwd, *subdb; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - dbenv = NULL; - dbp = NULL; - ttype = T_NOTSET; - cache = MEGABYTE; - exitval = fast = flags = nflag = private = 0; - db = home = passwd = subdb = NULL; - env_flags = 0; - - while ((ch = getopt(argc, - argv, "C:cd:Eefh:L:lM:mNP:R:rs:tVxX:Z")) != EOF) - switch (ch) { - case 'C': case 'c': - if (ttype != T_NOTSET && ttype != T_LOCK) - goto argcombo; - ttype = T_LOCK; - if (ch != 'c') - for (p = optarg; *p; ++p) - switch (*p) { - case 'A': - LF_SET(DB_STAT_ALL); - break; - case 'c': - LF_SET(DB_STAT_LOCK_CONF); - break; - case 'l': - LF_SET(DB_STAT_LOCK_LOCKERS); - break; - case 'm': /* Backward compatible. 
*/ - break; - case 'o': - LF_SET(DB_STAT_LOCK_OBJECTS); - break; - case 'p': - LF_SET(DB_STAT_LOCK_PARAMS); - break; - default: - return (usage()); - } - break; - case 'd': - if (ttype != T_NOTSET && ttype != T_DB) - goto argcombo; - ttype = T_DB; - db = optarg; - break; - case 'E': case 'e': - if (ttype != T_NOTSET && ttype != T_ENV) - goto argcombo; - ttype = T_ENV; - LF_SET(DB_STAT_SUBSYSTEM); - if (ch == 'E') - LF_SET(DB_STAT_ALL); - break; - case 'f': - fast = DB_FAST_STAT; - break; - case 'h': - home = optarg; - break; - case 'L': case 'l': - if (ttype != T_NOTSET && ttype != T_LOG) - goto argcombo; - ttype = T_LOG; - if (ch != 'l') - for (p = optarg; *p; ++p) - switch (*p) { - case 'A': - LF_SET(DB_STAT_ALL); - break; - default: - return (usage()); - } - break; - case 'M': case 'm': - if (ttype != T_NOTSET && ttype != T_MPOOL) - goto argcombo; - ttype = T_MPOOL; - if (ch != 'm') - for (p = optarg; *p; ++p) - switch (*p) { - case 'A': - LF_SET(DB_STAT_ALL); - break; - case 'h': - LF_SET(DB_STAT_MEMP_HASH); - break; - case 'm': /* Backward compatible. */ - break; - default: - return (usage()); - } - break; - case 'N': - nflag = 1; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'R': case 'r': - if (ttype != T_NOTSET && ttype != T_REP) - goto argcombo; - ttype = T_REP; - if (ch != 'r') - for (p = optarg; *p; ++p) - switch (*p) { - case 'A': - LF_SET(DB_STAT_ALL); - break; - default: - return (usage()); - } - break; - case 's': - if (ttype != T_NOTSET && ttype != T_DB) - goto argcombo; - ttype = T_DB; - subdb = optarg; - break; - case 't': - if (ttype != T_NOTSET) { -argcombo: fprintf(stderr, - "%s: illegal option combination\n", - progname); - return (EXIT_FAILURE); - } - ttype = T_TXN; - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'X': case 'x': - if (ttype != T_NOTSET && ttype != T_MUTEX) - goto argcombo; - ttype = T_MUTEX; - if (ch != 'x') - for (p = optarg; *p; ++p) - switch (*p) { - case 'A': - LF_SET(DB_STAT_ALL); - break; - default: - return (usage()); - } - break; - case 'Z': - LF_SET(DB_STAT_CLEAR); - break; - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - switch (ttype) { - case T_DB: - if (db == NULL) - return (usage()); - break; - case T_NOTSET: - return (usage()); - /* NOTREACHED */ - case T_ENV: - case T_LOCK: - case T_LOG: - case T_MPOOL: - case T_REP: - case T_TXN: - case T_MUTEX: - if (fast != 0) - return (usage()); - break; - } - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * Create an environment object and initialize it for error - * reporting. - */ -retry: if ((ret = db_env_create(&dbenv, env_flags)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto err; - } - - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - - if (nflag) { - if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING"); - goto err; - } - if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC"); - goto err; - } - } - - if (passwd != NULL && - (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto err; - } - - /* Initialize the environment. 
*/ - if (db_init(dbenv, home, ttype, cache, &private) != 0) - goto err; - - switch (ttype) { - case T_DB: - if (flags != 0) - return (usage()); - - /* Create the DB object and open the file. */ - if ((ret = db_create(&dbp, dbenv, 0)) != 0) { - dbenv->err(dbenv, ret, "db_create"); - goto err; - } - - if ((ret = dbp->open(dbp, - NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - dbenv->err(dbenv, ret, "DB->open: %s", db); - goto err; - } - - /* Check if cache is too small for this DB's pagesize. */ - if (private) { - if ((ret = __db_util_cache(dbp, &cache, &resize)) != 0) - goto err; - if (resize) { - (void)dbp->close(dbp, DB_NOSYNC); - dbp = NULL; - - (void)dbenv->close(dbenv, 0); - dbenv = NULL; - goto retry; - } - } - - /* - * See if we can open this db read/write to update counts. - * If its a master-db then we cannot. So check to see, - * if its btree then it might be. - */ - if (subdb == NULL && dbp->type == DB_BTREE && - (ret = dbp->stat(dbp, NULL, &sp, DB_FAST_STAT)) != 0) { - dbenv->err(dbenv, ret, "DB->stat"); - goto err; - } - - if (subdb != NULL || - dbp->type != DB_BTREE || - (sp->bt_metaflags & BTM_SUBDB) == 0) { - if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) { - dbenv->err(dbenv, ret, "db_create"); - goto err; - } - if ((ret = dbp->open(alt_dbp, NULL, - db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) { - if (subdb == NULL) - dbenv->err(dbenv, - ret, "DB->open: %s", db); - else - dbenv->err(dbenv, - ret, "DB->open: %s:%s", db, subdb); - (void)alt_dbp->close(alt_dbp, DB_NOSYNC); - goto err; - } - - (void)dbp->close(dbp, DB_NOSYNC); - dbp = alt_dbp; - } - - if (dbp->stat_print(dbp, flags)) - goto err; - break; - case T_ENV: - if (dbenv->stat_print(dbenv, flags)) - goto err; - break; - case T_LOCK: - if (dbenv->lock_stat_print(dbenv, flags)) - goto err; - break; - case T_LOG: - if (dbenv->log_stat_print(dbenv, flags)) - goto err; - break; - case T_MPOOL: - if (dbenv->memp_stat_print(dbenv, flags)) - goto err; - break; - case T_MUTEX: - if (dbenv->mutex_stat_print(dbenv, flags)) - goto err; - break; - case T_REP: - if (dbenv->rep_stat_print(dbenv, flags)) - goto err; - break; - case T_TXN: - if (dbenv->txn_stat_print(dbenv, flags)) - goto err; - break; - case T_NOTSET: - dbenv->errx(dbenv, "Unknown statistics flag"); - goto err; - } - - if (0) { -err: exitval = 1; - } - if (dbp != NULL && (ret = dbp->close(dbp, DB_NOSYNC)) != 0) { - exitval = 1; - dbenv->err(dbenv, ret, "close"); - } - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -/* - * db_init -- - * Initialize the environment. - */ -int -db_init(dbenv, home, ttype, cache, is_private) - DB_ENV *dbenv; - char *home; - test_t ttype; - u_int32_t cache; - int *is_private; -{ - u_int32_t oflags; - int ret; - - /* - * If our environment open fails, and we're trying to look at a - * shared region, it's a hard failure. - * - * We will probably just drop core if the environment we join does - * not include a memory pool. This is probably acceptable; trying - * to use an existing environment that does not contain a memory - * pool to look at a database can be safely construed as operator - * error, I think. 
- */ - *is_private = 0; - if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) == 0) - return (0); - if (ret == DB_VERSION_MISMATCH) - goto err; - if (ttype != T_DB && ttype != T_LOG) { - dbenv->err(dbenv, ret, "DB_ENV->open%s%s", - home == NULL ? "" : ": ", home == NULL ? "" : home); - return (1); - } - - /* - * We're looking at a database or set of log files and no environment - * exists. Create one, but make it private so no files are actually - * created. Declare a reasonably large cache so that we don't fail - * when reporting statistics on large databases. - * - * An environment is required to look at databases because we may be - * trying to look at databases in directories other than the current - * one. - */ - if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) { - dbenv->err(dbenv, ret, "set_cachesize"); - return (1); - } - *is_private = 1; - oflags = DB_CREATE | DB_PRIVATE | DB_USE_ENVIRON; - if (ttype == T_DB) - oflags |= DB_INIT_MPOOL; - if (ttype == T_LOG) - oflags |= DB_INIT_LOG; - if (ttype == T_REP) - oflags |= DB_INIT_REP; - if ((ret = dbenv->open(dbenv, home, oflags, 0)) == 0) - return (0); - - /* An environment is required. */ -err: dbenv->err(dbenv, ret, "DB_ENV->open"); - return (1); -} - -int -usage() -{ - fprintf(stderr, "usage: %s %s\n", progname, - "-d file [-fN] [-h home] [-P password] [-s database]"); - fprintf(stderr, "usage: %s %s\n\t%s\n", progname, - "[-cEelmNrtVxZ] [-C Aclop]", - "[-h home] [-L A] [-M A] [-P password] [-R A] [-X A]"); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_stat/dd.sh b/storage/bdb/db_stat/dd.sh deleted file mode 100644 index 4e00c289a5a..00000000000 --- a/storage/bdb/db_stat/dd.sh +++ /dev/null @@ -1,79 +0,0 @@ -#! /bin/sh -# $Id: dd.sh,v 12.0 2004/11/17 03:43:25 bostic Exp $ -# -# Display environment's deadlocks based on "db_stat -Co" output. - -t1=__a -t2=__b - -trap 'rm -f $t1 $t2; exit 0' 0 1 2 3 13 15 - -if [ $# -ne 1 ]; then - echo "Usage: dd.sh [db_stat -Co output]" - exit 1 -fi - -if `egrep '\.*\' $1 > /dev/null`; then - n=`egrep '\.*\' $1 | wc -l | awk '{print $1}'` - echo "dd.sh: $1: $n page locks in a WAIT state." -else - echo "dd.sh: $1: No page locks in a WAIT state found." - exit 1 -fi - -# Print out list of node wait states, and output cycles in the graph. -egrep '\.*\' $1 | awk '{print $1 " " $7}' | -while read l p; do - p=`egrep "\.*\[ ][ ]*$p$" $1 | awk '{print $1}'` - echo "$l $p" -done | tsort > /dev/null 2>$t1 - -# Display the locks in a single cycle. -display_one() { - if [ -s $1 ]; then - echo "Deadlock #$c ============" - c=`expr $c + 1` - cat $1 | sort -n +6 - :> $1 - fi -} - -# Display the locks in all of the cycles. -# -# Requires tsort output some text before each list of nodes in the cycle, -# and the actual node displayed on the line be the second (white-space) -# separated item on the line. 
For example: -# -# tsort: cycle in data -# tsort: 8000177f -# tsort: 80001792 -# tsort: 80001774 -# tsort: cycle in data -# tsort: 80001776 -# tsort: 80001793 -# tsort: cycle in data -# tsort: 8000176a -# tsort: 8000178a -# -# XXX -# Currently, db_stat doesn't display the implicit wait relationship between -# parent and child transactions, where the parent won't release a lock until -# the child commits/aborts. This means the deadlock where parent holds a -# lock, thread A waits on parent, child waits on thread A won't be shown. -if [ -s $t1 ]; then - c=1 - :>$t2 - while read a b; do - case $b in - [0-9]*) - egrep $b $1 >> $t2;; - *) - display_one $t2;; - esac - done < $t1 - display_one $t2 -else - echo 'No deadlocks found.' -fi - -exit 0 diff --git a/storage/bdb/db_upgrade/db_upgrade.c b/storage/bdb/db_upgrade/db_upgrade.c deleted file mode 100644 index 724034dc73c..00000000000 --- a/storage/bdb/db_upgrade/db_upgrade.c +++ /dev/null @@ -1,207 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_upgrade.c,v 12.5 2005/09/09 12:38:36 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#endif - -#include "db_int.h" - -int main __P((int, char *[])); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB *dbp; - DB_ENV *dbenv; - u_int32_t flags; - int ch, exitval, nflag, ret, t_ret, verbose; - char *home, *passwd; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - dbenv = NULL; - flags = nflag = verbose = 0; - exitval = 0; - home = passwd = NULL; - while ((ch = getopt(argc, argv, "h:NP:sVv")) != EOF) - switch (ch) { - case 'h': - home = optarg; - break; - case 'N': - nflag = 1; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 's': - LF_SET(DB_DUPSORT); - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'v': - verbose = 1; - break; - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc <= 0) - return (usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * Create an environment object and initialize it for error - * reporting. 
- */ - if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, "%s: db_env_create: %s\n", - progname, db_strerror(ret)); - goto shutdown; - } - - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - - if (nflag) { - if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING"); - goto shutdown; - } - if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC"); - goto shutdown; - } - } - - if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv, - passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; - } - - /* - * If attaching to a pre-existing environment fails, create a - * private one and try again. - */ - if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0 && - (ret == DB_VERSION_MISMATCH || - (ret = dbenv->open(dbenv, home, - DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, - 0)) != 0)) { - dbenv->err(dbenv, ret, "DB_ENV->open"); - goto shutdown; - } - - for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) { - if ((ret = db_create(&dbp, dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_create: %s\n", progname, db_strerror(ret)); - goto shutdown; - } - dbp->set_errfile(dbp, stderr); - dbp->set_errpfx(dbp, progname); - if ((ret = dbp->upgrade(dbp, argv[0], flags)) != 0) - dbp->err(dbp, ret, "DB->upgrade: %s", argv[0]); - if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) { - dbenv->err(dbenv, ret, "DB->close: %s", argv[0]); - ret = t_ret; - } - if (ret != 0) - goto shutdown; - /* - * People get concerned if they don't see a success message. - * If verbose is set, give them one. - */ - if (verbose) - printf("%s: %s upgraded successfully\n", - progname, argv[0]); - } - - if (0) { -shutdown: exitval = 1; - } - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -int -usage() -{ - fprintf(stderr, "usage: %s %s\n", progname, - "[-NsVv] [-h home] [-P password] db_file ..."); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/db_verify/db_verify.c b/storage/bdb/db_verify/db_verify.c deleted file mode 100644 index d2763429239..00000000000 --- a/storage/bdb/db_verify/db_verify.c +++ /dev/null @@ -1,259 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_verify.c,v 12.3 2005/06/16 20:21:37 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2005\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#endif - -#include "db_int.h" - -int main __P((int, char *[])); -int usage __P((void)); -int version_check __P((void)); - -const char *progname; - -int -main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind; - DB *dbp, *dbp1; - DB_ENV *dbenv; - u_int32_t flags, cache; - int ch, exitval, nflag, private; - int quiet, resize, ret; - char *home, *passwd; - - if ((progname = strrchr(argv[0], '/')) == NULL) - progname = argv[0]; - else - ++progname; - - if ((ret = version_check()) != 0) - return (ret); - - dbenv = NULL; - dbp = NULL; - cache = MEGABYTE; - exitval = nflag = quiet = 0; - flags = 0; - home = passwd = NULL; - while ((ch = getopt(argc, argv, "h:NoP:quV")) != EOF) - switch (ch) { - case 'h': - home = optarg; - break; - case 'N': - nflag = 1; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - progname, strerror(errno)); - return (EXIT_FAILURE); - } - break; - case 'o': - LF_SET(DB_NOORDERCHK); - break; - case 'q': - quiet = 1; - break; - case 'u': /* Undocumented. */ - LF_SET(DB_UNREF); - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case '?': - default: - return (usage()); - } - argc -= optind; - argv += optind; - - if (argc <= 0) - return (usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* - * Create an environment object and initialize it for error - * reporting. - */ -retry: if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto shutdown; - } - - if (!quiet) { - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - } - - if (nflag) { - if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING"); - goto shutdown; - } - if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) { - dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC"); - goto shutdown; - } - } - - if (passwd != NULL && - (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) { - dbenv->err(dbenv, ret, "set_passwd"); - goto shutdown; - } - /* - * Attach to an mpool if it exists, but if that fails, attach to a - * private region. In the latter case, declare a reasonably large - * cache so that we don't fail when verifying large databases. - */ - private = 0; - if ((ret = - dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) { - if (ret != DB_VERSION_MISMATCH) { - if ((ret = - dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) { - dbenv->err(dbenv, ret, "set_cachesize"); - goto shutdown; - } - private = 1; - ret = dbenv->open(dbenv, home, DB_CREATE | - DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0); - } - if (ret != 0) { - dbenv->err(dbenv, ret, "DB_ENV->open"); - goto shutdown; - } - } - - for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) { - if ((ret = db_create(&dbp, dbenv, 0)) != 0) { - dbenv->err(dbenv, ret, "%s: db_create", progname); - goto shutdown; - } - - /* - * We create a 2nd dbp to this database to get its pagesize - * because the dbp we're using for verify cannot be opened. - * - * If the database is corrupted, we may not be able to open - * it, of course. In that case, just continue, using the - * cache size we have. 
- */ - if (private) { - if ((ret = db_create(&dbp1, dbenv, 0)) != 0) { - dbenv->err( - dbenv, ret, "%s: db_create", progname); - goto shutdown; - } - - ret = dbp1->open(dbp1, - NULL, argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0); - - /* - * If we get here, we can check the cache/page. - * !!! - * If we have to retry with an env with a larger - * cache, we jump out of this loop. However, we - * will still be working on the same argv when we - * get back into the for-loop. - */ - if (ret == 0) { - if (__db_util_cache( - dbp1, &cache, &resize) == 0 && resize) { - (void)dbp1->close(dbp1, 0); - (void)dbp->close(dbp, 0); - dbp = NULL; - - (void)dbenv->close(dbenv, 0); - dbenv = NULL; - goto retry; - } - } - (void)dbp1->close(dbp1, 0); - } - - /* The verify method is a destructor. */ - ret = dbp->verify(dbp, argv[0], NULL, NULL, flags); - dbp = NULL; - if (ret != 0) - goto shutdown; - } - - if (0) { -shutdown: exitval = 1; - } - - if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) { - exitval = 1; - dbenv->err(dbenv, ret, "close"); - } - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - if (passwd != NULL) - free(passwd); - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -int -usage() -{ - fprintf(stderr, "usage: %s %s\n", progname, - "[-NoqV] [-h home] [-P password] db_file ..."); - return (EXIT_FAILURE); -} - -int -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/dbinc/btree.h b/storage/bdb/dbinc/btree.h deleted file mode 100644 index b5fe4f2bbca..00000000000 --- a/storage/bdb/dbinc/btree.h +++ /dev/null @@ -1,333 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995, 1996 - * Keith Bostic. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994, 1995 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Mike Olson. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: btree.h,v 12.8 2005/08/08 14:52:30 bostic Exp $ - */ -#ifndef _DB_BTREE_H_ -#define _DB_BTREE_H_ - -/* Forward structure declarations. */ -struct __btree; typedef struct __btree BTREE; -struct __cursor; typedef struct __cursor BTREE_CURSOR; -struct __epg; typedef struct __epg EPG; -struct __recno; typedef struct __recno RECNO; - -#define DEFMINKEYPAGE (2) - -/* - * A recno order of 0 indicates that we don't have an order, not that we've - * an order less than 1. - */ -#define INVALID_ORDER 0 - -#define ISINTERNAL(p) (TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO) -#define ISLEAF(p) (TYPE(p) == P_LBTREE || \ - TYPE(p) == P_LRECNO || TYPE(p) == P_LDUP) - -/* Flags for __bam_cadjust_log(). */ -#define CAD_UPDATEROOT 0x01 /* Root page count was updated. */ - -/* Flags for __bam_split_log(). */ -#define SPL_NRECS 0x01 /* Split tree has record count. */ - -/* Flags for __bam_iitem(). */ -#define BI_DELETED 0x01 /* Key/data pair only placeholder. */ - -/* Flags for __bam_stkrel(). */ -#define STK_CLRDBC 0x01 /* Clear dbc->page reference. */ -#define STK_NOLOCK 0x02 /* Don't retain locks. */ -#define STK_PGONLY 0x04 - -/* Flags for __ram_ca(). These get logged, so make the values explicit. */ -typedef enum { - CA_DELETE = 0, /* Delete the current record. */ - CA_IAFTER = 1, /* Insert before the current record. */ - CA_IBEFORE = 2, /* Insert after the current record. */ - CA_ICURRENT = 3 /* Overwrite the current record. */ -} ca_recno_arg; - -/* - * Flags for __bam_search() and __bam_rsearch(). - * - * Note, internal page searches must find the largest record less than key in - * the tree so that descents work. Leaf page searches must find the smallest - * record greater than key so that the returned index is the record's correct - * position for insertion. - * - * The flags parameter to the search routines describes three aspects of the - * search: the type of locking required (including if we're locking a pair of - * pages), the item to return in the presence of duplicates and whether or not - * to return deleted entries. To simplify both the mnemonic representation - * and the code that checks for various cases, we construct a set of bitmasks. - */ -#define S_READ 0x00001 /* Read locks. */ -#define S_WRITE 0x00002 /* Write locks. */ - -#define S_APPEND 0x00040 /* Append to the tree. */ -#define S_DELNO 0x00080 /* Don't return deleted items. */ -#define S_DUPFIRST 0x00100 /* Return first duplicate. */ -#define S_DUPLAST 0x00200 /* Return last duplicate. */ -#define S_EXACT 0x00400 /* Exact items only. */ -#define S_PARENT 0x00800 /* Lock page pair. */ -#define S_STACK 0x01000 /* Need a complete stack. */ -#define S_PAST_EOF 0x02000 /* If doing insert search (or keyfirst - * or keylast operations), or a split - * on behalf of an insert, it's okay to - * return an entry one past end-of-page. 
- */ -#define S_STK_ONLY 0x04000 /* Just return info in the stack */ -#define S_MAX 0x08000 /* Get the right most key */ -#define S_MIN 0x10000 /* Get the left most key */ -#define S_NEXT 0x20000 /* Get the page after this key */ -#define S_DEL 0x40000 /* Get the tree to delete this key. */ -#define S_START 0x80000 /* Level to start stack. */ - -#define S_DELETE (S_WRITE | S_DUPFIRST | S_DELNO | S_EXACT | S_STACK) -#define S_FIND (S_READ | S_DUPFIRST | S_DELNO) -#define S_FIND_WR (S_WRITE | S_DUPFIRST | S_DELNO) -#define S_INSERT (S_WRITE | S_DUPLAST | S_PAST_EOF | S_STACK) -#define S_KEYFIRST (S_WRITE | S_DUPFIRST | S_PAST_EOF | S_STACK) -#define S_KEYLAST (S_WRITE | S_DUPLAST | S_PAST_EOF | S_STACK) -#define S_WRPAIR (S_WRITE | S_DUPLAST | S_PAST_EOF | S_PARENT) - -/* - * Various routines pass around page references. A page reference is - * a pointer to the page, and the indx indicates an item on the page. - * Each page reference may include a lock. - */ -struct __epg { - PAGE *page; /* The page. */ - db_indx_t indx; /* The index on the page. */ - db_indx_t entries; /* The number of entries on page */ - DB_LOCK lock; /* The page's lock. */ - db_lockmode_t lock_mode; /* The lock mode. */ -}; - -/* - * We maintain a stack of the pages that we're locking in the tree. Grow - * the stack as necessary. - * - * XXX - * Temporary fix for #3243 -- clear the page and lock from the stack entry. - * The correct fix is to never release a stack that doesn't hold items. - */ -#define BT_STK_CLR(c) do { \ - (c)->csp = (c)->sp; \ - (c)->csp->page = NULL; \ - LOCK_INIT((c)->csp->lock); \ -} while (0) - -#define BT_STK_ENTER(dbenv, c, pagep, page_indx, l, mode, ret) do { \ - if ((ret = ((c)->csp == (c)->esp ? \ - __bam_stkgrow(dbenv, c) : 0)) == 0) { \ - (c)->csp->page = pagep; \ - (c)->csp->indx = (page_indx); \ - (c)->csp->entries = NUM_ENT(pagep); \ - (c)->csp->lock = l; \ - (c)->csp->lock_mode = mode; \ - } \ -} while (0) - -#define BT_STK_PUSH(dbenv, c, pagep, page_indx, lock, mode, ret) do { \ - BT_STK_ENTER(dbenv, c, pagep, page_indx, lock, mode, ret); \ - ++(c)->csp; \ -} while (0) - -#define BT_STK_NUM(dbenv, c, pagep, page_indx, ret) do { \ - if ((ret = ((c)->csp == \ - (c)->esp ? __bam_stkgrow(dbenv, c) : 0)) == 0) { \ - (c)->csp->page = NULL; \ - (c)->csp->indx = (page_indx); \ - (c)->csp->entries = NUM_ENT(pagep); \ - LOCK_INIT((c)->csp->lock); \ - (c)->csp->lock_mode = DB_LOCK_NG; \ - } \ -} while (0) - -#define BT_STK_NUMPUSH(dbenv, c, pagep, page_indx, ret) do { \ - BT_STK_NUM(dbenv, cp, pagep, page_indx, ret); \ - ++(c)->csp; \ -} while (0) - -#define BT_STK_POP(c) \ - ((c)->csp == (c)->sp ? NULL : --(c)->csp) - -/* Btree/Recno cursor. */ -struct __cursor { - /* struct __dbc_internal */ - __DBC_INTERNAL - - /* btree private part */ - EPG *sp; /* Stack pointer. */ - EPG *csp; /* Current stack entry. */ - EPG *esp; /* End stack pointer. */ - EPG stack[5]; - - db_indx_t ovflsize; /* Maximum key/data on-page size. */ - - db_recno_t recno; /* Current record number. */ - u_int32_t order; /* Relative order among deleted curs. */ - - /* - * Btree: - * We set a flag in the cursor structure if the underlying object has - * been deleted. It's not strictly necessary, we could get the same - * information by looking at the page itself, but this method doesn't - * require us to retrieve the page on cursor delete. 
- * - * Recno: - * When renumbering recno databases during deletes, cursors referencing - * "deleted" records end up positioned between two records, and so must - * be specially adjusted on the next operation. - */ -#define C_DELETED 0x0001 /* Record was deleted. */ - /* - * There are three tree types that require maintaining record numbers. - * Recno AM trees, Btree AM trees for which the DB_RECNUM flag was set, - * and Btree off-page duplicate trees. - */ -#define C_RECNUM 0x0002 /* Tree requires record counts. */ - /* - * Recno trees have immutable record numbers by default, but optionally - * support mutable record numbers. Off-page duplicate Recno trees have - * mutable record numbers. All Btrees with record numbers (including - * off-page duplicate trees) are mutable by design, no flag is needed. - */ -#define C_RENUMBER 0x0004 /* Tree records are mutable. */ - u_int32_t flags; -}; - -/* - * Threshhold value, as a function of bt_minkey, of the number of - * bytes a key/data pair can use before being placed on an overflow - * page. Assume every item requires the maximum alignment for - * padding, out of sheer paranoia. - */ -#define B_MINKEY_TO_OVFLSIZE(dbp, minkey, pgsize) \ - ((u_int16_t)(((pgsize) - P_OVERHEAD(dbp)) / ((minkey) * P_INDX) -\ - (BKEYDATA_PSIZE(0) + DB_ALIGN(1, sizeof(int32_t))))) - -/* - * The maximum space that a single item can ever take up on one page. - * Used by __bam_split to determine whether a split is still necessary. - */ -#define B_MAX(a,b) (((a) > (b)) ? (a) : (b)) -#define B_MAXSIZEONPAGE(ovflsize) \ - (B_MAX(BOVERFLOW_PSIZE, BKEYDATA_PSIZE(ovflsize))) - -/* - * The in-memory, per-tree btree/recno data structure. - */ -struct __btree { /* Btree access method. */ - /* - * !!! - * These fields are write-once (when the structure is created) and - * so are ignored as far as multi-threading is concerned. - */ - db_pgno_t bt_meta; /* Database meta-data page. */ - db_pgno_t bt_root; /* Database root page. */ - - u_int32_t bt_minkey; /* Minimum keys per page. */ - - /* Btree comparison function. */ - int (*bt_compare) __P((DB *, const DBT *, const DBT *)); - /* Btree prefix function. */ - size_t (*bt_prefix) __P((DB *, const DBT *, const DBT *)); - - /* Recno access method. */ - int re_pad; /* Fixed-length padding byte. */ - int re_delim; /* Variable-length delimiting byte. */ - u_int32_t re_len; /* Length for fixed-length records. */ - char *re_source; /* Source file name. */ - - /* - * !!! - * The bt_lpgno field is NOT protected by any mutex, and for this - * reason must be advisory only, so, while it is read/written by - * multiple threads, DB is completely indifferent to the quality - * of its information. - */ - db_pgno_t bt_lpgno; /* Last insert location. */ - DB_LSN bt_llsn; /* Last insert LSN. */ - - /* - * !!! - * The re_modified field is NOT protected by any mutex, and for this - * reason cannot be anything more complicated than a zero/non-zero - * value. The actual writing of the backing source file cannot be - * threaded, so clearing the flag isn't a problem. - */ - int re_modified; /* If the tree was modified. */ - - /* - * !!! - * These fields are ignored as far as multi-threading is concerned. - * There are no transaction semantics associated with backing files, - * nor is there any thread protection. - */ - FILE *re_fp; /* Source file handle. */ - int re_eof; /* Backing source file EOF reached. */ - db_recno_t re_last; /* Last record number read. */ - -}; - -/* - * Modes for the __bam_curadj recovery records (btree_curadj). 
- * These appear in log records, so we wire the values and - * do not leave it up to the compiler. - */ -typedef enum { - DB_CA_DI = 1, - DB_CA_DUP = 2, - DB_CA_RSPLIT = 3, - DB_CA_SPLIT = 4 -} db_ca_mode; - -/* - * Flags for __bam_pinsert. - */ -#define BPI_SPACEONLY 0x01 /* Only check for space to update. */ -#define BPI_NORECNUM 0x02 /* Not update the recnum on the left. */ - -#include "dbinc_auto/btree_auto.h" -#include "dbinc_auto/btree_ext.h" -#include "dbinc/db_am.h" -#endif /* !_DB_BTREE_H_ */ diff --git a/storage/bdb/dbinc/crypto.h b/storage/bdb/dbinc/crypto.h deleted file mode 100644 index 419c16ffe2c..00000000000 --- a/storage/bdb/dbinc/crypto.h +++ /dev/null @@ -1,78 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: crypto.h,v 12.2 2005/07/20 16:51:03 bostic Exp $ - */ - -#ifndef _DB_CRYPTO_H_ -#define _DB_CRYPTO_H_ - -/* - * !!! - * These are the internal representations of the algorithm flags. - * They are used in both the DB_CIPHER structure and the CIPHER - * structure so we can tell if users specified both passwd and alg - * correctly. - * - * CIPHER_ANY is used when an app joins an existing env but doesn't - * know the algorithm originally used. This is only valid in the - * DB_CIPHER structure until we open and can set the alg. - */ -/* - * We store the algorithm in an 8-bit field on the meta-page. So we - * use a numeric value, not bit fields. - * now we are limited to 8 algorithms before we cannot use bits and - * need numeric values. That should be plenty. It is okay for the - * CIPHER_ANY flag to go beyond that since that is never stored on disk. - */ - -/* - * This structure is per-process, not in shared memory. - */ -struct __db_cipher { - u_int (*adj_size) __P((size_t)); - int (*close) __P((DB_ENV *, void *)); - int (*decrypt) __P((DB_ENV *, void *, void *, u_int8_t *, size_t)); - int (*encrypt) __P((DB_ENV *, void *, void *, u_int8_t *, size_t)); - int (*init) __P((DB_ENV *, DB_CIPHER *)); - - u_int8_t mac_key[DB_MAC_KEY]; /* MAC key. */ - void *data; /* Algorithm-specific information */ - -#define CIPHER_AES 1 /* AES algorithm */ - u_int8_t alg; /* Algorithm used - See above */ - u_int8_t spare[3]; /* Spares */ - -#define CIPHER_ANY 0x00000001 /* Only for DB_CIPHER */ - u_int32_t flags; /* Other flags */ -}; - -#ifdef HAVE_CRYPTO - -#include "crypto/rijndael/rijndael-api-fst.h" - -/* - * Shared ciphering structure - * No mutex needed because all information is read-only after creation. - */ -typedef struct __cipher { - roff_t passwd; /* Offset to shared passwd */ - size_t passwd_len; /* Length of passwd */ - u_int32_t flags; /* Algorithm used - see above */ -} CIPHER; - -#define DB_AES_KEYLEN 128 /* AES key length */ -#define DB_AES_CHUNK 16 /* AES byte unit size */ - -typedef struct __aes_cipher { - keyInstance decrypt_ki; /* Decryption key instance */ - keyInstance encrypt_ki; /* Encryption key instance */ - u_int32_t flags; /* AES-specific flags */ -} AES_CIPHER; - -#include "dbinc_auto/crypto_ext.h" -#endif /* HAVE_CRYPTO */ -#endif /* !_DB_CRYPTO_H_ */ diff --git a/storage/bdb/dbinc/cxx_common.h b/storage/bdb/dbinc/cxx_common.h deleted file mode 100644 index e5cb3a9aef4..00000000000 --- a/storage/bdb/dbinc/cxx_common.h +++ /dev/null @@ -1,45 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2002 - * Sleepycat Software. All rights reserved. 
- * - * $Id: cxx_common.h,v 11.2 2002/01/11 15:52:23 bostic Exp $ - */ - -#ifndef _CXX_COMMON_H_ -#define _CXX_COMMON_H_ - -// -// Common definitions used by all of Berkeley DB's C++ include files. -// - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Mechanisms for declaring classes -// - -// -// Every class defined in this file has an _exported next to the class name. -// This is needed for WinTel machines so that the class methods can -// be exported or imported in a DLL as appropriate. Users of the DLL -// use the define DB_USE_DLL. When the DLL is built, DB_CREATE_DLL -// must be defined. -// -#if defined(_MSC_VER) - -# if defined(DB_CREATE_DLL) -# define _exported __declspec(dllexport) // creator of dll -# elif defined(DB_USE_DLL) -# define _exported __declspec(dllimport) // user of dll -# else -# define _exported // static lib creator or user -# endif - -#else /* _MSC_VER */ - -# define _exported - -#endif /* _MSC_VER */ -#endif /* !_CXX_COMMON_H_ */ diff --git a/storage/bdb/dbinc/cxx_except.h b/storage/bdb/dbinc/cxx_except.h deleted file mode 100644 index f9bf4f859f8..00000000000 --- a/storage/bdb/dbinc/cxx_except.h +++ /dev/null @@ -1,141 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2002 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_except.h,v 11.5 2002/08/01 23:32:34 mjc Exp $ - */ - -#ifndef _CXX_EXCEPT_H_ -#define _CXX_EXCEPT_H_ - -#include "cxx_common.h" - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Forward declarations -// - -class DbDeadlockException; // forward -class DbException; // forward -class DbLockNotGrantedException; // forward -class DbLock; // forward -class DbMemoryException; // forward -class DbRunRecoveryException; // forward -class Dbt; // forward - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Exception classes -// - -// Almost any error in the DB library throws a DbException. -// Every exception should be considered an abnormality -// (e.g. bug, misuse of DB, file system error). -// -// NOTE: We would like to inherit from class exception and -// let it handle what(), but there are -// MSVC++ problems when is included. -// -class _exported DbException -{ -public: - virtual ~DbException(); - DbException(int err); - DbException(const char *description); - DbException(const char *prefix, int err); - DbException(const char *prefix1, const char *prefix2, int err); - int get_errno() const; - virtual const char *what() const; - - DbException(const DbException &); - DbException &operator = (const DbException &); - -private: - char *what_; - int err_; // errno -}; - -// -// A specific sort of exception that occurs when -// an operation is aborted to resolve a deadlock. -// -class _exported DbDeadlockException : public DbException -{ -public: - virtual ~DbDeadlockException(); - DbDeadlockException(const char *description); - - DbDeadlockException(const DbDeadlockException &); - DbDeadlockException &operator = (const DbDeadlockException &); -}; - -// -// A specific sort of exception that occurs when -// a lock is not granted, e.g. by lock_get or lock_vec. -// Note that the Dbt is only live as long as the Dbt used -// in the offending call. 
-// -class _exported DbLockNotGrantedException : public DbException -{ -public: - virtual ~DbLockNotGrantedException(); - DbLockNotGrantedException(const char *prefix, db_lockop_t op, - db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index); - DbLockNotGrantedException(const DbLockNotGrantedException &); - DbLockNotGrantedException &operator = - (const DbLockNotGrantedException &); - - db_lockop_t get_op() const; - db_lockmode_t get_mode() const; - const Dbt* get_obj() const; - DbLock *get_lock() const; - int get_index() const; - -private: - db_lockop_t op_; - db_lockmode_t mode_; - const Dbt *obj_; - DbLock *lock_; - int index_; -}; - -// -// A specific sort of exception that occurs when -// user declared memory is insufficient in a Dbt. -// -class _exported DbMemoryException : public DbException -{ -public: - virtual ~DbMemoryException(); - DbMemoryException(Dbt *dbt); - DbMemoryException(const char *description); - DbMemoryException(const char *prefix, Dbt *dbt); - DbMemoryException(const char *prefix1, const char *prefix2, Dbt *dbt); - Dbt *get_dbt() const; - - DbMemoryException(const DbMemoryException &); - DbMemoryException &operator = (const DbMemoryException &); - -private: - Dbt *dbt_; -}; - -// -// A specific sort of exception that occurs when -// recovery is required before continuing DB activity. -// -class _exported DbRunRecoveryException : public DbException -{ -public: - virtual ~DbRunRecoveryException(); - DbRunRecoveryException(const char *description); - - DbRunRecoveryException(const DbRunRecoveryException &); - DbRunRecoveryException &operator = (const DbRunRecoveryException &); -}; - -#endif /* !_CXX_EXCEPT_H_ */ diff --git a/storage/bdb/dbinc/cxx_int.h b/storage/bdb/dbinc/cxx_int.h deleted file mode 100644 index 7686058e85f..00000000000 --- a/storage/bdb/dbinc/cxx_int.h +++ /dev/null @@ -1,77 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: cxx_int.h,v 12.1 2005/06/16 20:21:43 bostic Exp $ - */ - -#ifndef _CXX_INT_H_ -#define _CXX_INT_H_ - -// private data structures known to the implementation only - -// -// Using FooImp classes will allow the implementation to change in the -// future without any modification to user code or even to header files -// that the user includes. FooImp * is just like void * except that it -// provides a little extra protection, since you cannot randomly assign -// any old pointer to a FooImp* as you can with void *. Currently, a -// pointer to such an opaque class is always just a pointer to the -// appropriate underlying implementation struct. These are converted -// back and forth using the various overloaded wrap()/unwrap() methods. -// This is essentially a use of the "Bridge" Design Pattern. -// -// WRAPPED_CLASS implements the appropriate wrap() and unwrap() methods -// for a wrapper class that has an underlying pointer representation. 
-// -#define WRAPPED_CLASS(_WRAPPER_CLASS, _IMP_CLASS, _WRAPPED_TYPE) \ - \ - class _IMP_CLASS {}; \ - \ - inline _WRAPPED_TYPE *unwrap(_WRAPPER_CLASS *val) \ - { \ - if (!val) return (0); \ - return (val->get_##_WRAPPED_TYPE()); \ - } \ - \ - inline const _WRAPPED_TYPE *unwrapConst(const _WRAPPER_CLASS *val) \ - { \ - if (!val) return (0); \ - return (val->get_const_##_WRAPPED_TYPE()); \ - } - -WRAPPED_CLASS(Db, DbImp, DB) -WRAPPED_CLASS(DbEnv, DbEnvImp, DB_ENV) -WRAPPED_CLASS(DbMpoolFile, DbMpoolFileImp, DB_MPOOLFILE) -WRAPPED_CLASS(DbSequence, DbSequenceImp, DB_SEQUENCE) -WRAPPED_CLASS(DbTxn, DbTxnImp, DB_TXN) - -// A tristate integer value used by the DB_ERROR macro below. -// We chose not to make this an enumerated type so it can -// be kept private, even though methods that return the -// tristate int can be declared in db_cxx.h . -// -#define ON_ERROR_THROW 1 -#define ON_ERROR_RETURN 0 -#define ON_ERROR_UNKNOWN (-1) - -// Macros that handle detected errors, in case we want to -// change the default behavior. The 'policy' is one of -// the tristate values given above. If UNKNOWN is specified, -// the behavior is taken from the last initialized DbEnv. -// -#define DB_ERROR(env, caller, ecode, policy) \ - DbEnv::runtime_error(env, caller, ecode, policy) - -#define DB_ERROR_DBT(env, caller, dbt, policy) \ - DbEnv::runtime_error_dbt(env, caller, dbt, policy) - -#define DB_OVERFLOWED_DBT(dbt) \ - (F_ISSET(dbt, DB_DBT_USERMEM) && dbt->size > dbt->ulen) - -/* values for Db::flags_ */ -#define DB_CXX_PRIVATE_ENV 0x00000001 - -#endif /* !_CXX_INT_H_ */ diff --git a/storage/bdb/dbinc/db.in b/storage/bdb/dbinc/db.in deleted file mode 100644 index 741b2e259b6..00000000000 --- a/storage/bdb/dbinc/db.in +++ /dev/null @@ -1,2369 +0,0 @@ -/* - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db.in,v 12.67 2005/11/10 21:10:24 bostic Exp $ - * - * db.h include file layout: - * General. - * Database Environment. - * Locking subsystem. - * Logging subsystem. - * Shared buffer cache (mpool) subsystem. - * Transaction subsystem. - * Access methods. - * Access method cursors. - * Dbm/Ndbm, Hsearch historic interfaces. - */ - -#ifndef _DB_H_ -#define _DB_H_ - -#ifndef __NO_SYSTEM_INCLUDES -#include -@inttypes_h_decl@ -@stdint_h_decl@ -@stddef_h_decl@ -#include -@unistd_h_decl@ -@thread_h_decl@ -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -@DB_CONST@ -@DB_PROTO1@ -@DB_PROTO2@ - -/* - * Berkeley DB version information. - */ -#define DB_VERSION_MAJOR @DB_VERSION_MAJOR@ -#define DB_VERSION_MINOR @DB_VERSION_MINOR@ -#define DB_VERSION_PATCH @DB_VERSION_PATCH@ -#define DB_VERSION_STRING @DB_VERSION_STRING@ - -/* - * !!! - * Berkeley DB uses specifically sized types. If they're not provided by - * the system, typedef them here. - * - * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__, - * as does BIND and Kerberos, since we don't know for sure what #include - * files the user is using. - * - * !!! - * We also provide the standard u_int, u_long etc., if they're not provided - * by the system. - */ -#ifndef __BIT_TYPES_DEFINED__ -#define __BIT_TYPES_DEFINED__ -@u_int8_decl@ -@int16_decl@ -@u_int16_decl@ -@int32_decl@ -@u_int32_decl@ -@int64_decl@ -@u_int64_decl@ -#endif - -@u_char_decl@ -@u_short_decl@ -@u_int_decl@ -@u_long_decl@ -@ssize_t_decl@ - -/* - * uintmax_t -- - * Largest unsigned type, used to align structures in memory. 
We don't store - * floating point types in structures, so integral types should be sufficient - * (and we don't have to worry about systems that store floats in other than - * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite - * structure assignments and ANSI C memcpy calls to be in-line instructions - * that happen to require alignment. - * - * uintptr_t -- - * Unsigned type that's the same size as a pointer. There are places where - * DB modifies pointers by discarding the bottom bits to guarantee alignment. - * We can't use uintmax_t, it may be larger than the pointer, and compilers - * get upset about that. So far we haven't run on any machine where there's - * no unsigned type the same size as a pointer -- here's hoping. - */ -@uintmax_t_decl@ -@uintptr_t_decl@ - -/* - * Sequences are only available on machines with 64-bit integral types. - */ -@db_seq_decl@ - -/* Thread and process identification. */ -@db_threadid_t_decl@ -@pid_t_decl@ - -/* Basic types that are exported or quasi-exported. */ -typedef u_int32_t db_pgno_t; /* Page number type. */ -typedef u_int16_t db_indx_t; /* Page offset type. */ -#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */ - -typedef u_int32_t db_recno_t; /* Record number type. */ -#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */ - -typedef u_int32_t db_timeout_t; /* Type of a timeout. */ - -/* - * Region offsets are the difference between a pointer in a region and the - * region's base address. With private environments, both addresses are the - * result of calling malloc, and we can't assume anything about what malloc - * will return, so region offsets have to be able to hold differences between - * arbitrary pointers. - */ -typedef uintptr_t roff_t; - -/* - * Forward structure declarations, so we can declare pointers and - * applications can get type checking. 
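A hedged usage sketch of the DBT ("Data-Base Thang") key/data container declared just below: callers conventionally zero the whole struct so unused fields and flags are 0, then point it at their own memory. The field names and the DB_DBT_USERMEM flag are taken from this header; the helper itself and the include of the installed db.h are illustrative assumptions:

#include <db.h>         /* installed form of db.in */
#include <string.h>

/*
 * Prepare a key DBT that points at caller-supplied key bytes, and a data
 * DBT that asks DB to fill a caller-owned buffer (DB_DBT_USERMEM).
 */
void fill_dbts(DBT *key, DBT *data,
    void *keybuf, u_int32_t keylen, void *databuf, u_int32_t datalen)
{
        memset(key, 0, sizeof(DBT));
        memset(data, 0, sizeof(DBT));

        key->data = keybuf;             /* key bytes supplied by the caller */
        key->size = keylen;

        data->data = databuf;           /* buffer for DB to fill on a get() */
        data->ulen = datalen;           /* length of the user buffer        */
        data->flags = DB_DBT_USERMEM;   /* "return in user's memory"        */
}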
- */ -struct __db; typedef struct __db DB; -struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT; -struct __db_cipher; typedef struct __db_cipher DB_CIPHER; -struct __db_compact; typedef struct __db_compact DB_COMPACT; -struct __db_dbt; typedef struct __db_dbt DBT; -struct __db_env; typedef struct __db_env DB_ENV; -struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT; -struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK; -struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT; -struct __db_lock_u; typedef struct __db_lock_u DB_LOCK; -struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ; -struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC; -struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT; -struct __db_lsn; typedef struct __db_lsn DB_LSN; -struct __db_mpool; typedef struct __db_mpool DB_MPOOL; -struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT; -struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT; -struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE; -struct __db_mutex_stat; typedef struct __db_mutex_stat DB_MUTEX_STAT; -struct __db_preplist; typedef struct __db_preplist DB_PREPLIST; -struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT; -struct __db_rep; typedef struct __db_rep DB_REP; -struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT; -struct __db_seq_record; typedef struct __db_seq_record DB_SEQ_RECORD; -struct __db_seq_stat; typedef struct __db_seq_stat DB_SEQUENCE_STAT; -struct __db_sequence; typedef struct __db_sequence DB_SEQUENCE; -struct __db_txn; typedef struct __db_txn DB_TXN; -struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE; -struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT; -struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR; -struct __dbc; typedef struct __dbc DBC; -struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL; -struct __fh_t; typedef struct __fh_t DB_FH; -struct __fname; typedef struct __fname FNAME; -struct __key_range; typedef struct __key_range DB_KEY_RANGE; -struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE; - -/* Key/data structure -- a Data-Base Thang. */ -struct __db_dbt { - void *data; /* Key/data */ - u_int32_t size; /* key/data length */ - - u_int32_t ulen; /* RO: length of user buffer. */ - u_int32_t dlen; /* RO: get/put record length. */ - u_int32_t doff; /* RO: get/put record offset. */ - - void *app_private; /* Application-private handle. */ - -#define DB_DBT_APPMALLOC 0x001 /* Callback allocated memory. */ -#define DB_DBT_ISSET 0x002 /* Lower level calls set value. */ -#define DB_DBT_MALLOC 0x004 /* Return in malloc'd memory. */ -#define DB_DBT_PARTIAL 0x008 /* Partial put/get. */ -#define DB_DBT_REALLOC 0x010 /* Return in realloc'd memory. */ -#define DB_DBT_USERMEM 0x020 /* Return in user's memory. */ -#define DB_DBT_DUPOK 0x040 /* Insert if duplicate. */ - u_int32_t flags; -}; - -/* - * Common flags -- - * Interfaces which use any of these common flags should never have - * interface specific flags in this range. - */ -#define DB_CREATE 0x0000001 /* Create file as necessary. */ -#define DB_DURABLE_UNKNOWN 0x0000002 /* Durability on open (internal). */ -#define DB_FORCE 0x0000004 /* Force (anything). */ -#define DB_NOMMAP 0x0000008 /* Don't mmap underlying file. */ -#define DB_RDONLY 0x0000010 /* Read-only (O_RDONLY). */ -#define DB_RECOVER 0x0000020 /* Run normal recovery. */ -#define DB_THREAD 0x0000040 /* Applications are threaded. 
*/ -#define DB_TRUNCATE 0x0000080 /* Discard existing DB (O_TRUNC). */ -#define DB_TXN_NOSYNC 0x0000100 /* Do not sync log on commit. */ -#define DB_TXN_NOT_DURABLE 0x0000200 /* Do not log changes. */ -#define DB_TXN_WRITE_NOSYNC 0x0000400 /* Write the log but don't sync. */ -#define DB_USE_ENVIRON 0x0000800 /* Use the environment. */ -#define DB_USE_ENVIRON_ROOT 0x0001000 /* Use the environment if root. */ - -/* - * Common flags -- - * Interfaces which use any of these common flags should never have - * interface specific flags in this range. - * - * DB_AUTO_COMMIT: - * DB_ENV->set_flags, DB->open - * (Note: until the 4.3 release, legal to DB->associate, DB->del, - * DB->put, DB->remove, DB->rename and DB->truncate, and others.) - * DB_READ_COMMITTED: - * DB->cursor, DB->get, DB->join, DBcursor->c_get, DB_ENV->txn_begin - * DB_READ_UNCOMMITTED: - * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get, - * DB_ENV->txn_begin - * - * !!! - * The DB_READ_COMMITTED and DB_READ_UNCOMMITTED bit masks can't be changed - * without also changing the masks for the flags that can be OR'd into DB - * access method and cursor operation values. - */ -#define DB_AUTO_COMMIT 0x01000000/* Implied transaction. */ - -#define DB_READ_COMMITTED 0x02000000/* Degree 2 isolation. */ -#define DB_DEGREE_2 0x02000000/* Historic name. */ - -#define DB_READ_UNCOMMITTED 0x04000000/* Degree 1 isolation. */ -#define DB_DIRTY_READ 0x04000000/* Historic name. */ - -/* - * Flags common to db_env_create and db_create. - */ -#define DB_CXX_NO_EXCEPTIONS 0x0000001 /* C++: return error values. */ - -/* - * Flags private to db_env_create. - * Shared flags up to 0x0000001 */ -#define DB_RPCCLIENT 0x0000002 /* An RPC client environment. */ - -/* - * Flags private to db_create. - * Shared flags up to 0x0000001 */ -#define DB_XA_CREATE 0x0000002 /* Open in an XA environment. */ - -/* - * Flags private to DB_ENV->open. - * Shared flags up to 0x0001000 */ -#define DB_INIT_CDB 0x0002000 /* Concurrent Access Methods. */ -#define DB_INIT_LOCK 0x0004000 /* Initialize locking. */ -#define DB_INIT_LOG 0x0008000 /* Initialize logging. */ -#define DB_INIT_MPOOL 0x0010000 /* Initialize mpool. */ -#define DB_INIT_REP 0x0020000 /* Initialize replication. */ -#define DB_INIT_TXN 0x0040000 /* Initialize transactions. */ -#define DB_LOCKDOWN 0x0080000 /* Lock memory into physical core. */ -#define DB_PRIVATE 0x0100000 /* DB_ENV is process local. */ -#define DB_RECOVER_FATAL 0x0200000 /* Run catastrophic recovery. */ -#define DB_REGISTER 0x0400000 /* Multi-process registry. */ -#define DB_SYSTEM_MEM 0x0800000 /* Use system-backed memory. */ - -#define DB_JOINENV 0x0 /* Compatibility. */ - -/* - * Flags private to DB->open. - * Shared flags up to 0x0001000 */ -#define DB_EXCL 0x0002000 /* Exclusive open (O_EXCL). */ -#define DB_FCNTL_LOCKING 0x0004000 /* UNDOC: fcntl(2) locking. */ -#define DB_NO_AUTO_COMMIT 0x0008000 /* Override env-wide AUTOCOMMIT. */ -#define DB_RDWRMASTER 0x0010000 /* UNDOC: allow subdb master open R/W */ -#define DB_WRITEOPEN 0x0020000 /* UNDOC: open with write lock. */ - -/* - * Flags private to DB->associate. - * Shared flags up to 0x0001000 */ -#define DB_IMMUTABLE_KEY 0x0002000 /* Secondary key is immutable. */ -/* Shared flags at 0x1000000 */ - -/* - * Flags private to DB_ENV->txn_begin. - * Shared flags up to 0x0001000 */ -#define DB_TXN_NOWAIT 0x0002000 /* Do not wait for locks in this TXN. */ -#define DB_TXN_SYNC 0x0004000 /* Always sync log on commit. */ - -/* - * Flags private to DB_ENV->set_encrypt. 
- */ -#define DB_ENCRYPT_AES 0x0000001 /* AES, assumes SHA1 checksum */ - -/* - * Flags private to DB_ENV->set_flags. - * Shared flags up to 0x00001000 */ -#define DB_CDB_ALLDB 0x00002000/* Set CDB locking per environment. */ -#define DB_DIRECT_DB 0x00004000/* Don't buffer databases in the OS. */ -#define DB_DIRECT_LOG 0x00008000/* Don't buffer log files in the OS. */ -#define DB_DSYNC_DB 0x00010000/* Set O_DSYNC on the databases. */ -#define DB_DSYNC_LOG 0x00020000/* Set O_DSYNC on the log. */ -#define DB_LOG_AUTOREMOVE 0x00040000/* Automatically remove log files. */ -#define DB_LOG_INMEMORY 0x00080000/* Store logs in buffers in memory. */ -#define DB_NOLOCKING 0x00100000/* Set locking/mutex behavior. */ -#define DB_NOPANIC 0x00200000/* Set panic state per DB_ENV. */ -#define DB_OVERWRITE 0x00400000/* Overwrite unlinked region files. */ -#define DB_PANIC_ENVIRONMENT 0x00800000/* Set panic state per environment. */ -/* Shared flags at 0x01000000 */ -/* Shared flags at 0x02000000 */ -/* Shared flags at 0x04000000 */ -#define DB_REGION_INIT 0x08000000/* Page-fault regions on open. */ -#define DB_TIME_NOTGRANTED 0x10000000/* Return NOTGRANTED on timeout. */ -#define DB_YIELDCPU 0x20000000/* Yield the CPU (a lot). */ - -/* - * Flags private to DB->set_feedback's callback. - */ -#define DB_UPGRADE 0x0000001 /* Upgrading. */ -#define DB_VERIFY 0x0000002 /* Verifying. */ - -/* - * Flags private to DB->compact. - * Shared flags up to 0x00001000 - */ -#define DB_FREELIST_ONLY 0x00002000 /* Just sort and truncate. */ -#define DB_FREE_SPACE 0x00004000 /* Free space . */ -#define DB_COMPACT_FLAGS \ - (DB_FREELIST_ONLY | DB_FREE_SPACE) - -/* - * Flags private to DB_MPOOLFILE->open. - * Shared flags up to 0x0001000 */ -#define DB_DIRECT 0x0002000 /* Don't buffer the file in the OS. */ -#define DB_EXTENT 0x0004000 /* internal: dealing with an extent. */ -#define DB_ODDFILESIZE 0x0008000 /* Truncate file to N * pgsize. */ - -/* - * Flags private to DB->set_flags. - * Shared flags up to 0x00001000 */ -#define DB_CHKSUM 0x00002000 /* Do checksumming */ -#define DB_DUP 0x00004000 /* Btree, Hash: duplicate keys. */ -#define DB_DUPSORT 0x00008000 /* Btree, Hash: duplicate keys. */ -#define DB_ENCRYPT 0x00010000 /* Btree, Hash: duplicate keys. */ -#define DB_INORDER 0x00020000 /* Queue: strict ordering on consume */ -#define DB_RECNUM 0x00040000 /* Btree: record numbers. */ -#define DB_RENUMBER 0x00080000 /* Recno: renumber on insert/delete. */ -#define DB_REVSPLITOFF 0x00100000 /* Btree: turn off reverse splits. */ -#define DB_SNAPSHOT 0x00200000 /* Recno: snapshot the input. */ - -/* - * Flags private to the DB_ENV->stat_print, DB->stat and DB->stat_print methods. - */ -#define DB_STAT_ALL 0x0000001 /* Print: Everything. */ -#define DB_STAT_CLEAR 0x0000002 /* Clear stat after returning values. */ -#define DB_STAT_LOCK_CONF 0x0000004 /* Print: Lock conflict matrix. */ -#define DB_STAT_LOCK_LOCKERS 0x0000008 /* Print: Lockers. */ -#define DB_STAT_LOCK_OBJECTS 0x0000010 /* Print: Lock objects. */ -#define DB_STAT_LOCK_PARAMS 0x0000020 /* Print: Lock parameters. */ -#define DB_STAT_MEMP_HASH 0x0000040 /* Print: Mpool hash buckets. */ -#define DB_STAT_SUBSYSTEM 0x0000080 /* Print: Subsystems too. */ - -/* - * Flags private to DB->join. - */ -#define DB_JOIN_NOSORT 0x0000001 /* Don't try to optimize join. */ - -/* - * Flags private to DB->verify. - */ -#define DB_AGGRESSIVE 0x0000001 /* Salvage whatever could be data.*/ -#define DB_NOORDERCHK 0x0000002 /* Skip sort order/hashing check. 
*/ -#define DB_ORDERCHKONLY 0x0000004 /* Only perform the order check. */ -#define DB_PR_PAGE 0x0000008 /* Show page contents (-da). */ -#define DB_PR_RECOVERYTEST 0x0000010 /* Recovery test (-dr). */ -#define DB_PRINTABLE 0x0000020 /* Use printable format for salvage. */ -#define DB_SALVAGE 0x0000040 /* Salvage what looks like data. */ -#define DB_UNREF 0x0000080 /* Report unreferenced pages. */ -/* - * !!! - * These must not go over 0x8000, or they will collide with the flags - * used by __bam_vrfy_subtree. - */ - -/* - * Flags private to DB->set_rep_transport's send callback. - */ -#define DB_REP_ANYWHERE 0x0000001 /* Message can be serviced anywhere. */ -#define DB_REP_NOBUFFER 0x0000002 /* Do not buffer this message. */ -#define DB_REP_PERMANENT 0x0000004 /* Important--app. may want to flush. */ -#define DB_REP_REREQUEST 0x0000008 /* This msg already been requested. */ - -/******************************************************* - * Mutexes. - *******************************************************/ -typedef u_int32_t db_mutex_t; - -/* - * Flag arguments for DbEnv.mutex_alloc and for the DB_MUTEX structure. - */ -#define DB_MUTEX_ALLOCATED 0x01 /* Mutex currently allocated. */ -#define DB_MUTEX_LOCKED 0x02 /* Mutex currently locked. */ -#define DB_MUTEX_LOGICAL_LOCK 0x04 /* Mutex backs a database lock. */ -#define DB_MUTEX_SELF_BLOCK 0x08 /* Must be able to block self. */ -#define DB_MUTEX_THREAD 0x10 /* Thread-only mutex. */ - -struct __db_mutex_stat { - /* The following fields are maintained in the region's copy. */ - u_int32_t st_mutex_align; /* Mutex alignment */ - u_int32_t st_mutex_tas_spins; /* Mutex test-and-set spins */ - u_int32_t st_mutex_cnt; /* Mutex count */ - u_int32_t st_mutex_free; /* Available mutexes */ - u_int32_t st_mutex_inuse; /* Mutexes in use */ - u_int32_t st_mutex_inuse_max; /* Maximum mutexes ever in use */ - - /* The following fields are filled-in from other places. */ - u_int32_t st_region_wait; /* Region lock granted after wait. */ - u_int32_t st_region_nowait; /* Region lock granted without wait. */ - roff_t st_regsize; /* Region size. */ -}; - -/* This is the length of the buffer passed to DB_ENV->thread_id_string() */ -#define DB_THREADID_STRLEN 128 - -/******************************************************* - * Locking. - *******************************************************/ -#define DB_LOCKVERSION 1 - -#define DB_FILE_ID_LEN 20 /* Unique file ID length. */ - -/* - * Deadlock detector modes; used in the DB_ENV structure to configure the - * locking subsystem. - */ -#define DB_LOCK_NORUN 0 -#define DB_LOCK_DEFAULT 1 /* Default policy. */ -#define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */ -#define DB_LOCK_MAXLOCKS 3 /* Select locker with max locks. */ -#define DB_LOCK_MAXWRITE 4 /* Select locker with max writelocks. */ -#define DB_LOCK_MINLOCKS 5 /* Select locker with min locks. */ -#define DB_LOCK_MINWRITE 6 /* Select locker with min writelocks. */ -#define DB_LOCK_OLDEST 7 /* Select oldest locker. */ -#define DB_LOCK_RANDOM 8 /* Select random locker. */ -#define DB_LOCK_YOUNGEST 9 /* Select youngest locker. */ - -/* Flag values for lock_vec(), lock_get(). */ -#define DB_LOCK_ABORT 0x001 /* Internal: Lock during abort. */ -#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */ -#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */ -#define DB_LOCK_SET_TIMEOUT 0x008 /* Internal: set lock timeout. */ -#define DB_LOCK_SWITCH 0x010 /* Internal: switch existing lock. 
*/ -#define DB_LOCK_UPGRADE 0x020 /* Internal: upgrade existing lock. */ - -/* - * Simple R/W lock modes and for multi-granularity intention locking. - * - * !!! - * These values are NOT random, as they are used as an index into the lock - * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 3, and DB_LOCK_IREAD - * must be == 4. - */ -typedef enum { - DB_LOCK_NG=0, /* Not granted. */ - DB_LOCK_READ=1, /* Shared/read. */ - DB_LOCK_WRITE=2, /* Exclusive/write. */ - DB_LOCK_WAIT=3, /* Wait for event */ - DB_LOCK_IWRITE=4, /* Intent exclusive/write. */ - DB_LOCK_IREAD=5, /* Intent to share/read. */ - DB_LOCK_IWR=6, /* Intent to read and write. */ - DB_LOCK_READ_UNCOMMITTED=7, /* Degree 1 isolation. */ - DB_LOCK_WWRITE=8 /* Was Written. */ -} db_lockmode_t; - -/* - * Request types. - */ -typedef enum { - DB_LOCK_DUMP=0, /* Display held locks. */ - DB_LOCK_GET=1, /* Get the lock. */ - DB_LOCK_GET_TIMEOUT=2, /* Get lock with a timeout. */ - DB_LOCK_INHERIT=3, /* Pass locks to parent. */ - DB_LOCK_PUT=4, /* Release the lock. */ - DB_LOCK_PUT_ALL=5, /* Release locker's locks. */ - DB_LOCK_PUT_OBJ=6, /* Release locker's locks on obj. */ - DB_LOCK_PUT_READ=7, /* Release locker's read locks. */ - DB_LOCK_TIMEOUT=8, /* Force a txn to timeout. */ - DB_LOCK_TRADE=9, /* Trade locker ids on a lock. */ - DB_LOCK_UPGRADE_WRITE=10 /* Upgrade writes for dirty reads. */ -} db_lockop_t; - -/* - * Status of a lock. - */ -typedef enum { - DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */ - DB_LSTAT_EXPIRED=2, /* Lock has expired. */ - DB_LSTAT_FREE=3, /* Lock is unallocated. */ - DB_LSTAT_HELD=4, /* Lock is currently held. */ - DB_LSTAT_PENDING=5, /* Lock was waiting and has been - * promoted; waiting for the owner - * to run and upgrade it to held. */ - DB_LSTAT_WAITING=6 /* Lock is on the wait queue. */ -}db_status_t; - -/* Lock statistics structure. */ -struct __db_lock_stat { - u_int32_t st_id; /* Last allocated locker ID. */ - u_int32_t st_cur_maxid; /* Current maximum unused ID. */ - u_int32_t st_maxlocks; /* Maximum number of locks in table. */ - u_int32_t st_maxlockers; /* Maximum num of lockers in table. */ - u_int32_t st_maxobjects; /* Maximum num of objects in table. */ - int st_nmodes; /* Number of lock modes. */ - u_int32_t st_nlocks; /* Current number of locks. */ - u_int32_t st_maxnlocks; /* Maximum number of locks so far. */ - u_int32_t st_nlockers; /* Current number of lockers. */ - u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */ - u_int32_t st_nobjects; /* Current number of objects. */ - u_int32_t st_maxnobjects; /* Maximum number of objects so far. */ - u_int32_t st_nrequests; /* Number of lock gets. */ - u_int32_t st_nreleases; /* Number of lock puts. */ - u_int32_t st_nupgrade; /* Number of lock upgrades. */ - u_int32_t st_ndowngrade; /* Number of lock downgrades. */ - u_int32_t st_lock_wait; /* Lock conflicts w/ subsequent wait */ - u_int32_t st_lock_nowait; /* Lock conflicts w/o subsequent wait */ - u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */ - db_timeout_t st_locktimeout; /* Lock timeout. */ - u_int32_t st_nlocktimeouts; /* Number of lock timeouts. */ - db_timeout_t st_txntimeout; /* Transaction timeout. */ - u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */ - u_int32_t st_region_wait; /* Region lock granted after wait. */ - u_int32_t st_region_nowait; /* Region lock granted without wait. */ - roff_t st_regsize; /* Region size. */ -}; - -/* - * DB_LOCK_ILOCK -- - * Internal DB access method lock. 
- */ -struct __db_ilock { - db_pgno_t pgno; /* Page being locked. */ - u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */ -#define DB_HANDLE_LOCK 1 -#define DB_RECORD_LOCK 2 -#define DB_PAGE_LOCK 3 - u_int32_t type; /* Type of lock. */ -}; - -/* - * DB_LOCK -- - * The structure is allocated by the caller and filled in during a - * lock_get request (or a lock_vec/DB_LOCK_GET). - */ -struct __db_lock_u { - roff_t off; /* Offset of the lock in the region */ - u_int32_t ndx; /* Index of the object referenced by - * this lock; used for locking. */ - u_int32_t gen; /* Generation number of this lock. */ - db_lockmode_t mode; /* mode of this lock. */ -}; - -/* Lock request structure. */ -struct __db_lockreq { - db_lockop_t op; /* Operation. */ - db_lockmode_t mode; /* Requested mode. */ - db_timeout_t timeout; /* Time to expire lock. */ - DBT *obj; /* Object being locked. */ - DB_LOCK lock; /* Lock returned. */ -}; - -/******************************************************* - * Logging. - *******************************************************/ -#define DB_LOGVERSION 11 /* Current log version. */ -#define DB_LOGOLDVER 11 /* Oldest log version supported. */ -#define DB_LOGMAGIC 0x040988 - -/* Flag values for DB_ENV->log_archive(). */ -#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */ -#define DB_ARCH_DATA 0x002 /* Data files. */ -#define DB_ARCH_LOG 0x004 /* Log files. */ -#define DB_ARCH_REMOVE 0x008 /* Remove log files. */ - -/* Flag values for DB_ENV->log_put(). */ -#define DB_FLUSH 0x001 /* Flush data to disk (public). */ -#define DB_LOG_CHKPNT 0x002 /* Flush supports a checkpoint */ -#define DB_LOG_COMMIT 0x004 /* Flush supports a commit */ -#define DB_LOG_NOCOPY 0x008 /* Don't copy data */ -#define DB_LOG_NOT_DURABLE 0x010 /* Do not log; keep in memory */ -#define DB_LOG_PERM 0x020 /* Flag record with REP_PERMANENT */ -#define DB_LOG_RESEND 0x040 /* Resent log record */ -#define DB_LOG_WRNOSYNC 0x080 /* Write, don't sync log_put */ - -/* - * A DB_LSN has two parts, a fileid which identifies a specific file, and an - * offset within that file. The fileid is an unsigned 4-byte quantity that - * uniquely identifies a file within the log directory -- currently a simple - * counter inside the log. The offset is also an unsigned 4-byte value. The - * log manager guarantees the offset is never more than 4 bytes by switching - * to a new log file before the maximum length imposed by an unsigned 4-byte - * offset is reached. - */ -struct __db_lsn { - u_int32_t file; /* File ID. */ - u_int32_t offset; /* File offset. */ -}; - -/* - * Application-specified log record types start at DB_user_BEGIN, and must not - * equal or exceed DB_debug_FLAG. - * - * DB_debug_FLAG is the high-bit of the u_int32_t that specifies a log record - * type. If the flag is set, it's a log record that was logged for debugging - * purposes only, even if it reflects a database change -- the change was part - * of a non-durable transaction. - */ -#define DB_user_BEGIN 10000 -#define DB_debug_FLAG 0x80000000 - -/* - * DB_LOGC -- - * Log cursor. - */ -struct __db_log_cursor { - DB_ENV *dbenv; /* Enclosing dbenv. */ - - DB_FH *c_fhp; /* File handle. */ - DB_LSN c_lsn; /* Cursor: LSN */ - u_int32_t c_len; /* Cursor: record length */ - u_int32_t c_prev; /* Cursor: previous record's offset */ - - DBT c_dbt; /* Return DBT. */ - -#define DB_LOGC_BUF_SIZE (32 * 1024) - u_int8_t *bp; /* Allocated read buffer. */ - u_int32_t bp_size; /* Read buffer length in bytes. */ - u_int32_t bp_rlen; /* Read buffer valid data length. 
*/ - DB_LSN bp_lsn; /* Read buffer first byte LSN. */ - - u_int32_t bp_maxrec; /* Max record length in the log file. */ - - /* DB_LOGC PUBLIC HANDLE LIST BEGIN */ - int (*close) __P((DB_LOGC *, u_int32_t)); - int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t)); - /* DB_LOGC PUBLIC HANDLE LIST END */ - -#define DB_LOG_DISK 0x01 /* Log record came from disk. */ -#define DB_LOG_LOCKED 0x02 /* Log region already locked */ -#define DB_LOG_SILENT_ERR 0x04 /* Turn-off error messages. */ - u_int32_t flags; -}; - -/* Log statistics structure. */ -struct __db_log_stat { - u_int32_t st_magic; /* Log file magic number. */ - u_int32_t st_version; /* Log file version number. */ - int st_mode; /* Log file permissions mode. */ - u_int32_t st_lg_bsize; /* Log buffer size. */ - u_int32_t st_lg_size; /* Log file size. */ - u_int32_t st_record; /* Records entered into the log. */ - u_int32_t st_w_bytes; /* Bytes to log. */ - u_int32_t st_w_mbytes; /* Megabytes to log. */ - u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */ - u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */ - u_int32_t st_wcount; /* Total I/O writes to the log. */ - u_int32_t st_wcount_fill; /* Overflow writes to the log. */ - u_int32_t st_rcount; /* Total I/O reads from the log. */ - u_int32_t st_scount; /* Total syncs to the log. */ - u_int32_t st_region_wait; /* Region lock granted after wait. */ - u_int32_t st_region_nowait; /* Region lock granted without wait. */ - u_int32_t st_cur_file; /* Current log file number. */ - u_int32_t st_cur_offset; /* Current log file offset. */ - u_int32_t st_disk_file; /* Known on disk log file number. */ - u_int32_t st_disk_offset; /* Known on disk log file offset. */ - roff_t st_regsize; /* Region size. */ - u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */ - u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */ -}; - -/* - * We need to record the first log record of a transaction. For user - * defined logging this macro returns the place to put that information, - * if it is need in rlsnp, otherwise it leaves it unchanged. We also - * need to track the last record of the transaction, this returns the - * place to put that info. - */ -#define DB_SET_TXN_LSNP(txn, blsnp, llsnp) \ - ((txn)->set_txn_lsnp(txn, blsnp, llsnp)) - -/******************************************************* - * Shared buffer cache (mpool). - *******************************************************/ -/* Flag values for DB_MPOOLFILE->get. */ -#define DB_MPOOL_CREATE 0x001 /* Create a page. */ -#define DB_MPOOL_LAST 0x002 /* Return the last page. */ -#define DB_MPOOL_NEW 0x004 /* Create a new page. */ -#define DB_MPOOL_FREE 0x008 /* Free page if present. */ - -/* Flag values for DB_MPOOLFILE->put, DB_MPOOLFILE->set. */ -#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */ -#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */ -#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */ - -/* Flags values for DB_MPOOLFILE->set_flags. */ -#define DB_MPOOL_NOFILE 0x001 /* Never open a backing file. */ -#define DB_MPOOL_UNLINK 0x002 /* Unlink the file on last close. */ - -/* Priority values for DB_MPOOLFILE->set_priority. */ -typedef enum { - DB_PRIORITY_VERY_LOW=1, - DB_PRIORITY_LOW=2, - DB_PRIORITY_DEFAULT=3, - DB_PRIORITY_HIGH=4, - DB_PRIORITY_VERY_HIGH=5 -} DB_CACHE_PRIORITY; - -/* Per-process DB_MPOOLFILE information. */ -struct __db_mpoolfile { - DB_FH *fhp; /* Underlying file handle. */ - - /* - * !!! 
- * The ref, pinref and q fields are protected by the region lock. - */ - u_int32_t ref; /* Reference count. */ - - u_int32_t pinref; /* Pinned block reference count. */ - - /* - * !!! - * Explicit representations of structures from queue.h. - * TAILQ_ENTRY(__db_mpoolfile) q; - */ - struct { - struct __db_mpoolfile *tqe_next; - struct __db_mpoolfile **tqe_prev; - } q; /* Linked list of DB_MPOOLFILE's. */ - - /* - * !!! - * The rest of the fields (with the exception of the MP_FLUSH flag) - * are not thread-protected, even when they may be modified at any - * time by the application. The reason is the DB_MPOOLFILE handle - * is single-threaded from the viewpoint of the application, and so - * the only fields needing to be thread-protected are those accessed - * by checkpoint or sync threads when using DB_MPOOLFILE structures - * to flush buffers from the cache. - */ - DB_ENV *dbenv; /* Overlying DB_ENV. */ - MPOOLFILE *mfp; /* Underlying MPOOLFILE. */ - - u_int32_t clear_len; /* Cleared length on created pages. */ - u_int8_t /* Unique file ID. */ - fileid[DB_FILE_ID_LEN]; - int ftype; /* File type. */ - int32_t lsn_offset; /* LSN offset in page. */ - u_int32_t gbytes, bytes; /* Maximum file size. */ - DBT *pgcookie; /* Byte-string passed to pgin/pgout. */ - int32_t priority; /* Cache priority. */ - - void *addr; /* Address of mmap'd region. */ - size_t len; /* Length of mmap'd region. */ - - u_int32_t config_flags; /* Flags to DB_MPOOLFILE->set_flags. */ - - /* DB_MPOOLFILE PUBLIC HANDLE LIST BEGIN */ - int (*close) __P((DB_MPOOLFILE *, u_int32_t)); - int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *)); - int (*open) __P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t)); - int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t)); - int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t)); - int (*get_clear_len) __P((DB_MPOOLFILE *, u_int32_t *)); - int (*set_clear_len) __P((DB_MPOOLFILE *, u_int32_t)); - int (*get_fileid) __P((DB_MPOOLFILE *, u_int8_t *)); - int (*set_fileid) __P((DB_MPOOLFILE *, u_int8_t *)); - int (*get_flags) __P((DB_MPOOLFILE *, u_int32_t *)); - int (*set_flags) __P((DB_MPOOLFILE *, u_int32_t, int)); - int (*get_ftype) __P((DB_MPOOLFILE *, int *)); - int (*set_ftype) __P((DB_MPOOLFILE *, int)); - int (*get_lsn_offset) __P((DB_MPOOLFILE *, int32_t *)); - int (*set_lsn_offset) __P((DB_MPOOLFILE *, int32_t)); - int (*get_maxsize) __P((DB_MPOOLFILE *, u_int32_t *, u_int32_t *)); - int (*set_maxsize) __P((DB_MPOOLFILE *, u_int32_t, u_int32_t)); - int (*get_pgcookie) __P((DB_MPOOLFILE *, DBT *)); - int (*set_pgcookie) __P((DB_MPOOLFILE *, DBT *)); - int (*get_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY *)); - int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY)); - int (*sync) __P((DB_MPOOLFILE *)); - /* DB_MPOOLFILE PUBLIC HANDLE LIST END */ - - /* - * MP_FILEID_SET, MP_OPEN_CALLED and MP_READONLY do not need to be - * thread protected because they are initialized before the file is - * linked onto the per-process lists, and never modified. - * - * MP_FLUSH is thread protected because it is potentially read/set by - * multiple threads of control. - */ -#define MP_FILEID_SET 0x001 /* Application supplied a file ID. */ -#define MP_FLUSH 0x002 /* Was opened to flush a buffer. */ -#define MP_OPEN_CALLED 0x004 /* File opened. */ -#define MP_READONLY 0x008 /* File is readonly. */ - u_int32_t flags; -}; - -/* Mpool statistics structure. */ -struct __db_mpool_stat { - u_int32_t st_gbytes; /* Total cache size: GB. */ - u_int32_t st_bytes; /* Total cache size: B. 
*/ - u_int32_t st_ncache; /* Number of caches. */ - roff_t st_regsize; /* Region size. */ - size_t st_mmapsize; /* Maximum file size for mmap. */ - int st_maxopenfd; /* Maximum number of open fd's. */ - int st_maxwrite; /* Maximum buffers to write. */ - int st_maxwrite_sleep; /* Sleep after writing max buffers. */ - u_int32_t st_map; /* Pages from mapped files. */ - u_int32_t st_cache_hit; /* Pages found in the cache. */ - u_int32_t st_cache_miss; /* Pages not found in the cache. */ - u_int32_t st_page_create; /* Pages created in the cache. */ - u_int32_t st_page_in; /* Pages read in. */ - u_int32_t st_page_out; /* Pages written out. */ - u_int32_t st_ro_evict; /* Clean pages forced from the cache. */ - u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */ - u_int32_t st_page_trickle; /* Pages written by memp_trickle. */ - u_int32_t st_pages; /* Total number of pages. */ - u_int32_t st_page_clean; /* Clean pages. */ - u_int32_t st_page_dirty; /* Dirty pages. */ - u_int32_t st_hash_buckets; /* Number of hash buckets. */ - u_int32_t st_hash_searches; /* Total hash chain searches. */ - u_int32_t st_hash_longest; /* Longest hash chain searched. */ - u_int32_t st_hash_examined; /* Total hash entries searched. */ - u_int32_t st_hash_nowait; /* Hash lock granted with nowait. */ - u_int32_t st_hash_wait; /* Hash lock granted after wait. */ - u_int32_t st_hash_max_wait; /* Max hash lock granted after wait. */ - u_int32_t st_region_nowait; /* Region lock granted with nowait. */ - u_int32_t st_region_wait; /* Region lock granted after wait. */ - u_int32_t st_alloc; /* Number of page allocations. */ - u_int32_t st_alloc_buckets; /* Buckets checked during allocation. */ - u_int32_t st_alloc_max_buckets; /* Max checked during allocation. */ - u_int32_t st_alloc_pages; /* Pages checked during allocation. */ - u_int32_t st_alloc_max_pages; /* Max checked during allocation. */ -}; - -/* Mpool file statistics structure. */ -struct __db_mpool_fstat { - char *file_name; /* File name. */ - u_int32_t st_pagesize; /* Page size. */ - u_int32_t st_map; /* Pages from mapped files. */ - u_int32_t st_cache_hit; /* Pages found in the cache. */ - u_int32_t st_cache_miss; /* Pages not found in the cache. */ - u_int32_t st_page_create; /* Pages created in the cache. */ - u_int32_t st_page_in; /* Pages read in. */ - u_int32_t st_page_out; /* Pages written out. */ -}; - -/******************************************************* - * Transactions and recovery. - *******************************************************/ -#define DB_TXNVERSION 1 - -typedef enum { - DB_TXN_ABORT=0, /* Public. */ - DB_TXN_APPLY=1, /* Public. */ - DB_TXN_BACKWARD_ALLOC=2, /* Internal. */ - DB_TXN_BACKWARD_ROLL=3, /* Public. */ - DB_TXN_FORWARD_ROLL=4, /* Public. */ - DB_TXN_OPENFILES=5, /* Internal. */ - DB_TXN_POPENFILES=6, /* Internal. */ - DB_TXN_PRINT=7 /* Public. */ -} db_recops; - -/* - * BACKWARD_ALLOC is used during the forward pass to pick up any aborted - * allocations for files that were created during the forward pass. - * The main difference between _ALLOC and _ROLL is that the entry for - * the file not exist during the rollforward pass. - */ -#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \ - (op) == DB_TXN_BACKWARD_ROLL || (op) == DB_TXN_BACKWARD_ALLOC) -#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL || (op) == DB_TXN_APPLY) - -struct __db_txn { - DB_TXNMGR *mgrp; /* Pointer to transaction manager. */ - DB_TXN *parent; /* Pointer to transaction's parent. */ - - u_int32_t txnid; /* Unique transaction id. 
*/ - char *name; /* Transaction name */ - - db_threadid_t tid; /* Thread id for use in MT XA. */ - void *td; /* Detail structure within region. */ - db_timeout_t lock_timeout; /* Timeout for locks for this txn. */ - db_timeout_t expire; /* Time transaction expires. */ - void *txn_list; /* Undo information for parent. */ - - /* - * !!! - * Explicit representations of structures from queue.h. - * TAILQ_ENTRY(__db_txn) links; - * TAILQ_ENTRY(__db_txn) xalinks; - */ - struct { - struct __db_txn *tqe_next; - struct __db_txn **tqe_prev; - } links; /* Links transactions off manager. */ - struct { - struct __db_txn *tqe_next; - struct __db_txn **tqe_prev; - } xalinks; /* Links active XA transactions. */ - - /* - * !!! - * Explicit representations of structures from queue.h. - * TAILQ_HEAD(__kids, __db_txn) kids; - */ - struct __kids { - struct __db_txn *tqh_first; - struct __db_txn **tqh_last; - } kids; - - /* - * !!! - * Explicit representations of structures from queue.h. - * TAILQ_HEAD(__events, __txn_event) events; - */ - struct { - struct __txn_event *tqh_first; - struct __txn_event **tqh_last; - } events; - - /* - * !!! - * Explicit representations of structures from queue.h. - * STAILQ_HEAD(__logrec, __txn_logrec) logs; - */ - struct { - struct __txn_logrec *stqh_first; - struct __txn_logrec **stqh_last; - } logs; /* Links deferred events. */ - - /* - * !!! - * Explicit representations of structures from queue.h. - * TAILQ_ENTRY(__db_txn) klinks; - */ - struct { - struct __db_txn *tqe_next; - struct __db_txn **tqe_prev; - } klinks; - - void *api_internal; /* C++ API private. */ - void *xml_internal; /* XML API private. */ - - u_int32_t cursors; /* Number of cursors open for txn */ - - /* DB_TXN PUBLIC HANDLE LIST BEGIN */ - int (*abort) __P((DB_TXN *)); - int (*commit) __P((DB_TXN *, u_int32_t)); - int (*discard) __P((DB_TXN *, u_int32_t)); - int (*get_name) __P((DB_TXN *, const char **)); - u_int32_t (*id) __P((DB_TXN *)); - int (*prepare) __P((DB_TXN *, u_int8_t *)); - int (*set_name) __P((DB_TXN *, const char *)); - int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t)); - /* DB_TXN PUBLIC HANDLE LIST END */ - - /* DB_TXN PRIVATE HANDLE LIST BEGIN */ - void (*set_txn_lsnp) __P((DB_TXN *txn, DB_LSN **, DB_LSN **)); - /* DB_TXN PRIVATE HANDLE LIST END */ - -#define TXN_CHILDCOMMIT 0x001 /* Txn has committed. */ -#define TXN_COMPENSATE 0x002 /* Compensating transaction. */ -#define TXN_DEADLOCK 0x004 /* Txn has deadlocked. */ -#define TXN_LOCKTIMEOUT 0x008 /* Txn has a lock timeout. */ -#define TXN_MALLOC 0x010 /* Structure allocated by TXN system. */ -#define TXN_NOSYNC 0x020 /* Do not sync on prepare and commit. */ -#define TXN_NOWAIT 0x040 /* Do not wait on locks. */ -#define TXN_READ_COMMITTED 0x080 /* Txn has degree 2 isolation. */ -#define TXN_READ_UNCOMMITTED 0x100 /* Txn has degree 1 isolation. */ -#define TXN_RESTORED 0x200 /* Txn has been restored. */ -#define TXN_SYNC 0x400 /* Write and sync on prepare/commit. */ -#define TXN_WRITE_NOSYNC 0x800 /* Write only on prepare/commit. */ - u_int32_t flags; -}; - -#define TXN_SYNC_FLAGS (TXN_SYNC | TXN_NOSYNC | TXN_WRITE_NOSYNC) - -/* - * Structure used for two phase commit interface. Berkeley DB support for two - * phase commit is compatible with the X/Open XA interface. - * - * The XA #define XIDDATASIZE defines the size of a global transaction ID. We - * have our own version here (for name space reasons) which must have the same - * value. 
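The DB_TXN handle methods above (prepare/commit/abort) support the two-phase commit interface this comment describes. A hedged sketch of the sequence, using the C signatures declared in the struct and the DB_XIDDATASIZE constant defined immediately below; the helper name, the GID bytes, and DB_ENV->txn_begin (declared in the DB_ENV struct, not shown in this hunk) are assumptions for illustration:

#include <db.h>         /* installed form of db.in */
#include <string.h>

int put_with_2pc(DB_ENV *dbenv, DB *dbp, DBT *key, DBT *data)
{
        DB_TXN *txn;
        u_int8_t gid[DB_XIDDATASIZE];   /* 128-byte global transaction ID */
        int ret;

        memset(gid, 0, sizeof(gid));
        memcpy(gid, "example-gtrid-0001", 18);  /* illustrative GID bytes */

        if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
                return (ret);
        if ((ret = dbp->put(dbp, txn, key, data, 0)) != 0)
                goto err;

        /* Phase 1: flush the txn's log records and record the GID. */
        if ((ret = txn->prepare(txn, gid)) != 0)
                goto err;

        /* Phase 2: the transaction manager later decides to commit. */
        return (txn->commit(txn, 0));

err:    (void)txn->abort(txn);
        return (ret);
}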
- */ -#define DB_XIDDATASIZE 128 -struct __db_preplist { - DB_TXN *txn; - u_int8_t gid[DB_XIDDATASIZE]; -}; - -/* Transaction statistics structure. */ -struct __db_txn_active { - u_int32_t txnid; /* Transaction ID */ - u_int32_t parentid; /* Transaction ID of parent */ - pid_t pid; /* Process owning txn ID */ - db_threadid_t tid; /* Thread owning txn ID */ - DB_LSN lsn; /* LSN when transaction began */ - u_int32_t xa_status; /* XA status */ - u_int8_t xid[DB_XIDDATASIZE]; /* XA global transaction ID */ - char name[51]; /* 50 bytes of name, nul termination */ -}; - -struct __db_txn_stat { - DB_LSN st_last_ckp; /* lsn of the last checkpoint */ - time_t st_time_ckp; /* time of last checkpoint */ - u_int32_t st_last_txnid; /* last transaction id given out */ - u_int32_t st_maxtxns; /* maximum txns possible */ - u_int32_t st_naborts; /* number of aborted transactions */ - u_int32_t st_nbegins; /* number of begun transactions */ - u_int32_t st_ncommits; /* number of committed transactions */ - u_int32_t st_nactive; /* number of active transactions */ - u_int32_t st_nrestores; /* number of restored transactions - after recovery. */ - u_int32_t st_maxnactive; /* maximum active transactions */ - DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */ - u_int32_t st_region_wait; /* Region lock granted after wait. */ - u_int32_t st_region_nowait; /* Region lock granted without wait. */ - roff_t st_regsize; /* Region size. */ -}; - -/******************************************************* - * Replication. - *******************************************************/ -/* Special, out-of-band environment IDs. */ -#define DB_EID_BROADCAST -1 -#define DB_EID_INVALID -2 - -/* rep_config flag values. */ -#define DB_REP_CONF_BULK 0x0001 /* Bulk transfer. */ -#define DB_REP_CONF_DELAYCLIENT 0x0002 /* Delay client synchronization. */ -#define DB_REP_CONF_NOAUTOINIT 0x0004 /* No automatic client init. */ -#define DB_REP_CONF_NOWAIT 0x0008 /* Don't wait, return error. */ - -/* rep_start flags values. */ -#define DB_REP_CLIENT 0x001 -#define DB_REP_MASTER 0x002 - -/* Replication statistics. */ -struct __db_rep_stat { - /* !!! - * Many replication statistics fields cannot be protected by a mutex - * without an unacceptable performance penalty, since most message - * processing is done without the need to hold a region-wide lock. - * Fields whose comments end with a '+' may be updated without holding - * the replication or log mutexes (as appropriate), and thus may be - * off somewhat (or, on unreasonable architectures under unlucky - * circumstances, garbaged). - */ - u_int32_t st_status; /* Current replication status. */ - DB_LSN st_next_lsn; /* Next LSN to use or expect. */ - DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */ - db_pgno_t st_next_pg; /* Next pg we expect. */ - db_pgno_t st_waiting_pg; /* pg we're awaiting, if any. */ - - u_int32_t st_dupmasters; /* # of times a duplicate master - condition was detected.+ */ - int st_env_id; /* Current environment ID. */ - int st_env_priority; /* Current environment priority. */ - u_int32_t st_bulk_fills; /* Bulk buffer fills. */ - u_int32_t st_bulk_overflows; /* Bulk buffer overflows. */ - u_int32_t st_bulk_records; /* Bulk records stored. */ - u_int32_t st_bulk_transfers; /* Transfers of bulk buffers. */ - u_int32_t st_client_rerequests; /* Number of forced rerequests. */ - u_int32_t st_client_svc_req; /* Number of client service requests - received by this client. 
*/ - u_int32_t st_client_svc_miss; /* Number of client service requests - missing on this client. */ - u_int32_t st_gen; /* Current generation number. */ - u_int32_t st_egen; /* Current election gen number. */ - u_int32_t st_log_duplicated; /* Log records received multiply.+ */ - u_int32_t st_log_queued; /* Log records currently queued.+ */ - u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */ - u_int32_t st_log_queued_total; /* Total # of log recs. ever queued.+ */ - u_int32_t st_log_records; /* Log records received and put.+ */ - u_int32_t st_log_requested; /* Log recs. missed and requested.+ */ - int st_master; /* Env. ID of the current master. */ - u_int32_t st_master_changes; /* # of times we've switched masters. */ - u_int32_t st_msgs_badgen; /* Messages with a bad generation #.+ */ - u_int32_t st_msgs_processed; /* Messages received and processed.+ */ - u_int32_t st_msgs_recover; /* Messages ignored because this site - was a client in recovery.+ */ - u_int32_t st_msgs_send_failures;/* # of failed message sends.+ */ - u_int32_t st_msgs_sent; /* # of successful message sends.+ */ - u_int32_t st_newsites; /* # of NEWSITE msgs. received.+ */ - int st_nsites; /* Current number of sites we will - assume during elections. */ - u_int32_t st_nthrottles; /* # of times we were throttled. */ - u_int32_t st_outdated; /* # of times we detected and returned - an OUTDATED condition.+ */ - u_int32_t st_pg_duplicated; /* Pages received multiply.+ */ - u_int32_t st_pg_records; /* Pages received and stored.+ */ - u_int32_t st_pg_requested; /* Pages missed and requested.+ */ - u_int32_t st_startup_complete; /* Site completed client sync-up. */ - u_int32_t st_txns_applied; /* # of transactions applied.+ */ - - /* Elections generally. */ - u_int32_t st_elections; /* # of elections held.+ */ - u_int32_t st_elections_won; /* # of elections won by this site.+ */ - - /* Statistics about an in-progress election. */ - int st_election_cur_winner; /* Current front-runner. */ - u_int32_t st_election_gen; /* Election generation number. */ - DB_LSN st_election_lsn; /* Max. LSN of current winner. */ - int st_election_nsites; /* # of "registered voters". */ - int st_election_nvotes; /* # of "registered voters" needed. */ - int st_election_priority; /* Current election priority. */ - int st_election_status; /* Current election status. */ - u_int32_t st_election_tiebreaker;/* Election tiebreaker value. */ - int st_election_votes; /* Votes received in this round. */ - u_int32_t st_election_sec; /* Last election time seconds. */ - u_int32_t st_election_usec; /* Last election time useconds. */ -}; - -/******************************************************* - * Sequences. - *******************************************************/ -/* - * The storage record for a sequence. - */ -struct __db_seq_record { - u_int32_t seq_version; /* Version size/number. */ -#define DB_SEQ_DEC 0x00000001 /* Decrement sequence. */ -#define DB_SEQ_INC 0x00000002 /* Increment sequence. */ -#define DB_SEQ_RANGE_SET 0x00000004 /* Range set (internal). */ -#define DB_SEQ_WRAP 0x00000008 /* Wrap sequence at min/max. */ -#define DB_SEQ_WRAPPED 0x00000010 /* Just wrapped (internal). */ - u_int32_t flags; /* Flags. */ - db_seq_t seq_value; /* Current value. */ - db_seq_t seq_max; /* Max permitted. */ - db_seq_t seq_min; /* Min permitted. */ -}; - -/* - * Handle for a sequence object. - */ -struct __db_sequence { - DB *seq_dbp; /* DB handle for this sequence. */ - db_mutex_t mtx_seq; /* Mutex if sequence is threaded. 
*/ - DB_SEQ_RECORD *seq_rp; /* Pointer to current data. */ - DB_SEQ_RECORD seq_record; /* Data from DB_SEQUENCE. */ - int32_t seq_cache_size; /* Number of values cached. */ - db_seq_t seq_last_value; /* Last value cached. */ - DBT seq_key; /* DBT pointing to sequence key. */ - DBT seq_data; /* DBT pointing to seq_record. */ - - /* API-private structure: used by C++ and Java. */ - void *api_internal; - - /* DB_SEQUENCE PUBLIC HANDLE LIST BEGIN */ - int (*close) __P((DB_SEQUENCE *, u_int32_t)); - int (*get) __P((DB_SEQUENCE *, - DB_TXN *, int32_t, db_seq_t *, u_int32_t)); - int (*get_cachesize) __P((DB_SEQUENCE *, int32_t *)); - int (*get_db) __P((DB_SEQUENCE *, DB **)); - int (*get_flags) __P((DB_SEQUENCE *, u_int32_t *)); - int (*get_key) __P((DB_SEQUENCE *, DBT *)); - int (*get_range) __P((DB_SEQUENCE *, - db_seq_t *, db_seq_t *)); - int (*initial_value) __P((DB_SEQUENCE *, db_seq_t)); - int (*open) __P((DB_SEQUENCE *, - DB_TXN *, DBT *, u_int32_t)); - int (*remove) __P((DB_SEQUENCE *, DB_TXN *, u_int32_t)); - int (*set_cachesize) __P((DB_SEQUENCE *, int32_t)); - int (*set_flags) __P((DB_SEQUENCE *, u_int32_t)); - int (*set_range) __P((DB_SEQUENCE *, db_seq_t, db_seq_t)); - int (*stat) __P((DB_SEQUENCE *, - DB_SEQUENCE_STAT **, u_int32_t)); - int (*stat_print) __P((DB_SEQUENCE *, u_int32_t)); - /* DB_SEQUENCE PUBLIC HANDLE LIST END */ -}; - -struct __db_seq_stat { - u_int32_t st_wait; /* Sequence lock granted w/o wait. */ - u_int32_t st_nowait; /* Sequence lock granted after wait. */ - db_seq_t st_current; /* Current value in db. */ - db_seq_t st_value; /* Current cached value. */ - db_seq_t st_last_value; /* Last cached value. */ - db_seq_t st_min; /* Minimum value. */ - db_seq_t st_max; /* Maximum value. */ - int32_t st_cache_size; /* Cache size. */ - u_int32_t st_flags; /* Flag value. */ -}; - -/******************************************************* - * Access methods. - *******************************************************/ -typedef enum { - DB_BTREE=1, - DB_HASH=2, - DB_RECNO=3, - DB_QUEUE=4, - DB_UNKNOWN=5 /* Figure it out on open. */ -} DBTYPE; - -#define DB_RENAMEMAGIC 0x030800 /* File has been renamed. */ - -#define DB_BTREEVERSION 9 /* Current btree version. */ -#define DB_BTREEOLDVER 8 /* Oldest btree version supported. */ -#define DB_BTREEMAGIC 0x053162 - -#define DB_HASHVERSION 8 /* Current hash version. */ -#define DB_HASHOLDVER 7 /* Oldest hash version supported. */ -#define DB_HASHMAGIC 0x061561 - -#define DB_QAMVERSION 4 /* Current queue version. */ -#define DB_QAMOLDVER 3 /* Oldest queue version supported. */ -#define DB_QAMMAGIC 0x042253 - -#define DB_SEQUENCE_VERSION 2 /* Current sequence version. */ -#define DB_SEQUENCE_OLDVER 1 /* Oldest sequence version supported. */ - -/* - * DB access method and cursor operation values. Each value is an operation - * code to which additional bit flags are added. 
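These operation codes are combined by OR'ing in the modifier flags listed further down (DB_RMW, DB_MULTIPLE, DB_READ_UNCOMMITTED, ...). A hedged sketch of the idiom with the DB->get method declared later in this header; the wrapper function is illustrative only:

#include <db.h>         /* installed form of db.in */

/*
 * Read a record under a transaction while acquiring its write lock up
 * front (DB_RMW), so a later update does not have to upgrade the lock
 * and risk a deadlock.
 */
int get_for_update(DB *dbp, DB_TXN *txn, DBT *key, DBT *data)
{
        return (dbp->get(dbp, txn, key, data, DB_RMW));
}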
- */ -#define DB_AFTER 1 /* c_put() */ -#define DB_APPEND 2 /* put() */ -#define DB_BEFORE 3 /* c_put() */ -#define DB_CACHED_COUNTS 4 /* stat() */ -#define DB_CONSUME 5 /* get() */ -#define DB_CONSUME_WAIT 6 /* get() */ -#define DB_CURRENT 7 /* c_get(), c_put(), DB_LOGC->get() */ -#define DB_FAST_STAT 8 /* stat() */ -#define DB_FIRST 9 /* c_get(), DB_LOGC->get() */ -#define DB_GET_BOTH 10 /* get(), c_get() */ -#define DB_GET_BOTHC 11 /* c_get() (internal) */ -#define DB_GET_BOTH_RANGE 12 /* get(), c_get() */ -#define DB_GET_RECNO 13 /* c_get() */ -#define DB_JOIN_ITEM 14 /* c_get(); do not do primary lookup */ -#define DB_KEYFIRST 15 /* c_put() */ -#define DB_KEYLAST 16 /* c_put() */ -#define DB_LAST 17 /* c_get(), DB_LOGC->get() */ -#define DB_NEXT 18 /* c_get(), DB_LOGC->get() */ -#define DB_NEXT_DUP 19 /* c_get() */ -#define DB_NEXT_NODUP 20 /* c_get() */ -#define DB_NODUPDATA 21 /* put(), c_put() */ -#define DB_NOOVERWRITE 22 /* put() */ -#define DB_NOSYNC 23 /* close() */ -#define DB_POSITION 24 /* c_dup() */ -#define DB_PREV 25 /* c_get(), DB_LOGC->get() */ -#define DB_PREV_NODUP 26 /* c_get(), DB_LOGC->get() */ -#define DB_RECORDCOUNT 27 /* stat() */ -#define DB_SET 28 /* c_get(), DB_LOGC->get() */ -#define DB_SET_LOCK_TIMEOUT 29 /* set_timout() */ -#define DB_SET_RANGE 30 /* c_get() */ -#define DB_SET_RECNO 31 /* get(), c_get() */ -#define DB_SET_TXN_NOW 32 /* set_timout() (internal) */ -#define DB_SET_TXN_TIMEOUT 33 /* set_timout() */ -#define DB_UPDATE_SECONDARY 34 /* c_get(), c_del() (internal) */ -#define DB_WRITECURSOR 35 /* cursor() */ -#define DB_WRITELOCK 36 /* cursor() (internal) */ - -/* This has to change when the max opcode hits 255. */ -#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */ - -/* - * Masks for flags that can be OR'd into DB access method and cursor - * operation values. Three top bits have already been taken: - * - * DB_AUTO_COMMIT 0x01000000 - * DB_READ_COMMITTED 0x02000000 - * DB_READ_UNCOMMITTED 0x04000000 - */ -#define DB_MULTIPLE 0x08000000 /* Return multiple data values. */ -#define DB_MULTIPLE_KEY 0x10000000 /* Return multiple data/key pairs. */ -#define DB_RMW 0x20000000 /* Acquire write lock immediately. */ - -/* - * DB (user visible) error return codes. - * - * !!! - * For source compatibility with DB 2.X deadlock return (EAGAIN), use the - * following: - * #include - * #define DB_LOCK_DEADLOCK EAGAIN - * - * !!! - * We don't want our error returns to conflict with other packages where - * possible, so pick a base error value that's hopefully not common. We - * document that we own the error name space from -30,800 to -30,999. - */ -/* DB (public) error return codes. */ -#define DB_BUFFER_SMALL (-30999)/* User memory too small for return. */ -#define DB_DONOTINDEX (-30998)/* "Null" return from 2ndary callbk. */ -#define DB_KEYEMPTY (-30997)/* Key/data deleted or never created. */ -#define DB_KEYEXIST (-30996)/* The key/data pair already exists. */ -#define DB_LOCK_DEADLOCK (-30995)/* Deadlock. */ -#define DB_LOCK_NOTGRANTED (-30994)/* Lock unavailable. */ -#define DB_LOG_BUFFER_FULL (-30993)/* In-memory log buffer full. */ -#define DB_NOSERVER (-30992)/* Server panic return. */ -#define DB_NOSERVER_HOME (-30991)/* Bad home sent to server. */ -#define DB_NOSERVER_ID (-30990)/* Bad ID sent to server. */ -#define DB_NOTFOUND (-30989)/* Key/data pair not found (EOF). */ -#define DB_OLD_VERSION (-30988)/* Out-of-date version. */ -#define DB_PAGE_NOTFOUND (-30987)/* Requested page not found. 
*/ -#define DB_REP_DUPMASTER (-30986)/* There are two masters. */ -#define DB_REP_HANDLE_DEAD (-30985)/* Rolled back a commit. */ -#define DB_REP_HOLDELECTION (-30984)/* Time to hold an election. */ -#define DB_REP_IGNORE (-30983)/* This msg should be ignored.*/ -#define DB_REP_ISPERM (-30982)/* Cached not written perm written.*/ -#define DB_REP_JOIN_FAILURE (-30981)/* Unable to join replication group. */ -#define DB_REP_LOCKOUT (-30980)/* API/Replication lockout now. */ -#define DB_REP_NEWMASTER (-30979)/* We have learned of a new master. */ -#define DB_REP_NEWSITE (-30978)/* New site entered system. */ -#define DB_REP_NOTPERM (-30977)/* Permanent log record not written. */ -#define DB_REP_STARTUPDONE (-30976)/* Client startup complete. */ -#define DB_REP_UNAVAIL (-30975)/* Site cannot currently be reached. */ -#define DB_RUNRECOVERY (-30974)/* Panic return. */ -#define DB_SECONDARY_BAD (-30973)/* Secondary index corrupt. */ -#define DB_VERIFY_BAD (-30972)/* Verify failed; bad format. */ -#define DB_VERSION_MISMATCH (-30971)/* Environment version mismatch. */ - -/* DB (private) error return codes. */ -#define DB_ALREADY_ABORTED (-30899) -#define DB_DELETED (-30898)/* Recovery file marked deleted. */ -#define DB_NEEDSPLIT (-30897)/* Page needs to be split. */ -#define DB_REP_BULKOVF (-30896)/* Rep bulk buffer overflow. */ -#define DB_REP_EGENCHG (-30895)/* Egen changed while in election. */ -#define DB_REP_LOGREADY (-30894)/* Rep log ready for recovery. */ -#define DB_REP_PAGEDONE (-30893)/* This page was already done. */ -#define DB_SURPRISE_KID (-30892)/* Child commit where parent - didn't know it was a parent. */ -#define DB_SWAPBYTES (-30891)/* Database needs byte swapping. */ -#define DB_TIMEOUT (-30890)/* Timed out waiting for election. */ -#define DB_TXN_CKP (-30889)/* Encountered ckp record in log. */ -#define DB_VERIFY_FATAL (-30888)/* DB->verify cannot proceed. */ - -/* Database handle. */ -struct __db { - /******************************************************* - * Public: owned by the application. - *******************************************************/ - u_int32_t pgsize; /* Database logical page size. */ - - /* Callbacks. */ - int (*db_append_recno) __P((DB *, DBT *, db_recno_t)); - void (*db_feedback) __P((DB *, int, int)); - int (*dup_compare) __P((DB *, const DBT *, const DBT *)); - - void *app_private; /* Application-private handle. */ - - /******************************************************* - * Private: owned by DB. - *******************************************************/ - DB_ENV *dbenv; /* Backing environment. */ - - DBTYPE type; /* DB access method type. */ - - DB_MPOOLFILE *mpf; /* Backing buffer pool. */ - - db_mutex_t mutex; /* Synchronization for free threading */ - - char *fname, *dname; /* File/database passed to DB->open. */ - u_int32_t open_flags; /* Flags passed to DB->open. */ - - u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */ - - u_int32_t adj_fileid; /* File's unique ID for curs. adj. */ - -#define DB_LOGFILEID_INVALID -1 - FNAME *log_filename; /* File's naming info for logging. */ - - db_pgno_t meta_pgno; /* Meta page number */ - u_int32_t lid; /* Locker id for handle locking. */ - u_int32_t cur_lid; /* Current handle lock holder. */ - u_int32_t associate_lid; /* Locker id for DB->associate call. */ - DB_LOCK handle_lock; /* Lock held on this handle. */ - - u_int cl_id; /* RPC: remote client id. */ - - time_t timestamp; /* Handle timestamp for replication. */ - u_int32_t fid_gen; /* Rep generation number for fids. 
*/ - - /* - * Returned data memory for DB->get() and friends. - */ - DBT my_rskey; /* Secondary key. */ - DBT my_rkey; /* [Primary] key. */ - DBT my_rdata; /* Data. */ - - /* - * !!! - * Some applications use DB but implement their own locking outside of - * DB. If they're using fcntl(2) locking on the underlying database - * file, and we open and close a file descriptor for that file, we will - * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an - * undocumented interface to support this usage which leaves any file - * descriptors we open until DB->close. This will only work with the - * DB->open interface and simple caches, e.g., creating a transaction - * thread may open/close file descriptors this flag doesn't protect. - * Locking with fcntl(2) on a file that you don't own is a very, very - * unsafe thing to do. 'Nuff said. - */ - DB_FH *saved_open_fhp; /* Saved file handle. */ - - /* - * Linked list of DBP's, linked from the DB_ENV, used to keep track - * of all open db handles for cursor adjustment. - * - * !!! - * Explicit representations of structures from queue.h. - * LIST_ENTRY(__db) dblistlinks; - */ - struct { - struct __db *le_next; - struct __db **le_prev; - } dblistlinks; - - /* - * Cursor queues. - * - * !!! - * Explicit representations of structures from queue.h. - * TAILQ_HEAD(__cq_fq, __dbc) free_queue; - * TAILQ_HEAD(__cq_aq, __dbc) active_queue; - * TAILQ_HEAD(__cq_jq, __dbc) join_queue; - */ - struct __cq_fq { - struct __dbc *tqh_first; - struct __dbc **tqh_last; - } free_queue; - struct __cq_aq { - struct __dbc *tqh_first; - struct __dbc **tqh_last; - } active_queue; - struct __cq_jq { - struct __dbc *tqh_first; - struct __dbc **tqh_last; - } join_queue; - - /* - * Secondary index support. - * - * Linked list of secondary indices -- set in the primary. - * - * !!! - * Explicit representations of structures from queue.h. - * LIST_HEAD(s_secondaries, __db); - */ - struct { - struct __db *lh_first; - } s_secondaries; - - /* - * List entries for secondaries, and reference count of how - * many threads are updating this secondary (see __db_c_put). - * - * !!! - * Note that these are synchronized by the primary's mutex, but - * filled in in the secondaries. - * - * !!! - * Explicit representations of structures from queue.h. - * LIST_ENTRY(__db) s_links; - */ - struct { - struct __db *le_next; - struct __db **le_prev; - } s_links; - u_int32_t s_refcnt; - - /* Secondary callback and free functions -- set in the secondary. */ - int (*s_callback) __P((DB *, const DBT *, const DBT *, DBT *)); - - /* Reference to primary -- set in the secondary. */ - DB *s_primary; - -#define DB_ASSOC_IMMUTABLE_KEY 0x00000001 /* Secondary key is immutable. */ - - /* Flags passed to associate -- set in the secondary. */ - u_int32_t s_assoc_flags; - - /* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */ - void *api_internal; - - /* Subsystem-private structure. */ - void *bt_internal; /* Btree/Recno access method. */ - void *h_internal; /* Hash access method. */ - void *q_internal; /* Queue access method. */ - void *xa_internal; /* XA. 
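A hedged illustration of the secondary-index support described above, not part of the removed header: a key-extractor callback with the s_callback shape, wired up through DB->associate. The struct rec record layout is invented for the example.

    #include <string.h>
    #include <db.h>

    struct rec {                /* Hypothetical primary record layout. */
        char name[32];
        int  age;
    };

    /* Secondary-key extractor with the s_callback signature above. */
    static int
    by_name(DB *sdbp, const DBT *pkey, const DBT *pdata, DBT *skey)
    {
        struct rec *r = pdata->data;

        (void)sdbp;
        (void)pkey;
        memset(skey, 0, sizeof(*skey));
        skey->data = r->name;                    /* Secondary key: the name. */
        skey->size = (u_int32_t)strlen(r->name) + 1;
        return (0);
    }

    /*
     * Wiring it up, with primary and secondary already open:
     *     ret = primary->associate(primary, NULL, secondary, by_name, 0);
     */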
*/ - - /* DB PUBLIC HANDLE LIST BEGIN */ - int (*associate) __P((DB *, DB_TXN *, DB *, - int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t)); - int (*close) __P((DB *, u_int32_t)); - int (*compact) __P((DB *, - DB_TXN *, DBT *, DBT *, DB_COMPACT *, u_int32_t, DBT *)); - int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t)); - int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t)); - void (*err) __P((DB *, int, const char *, ...)); - void (*errx) __P((DB *, const char *, ...)); - int (*fd) __P((DB *, int *)); - int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); - int (*get_bt_minkey) __P((DB *, u_int32_t *)); - int (*get_byteswapped) __P((DB *, int *)); - int (*get_cachesize) __P((DB *, u_int32_t *, u_int32_t *, int *)); - int (*get_dbname) __P((DB *, const char **, const char **)); - int (*get_encrypt_flags) __P((DB *, u_int32_t *)); - DB_ENV *(*get_env) __P((DB *)); - void (*get_errfile) __P((DB *, FILE **)); - void (*get_errpfx) __P((DB *, const char **)); - int (*get_flags) __P((DB *, u_int32_t *)); - int (*get_h_ffactor) __P((DB *, u_int32_t *)); - int (*get_h_nelem) __P((DB *, u_int32_t *)); - int (*get_lorder) __P((DB *, int *)); - DB_MPOOLFILE *(*get_mpf) __P((DB *)); - void (*get_msgfile) __P((DB *, FILE **)); - int (*get_open_flags) __P((DB *, u_int32_t *)); - int (*get_pagesize) __P((DB *, u_int32_t *)); - int (*get_q_extentsize) __P((DB *, u_int32_t *)); - int (*get_re_delim) __P((DB *, int *)); - int (*get_re_len) __P((DB *, u_int32_t *)); - int (*get_re_pad) __P((DB *, int *)); - int (*get_re_source) __P((DB *, const char **)); - int (*get_transactional) __P((DB *)); - int (*get_type) __P((DB *, DBTYPE *)); - int (*join) __P((DB *, DBC **, DBC **, u_int32_t)); - int (*key_range) - __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t)); - int (*open) __P((DB *, - DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int)); - int (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t)); - int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); - int (*remove) __P((DB *, const char *, const char *, u_int32_t)); - int (*rename) __P((DB *, - const char *, const char *, const char *, u_int32_t)); - int (*set_alloc) __P((DB *, void *(*)(size_t), - void *(*)(void *, size_t), void (*)(void *))); - int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t))); - int (*set_bt_compare) - __P((DB *, int (*)(DB *, const DBT *, const DBT *))); - int (*set_bt_minkey) __P((DB *, u_int32_t)); - int (*set_bt_prefix) - __P((DB *, size_t (*)(DB *, const DBT *, const DBT *))); - int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int)); - int (*set_dup_compare) - __P((DB *, int (*)(DB *, const DBT *, const DBT *))); - int (*set_encrypt) __P((DB *, const char *, u_int32_t)); - void (*set_errcall) __P((DB *, - void (*)(const DB_ENV *, const char *, const char *))); - void (*set_errfile) __P((DB *, FILE *)); - void (*set_errpfx) __P((DB *, const char *)); - int (*set_feedback) __P((DB *, void (*)(DB *, int, int))); - int (*set_flags) __P((DB *, u_int32_t)); - int (*set_h_ffactor) __P((DB *, u_int32_t)); - int (*set_h_hash) - __P((DB *, u_int32_t (*)(DB *, const void *, u_int32_t))); - int (*set_h_nelem) __P((DB *, u_int32_t)); - int (*set_lorder) __P((DB *, int)); - void (*set_msgcall) __P((DB *, void (*)(const DB_ENV *, const char *))); - void (*set_msgfile) __P((DB *, FILE *)); - int (*set_pagesize) __P((DB *, u_int32_t)); - int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int))); - int (*set_q_extentsize) __P((DB *, u_int32_t)); - int (*set_re_delim) __P((DB *, 
int)); - int (*set_re_len) __P((DB *, u_int32_t)); - int (*set_re_pad) __P((DB *, int)); - int (*set_re_source) __P((DB *, const char *)); - int (*stat) __P((DB *, DB_TXN *, void *, u_int32_t)); - int (*stat_print) __P((DB *, u_int32_t)); - int (*sync) __P((DB *, u_int32_t)); - int (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); - int (*upgrade) __P((DB *, const char *, u_int32_t)); - int (*verify) - __P((DB *, const char *, const char *, FILE *, u_int32_t)); - /* DB PUBLIC HANDLE LIST END */ - - /* DB PRIVATE HANDLE LIST BEGIN */ - int (*dump) __P((DB *, const char *, - int (*)(void *, const void *), void *, int, int)); - int (*db_am_remove) __P((DB *, DB_TXN *, const char *, const char *)); - int (*db_am_rename) __P((DB *, DB_TXN *, - const char *, const char *, const char *)); - /* DB PRIVATE HANDLE LIST END */ - - /* - * Never called; these are a place to save function pointers - * so that we can undo an associate. - */ - int (*stored_get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); - int (*stored_close) __P((DB *, u_int32_t)); - -#define DB_OK_BTREE 0x01 -#define DB_OK_HASH 0x02 -#define DB_OK_QUEUE 0x04 -#define DB_OK_RECNO 0x08 - u_int32_t am_ok; /* Legal AM choices. */ - - /* - * This field really ought to be an AM_FLAG, but we have - * have run out of bits. If/when we decide to split up - * the flags, we can incorporate it. - */ - int preserve_fid; /* Do not free fileid on close. */ - -#define DB_AM_CHKSUM 0x00000001 /* Checksumming */ -#define DB_AM_CL_WRITER 0x00000002 /* Allow writes in client replica */ -#define DB_AM_COMPENSATE 0x00000004 /* Created by compensating txn */ -#define DB_AM_CREATED 0x00000008 /* Database was created upon open */ -#define DB_AM_CREATED_MSTR 0x00000010 /* Encompassing file was created */ -#define DB_AM_DBM_ERROR 0x00000020 /* Error in DBM/NDBM database */ -#define DB_AM_DELIMITER 0x00000040 /* Variable length delimiter set */ -#define DB_AM_DISCARD 0x00000080 /* Discard any cached pages */ -#define DB_AM_DUP 0x00000100 /* DB_DUP */ -#define DB_AM_DUPSORT 0x00000200 /* DB_DUPSORT */ -#define DB_AM_ENCRYPT 0x00000400 /* Encryption */ -#define DB_AM_FIXEDLEN 0x00000800 /* Fixed-length records */ -#define DB_AM_INMEM 0x00001000 /* In-memory; no sync on close */ -#define DB_AM_INORDER 0x00002000 /* DB_INORDER */ -#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed */ -#define DB_AM_NOT_DURABLE 0x00008000 /* Do not log changes */ -#define DB_AM_OPEN_CALLED 0x00010000 /* DB->open called */ -#define DB_AM_PAD 0x00020000 /* Fixed-length record pad */ -#define DB_AM_PGDEF 0x00040000 /* Page size was defaulted */ -#define DB_AM_RDONLY 0x00080000 /* Database is readonly */ -#define DB_AM_READ_UNCOMMITTED 0x00100000 /* Support degree 1 isolation */ -#define DB_AM_RECNUM 0x00200000 /* DB_RECNUM */ -#define DB_AM_RECOVER 0x00400000 /* DB opened by recovery routine */ -#define DB_AM_RENUMBER 0x00800000 /* DB_RENUMBER */ -#define DB_AM_REVSPLITOFF 0x01000000 /* DB_REVSPLITOFF */ -#define DB_AM_SECONDARY 0x02000000 /* Database is a secondary index */ -#define DB_AM_SNAPSHOT 0x04000000 /* DB_SNAPSHOT */ -#define DB_AM_SUBDB 0x08000000 /* Subdatabases supported */ -#define DB_AM_SWAP 0x10000000 /* Pages need to be byte-swapped */ -#define DB_AM_TXN 0x20000000 /* Opened in a transaction */ -#define DB_AM_VERIFYING 0x40000000 /* DB handle is in the verifier */ - u_int32_t orig_flags; /* Flags at open, for refresh */ - u_int32_t flags; -}; - -/* - * Macros for bulk get. These are only intended for the C API. - * For C++, use DbMultiple*Iterator. 
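For context only, a sketch of the usual create/open/close lifecycle through the handle methods listed above. db_create and the DB_CREATE/DB_BTREE constants come from the public API elsewhere in db.h; the file name and page size are arbitrary example values.

    #include <db.h>

    /*
     * Sketch only: create, configure and open a database handle inside
     * an already-open environment.
     */
    static int
    open_example_db(DB_ENV *dbenv, DB **dbpp)
    {
        DB *dbp;
        int ret;

        if ((ret = db_create(&dbp, dbenv, 0)) != 0)
            return (ret);
        if ((ret = dbp->set_pagesize(dbp, 4096)) != 0 ||
            (ret = dbp->open(dbp, NULL,
            "example.db", NULL, DB_BTREE, DB_CREATE, 0600)) != 0) {
            (void)dbp->close(dbp, 0);
            return (ret);
        }
        *dbpp = dbp;
        return (0);
    }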
- */ -#define DB_MULTIPLE_INIT(pointer, dbt) \ - (pointer = (u_int8_t *)(dbt)->data + \ - (dbt)->ulen - sizeof(u_int32_t)) -#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, retdlen) \ - do { \ - if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \ - retdata = NULL; \ - pointer = NULL; \ - break; \ - } \ - retdata = (u_int8_t *) \ - (dbt)->data + *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - retdlen = *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - if (retdlen == 0 && \ - retdata == (u_int8_t *)(dbt)->data) \ - retdata = NULL; \ - } while (0) -#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey, retklen, retdata, retdlen) \ - do { \ - if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \ - retdata = NULL; \ - retkey = NULL; \ - pointer = NULL; \ - break; \ - } \ - retkey = (u_int8_t *) \ - (dbt)->data + *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - retklen = *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - retdata = (u_int8_t *) \ - (dbt)->data + *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - retdlen = *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - } while (0) - -#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno, retdata, retdlen) \ - do { \ - if (*((u_int32_t *)(pointer)) == (u_int32_t)0) { \ - recno = 0; \ - retdata = NULL; \ - pointer = NULL; \ - break; \ - } \ - recno = *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - retdata = (u_int8_t *) \ - (dbt)->data + *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - retdlen = *(u_int32_t *)(pointer); \ - (pointer) = (u_int32_t *)(pointer) - 1; \ - } while (0) - -/******************************************************* - * Access method cursors. - *******************************************************/ -struct __dbc { - DB *dbp; /* Related DB access method. */ - DB_TXN *txn; /* Associated transaction. */ - - /* - * Active/free cursor queues. - * - * !!! - * Explicit representations of structures from queue.h. - * TAILQ_ENTRY(__dbc) links; - */ - struct { - DBC *tqe_next; - DBC **tqe_prev; - } links; - - /* - * The DBT *'s below are used by the cursor routines to return - * data to the user when DBT flags indicate that DB should manage - * the returned memory. They point at a DBT containing the buffer - * and length that will be used, and "belonging" to the handle that - * should "own" this memory. This may be a "my_*" field of this - * cursor--the default--or it may be the corresponding field of - * another cursor, a DB handle, a join cursor, etc. In general, it - * will be whatever handle the user originally used for the current - * DB interface call. - */ - DBT *rskey; /* Returned secondary key. */ - DBT *rkey; /* Returned [primary] key. */ - DBT *rdata; /* Returned data. */ - - DBT my_rskey; /* Space for returned secondary key. */ - DBT my_rkey; /* Space for returned [primary] key. */ - DBT my_rdata; /* Space for returned data. */ - - void *lref; /* Reference to default locker. */ - u_int32_t locker; /* Locker for this operation. */ - DBT lock_dbt; /* DBT referencing lock. */ - DB_LOCK_ILOCK lock; /* Object to be locked. */ - DB_LOCK mylock; /* CDB lock held on this cursor. */ - - u_int cl_id; /* Remote client id. */ - - DBTYPE dbtype; /* Cursor type. */ - - DBC_INTERNAL *internal; /* Access method private. 
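Not part of the patch: how the bulk-get macros above were typically consumed, assuming a btree cursor and a user-supplied buffer. DB_DBT_USERMEM comes from the DBT flags defined elsewhere in db.h, and the 1MB buffer size is an arbitrary choice for the sketch.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <db.h>

    #define EXAMPLE_BULK_BUFSZ  (1024 * 1024)  /* Arbitrary buffer size. */

    /*
     * Sketch only: dump a btree database through a bulk cursor read,
     * decoding the buffer with DB_MULTIPLE_INIT/DB_MULTIPLE_KEY_NEXT.
     */
    static int
    bulk_dump(DBC *dbc)
    {
        DBT key, data;
        void *p, *retkey, *retdata;
        u_int32_t retklen, retdlen;
        int ret;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        if ((data.data = malloc(EXAMPLE_BULK_BUFSZ)) == NULL)
            return (ENOMEM);
        data.ulen = EXAMPLE_BULK_BUFSZ;
        data.flags = DB_DBT_USERMEM;           /* DB fills our buffer. */

        while ((ret = dbc->c_get(dbc,
            &key, &data, DB_MULTIPLE_KEY | DB_NEXT)) == 0) {
            for (DB_MULTIPLE_INIT(p, &data);;) {
                DB_MULTIPLE_KEY_NEXT(p, &data,
                    retkey, retklen, retdata, retdlen);
                if (p == NULL)                 /* Buffer exhausted. */
                    break;
                printf("%.*s: %.*s\n",
                    (int)retklen, (char *)retkey,
                    (int)retdlen, (char *)retdata);
            }
        }
        free(data.data);
        return (ret == DB_NOTFOUND ? 0 : ret);
    }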
*/ - - /* DBC PUBLIC HANDLE LIST BEGIN */ - int (*c_close) __P((DBC *)); - int (*c_count) __P((DBC *, db_recno_t *, u_int32_t)); - int (*c_del) __P((DBC *, u_int32_t)); - int (*c_dup) __P((DBC *, DBC **, u_int32_t)); - int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t)); - int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, u_int32_t)); - int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t)); - /* DBC PUBLIC HANDLE LIST END */ - - /* DBC PRIVATE HANDLE LIST BEGIN */ - int (*c_am_bulk) __P((DBC *, DBT *, u_int32_t)); - int (*c_am_close) __P((DBC *, db_pgno_t, int *)); - int (*c_am_del) __P((DBC *)); - int (*c_am_destroy) __P((DBC *)); - int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); - int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); - int (*c_am_writelock) __P((DBC *)); - /* DBC PRIVATE HANDLE LIST END */ - -/* - * DBC_COMPENSATE and DBC_RECOVER are used during recovery and transaction - * abort. If a transaction is being aborted or recovered then DBC_RECOVER - * will be set and locking and logging will be disabled on this cursor. If - * we are performing a compensating transaction (e.g. free page processing) - * then DB_COMPENSATE will be set to inhibit locking, but logging will still - * be required. - */ -#define DBC_ACTIVE 0x0001 /* Cursor in use. */ -#define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */ -#define DBC_MULTIPLE 0x0004 /* Return Multiple data. */ -#define DBC_MULTIPLE_KEY 0x0008 /* Return Multiple keys and data. */ -#define DBC_OPD 0x0010 /* Cursor references off-page dups. */ -#define DBC_OWN_LID 0x0020 /* Free lock id on destroy. */ -#define DBC_READ_COMMITTED 0x0040 /* Cursor has degree 2 isolation. */ -#define DBC_READ_UNCOMMITTED 0x0080 /* Cursor has degree 1 isolation. */ -#define DBC_RECOVER 0x0100 /* Recovery cursor; don't log/lock. */ -#define DBC_RMW 0x0200 /* Acquire write flag in read op. */ -#define DBC_TRANSIENT 0x0400 /* Cursor is transient. */ -#define DBC_WRITECURSOR 0x0800 /* Cursor may be used to write (CDB). */ -#define DBC_WRITER 0x1000 /* Cursor immediately writing (CDB). */ - u_int32_t flags; -}; - -/* Key range statistics structure */ -struct __key_range { - double less; - double equal; - double greater; -}; - -/* Btree/Recno statistics structure. */ -struct __db_bt_stat { - u_int32_t bt_magic; /* Magic number. */ - u_int32_t bt_version; /* Version number. */ - u_int32_t bt_metaflags; /* Metadata flags. */ - u_int32_t bt_nkeys; /* Number of unique keys. */ - u_int32_t bt_ndata; /* Number of data items. */ - u_int32_t bt_pagesize; /* Page size. */ - u_int32_t bt_minkey; /* Minkey value. */ - u_int32_t bt_re_len; /* Fixed-length record length. */ - u_int32_t bt_re_pad; /* Fixed-length record pad. */ - u_int32_t bt_levels; /* Tree levels. */ - u_int32_t bt_int_pg; /* Internal pages. */ - u_int32_t bt_leaf_pg; /* Leaf pages. */ - u_int32_t bt_dup_pg; /* Duplicate pages. */ - u_int32_t bt_over_pg; /* Overflow pages. */ - u_int32_t bt_empty_pg; /* Empty pages. */ - u_int32_t bt_free; /* Pages on the free list. */ - u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */ - u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */ - u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */ - u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */ -}; - -struct __db_compact { - /* Input Parameters. */ - u_int32_t compact_fillpercent; /* Desired fillfactor: 1-100 */ - db_timeout_t compact_timeout; /* Lock timeout. */ - u_int32_t compact_pages; /* Max pages to process. */ - /* Output Stats. 
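Illustrative use of the btree statistics structure above via DB->stat(), assuming the usual DB_BTREE_STAT typedef for struct __db_bt_stat declared elsewhere in db.h; the handle allocates the result and the caller frees it.

    #include <stdio.h>
    #include <stdlib.h>
    #include <db.h>

    /* Sketch only: print a few fields of the btree statistics. */
    static int
    print_btree_stats(DB *dbp)
    {
        DB_BTREE_STAT *sp;
        int ret;

        /* DB_FAST_STAT avoids walking the whole database. */
        if ((ret = dbp->stat(dbp, NULL, &sp, DB_FAST_STAT)) != 0)
            return (ret);
        printf("keys %lu, data items %lu, page size %lu\n",
            (unsigned long)sp->bt_nkeys, (unsigned long)sp->bt_ndata,
            (unsigned long)sp->bt_pagesize);
        free(sp);
        return (0);
    }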
*/ - u_int32_t compact_pages_free; /* Number of pages freed. */ - u_int32_t compact_pages_examine; /* Number of pages examine. */ - u_int32_t compact_levels; /* Number of levels removed. */ - u_int32_t compact_deadlock; /* Number of deadlocks. */ - db_pgno_t compact_pages_truncated; /* Pages truncated to OS. */ - /* Internal. */ - db_pgno_t compact_truncate; /* Page number for truncation */ -}; - -/* Hash statistics structure. */ -struct __db_h_stat { - u_int32_t hash_magic; /* Magic number. */ - u_int32_t hash_version; /* Version number. */ - u_int32_t hash_metaflags; /* Metadata flags. */ - u_int32_t hash_nkeys; /* Number of unique keys. */ - u_int32_t hash_ndata; /* Number of data items. */ - u_int32_t hash_pagesize; /* Page size. */ - u_int32_t hash_ffactor; /* Fill factor specified at create. */ - u_int32_t hash_buckets; /* Number of hash buckets. */ - u_int32_t hash_free; /* Pages on the free list. */ - u_int32_t hash_bfree; /* Bytes free on bucket pages. */ - u_int32_t hash_bigpages; /* Number of big key/data pages. */ - u_int32_t hash_big_bfree; /* Bytes free on big item pages. */ - u_int32_t hash_overflows; /* Number of overflow pages. */ - u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */ - u_int32_t hash_dup; /* Number of dup pages. */ - u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */ -}; - -/* Queue statistics structure. */ -struct __db_qam_stat { - u_int32_t qs_magic; /* Magic number. */ - u_int32_t qs_version; /* Version number. */ - u_int32_t qs_metaflags; /* Metadata flags. */ - u_int32_t qs_nkeys; /* Number of unique keys. */ - u_int32_t qs_ndata; /* Number of data items. */ - u_int32_t qs_pagesize; /* Page size. */ - u_int32_t qs_extentsize; /* Pages per extent. */ - u_int32_t qs_pages; /* Data pages. */ - u_int32_t qs_re_len; /* Fixed-length record length. */ - u_int32_t qs_re_pad; /* Fixed-length record pad. */ - u_int32_t qs_pgfree; /* Bytes free in data pages. */ - u_int32_t qs_first_recno; /* First not deleted record. */ - u_int32_t qs_cur_recno; /* Next available record number. */ -}; - -/******************************************************* - * Environment. - *******************************************************/ -#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */ - -/* Database Environment handle. */ -struct __db_env { - /******************************************************* - * Public: owned by the application. - *******************************************************/ - /* Error message callback. */ - void (*db_errcall) __P((const DB_ENV *, const char *, const char *)); - FILE *db_errfile; /* Error message file stream. */ - const char *db_errpfx; /* Error message prefix. */ - - FILE *db_msgfile; /* Statistics message file stream. */ - /* Statistics message callback. */ - void (*db_msgcall) __P((const DB_ENV *, const char *)); - - /* Other Callbacks. */ - void (*db_feedback) __P((DB_ENV *, int, int)); - void (*db_paniccall) __P((DB_ENV *, int)); - - /* App-specified alloc functions. */ - void *(*db_malloc) __P((size_t)); - void *(*db_realloc) __P((void *, size_t)); - void (*db_free) __P((void *)); - - /* - * Currently, the verbose list is a bit field with room for 32 - * entries. There's no reason that it needs to be limited, if - * there are ever more than 32 entries, convert to a bit array. - */ -#define DB_VERB_DEADLOCK 0x0001 /* Deadlock detection information. */ -#define DB_VERB_RECOVERY 0x0002 /* Recovery information. */ -#define DB_VERB_REGISTER 0x0004 /* Dump waits-for table. 
*/ -#define DB_VERB_REPLICATION 0x0008 /* Replication information. */ -#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */ - u_int32_t verbose; /* Verbose output. */ - - void *app_private; /* Application-private handle. */ - - int (*app_dispatch) /* User-specified recovery dispatch. */ - __P((DB_ENV *, DBT *, DB_LSN *, db_recops)); - - /* Mutexes. */ - u_int32_t mutex_align; /* Mutex alignment */ - u_int32_t mutex_cnt; /* Number of mutexes to configure */ - u_int32_t mutex_inc; /* Number of mutexes to add */ - u_int32_t mutex_tas_spins;/* Test-and-set spin count */ - - struct { - int alloc_id; /* Allocation ID argument */ - u_int32_t flags; /* Flags argument */ - } *mutex_iq; /* Initial mutexes queue */ - u_int mutex_iq_next; /* Count of initial mutexes */ - u_int mutex_iq_max; /* Maximum initial mutexes */ - - /* Locking. */ - u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */ - int lk_modes; /* Number of lock modes in table. */ - u_int32_t lk_max; /* Maximum number of locks. */ - u_int32_t lk_max_lockers;/* Maximum number of lockers. */ - u_int32_t lk_max_objects;/* Maximum number of locked objects. */ - u_int32_t lk_detect; /* Deadlock detect on all conflicts. */ - db_timeout_t lk_timeout; /* Lock timeout period. */ - - /* Logging. */ - u_int32_t lg_bsize; /* Buffer size. */ - u_int32_t lg_size; /* Log file size. */ - u_int32_t lg_regionmax; /* Region size. */ - int lg_filemode; /* Log file permission mode. */ - - /* Memory pool. */ - u_int32_t mp_gbytes; /* Cachesize: GB. */ - u_int32_t mp_bytes; /* Cachesize: Bytes. */ - u_int mp_ncache; /* Number of cache regions. */ - size_t mp_mmapsize; /* Maximum file size for mmap. */ - int mp_maxopenfd; /* Maximum open file descriptors. */ - int mp_maxwrite; /* Maximum buffers to write. */ - int /* Sleep after writing max buffers. */ - mp_maxwrite_sleep; - - /* Replication */ - int rep_eid; /* environment id. */ - int (*rep_send) /* Send function. */ - __P((DB_ENV *, const DBT *, const DBT *, - const DB_LSN *, int, u_int32_t)); - - /* Transactions. */ - u_int32_t tx_max; /* Maximum number of transactions. */ - time_t tx_timestamp; /* Recover to specific timestamp. */ - db_timeout_t tx_timeout; /* Timeout for transactions. */ - - /* Thread tracking. */ - u_int32_t thr_nbucket; /* Number of hash buckets. */ - u_int32_t thr_max; /* Max before garbage collection. */ - void *thr_hashtab; /* Hash table of DB_THREAD_INFO. */ - - /******************************************************* - * Private: owned by DB. - *******************************************************/ - pid_t pid_cache; /* Cached process ID. */ - - /* User files, paths. */ - char *db_home; /* Database home. */ - char *db_abshome; /* Absolute path when started. */ - char *db_log_dir; /* Database log file directory. */ - char *db_tmp_dir; /* Database tmp file directory. */ - - char **db_data_dir; /* Database data file directories. */ - int data_cnt; /* Database data file slots. */ - int data_next; /* Next Database data file slot. */ - - int db_mode; /* Default open permissions. */ - int dir_mode; /* Intermediate directory perms. */ - void *env_lref; /* Locker in non-threaded handles. */ - u_int32_t open_flags; /* Flags passed to DB_ENV->open. */ - - void *reginfo; /* REGINFO structure reference. */ - DB_FH *lockfhp; /* fcntl(2) locking file handle. */ - - DB_FH *registry; /* DB_REGISTER file handle. */ - u_int32_t registry_off; /* - * Offset of our slot. We can't use - * off_t because its size depends on - * build settings. - */ - - /* Return ID, check if ID alive. 
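A sketch, not part of the patch, of how a few of the configuration fields above were set through the DB_ENV methods before opening the environment. db_env_create and the DB_CREATE/DB_INIT_*/DB_RECOVER flags come from the public API outside this hunk; the home directory, cache size and lock limit are arbitrary example values.

    #include <db.h>

    /* Sketch only: open a transactional environment. */
    static int
    open_example_env(DB_ENV **dbenvp)
    {
        DB_ENV *dbenv;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (ret);
        /* 64MB cache in one region; a larger lock table than default. */
        if ((ret = dbenv->set_cachesize(dbenv, 0, 64 * 1024 * 1024, 1)) != 0 ||
            (ret = dbenv->set_lk_max_locks(dbenv, 10000)) != 0 ||
            (ret = dbenv->open(dbenv, "/tmp/example-env",
            DB_CREATE | DB_RECOVER | DB_INIT_MPOOL | DB_INIT_LOCK |
            DB_INIT_LOG | DB_INIT_TXN, 0600)) != 0) {
            (void)dbenv->close(dbenv, 0);
            return (ret);
        }
        *dbenvp = dbenv;
        return (0);
    }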
*/ - void (*thread_id) __P((DB_ENV *, pid_t *, db_threadid_t *)); - int (*is_alive) __P((DB_ENV *, pid_t, db_threadid_t)); - char *(*thread_id_string) - __P((DB_ENV *, pid_t, db_threadid_t, char *)); - - int (**recover_dtab) /* Dispatch table for recover funcs. */ - __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - size_t recover_dtab_size; - /* Slots in the dispatch table. */ - - void *cl_handle; /* RPC: remote client handle. */ - u_int cl_id; /* RPC: remote client env id. */ - - int db_ref; /* DB reference count. */ - - long shm_key; /* shmget(2) key. */ - - /* - * List of open DB handles for this DB_ENV, used for cursor - * adjustment. Must be protected for multi-threaded support. - * - * !!! - * As this structure is allocated in per-process memory, the - * mutex may need to be stored elsewhere on architectures unable - * to support mutexes in heap memory, e.g. HP/UX 9. - * - * !!! - * Explicit representation of structure in queue.h. - * LIST_HEAD(dblist, __db); - */ - db_mutex_t mtx_dblist; /* Mutex. */ - struct { - struct __db *lh_first; - } dblist; - - /* - * XA support. - * - * !!! - * Explicit representations of structures from queue.h. - * TAILQ_ENTRY(__db_env) links; - * TAILQ_HEAD(xa_txn, __db_txn); - */ - struct { - struct __db_env *tqe_next; - struct __db_env **tqe_prev; - } links; - struct __xa_txn { /* XA Active Transactions. */ - struct __db_txn *tqh_first; - struct __db_txn **tqh_last; - } xa_txn; - int xa_rmid; /* XA Resource Manager ID. */ - - char *passwd; /* Cryptography support. */ - size_t passwd_len; - void *crypto_handle; /* Primary handle. */ - db_mutex_t mtx_mt; /* Mersenne Twister mutex. */ - int mti; /* Mersenne Twister index. */ - u_long *mt; /* Mersenne Twister state vector. */ - - /* API-private structure. */ - void *api1_internal; /* C++, Perl API private */ - void *api2_internal; /* Java API private */ - - void *lg_handle; /* Log handle. */ - void *lk_handle; /* Lock handle. */ - void *mp_handle; /* Mpool handle. */ - void *mutex_handle; /* Mutex handle. */ - void *rep_handle; /* Replication handle. */ - void *tx_handle; /* Txn handle. 
*/ - - /* DB_ENV PUBLIC HANDLE LIST BEGIN */ - int (*close) __P((DB_ENV *, u_int32_t)); - int (*dbremove) __P((DB_ENV *, - DB_TXN *, const char *, const char *, u_int32_t)); - int (*dbrename) __P((DB_ENV *, - DB_TXN *, const char *, const char *, const char *, u_int32_t)); - void (*err) __P((const DB_ENV *, int, const char *, ...)); - void (*errx) __P((const DB_ENV *, const char *, ...)); - int (*failchk) __P((DB_ENV *, u_int32_t)); - int (*fileid_reset) __P((DB_ENV *, const char *, u_int32_t)); - int (*get_cachesize) __P((DB_ENV *, u_int32_t *, u_int32_t *, int *)); - int (*get_data_dirs) __P((DB_ENV *, const char ***)); - int (*get_encrypt_flags) __P((DB_ENV *, u_int32_t *)); - void (*get_errfile) __P((DB_ENV *, FILE **)); - void (*get_errpfx) __P((DB_ENV *, const char **)); - int (*get_flags) __P((DB_ENV *, u_int32_t *)); - int (*get_home) __P((DB_ENV *, const char **)); - int (*get_lg_bsize) __P((DB_ENV *, u_int32_t *)); - int (*get_lg_dir) __P((DB_ENV *, const char **)); - int (*get_lg_filemode) __P((DB_ENV *, int *)); - int (*get_lg_max) __P((DB_ENV *, u_int32_t *)); - int (*get_lg_regionmax) __P((DB_ENV *, u_int32_t *)); - int (*get_lk_conflicts) __P((DB_ENV *, const u_int8_t **, int *)); - int (*get_lk_detect) __P((DB_ENV *, u_int32_t *)); - int (*get_lk_max_lockers) __P((DB_ENV *, u_int32_t *)); - int (*get_lk_max_locks) __P((DB_ENV *, u_int32_t *)); - int (*get_lk_max_objects) __P((DB_ENV *, u_int32_t *)); - int (*get_mp_max_openfd) __P((DB_ENV *, int *)); - int (*get_mp_max_write) __P((DB_ENV *, int *, int *)); - int (*get_mp_mmapsize) __P((DB_ENV *, size_t *)); - void (*get_msgfile) __P((DB_ENV *, FILE **)); - int (*get_open_flags) __P((DB_ENV *, u_int32_t *)); - int (*get_rep_limit) __P((DB_ENV *, u_int32_t *, u_int32_t *)); - int (*get_shm_key) __P((DB_ENV *, long *)); - int (*get_timeout) __P((DB_ENV *, db_timeout_t *, u_int32_t)); - int (*get_tmp_dir) __P((DB_ENV *, const char **)); - int (*get_tx_max) __P((DB_ENV *, u_int32_t *)); - int (*get_tx_timestamp) __P((DB_ENV *, time_t *)); - int (*get_verbose) __P((DB_ENV *, u_int32_t, int *)); - int (*is_bigendian) __P((void)); - int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *)); - int (*lock_get) __P((DB_ENV *, - u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *)); - int (*lock_id) __P((DB_ENV *, u_int32_t *)); - int (*lock_id_free) __P((DB_ENV *, u_int32_t)); - int (*lock_put) __P((DB_ENV *, DB_LOCK *)); - int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t)); - int (*lock_stat_print) __P((DB_ENV *, u_int32_t)); - int (*lock_vec) __P((DB_ENV *, - u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **)); - int (*log_archive) __P((DB_ENV *, char **[], u_int32_t)); - int (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t)); - int (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t)); - int (*log_flush) __P((DB_ENV *, const DB_LSN *)); - int (*log_printf) __P((DB_ENV *, DB_TXN *, const char *, ...)); - int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t)); - int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t)); - int (*log_stat_print) __P((DB_ENV *, u_int32_t)); - int (*lsn_reset) __P((DB_ENV *, const char *, u_int32_t)); - int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t)); - int (*memp_register) __P((DB_ENV *, int, int (*)(DB_ENV *, - db_pgno_t, void *, DBT *), int (*)(DB_ENV *, - db_pgno_t, void *, DBT *))); - int (*memp_stat) __P((DB_ENV *, - DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t)); - int (*memp_stat_print) __P((DB_ENV *, u_int32_t)); - int 
(*memp_sync) __P((DB_ENV *, DB_LSN *)); - int (*memp_trickle) __P((DB_ENV *, int, int *)); - int (*mutex_alloc) __P((DB_ENV *, u_int32_t, db_mutex_t *)); - int (*mutex_free) __P((DB_ENV *, db_mutex_t)); - int (*mutex_get_align) __P((DB_ENV *, u_int32_t *)); - int (*mutex_get_increment) __P((DB_ENV *, u_int32_t *)); - int (*mutex_get_max) __P((DB_ENV *, u_int32_t *)); - int (*mutex_get_tas_spins) __P((DB_ENV *, u_int32_t *)); - int (*mutex_lock) __P((DB_ENV *, db_mutex_t)); - int (*mutex_set_align) __P((DB_ENV *, u_int32_t)); - int (*mutex_set_increment) __P((DB_ENV *, u_int32_t)); - int (*mutex_set_max) __P((DB_ENV *, u_int32_t)); - int (*mutex_set_tas_spins) __P((DB_ENV *, u_int32_t)); - int (*mutex_stat) __P((DB_ENV *, DB_MUTEX_STAT **, u_int32_t)); - int (*mutex_stat_print) __P((DB_ENV *, u_int32_t)); - int (*mutex_unlock) __P((DB_ENV *, db_mutex_t)); - int (*open) __P((DB_ENV *, const char *, u_int32_t, int)); - int (*remove) __P((DB_ENV *, const char *, u_int32_t)); - int (*rep_elect) - __P((DB_ENV *, int, int, int, u_int32_t, int *, u_int32_t)); - int (*rep_flush) __P((DB_ENV *)); - int (*rep_get_config) __P((DB_ENV *, u_int32_t, int *)); - int (*rep_process_message) - __P((DB_ENV *, DBT *, DBT *, int *, DB_LSN *)); - int (*rep_set_config) __P((DB_ENV *, u_int32_t, int)); - int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t)); - int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t)); - int (*rep_stat_print) __P((DB_ENV *, u_int32_t)); - int (*rep_sync) __P((DB_ENV *, u_int32_t)); - int (*set_alloc) __P((DB_ENV *, void *(*)(size_t), - void *(*)(void *, size_t), void (*)(void *))); - int (*set_app_dispatch) - __P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops))); - int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int)); - int (*set_data_dir) __P((DB_ENV *, const char *)); - int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t)); - void (*set_errcall) __P((DB_ENV *, - void (*)(const DB_ENV *, const char *, const char *))); - void (*set_errfile) __P((DB_ENV *, FILE *)); - void (*set_errpfx) __P((DB_ENV *, const char *)); - int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int))); - int (*set_flags) __P((DB_ENV *, u_int32_t, int)); - int (*set_intermediate_dir) __P((DB_ENV *, int, u_int32_t)); - int (*set_isalive) __P((DB_ENV *, - int (*)(DB_ENV *, pid_t, db_threadid_t))); - int (*set_lg_bsize) __P((DB_ENV *, u_int32_t)); - int (*set_lg_dir) __P((DB_ENV *, const char *)); - int (*set_lg_filemode) __P((DB_ENV *, int)); - int (*set_lg_max) __P((DB_ENV *, u_int32_t)); - int (*set_lg_regionmax) __P((DB_ENV *, u_int32_t)); - int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int)); - int (*set_lk_detect) __P((DB_ENV *, u_int32_t)); - int (*set_lk_max) __P((DB_ENV *, u_int32_t)); - int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t)); - int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t)); - int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t)); - int (*set_mp_max_openfd) __P((DB_ENV *, int)); - int (*set_mp_max_write) __P((DB_ENV *, int, int)); - int (*set_mp_mmapsize) __P((DB_ENV *, size_t)); - void (*set_msgcall) - __P((DB_ENV *, void (*)(const DB_ENV *, const char *))); - void (*set_msgfile) __P((DB_ENV *, FILE *)); - int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int))); - int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t)); - int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t)); - int (*set_rep_transport) __P((DB_ENV *, int, int (*)(DB_ENV *, - const DBT *, const DBT *, const DB_LSN *, int, u_int32_t))); - int (*set_rpc_server) - 
__P((DB_ENV *, void *, const char *, long, long, u_int32_t)); - int (*set_shm_key) __P((DB_ENV *, long)); - int (*set_thread_count) __P((DB_ENV *, u_int32_t)); - int (*set_thread_id) __P((DB_ENV *, - void (*)(DB_ENV *, pid_t *, db_threadid_t *))); - int (*set_thread_id_string) __P((DB_ENV *, - char *(*)(DB_ENV *, pid_t, db_threadid_t, char *))); - int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t)); - int (*set_tmp_dir) __P((DB_ENV *, const char *)); - int (*set_tx_max) __P((DB_ENV *, u_int32_t)); - int (*set_tx_timestamp) __P((DB_ENV *, time_t *)); - int (*set_verbose) __P((DB_ENV *, u_int32_t, int)); - int (*stat_print) __P((DB_ENV *, u_int32_t)); - int (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t)); - int (*txn_checkpoint) __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t)); - int (*txn_recover) - __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t)); - int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t)); - int (*txn_stat_print) __P((DB_ENV *, u_int32_t)); - /* DB_ENV PUBLIC HANDLE LIST END */ - - /* DB_ENV PRIVATE HANDLE LIST BEGIN */ - int (*prdbt) __P((DBT *, - int, const char *, void *, int (*)(void *, const void *), int)); - /* DB_ENV PRIVATE HANDLE LIST END */ - -#define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */ -#define DB_TEST_ELECTVOTE1 2 /* after sending VOTE1 */ -#define DB_TEST_POSTDESTROY 3 /* after destroy op */ -#define DB_TEST_POSTLOG 4 /* after logging all pages */ -#define DB_TEST_POSTLOGMETA 5 /* after logging meta in btree */ -#define DB_TEST_POSTOPEN 6 /* after __os_open */ -#define DB_TEST_POSTSYNC 7 /* after syncing the log */ -#define DB_TEST_PREDESTROY 8 /* before destroy op */ -#define DB_TEST_PREOPEN 9 /* before __os_open */ -#define DB_TEST_SUBDB_LOCKS 10 /* subdb locking tests */ - int test_abort; /* Abort value for testing. */ - int test_check; /* Checkpoint value for testing. */ - int test_copy; /* Copy value for testing. */ - -#define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */ -#define DB_ENV_CDB 0x0000002 /* DB_INIT_CDB. */ -#define DB_ENV_CDB_ALLDB 0x0000004 /* CDB environment wide locking. */ -#define DB_ENV_CREATE 0x0000008 /* DB_CREATE set. */ -#define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */ -#define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */ -#define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */ -#define DB_ENV_DSYNC_DB 0x0000080 /* DB_DSYNC_DB set. */ -#define DB_ENV_DSYNC_LOG 0x0000100 /* DB_DSYNC_LOG set. */ -#define DB_ENV_FATAL 0x0000200 /* Doing fatal recovery in env. */ -#define DB_ENV_LOCKDOWN 0x0000400 /* DB_LOCKDOWN set. */ -#define DB_ENV_LOG_AUTOREMOVE 0x0000800 /* DB_LOG_AUTOREMOVE set. */ -#define DB_ENV_LOG_INMEMORY 0x0001000 /* DB_LOG_INMEMORY set. */ -#define DB_ENV_NOLOCKING 0x0002000 /* DB_NOLOCKING set. */ -#define DB_ENV_NOMMAP 0x0004000 /* DB_NOMMAP set. */ -#define DB_ENV_NOPANIC 0x0008000 /* Okay if panic set. */ -#define DB_ENV_OPEN_CALLED 0x0010000 /* DB_ENV->open called. */ -#define DB_ENV_OVERWRITE 0x0020000 /* DB_OVERWRITE set. */ -#define DB_ENV_PRIVATE 0x0040000 /* DB_PRIVATE set. */ -#define DB_ENV_REGION_INIT 0x0080000 /* DB_REGION_INIT set. */ -#define DB_ENV_RPCCLIENT 0x0100000 /* DB_RPCCLIENT set. */ -#define DB_ENV_RPCCLIENT_GIVEN 0x0200000 /* User-supplied RPC client struct */ -#define DB_ENV_SYSTEM_MEM 0x0400000 /* DB_SYSTEM_MEM set. */ -#define DB_ENV_THREAD 0x0800000 /* DB_THREAD set. */ -#define DB_ENV_TIME_NOTGRANTED 0x1000000 /* DB_TIME_NOTGRANTED set. */ -#define DB_ENV_TXN_NOSYNC 0x2000000 /* DB_TXN_NOSYNC set. 
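For illustration only: a small maintenance pass built from two of the handle methods listed above, memp_trickle and txn_checkpoint.

    #include <db.h>

    /*
     * Sketch only: flush part of the buffer pool, then take an
     * unconditional checkpoint.
     */
    static int
    maintenance_pass(DB_ENV *dbenv)
    {
        int nwrote, ret;

        /* Keep roughly 20% of the buffer pool clean. */
        if ((ret = dbenv->memp_trickle(dbenv, 20, &nwrote)) != 0)
            return (ret);
        /* 0 kbytes / 0 minutes thresholds: checkpoint unconditionally. */
        return (dbenv->txn_checkpoint(dbenv, 0, 0, 0));
    }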
*/ -#define DB_ENV_TXN_WRITE_NOSYNC 0x4000000 /* DB_TXN_WRITE_NOSYNC set. */ -#define DB_ENV_YIELDCPU 0x8000000 /* DB_YIELDCPU set. */ - u_int32_t flags; -}; - -#ifndef DB_DBM_HSEARCH -#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */ -#endif -#if DB_DBM_HSEARCH != 0 -/******************************************************* - * Dbm/Ndbm historic interfaces. - *******************************************************/ -typedef struct __db DBM; - -#define DBM_INSERT 0 /* Flags to dbm_store(). */ -#define DBM_REPLACE 1 - -/* - * The DB support for ndbm(3) always appends this suffix to the - * file name to avoid overwriting the user's original database. - */ -#define DBM_SUFFIX ".db" - -#if defined(_XPG4_2) -typedef struct { - char *dptr; - size_t dsize; -} datum; -#else -typedef struct { - char *dptr; - int dsize; -} datum; -#endif - -/* - * Translate NDBM calls into DB calls so that DB doesn't step on the - * application's name space. - */ -#define dbm_clearerr(a) __db_ndbm_clearerr@DB_VERSION_UNIQUE_NAME@(a) -#define dbm_close(a) __db_ndbm_close@DB_VERSION_UNIQUE_NAME@(a) -#define dbm_delete(a, b) __db_ndbm_delete@DB_VERSION_UNIQUE_NAME@(a, b) -#define dbm_dirfno(a) __db_ndbm_dirfno@DB_VERSION_UNIQUE_NAME@(a) -#define dbm_error(a) __db_ndbm_error@DB_VERSION_UNIQUE_NAME@(a) -#define dbm_fetch(a, b) __db_ndbm_fetch@DB_VERSION_UNIQUE_NAME@(a, b) -#define dbm_firstkey(a) __db_ndbm_firstkey@DB_VERSION_UNIQUE_NAME@(a) -#define dbm_nextkey(a) __db_ndbm_nextkey@DB_VERSION_UNIQUE_NAME@(a) -#define dbm_open(a, b, c) __db_ndbm_open@DB_VERSION_UNIQUE_NAME@(a, b, c) -#define dbm_pagfno(a) __db_ndbm_pagfno@DB_VERSION_UNIQUE_NAME@(a) -#define dbm_rdonly(a) __db_ndbm_rdonly@DB_VERSION_UNIQUE_NAME@(a) -#define dbm_store(a, b, c, d) \ - __db_ndbm_store@DB_VERSION_UNIQUE_NAME@(a, b, c, d) - -/* - * Translate DBM calls into DB calls so that DB doesn't step on the - * application's name space. - * - * The global variables dbrdonly, dirf and pagf were not retained when 4BSD - * replaced the dbm interface with ndbm, and are not supported here. - */ -#define dbminit(a) __db_dbm_init@DB_VERSION_UNIQUE_NAME@(a) -#define dbmclose __db_dbm_close@DB_VERSION_UNIQUE_NAME@ -#if !defined(__cplusplus) -#define delete(a) __db_dbm_delete@DB_VERSION_UNIQUE_NAME@(a) -#endif -#define fetch(a) __db_dbm_fetch@DB_VERSION_UNIQUE_NAME@(a) -#define firstkey __db_dbm_firstkey@DB_VERSION_UNIQUE_NAME@ -#define nextkey(a) __db_dbm_nextkey@DB_VERSION_UNIQUE_NAME@(a) -#define store(a, b) __db_dbm_store@DB_VERSION_UNIQUE_NAME@(a, b) - -/******************************************************* - * Hsearch historic interface. - *******************************************************/ -typedef enum { - FIND, ENTER -} ACTION; - -typedef struct entry { - char *key; - char *data; -} ENTRY; - -#define hcreate(a) __db_hcreate@DB_VERSION_UNIQUE_NAME@(a) -#define hdestroy __db_hdestroy@DB_VERSION_UNIQUE_NAME@ -#define hsearch(a, b) __db_hsearch@DB_VERSION_UNIQUE_NAME@(a, b) - -#endif /* DB_DBM_HSEARCH */ - -#if defined(__cplusplus) -} -#endif -#endif /* !_DB_H_ */ diff --git a/storage/bdb/dbinc/db_185.in b/storage/bdb/dbinc/db_185.in deleted file mode 100644 index 56b909cd934..00000000000 --- a/storage/bdb/dbinc/db_185.in +++ /dev/null @@ -1,169 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994 - * The Regents of the University of California. All rights reserved. 
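Not part of the patch: a sketch of the ndbm compatibility layer whose name-translation macros appear above. It assumes the documented convention of defining DB_DBM_HSEARCH before including db.h and the standard ndbm call signatures; the database name is made up.

    #define DB_DBM_HSEARCH 1   /* Enable the ndbm translations in db.h. */
    #include <db.h>
    #include <fcntl.h>
    #include <string.h>

    /* Sketch only: store and fetch one record through the ndbm emulation. */
    static int
    ndbm_example(void)
    {
        DBM *db;
        datum key, val, found;

        /* ".db" is appended to the name, per DBM_SUFFIX above. */
        if ((db = dbm_open("example", O_CREAT | O_RDWR, 0600)) == NULL)
            return (-1);
        key.dptr = "fruit";
        key.dsize = (int)strlen("fruit");
        val.dptr = "apple";
        val.dsize = (int)strlen("apple");
        (void)dbm_store(db, key, val, DBM_REPLACE);
        found = dbm_fetch(db, key);    /* found.dptr is NULL if absent. */
        dbm_close(db);
        return (found.dptr == NULL ? -1 : 0);
    }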
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_185.in,v 12.2 2005/06/16 20:21:45 bostic Exp $ - */ - -#ifndef _DB_185_H_ -#define _DB_185_H_ - -#include - -#include - -/* - * XXX - * Handle function prototypes and the keyword "const". This steps on name - * space that DB doesn't control, but all of the other solutions are worse. - */ -#undef __P -#if defined(__STDC__) || defined(__cplusplus) -#define __P(protos) protos /* ANSI C prototypes */ -#else -#define const -#define __P(protos) () /* K&R C preprocessor */ -#endif - -#define RET_ERROR -1 /* Return values. */ -#define RET_SUCCESS 0 -#define RET_SPECIAL 1 - -#ifndef __BIT_TYPES_DEFINED__ -#define __BIT_TYPES_DEFINED__ -@u_int8_decl@ -@int16_decl@ -@u_int16_decl@ -@int32_decl@ -@u_int32_decl@ -#endif - -/* - * XXX - * SGI/IRIX already has a pgno_t. - */ -#ifdef __sgi -#define pgno_t db_pgno_t -#endif - -#define MAX_PAGE_NUMBER 0xffffffff /* >= # of pages in a file */ -typedef u_int32_t pgno_t; -#define MAX_PAGE_OFFSET 65535 /* >= # of bytes in a page */ -typedef u_int16_t indx_t; -#define MAX_REC_NUMBER 0xffffffff /* >= # of records in a tree */ -typedef u_int32_t recno_t; - -/* Key/data structure -- a Data-Base Thang. */ -typedef struct { - void *data; /* data */ - size_t size; /* data length */ -} DBT; - -/* Routine flags. */ -#define R_CURSOR 1 /* del, put, seq */ -#define __R_UNUSED 2 /* UNUSED */ -#define R_FIRST 3 /* seq */ -#define R_IAFTER 4 /* put (RECNO) */ -#define R_IBEFORE 5 /* put (RECNO) */ -#define R_LAST 6 /* seq (BTREE, RECNO) */ -#define R_NEXT 7 /* seq */ -#define R_NOOVERWRITE 8 /* put */ -#define R_PREV 9 /* seq (BTREE, RECNO) */ -#define R_SETCURSOR 10 /* put (RECNO) */ -#define R_RECNOSYNC 11 /* sync (RECNO) */ - -typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE; - -/* Access method description structure. */ -typedef struct __db { - DBTYPE type; /* Underlying db type. 
*/ - int (*close) __P((struct __db *)); - int (*del) __P((const struct __db *, const DBT *, u_int)); - int (*get) __P((const struct __db *, const DBT *, DBT *, u_int)); - int (*put) __P((const struct __db *, DBT *, const DBT *, u_int)); - int (*seq) __P((const struct __db *, DBT *, DBT *, u_int)); - int (*sync) __P((const struct __db *, u_int)); - void *internal; /* Access method private. */ - int (*fd) __P((const struct __db *)); -} DB; - -#define BTREEMAGIC 0x053162 -#define BTREEVERSION 3 - -/* Structure used to pass parameters to the btree routines. */ -typedef struct { -#define R_DUP 0x01 /* duplicate keys */ - u_int32_t flags; - u_int32_t cachesize; /* bytes to cache */ - u_int32_t maxkeypage; /* maximum keys per page */ - u_int32_t minkeypage; /* minimum keys per page */ - u_int32_t psize; /* page size */ - int (*compare) /* comparison function */ - __P((const DBT *, const DBT *)); - size_t (*prefix) /* prefix function */ - __P((const DBT *, const DBT *)); - int lorder; /* byte order */ -} BTREEINFO; - -#define HASHMAGIC 0x061561 -#define HASHVERSION 2 - -/* Structure used to pass parameters to the hashing routines. */ -typedef struct { - u_int32_t bsize; /* bucket size */ - u_int32_t ffactor; /* fill factor */ - u_int32_t nelem; /* number of elements */ - u_int32_t cachesize; /* bytes to cache */ - u_int32_t /* hash function */ - (*hash) __P((const void *, size_t)); - int lorder; /* byte order */ -} HASHINFO; - -/* Structure used to pass parameters to the record routines. */ -typedef struct { -#define R_FIXEDLEN 0x01 /* fixed-length records */ -#define R_NOKEY 0x02 /* key not required */ -#define R_SNAPSHOT 0x04 /* snapshot the input */ - u_int32_t flags; - u_int32_t cachesize; /* bytes to cache */ - u_int32_t psize; /* page size */ - int lorder; /* byte order */ - size_t reclen; /* record length (fixed-length records) */ - u_char bval; /* delimiting byte (variable-length records */ - char *bfname; /* btree file name */ -} RECNOINFO; - -/* Re-define the user's dbopen calls. */ -#define dbopen __db185_open@DB_VERSION_UNIQUE_NAME@ - -#endif /* !_DB_185_H_ */ diff --git a/storage/bdb/dbinc/db_am.h b/storage/bdb/dbinc/db_am.h deleted file mode 100644 index d9d6c51700a..00000000000 --- a/storage/bdb/dbinc/db_am.h +++ /dev/null @@ -1,197 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_am.h,v 12.8 2005/09/28 17:44:24 margo Exp $ - */ -#ifndef _DB_AM_H_ -#define _DB_AM_H_ - -/* - * IS_ENV_AUTO_COMMIT -- - * Auto-commit test for enviroment operations: DbEnv::{open,remove,rename} - */ -#define IS_ENV_AUTO_COMMIT(dbenv, txn, flags) \ - (LF_ISSET(DB_AUTO_COMMIT) || \ - ((txn) == NULL && F_ISSET((dbenv), DB_ENV_AUTO_COMMIT) && \ - !LF_ISSET(DB_NO_AUTO_COMMIT))) - -/* - * IS_DB_AUTO_COMMIT -- - * Auto-commit test for database operations. - */ -#define IS_DB_AUTO_COMMIT(dbp, txn) \ - ((txn) == NULL && F_ISSET((dbp), DB_AM_TXN)) - -/* - * STRIP_AUTO_COMMIT -- - * Releases after 4.3 no longer requires DB operations to specify the - * AUTO_COMMIT flag, but the API continues to allow it to be specified. - */ -#define STRIP_AUTO_COMMIT(f) FLD_CLR((f), DB_AUTO_COMMIT) - -/* DB recovery operation codes. */ -#define DB_ADD_DUP 1 -#define DB_REM_DUP 2 -#define DB_ADD_BIG 3 -#define DB_REM_BIG 4 - -/* - * Standard initialization and shutdown macros for all recovery functions. 
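Illustrative only: the DB 1.85 dbopen() interface declared above, assuming a library built with 1.85 compatibility so that db_185.h is installed and dbopen maps to __db185_open. The file name and key/value strings are made up.

    #include <fcntl.h>
    #include <string.h>
    #include <db_185.h>

    /*
     * Sketch only.  Return values follow the RET_ERROR/RET_SUCCESS/
     * RET_SPECIAL convention above; put with R_NOOVERWRITE returns 1
     * when the key already exists.
     */
    static int
    db185_example(void)
    {
        DB *db;
        DBT key, data;
        int ret;

        if ((db = dbopen("example185.db",
            O_CREAT | O_RDWR, 0600, DB_BTREE, NULL)) == NULL)
            return (-1);
        key.data = "fruit";
        key.size = strlen("fruit");
        data.data = "apple";
        data.size = strlen("apple");
        if ((ret = db->put(db, &key, &data, R_NOOVERWRITE)) == 0 || ret == 1)
            ret = db->get(db, &key, &data, 0);
        (void)db->close(db);
        return (ret);
    }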
- */ -#define REC_INTRO(func, inc_count, do_cursor) do { \ - argp = NULL; \ - file_dbp = NULL; \ - COMPQUIET(dbc, NULL); \ - /* mpf isn't used by all of the recovery functions. */ \ - COMPQUIET(mpf, NULL); \ - if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \ - goto out; \ - if ((ret = __dbreg_id_to_db(dbenv, argp->txnid, \ - &file_dbp, argp->fileid, inc_count)) != 0) { \ - if (ret == DB_DELETED) { \ - ret = 0; \ - goto done; \ - } \ - goto out; \ - } \ - if (do_cursor) { \ - if ((ret = __db_cursor(file_dbp, NULL, &dbc, 0)) != 0) \ - goto out; \ - F_SET(dbc, DBC_RECOVER); \ - } \ - mpf = file_dbp->mpf; \ -} while (0) - -#define REC_CLOSE { \ - int __t_ret; \ - if (argp != NULL) \ - __os_free(dbenv, argp); \ - if (dbc != NULL && \ - (__t_ret = __db_c_close(dbc)) != 0 && ret == 0) \ - ret = __t_ret; \ - } \ - return (ret) - -/* - * No-op versions of the same macros. - */ -#define REC_NOOP_INTRO(func) do { \ - argp = NULL; \ - if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \ - return (ret); \ -} while (0) -#define REC_NOOP_CLOSE \ - if (argp != NULL) \ - __os_free(dbenv, argp); \ - return (ret) - -/* - * Macro for reading pages during recovery. In most cases we - * want to avoid an error if the page is not found during rollback - * or if we are using truncate to remove pages from the file. - */ -#ifndef HAVE_FTRUNCATE -#define REC_FGET(mpf, pgno, pagep, cont) \ - if ((ret = __memp_fget(mpf, &(pgno), 0, pagep)) != 0) { \ - if (ret != DB_PAGE_NOTFOUND || DB_REDO(op)) { \ - ret = __db_pgerr(file_dbp, pgno, ret); \ - goto out; \ - } else \ - goto cont; \ - } -#else -#define REC_FGET(mpf, pgno, pagep, cont) \ - if ((ret = __memp_fget(mpf, &(pgno), 0, pagep)) != 0) { \ - if (ret != DB_PAGE_NOTFOUND) { \ - ret = __db_pgerr(file_dbp, pgno, ret); \ - goto out; \ - } else \ - goto cont; \ - } -#endif - -/* - * Standard debugging macro for all recovery functions. - */ -#ifdef DEBUG_RECOVER -#define REC_PRINT(func) \ - (void)func(dbenv, dbtp, lsnp, op, info); -#else -#define REC_PRINT(func) -#endif - -/* - * Actions to __db_lget - */ -#define LCK_ALWAYS 1 /* Lock even for off page dup cursors */ -#define LCK_COUPLE 2 /* Lock Couple */ -#define LCK_COUPLE_ALWAYS 3 /* Lock Couple even in txn. */ -#define LCK_DOWNGRADE 4 /* Downgrade the lock. (internal) */ -#define LCK_ROLLBACK 5 /* Lock even if in rollback */ - -/* - * If doing transactions we have to hold the locks associated with a data item - * from a page for the entire transaction. However, we don't have to hold the - * locks associated with walking the tree. Distinguish between the two so that - * we don't tie up the internal pages of the tree longer than necessary. - */ -#define __LPUT(dbc, lock) \ - __ENV_LPUT((dbc)->dbp->dbenv, lock) - -#define __ENV_LPUT(dbenv, lock) \ - (LOCK_ISSET(lock) ? __lock_put(dbenv, &(lock)) : 0) - -/* - * __TLPUT -- transactional lock put - * If the lock is valid then - * If we are not in a transaction put the lock. - * Else if the cursor is doing dirty reads and this was a read then - * put the lock. - * Else if the db is supporting dirty reads and this is a write then - * downgrade it. - * Else do nothing. - */ -#define __TLPUT(dbc, lock) \ - (LOCK_ISSET(lock) ? __db_lput(dbc, &(lock)) : 0) - -typedef struct { - DBC *dbc; - u_int32_t count; -} db_trunc_param; - -/* - * A database should be required to be readonly if it's been explicitly - * specified as such or if we're a client in a replicated environment and - * we don't have the special "client-writer" designation. 
- */ -#define DB_IS_READONLY(dbp) \ - (F_ISSET(dbp, DB_AM_RDONLY) || \ - (IS_REP_CLIENT((dbp)->dbenv) && \ - !F_ISSET((dbp), DB_AM_CL_WRITER))) - -/* - * For portability, primary keys that are record numbers are stored in - * secondaries in the same byte order as the secondary database. As a - * consequence, we need to swap the byte order of these keys before attempting - * to use them for lookups in the primary. We also need to swap user-supplied - * primary keys that are used in secondary lookups (for example, with the - * DB_GET_BOTH flag on a secondary get). - */ -#include "dbinc/db_swap.h" - -#define SWAP_IF_NEEDED(pdbp, sdbp, pkey) \ - do { \ - if (((pdbp)->type == DB_QUEUE || \ - (pdbp)->type == DB_RECNO) && \ - F_ISSET((sdbp), DB_AM_SWAP)) \ - P_32_SWAP((pkey)->data); \ - } while (0) - -#include "dbinc/db_dispatch.h" -#include "dbinc_auto/db_auto.h" -#include "dbinc_auto/crdel_auto.h" -#include "dbinc_auto/db_ext.h" -#endif /* !_DB_AM_H_ */ diff --git a/storage/bdb/dbinc/db_cxx.in b/storage/bdb/dbinc/db_cxx.in deleted file mode 100644 index b1a28d6f2bb..00000000000 --- a/storage/bdb/dbinc/db_cxx.in +++ /dev/null @@ -1,1147 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_cxx.in,v 12.13 2005/10/18 14:17:08 mjc Exp $ - */ - -#ifndef _DB_CXX_H_ -#define _DB_CXX_H_ -// -// C++ assumptions: -// -// To ensure portability to many platforms, both new and old, we make -// few assumptions about the C++ compiler and library. For example, -// we do not expect STL, templates or namespaces to be available. The -// "newest" C++ feature used is exceptions, which are used liberally -// to transmit error information. Even the use of exceptions can be -// disabled at runtime, to do so, use the DB_CXX_NO_EXCEPTIONS flags -// with the DbEnv or Db constructor. -// -// C++ naming conventions: -// -// - All top level class names start with Db. -// - All class members start with lower case letter. -// - All private data members are suffixed with underscore. -// - Use underscores to divide names into multiple words. -// - Simple data accessors are named with get_ or set_ prefix. -// - All method names are taken from names of functions in the C -// layer of db (usually by dropping a prefix like "db_"). -// These methods have the same argument types and order, -// other than dropping the explicit arg that acts as "this". -// -// As a rule, each DbFoo object has exactly one underlying DB_FOO struct -// (defined in db.h) associated with it. In some cases, we inherit directly -// from the DB_FOO structure to make this relationship explicit. Often, -// the underlying C layer allocates and deallocates these structures, so -// there is no easy way to add any data to the DbFoo class. When you see -// a comment about whether data is permitted to be added, this is what -// is going on. Of course, if we need to add data to such C++ classes -// in the future, we will arrange to have an indirect pointer to the -// DB_FOO struct (as some of the classes already have). 
-// - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Forward declarations -// - -#include - -@cxx_have_stdheaders@ -#ifdef HAVE_CXX_STDHEADERS -#include -#include -#define __DB_STD(x) std::x -#else -#include -#include -#define __DB_STD(x) x -#endif - -#include "db.h" - -class Db; // forward -class Dbc; // forward -class DbEnv; // forward -class DbInfo; // forward -class DbLock; // forward -class DbLogc; // forward -class DbLsn; // forward -class DbMpoolFile; // forward -class DbPreplist; // forward -class Dbt; // forward -class DbTxn; // forward -class DbLock; // forward -class DbSequence; // forward -class Dbt; // forward - -class DbMultipleIterator; // forward -class DbMultipleKeyDataIterator; // forward -class DbMultipleRecnoDataIterator; // forward -class DbMultipleDataIterator; // forward - -class DbException; // forward -class DbDeadlockException; // forward -class DbLockNotGrantedException; // forward -class DbMemoryException; // forward -class DbRepHandleDeadException; // forward -class DbRunRecoveryException; // forward - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Turn off inappropriate compiler warnings -// - -#ifdef _MSC_VER - -// These are level 4 warnings that are explicitly disabled. -// With Visual C++, by default you do not see above level 3 unless -// you use /W4. But we like to compile with the highest level -// warnings to catch other errors. -// -// 4201: nameless struct/union -// triggered by standard include file -// -// 4514: unreferenced inline function has been removed -// certain include files in MSVC define methods that are not called -// -#pragma warning(disable: 4201 4514) - -#endif - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Mechanisms for declaring classes -// - -// -// Every class defined in this file has an _exported next to the class name. -// This is needed for WinTel machines so that the class methods can -// be exported or imported in a DLL as appropriate. Users of the DLL -// use the define DB_USE_DLL. When the DLL is built, DB_CREATE_DLL -// must be defined. -// -#if defined(_MSC_VER) - -# if defined(DB_CREATE_DLL) -# define _exported __declspec(dllexport) // creator of dll -# elif defined(DB_USE_DLL) -# define _exported __declspec(dllimport) // user of dll -# else -# define _exported // static lib creator or user -# endif - -#else /* _MSC_VER */ - -# define _exported - -#endif /* _MSC_VER */ - -// Some interfaces can be customized by allowing users to define -// callback functions. For performance and logistical reasons, some -// callback functions must be declared in extern "C" blocks. For others, -// we allow you to declare the callbacks in C++ or C (or an extern "C" -// block) as you wish. See the set methods for the callbacks for -// the choices. 
-// -extern "C" { - typedef void * (*db_malloc_fcn_type) - (size_t); - typedef void * (*db_realloc_fcn_type) - (void *, size_t); - typedef void (*db_free_fcn_type) - (void *); - typedef int (*bt_compare_fcn_type) /*C++ version available*/ - (DB *, const DBT *, const DBT *); - typedef size_t (*bt_prefix_fcn_type) /*C++ version available*/ - (DB *, const DBT *, const DBT *); - typedef int (*dup_compare_fcn_type) /*C++ version available*/ - (DB *, const DBT *, const DBT *); - typedef u_int32_t (*h_hash_fcn_type) /*C++ version available*/ - (DB *, const void *, u_int32_t); - typedef int (*pgin_fcn_type) - (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie); - typedef int (*pgout_fcn_type) - (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie); -} - -// -// Represents a database table = a set of keys with associated values. -// -class _exported Db -{ - friend class DbEnv; - -public: - Db(DbEnv*, u_int32_t); // Create a Db object. - virtual ~Db(); // Calls close() if the user hasn't. - - // These methods exactly match those in the C interface. - // - virtual int associate(DbTxn *txn, Db *secondary, - int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *), - u_int32_t flags); - virtual int close(u_int32_t flags); - virtual int compact(DbTxn *txnid, Dbt *start, Dbt *stop, - DB_COMPACT *c_data, u_int32_t flags, Dbt *end); - virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags); - virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags); - virtual void err(int, const char *, ...); - virtual void errx(const char *, ...); - virtual int fd(int *fdp); - virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags); - virtual void *get_app_private() const; - virtual int get_byteswapped(int *); - virtual int get_dbname(const char **, const char **); - virtual int get_open_flags(u_int32_t *); - virtual int get_type(DBTYPE *); - virtual int get_transactional(); - virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags); - virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t); - virtual int open(DbTxn *txnid, - const char *, const char *subname, DBTYPE, u_int32_t, int); - virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data, - u_int32_t flags); - virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t); - virtual int remove(const char *, const char *, u_int32_t); - virtual int rename(const char *, const char *, const char *, u_int32_t); - virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, - db_free_fcn_type); - virtual void set_app_private(void *); - virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t)); - virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/ - virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *)); - virtual int get_bt_minkey(u_int32_t *); - virtual int set_bt_minkey(u_int32_t); - virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/ - virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *)); - virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); - virtual int set_cachesize(u_int32_t, u_int32_t, int); - virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/ - virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *)); - virtual int get_encrypt_flags(u_int32_t *); - virtual int set_encrypt(const char *, u_int32_t); - virtual void set_errcall( - void (*)(const DbEnv *, const char *, const char *)); - virtual void get_errfile(FILE **); - virtual void set_errfile(FILE *); - virtual void get_errpfx(const char **); - virtual void set_errpfx(const char 
*); - virtual int set_feedback(void (*)(Db *, int, int)); - virtual int get_flags(u_int32_t *); - virtual int set_flags(u_int32_t); - virtual int get_h_ffactor(u_int32_t *); - virtual int set_h_ffactor(u_int32_t); - virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/ - virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t)); - virtual int get_h_nelem(u_int32_t *); - virtual int set_h_nelem(u_int32_t); - virtual int get_lorder(int *); - virtual int set_lorder(int); - virtual void set_msgcall(void (*)(const DbEnv *, const char *)); - virtual void get_msgfile(FILE **); - virtual void set_msgfile(FILE *); - virtual int get_pagesize(u_int32_t *); - virtual int set_pagesize(u_int32_t); - virtual int set_paniccall(void (*)(DbEnv *, int)); - virtual int get_re_delim(int *); - virtual int set_re_delim(int); - virtual int get_re_len(u_int32_t *); - virtual int set_re_len(u_int32_t); - virtual int get_re_pad(int *); - virtual int set_re_pad(int); - virtual int get_re_source(const char **); - virtual int set_re_source(const char *); - virtual int get_q_extentsize(u_int32_t *); - virtual int set_q_extentsize(u_int32_t); - virtual int stat(DbTxn *, void *sp, u_int32_t flags); - virtual int stat_print(u_int32_t flags); - virtual int sync(u_int32_t flags); - virtual int truncate(DbTxn *, u_int32_t *, u_int32_t); - virtual int upgrade(const char *name, u_int32_t flags); - virtual int verify(const char *, const char *, __DB_STD(ostream) *, - u_int32_t); - - // These additional methods are not in the C interface, and - // are only available for C++. - // - virtual __DB_STD(ostream) *get_error_stream(); - virtual void set_error_stream(__DB_STD(ostream) *); - virtual __DB_STD(ostream) *get_message_stream(); - virtual void set_message_stream(__DB_STD(ostream) *); - - virtual DbEnv *get_env(); - virtual DbMpoolFile *get_mpf(); - - virtual DB *get_DB() - { - return imp_; - } - - virtual const DB *get_const_DB() const - { - return imp_; - } - - static Db* get_Db(DB *db) - { - return (Db *)db->api_internal; - } - - static const Db* get_const_Db(const DB *db) - { - return (const Db *)db->api_internal; - } - -private: - // no copying - Db(const Db &); - Db &operator = (const Db &); - - void cleanup(); - int initialize(); - int error_policy(); - - // instance data - DB *imp_; - DbEnv *env_; - DbMpoolFile *mpf_; - int construct_error_; - u_int32_t flags_; - u_int32_t construct_flags_; - -public: - // These are public only because they need to be called - // via C callback functions. They should never be used by - // external users of this class. 
- // - int (*append_recno_callback_)(Db *, Dbt *, db_recno_t); - int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *); - int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *); - size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *); - int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *); - void (*feedback_callback_)(Db *, int, int); - u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t); -}; - -// -// Cursor -// -class _exported Dbc : protected DBC -{ - friend class Db; - -public: - int close(); - int count(db_recno_t *countp, u_int32_t flags); - int del(u_int32_t flags); - int dup(Dbc** cursorp, u_int32_t flags); - int get(Dbt* key, Dbt *data, u_int32_t flags); - int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags); - int put(Dbt* key, Dbt *data, u_int32_t flags); - -private: - // No data is permitted in this class (see comment at top) - - // Note: use Db::cursor() to get pointers to a Dbc, - // and call Dbc::close() rather than delete to release them. - // - Dbc(); - ~Dbc(); - - // no copying - Dbc(const Dbc &); - Dbc &operator = (const Dbc &); -}; - -// -// Berkeley DB environment class. Provides functions for opening databases. -// User of this library can use this class as a starting point for -// developing a DB application - derive their application class from -// this one, add application control logic. -// -// Note that if you use the default constructor, you must explicitly -// call appinit() before any other db activity (e.g. opening files) -// -class _exported DbEnv -{ - friend class Db; - friend class DbLock; - friend class DbMpoolFile; - -public: - // After using this constructor, you can set any needed - // parameters for the environment using the set_* methods. - // Then call open() to finish initializing the environment - // and attaching it to underlying files. - // - DbEnv(u_int32_t flags); - - virtual ~DbEnv(); - - // These methods match those in the C interface. 
- // - virtual int close(u_int32_t); - virtual int dbremove(DbTxn *txn, const char *name, const char *subdb, - u_int32_t flags); - virtual int dbrename(DbTxn *txn, const char *name, const char *subdb, - const char *newname, u_int32_t flags); - virtual void err(int, const char *, ...); - virtual void errx(const char *, ...); - virtual int failchk(u_int32_t); - virtual int fileid_reset(const char *, u_int32_t); - virtual void *get_app_private() const; - virtual int get_home(const char **); - virtual int get_open_flags(u_int32_t *); - virtual int open(const char *, u_int32_t, int); - virtual int remove(const char *, u_int32_t); - virtual int stat_print(u_int32_t flags); - - virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type, - db_free_fcn_type); - virtual void set_app_private(void *); - virtual int get_cachesize(u_int32_t *, u_int32_t *, int *); - virtual int set_cachesize(u_int32_t, u_int32_t, int); - virtual int get_data_dirs(const char ***); - virtual int set_data_dir(const char *); - virtual int get_encrypt_flags(u_int32_t *); - virtual int set_intermediate_dir(int, u_int32_t); - virtual int set_isalive(int (*)(DbEnv *, pid_t, db_threadid_t)); - virtual int set_encrypt(const char *, u_int32_t); - virtual void set_errcall( - void (*)(const DbEnv *, const char *, const char *)); - virtual void get_errfile(FILE **); - virtual void set_errfile(FILE *); - virtual void get_errpfx(const char **); - virtual void set_errpfx(const char *); - virtual int get_flags(u_int32_t *); - virtual int set_flags(u_int32_t, int); - virtual bool is_bigendian(); - virtual int lsn_reset(const char *, u_int32_t); - virtual int set_feedback(void (*)(DbEnv *, int, int)); - virtual int get_lg_bsize(u_int32_t *); - virtual int set_lg_bsize(u_int32_t); - virtual int get_lg_dir(const char **); - virtual int set_lg_dir(const char *); - virtual int get_lg_filemode(int *); - virtual int set_lg_filemode(int); - virtual int get_lg_max(u_int32_t *); - virtual int set_lg_max(u_int32_t); - virtual int get_lg_regionmax(u_int32_t *); - virtual int set_lg_regionmax(u_int32_t); - virtual int get_lk_conflicts(const u_int8_t **, int *); - virtual int set_lk_conflicts(u_int8_t *, int); - virtual int get_lk_detect(u_int32_t *); - virtual int set_lk_detect(u_int32_t); - virtual int set_lk_max(u_int32_t); - virtual int get_lk_max_lockers(u_int32_t *); - virtual int set_lk_max_lockers(u_int32_t); - virtual int get_lk_max_locks(u_int32_t *); - virtual int set_lk_max_locks(u_int32_t); - virtual int get_lk_max_objects(u_int32_t *); - virtual int set_lk_max_objects(u_int32_t); - virtual int get_mp_mmapsize(size_t *); - virtual int set_mp_mmapsize(size_t); - virtual int get_mp_max_openfd(int *); - virtual int set_mp_max_openfd(int); - virtual int get_mp_max_write(int *, int *); - virtual int set_mp_max_write(int, int); - virtual void set_msgcall(void (*)(const DbEnv *, const char *)); - virtual void get_msgfile(FILE **); - virtual void set_msgfile(FILE *); - virtual int set_paniccall(void (*)(DbEnv *, int)); - virtual int set_rpc_server(void *, char *, long, long, u_int32_t); - virtual int get_shm_key(long *); - virtual int set_shm_key(long); - virtual int get_timeout(db_timeout_t *, u_int32_t); - virtual int set_timeout(db_timeout_t, u_int32_t); - virtual int get_tmp_dir(const char **); - virtual int set_tmp_dir(const char *); - virtual int get_tx_max(u_int32_t *); - virtual int set_tx_max(u_int32_t); - virtual int set_app_dispatch(int (*)(DbEnv *, - Dbt *, DbLsn *, db_recops)); - virtual int get_tx_timestamp(time_t *); - virtual 
int set_tx_timestamp(time_t *); - virtual int get_verbose(u_int32_t which, int *); - virtual int set_verbose(u_int32_t which, int); - - // Version information. A static method so it can be obtained anytime. - // - static char *version(int *major, int *minor, int *patch); - - // Convert DB errors to strings - static char *strerror(int); - - // If an error is detected and the error call function - // or stream is set, a message is dispatched or printed. - // If a prefix is set, each message is prefixed. - // - // You can use set_errcall() or set_errfile() above to control - // error functionality. Alternatively, you can call - // set_error_stream() to force all errors to a C++ stream. - // It is unwise to mix these approaches. - // - virtual __DB_STD(ostream) *get_error_stream(); - virtual void set_error_stream(__DB_STD(ostream) *); - virtual __DB_STD(ostream) *get_message_stream(); - virtual void set_message_stream(__DB_STD(ostream) *); - - // used internally - static void runtime_error(DbEnv *env, const char *caller, int err, - int error_policy); - static void runtime_error_dbt(DbEnv *env, const char *caller, Dbt *dbt, - int error_policy); - static void runtime_error_lock_get(DbEnv *env, const char *caller, - int err, db_lockop_t op, db_lockmode_t mode, - const Dbt *obj, DbLock lock, int index, - int error_policy); - - // Lock functions - // - virtual int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted); - virtual int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj, - db_lockmode_t lock_mode, DbLock *lock); - virtual int lock_id(u_int32_t *idp); - virtual int lock_id_free(u_int32_t id); - virtual int lock_put(DbLock *lock); - virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags); - virtual int lock_stat_print(u_int32_t flags); - virtual int lock_vec(u_int32_t locker, u_int32_t flags, - DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elistp); - - // Log functions - // - virtual int log_archive(char **list[], u_int32_t flags); - static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1); - virtual int log_cursor(DbLogc **cursorp, u_int32_t flags); - virtual int log_file(DbLsn *lsn, char *namep, size_t len); - virtual int log_flush(const DbLsn *lsn); - virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags); - virtual int log_printf(DbTxn *, const char *, ...); - - virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags); - virtual int log_stat_print(u_int32_t flags); - - // Mpool functions - // - virtual int memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags); - virtual int memp_register(int ftype, - pgin_fcn_type pgin_fcn, - pgout_fcn_type pgout_fcn); - virtual int memp_stat(DB_MPOOL_STAT - **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags); - virtual int memp_stat_print(u_int32_t flags); - virtual int memp_sync(DbLsn *lsn); - virtual int memp_trickle(int pct, int *nwrotep); - - // Mpool functions - // - virtual int mutex_alloc(u_int32_t, db_mutex_t *); - virtual int mutex_free(db_mutex_t); - virtual int mutex_get_align(u_int32_t *); - virtual int mutex_get_increment(u_int32_t *); - virtual int mutex_get_max(u_int32_t *); - virtual int mutex_get_tas_spins(u_int32_t *); - virtual int mutex_lock(db_mutex_t); - virtual int mutex_set_align(u_int32_t); - virtual int mutex_set_increment(u_int32_t); - virtual int mutex_set_max(u_int32_t); - virtual int mutex_set_tas_spins(u_int32_t); - virtual int mutex_stat(DB_MUTEX_STAT **, u_int32_t); - virtual int mutex_stat_print(u_int32_t); - virtual int mutex_unlock(db_mutex_t); - - // Transaction functions - // - virtual int 
txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags); - virtual int txn_checkpoint(u_int32_t kbyte, u_int32_t min, - u_int32_t flags); - virtual int txn_recover(DbPreplist *preplist, long count, - long *retp, u_int32_t flags); - virtual int txn_stat(DB_TXN_STAT **statp, u_int32_t flags); - virtual int txn_stat_print(u_int32_t flags); - - // Replication functions - // - virtual int rep_elect(int, int, int, u_int32_t, int *, u_int32_t); - virtual int rep_flush(); - virtual int rep_process_message(Dbt *, Dbt *, int *, DbLsn *); - virtual int rep_start(Dbt *, u_int32_t); - virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags); - virtual int rep_stat_print(u_int32_t flags); - virtual int get_rep_limit(u_int32_t *, u_int32_t *); - virtual int set_rep_limit(u_int32_t, u_int32_t); - virtual int set_rep_transport(int, int (*)(DbEnv *, - const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t)); - virtual int set_rep_request(u_int32_t, u_int32_t); - virtual int set_thread_count(u_int32_t); - virtual int set_thread_id(void (*)(DbEnv *, pid_t *, db_threadid_t *)); - virtual int set_thread_id_string(char *(*)(DbEnv *, pid_t, db_threadid_t, char *)); - virtual int rep_set_config(u_int32_t which, int onoff); - virtual int rep_get_config(u_int32_t which, int *onoffp); - virtual int rep_sync(u_int32_t flags); - - // Conversion functions - // - virtual DB_ENV *get_DB_ENV() - { - return imp_; - } - - virtual const DB_ENV *get_const_DB_ENV() const - { - return imp_; - } - - static DbEnv* get_DbEnv(DB_ENV *dbenv) - { - return dbenv ? (DbEnv *)dbenv->api1_internal : 0; - } - - static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv) - { - return dbenv ? (const DbEnv *)dbenv->api1_internal : 0; - } - - // For internal use only. - static DbEnv* wrap_DB_ENV(DB_ENV *dbenv); - - // These are public only because they need to be called - // via C functions. They should never be called by users - // of this class. - // - static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn, - db_recops op); - static void _paniccall_intercept(DB_ENV *env, int errval); - static void _feedback_intercept(DB_ENV *env, int opcode, int pct); - static int _isalive_intercept(DB_ENV *env, pid_t pid, - db_threadid_t thrid); - static int _rep_send_intercept(DB_ENV *env, const DBT *cntrl, - const DBT *data, const DB_LSN *lsn, int id, u_int32_t flags); - static void _stream_error_function(const DB_ENV *env, - const char *prefix, const char *message); - static void _stream_message_function(const DB_ENV *env, - const char *message); - static void _thread_id_intercept(DB_ENV *env, pid_t *pidp, - db_threadid_t *thridp); - static char *_thread_id_string_intercept(DB_ENV *env, pid_t pid, - db_threadid_t thrid, char *buf); - -private: - void cleanup(); - int initialize(DB_ENV *env); - int error_policy(); - - // For internal use only. 
- DbEnv(DB_ENV *, u_int32_t flags); - - // no copying - DbEnv(const DbEnv &); - void operator = (const DbEnv &); - - // instance data - DB_ENV *imp_; - int construct_error_; - u_int32_t construct_flags_; - __DB_STD(ostream) *error_stream_; - __DB_STD(ostream) *message_stream_; - - int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops); - int (*isalive_callback_)(DbEnv *, pid_t, db_threadid_t); - void (*error_callback_)(const DbEnv *, const char *, const char *); - void (*feedback_callback_)(DbEnv *, int, int); - void (*message_callback_)(const DbEnv *, const char *); - void (*paniccall_callback_)(DbEnv *, int); - int (*rep_send_callback_)(DbEnv *, const Dbt *, const Dbt *, - const DbLsn *, int, u_int32_t); - void (*thread_id_callback_)(DbEnv *, pid_t *, db_threadid_t *); - char *(*thread_id_string_callback_)(DbEnv *, pid_t, db_threadid_t, - char *); -}; - -// -// Lock -// -class _exported DbLock -{ - friend class DbEnv; - -public: - DbLock(); - DbLock(const DbLock &); - DbLock &operator = (const DbLock &); - -protected: - // We can add data to this class if needed - // since its contained class is not allocated by db. - // (see comment at top) - - DbLock(DB_LOCK); - DB_LOCK lock_; -}; - -// -// Log cursor -// -class _exported DbLogc : protected DB_LOGC -{ - friend class DbEnv; - -public: - int close(u_int32_t _flags); - int get(DbLsn *lsn, Dbt *data, u_int32_t _flags); - -private: - // No data is permitted in this class (see comment at top) - - // Note: use Db::cursor() to get pointers to a Dbc, - // and call Dbc::close() rather than delete to release them. - // - DbLogc(); - ~DbLogc(); - - // no copying - DbLogc(const Dbc &); - DbLogc &operator = (const Dbc &); -}; - -// -// Log sequence number -// -class _exported DbLsn : public DB_LSN -{ - friend class DbEnv; // friendship needed to cast to base class - friend class DbLogc; // friendship needed to cast to base class -}; - -// -// Memory pool file -// -class _exported DbMpoolFile -{ - friend class DbEnv; - friend class Db; - -public: - int close(u_int32_t flags); - int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep); - int open(const char *file, u_int32_t flags, int mode, size_t pagesize); - int get_transactional(void); - int put(void *pgaddr, u_int32_t flags); - int set(void *pgaddr, u_int32_t flags); - int get_clear_len(u_int32_t *len); - int set_clear_len(u_int32_t len); - int get_fileid(u_int8_t *fileid); - int set_fileid(u_int8_t *fileid); - int get_flags(u_int32_t *flagsp); - int set_flags(u_int32_t flags, int onoff); - int get_ftype(int *ftype); - int set_ftype(int ftype); - int get_lsn_offset(int32_t *offsetp); - int set_lsn_offset(int32_t offset); - int get_maxsize(u_int32_t *gbytes, u_int32_t *bytes); - int set_maxsize(u_int32_t gbytes, u_int32_t bytes); - int get_pgcookie(DBT *dbt); - int set_pgcookie(DBT *dbt); - int get_priority(DB_CACHE_PRIORITY *priorityp); - int set_priority(DB_CACHE_PRIORITY priority); - int sync(); - - virtual DB_MPOOLFILE *get_DB_MPOOLFILE() - { - return imp_; - } - - virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const - { - return imp_; - } - -private: - DB_MPOOLFILE *imp_; - - // We can add data to this class if needed - // since it is implemented via a pointer. - // (see comment at top) - - // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile, - // and call DbMpoolFile::close() rather than delete to release them. - // - DbMpoolFile(); - - // Shut g++ up. 
-protected: - virtual ~DbMpoolFile(); - -private: - // no copying - DbMpoolFile(const DbMpoolFile &); - void operator = (const DbMpoolFile &); -}; - -// -// This is filled in and returned by the DbEnv::txn_recover() method. -// -class _exported DbPreplist -{ -public: - DbTxn *txn; - u_int8_t gid[DB_XIDDATASIZE]; -}; - -// -// A sequence record in a database -// -class _exported DbSequence -{ -public: - DbSequence(Db *db, u_int32_t flags); - virtual ~DbSequence(); - - int open(DbTxn *txnid, Dbt *key, u_int32_t flags); - int initial_value(db_seq_t value); - int close(u_int32_t flags); - int remove(DbTxn *txnid, u_int32_t flags); - int stat(DB_SEQUENCE_STAT **sp, u_int32_t flags); - int stat_print(u_int32_t flags); - - int get(DbTxn *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags); - int get_cachesize(int32_t *sizep); - int set_cachesize(int32_t size); - int get_flags(u_int32_t *flagsp); - int set_flags(u_int32_t flags); - int get_range(db_seq_t *minp, db_seq_t *maxp); - int set_range(db_seq_t min, db_seq_t max); - - Db *get_db(); - Dbt *get_key(); - - virtual DB_SEQUENCE *get_DB_SEQUENCE() - { - return imp_; - } - - virtual const DB_SEQUENCE *get_const_DB_SEQUENCE() const - { - return imp_; - } - - static DbSequence* get_DbSequence(DB_SEQUENCE *seq) - { - return (DbSequence *)seq->api_internal; - } - - static const DbSequence* get_const_DbSequence(const DB_SEQUENCE *seq) - { - return (const DbSequence *)seq->api_internal; - } - - // For internal use only. - static DbSequence* wrap_DB_SEQUENCE(DB_SEQUENCE *seq); - -private: - DbSequence(DB_SEQUENCE *seq); - // no copying - DbSequence(const DbSequence &); - DbSequence &operator = (const DbSequence &); - - DB_SEQUENCE *imp_; - DBT key_; -}; - -// -// Transaction -// -class _exported DbTxn -{ - friend class DbEnv; - -public: - int abort(); - int commit(u_int32_t flags); - int discard(u_int32_t flags); - u_int32_t id(); - int get_name(const char **namep); - int prepare(u_int8_t *gid); - int set_name(const char *name); - int set_timeout(db_timeout_t timeout, u_int32_t flags); - - virtual DB_TXN *get_DB_TXN() - { - return imp_; - } - - virtual const DB_TXN *get_const_DB_TXN() const - { - return imp_; - } - - static DbTxn* get_DbTxn(DB_TXN *txn) - { - return (DbTxn *)txn->api_internal; - } - - static const DbTxn* get_const_DbTxn(const DB_TXN *txn) - { - return (const DbTxn *)txn->api_internal; - } - - // For internal use only. - static DbTxn* wrap_DB_TXN(DB_TXN *txn); - -private: - DB_TXN *imp_; - - // We can add data to this class if needed - // since it is implemented via a pointer. - // (see comment at top) - - // Note: use DbEnv::txn_begin() to get pointers to a DbTxn, - // and call DbTxn::abort() or DbTxn::commit rather than - // delete to release them. - // - DbTxn(); - // For internal use only. - DbTxn(DB_TXN *txn); - virtual ~DbTxn(); - - // no copying - DbTxn(const DbTxn &); - void operator = (const DbTxn &); -}; - -// -// A chunk of data, maybe a key or value. -// -class _exported Dbt : private DBT -{ - friend class Db; - friend class Dbc; - friend class DbEnv; - friend class DbLogc; - friend class DbSequence; - -public: - // key/data - void *get_data() const { return data; } - void set_data(void *value) { data = value; } - - // key/data length - u_int32_t get_size() const { return size; } - void set_size(u_int32_t value) { size = value; } - - // RO: length of user buffer. - u_int32_t get_ulen() const { return ulen; } - void set_ulen(u_int32_t value) { ulen = value; } - - // RO: get/put record length. 
- u_int32_t get_dlen() const { return dlen; } - void set_dlen(u_int32_t value) { dlen = value; } - - // RO: get/put record offset. - u_int32_t get_doff() const { return doff; } - void set_doff(u_int32_t value) { doff = value; } - - // flags - u_int32_t get_flags() const { return flags; } - void set_flags(u_int32_t value) { flags = value; } - - // Conversion functions - DBT *get_DBT() { return (DBT *)this; } - const DBT *get_const_DBT() const { return (const DBT *)this; } - - static Dbt* get_Dbt(DBT *dbt) { return (Dbt *)dbt; } - static const Dbt* get_const_Dbt(const DBT *dbt) - { return (const Dbt *)dbt; } - - Dbt(void *data, u_int32_t size); - Dbt(); - ~Dbt(); - Dbt(const Dbt &); - Dbt &operator = (const Dbt &); - -private: - // Note: no extra data appears in this class (other than - // inherited from DBT) since we need DBT and Dbt objects - // to have interchangable pointers. - // - // When subclassing this class, remember that callback - // methods like bt_compare, bt_prefix, dup_compare may - // internally manufacture DBT objects (which later are - // cast to Dbt), so such callbacks might receive objects - // not of your subclassed type. -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// multiple key/data/reco iterator classes -// - -// DbMultipleIterator is a shared private base class for the three types -// of bulk-return Iterator; it should never be instantiated directly, -// but it handles the functionality shared by its subclasses. -class _exported DbMultipleIterator -{ -public: - DbMultipleIterator(const Dbt &dbt); -protected: - u_int8_t *data_; - u_int32_t *p_; -}; - -class _exported DbMultipleKeyDataIterator : private DbMultipleIterator -{ -public: - DbMultipleKeyDataIterator(const Dbt &dbt) : DbMultipleIterator(dbt) {} - bool next(Dbt &key, Dbt &data); -}; - -class _exported DbMultipleRecnoDataIterator : private DbMultipleIterator -{ -public: - DbMultipleRecnoDataIterator(const Dbt &dbt) : DbMultipleIterator(dbt) {} - bool next(db_recno_t &recno, Dbt &data); -}; - -class _exported DbMultipleDataIterator : private DbMultipleIterator -{ -public: - DbMultipleDataIterator(const Dbt &dbt) : DbMultipleIterator(dbt) {} - bool next(Dbt &data); -}; - -//////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////// -// -// Exception classes -// - -// Almost any error in the DB library throws a DbException. -// Every exception should be considered an abnormality -// (e.g. bug, misuse of DB, file system error). -// -class _exported DbException : public __DB_STD(exception) -{ -public: - virtual ~DbException() throw(); - DbException(int err); - DbException(const char *description); - DbException(const char *description, int err); - DbException(const char *prefix, const char *description, int err); - int get_errno() const; - virtual const char *what() const throw(); - DbEnv *get_env() const; - void set_env(DbEnv *env); - - DbException(const DbException &); - DbException &operator = (const DbException &); - -private: - void describe(const char *prefix, const char *description); - - char *what_; - int err_; // errno - DbEnv *env_; -}; - -// -// A specific sort of exception that occurs when -// an operation is aborted to resolve a deadlock. 
-// -class _exported DbDeadlockException : public DbException -{ -public: - virtual ~DbDeadlockException() throw(); - DbDeadlockException(const char *description); - - DbDeadlockException(const DbDeadlockException &); - DbDeadlockException &operator = (const DbDeadlockException &); -}; - -// -// A specific sort of exception that occurs when -// a lock is not granted, e.g. by lock_get or lock_vec. -// Note that the Dbt is only live as long as the Dbt used -// in the offending call. -// -class _exported DbLockNotGrantedException : public DbException -{ -public: - virtual ~DbLockNotGrantedException() throw(); - DbLockNotGrantedException(const char *prefix, db_lockop_t op, - db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index); - DbLockNotGrantedException(const char *description); - - DbLockNotGrantedException(const DbLockNotGrantedException &); - DbLockNotGrantedException &operator = - (const DbLockNotGrantedException &); - - db_lockop_t get_op() const; - db_lockmode_t get_mode() const; - const Dbt* get_obj() const; - DbLock *get_lock() const; - int get_index() const; - -private: - db_lockop_t op_; - db_lockmode_t mode_; - const Dbt *obj_; - DbLock *lock_; - int index_; -}; - -// -// A specific sort of exception that occurs when -// user declared memory is insufficient in a Dbt. -// -class _exported DbMemoryException : public DbException -{ -public: - virtual ~DbMemoryException() throw(); - DbMemoryException(Dbt *dbt); - DbMemoryException(const char *prefix, Dbt *dbt); - - DbMemoryException(const DbMemoryException &); - DbMemoryException &operator = (const DbMemoryException &); - - Dbt *get_dbt() const; -private: - Dbt *dbt_; -}; - -// -// A specific sort of exception that occurs when a change of replication -// master requires that all handles be re-opened. -// -class _exported DbRepHandleDeadException : public DbException -{ -public: - virtual ~DbRepHandleDeadException() throw(); - DbRepHandleDeadException(const char *description); - - DbRepHandleDeadException(const DbRepHandleDeadException &); - DbRepHandleDeadException &operator = (const DbRepHandleDeadException &); -}; - -// -// A specific sort of exception that occurs when -// recovery is required before continuing DB activity. -// -class _exported DbRunRecoveryException : public DbException -{ -public: - virtual ~DbRunRecoveryException() throw(); - DbRunRecoveryException(const char *description); - - DbRunRecoveryException(const DbRunRecoveryException &); - DbRunRecoveryException &operator = (const DbRunRecoveryException &); -}; -#endif /* !_DB_CXX_H_ */ diff --git a/storage/bdb/dbinc/db_dispatch.h b/storage/bdb/dbinc/db_dispatch.h deleted file mode 100644 index eee9c59d2a8..00000000000 --- a/storage/bdb/dbinc/db_dispatch.h +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1995, 1996 - * The President and Fellows of Harvard University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_dispatch.h,v 12.5 2005/10/19 15:10:44 bostic Exp $ - */ - -#ifndef _DB_DISPATCH_H_ -#define _DB_DISPATCH_H_ - -/* - * Declarations and typedefs for the list of transaction IDs used during - * recovery. This is a generic list used to pass along whatever information - * we need during recovery. - */ -typedef enum { - TXNLIST_DELETE, - TXNLIST_LSN, - TXNLIST_PGNO, - TXNLIST_TXNID -} db_txnlist_type; - -#define DB_TXNLIST_MASK(hp, n) (n % hp->nslots) -struct __db_txnhead { - u_int32_t maxid; /* Maximum transaction id. */ - DB_LSN maxlsn; /* Maximum commit lsn. */ - DB_LSN ckplsn; /* LSN of last retained checkpoint. */ - DB_LSN trunc_lsn; /* Lsn to which we are going to truncate; - * make sure we abort anyone after this. */ - u_int32_t generation; /* Current generation number. */ - u_int32_t gen_alloc; /* Number of generations allocated. */ - struct { - u_int32_t generation; - u_int32_t txn_min; - u_int32_t txn_max; - } *gen_array; /* Array of txnids associated with a gen. */ - u_int nslots; - LIST_HEAD(__db_headlink, __db_txnlist) head[1]; -}; - -#define DB_LSN_STACK_SIZE 4 -struct __db_txnlist { - db_txnlist_type type; - LIST_ENTRY(__db_txnlist) links; - union { - struct { - u_int32_t txnid; - u_int32_t generation; - u_int32_t status; - } t; - struct { - u_int32_t stack_size; - u_int32_t stack_indx; - DB_LSN *lsn_stack; - } l; - struct { - u_int32_t nentries; - u_int32_t maxentry; - int32_t locked; - char *fname; - int32_t fileid; - db_pgno_t *pgno_array; - u_int8_t uid[DB_FILE_ID_LEN]; - } p; - } u; -}; - -/* - * States for limbo list processing. - */ -typedef enum { - LIMBO_NORMAL, /* Normal processing. */ - LIMBO_PREPARE, /* We are preparing a transaction. */ - LIMBO_RECOVER, /* We are in recovery. */ - LIMBO_TIMESTAMP, /* We are recovering to a timestamp. */ - LIMBO_COMPENSATE /* After recover to ts, generate log records. */ -} db_limbo_state; - -#endif /* !_DB_DISPATCH_H_ */ diff --git a/storage/bdb/dbinc/db_int.in b/storage/bdb/dbinc/db_int.in deleted file mode 100644 index 55be4366326..00000000000 --- a/storage/bdb/dbinc/db_int.in +++ /dev/null @@ -1,670 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_int.in,v 12.15 2005/11/03 17:46:08 bostic Exp $ - */ - -#ifndef _DB_INTERNAL_H_ -#define _DB_INTERNAL_H_ - -/******************************************************* - * System includes, db.h, a few general DB includes. 
The DB includes are - * here because it's OK if db_int.h includes queue structure declarations. - *******************************************************/ -#ifndef NO_SYSTEM_INCLUDES -#if defined(STDC_HEADERS) || defined(__cplusplus) -#include -#else -#include -#endif -#include -#endif - -#include "db.h" - -#include "dbinc/queue.h" -#include "dbinc/shqueue.h" - -#if defined(__cplusplus) -extern "C" { -#endif - -/******************************************************* - * General purpose constants and macros. - *******************************************************/ -#ifndef UINT16_MAX -#define UINT16_MAX 65535 /* Maximum 16-bit unsigned. */ -#endif -#ifndef UINT32_MAX -#ifdef __STDC__ -#define UINT32_MAX 4294967295U /* Maximum 32-bit unsigned. */ -#else -#define UINT32_MAX 0xffffffff /* Maximum 32-bit unsigned. */ -#endif -#endif - -#if defined(HAVE_64BIT_TYPES) -#undef INT64_MAX -#undef INT64_MIN -#undef UINT64_MAX - -#ifdef DB_WIN32 -#define INT64_MAX _I64_MAX -#define INT64_MIN _I64_MIN -#define UINT64_MAX _UI64_MAX - -#define INT64_FMT "%l64d" -#define UINT64_FMT "%l64u" -#else -/* - * Override the system's 64-bit min/max constants. AIX's 32-bit compiler can - * handle 64-bit values, but the system's constants don't include the LL/ULL - * suffix, and so can't be compiled using the 32-bit compiler. - */ -#define INT64_MAX 9223372036854775807LL -#define INT64_MIN (-INT64_MAX-1) -#define UINT64_MAX 18446744073709551615ULL - -@INT64_FMT@ -@UINT64_FMT@ -#endif /* DB_WIN32 */ -#endif /* HAVE_LONG_LONG && HAVE_UNSIGNED_LONG_LONG */ - -#define MEGABYTE 1048576 -#define GIGABYTE 1073741824 - -#define MS_PER_SEC 1000 /* Milliseconds in a second. */ -#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */ - -#define RECNO_OOB 0 /* Illegal record number. */ - -/* Test for a power-of-two (tests true for zero, which doesn't matter here). */ -#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0) - -/* Test for valid page sizes. */ -#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */ -#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */ -#define IS_VALID_PAGESIZE(x) \ - (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE)) - -/* Minimum number of pages cached, by default. */ -#define DB_MINPAGECACHE 16 - -/* - * If we are unable to determine the underlying filesystem block size, use - * 8K on the grounds that most OS's use less than 8K for a VM page size. - */ -#define DB_DEF_IOSIZE (8 * 1024) - -/* Align an integer to a specific boundary. */ -#undef DB_ALIGN -#define DB_ALIGN(v, bound) \ - (((v) + (bound) - 1) & ~(((uintmax_t)(bound)) - 1)) - -/* Increment a pointer to a specific boundary. */ -#undef ALIGNP_INC -#define ALIGNP_INC(p, bound) \ - (void *)(((uintptr_t)(p) + (bound) - 1) & ~(((uintptr_t)(bound)) - 1)) - -/* Decrement a pointer to a specific boundary. */ -#undef ALIGNP_DEC -#define ALIGNP_DEC(p, bound) \ - (void *)((uintptr_t)(p) & ~(((uintptr_t)(bound)) - 1)) - -/* - * Print an address as a u_long (a u_long is the largest type we can print - * portably). Most 64-bit systems have made longs 64-bits, so this should - * work. - */ -#define P_TO_ULONG(p) ((u_long)(uintptr_t)(p)) - -/* - * Convert a pointer to a small integral value. - * - * The (u_int16_t)(uintptr_t) cast avoids warnings: the (uintptr_t) cast - * converts the value to an integral type, and the (u_int16_t) cast converts - * it to a small integral type so we don't get complaints when we assign the - * final result to an integral type smaller than uintptr_t. 
- */ -#define P_TO_UINT32(p) ((u_int32_t)(uintptr_t)(p)) -#define P_TO_UINT16(p) ((u_int16_t)(uintptr_t)(p)) - -/* - * There are several on-page structures that are declared to have a number of - * fields followed by a variable length array of items. The structure size - * without including the variable length array or the address of the first of - * those elements can be found using SSZ. - * - * This macro can also be used to find the offset of a structure element in a - * structure. This is used in various places to copy structure elements from - * unaligned memory references, e.g., pointers into a packed page. - * - * There are two versions because compilers object if you take the address of - * an array. - */ -#undef SSZ -#define SSZ(name, field) P_TO_UINT16(&(((name *)0)->field)) - -#undef SSZA -#define SSZA(name, field) P_TO_UINT16(&(((name *)0)->field[0])) - -/* Structure used to print flag values. */ -typedef struct __fn { - u_int32_t mask; /* Flag value. */ - const char *name; /* Flag name. */ -} FN; - -/* Set, clear and test flags. */ -#define FLD_CLR(fld, f) (fld) &= ~(f) -#define FLD_ISSET(fld, f) ((fld) & (f)) -#define FLD_SET(fld, f) (fld) |= (f) -#define F_CLR(p, f) (p)->flags &= ~(f) -#define F_ISSET(p, f) ((p)->flags & (f)) -#define F_SET(p, f) (p)->flags |= (f) -#define LF_CLR(f) ((flags) &= ~(f)) -#define LF_ISSET(f) ((flags) & (f)) -#define LF_SET(f) ((flags) |= (f)) - -/* - * Calculate a percentage. The values can overflow 32-bit integer arithmetic - * so we use floating point. - * - * When calculating a bytes-vs-page size percentage, we're getting the inverse - * of the percentage in all cases, that is, we want 100 minus the percentage we - * calculate. - */ -#define DB_PCT(v, total) \ - ((int)((total) == 0 ? 0 : ((double)(v) * 100) / (total))) -#define DB_PCT_PG(v, total, pgsize) \ - ((int)((total) == 0 ? 0 : \ - 100 - ((double)(v) * 100) / (((double)total) * (pgsize)))) - -/* - * Structure used for callback message aggregation. - * - * Display values in XXX_stat_print calls. - */ -typedef struct __db_msgbuf { - char *buf; /* Heap allocated buffer. */ - char *cur; /* Current end of message. */ - size_t len; /* Allocated length of buffer. */ -} DB_MSGBUF; -#define DB_MSGBUF_INIT(a) do { \ - (a)->buf = (a)->cur = NULL; \ - (a)->len = 0; \ -} while (0) -#define DB_MSGBUF_FLUSH(dbenv, a) do { \ - if ((a)->buf != NULL) { \ - if ((a)->cur != (a)->buf) \ - __db_msg(dbenv, "%s", (a)->buf); \ - __os_free(dbenv, (a)->buf); \ - DB_MSGBUF_INIT(a); \ - } \ -} while (0) -#define STAT_FMT(msg, fmt, type, v) do { \ - DB_MSGBUF __mb; \ - DB_MSGBUF_INIT(&__mb); \ - __db_msgadd(dbenv, &__mb, fmt, (type)(v)); \ - __db_msgadd(dbenv, &__mb, "\t%s", msg); \ - DB_MSGBUF_FLUSH(dbenv, &__mb); \ -} while (0) -#define STAT_HEX(msg, v) \ - __db_msg(dbenv, "%#lx\t%s", (u_long)(v), msg) -#define STAT_ISSET(msg, p) \ - __db_msg(dbenv, "%sSet\t%s", (p) == NULL ? "!" : " ", msg) -#define STAT_LONG(msg, v) \ - __db_msg(dbenv, "%ld\t%s", (long)(v), msg) -#define STAT_LSN(msg, lsnp) \ - __db_msg(dbenv, "%lu/%lu\t%s", \ - (u_long)(lsnp)->file, (u_long)(lsnp)->offset, msg) -#define STAT_POINTER(msg, v) \ - __db_msg(dbenv, "%#lx\t%s", P_TO_ULONG(v), msg) -#define STAT_STRING(msg, p) do { \ - const char *__p = p; /* p may be a function call. */ \ - __db_msg(dbenv, "%s\t%s", __p == NULL ? 
"!Set" : __p, msg); \ -} while (0) -#define STAT_ULONG(msg, v) \ - __db_msg(dbenv, "%lu\t%s", (u_long)(v), msg) - -/******************************************************* - * API return values - *******************************************************/ -/* - * Return values that are OK for each different call. Most calls have a - * standard 'return of 0 is only OK value', but some, like db->get have - * DB_NOTFOUND as a return value, but it really isn't an error. - */ -#define DB_RETOK_STD(ret) ((ret) == 0) -#define DB_RETOK_DBCDEL(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \ - (ret) == DB_NOTFOUND) -#define DB_RETOK_DBCGET(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \ - (ret) == DB_NOTFOUND) -#define DB_RETOK_DBCPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST || \ - (ret) == DB_NOTFOUND) -#define DB_RETOK_DBDEL(ret) DB_RETOK_DBCDEL(ret) -#define DB_RETOK_DBGET(ret) DB_RETOK_DBCGET(ret) -#define DB_RETOK_DBPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST) -#define DB_RETOK_LGGET(ret) ((ret) == 0 || (ret) == DB_NOTFOUND) -#define DB_RETOK_MPGET(ret) ((ret) == 0 || (ret) == DB_PAGE_NOTFOUND) -#define DB_RETOK_REPPMSG(ret) ((ret) == 0 || \ - (ret) == DB_REP_IGNORE || \ - (ret) == DB_REP_ISPERM || \ - (ret) == DB_REP_NEWMASTER || \ - (ret) == DB_REP_NEWSITE || \ - (ret) == DB_REP_NOTPERM || \ - (ret) == DB_REP_STARTUPDONE) - -/* Find a reasonable operation-not-supported error. */ -#ifdef EOPNOTSUPP -#define DB_OPNOTSUP EOPNOTSUPP -#else -#ifdef ENOTSUP -#define DB_OPNOTSUP ENOTSUP -#else -#define DB_OPNOTSUP EINVAL -#endif -#endif - -/******************************************************* - * Files. - *******************************************************/ -/* - * We use 1024 as the maximum path length. It's too hard to figure out what - * the real path length is, as it was traditionally stored in , - * and that file isn't always available. - */ -#undef MAXPATHLEN -#define MAXPATHLEN 1024 - -#define PATH_DOT "." /* Current working directory. */ - /* Path separator character(s). */ -#define PATH_SEPARATOR "@PATH_SEPARATOR@" - -/******************************************************* - * Environment. - *******************************************************/ -/* Type passed to __db_appname(). */ -typedef enum { - DB_APP_NONE=0, /* No type (region). */ - DB_APP_DATA, /* Data file. */ - DB_APP_LOG, /* Log file. */ - DB_APP_TMP /* Temporary file. */ -} APPNAME; - -/* - * ALIVE_ON The is_alive function is configured. - * CDB_LOCKING CDB product locking. - * CRYPTO_ON Security has been configured. - * LOCKING_ON Locking has been configured. - * LOGGING_ON Logging has been configured. - * MUTEX_ON Mutexes have been configured. - * MPOOL_ON Memory pool has been configured. - * REP_ON Replication has been configured. - * RPC_ON RPC has been configured. - * TXN_ON Transactions have been configured. - */ -#define ALIVE_ON(dbenv) ((dbenv)->is_alive != NULL) -#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB) -#define CRYPTO_ON(dbenv) ((dbenv)->crypto_handle != NULL) -#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL) -#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL) -#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL) -#define MUTEX_ON(dbenv) ((dbenv)->mutex_handle != NULL) -#define REP_ON(dbenv) ((dbenv)->rep_handle != NULL) -#define RPC_ON(dbenv) ((dbenv)->cl_handle != NULL) -#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL) - -/* - * STD_LOCKING Standard locking, that is, locking was configured and CDB - * was not. 
We do not do locking in off-page duplicate trees, - * so we check for that in the cursor first. - */ -#define STD_LOCKING(dbc) \ - (!F_ISSET(dbc, DBC_OPD) && \ - !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv)) - -/* - * IS_RECOVERING: The system is running recovery. - */ -#define IS_RECOVERING(dbenv) \ - (LOGGING_ON(dbenv) && \ - F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER)) - -/* Initialization methods are often illegal before/after open is called. */ -#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \ - if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ - return (__db_mi_open(dbenv, name, 1)); -#define ENV_ILLEGAL_BEFORE_OPEN(dbenv, name) \ - if (!F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ - return (__db_mi_open(dbenv, name, 0)); - -/* We're not actually user hostile, honest. */ -#define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \ - if (handle == NULL) \ - return (__db_env_config(dbenv, i, flags)); -#define ENV_NOT_CONFIGURED(dbenv, handle, i, flags) \ - if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \ - ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) - -#define ENV_ENTER(dbenv, ip) do { \ - int __ret; \ - if ((dbenv)->thr_hashtab == NULL) \ - ip = NULL; \ - else { \ - if ((__ret = \ - __env_set_state(dbenv, &(ip), THREAD_ACTIVE)) != 0) \ - return (__ret); \ - } \ -} while (0) - -#ifdef DIAGNOSTIC -#define ENV_LEAVE(dbenv, ip) do { \ - if ((ip) != NULL) { \ - DB_ASSERT(ip->dbth_state == THREAD_ACTIVE); \ - (ip)->dbth_state = THREAD_OUT; \ - } \ -} while (0) -#else -#define ENV_LEAVE(dbenv, ip) do { \ - if ((ip) != NULL) \ - (ip)->dbth_state = THREAD_OUT; \ -} while (0) -#endif -#ifdef DIAGNOSTIC -#define CHECK_THREAD(dbenv) do { \ - DB_THREAD_INFO *__ip; \ - if ((dbenv)->thr_hashtab != NULL) { \ - (void)__env_set_state(dbenv, &__ip, THREAD_DIAGNOSTIC); \ - DB_ASSERT(__ip != NULL && \ - __ip->dbth_state != THREAD_OUT); \ - } \ -} while (0) -#define CHECK_MTX_THREAD(dbenv, mtx) do { \ - if (mtx->alloc_id != MTX_MUTEX_REGION && \ - mtx->alloc_id != MTX_ENV_REGION && \ - mtx->alloc_id != MTX_APPLICATION) \ - CHECK_THREAD(dbenv); \ -} while (0) -#else -#define CHECK_THREAD(dbenv) -#define CHECK_MTX_THREAD(dbenv, mtx) -#endif - -typedef enum { - THREAD_SLOT_NOT_IN_USE=0, - THREAD_OUT, - THREAD_ACTIVE, - THREAD_BLOCKED -#ifdef DIAGNOSTIC - , THREAD_DIAGNOSTIC -#endif -} DB_THREAD_STATE; - -typedef struct __db_thread_info { - pid_t dbth_pid; - db_threadid_t dbth_tid; - DB_THREAD_STATE dbth_state; - SH_TAILQ_ENTRY dbth_links; -} DB_THREAD_INFO; - -typedef struct __env_thread_info { - u_int32_t thr_count; - u_int32_t thr_max; - u_int32_t thr_nbucket; - roff_t thr_hashoff; -} THREAD_INFO; - -/******************************************************* - * Database Access Methods. - *******************************************************/ -/* - * DB_IS_THREADED -- - * The database handle is free-threaded (was opened with DB_THREAD). - */ -#define DB_IS_THREADED(dbp) \ - ((dbp)->mutex != MUTEX_INVALID) - -/* Initialization methods are often illegal before/after open is called. */ -#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \ - if (F_ISSET((dbp), DB_AM_OPEN_CALLED)) \ - return (__db_mi_open((dbp)->dbenv, name, 1)); -#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \ - if (!F_ISSET((dbp), DB_AM_OPEN_CALLED)) \ - return (__db_mi_open((dbp)->dbenv, name, 0)); -/* Some initialization methods are illegal if environment isn't local. 
*/ -#define DB_ILLEGAL_IN_ENV(dbp, name) \ - if (!F_ISSET((dbp)->dbenv, DB_ENV_DBLOCAL)) \ - return (__db_mi_env((dbp)->dbenv, name)); -#define DB_ILLEGAL_METHOD(dbp, flags) { \ - int __ret; \ - if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \ - return (__ret); \ -} - -/* - * Common DBC->internal fields. Each access method adds additional fields - * to this list, but the initial fields are common. - */ -#define __DBC_INTERNAL \ - DBC *opd; /* Off-page duplicate cursor. */\ - \ - void *page; /* Referenced page. */ \ - db_pgno_t root; /* Tree root. */ \ - db_pgno_t pgno; /* Referenced page number. */ \ - db_indx_t indx; /* Referenced key item index. */\ - \ - DB_LOCK lock; /* Cursor lock. */ \ - db_lockmode_t lock_mode; /* Lock mode. */ - -struct __dbc_internal { - __DBC_INTERNAL -}; - -/* Actions that __db_master_update can take. */ -typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action; - -/* - * Access-method-common macro for determining whether a cursor - * has been initialized. - */ -#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID) - -/* Free the callback-allocated buffer, if necessary, hanging off of a DBT. */ -#define FREE_IF_NEEDED(sdbp, dbt) \ - if (F_ISSET((dbt), DB_DBT_APPMALLOC)) { \ - __os_ufree((sdbp)->dbenv, (dbt)->data); \ - F_CLR((dbt), DB_DBT_APPMALLOC); \ - } - -/* - * Use memory belonging to object "owner" to return the results of - * any no-DBT-flag get ops on cursor "dbc". - */ -#define SET_RET_MEM(dbc, owner) \ - do { \ - (dbc)->rskey = &(owner)->my_rskey; \ - (dbc)->rkey = &(owner)->my_rkey; \ - (dbc)->rdata = &(owner)->my_rdata; \ - } while (0) - -/* Use the return-data memory src is currently set to use in dest as well. */ -#define COPY_RET_MEM(src, dest) \ - do { \ - (dest)->rskey = (src)->rskey; \ - (dest)->rkey = (src)->rkey; \ - (dest)->rdata = (src)->rdata; \ - } while (0) - -/* Reset the returned-memory pointers to their defaults. */ -#define RESET_RET_MEM(dbc) \ - do { \ - (dbc)->rskey = &(dbc)->my_rskey; \ - (dbc)->rkey = &(dbc)->my_rkey; \ - (dbc)->rdata = &(dbc)->my_rdata; \ - } while (0) - -/******************************************************* - * Mpool. - *******************************************************/ -/* - * File types for DB access methods. Negative numbers are reserved to DB. - */ -#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */ -#define DB_FTYPE_NOTSET 0 /* Don't call... */ -#define DB_LSN_OFF_NOTSET -1 /* Not yet set. */ -#define DB_CLEARLEN_NOTSET UINT32_MAX /* Not yet set. */ - -/* Structure used as the DB pgin/pgout pgcookie. */ -typedef struct __dbpginfo { - size_t db_pagesize; /* Underlying page size. */ - u_int32_t flags; /* Some DB_AM flags needed. */ - DBTYPE type; /* DB type */ -} DB_PGINFO; - -/******************************************************* - * Log. - *******************************************************/ -/* Initialize an LSN to 'zero'. */ -#define ZERO_LSN(LSN) do { \ - (LSN).file = 0; \ - (LSN).offset = 0; \ -} while (0) -#define IS_ZERO_LSN(LSN) ((LSN).file == 0 && (LSN).offset == 0) - -#define IS_INIT_LSN(LSN) ((LSN).file == 1 && (LSN).offset == 0) -#define INIT_LSN(LSN) do { \ - (LSN).file = 1; \ - (LSN).offset = 0; \ -} while (0) - -#define MAX_LSN(LSN) do { \ - (LSN).file = UINT32_MAX; \ - (LSN).offset = UINT32_MAX; \ -} while (0) -#define IS_MAX_LSN(LSN) \ - ((LSN).file == UINT32_MAX && (LSN).offset == UINT32_MAX) - -/* If logging is turned off, smash the lsn. 
*/ -#define LSN_NOT_LOGGED(LSN) do { \ - (LSN).file = 0; \ - (LSN).offset = 1; \ -} while (0) -#define IS_NOT_LOGGED_LSN(LSN) \ - ((LSN).file == 0 && (LSN).offset == 1) - -/******************************************************* - * Txn. - *******************************************************/ -#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT)) -#define NOWAIT_FLAG(txn) \ - ((txn) != NULL && F_ISSET((txn), TXN_NOWAIT) ? DB_LOCK_NOWAIT : 0) -#define IS_SUBTRANSACTION(txn) \ - ((txn) != NULL && (txn)->parent != NULL) - -/******************************************************* - * Crypto. - *******************************************************/ -#define DB_IV_BYTES 16 /* Bytes per IV */ -#define DB_MAC_KEY 20 /* Bytes per MAC checksum */ - -/******************************************************* - * Secondaries over RPC. - *******************************************************/ -#ifdef CONFIG_TEST -/* - * These are flags passed to DB->associate calls by the Tcl API if running - * over RPC. The RPC server will mask out these flags before making the real - * DB->associate call. - * - * These flags must coexist with the valid flags to DB->associate (currently - * DB_AUTO_COMMIT and DB_CREATE). DB_AUTO_COMMIT is in the group of - * high-order shared flags (0xff000000), and DB_CREATE is in the low-order - * group (0x00000fff), so we pick a range in between. - */ -#define DB_RPC2ND_MASK 0x00f00000 /* Reserved bits. */ - -#define DB_RPC2ND_REVERSEDATA 0x00100000 /* callback_n(0) _s_reversedata. */ -#define DB_RPC2ND_NOOP 0x00200000 /* callback_n(1) _s_noop */ -#define DB_RPC2ND_CONCATKEYDATA 0x00300000 /* callback_n(2) _s_concatkeydata */ -#define DB_RPC2ND_CONCATDATAKEY 0x00400000 /* callback_n(3) _s_concatdatakey */ -#define DB_RPC2ND_REVERSECONCAT 0x00500000 /* callback_n(4) _s_reverseconcat */ -#define DB_RPC2ND_TRUNCDATA 0x00600000 /* callback_n(5) _s_truncdata */ -#define DB_RPC2ND_CONSTANT 0x00700000 /* callback_n(6) _s_constant */ -#define DB_RPC2ND_GETZIP 0x00800000 /* sj_getzip */ -#define DB_RPC2ND_GETNAME 0x00900000 /* sj_getname */ -#endif - -/******************************************************* - * Forward structure declarations. - *******************************************************/ -struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO; -struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD; -struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST; -struct __vrfy_childinfo;typedef struct __vrfy_childinfo VRFY_CHILDINFO; -struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO; -struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO; - -#if defined(__cplusplus) -} -#endif - -/******************************************************* - * Remaining general DB includes. - *******************************************************/ -@db_int_def@ - -#include "dbinc/globals.h" -#include "dbinc/debug.h" -#include "dbinc/region.h" -#include "dbinc_auto/env_ext.h" -#include "dbinc/mutex.h" -#include "dbinc/os.h" -#include "dbinc/rep.h" -#include "dbinc_auto/clib_ext.h" -#include "dbinc_auto/common_ext.h" - -/******************************************************* - * Remaining Log. - * These need to be defined after the general includes - * because they need rep.h from above. - *******************************************************/ -/* - * Test if the environment is currently logging changes. 
If we're in recovery - * or we're a replication client, we don't need to log changes because they're - * already in the log, even though we have a fully functional log system. - */ -#define DBENV_LOGGING(dbenv) \ - (LOGGING_ON(dbenv) && !IS_REP_CLIENT(dbenv) && \ - (!IS_RECOVERING(dbenv))) - -/* - * Test if we need to log a change. By default, we don't log operations without - * associated transactions, unless DIAGNOSTIC, DEBUG_ROP or DEBUG_WOP are on. - * This is because we want to get log records for read/write operations, and, if - * we trying to debug something, more information is always better. - * - * The DBC_RECOVER flag is set when we're in abort, as well as during recovery; - * thus DBC_LOGGING may be false for a particular dbc even when DBENV_LOGGING - * is true. - * - * We explicitly use LOGGING_ON/IS_REP_CLIENT here because we don't want to pull - * in the log headers, which IS_RECOVERING (and thus DBENV_LOGGING) rely on, and - * because DBC_RECOVER should be set anytime IS_RECOVERING would be true. - * - * If we're not in recovery (master - doing an abort a client applying - * a txn), then a client's only path through here is on an internal - * operation, and a master's only path through here is a transactional - * operation. Detect if either is not the case. - */ -#if defined(DIAGNOSTIC) || defined(DEBUG_ROP) || defined(DEBUG_WOP) -#define DBC_LOGGING(dbc) __dbc_logging(dbc) -#else -#define DBC_LOGGING(dbc) \ - ((dbc)->txn != NULL && LOGGING_ON((dbc)->dbp->dbenv) && \ - !F_ISSET((dbc), DBC_RECOVER) && !IS_REP_CLIENT((dbc)->dbp->dbenv)) -#endif - -#endif /* !_DB_INTERNAL_H_ */ diff --git a/storage/bdb/dbinc/db_join.h b/storage/bdb/dbinc/db_join.h deleted file mode 100644 index ff43216479c..00000000000 --- a/storage/bdb/dbinc/db_join.h +++ /dev/null @@ -1,30 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1998-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_join.h,v 12.2 2005/06/16 20:21:47 bostic Exp $ - */ - -#ifndef _DB_JOIN_H_ -#define _DB_JOIN_H_ -/* - * Joins use a join cursor that is similar to a regular DB cursor except - * that it only supports c_get and c_close functionality. Also, it does - * not support the full range of flags for get. - */ -typedef struct __join_cursor { - u_int8_t *j_exhausted; /* Array of flags; is cursor i exhausted? */ - DBC **j_curslist; /* Array of cursors in the join: constant. */ - DBC **j_fdupcurs; /* Cursors w/ first instances of current dup. */ - DBC **j_workcurs; /* Scratch cursor copies to muck with. */ - DB *j_primary; /* Primary dbp. */ - DBT j_key; /* Used to do lookups. */ - DBT j_rdata; /* Memory used for data return. */ - u_int32_t j_ncurs; /* How many cursors do we have? */ -#define JOIN_RETRY 0x01 /* Error on primary get; re-return same key. */ - u_int32_t flags; -} JOIN_CURSOR; - -#endif /* !_DB_JOIN_H_ */ diff --git a/storage/bdb/dbinc/db_page.h b/storage/bdb/dbinc/db_page.h deleted file mode 100644 index 883b7d450fe..00000000000 --- a/storage/bdb/dbinc/db_page.h +++ /dev/null @@ -1,672 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_page.h,v 12.6 2005/08/08 14:52:30 bostic Exp $ - */ - -#ifndef _DB_PAGE_H_ -#define _DB_PAGE_H_ - -#if defined(__cplusplus) -extern "C" { -#endif - -/* - * DB page formats. - * - * !!! 
- * This implementation requires that values within the following structures - * NOT be padded -- note, ANSI C permits random padding within structures. - * If your compiler pads randomly you can just forget ever making DB run on - * your system. In addition, no data type can require larger alignment than - * its own size, e.g., a 4-byte data element may not require 8-byte alignment. - * - * Note that key/data lengths are often stored in db_indx_t's -- this is - * not accidental, nor does it limit the key/data size. If the key/data - * item fits on a page, it's guaranteed to be small enough to fit into a - * db_indx_t, and storing it in one saves space. - */ - -#define PGNO_INVALID 0 /* Invalid page number in any database. */ -#define PGNO_BASE_MD 0 /* Base database: metadata page number. */ - -/* Page types. */ -#define P_INVALID 0 /* Invalid page type. */ -#define __P_DUPLICATE 1 /* Duplicate. DEPRECATED in 3.1 */ -#define P_HASH 2 /* Hash. */ -#define P_IBTREE 3 /* Btree internal. */ -#define P_IRECNO 4 /* Recno internal. */ -#define P_LBTREE 5 /* Btree leaf. */ -#define P_LRECNO 6 /* Recno leaf. */ -#define P_OVERFLOW 7 /* Overflow. */ -#define P_HASHMETA 8 /* Hash metadata page. */ -#define P_BTREEMETA 9 /* Btree metadata page. */ -#define P_QAMMETA 10 /* Queue metadata page. */ -#define P_QAMDATA 11 /* Queue data page. */ -#define P_LDUP 12 /* Off-page duplicate leaf. */ -#define P_PAGETYPE_MAX 13 -/* Flag to __db_new */ -#define P_DONTEXTEND 0x8000 /* Don't allocate if there are no free pages. */ - -/* - * When we create pages in mpool, we ask mpool to clear some number of bytes - * in the header. This number must be at least as big as the regular page - * headers and cover enough of the btree and hash meta-data pages to obliterate - * the page type. - */ -#define DB_PAGE_DB_LEN 32 -#define DB_PAGE_QUEUE_LEN 0 - -/************************************************************************ - GENERIC METADATA PAGE HEADER - * - * !!! - * The magic and version numbers have to be in the same place in all versions - * of the metadata page as the application may not have upgraded the database. - ************************************************************************/ -typedef struct _dbmeta33 { - DB_LSN lsn; /* 00-07: LSN. */ - db_pgno_t pgno; /* 08-11: Current page number. */ - u_int32_t magic; /* 12-15: Magic number. */ - u_int32_t version; /* 16-19: Version. */ - u_int32_t pagesize; /* 20-23: Pagesize. */ - u_int8_t encrypt_alg; /* 24: Encryption algorithm. */ - u_int8_t type; /* 25: Page type. */ -#define DBMETA_CHKSUM 0x01 - u_int8_t metaflags; /* 26: Meta-only flags */ - u_int8_t unused1; /* 27: Unused. */ - u_int32_t free; /* 28-31: Free list page number. */ - db_pgno_t last_pgno; /* 32-35: Page number of last page in db. */ - u_int32_t unused3; /* 36-39: Unused. */ - u_int32_t key_count; /* 40-43: Cached key count. */ - u_int32_t record_count; /* 44-47: Cached record count. */ - u_int32_t flags; /* 48-51: Flags: unique to each AM. */ - /* 52-71: Unique file ID. */ - u_int8_t uid[DB_FILE_ID_LEN]; -} DBMETA33, DBMETA; - -/************************************************************************ - BTREE METADATA PAGE LAYOUT - ************************************************************************/ -typedef struct _btmeta33 { -#define BTM_DUP 0x001 /* Duplicates. */ -#define BTM_RECNO 0x002 /* Recno tree. */ -#define BTM_RECNUM 0x004 /* Btree: maintain record count. */ -#define BTM_FIXEDLEN 0x008 /* Recno: fixed length records. 
*/ -#define BTM_RENUMBER 0x010 /* Recno: renumber on insert/delete. */ -#define BTM_SUBDB 0x020 /* Subdatabases. */ -#define BTM_DUPSORT 0x040 /* Duplicates are sorted. */ -#define BTM_MASK 0x07f - DBMETA dbmeta; /* 00-71: Generic meta-data header. */ - - u_int32_t unused1; /* 72-75: Unused space. */ - u_int32_t minkey; /* 76-79: Btree: Minkey. */ - u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */ - u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */ - u_int32_t root; /* 88-91: Root page. */ - u_int32_t unused2[92]; /* 92-459: Unused space. */ - u_int32_t crypto_magic; /* 460-463: Crypto magic number */ - u_int32_t trash[3]; /* 464-475: Trash space - Do not use */ - u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */ - u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */ - - /* - * Minimum page size is 512. - */ -} BTMETA33, BTMETA; - -/************************************************************************ - HASH METADATA PAGE LAYOUT - ************************************************************************/ -typedef struct _hashmeta33 { -#define DB_HASH_DUP 0x01 /* Duplicates. */ -#define DB_HASH_SUBDB 0x02 /* Subdatabases. */ -#define DB_HASH_DUPSORT 0x04 /* Duplicates are sorted. */ - DBMETA dbmeta; /* 00-71: Generic meta-data page header. */ - - u_int32_t max_bucket; /* 72-75: ID of Maximum bucket in use */ - u_int32_t high_mask; /* 76-79: Modulo mask into table */ - u_int32_t low_mask; /* 80-83: Modulo mask into table lower half */ - u_int32_t ffactor; /* 84-87: Fill factor */ - u_int32_t nelem; /* 88-91: Number of keys in hash table */ - u_int32_t h_charkey; /* 92-95: Value of hash(CHARKEY) */ -#define NCACHED 32 /* number of spare points */ - /* 96-223: Spare pages for overflow */ - u_int32_t spares[NCACHED]; - u_int32_t unused[59]; /* 224-459: Unused space */ - u_int32_t crypto_magic; /* 460-463: Crypto magic number */ - u_int32_t trash[3]; /* 464-475: Trash space - Do not use */ - u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */ - u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */ - - /* - * Minimum page size is 512. - */ -} HMETA33, HMETA; - -/************************************************************************ - QUEUE METADATA PAGE LAYOUT - ************************************************************************/ -/* - * QAM Meta data page structure - * - */ -typedef struct _qmeta33 { - DBMETA dbmeta; /* 00-71: Generic meta-data header. */ - - u_int32_t first_recno; /* 72-75: First not deleted record. */ - u_int32_t cur_recno; /* 76-79: Next recno to be allocated. */ - u_int32_t re_len; /* 80-83: Fixed-length record length. */ - u_int32_t re_pad; /* 84-87: Fixed-length record pad. */ - u_int32_t rec_page; /* 88-91: Records Per Page. */ - u_int32_t page_ext; /* 92-95: Pages per extent */ - - u_int32_t unused[91]; /* 96-459: Unused space */ - u_int32_t crypto_magic; /* 460-463: Crypto magic number */ - u_int32_t trash[3]; /* 464-475: Trash space - Do not use */ - u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */ - u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */ - /* - * Minimum page size is 512. - */ -} QMETA33, QMETA; - -/* - * DBMETASIZE is a constant used by __db_file_setup and DB->verify - * as a buffer which is guaranteed to be larger than any possible - * metadata page size and smaller than any disk sector. 
- */ -#define DBMETASIZE 512 - -/************************************************************************ - BTREE/HASH MAIN PAGE LAYOUT - ************************************************************************/ -/* - * +-----------------------------------+ - * | lsn | pgno | prev pgno | - * +-----------------------------------+ - * | next pgno | entries | hf offset | - * +-----------------------------------+ - * | level | type | chksum | - * +-----------------------------------+ - * | iv | index | free --> | - * +-----------+-----------------------+ - * | F R E E A R E A | - * +-----------------------------------+ - * | <-- free | item | - * +-----------------------------------+ - * | item | item | item | - * +-----------------------------------+ - * - * sizeof(PAGE) == 26 bytes + possibly 20 bytes of checksum and possibly - * 16 bytes of IV (+ 2 bytes for alignment), and the following indices - * are guaranteed to be two-byte aligned. If we aren't doing crypto or - * checksumming the bytes are reclaimed for data storage. - * - * For hash and btree leaf pages, index items are paired, e.g., inp[0] is the - * key for inp[1]'s data. All other types of pages only contain single items. - */ -typedef struct __pg_chksum { - u_int8_t unused[2]; /* 26-27: For alignment */ - u_int8_t chksum[4]; /* 28-31: Checksum */ -} PG_CHKSUM; - -typedef struct __pg_crypto { - u_int8_t unused[2]; /* 26-27: For alignment */ - u_int8_t chksum[DB_MAC_KEY]; /* 28-47: Checksum */ - u_int8_t iv[DB_IV_BYTES]; /* 48-63: IV */ - /* !!! - * Must be 16-byte aligned for crypto - */ -} PG_CRYPTO; - -typedef struct _db_page { - DB_LSN lsn; /* 00-07: Log sequence number. */ - db_pgno_t pgno; /* 08-11: Current page number. */ - db_pgno_t prev_pgno; /* 12-15: Previous page number. */ - db_pgno_t next_pgno; /* 16-19: Next page number. */ - db_indx_t entries; /* 20-21: Number of items on the page. */ - db_indx_t hf_offset; /* 22-23: High free byte page offset. */ - - /* - * The btree levels are numbered from the leaf to the root, starting - * with 1, so the leaf is level 1, its parent is level 2, and so on. - * We maintain this level on all btree pages, but the only place that - * we actually need it is on the root page. It would not be difficult - * to hide the byte on the root page once it becomes an internal page, - * so we could get this byte back if we needed it for something else. - */ -#define LEAFLEVEL 1 -#define MAXBTREELEVEL 255 - u_int8_t level; /* 24: Btree tree level. */ - u_int8_t type; /* 25: Page type. */ -} PAGE; - -/* - * With many compilers sizeof(PAGE) == 28, while SIZEOF_PAGE == 26. - * We add in other things directly after the page header and need - * the SIZEOF_PAGE. When giving the sizeof(), many compilers will - * pad it out to the next 4-byte boundary. - */ -#define SIZEOF_PAGE 26 -/* - * !!! - * DB_AM_ENCRYPT always implies DB_AM_CHKSUM so that must come first. - */ -#define P_INP(dbp, pg) \ - ((db_indx_t *)((u_int8_t *)(pg) + SIZEOF_PAGE + \ - (F_ISSET((dbp), DB_AM_ENCRYPT) ? sizeof(PG_CRYPTO) : \ - (F_ISSET((dbp), DB_AM_CHKSUM) ? sizeof(PG_CHKSUM) : 0)))) - -#define P_IV(dbp, pg) \ - (F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) + \ - SIZEOF_PAGE + SSZA(PG_CRYPTO, iv)) \ - : NULL) - -#define P_CHKSUM(dbp, pg) \ - (F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) + \ - SIZEOF_PAGE + SSZA(PG_CRYPTO, chksum)) : \ - (F_ISSET((dbp), DB_AM_CHKSUM) ? ((u_int8_t *)(pg) + \ - SIZEOF_PAGE + SSZA(PG_CHKSUM, chksum)) \ - : NULL)) - -/* PAGE element macros. 
*/ -#define LSN(p) (((PAGE *)p)->lsn) -#define PGNO(p) (((PAGE *)p)->pgno) -#define PREV_PGNO(p) (((PAGE *)p)->prev_pgno) -#define NEXT_PGNO(p) (((PAGE *)p)->next_pgno) -#define NUM_ENT(p) (((PAGE *)p)->entries) -#define HOFFSET(p) (((PAGE *)p)->hf_offset) -#define LEVEL(p) (((PAGE *)p)->level) -#define TYPE(p) (((PAGE *)p)->type) - -/************************************************************************ - QUEUE MAIN PAGE LAYOUT - ************************************************************************/ -/* - * Sizes of page below. Used to reclaim space if not doing - * crypto or checksumming. If you change the QPAGE below you - * MUST adjust this too. - */ -#define QPAGE_NORMAL 28 -#define QPAGE_CHKSUM 48 -#define QPAGE_SEC 64 - -typedef struct _qpage { - DB_LSN lsn; /* 00-07: Log sequence number. */ - db_pgno_t pgno; /* 08-11: Current page number. */ - u_int32_t unused0[3]; /* 12-23: Unused. */ - u_int8_t unused1[1]; /* 24: Unused. */ - u_int8_t type; /* 25: Page type. */ - u_int8_t unused2[2]; /* 26-27: Unused. */ - u_int8_t chksum[DB_MAC_KEY]; /* 28-47: Checksum */ - u_int8_t iv[DB_IV_BYTES]; /* 48-63: IV */ -} QPAGE; - -#define QPAGE_SZ(dbp) \ - (F_ISSET((dbp), DB_AM_ENCRYPT) ? QPAGE_SEC : \ - F_ISSET((dbp), DB_AM_CHKSUM) ? QPAGE_CHKSUM : QPAGE_NORMAL) -/* - * !!! - * The next_pgno and prev_pgno fields are not maintained for btree and recno - * internal pages. Doing so only provides a minor performance improvement, - * it's hard to do when deleting internal pages, and it increases the chance - * of deadlock during deletes and splits because we have to re-link pages at - * more than the leaf level. - * - * !!! - * The btree/recno access method needs db_recno_t bytes of space on the root - * page to specify how many records are stored in the tree. (The alternative - * is to store the number of records in the meta-data page, which will create - * a second hot spot in trees being actively modified, or recalculate it from - * the BINTERNAL fields on each access.) Overload the PREV_PGNO field. - */ -#define RE_NREC(p) \ - ((TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO) ? PREV_PGNO(p) : \ - (db_pgno_t)(TYPE(p) == P_LBTREE ? NUM_ENT(p) / 2 : NUM_ENT(p))) -#define RE_NREC_ADJ(p, adj) \ - PREV_PGNO(p) += adj; -#define RE_NREC_SET(p, num) \ - PREV_PGNO(p) = (num); - -/* - * Initialize a page. - * - * !!! - * Don't modify the page's LSN, code depends on it being unchanged after a - * P_INIT call. - */ -#define P_INIT(pg, pg_size, n, pg_prev, pg_next, btl, pg_type) do { \ - PGNO(pg) = (n); \ - PREV_PGNO(pg) = (pg_prev); \ - NEXT_PGNO(pg) = (pg_next); \ - NUM_ENT(pg) = (0); \ - HOFFSET(pg) = (db_indx_t)(pg_size); \ - LEVEL(pg) = (btl); \ - TYPE(pg) = (pg_type); \ -} while (0) - -/* Page header length (offset to first index). */ -#define P_OVERHEAD(dbp) P_TO_UINT16(P_INP(dbp, 0)) - -/* First free byte. */ -#define LOFFSET(dbp, pg) \ - (P_OVERHEAD(dbp) + NUM_ENT(pg) * sizeof(db_indx_t)) - -/* Free space on a regular page. */ -#define P_FREESPACE(dbp, pg) (HOFFSET(pg) - LOFFSET(dbp, pg)) - -/* Get a pointer to the bytes at a specific index. */ -#define P_ENTRY(dbp, pg, indx) ((u_int8_t *)pg + P_INP(dbp, pg)[indx]) - -/************************************************************************ - OVERFLOW PAGE LAYOUT - ************************************************************************/ - -/* - * Overflow items are referenced by HOFFPAGE and BOVERFLOW structures, which - * store a page number (the first page of the overflow item) and a length - * (the total length of the overflow item). 
The overflow item consists of - * some number of overflow pages, linked by the next_pgno field of the page. - * A next_pgno field of PGNO_INVALID flags the end of the overflow item. - * - * Overflow page overloads: - * The amount of overflow data stored on each page is stored in the - * hf_offset field. - * - * The implementation reference counts overflow items as it's possible - * for them to be promoted onto btree internal pages. The reference - * count is stored in the entries field. - */ -#define OV_LEN(p) (((PAGE *)p)->hf_offset) -#define OV_REF(p) (((PAGE *)p)->entries) - -/* Maximum number of bytes that you can put on an overflow page. */ -#define P_MAXSPACE(dbp, psize) ((psize) - P_OVERHEAD(dbp)) - -/* Free space on an overflow page. */ -#define P_OVFLSPACE(dbp, psize, pg) (P_MAXSPACE(dbp, psize) - HOFFSET(pg)) - -/************************************************************************ - HASH PAGE LAYOUT - ************************************************************************/ - -/* Each index references a group of bytes on the page. */ -#define H_KEYDATA 1 /* Key/data item. */ -#define H_DUPLICATE 2 /* Duplicate key/data item. */ -#define H_OFFPAGE 3 /* Overflow key/data item. */ -#define H_OFFDUP 4 /* Overflow page of duplicates. */ - -/* - * !!! - * Items on hash pages are (potentially) unaligned, so we can never cast the - * (page + offset) pointer to an HKEYDATA, HOFFPAGE or HOFFDUP structure, as - * we do with B+tree on-page structures. Because we frequently want the type - * field, it requires no alignment, and it's in the same location in all three - * structures, there's a pair of macros. - */ -#define HPAGE_PTYPE(p) (*(u_int8_t *)p) -#define HPAGE_TYPE(dbp, pg, indx) (*P_ENTRY(dbp, pg, indx)) - -/* - * The first and second types are H_KEYDATA and H_DUPLICATE, represented - * by the HKEYDATA structure: - * - * +-----------------------------------+ - * | type | key/data ... | - * +-----------------------------------+ - * - * For duplicates, the data field encodes duplicate elements in the data - * field: - * - * +---------------------------------------------------------------+ - * | type | len1 | element1 | len1 | len2 | element2 | len2 | - * +---------------------------------------------------------------+ - * - * Thus, by keeping track of the offset in the element, we can do both - * backward and forward traversal. - */ -typedef struct _hkeydata { - u_int8_t type; /* 00: Page type. */ - u_int8_t data[1]; /* Variable length key/data item. */ -} HKEYDATA; -#define HKEYDATA_DATA(p) (((u_int8_t *)p) + SSZA(HKEYDATA, data)) - -/* - * The length of any HKEYDATA item. Note that indx is an element index, - * not a PAIR index. - */ -#define LEN_HITEM(dbp, pg, pgsize, indx) \ - (((indx) == 0 ? (pgsize) : \ - (P_INP(dbp, pg)[(indx) - 1])) - (P_INP(dbp, pg)[indx])) - -#define LEN_HKEYDATA(dbp, pg, psize, indx) \ - (db_indx_t)(LEN_HITEM(dbp, pg, psize, indx) - HKEYDATA_SIZE(0)) - -/* - * Page space required to add a new HKEYDATA item to the page, with and - * without the index value. - */ -#define HKEYDATA_SIZE(len) \ - ((len) + SSZA(HKEYDATA, data)) -#define HKEYDATA_PSIZE(len) \ - (HKEYDATA_SIZE(len) + sizeof(db_indx_t)) - -/* Put a HKEYDATA item at the location referenced by a page entry. */ -#define PUT_HKEYDATA(pe, kd, len, type) { \ - ((HKEYDATA *)pe)->type = type; \ - memcpy((u_int8_t *)pe + sizeof(u_int8_t), kd, len); \ -} - -/* - * Macros the describe the page layout in terms of key-data pairs. 
- */ -#define H_NUMPAIRS(pg) (NUM_ENT(pg) / 2) -#define H_KEYINDEX(indx) (indx) -#define H_DATAINDEX(indx) ((indx) + 1) -#define H_PAIRKEY(dbp, pg, indx) P_ENTRY(dbp, pg, H_KEYINDEX(indx)) -#define H_PAIRDATA(dbp, pg, indx) P_ENTRY(dbp, pg, H_DATAINDEX(indx)) -#define H_PAIRSIZE(dbp, pg, psize, indx) \ - (LEN_HITEM(dbp, pg, psize, H_KEYINDEX(indx)) + \ - LEN_HITEM(dbp, pg, psize, H_DATAINDEX(indx))) -#define LEN_HDATA(dbp, p, psize, indx) \ - LEN_HKEYDATA(dbp, p, psize, H_DATAINDEX(indx)) -#define LEN_HKEY(dbp, p, psize, indx) \ - LEN_HKEYDATA(dbp, p, psize, H_KEYINDEX(indx)) - -/* - * The third type is the H_OFFPAGE, represented by the HOFFPAGE structure: - */ -typedef struct _hoffpage { - u_int8_t type; /* 00: Page type and delete flag. */ - u_int8_t unused[3]; /* 01-03: Padding, unused. */ - db_pgno_t pgno; /* 04-07: Offpage page number. */ - u_int32_t tlen; /* 08-11: Total length of item. */ -} HOFFPAGE; - -#define HOFFPAGE_PGNO(p) (((u_int8_t *)p) + SSZ(HOFFPAGE, pgno)) -#define HOFFPAGE_TLEN(p) (((u_int8_t *)p) + SSZ(HOFFPAGE, tlen)) - -/* - * Page space required to add a new HOFFPAGE item to the page, with and - * without the index value. - */ -#define HOFFPAGE_SIZE (sizeof(HOFFPAGE)) -#define HOFFPAGE_PSIZE (HOFFPAGE_SIZE + sizeof(db_indx_t)) - -/* - * The fourth type is H_OFFDUP represented by the HOFFDUP structure: - */ -typedef struct _hoffdup { - u_int8_t type; /* 00: Page type and delete flag. */ - u_int8_t unused[3]; /* 01-03: Padding, unused. */ - db_pgno_t pgno; /* 04-07: Offpage page number. */ -} HOFFDUP; -#define HOFFDUP_PGNO(p) (((u_int8_t *)p) + SSZ(HOFFDUP, pgno)) - -/* - * Page space required to add a new HOFFDUP item to the page, with and - * without the index value. - */ -#define HOFFDUP_SIZE (sizeof(HOFFDUP)) - -/************************************************************************ - BTREE PAGE LAYOUT - ************************************************************************/ - -/* Each index references a group of bytes on the page. */ -#define B_KEYDATA 1 /* Key/data item. */ -#define B_DUPLICATE 2 /* Duplicate key/data item. */ -#define B_OVERFLOW 3 /* Overflow key/data item. */ - -/* - * We have to store a deleted entry flag in the page. The reason is complex, - * but the simple version is that we can't delete on-page items referenced by - * a cursor -- the return order of subsequent insertions might be wrong. The - * delete flag is an overload of the top bit of the type byte. - */ -#define B_DELETE (0x80) -#define B_DCLR(t) (t) &= ~B_DELETE -#define B_DSET(t) (t) |= B_DELETE -#define B_DISSET(t) ((t) & B_DELETE) - -#define B_TYPE(t) ((t) & ~B_DELETE) -#define B_TSET(t, type, deleted) { \ - (t) = (type); \ - if (deleted) \ - B_DSET(t); \ -} - -/* - * The first type is B_KEYDATA, represented by the BKEYDATA structure: - */ -typedef struct _bkeydata { - db_indx_t len; /* 00-01: Key/data item length. */ - u_int8_t type; /* 02: Page type AND DELETE FLAG. */ - u_int8_t data[1]; /* Variable length key/data item. */ -} BKEYDATA; - -/* Get a BKEYDATA item for a specific index. */ -#define GET_BKEYDATA(dbp, pg, indx) \ - ((BKEYDATA *)P_ENTRY(dbp, pg, indx)) - -/* - * Page space required to add a new BKEYDATA item to the page, with and - * without the index value. The (u_int16_t) cast avoids warnings: DB_ALIGN - * casts to uintmax_t, the cast converts it to a small integral type so we - * don't get complaints when we assign the final result to an integral type - * smaller than uintmax_t. 
- */ -#define BKEYDATA_SIZE(len) \ - (u_int16_t)DB_ALIGN((len) + SSZA(BKEYDATA, data), sizeof(u_int32_t)) -#define BKEYDATA_PSIZE(len) \ - (BKEYDATA_SIZE(len) + sizeof(db_indx_t)) - -/* - * The second and third types are B_DUPLICATE and B_OVERFLOW, represented - * by the BOVERFLOW structure. - */ -typedef struct _boverflow { - db_indx_t unused1; /* 00-01: Padding, unused. */ - u_int8_t type; /* 02: Page type AND DELETE FLAG. */ - u_int8_t unused2; /* 03: Padding, unused. */ - db_pgno_t pgno; /* 04-07: Next page number. */ - u_int32_t tlen; /* 08-11: Total length of item. */ -} BOVERFLOW; - -/* Get a BOVERFLOW item for a specific index. */ -#define GET_BOVERFLOW(dbp, pg, indx) \ - ((BOVERFLOW *)P_ENTRY(dbp, pg, indx)) - -/* - * Page space required to add a new BOVERFLOW item to the page, with and - * without the index value. - */ -#define BOVERFLOW_SIZE \ - ((u_int16_t)DB_ALIGN(sizeof(BOVERFLOW), sizeof(u_int32_t))) -#define BOVERFLOW_PSIZE \ - (BOVERFLOW_SIZE + sizeof(db_indx_t)) - -#define BITEM_SIZE(bk) \ - (B_TYPE((bk)->type) != B_KEYDATA ? BOVERFLOW_SIZE : \ - BKEYDATA_SIZE((bk)->len)) - -#define BITEM_PSIZE(bk) \ - (B_TYPE((bk)->type) != B_KEYDATA ? BOVERFLOW_PSIZE : \ - BKEYDATA_PSIZE((bk)->len)) - -/* - * Btree leaf and hash page layouts group indices in sets of two, one for the - * key and one for the data. Everything else does it in sets of one to save - * space. Use the following macros so that it's real obvious what's going on. - */ -#define O_INDX 1 -#define P_INDX 2 - -/************************************************************************ - BTREE INTERNAL PAGE LAYOUT - ************************************************************************/ - -/* - * Btree internal entry. - */ -typedef struct _binternal { - db_indx_t len; /* 00-01: Key/data item length. */ - u_int8_t type; /* 02: Page type AND DELETE FLAG. */ - u_int8_t unused; /* 03: Padding, unused. */ - db_pgno_t pgno; /* 04-07: Page number of referenced page. */ - db_recno_t nrecs; /* 08-11: Subtree record count. */ - u_int8_t data[1]; /* Variable length key item. */ -} BINTERNAL; - -/* Get a BINTERNAL item for a specific index. */ -#define GET_BINTERNAL(dbp, pg, indx) \ - ((BINTERNAL *)P_ENTRY(dbp, pg, indx)) - -/* - * Page space required to add a new BINTERNAL item to the page, with and - * without the index value. - */ -#define BINTERNAL_SIZE(len) \ - (u_int16_t)DB_ALIGN((len) + SSZA(BINTERNAL, data), sizeof(u_int32_t)) -#define BINTERNAL_PSIZE(len) \ - (BINTERNAL_SIZE(len) + sizeof(db_indx_t)) - -/************************************************************************ - RECNO INTERNAL PAGE LAYOUT - ************************************************************************/ - -/* - * The recno internal entry. - */ -typedef struct _rinternal { - db_pgno_t pgno; /* 00-03: Page number of referenced page. */ - db_recno_t nrecs; /* 04-07: Subtree record count. */ -} RINTERNAL; - -/* Get a RINTERNAL item for a specific index. */ -#define GET_RINTERNAL(dbp, pg, indx) \ - ((RINTERNAL *)P_ENTRY(dbp, pg, indx)) - -/* - * Page space required to add a new RINTERNAL item to the page, with and - * without the index value. 
- */ -#define RINTERNAL_SIZE \ - (u_int16_t)DB_ALIGN(sizeof(RINTERNAL), sizeof(u_int32_t)) -#define RINTERNAL_PSIZE \ - (RINTERNAL_SIZE + sizeof(db_indx_t)) - -struct pglist { - db_pgno_t pgno; - DB_LSN lsn; -}; - -#if defined(__cplusplus) -} -#endif - -#endif /* !_DB_PAGE_H_ */ diff --git a/storage/bdb/dbinc/db_server_int.h b/storage/bdb/dbinc/db_server_int.h deleted file mode 100644 index aee9ad194c7..00000000000 --- a/storage/bdb/dbinc/db_server_int.h +++ /dev/null @@ -1,153 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_server_int.h,v 12.4 2005/08/08 14:52:30 bostic Exp $ - */ - -#ifndef _DB_SERVER_INT_H_ -#define _DB_SERVER_INT_H_ - -#define DB_SERVER_TIMEOUT 300 /* 5 minutes */ -#define DB_SERVER_MAXTIMEOUT 1200 /* 20 minutes */ -#define DB_SERVER_IDLETIMEOUT 86400 /* 1 day */ - -/* - * Ignore/mask off the following env->open flags: - * Most are illegal for a client to specify as they would control - * server resource usage. We will just ignore them. - * DB_LOCKDOWN - * DB_PRIVATE - * DB_RECOVER - * DB_RECOVER_FATAL - * DB_SYSTEM_MEM - * DB_USE_ENVIRON, DB_USE_ENVIRON_ROOT - handled on client - */ -#define DB_SERVER_FLAGMASK ( \ -DB_LOCKDOWN | DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL | \ -DB_SYSTEM_MEM | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT) - -#define CT_CURSOR 0x001 /* Cursor */ -#define CT_DB 0x002 /* Database */ -#define CT_ENV 0x004 /* Env */ -#define CT_TXN 0x008 /* Txn */ - -#define CT_JOIN 0x10000000 /* Join cursor component */ -#define CT_JOINCUR 0x20000000 /* Join cursor */ - -typedef struct home_entry home_entry; -struct home_entry { - LIST_ENTRY(home_entry) entries; - char *home; - char *dir; - char *name; - char *passwd; -}; - -/* - * Data needed for sharing handles. - * To share an env handle, on the open call, they must have matching - * env flags, and matching set_flags. - * - * To share a db handle on the open call, the db, subdb and flags must - * all be the same. - */ -#define DB_SERVER_ENVFLAGS ( \ -DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | \ -DB_INIT_TXN | DB_JOINENV) - -#define DB_SERVER_DBFLAGS (DB_NOMMAP | DB_RDONLY | DB_READ_UNCOMMITTED) -#define DB_SERVER_DBNOSHARE (DB_EXCL | DB_TRUNCATE) - -typedef struct ct_envdata ct_envdata; -typedef struct ct_dbdata ct_dbdata; -struct ct_envdata { - u_int32_t envflags; - u_int32_t onflags; - u_int32_t offflags; - home_entry *home; -}; - -struct ct_dbdata { - u_int32_t dbflags; - u_int32_t setflags; - char *db; - char *subdb; - DBTYPE type; -}; - -/* - * We maintain an activity timestamp for each handle. However, we - * set it to point, possibly to the ct_active field of its own handle - * or it may point to the ct_active field of a parent. In the case - * of nested transactions and any cursors within transactions it must - * point to the ct_active field of the ultimate parent of the transaction - * no matter how deeply it is nested. 
- */ -typedef struct ct_entry ct_entry; -struct ct_entry { - LIST_ENTRY(ct_entry) entries; /* List of entries */ - union { -#ifdef __cplusplus - DbEnv *envp; /* H_ENV */ - DbTxn *txnp; /* H_TXN */ - Db *dbp; /* H_DB */ - Dbc *dbc; /* H_CURSOR */ -#else - DB_ENV *envp; /* H_ENV */ - DB_TXN *txnp; /* H_TXN */ - DB *dbp; /* H_DB */ - DBC *dbc; /* H_CURSOR */ -#endif - void *anyp; - } handle_u; - union { /* Private data per type */ - ct_envdata envdp; /* Env info */ - ct_dbdata dbdp; /* Db info */ - } private_u; - long ct_id; /* Client ID */ - long *ct_activep; /* Activity timestamp pointer*/ - long *ct_origp; /* Original timestamp pointer*/ - long ct_active; /* Activity timestamp */ - long ct_timeout; /* Resource timeout */ - long ct_idle; /* Idle timeout */ - u_int32_t ct_refcount; /* Ref count for sharing */ - u_int32_t ct_type; /* This entry's type */ - struct ct_entry *ct_parent; /* Its parent */ - struct ct_entry *ct_envparent; /* Its environment */ -}; - -#define ct_envp handle_u.envp -#define ct_txnp handle_u.txnp -#define ct_dbp handle_u.dbp -#define ct_dbc handle_u.dbc -#define ct_anyp handle_u.anyp - -#define ct_envdp private_u.envdp -#define ct_dbdp private_u.dbdp - -extern int __dbsrv_verbose; - -/* - * Get ctp and activate it. - * Assumes local variable 'replyp'. - * NOTE: May 'return' from macro. - */ -#define ACTIVATE_CTP(ctp, id, type) { \ - (ctp) = get_tableent(id); \ - if ((ctp) == NULL) { \ - replyp->status = DB_NOSERVER_ID;\ - return; \ - } \ - DB_ASSERT((ctp)->ct_type & (type)); \ - __dbsrv_active(ctp); \ -} - -#define FREE_IF_CHANGED(dbenv, p, orig) do { \ - if ((p) != NULL && (p) != (orig)) \ - __os_ufree((dbenv), (p)); \ -} while (0) - -#endif /* !_DB_SERVER_INT_H_ */ diff --git a/storage/bdb/dbinc/db_shash.h b/storage/bdb/dbinc/db_shash.h deleted file mode 100644 index 89c544fcc91..00000000000 --- a/storage/bdb/dbinc/db_shash.h +++ /dev/null @@ -1,81 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_shash.h,v 12.1 2005/06/16 20:21:47 bostic Exp $ - */ - -#ifndef _DB_SHASH_H_ -#define _DB_SHASH_H_ - -/* Hash Headers */ -typedef SH_TAILQ_HEAD(__hash_head) DB_HASHTAB; - -/* - * HASHLOOKUP -- - * - * Look up something in a shared memory hash table. The "elt" argument - * should be a key, and cmp_func must know how to compare a key to whatever - * structure it is that appears in the hash table. The comparison function - * - * begin: address of the beginning of the hash table. - * ndx: index into table for this item. - * type: the structure type of the elements that are linked in each bucket. - * field: the name of the field by which the "type" structures are linked. - * elt: the item for which we are searching in the hash table. - * res: the variable into which we'll store the element if we find it. - * cmp: called as: cmp(lookup_elt, table_elt). - * - * If the element is not in the hash table, this macro exits with res set - * to NULL. - */ -#define HASHLOOKUP(begin, ndx, type, field, elt, res, cmp) do { \ - DB_HASHTAB *__bucket; \ - \ - __bucket = &begin[ndx]; \ - for (res = SH_TAILQ_FIRST(__bucket, type); \ - res != NULL; res = SH_TAILQ_NEXT(res, field, type)) \ - if (cmp(elt, res)) \ - break; \ -} while (0) - -/* - * HASHINSERT -- - * - * Insert a new entry into the hash table. This assumes that you already - * have the bucket locked and that lookup has failed; don't call it if you - * haven't already called HASHLOOKUP. 
If you do, you could get duplicate - * entries. - * - * begin: the beginning address of the hash table. - * ndx: the index for this element. - * type: the structure type of the elements that are linked in each bucket. - * field: the name of the field by which the "type" structures are linked. - * elt: the item to be inserted. - */ -#define HASHINSERT(begin, ndx, type, field, elt) do { \ - DB_HASHTAB *__bucket; \ - \ - __bucket = &begin[ndx]; \ - SH_TAILQ_INSERT_HEAD(__bucket, elt, field, type); \ -} while (0) - -/* - * HASHREMOVE_EL -- - * Given the object "obj" in the table, remove it. - * - * begin: address of the beginning of the hash table. - * ndx: index into hash table of where this element belongs. - * type: the structure type of the elements that are linked in each bucket. - * field: the name of the field by which the "type" structures are linked. - * obj: the object in the table that we with to delete. - */ -#define HASHREMOVE_EL(begin, ndx, type, field, obj) { \ - DB_HASHTAB *__bucket; \ - \ - __bucket = &begin[ndx]; \ - SH_TAILQ_REMOVE(__bucket, obj, field, type); \ -} -#endif /* !_DB_SHASH_H_ */ diff --git a/storage/bdb/dbinc/db_swap.h b/storage/bdb/dbinc/db_swap.h deleted file mode 100644 index 6350ae6a1b2..00000000000 --- a/storage/bdb/dbinc/db_swap.h +++ /dev/null @@ -1,170 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: db_swap.h,v 12.3 2005/06/16 20:21:47 bostic Exp $ - */ - -#ifndef _DB_SWAP_H_ -#define _DB_SWAP_H_ - -/* - * Little endian <==> big endian 64-bit swap macros. 
- * M_64_SWAP swap a memory location - * P_64_COPY copy potentially unaligned 4 byte quantities - * P_64_SWAP swap a referenced memory location - */ -#undef M_64_SWAP -#define M_64_SWAP(a) { \ - u_int64_t _tmp; \ - _tmp = (u_int64_t)a; \ - ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[7]; \ - ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[6]; \ - ((u_int8_t *)&a)[2] = ((u_int8_t *)&_tmp)[5]; \ - ((u_int8_t *)&a)[3] = ((u_int8_t *)&_tmp)[4]; \ - ((u_int8_t *)&a)[4] = ((u_int8_t *)&_tmp)[3]; \ - ((u_int8_t *)&a)[5] = ((u_int8_t *)&_tmp)[2]; \ - ((u_int8_t *)&a)[6] = ((u_int8_t *)&_tmp)[1]; \ - ((u_int8_t *)&a)[7] = ((u_int8_t *)&_tmp)[0]; \ -} -#undef P_64_COPY -#define P_64_COPY(a, b) { \ - ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \ - ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \ - ((u_int8_t *)b)[2] = ((u_int8_t *)a)[2]; \ - ((u_int8_t *)b)[3] = ((u_int8_t *)a)[3]; \ - ((u_int8_t *)b)[4] = ((u_int8_t *)a)[4]; \ - ((u_int8_t *)b)[5] = ((u_int8_t *)a)[5]; \ - ((u_int8_t *)b)[6] = ((u_int8_t *)a)[6]; \ - ((u_int8_t *)b)[7] = ((u_int8_t *)a)[7]; \ -} -#undef P_64_SWAP -#define P_64_SWAP(a) { \ - u_int64_t _tmp; \ - P_64_COPY(a, &_tmp); \ - ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[7]; \ - ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[6]; \ - ((u_int8_t *)a)[2] = ((u_int8_t *)&_tmp)[5]; \ - ((u_int8_t *)a)[3] = ((u_int8_t *)&_tmp)[4]; \ - ((u_int8_t *)a)[4] = ((u_int8_t *)&_tmp)[3]; \ - ((u_int8_t *)a)[5] = ((u_int8_t *)&_tmp)[2]; \ - ((u_int8_t *)a)[6] = ((u_int8_t *)&_tmp)[1]; \ - ((u_int8_t *)a)[7] = ((u_int8_t *)&_tmp)[0]; \ -} - -/* - * Little endian <==> big endian 32-bit swap macros. - * M_32_SWAP swap a memory location - * P_32_COPY copy potentially unaligned 4 byte quantities - * P_32_SWAP swap a referenced memory location - */ -#undef M_32_SWAP -#define M_32_SWAP(a) { \ - u_int32_t _tmp; \ - _tmp = (u_int32_t)a; \ - ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[3]; \ - ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[2]; \ - ((u_int8_t *)&a)[2] = ((u_int8_t *)&_tmp)[1]; \ - ((u_int8_t *)&a)[3] = ((u_int8_t *)&_tmp)[0]; \ -} -#undef P_32_COPY -#define P_32_COPY(a, b) { \ - ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \ - ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \ - ((u_int8_t *)b)[2] = ((u_int8_t *)a)[2]; \ - ((u_int8_t *)b)[3] = ((u_int8_t *)a)[3]; \ -} -#undef P_32_SWAP -#define P_32_SWAP(a) { \ - u_int32_t _tmp; \ - P_32_COPY(a, &_tmp); \ - ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[3]; \ - ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[2]; \ - ((u_int8_t *)a)[2] = ((u_int8_t *)&_tmp)[1]; \ - ((u_int8_t *)a)[3] = ((u_int8_t *)&_tmp)[0]; \ -} - -/* - * Little endian <==> big endian 16-bit swap macros. 
- * M_16_SWAP swap a memory location - * P_16_COPY copy potentially unaligned 2 byte quantities - * P_16_SWAP swap a referenced memory location - */ -#undef M_16_SWAP -#define M_16_SWAP(a) { \ - u_int16_t _tmp; \ - _tmp = (u_int16_t)a; \ - ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[1]; \ - ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[0]; \ -} -#undef P_16_COPY -#define P_16_COPY(a, b) { \ - ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \ - ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \ -} -#undef P_16_SWAP -#define P_16_SWAP(a) { \ - u_int16_t _tmp; \ - P_16_COPY(a, &_tmp); \ - ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[1]; \ - ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[0]; \ -} - -#undef SWAP32 -#define SWAP32(p) { \ - P_32_SWAP(p); \ - (p) += sizeof(u_int32_t); \ -} -#undef SWAP16 -#define SWAP16(p) { \ - P_16_SWAP(p); \ - (p) += sizeof(u_int16_t); \ -} - -/* - * Berkeley DB has local versions of htonl() and ntohl() that operate on - * pointers to the right size memory locations; the portability magic for - * finding the real system functions isn't worth the effort. - */ -#undef DB_HTONL -#define DB_HTONL(p) do { \ - if (!__db_isbigendian()) \ - P_32_SWAP(p); \ -} while (0) -#undef DB_NTOHL -#define DB_NTOHL(p) do { \ - if (!__db_isbigendian()) \ - P_32_SWAP(p); \ -} while (0) - -#endif /* !_DB_SWAP_H_ */ diff --git a/storage/bdb/dbinc/db_upgrade.h b/storage/bdb/dbinc/db_upgrade.h deleted file mode 100644 index e4081e9b6ef..00000000000 --- a/storage/bdb/dbinc/db_upgrade.h +++ /dev/null @@ -1,242 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_upgrade.h,v 12.1 2005/06/16 20:21:47 bostic Exp $ - */ - -#ifndef _DB_UPGRADE_H_ -#define _DB_UPGRADE_H_ - -/* - * This file defines the metadata pages from the previous release. - * These structures are only used to upgrade old versions of databases. - */ - -/* Structures from the 3.1 release */ -typedef struct _dbmeta31 { - DB_LSN lsn; /* 00-07: LSN. */ - db_pgno_t pgno; /* 08-11: Current page number. */ - u_int32_t magic; /* 12-15: Magic number. */ - u_int32_t version; /* 16-19: Version. */ - u_int32_t pagesize; /* 20-23: Pagesize. */ - u_int8_t unused1[1]; /* 24: Unused. */ - u_int8_t type; /* 25: Page type. */ - u_int8_t unused2[2]; /* 26-27: Unused. */ - u_int32_t free; /* 28-31: Free list page number. */ - DB_LSN unused3; /* 36-39: Unused. */ - u_int32_t key_count; /* 40-43: Cached key count. */ - u_int32_t record_count; /* 44-47: Cached record count. */ - u_int32_t flags; /* 48-51: Flags: unique to each AM. */ - /* 52-71: Unique file ID. */ - u_int8_t uid[DB_FILE_ID_LEN]; -} DBMETA31; - -typedef struct _btmeta31 { - DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */ - - u_int32_t maxkey; /* 72-75: Btree: Maxkey. */ - u_int32_t minkey; /* 76-79: Btree: Minkey. */ - u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */ - u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */ - u_int32_t root; /* 88-92: Root page. */ - - /* - * Minimum page size is 128. - */ -} BTMETA31; - -/************************************************************************ - HASH METADATA PAGE LAYOUT - ************************************************************************/ -typedef struct _hashmeta31 { - DBMETA31 dbmeta; /* 00-71: Generic meta-data page header. 
*/ - - u_int32_t max_bucket; /* 72-75: ID of Maximum bucket in use */ - u_int32_t high_mask; /* 76-79: Modulo mask into table */ - u_int32_t low_mask; /* 80-83: Modulo mask into table lower half */ - u_int32_t ffactor; /* 84-87: Fill factor */ - u_int32_t nelem; /* 88-91: Number of keys in hash table */ - u_int32_t h_charkey; /* 92-95: Value of hash(CHARKEY) */ -#define NCACHED 32 /* number of spare points */ - /* 96-223: Spare pages for overflow */ - u_int32_t spares[NCACHED]; - - /* - * Minimum page size is 256. - */ -} HMETA31; - -/* - * QAM Meta data page structure - * - */ -typedef struct _qmeta31 { - DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */ - - u_int32_t start; /* 72-75: Start offset. */ - u_int32_t first_recno; /* 76-79: First not deleted record. */ - u_int32_t cur_recno; /* 80-83: Last recno allocated. */ - u_int32_t re_len; /* 84-87: Fixed-length record length. */ - u_int32_t re_pad; /* 88-91: Fixed-length record pad. */ - u_int32_t rec_page; /* 92-95: Records Per Page. */ - - /* - * Minimum page size is 128. - */ -} QMETA31; -/* Structures from the 3.2 release */ -typedef struct _qmeta32 { - DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */ - - u_int32_t first_recno; /* 72-75: First not deleted record. */ - u_int32_t cur_recno; /* 76-79: Last recno allocated. */ - u_int32_t re_len; /* 80-83: Fixed-length record length. */ - u_int32_t re_pad; /* 84-87: Fixed-length record pad. */ - u_int32_t rec_page; /* 88-91: Records Per Page. */ - u_int32_t page_ext; /* 92-95: Pages per extent */ - - /* - * Minimum page size is 128. - */ -} QMETA32; - -/* Structures from the 3.0 release */ - -typedef struct _dbmeta30 { - DB_LSN lsn; /* 00-07: LSN. */ - db_pgno_t pgno; /* 08-11: Current page number. */ - u_int32_t magic; /* 12-15: Magic number. */ - u_int32_t version; /* 16-19: Version. */ - u_int32_t pagesize; /* 20-23: Pagesize. */ - u_int8_t unused1[1]; /* 24: Unused. */ - u_int8_t type; /* 25: Page type. */ - u_int8_t unused2[2]; /* 26-27: Unused. */ - u_int32_t free; /* 28-31: Free list page number. */ - u_int32_t flags; /* 32-35: Flags: unique to each AM. */ - /* 36-55: Unique file ID. */ - u_int8_t uid[DB_FILE_ID_LEN]; -} DBMETA30; - -/************************************************************************ - BTREE METADATA PAGE LAYOUT - ************************************************************************/ -typedef struct _btmeta30 { - DBMETA30 dbmeta; /* 00-55: Generic meta-data header. */ - - u_int32_t maxkey; /* 56-59: Btree: Maxkey. */ - u_int32_t minkey; /* 60-63: Btree: Minkey. */ - u_int32_t re_len; /* 64-67: Recno: fixed-length record length. */ - u_int32_t re_pad; /* 68-71: Recno: fixed-length record pad. */ - u_int32_t root; /* 72-75: Root page. */ - - /* - * Minimum page size is 128. - */ -} BTMETA30; - -/************************************************************************ - HASH METADATA PAGE LAYOUT - ************************************************************************/ -typedef struct _hashmeta30 { - DBMETA30 dbmeta; /* 00-55: Generic meta-data page header. 
*/ - - u_int32_t max_bucket; /* 56-59: ID of Maximum bucket in use */ - u_int32_t high_mask; /* 60-63: Modulo mask into table */ - u_int32_t low_mask; /* 64-67: Modulo mask into table lower half */ - u_int32_t ffactor; /* 68-71: Fill factor */ - u_int32_t nelem; /* 72-75: Number of keys in hash table */ - u_int32_t h_charkey; /* 76-79: Value of hash(CHARKEY) */ -#define NCACHED30 32 /* number of spare points */ - /* 80-207: Spare pages for overflow */ - u_int32_t spares[NCACHED30]; - - /* - * Minimum page size is 256. - */ -} HMETA30; - -/************************************************************************ - QUEUE METADATA PAGE LAYOUT - ************************************************************************/ -/* - * QAM Meta data page structure - * - */ -typedef struct _qmeta30 { - DBMETA30 dbmeta; /* 00-55: Generic meta-data header. */ - - u_int32_t start; /* 56-59: Start offset. */ - u_int32_t first_recno; /* 60-63: First not deleted record. */ - u_int32_t cur_recno; /* 64-67: Last recno allocated. */ - u_int32_t re_len; /* 68-71: Fixed-length record length. */ - u_int32_t re_pad; /* 72-75: Fixed-length record pad. */ - u_int32_t rec_page; /* 76-79: Records Per Page. */ - - /* - * Minimum page size is 128. - */ -} QMETA30; - -/* Structures from Release 2.x */ - -/************************************************************************ - BTREE METADATA PAGE LAYOUT - ************************************************************************/ - -/* - * Btree metadata page layout: - */ -typedef struct _btmeta2X { - DB_LSN lsn; /* 00-07: LSN. */ - db_pgno_t pgno; /* 08-11: Current page number. */ - u_int32_t magic; /* 12-15: Magic number. */ - u_int32_t version; /* 16-19: Version. */ - u_int32_t pagesize; /* 20-23: Pagesize. */ - u_int32_t maxkey; /* 24-27: Btree: Maxkey. */ - u_int32_t minkey; /* 28-31: Btree: Minkey. */ - u_int32_t free; /* 32-35: Free list page number. */ - u_int32_t flags; /* 36-39: Flags. */ - u_int32_t re_len; /* 40-43: Recno: fixed-length record length. */ - u_int32_t re_pad; /* 44-47: Recno: fixed-length record pad. */ - /* 48-67: Unique file ID. */ - u_int8_t uid[DB_FILE_ID_LEN]; -} BTMETA2X; - -/************************************************************************ - HASH METADATA PAGE LAYOUT - ************************************************************************/ - -/* - * Hash metadata page layout: - */ -/* Hash Table Information */ -typedef struct hashhdr { /* Disk resident portion */ - DB_LSN lsn; /* 00-07: LSN of the header page */ - db_pgno_t pgno; /* 08-11: Page number (btree compatibility). */ - u_int32_t magic; /* 12-15: Magic NO for hash tables */ - u_int32_t version; /* 16-19: Version ID */ - u_int32_t pagesize; /* 20-23: Bucket/Page Size */ - u_int32_t ovfl_point; /* 24-27: Overflow page allocation location */ - u_int32_t last_freed; /* 28-31: Last freed overflow page pgno */ - u_int32_t max_bucket; /* 32-35: ID of Maximum bucket in use */ - u_int32_t high_mask; /* 36-39: Modulo mask into table */ - u_int32_t low_mask; /* 40-43: Modulo mask into table lower half */ - u_int32_t ffactor; /* 44-47: Fill factor */ - u_int32_t nelem; /* 48-51: Number of keys in hash table */ - u_int32_t h_charkey; /* 52-55: Value of hash(CHARKEY) */ - u_int32_t flags; /* 56-59: Allow duplicates. */ -#define NCACHED2X 32 /* number of spare points */ - /* 60-187: Spare pages for overflow */ - u_int32_t spares[NCACHED2X]; - /* 188-207: Unique file ID. */ - u_int8_t uid[DB_FILE_ID_LEN]; - - /* - * Minimum page size is 256. 
- */ -} HASHHDR; - -#endif /* !_DB_UPGRADE_H_ */ diff --git a/storage/bdb/dbinc/db_verify.h b/storage/bdb/dbinc/db_verify.h deleted file mode 100644 index 43bbff0c27b..00000000000 --- a/storage/bdb/dbinc/db_verify.h +++ /dev/null @@ -1,218 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: db_verify.h,v 12.4 2005/06/16 20:21:47 bostic Exp $ - */ - -#ifndef _DB_VERIFY_H_ -#define _DB_VERIFY_H_ - -/* - * Structures and macros for the storage and retrieval of all information - * needed for inter-page verification of a database. - */ - -/* - * EPRINT is the macro for error printing. Takes as an arg the arg set - * for DB->err. - */ -#define EPRINT(x) do { \ - if (!LF_ISSET(DB_SALVAGE)) \ - __db_err x; \ -} while (0) - -/* For fatal type errors--i.e., verifier bugs. */ -#define TYPE_ERR_PRINT(dbenv, func, pgno, ptype) \ - EPRINT(((dbenv), \ - "Page %lu: %s called on nonsensical page of type %lu", \ - (u_long)(pgno), (func), (u_long)(ptype))); - -/* Complain about a totally zeroed page where we don't expect one. */ -#define ZEROPG_ERR_PRINT(dbenv, pgno, str) do { \ - EPRINT(((dbenv), "Page %lu: %s is of inappropriate type %lu", \ - (u_long)(pgno), str, (u_long)P_INVALID)); \ - EPRINT(((dbenv), "Page %lu: totally zeroed page", \ - (u_long)(pgno))); \ -} while (0) - -/* - * Note that 0 is, in general, a valid pgno, despite equalling PGNO_INVALID; - * we have to test it separately where it's not appropriate. - */ -#define IS_VALID_PGNO(x) ((x) <= vdp->last_pgno) - -/* - * Flags understood by the btree structure checks (esp. __bam_vrfy_subtree). - * These share the same space as the global flags to __db_verify, and must not - * dip below 0x00010000. - */ -#define ST_DUPOK 0x00010000 /* Duplicates are acceptable. */ -#define ST_DUPSET 0x00020000 /* Subtree is in a duplicate tree. */ -#define ST_DUPSORT 0x00040000 /* Duplicates are sorted. */ -#define ST_IS_RECNO 0x00080000 /* Subtree is a recno. */ -#define ST_OVFL_LEAF 0x00100000 /* Overflow reffed from leaf page. */ -#define ST_RECNUM 0x00200000 /* Subtree has record numbering on. */ -#define ST_RELEN 0x00400000 /* Subtree has fixed-length records. */ -#define ST_TOPLEVEL 0x00800000 /* Subtree == entire tree */ - -/* - * Flags understood by __bam_salvage and __db_salvage. These need not share - * the same space with the __bam_vrfy_subtree flags, but must share with - * __db_verify. - */ -#define SA_SKIPFIRSTKEY 0x00080000 - -/* - * VRFY_DBINFO is the fundamental structure; it either represents the database - * of subdatabases, or the sole database if there are no subdatabases. - */ -struct __vrfy_dbinfo { - /* Info about this database in particular. */ - DBTYPE type; - - /* List of subdatabase meta pages, if any. */ - LIST_HEAD(__subdbs, __vrfy_childinfo) subdbs; - - /* File-global info--stores VRFY_PAGEINFOs for each page. */ - DB *pgdbp; - - /* Child database--stores VRFY_CHILDINFOs of each page. */ - DB *cdbp; - - /* Page info structures currently in use. */ - LIST_HEAD(__activepips, __vrfy_pageinfo) activepips; - - /* - * DB we use to keep track of which pages are linked somehow - * during verification. 0 is the default, "unseen"; 1 is seen. - */ - DB *pgset; - - /* - * This is a database we use during salvaging to keep track of which - * overflow and dup pages we need to come back to at the end and print - * with key "UNKNOWN". 
Pages which print with a good key get set - * to SALVAGE_IGNORE; others get set, as appropriate, to SALVAGE_LDUP, - * SALVAGE_LRECNODUP, SALVAGE_OVERFLOW for normal db overflow pages, - * and SALVAGE_BTREE, SALVAGE_LRECNO, and SALVAGE_HASH for subdb - * pages. - */ -#define SALVAGE_INVALID 0 -#define SALVAGE_IGNORE 1 -#define SALVAGE_LDUP 2 -#define SALVAGE_LRECNODUP 3 -#define SALVAGE_OVERFLOW 4 -#define SALVAGE_LBTREE 5 -#define SALVAGE_HASH 6 -#define SALVAGE_LRECNO 7 - DB *salvage_pages; - - db_pgno_t last_pgno; - db_pgno_t pgs_remaining; /* For dbp->db_feedback(). */ - - /* - * These are used during __bam_vrfy_subtree to keep track, while - * walking up and down the Btree structure, of the prev- and next-page - * chain of leaf pages and verify that it's intact. Also, make sure - * that this chain contains pages of only one type. - */ - db_pgno_t prev_pgno; - db_pgno_t next_pgno; - u_int8_t leaf_type; - - /* Queue needs these to verify data pages in the first pass. */ - u_int32_t re_pad; /* Record pad character. */ - u_int32_t re_len; /* Record length. */ - u_int32_t rec_page; - u_int32_t page_ext; - u_int32_t first_recno; - u_int32_t last_recno; - int nextents; - db_pgno_t *extents; - -#define SALVAGE_PRINTABLE 0x01 /* Output printable chars literally. */ -#define SALVAGE_PRINTHEADER 0x02 /* Print the unknown-key header. */ -#define SALVAGE_PRINTFOOTER 0x04 /* Print the unknown-key footer. */ -#define VRFY_LEAFCHAIN_BROKEN 0x08 /* Lost one or more Btree leaf pgs. */ -#define VRFY_QMETA_SET 0x10 /* We've seen a QUEUE meta page and - set things up for it. */ - u_int32_t flags; -}; /* VRFY_DBINFO */ - -/* - * The amount of state information we need per-page is small enough that - * it's not worth the trouble to define separate structures for each - * possible type of page, and since we're doing verification with these we - * have to be open to the possibility that page N will be of a completely - * unexpected type anyway. So we define one structure here with all the - * info we need for inter-page verification. - */ -struct __vrfy_pageinfo { - u_int8_t type; - u_int8_t bt_level; - u_int8_t unused1; - u_int8_t unused2; - db_pgno_t pgno; - db_pgno_t prev_pgno; - db_pgno_t next_pgno; - - /* meta pages */ - db_pgno_t root; - db_pgno_t free; /* Free list head. */ - - db_indx_t entries; /* Actual number of entries. */ - u_int16_t unused; - db_recno_t rec_cnt; /* Record count. */ - u_int32_t re_pad; /* Record pad character. */ - u_int32_t re_len; /* Record length. */ - u_int32_t bt_minkey; - u_int32_t h_ffactor; - u_int32_t h_nelem; - - /* overflow pages */ - /* - * Note that refcount is the refcount for an overflow page; pi_refcount - * is this structure's own refcount! - */ - u_int32_t refcount; - u_int32_t olen; - -#define VRFY_DUPS_UNSORTED 0x0001 /* Have to flag the negative! */ -#define VRFY_HAS_CHKSUM 0x0002 -#define VRFY_HAS_DUPS 0x0004 -#define VRFY_HAS_DUPSORT 0x0008 /* Has the flag set. */ -#define VRFY_HAS_RECNUMS 0x0010 -#define VRFY_HAS_SUBDBS 0x0020 -#define VRFY_INCOMPLETE 0x0040 /* Meta or item order checks incomp. */ -#define VRFY_IS_ALLZEROES 0x0080 /* Hash page we haven't touched? */ -#define VRFY_IS_FIXEDLEN 0x0100 -#define VRFY_IS_RECNO 0x0200 -#define VRFY_IS_RRECNO 0x0400 -#define VRFY_OVFL_LEAFSEEN 0x0800 - u_int32_t flags; - - LIST_ENTRY(__vrfy_pageinfo) links; - u_int32_t pi_refcount; -}; /* VRFY_PAGEINFO */ - -struct __vrfy_childinfo { - /* The following fields are set by the caller of __db_vrfy_childput. 
*/ - db_pgno_t pgno; - -#define V_DUPLICATE 1 /* off-page dup metadata */ -#define V_OVERFLOW 2 /* overflow page */ -#define V_RECNO 3 /* btree internal or leaf page */ - u_int32_t type; - db_recno_t nrecs; /* record count on a btree subtree */ - u_int32_t tlen; /* ovfl. item total size */ - - /* The following field is maintained by __db_vrfy_childput. */ - u_int32_t refcnt; /* # of times parent points to child. */ - - LIST_ENTRY(__vrfy_childinfo) links; -}; /* VRFY_CHILDINFO */ - -#endif /* !_DB_VERIFY_H_ */ diff --git a/storage/bdb/dbinc/debug.h b/storage/bdb/dbinc/debug.h deleted file mode 100644 index 642920eb2f2..00000000000 --- a/storage/bdb/dbinc/debug.h +++ /dev/null @@ -1,264 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1998-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: debug.h,v 12.2 2005/06/16 20:21:47 bostic Exp $ - */ - -#ifndef _DB_DEBUG_H_ -#define _DB_DEBUG_H_ - -#if defined(__cplusplus) -extern "C" { -#endif - -/* - * Turn on additional error checking in gcc 3.X. - */ -#if !defined(__GNUC__) || __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 5) -#define __attribute__(s) -#endif - -/* - * When running with #DIAGNOSTIC defined, we smash memory and do memory - * guarding with a special byte value. - */ -#define CLEAR_BYTE 0xdb -#define GUARD_BYTE 0xdc - -/* - * DB assertions. - * - * Use __STDC__ rather than STDC_HEADERS, the #e construct is ANSI C specific. - */ -#if defined(__STDC__) && defined(DIAGNOSTIC) -#define DB_ASSERT(e) ((e) ? (void)0 : __db_assert(#e, __FILE__, __LINE__)) -#else -#define DB_ASSERT(e) -#endif - -/* - * "Shut that bloody compiler up!" - * - * Unused, or not-used-yet variable. We need to write and then read the - * variable, some compilers are too bloody clever by half. - */ -#define COMPQUIET(n, v) \ - (n) = (v); \ - (n) = (n) - -/* - * Purify and other run-time tools complain about uninitialized reads/writes - * of structure fields whose only purpose is padding, as well as when heap - * memory that was never initialized is written to disk. - */ -#ifdef UMRW -#define UMRW_SET(v) (v) = 0 -#else -#define UMRW_SET(v) -#endif - -/* - * Message handling. Use a macro instead of a function because va_list - * references to variadic arguments cannot be reset to the beginning of the - * variadic argument list (and then rescanned), by functions other than the - * original routine that took the variadic list of arguments. - */ -#if defined(STDC_HEADERS) || defined(__cplusplus) -#define DB_REAL_ERR(env, error, error_set, default_stream, fmt) { \ - va_list ap; \ - \ - /* Call the application's callback function, if specified. */ \ - va_start(ap, fmt); \ - if ((env) != NULL && (env)->db_errcall != NULL) \ - __db_errcall(env, error, error_set, fmt, ap); \ - va_end(ap); \ - \ - /* Write to the application's file descriptor, if specified. */\ - va_start(ap, fmt); \ - if ((env) != NULL && (env)->db_errfile != NULL) \ - __db_errfile(env, error, error_set, fmt, ap); \ - va_end(ap); \ - \ - /* \ - * If we have a default and we didn't do either of the above, \ - * write to the default. \ - */ \ - va_start(ap, fmt); \ - if ((default_stream) && ((env) == NULL || \ - ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \ - __db_errfile(env, error, error_set, fmt, ap); \ - va_end(ap); \ -} -#else -#define DB_REAL_ERR(env, error, error_set, default_stream, fmt) { \ - va_list ap; \ - \ - /* Call the application's callback function, if specified. 
*/ \ - va_start(ap); \ - if ((env) != NULL && (env)->db_errcall != NULL) \ - __db_errcall(env, error, error_set, fmt, ap); \ - va_end(ap); \ - \ - /* Write to the application's file descriptor, if specified. */\ - va_start(ap); \ - if ((env) != NULL && (env)->db_errfile != NULL) \ - __db_errfile(env, error, error_set, fmt, ap); \ - va_end(ap); \ - \ - /* \ - * If we have a default and we didn't do either of the above, \ - * write to the default. \ - */ \ - va_start(ap); \ - if ((default_stream) && ((env) == NULL || \ - ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \ - __db_errfile(env, error, error_set, fmt, ap); \ - va_end(ap); \ -} -#endif -#if defined(STDC_HEADERS) || defined(__cplusplus) -#define DB_REAL_MSG(env, fmt) { \ - va_list ap; \ - \ - /* Call the application's callback function, if specified. */ \ - va_start(ap, fmt); \ - if ((env) != NULL && (env)->db_msgcall != NULL) \ - __db_msgcall(env, fmt, ap); \ - va_end(ap); \ - \ - /* \ - * If the application specified a file descriptor, or we wrote \ - * to neither the application's callback routine or to its file \ - * descriptor, write to stdout. \ - */ \ - va_start(ap, fmt); \ - if ((env) == NULL || \ - (env)->db_msgfile != NULL || (env)->db_msgcall == NULL) { \ - __db_msgfile(env, fmt, ap); \ - } \ - va_end(ap); \ -} -#else -#define DB_REAL_MSG(env, fmt) { \ - va_list ap; \ - \ - /* Call the application's callback function, if specified. */ \ - va_start(ap); \ - if ((env) != NULL && (env)->db_msgcall != NULL) \ - __db_msgcall(env, fmt, ap); \ - va_end(ap); \ - \ - /* \ - * If the application specified a file descriptor, or we wrote \ - * to neither the application's callback routine or to its file \ - * descriptor, write to stdout. \ - */ \ - va_start(ap); \ - if ((env) == NULL || \ - (env)->db_msgfile != NULL || (env)->db_msgcall == NULL) { \ - __db_msgfile(env, fmt, ap); \ - } \ - va_end(ap); \ -} -#endif - -/* - * Debugging macro to log operations. - * If DEBUG_WOP is defined, log operations that modify the database. - * If DEBUG_ROP is defined, log operations that read the database. - * - * D dbp - * T txn - * O operation (string) - * K key - * A data - * F flags - */ -#define LOG_OP(C, T, O, K, A, F) { \ - DB_LSN __lsn; \ - DBT __op; \ - if (DBC_LOGGING((C))) { \ - memset(&__op, 0, sizeof(__op)); \ - __op.data = O; \ - __op.size = strlen(O) + 1; \ - (void)__db_debug_log((C)->dbp->dbenv, T, &__lsn, 0, \ - &__op, (C)->dbp->log_filename->id, K, A, F); \ - } \ -} -#ifdef DEBUG_ROP -#define DEBUG_LREAD(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F) -#else -#define DEBUG_LREAD(C, T, O, K, A, F) -#endif -#ifdef DEBUG_WOP -#define DEBUG_LWRITE(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F) -#else -#define DEBUG_LWRITE(C, T, O, K, A, F) -#endif - -/* - * Hook for testing recovery at various places in the create/delete paths. - * Hook for testing subdb locks. 
- */ -#if CONFIG_TEST -#define DB_TEST_SUBLOCKS(env, flags) do { \ - if ((env)->test_abort == DB_TEST_SUBDB_LOCKS) \ - (flags) |= DB_LOCK_NOWAIT; \ -} while (0) - -#define DB_ENV_TEST_RECOVERY(env, val, ret, name) do { \ - int __ret; \ - PANIC_CHECK((env)); \ - if ((env)->test_copy == (val)) { \ - /* COPY the FILE */ \ - if ((__ret = __db_testcopy((env), NULL, (name))) != 0) \ - (ret) = __db_panic((env), __ret); \ - } \ - if ((env)->test_abort == (val)) { \ - /* ABORT the TXN */ \ - (env)->test_abort = 0; \ - (ret) = EINVAL; \ - goto db_tr_err; \ - } \ -} while (0) - -#define DB_TEST_RECOVERY(dbp, val, ret, name) do { \ - int __ret; \ - PANIC_CHECK((dbp)->dbenv); \ - if ((dbp)->dbenv->test_copy == (val)) { \ - /* Copy the file. */ \ - if (F_ISSET((dbp), \ - DB_AM_OPEN_CALLED) && (dbp)->mpf != NULL) \ - (void)__db_sync(dbp); \ - if ((__ret = \ - __db_testcopy((dbp)->dbenv, (dbp), (name))) != 0) \ - (ret) = __db_panic((dbp)->dbenv, __ret); \ - } \ - if ((dbp)->dbenv->test_abort == (val)) { \ - /* Abort the transaction. */ \ - (dbp)->dbenv->test_abort = 0; \ - (ret) = EINVAL; \ - goto db_tr_err; \ - } \ -} while (0) - -#define DB_TEST_RECOVERY_LABEL db_tr_err: - -#define DB_TEST_WAIT(env, val) \ - if ((val) != 0) \ - __os_sleep((env), (u_long)(val), 0) -#else -#define DB_TEST_SUBLOCKS(env, flags) -#define DB_ENV_TEST_RECOVERY(env, val, ret, name) -#define DB_TEST_RECOVERY(dbp, val, ret, name) -#define DB_TEST_RECOVERY_LABEL -#define DB_TEST_WAIT(env, val) -#endif - -#if defined(__cplusplus) -} -#endif -#endif /* !_DB_DEBUG_H_ */ diff --git a/storage/bdb/dbinc/fop.h b/storage/bdb/dbinc/fop.h deleted file mode 100644 index 98f7c59b362..00000000000 --- a/storage/bdb/dbinc/fop.h +++ /dev/null @@ -1,21 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: fop.h,v 12.3 2005/10/04 18:22:22 bostic Exp $ - */ - -#ifndef _FOP_H_ -#define _FOP_H_ - -#define MAKE_INMEM(D) do { \ - F_SET((D), DB_AM_INMEM); \ - (void)__memp_set_flags((D)->mpf, DB_MPOOL_NOFILE, 1); \ -} while (0) - -#include "dbinc_auto/fileops_auto.h" -#include "dbinc_auto/fileops_ext.h" - -#endif /* !_FOP_H_ */ diff --git a/storage/bdb/dbinc/globals.h b/storage/bdb/dbinc/globals.h deleted file mode 100644 index aaef6309fb1..00000000000 --- a/storage/bdb/dbinc/globals.h +++ /dev/null @@ -1,92 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: globals.h,v 12.1 2005/06/16 20:21:47 bostic Exp $ - */ - -/******************************************************* - * Global variables. - * - * Held in a single structure to minimize the name-space pollution. - *******************************************************/ -#ifdef HAVE_VXWORKS -#include "semLib.h" -#endif - -typedef struct __db_globals { -#ifdef HAVE_VXWORKS - u_int32_t db_global_init; /* VxWorks: inited */ - SEM_ID db_global_lock; /* VxWorks: global semaphore */ -#endif - /* XA: list of opened environments. */ - TAILQ_HEAD(__db_envq, __db_env) db_envq; - - char *db_line; /* DB display string. 
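
/*
 * Editorial sketch, not from the original tree: the recovery-test hooks
 * above rely on a function-local db_tr_err label that DB_TEST_RECOVERY()
 * jumps to when a test driver forces an abort.  Stripped of the BDB
 * types, the control flow looks like this (all names invented):
 */
#include <errno.h>

static int test_abort_point;			/* Set by a test driver. */

static int
illustrative_create(int this_point)
{
	int ret;

	ret = 0;
	/* ... normal create work ... */

	if (test_abort_point == this_point) {	/* DB_TEST_RECOVERY() */
		ret = EINVAL;
		goto db_tr_err;
	}

	/* ... work that is skipped when the hook fires ... */

db_tr_err:					/* DB_TEST_RECOVERY_LABEL */
	return (ret);
}
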
*/ - - int (*j_close) __P((int)); /* Underlying OS interface jump table.*/ - void (*j_dirfree) __P((char **, int)); - int (*j_dirlist) __P((const char *, char ***, int *)); - int (*j_exists) __P((const char *, int *)); - void (*j_free) __P((void *)); - int (*j_fsync) __P((int)); - int (*j_ftruncate) __P((int, off_t)); - int (*j_ioinfo) __P((const char *, - int, u_int32_t *, u_int32_t *, u_int32_t *)); - void *(*j_malloc) __P((size_t)); - int (*j_map) __P((char *, size_t, int, int, void **)); - int (*j_open) __P((const char *, int, ...)); - ssize_t (*j_pread) __P((int, void *, size_t, off_t)); - ssize_t (*j_pwrite) __P((int, const void *, size_t, off_t)); - ssize_t (*j_read) __P((int, void *, size_t)); - void *(*j_realloc) __P((void *, size_t)); - int (*j_rename) __P((const char *, const char *)); - int (*j_seek) __P((int, off_t, int)); - int (*j_sleep) __P((u_long, u_long)); - int (*j_unlink) __P((const char *)); - int (*j_unmap) __P((void *, size_t)); - ssize_t (*j_write) __P((int, const void *, size_t)); - int (*j_yield) __P((void)); -} DB_GLOBALS; - -#ifdef DB_INITIALIZE_DB_GLOBALS -DB_GLOBALS __db_global_values = { -#ifdef HAVE_VXWORKS - 0, /* VxWorks: initialized */ - NULL, /* VxWorks: global semaphore */ -#endif - /* XA: list of opened environments. */ - {NULL, &__db_global_values.db_envq.tqh_first}, - - "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=", - - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL -}; -#else -extern DB_GLOBALS __db_global_values; -#endif - -#define DB_GLOBAL(v) __db_global_values.v diff --git a/storage/bdb/dbinc/hash.h b/storage/bdb/dbinc/hash.h deleted file mode 100644 index feb124cbab2..00000000000 --- a/storage/bdb/dbinc/hash.h +++ /dev/null @@ -1,147 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994 - * Margo Seltzer. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993, 1994 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * Margo Seltzer. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
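
/*
 * Editorial sketch, not from the original tree: the DB_GLOBALS jump
 * table above lets an application replace low-level OS calls with its
 * own functions.  A minimal standalone version of the same idea (struct
 * and function names invented):
 */
#include <stdlib.h>

struct os_calls {
	void *(*j_malloc)(size_t);
	void (*j_free)(void *);
};

static struct os_calls os_calls;		/* Hooks default to NULL. */

static void *
os_malloc(size_t len)
{
	/* Use the application's hook when set, else fall back to libc. */
	return (os_calls.j_malloc != NULL ? os_calls.j_malloc(len) : malloc(len));
}

static void
os_free(void *p)
{
	if (os_calls.j_free != NULL)
		os_calls.j_free(p);
	else
		free(p);
}
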
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: hash.h,v 12.1 2005/06/16 20:21:47 bostic Exp $ - */ - -#ifndef _DB_HASH_H_ -#define _DB_HASH_H_ - -/* Hash internal structure. */ -typedef struct hash_t { - db_pgno_t meta_pgno; /* Page number of the meta data page. */ - u_int32_t h_ffactor; /* Fill factor. */ - u_int32_t h_nelem; /* Number of elements. */ - /* Hash function. */ - u_int32_t (*h_hash) __P((DB *, const void *, u_int32_t)); -} HASH; - -/* Cursor structure definitions. */ -typedef struct cursor_t { - /* struct __dbc_internal */ - __DBC_INTERNAL - - /* Hash private part */ - - /* Per-thread information */ - DB_LOCK hlock; /* Metadata page lock. */ - HMETA *hdr; /* Pointer to meta-data page. */ - PAGE *split_buf; /* Temporary buffer for splits. */ - - /* Hash cursor information */ - db_pgno_t bucket; /* Bucket we are traversing. */ - db_pgno_t lbucket; /* Bucket for which we are locked. */ - db_indx_t dup_off; /* Offset within a duplicate set. */ - db_indx_t dup_len; /* Length of current duplicate. */ - db_indx_t dup_tlen; /* Total length of duplicate entry. */ - u_int32_t seek_size; /* Number of bytes we need for add. */ - db_pgno_t seek_found_page;/* Page on which we can insert. */ - u_int32_t order; /* Relative order among deleted curs. */ - -#define H_CONTINUE 0x0001 /* Join--search strictly fwd for data */ -#define H_DELETED 0x0002 /* Cursor item is deleted. */ -#define H_DIRTY 0x0004 /* Meta-data page needs to be written */ -#define H_DUPONLY 0x0008 /* Dups only; do not change key. */ -#define H_EXPAND 0x0010 /* Table expanded. */ -#define H_ISDUP 0x0020 /* Cursor is within duplicate set. */ -#define H_NEXT_NODUP 0x0040 /* Get next non-dup entry. */ -#define H_NOMORE 0x0080 /* No more entries in bucket. */ -#define H_OK 0x0100 /* Request succeeded. */ - u_int32_t flags; -} HASH_CURSOR; - -/* Test string. */ -#define CHARKEY "%$sniglet^&" - -/* Overflow management */ -/* - * The spares table indicates the page number at which each doubling begins. - * From this page number we subtract the number of buckets already allocated - * so that we can do a simple addition to calculate the page number here. - */ -#define BS_TO_PAGE(bucket, spares) \ - ((bucket) + (spares)[__db_log2((bucket) + 1)]) -#define BUCKET_TO_PAGE(I, B) (BS_TO_PAGE((B), (I)->hdr->spares)) - -/* Constraints about much data goes on a page. */ - -#define MINFILL 4 -#define ISBIG(I, N) (((N) > ((I)->hdr->dbmeta.pagesize / MINFILL)) ? 1 : 0) - -/* Shorthands for accessing structure */ -#define NDX_INVALID 0xFFFF -#define BUCKET_INVALID 0xFFFFFFFF - -/* On page duplicates are stored as a string of size-data-size triples. 
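
/*
 * Editorial sketch, not from the original tree: BS_TO_PAGE() and
 * BUCKET_TO_PAGE() above map a hash bucket to a page number by adding
 * the "spares" entry for the doubling that bucket belongs to.  A
 * standalone equivalent (the spares array contents are caller-supplied;
 * bucket_log2() mirrors __db_log2()):
 */
#include <stdint.h>

static uint32_t
bucket_log2(uint32_t num)
{
	uint32_t i, limit;

	/* Smallest i such that 2^i >= num. */
	for (i = 0, limit = 1; limit < num; i++)
		limit <<= 1;
	return (i);
}

static uint32_t
bucket_to_page(const uint32_t *spares, uint32_t bucket)
{
	return (bucket + spares[bucket_log2(bucket + 1)]);
}
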
*/ -#define DUP_SIZE(len) ((len) + 2 * sizeof(db_indx_t)) - -/* Log messages types (these are subtypes within a record type) */ -#define PAIR_KEYMASK 0x1 -#define PAIR_DATAMASK 0x2 -#define PAIR_DUPMASK 0x4 -#define PAIR_MASK 0xf -#define PAIR_ISKEYBIG(N) (N & PAIR_KEYMASK) -#define PAIR_ISDATABIG(N) (N & PAIR_DATAMASK) -#define PAIR_ISDATADUP(N) (N & PAIR_DUPMASK) -#define OPCODE_OF(N) (N & ~PAIR_MASK) - -#define PUTPAIR 0x20 -#define DELPAIR 0x30 -#define PUTOVFL 0x40 -#define DELOVFL 0x50 -#define HASH_UNUSED1 0x60 -#define HASH_UNUSED2 0x70 -#define SPLITOLD 0x80 -#define SPLITNEW 0x90 - -typedef enum { - DB_HAM_CHGPG = 1, - DB_HAM_DELFIRSTPG = 2, - DB_HAM_DELMIDPG = 3, - DB_HAM_DELLASTPG = 4, - DB_HAM_DUP = 5, - DB_HAM_SPLIT = 6 -} db_ham_mode; - -#include "dbinc_auto/hash_auto.h" -#include "dbinc_auto/hash_ext.h" -#include "dbinc/db_am.h" -#endif /* !_DB_HASH_H_ */ diff --git a/storage/bdb/dbinc/hmac.h b/storage/bdb/dbinc/hmac.h deleted file mode 100644 index a30756febcf..00000000000 --- a/storage/bdb/dbinc/hmac.h +++ /dev/null @@ -1,32 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: hmac.h,v 12.1 2005/06/16 20:21:48 bostic Exp $ - */ - -#ifndef _DB_HMAC_H_ -#define _DB_HMAC_H_ - -/* - * Algorithm specific information. - */ -/* - * SHA1 checksumming - */ -typedef struct { - u_int32_t state[5]; - u_int32_t count[2]; - unsigned char buffer[64]; -} SHA1_CTX; - -/* - * AES assumes the SHA1 checksumming (also called MAC) - */ -#define DB_MAC_MAGIC "mac derivation key magic value" -#define DB_ENC_MAGIC "encryption and decryption key value magic" - -#include "dbinc_auto/hmac_ext.h" -#endif /* !_DB_HMAC_H_ */ diff --git a/storage/bdb/dbinc/lock.h b/storage/bdb/dbinc/lock.h deleted file mode 100644 index b3b7186bf6c..00000000000 --- a/storage/bdb/dbinc/lock.h +++ /dev/null @@ -1,236 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: lock.h,v 12.7 2005/10/07 20:21:23 ubell Exp $ - */ - -#ifndef _DB_LOCK_H_ -#define _DB_LOCK_H_ - -#define DB_LOCK_DEFAULT_N 1000 /* Default # of locks in region. */ - -/* - * The locker id space is divided between the transaction manager and the lock - * manager. Lock IDs start at 1 and go to DB_LOCK_MAXID. Txn IDs start at - * DB_LOCK_MAXID + 1 and go up to TXN_MAXIMUM. - */ -#define DB_LOCK_INVALIDID 0 -#define DB_LOCK_MAXID 0x7fffffff - -/* - * Out of band value for a lock. Locks contain an offset into a lock region, - * so we use an invalid region offset to indicate an invalid or unset lock. - */ -#define LOCK_INVALID INVALID_ROFF -#define LOCK_ISSET(lock) ((lock).off != LOCK_INVALID) -#define LOCK_INIT(lock) ((lock).off = LOCK_INVALID) - -/* - * Macro to identify a write lock for the purpose of counting locks - * for the NUMWRITES option to deadlock detection. - */ -#define IS_WRITELOCK(m) \ - ((m) == DB_LOCK_WRITE || (m) == DB_LOCK_WWRITE || \ - (m) == DB_LOCK_IWRITE || (m) == DB_LOCK_IWR) - -/* - * Lock timers. - */ -typedef struct { - u_int32_t tv_sec; /* Seconds. */ - u_int32_t tv_usec; /* Microseconds. 
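
/*
 * Editorial sketch, not from the original tree: on-page duplicates are
 * stored as size/data/size triples, which is why DUP_SIZE() above adds
 * two length words to the data length.  A standalone encoder for one
 * element (layout simplified, not on-disk compatible; the index type is
 * a stand-in for db_indx_t):
 */
#include <stdint.h>
#include <string.h>

typedef uint16_t db_indx_sketch_t;

static size_t
dup_put(uint8_t *dest, const void *data, db_indx_sketch_t len)
{
	/* Leading length, payload, trailing length. */
	memcpy(dest, &len, sizeof(len));
	memcpy(dest + sizeof(len), data, len);
	memcpy(dest + sizeof(len) + len, &len, sizeof(len));
	return ((size_t)len + 2 * sizeof(db_indx_sketch_t));
}
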
*/ -} db_timeval_t; - -#define LOCK_TIME_ISVALID(time) ((time)->tv_sec != 0) -#define LOCK_SET_TIME_INVALID(time) ((time)->tv_sec = 0) -#define LOCK_TIME_ISMAX(time) ((time)->tv_sec == UINT32_MAX) -#define LOCK_SET_TIME_MAX(time) ((time)->tv_sec = UINT32_MAX) -#define LOCK_TIME_EQUAL(t1, t2) \ - ((t1)->tv_sec == (t2)->tv_sec && (t1)->tv_usec == (t2)->tv_usec) -#define LOCK_TIME_GREATER(t1, t2) \ - ((t1)->tv_sec > (t2)->tv_sec || \ - ((t1)->tv_sec == (t2)->tv_sec && (t1)->tv_usec > (t2)->tv_usec)) - -/* Macros to lock/unlock the lock region as a whole. */ -#define LOCK_SYSTEM_LOCK(dbenv) \ - MUTEX_LOCK(dbenv, ((DB_LOCKREGION *)((DB_LOCKTAB *) \ - (dbenv)->lk_handle)->reginfo.primary)->mtx_region) -#define LOCK_SYSTEM_UNLOCK(dbenv) \ - MUTEX_UNLOCK(dbenv, ((DB_LOCKREGION *)((DB_LOCKTAB *) \ - (dbenv)->lk_handle)->reginfo.primary)->mtx_region) - -/* - * DB_LOCKREGION -- - * The lock shared region. - */ -typedef struct __db_lockregion { - db_mutex_t mtx_region; /* Region mutex. */ - - u_int32_t need_dd; /* flag for deadlock detector */ - u_int32_t detect; /* run dd on every conflict */ - db_timeval_t next_timeout; /* next time to expire a lock */ - /* free lock header */ - SH_TAILQ_HEAD(__flock) free_locks; - /* free obj header */ - SH_TAILQ_HEAD(__fobj) free_objs; - /* free locker header */ - SH_TAILQ_HEAD(__flocker) free_lockers; - SH_TAILQ_HEAD(__dobj) dd_objs; /* objects with waiters */ - SH_TAILQ_HEAD(__lkrs) lockers; /* list of lockers */ - - db_timeout_t lk_timeout; /* timeout for locks. */ - db_timeout_t tx_timeout; /* timeout for txns. */ - - u_int32_t locker_t_size; /* size of locker hash table */ - u_int32_t object_t_size; /* size of object hash table */ - - roff_t conf_off; /* offset of conflicts array */ - roff_t obj_off; /* offset of object hash table */ - roff_t locker_off; /* offset of locker hash table */ - - DB_LOCK_STAT stat; /* stats about locking. */ -} DB_LOCKREGION; - -/* - * Since we will store DBTs in shared memory, we need the equivalent of a - * DBT that will work in shared memory. - */ -typedef struct __sh_dbt { - u_int32_t size; /* Byte length. */ - roff_t off; /* Region offset. */ -} SH_DBT; - -#define SH_DBT_PTR(p) ((void *)(((u_int8_t *)(p)) + (p)->off)) - -/* - * Object structures; these live in the object hash table. - */ -typedef struct __db_lockobj { - SH_DBT lockobj; /* Identifies object locked. */ - SH_TAILQ_ENTRY links; /* Links for free list or hash list. */ - SH_TAILQ_ENTRY dd_links; /* Links for dd list. */ - SH_TAILQ_HEAD(__waitl) waiters; /* List of waiting locks. */ - SH_TAILQ_HEAD(__holdl) holders; /* List of held locks. */ - /* Declare room in the object to hold - * typical DB lock structures so that - * we do not have to allocate them from - * shalloc at run-time. */ - u_int8_t objdata[sizeof(struct __db_ilock)]; -} DB_LOCKOBJ; - -/* - * Locker structures; these live in the locker hash table. - */ -typedef struct __db_locker { - u_int32_t id; /* Locker id. */ - - pid_t pid; /* Process owning locker ID */ - db_threadid_t tid; /* Thread owning locker ID */ - - u_int32_t dd_id; /* Deadlock detector id. */ - - u_int32_t nlocks; /* Number of locks held. */ - u_int32_t nwrites; /* Number of write locks held. */ - - roff_t master_locker; /* Locker of master transaction. */ - roff_t parent_locker; /* Parent of this child. */ - SH_LIST_HEAD(_child) child_locker; /* List of descendant txns; - only used in a "master" - txn. */ - SH_LIST_ENTRY child_link; /* Links transactions in the family; - elements of the child_locker - list. 
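
/*
 * Editorial sketch, not from the original tree: SH_DBT above stores a
 * byte offset instead of a raw pointer so the structure stays valid no
 * matter where the shared region is mapped in each process.  Standalone
 * illustration of the SH_DBT_PTR() pattern (names invented):
 */
#include <stdint.h>

struct sh_blob {
	uint32_t size;				/* Byte length. */
	uint32_t off;				/* Offset from the header. */
};

static void *
sh_blob_ptr(struct sh_blob *p)
{
	/* Base address plus stored offset, as in SH_DBT_PTR(). */
	return ((uint8_t *)p + p->off);
}
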
*/ - SH_TAILQ_ENTRY links; /* Links for free and hash list. */ - SH_TAILQ_ENTRY ulinks; /* Links in-use list. */ - SH_LIST_HEAD(_held) heldby; /* Locks held by this locker. */ - db_timeval_t lk_expire; /* When current lock expires. */ - db_timeval_t tx_expire; /* When this txn expires. */ - db_timeout_t lk_timeout; /* How long do we let locks live. */ - -#define DB_LOCKER_DELETED 0x0001 -#define DB_LOCKER_DIRTY 0x0002 -#define DB_LOCKER_INABORT 0x0004 -#define DB_LOCKER_TIMEOUT 0x0008 - u_int32_t flags; -} DB_LOCKER; - -/* - * DB_LOCKTAB -- - * The primary library lock data structure (i.e., the one referenced - * by the environment, as opposed to the internal one laid out in the region.) - */ -typedef struct __db_locktab { - DB_ENV *dbenv; /* Environment. */ - REGINFO reginfo; /* Region information. */ - u_int8_t *conflicts; /* Pointer to conflict matrix. */ - DB_HASHTAB *obj_tab; /* Beginning of object hash table. */ - DB_HASHTAB *locker_tab; /* Beginning of locker hash table. */ -} DB_LOCKTAB; - -/* - * Test for conflicts. - * - * Cast HELD and WANTED to ints, they are usually db_lockmode_t enums. - */ -#define CONFLICTS(T, R, HELD, WANTED) \ - (T)->conflicts[((int)HELD) * (R)->stat.st_nmodes + ((int)WANTED)] - -#define OBJ_LINKS_VALID(L) ((L)->links.stqe_prev != -1) - -struct __db_lock { - /* - * Wait on mutex to wait on lock. You reference your own mutex with - * ID 0 and others reference your mutex with ID 1. - */ - db_mutex_t mtx_lock; - - u_int32_t holder; /* Who holds this lock. */ - u_int32_t gen; /* Generation count. */ - SH_TAILQ_ENTRY links; /* Free or holder/waiter list. */ - SH_LIST_ENTRY locker_links; /* List of locks held by a locker. */ - u_int32_t refcount; /* Reference count the lock. */ - db_lockmode_t mode; /* What sort of lock. */ - roff_t obj; /* Relative offset of object struct. */ - db_status_t status; /* Status of this lock. */ -}; - -/* - * Flag values for __lock_put_internal: - * DB_LOCK_DOALL: Unlock all references in this lock (instead of only 1). - * DB_LOCK_FREE: Free the lock (used in checklocker). - * DB_LOCK_NOPROMOTE: Don't bother running promotion when releasing locks - * (used by __lock_put_internal). - * DB_LOCK_UNLINK: Remove from the locker links (used in checklocker). - * Make sure that these do not conflict with the interface flags because - * we pass some of those around. - */ -#define DB_LOCK_DOALL 0x010000 -#define DB_LOCK_FREE 0x040000 -#define DB_LOCK_NOPROMOTE 0x080000 -#define DB_LOCK_UNLINK 0x100000 -#define DB_LOCK_NOREGION 0x200000 -#define DB_LOCK_NOWAITERS 0x400000 - -/* - * Macros to get/release different types of mutexes. - */ -#define OBJECT_LOCK(lt, reg, obj, ndx) \ - ndx = __lock_ohash(obj) % (reg)->object_t_size -#define SHOBJECT_LOCK(lt, reg, shobj, ndx) \ - ndx = __lock_lhash(shobj) % (reg)->object_t_size - -/* - * __lock_locker_hash -- - * Hash function for entering lockers into the locker hash table. - * Since these are simply 32-bit unsigned integers at the moment, - * just return the locker value. - */ -#define __lock_locker_hash(locker) (locker) -#define LOCKER_LOCK(lt, reg, locker, ndx) \ - ndx = __lock_locker_hash(locker) % (reg)->locker_t_size; - -#include "dbinc_auto/lock_ext.h" -#endif /* !_DB_LOCK_H_ */ diff --git a/storage/bdb/dbinc/log.h b/storage/bdb/dbinc/log.h deleted file mode 100644 index 6f97526441f..00000000000 --- a/storage/bdb/dbinc/log.h +++ /dev/null @@ -1,387 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. 
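
/*
 * Editorial sketch, not from the original tree: the CONFLICTS() macro
 * above indexes a flattened held-mode-by-wanted-mode matrix.  A
 * standalone two-mode example (the matrix values here are invented, not
 * BDB's real conflict table):
 */
#define N_SKETCH_MODES 2
enum sketch_lockmode { SKETCH_READ = 0, SKETCH_WRITE = 1 };

static const unsigned char sketch_conflicts[N_SKETCH_MODES * N_SKETCH_MODES] = {
	/* held \ wanted:  READ, WRITE */
	/* READ  */	   0,	 1,
	/* WRITE */	   1,	 1,
};

static int
modes_conflict(enum sketch_lockmode held, enum sketch_lockmode wanted)
{
	return (sketch_conflicts[(int)held * N_SKETCH_MODES + (int)wanted]);
}
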
- * - * $Id: log.h,v 12.12 2005/10/20 18:57:05 bostic Exp $ - */ - -#ifndef _LOG_H_ -#define _LOG_H_ - -/******************************************************* - * DBREG: - * The DB file register code keeps track of open files. It's stored - * in the log subsystem's shared region, and so appears in the log.h - * header file, but is logically separate. - *******************************************************/ -/* - * The per-process table that maps log file-id's to DB structures. - */ -typedef struct __db_entry { - DB *dbp; /* Open dbp for this file id. */ - int deleted; /* File was not found during open. */ -} DB_ENTRY; - -/* - * FNAME -- - * File name and id. - */ -struct __fname { - SH_TAILQ_ENTRY q; /* File name queue. */ - - int32_t id; /* Logging file id. */ - DBTYPE s_type; /* Saved DB type. */ - - roff_t name_off; /* Name offset. */ - db_pgno_t meta_pgno; /* Page number of the meta page. */ - u_int8_t ufid[DB_FILE_ID_LEN]; /* Unique file id. */ - - u_int32_t create_txnid; /* - * Txn ID of the DB create, stored so - * we can log it at register time. - */ -#define DB_FNAME_NOTLOGGED 0x01 /* Log of close failed. */ -#define DB_FNAME_DURABLE 0x02 /* File is durable. */ - u_int32_t flags; -}; - -/* File open/close register log record opcodes. */ -#define DBREG_CHKPNT 1 /* Checkpoint: file name/id dump. */ -#define DBREG_CLOSE 2 /* File close. */ -#define DBREG_OPEN 3 /* File open. */ -#define DBREG_PREOPEN 4 /* Open in mpool only. */ -#define DBREG_RCLOSE 5 /* File close after recovery. */ -#define DBREG_REOPEN 6 /* Open for in-memory database. */ - -/******************************************************* - * LOG: - * The log subsystem information. - *******************************************************/ -struct __db_log; typedef struct __db_log DB_LOG; -struct __hdr; typedef struct __hdr HDR; -struct __log; typedef struct __log LOG; -struct __log_persist; typedef struct __log_persist LOGP; - -#define LFPREFIX "log." /* Log file name prefix. */ -#define LFNAME "log.%010d" /* Log file name template. */ -#define LFNAME_V1 "log.%05d" /* Log file name template, rev 1. */ - -#define LG_MAX_DEFAULT (10 * MEGABYTE) /* 10 MB. */ -#define LG_MAX_INMEM (256 * 1024) /* 256 KB. */ -#define LG_BSIZE_DEFAULT (32 * 1024) /* 32 KB. */ -#define LG_BSIZE_INMEM (1 * MEGABYTE) /* 1 MB. */ -#define LG_BASE_REGION_SIZE (60 * 1024) /* 60 KB. */ - -/* - * DB_LOG - * Per-process log structure. - */ -struct __db_log { - /* - * These fields need to be protected for multi-threaded support. - */ - db_mutex_t mtx_dbreg; /* Mutex for thread protection. */ - - DB_ENTRY *dbentry; /* Recovery file-id mapping. */ -#define DB_GROW_SIZE 64 - int32_t dbentry_cnt; /* Entries. Grows by DB_GROW_SIZE. */ - - /* - * These fields are only accessed when the region lock is held, so - * they do not have to be protected by the thread lock as well. - */ - u_int32_t lfname; /* Log file "name". */ - DB_FH *lfhp; /* Log file handle. */ - - u_int8_t *bufp; /* Region buffer. */ - - /* These fields are not thread protected. */ - DB_ENV *dbenv; /* Reference to error information. */ - REGINFO reginfo; /* Region information. */ - -#define DBLOG_RECOVER 0x01 /* We are in recovery. */ -#define DBLOG_FORCE_OPEN 0x02 /* Force the DB open even if it appears - * to be deleted. */ - u_int32_t flags; -}; - -/* - * HDR -- - * Log record header. - */ -struct __hdr { - u_int32_t prev; /* Previous offset. */ - u_int32_t len; /* Current length. */ - u_int8_t chksum[DB_MAC_KEY]; /* Current checksum. 
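
/*
 * Editorial sketch, not from the original tree: LFNAME above is a printf
 * template, so building the name of log file N is a single snprintf()
 * call (function name invented):
 */
#include <stdio.h>

static void
log_file_name(char *buf, size_t buflen, int filenum)
{
	/* LFNAME is "log.%010d", e.g. file 37 becomes "log.0000000037". */
	(void)snprintf(buf, buflen, "log.%010d", filenum);
}
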
*/ - u_int8_t iv[DB_IV_BYTES]; /* IV */ - u_int32_t orig_size; /* Original size of log record */ - /* !!! - 'size' is not written to log, must be last in hdr */ - size_t size; /* Size of header to use */ -}; - -/* - * We use HDR internally, and then when we write out, we write out - * prev, len, and then a 4-byte checksum if normal operation or - * a crypto-checksum and IV and original size if running in crypto - * mode. We must store the original size in case we pad. Set the - * size when we set up the header. We compute a DB_MAC_KEY size - * checksum regardless, but we can safely just use the first 4 bytes. - */ -#define HDR_NORMAL_SZ 12 -#define HDR_CRYPTO_SZ 12 + DB_MAC_KEY + DB_IV_BYTES - -struct __log_persist { - u_int32_t magic; /* DB_LOGMAGIC */ - u_int32_t version; /* DB_LOGVERSION */ - - u_int32_t log_size; /* Log file size. */ - u_int32_t notused; /* Historically the log file mode. */ -}; - -/* Macros to lock/unlock the log region as a whole. */ -#define LOG_SYSTEM_LOCK(dbenv) \ - MUTEX_LOCK(dbenv, ((LOG *)((DB_LOG *) \ - (dbenv)->lg_handle)->reginfo.primary)->mtx_region) -#define LOG_SYSTEM_UNLOCK(dbenv) \ - MUTEX_UNLOCK(dbenv, ((LOG *)((DB_LOG *) \ - (dbenv)->lg_handle)->reginfo.primary)->mtx_region) - -/* - * LOG -- - * Shared log region. One of these is allocated in shared memory, - * and describes the log. - */ -struct __log { - db_mutex_t mtx_region; /* Region mutex. */ - - db_mutex_t mtx_filelist; /* Mutex guarding file name list. */ - - LOGP persist; /* Persistent information. */ - - SH_TAILQ_HEAD(__fq1) fq; /* List of file names. */ - int32_t fid_max; /* Max fid allocated. */ - roff_t free_fid_stack; /* Stack of free file ids. */ - u_int free_fids; /* Height of free fid stack. */ - u_int free_fids_alloced; /* N free fid slots allocated. */ - - /* - * The lsn LSN is the file offset that we're about to write and which - * we will return to the user. - */ - DB_LSN lsn; /* LSN at current file offset. */ - - /* - * The f_lsn LSN is the LSN (returned to the user) that "owns" the - * first byte of the buffer. If the record associated with the LSN - * spans buffers, it may not reflect the physical file location of - * the first byte of the buffer. - */ - DB_LSN f_lsn; /* LSN of first byte in the buffer. */ - size_t b_off; /* Current offset in the buffer. */ - u_int32_t w_off; /* Current write offset in the file. */ - u_int32_t len; /* Length of the last record. */ - - DB_LSN active_lsn; /* Oldest active LSN in the buffer. */ - size_t a_off; /* Offset in the buffer of first active - file. */ - - /* - * The s_lsn LSN is the last LSN that we know is on disk, not just - * written, but synced. This field is protected by the flush mutex - * rather than by the region mutex. - */ - db_mutex_t mtx_flush; /* Mutex guarding flushing. */ - int in_flush; /* Log flush in progress. */ - DB_LSN s_lsn; /* LSN of the last sync. */ - - DB_LOG_STAT stat; /* Log statistics. */ - - /* - * !!! - * NOTE: the next 11 fields, waiting_lsn, verify_lsn, max_wait_lsn, - * maxperm_lsn, wait_recs, rcvd_recs, ready_lsn and bulk_* are NOT - * protected by the log region lock. They are protected by - * REP->mtx_clientdb. If you need access to both, you must acquire - * REP->mtx_clientdb before acquiring the log region lock. - * - * The waiting_lsn is used by the replication system. It is the - * first LSN that we are holding without putting in the log, because - * we received one or more log records out of order. 
Associated with - * the waiting_lsn is the number of log records that we still have to - * receive before we decide that we should request it again. - * - * The max_wait_lsn is used to control retransmission in the face - * of dropped messages. If we are requesting all records from the - * current gap (i.e., chunk of the log that we are missing), then - * the max_wait_lsn contains the first LSN that we are known to have - * in the __db.rep.db. If we requested only a single record, then - * the max_wait_lsn has the LSN of that record we requested. - */ - DB_LSN waiting_lsn; /* First log record after a gap. */ - DB_LSN verify_lsn; /* LSN we are waiting to verify. */ - DB_LSN max_wait_lsn; /* Maximum LSN requested. */ - DB_LSN max_perm_lsn; /* Maximum PERMANENT LSN processed. */ - u_int32_t wait_recs; /* Records to wait before requesting. */ - u_int32_t rcvd_recs; /* Records received while waiting. */ - /* - * The ready_lsn is also used by the replication system. It is the - * next LSN we expect to receive. It's normally equal to "lsn", - * except at the beginning of a log file, at which point it's set - * to the LSN of the first record of the new file (after the - * header), rather than to 0. - */ - DB_LSN ready_lsn; - /* - * The bulk_buf is used by replication for bulk transfer. While this - * is protected by REP->mtx_clientdb, this doesn't contend with the - * above fields because the above are used by clients and the bulk - * fields below are used by a master. - */ - roff_t bulk_buf; /* Bulk transfer buffer in region. */ - uintptr_t bulk_off; /* Current offset into bulk buffer. */ - u_int32_t bulk_len; /* Length of buffer. */ - u_int32_t bulk_flags; /* Bulk buffer flags. */ - - /* - * During initialization, the log system walks forward through the - * last log file to find its end. If it runs into a checkpoint - * while it's doing so, it caches it here so that the transaction - * system doesn't need to walk through the file again on its - * initialization. - */ - DB_LSN cached_ckp_lsn; - - u_int32_t regionmax; /* Configured size of the region. */ - - roff_t buffer_off; /* Log buffer offset in the region. */ - u_int32_t buffer_size; /* Log buffer size. */ - - u_int32_t log_size; /* Log file's size. */ - u_int32_t log_nsize; /* Next log file's size. */ - - int filemode; /* Log file permissions mode. */ - - /* - * DB_LOG_AUTOREMOVE and DB_LOG_INMEMORY: not protected by a mutex, - * all we care about is if they're zero or non-zero. - */ - int db_log_autoremove; - int db_log_inmemory; - - u_int32_t ncommit; /* Number of txns waiting to commit. */ - DB_LSN t_lsn; /* LSN of first commit */ - SH_TAILQ_HEAD(__commit) commits;/* list of txns waiting to commit. */ - SH_TAILQ_HEAD(__free) free_commits;/* free list of commit structs. */ - - /* - * In-memory logs maintain a list of the start positions of all log - * files currently active in the in-memory buffer. This is to make the - * lookup from LSN to log buffer offset efficient. - */ - SH_TAILQ_HEAD(__logfile) logfiles; - SH_TAILQ_HEAD(__free_logfile) free_logfiles; -}; - -/* - * __db_commit structure -- - * One of these is allocated for each transaction waiting to commit. - */ -struct __db_commit { - db_mutex_t mtx_txnwait; /* Mutex for txn to wait on. */ - DB_LSN lsn; /* LSN of commit record. */ - SH_TAILQ_ENTRY links; /* Either on free or waiting list. */ - -#define DB_COMMIT_FLUSH 0x0001 /* Flush the log when you wake up. */ - u_int32_t flags; -}; - -/* - * Check for the proper progression of Log Sequence Numbers. 
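
/*
 * Editorial sketch, not from the original tree: the replication fields
 * above (waiting_lsn, max_wait_lsn, ready_lsn) all work by ordering log
 * sequence numbers.  A standalone file/offset comparison with the usual
 * log_compare() semantics (struct layout simplified, names invented):
 */
#include <stdint.h>

struct sketch_lsn {
	uint32_t file;				/* Log file number. */
	uint32_t offset;			/* Offset within the file. */
};

static int
sketch_lsn_cmp(const struct sketch_lsn *a, const struct sketch_lsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}
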
- * If we are rolling forward the LSN on the page must be greater - * than or equal to the previous LSN in log record. - * We ignore NOT LOGGED LSNs. The user did an unlogged update. - * We should eventually see a log record that matches and continue - * forward. - * If truncate is supported then a ZERO LSN implies a page that was - * allocated prior to the recovery start pont and then truncated - * later in the log. An allocation of a page after this - * page will extend the file, leaving a hole. We want to - * ignore this page until it is truncated again. - * - */ - -#ifdef HAVE_FTRUNCATE -#define CHECK_LSN(e, redo, cmp, lsn, prev) \ - if (DB_REDO(redo) && (cmp) < 0 && \ - ((!IS_NOT_LOGGED_LSN(*(lsn)) && !IS_ZERO_LSN(*(lsn))) || \ - IS_REP_CLIENT(e))) { \ - ret = __db_check_lsn(dbenv, lsn, prev); \ - goto out; \ - } -#else -#define CHECK_LSN(e, redo, cmp, lsn, prev) \ - if (DB_REDO(redo) && (cmp) < 0 && \ - (!IS_NOT_LOGGED_LSN(*(lsn)) || IS_REP_CLIENT(e))) { \ - ret = __db_check_lsn(dbenv, lsn, prev); \ - goto out; \ - } -#endif - -/* - * Helper for in-memory logs -- check whether an offset is in range - * in a ring buffer (inclusive of start, exclusive of end). - */ -struct __db_filestart { - u_int32_t file; - size_t b_off; - - SH_TAILQ_ENTRY links; /* Either on free or waiting list. */ -}; - -#define RINGBUF_LEN(lp, start, end) \ - ((start) < (end) ? \ - (end) - (start) : (lp)->buffer_size - ((start) - (end))) - -/* - * Internal macro to set pointer to the begin_lsn for generated - * logging routines. If begin_lsn is already set then do nothing. - * Return a pointer to the last lsn too. - */ -#undef DB_SET_TXN_LSNP -#define DB_SET_TXN_LSNP(txn, blsnp, llsnp) do { \ - DB_LSN *__lsnp; \ - TXN_DETAIL *__td; \ - __td = (txn)->td; \ - *(llsnp) = &__td->last_lsn; \ - while (__td->parent != INVALID_ROFF) \ - __td = R_ADDR(&(txn)->mgrp->reginfo, __td->parent); \ - __lsnp = &__td->begin_lsn; \ - if (IS_ZERO_LSN(*__lsnp)) \ - *(blsnp) = __lsnp; \ -} while (0) - -/* - * These are used in __log_backup to determine which LSN in the - * checkpoint record to compare and return. - */ -#define CKPLSN_CMP 0 -#define LASTCKP_CMP 1 - -/* - * Status codes indicating the validity of a log file examined by - * __log_valid(). - */ -typedef enum { - DB_LV_INCOMPLETE, - DB_LV_NONEXISTENT, - DB_LV_NORMAL, - DB_LV_OLD_READABLE, - DB_LV_OLD_UNREADABLE -} logfile_validity; - -#include "dbinc_auto/dbreg_auto.h" -#include "dbinc_auto/dbreg_ext.h" -#include "dbinc_auto/log_ext.h" -#endif /* !_LOG_H_ */ diff --git a/storage/bdb/dbinc/mp.h b/storage/bdb/dbinc/mp.h deleted file mode 100644 index 86e1905e950..00000000000 --- a/storage/bdb/dbinc/mp.h +++ /dev/null @@ -1,377 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: mp.h,v 12.5 2005/08/08 14:52:30 bostic Exp $ - */ - -#ifndef _DB_MP_H_ -#define _DB_MP_H_ - -struct __bh; typedef struct __bh BH; -struct __db_mpool_hash; typedef struct __db_mpool_hash DB_MPOOL_HASH; -struct __db_mpreg; typedef struct __db_mpreg DB_MPREG; -struct __mpool; typedef struct __mpool MPOOL; - - /* We require at least 20KB of cache. 
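
/*
 * Editorial sketch, not from the original tree: RINGBUF_LEN() above
 * computes how many bytes of the in-memory log ring buffer lie between
 * two offsets, wrapping at buffer_size.  The same arithmetic as a
 * standalone function:
 */
#include <stdint.h>

static uint32_t
ringbuf_len(uint32_t buffer_size, uint32_t start, uint32_t end)
{
	return (start < end ? end - start : buffer_size - (start - end));
}
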
*/ -#define DB_CACHESIZE_MIN (20 * 1024) - -/* - * DB_MPOOLFILE initialization methods cannot be called after open is called, - * other methods cannot be called before open is called - */ -#define MPF_ILLEGAL_AFTER_OPEN(dbmfp, name) \ - if (F_ISSET(dbmfp, MP_OPEN_CALLED)) \ - return (__db_mi_open((dbmfp)->dbenv, name, 1)); -#define MPF_ILLEGAL_BEFORE_OPEN(dbmfp, name) \ - if (!F_ISSET(dbmfp, MP_OPEN_CALLED)) \ - return (__db_mi_open((dbmfp)->dbenv, name, 0)); - -typedef enum { - DB_SYNC_ALLOC, /* Flush for allocation. */ - DB_SYNC_CACHE, /* Checkpoint or flush entire cache. */ - DB_SYNC_FILE, /* Flush file. */ - DB_SYNC_TRICKLE /* Trickle sync. */ -} db_sync_op; - -/* - * DB_MPOOL -- - * Per-process memory pool structure. - */ -struct __db_mpool { - /* These fields need to be protected for multi-threaded support. */ - db_mutex_t mutex; /* Thread mutex. */ - - /* - * DB_MPREG structure for the DB pgin/pgout routines. - * - * Linked list of application-specified pgin/pgout routines. - */ - DB_MPREG *pg_inout; - LIST_HEAD(__db_mpregh, __db_mpreg) dbregq; - - /* List of DB_MPOOLFILE's. */ - TAILQ_HEAD(__db_mpoolfileh, __db_mpoolfile) dbmfq; - - /* - * The dbenv, nreg and reginfo fields are not thread protected, - * as they are initialized during mpool creation, and not modified - * again. - */ - DB_ENV *dbenv; /* Enclosing environment. */ - - u_int32_t nreg; /* N underlying cache regions. */ - REGINFO *reginfo; /* Underlying cache regions. */ -}; - -/* - * DB_MPREG -- - * DB_MPOOL registry of pgin/pgout functions. - */ -struct __db_mpreg { - LIST_ENTRY(__db_mpreg) q; /* Linked list. */ - - int32_t ftype; /* File type. */ - /* Pgin, pgout routines. */ - int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *)); - int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *)); -}; - -/* - * NCACHE -- - * Select a cache based on the file and the page number. Assumes accesses - * are uniform across pages, which is probably OK. What we really want to - * avoid is anything that puts all pages from any single file in the same - * cache, as we expect that file access will be bursty, and to avoid - * putting all page number N pages in the same cache as we expect access - * to the metapages (page 0) and the root of a btree (page 1) to be much - * more frequent than a random data page. - */ -#define NCACHE(mp, mf_offset, pgno) \ - (((pgno) ^ ((u_int32_t)(mf_offset) >> 3)) % ((MPOOL *)mp)->nreg) - -/* - * NBUCKET -- - * We make the assumption that early pages of the file are more likely - * to be retrieved than the later pages, which means the top bits will - * be more interesting for hashing as they're less likely to collide. - * That said, as 512 8K pages represents a 4MB file, so only reasonably - * large files will have page numbers with any other than the bottom 9 - * bits set. We XOR in the MPOOL offset of the MPOOLFILE that backs the - * page, since that should also be unique for the page. We don't want - * to do anything very fancy -- speed is more important to us than using - * good hashing. - */ -#define NBUCKET(mc, mf_offset, pgno) \ - (((pgno) ^ ((mf_offset) << 9)) % (mc)->htab_buckets) - -/* Macros to lock/unlock the mpool region as a whole. */ -#define MPOOL_SYSTEM_LOCK(dbenv) \ - MUTEX_LOCK(dbenv, ((MPOOL *)((DB_MPOOL *) \ - (dbenv)->mp_handle)->reginfo[0].primary)->mtx_region) -#define MPOOL_SYSTEM_UNLOCK(dbenv) \ - MUTEX_UNLOCK(dbenv, ((MPOOL *)((DB_MPOOL *) \ - (dbenv)->mp_handle)->reginfo[0].primary)->mtx_region) - -/* Macros to lock/unlock a specific mpool region. 
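
/*
 * Editorial sketch, not from the original tree: NCACHE() and NBUCKET()
 * above pick a cache region and a hash bucket from the page number and
 * the file's region offset.  The same arithmetic as standalone functions
 * (names invented):
 */
#include <stdint.h>

static uint32_t
pick_cache(uint32_t nreg, uint32_t mf_offset, uint32_t pgno)
{
	return ((pgno ^ (mf_offset >> 3)) % nreg);
}

static uint32_t
pick_bucket(uint32_t htab_buckets, uint32_t mf_offset, uint32_t pgno)
{
	return ((pgno ^ (mf_offset << 9)) % htab_buckets);
}
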
*/ -#define MPOOL_REGION_LOCK(dbenv, infop) \ - MUTEX_LOCK(dbenv, ((MPOOL *)(infop)->primary)->mtx_region) -#define MPOOL_REGION_UNLOCK(dbenv, infop) \ - MUTEX_UNLOCK(dbenv, ((MPOOL *)(infop)->primary)->mtx_region) - -/* - * MPOOL -- - * Shared memory pool region. - */ -struct __mpool { - /* - * The memory pool can be broken up into individual pieces/files. - * Not what we would have liked, but on Solaris you can allocate - * only a little more than 2GB of memory in a contiguous chunk, - * and I expect to see more systems with similar issues. - * - * While this structure is duplicated in each piece of the cache, - * the first of these pieces/files describes the entire pool, the - * second only describe a piece of the cache. - */ - db_mutex_t mtx_region; /* Region mutex. */ - - /* - * The lsn field and list of underlying MPOOLFILEs are thread protected - * by the region lock. - */ - DB_LSN lsn; /* Maximum checkpoint LSN. */ - - SH_TAILQ_HEAD(__mpfq) mpfq; /* List of MPOOLFILEs. */ - - /* Configuration information: protected by the region lock. */ - size_t mp_mmapsize; /* Maximum file size for mmap. */ - int mp_maxopenfd; /* Maximum open file descriptors. */ - int mp_maxwrite; /* Maximum buffers to write. */ - int mp_maxwrite_sleep; /* Sleep after writing max buffers. */ - - /* - * The nreg, regids and maint_off fields are not thread protected, - * as they are initialized during mpool creation, and not modified - * again. - */ - u_int32_t nreg; /* Number of underlying REGIONS. */ - roff_t regids; /* Array of underlying REGION Ids. */ - - /* - * The following structure fields only describe the per-cache portion - * of the region. - * - * The htab and htab_buckets fields are not thread protected as they - * are initialized during mpool creation, and not modified again. - * - * The last_checked and lru_count fields are thread protected by - * the region lock. - */ - u_int32_t htab_buckets; /* Number of hash table entries. */ - roff_t htab; /* Hash table offset. */ - u_int32_t last_checked; /* Last bucket checked for free. */ - u_int32_t lru_count; /* Counter for buffer LRU */ - - /* - * The stat fields are generally not thread protected, and cannot be - * trusted. Note that st_pages is an exception, and is always updated - * inside a region lock (although it is sometimes read outside of the - * region lock). - */ - DB_MPOOL_STAT stat; /* Per-cache mpool statistics. */ - - /* - * We track page puts so that we can decide when allocation is never - * going to succeed. We don't lock the field, all we care about is - * if it changes. - */ - u_int32_t put_counter; /* Count of page put calls. */ -}; - -struct __db_mpool_hash { - db_mutex_t mtx_hash; /* Per-bucket mutex. */ - - DB_HASHTAB hash_bucket; /* Head of bucket. */ - - u_int32_t hash_page_dirty;/* Count of dirty pages. */ - u_int32_t hash_priority; /* Minimum priority of bucket buffer. */ -}; - -/* - * The base mpool priority is 1/4th of the name space, or just under 2^30. - * When the LRU counter wraps, we shift everybody down to a base-relative - * value. - */ -#define MPOOL_BASE_DECREMENT (UINT32_MAX - (UINT32_MAX / 4)) - -/* - * Mpool priorities from low to high. Defined in terms of fractions of the - * buffers in the pool. - */ -#define MPOOL_PRI_VERY_LOW -1 /* Dead duck. Check and set to 0. */ -#define MPOOL_PRI_LOW -2 /* Low. */ -#define MPOOL_PRI_DEFAULT 0 /* No adjustment -- special case.*/ -#define MPOOL_PRI_HIGH 10 /* With the dirty buffers. */ -#define MPOOL_PRI_DIRTY 10 /* Dirty gets a 10% boost. 
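
/*
 * Editorial sketch, not from the original tree: the comment above says
 * that when the LRU counter wraps, buffer priorities are shifted down by
 * MPOOL_BASE_DECREMENT so their relative order survives.  A standalone
 * illustration over a plain array (the reset policy here is a
 * simplification, not BDB's exact code):
 */
#include <stdint.h>

#define SKETCH_BASE_DECREMENT (UINT32_MAX - (UINT32_MAX / 4))

static void
reset_lru_priorities(uint32_t *prio, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		prio[i] = prio[i] < SKETCH_BASE_DECREMENT ?
		    0 : prio[i] - SKETCH_BASE_DECREMENT;
}
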
*/ -#define MPOOL_PRI_VERY_HIGH 1 /* Add number of buffers in pool. */ - -/* - * MPOOLFILE -- - * Shared DB_MPOOLFILE information. - */ -struct __mpoolfile { - db_mutex_t mutex; /* MPOOLFILE mutex. */ - - /* Protected by MPOOLFILE mutex. */ - u_int32_t mpf_cnt; /* Ref count: DB_MPOOLFILEs. */ - u_int32_t block_cnt; /* Ref count: blocks in cache. */ - - roff_t path_off; /* File name location. */ - - /* - * The following are used for file compaction processing. - * They are only used when a thread is in the process - * of trying to move free pages to the end of the file. - * Other threads may look here when freeing a page. - * Protected by a lock on the metapage. - */ - u_int32_t free_ref; /* Refcount to freelist. */ - u_int32_t free_cnt; /* Count of free pages. */ - size_t free_size; /* Allocated size of free list. */ - roff_t free_list; /* Offset to free list. */ - - /* - * We normally don't lock the deadfile field when we read it since we - * only care if the field is zero or non-zero. We do lock on read when - * searching for a matching MPOOLFILE -- see that code for more detail. - */ - int32_t deadfile; /* Dirty pages can be discarded. */ - - /* Protected by mpool cache 0 region lock. */ - SH_TAILQ_ENTRY q; /* List of MPOOLFILEs */ - db_pgno_t last_pgno; /* Last page in the file. */ - db_pgno_t orig_last_pgno; /* Original last page in the file. */ - db_pgno_t maxpgno; /* Maximum page number. */ - - /* - * None of the following fields are thread protected. - * - * There are potential races with the ftype field because it's read - * without holding a lock. However, it has to be set before adding - * any buffers to the cache that depend on it being set, so there - * would need to be incorrect operation ordering to have a problem. - */ - int32_t ftype; /* File type. */ - - /* - * There are potential races with the priority field because it's read - * without holding a lock. However, a collision is unlikely and if it - * happens is of little consequence. - */ - int32_t priority; /* Priority when unpinning buffer. */ - - /* - * There are potential races with the file_written field (many threads - * may be writing blocks at the same time), and with no_backing_file - * and unlink_on_close fields, as they may be set while other threads - * are reading them. However, we only care if the field value is zero - * or non-zero, so don't lock the memory. - * - * !!! - * Theoretically, a 64-bit architecture could put two of these fields - * in a single memory operation and we could race. I have never seen - * an architecture where that's a problem, and I believe Java requires - * that to never be the case. - * - * File_written is set whenever a buffer is marked dirty in the cache. - * It can be cleared in some cases, after all dirty buffers have been - * written AND the file has been flushed to disk. - */ - int32_t file_written; /* File was written. */ - int32_t no_backing_file; /* Never open a backing file. */ - int32_t unlink_on_close; /* Unlink file on last close. */ - - /* - * We do not protect the statistics in "stat" because of the cost of - * the mutex in the get/put routines. There is a chance that a count - * will get lost. - */ - DB_MPOOL_FSTAT stat; /* Per-file mpool statistics. */ - - /* - * The remaining fields are initialized at open and never subsequently - * modified. - */ - int32_t lsn_off; /* Page's LSN offset. */ - u_int32_t clear_len; /* Bytes to clear on page create. */ - - roff_t fileid_off; /* File ID string location. */ - - roff_t pgcookie_len; /* Pgin/pgout cookie length. 
*/ - roff_t pgcookie_off; /* Pgin/pgout cookie location. */ - - /* - * The flags are initialized at open and never subsequently modified. - */ -#define MP_CAN_MMAP 0x001 /* If the file can be mmap'd. */ -#define MP_DIRECT 0x002 /* No OS buffering. */ -#define MP_DURABLE_UNKNOWN 0x004 /* We don't care about durability. */ -#define MP_EXTENT 0x008 /* Extent file. */ -#define MP_FAKE_DEADFILE 0x010 /* Deadfile field: fake flag. */ -#define MP_FAKE_FILEWRITTEN 0x020 /* File_written field: fake flag. */ -#define MP_FAKE_NB 0x040 /* No_backing_file field: fake flag. */ -#define MP_FAKE_UOC 0x080 /* Unlink_on_close field: fake flag. */ -#define MP_NOT_DURABLE 0x100 /* File is not durable. */ -#define MP_TEMP 0x200 /* Backing file is a temporary. */ - u_int32_t flags; -}; - -/* - * Flags to __memp_bh_free. - */ -#define BH_FREE_FREEMEM 0x01 -#define BH_FREE_UNLOCKED 0x02 - -/* - * BH -- - * Buffer header. - */ -struct __bh { - db_mutex_t mtx_bh; /* Buffer thread/process mutex. */ - - u_int16_t ref; /* Reference count. */ - u_int16_t ref_sync; /* Sync wait-for reference count. */ - -#define BH_CALLPGIN 0x001 /* Convert the page before use. */ -#define BH_DIRTY 0x002 /* Page was modified. */ -#define BH_DIRTY_CREATE 0x004 /* Page created, must be written. */ -#define BH_DISCARD 0x008 /* Page is useless. */ -#define BH_LOCKED 0x010 /* Page is locked (I/O in progress). */ -#define BH_TRASH 0x020 /* Page is garbage. */ - u_int16_t flags; - - u_int32_t priority; /* LRU priority. */ - SH_TAILQ_ENTRY hq; /* MPOOL hash bucket queue. */ - - db_pgno_t pgno; /* Underlying MPOOLFILE page number. */ - roff_t mf_offset; /* Associated MPOOLFILE offset. */ - - /* - * !!! - * This array must be at least size_t aligned -- the DB access methods - * put PAGE and other structures into it, and then access them directly. - * (We guarantee size_t alignment to applications in the documentation, - * too.) - */ - u_int8_t buf[1]; /* Variable length data. */ -}; -/* - * Flags to __memp_ftruncate. - */ -#define MP_TRUNC_RECOVER 0x01 - -#include "dbinc_auto/mp_ext.h" -#endif /* !_DB_MP_H_ */ diff --git a/storage/bdb/dbinc/mutex.h b/storage/bdb/dbinc/mutex.h deleted file mode 100644 index 4937e4f7d3a..00000000000 --- a/storage/bdb/dbinc/mutex.h +++ /dev/null @@ -1,142 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: mutex.h,v 12.14 2005/10/13 00:56:52 bostic Exp $ - */ - -#ifndef _DB_MUTEX_H_ -#define _DB_MUTEX_H_ - -/* - * Mutexes are represented by unsigned, 32-bit integral values. As the - * OOB value is 0, mutexes can be initialized by zero-ing out the memory - * in which they reside. - */ -#define MUTEX_INVALID 0 - -/* - * We track mutex allocations by ID. 
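
/*
 * Editorial sketch, not from the original tree: the BH structure above
 * ends in buf[1] so the page image is laid out directly after the header
 * (the pre-C99 "struct hack").  Allocation therefore reserves the header
 * and the page in one chunk (names invented):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct sketch_bh {
	uint32_t pgno;
	uint8_t buf[1];				/* Variable length data. */
};

static struct sketch_bh *
sketch_bh_alloc(size_t pagesize)
{
	/* Header up to buf[], plus the page that follows it. */
	return (malloc(offsetof(struct sketch_bh, buf) + pagesize));
}
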
- */ -#define MTX_APPLICATION 1 -#define MTX_DB_HANDLE 2 -#define MTX_ENV_DBLIST 3 -#define MTX_ENV_REGION 4 -#define MTX_LOCK_REGION 5 -#define MTX_LOGICAL_LOCK 6 -#define MTX_LOG_FILENAME 7 -#define MTX_LOG_FLUSH 8 -#define MTX_LOG_HANDLE 9 -#define MTX_LOG_REGION 10 -#define MTX_MPOOLFILE_HANDLE 11 -#define MTX_MPOOL_BUFFER 12 -#define MTX_MPOOL_FH 13 -#define MTX_MPOOL_HANDLE 14 -#define MTX_MPOOL_HASH_BUCKET 15 -#define MTX_MPOOL_REGION 16 -#define MTX_MUTEX_REGION 17 -#define MTX_MUTEX_TEST 18 -#define MTX_REP_DATABASE 19 -#define MTX_REP_REGION 20 -#define MTX_SEQUENCE 21 -#define MTX_TWISTER 22 -#define MTX_TXN_ACTIVE 23 -#define MTX_TXN_CHKPT 24 -#define MTX_TXN_COMMIT 25 -#define MTX_TXN_REGION 26 -#define MTX_MAX_ENTRY 26 - -/* Redirect mutex calls to the correct functions. */ -#if defined(HAVE_MUTEX_PTHREADS) || \ - defined(HAVE_MUTEX_SOLARIS_LWP) || \ - defined(HAVE_MUTEX_UI_THREADS) -#define __mutex_init(a, b, c) __db_pthread_mutex_init(a, b, c) -#define __mutex_lock(a, b) __db_pthread_mutex_lock(a, b) -#define __mutex_unlock(a, b) __db_pthread_mutex_unlock(a, b) -#define __mutex_destroy(a, b) __db_pthread_mutex_destroy(a, b) -#endif - -#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC) -#define __mutex_init(a, b, c) __db_win32_mutex_init(a, b, c) -#define __mutex_lock(a, b) __db_win32_mutex_lock(a, b) -#define __mutex_unlock(a, b) __db_win32_mutex_unlock(a, b) -#define __mutex_destroy(a, b) __db_win32_mutex_destroy(a, b) -#endif - -#if defined(HAVE_MUTEX_FCNTL) -#define __mutex_init(a, b, c) __db_fcntl_mutex_init(a, b, c) -#define __mutex_lock(a, b) __db_fcntl_mutex_lock(a, b) -#define __mutex_unlock(a, b) __db_fcntl_mutex_unlock(a, b) -#define __mutex_destroy(a, b) __db_fcntl_mutex_destroy(a, b) -#endif - -#ifndef __mutex_init /* Test-and-set is the default */ -#define __mutex_init(a, b, c) __db_tas_mutex_init(a, b, c) -#define __mutex_lock(a, b) __db_tas_mutex_lock(a, b) -#define __mutex_unlock(a, b) __db_tas_mutex_unlock(a, b) -#define __mutex_destroy(a, b) __db_tas_mutex_destroy(a, b) -#endif - -/* - * Lock/unlock a mutex. If the mutex was never required, the thread of - * control can proceed without it. - * - * We never fail to acquire or release a mutex without panicing. Simplify - * the macros to always return a panic value rather than saving the actual - * return value of the mutex routine. - */ -#define MUTEX_LOCK(dbenv, mutex) do { \ - if ((mutex) != MUTEX_INVALID && \ - __mutex_lock(dbenv, mutex) != 0) \ - return (DB_RUNRECOVERY); \ -} while (0) -#define MUTEX_UNLOCK(dbenv, mutex) do { \ - if ((mutex) != MUTEX_INVALID && \ - __mutex_unlock(dbenv, mutex) != 0) \ - return (DB_RUNRECOVERY); \ -} while (0) - -/* - * Berkeley DB ports may require single-threading at places in the code. - */ -#ifdef HAVE_MUTEX_VXWORKS -#include "taskLib.h" -/* - * Use the taskLock() mutex to eliminate a race where two tasks are - * trying to initialize the global lock at the same time. 
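
/*
 * Editorial sketch, not from the original tree: MUTEX_LOCK() and
 * MUTEX_UNLOCK() above convert any mutex failure into a fatal
 * "run recovery" return instead of propagating the raw error.  A
 * standalone pthread version of the same policy (the error value is a
 * stand-in, not the real DB_RUNRECOVERY constant):
 */
#include <pthread.h>

#define SKETCH_RUN_RECOVERY (-30974)

static int
locked_increment(pthread_mutex_t *mtx, int *counter)
{
	if (pthread_mutex_lock(mtx) != 0)
		return (SKETCH_RUN_RECOVERY);
	++*counter;
	if (pthread_mutex_unlock(mtx) != 0)
		return (SKETCH_RUN_RECOVERY);
	return (0);
}
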
- */ -#define DB_BEGIN_SINGLE_THREAD do { \ - if (DB_GLOBAL(db_global_init)) \ - (void)semTake(DB_GLOBAL(db_global_lock), WAIT_FOREVER); \ - else { \ - taskLock(); \ - if (DB_GLOBAL(db_global_init)) { \ - taskUnlock(); \ - (void)semTake(DB_GLOBAL(db_global_lock), \ - WAIT_FOREVER); \ - continue; \ - } \ - DB_GLOBAL(db_global_lock) = \ - semBCreate(SEM_Q_FIFO, SEM_EMPTY); \ - if (DB_GLOBAL(db_global_lock) != NULL) \ - DB_GLOBAL(db_global_init) = 1; \ - taskUnlock(); \ - } \ -} while (DB_GLOBAL(db_global_init) == 0) -#define DB_END_SINGLE_THREAD (void)semGive(DB_GLOBAL(db_global_lock)) -#endif - -/* - * Single-threading defaults to a no-op. - */ -#ifndef DB_BEGIN_SINGLE_THREAD -#define DB_BEGIN_SINGLE_THREAD -#endif -#ifndef DB_END_SINGLE_THREAD -#define DB_END_SINGLE_THREAD -#endif - -#include "dbinc_auto/mutex_ext.h" -#endif /* !_DB_MUTEX_H_ */ diff --git a/storage/bdb/dbinc/mutex_int.h b/storage/bdb/dbinc/mutex_int.h deleted file mode 100644 index a99ff614a4c..00000000000 --- a/storage/bdb/dbinc/mutex_int.h +++ /dev/null @@ -1,844 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: mutex_int.h,v 12.17 2005/11/08 22:26:49 mjc Exp $ - */ - -#ifndef _DB_MUTEX_INT_H_ -#define _DB_MUTEX_INT_H_ - -/********************************************************************* - * POSIX.1 pthreads interface. - *********************************************************************/ -#ifdef HAVE_MUTEX_PTHREADS -#include - -#define MUTEX_FIELDS \ - pthread_mutex_t mutex; /* Mutex. */ \ - pthread_cond_t cond; /* Condition variable. */ -#endif - -#ifdef HAVE_MUTEX_UI_THREADS -#include -#endif - -/********************************************************************* - * Solaris lwp threads interface. - * - * !!! - * We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of - * which are available), for two reasons. First, the Solaris C library - * includes versions of the both UI and POSIX thread mutex interfaces, but - * they are broken in that they don't support inter-process locking, and - * there's no way to detect it, e.g., calls to configure the mutexes for - * inter-process locking succeed without error. So, we use LWP mutexes so - * that we don't fail in fairly undetectable ways because the application - * wasn't linked with the appropriate threads library. Second, there were - * bugs in SunOS 5.7 (Solaris 7) where if an application loaded the C library - * before loading the libthread/libpthread threads libraries (e.g., by using - * dlopen to load the DB library), the pwrite64 interface would be translated - * into a call to pwrite and DB would drop core. - *********************************************************************/ -#ifdef HAVE_MUTEX_SOLARIS_LWP -/* - * XXX - * Don't change to -- although lwp.h is listed in the - * Solaris manual page as the correct include to use, it causes the Solaris - * compiler on SunOS 2.6 to fail. - */ -#include - -#define MUTEX_FIELDS \ - lwp_mutex_t mutex; /* Mutex. */ \ - lwp_cond_t cond; /* Condition variable. */ -#endif - -/********************************************************************* - * Solaris/Unixware threads interface. - *********************************************************************/ -#ifdef HAVE_MUTEX_UI_THREADS -#include -#include - -#define MUTEX_FIELDS \ - mutex_t mutex; /* Mutex. */ \ - cond_t cond; /* Condition variable. 
*/ -#endif - -/********************************************************************* - * AIX C library functions. - *********************************************************************/ -#ifdef HAVE_MUTEX_AIX_CHECK_LOCK -#include -typedef int tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_INIT(x) 0 -#define MUTEX_SET(x) (!_check_lock(x, 0, 1)) -#define MUTEX_UNSET(x) _clear_lock(x, 0) -#endif -#endif - -/********************************************************************* - * Apple/Darwin library functions. - *********************************************************************/ -#ifdef HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY -typedef u_int32_t tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -extern int _spin_lock_try(tsl_t *); -extern void _spin_unlock(tsl_t *); -#define MUTEX_SET(tsl) _spin_lock_try(tsl) -#define MUTEX_UNSET(tsl) _spin_unlock(tsl) -#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0) -#endif -#endif - -/********************************************************************* - * General C library functions (msemaphore). - * - * !!! - * Check for HPPA as a special case, because it requires unusual alignment, - * and doesn't support semaphores in malloc(3) or shmget(2) memory. - * - * !!! - * Do not remove the MSEM_IF_NOWAIT flag. The problem is that if a single - * process makes two msem_lock() calls in a row, the second one returns an - * error. We depend on the fact that we can lock against ourselves in the - * locking subsystem, where we set up a mutex so that we can block ourselves. - * Tested on OSF1 v4.0. - *********************************************************************/ -#ifdef HAVE_MUTEX_HPPA_MSEM_INIT -#define MUTEX_ALIGN 16 -#endif - -#if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT) -#include -typedef msemaphore tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_INIT(x) (msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0) -#define MUTEX_SET(x) (!msem_lock(x, MSEM_IF_NOWAIT)) -#define MUTEX_UNSET(x) msem_unlock(x, 0) -#endif -#endif - -/********************************************************************* - * Plan 9 library functions. - *********************************************************************/ -#ifdef HAVE_MUTEX_PLAN9 -typedef Lock tsl_t; - -#define MUTEX_INIT(x) (memset(x, 0, sizeof(Lock)), 0) -#define MUTEX_SET(x) canlock(x) -#define MUTEX_UNSET(x) unlock(x) -#endif - -/********************************************************************* - * Reliant UNIX C library functions. - *********************************************************************/ -#ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN -#include -typedef spinlock_t tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_INIT(x) (initspin(x, 1), 0) -#define MUTEX_SET(x) (cspinlock(x) == 0) -#define MUTEX_UNSET(x) spinunlock(x) -#endif -#endif - -/********************************************************************* - * General C library functions (POSIX 1003.1 sema_XXX). - * - * !!! - * Never selected by autoconfig in this release (semaphore calls are known - * to not work in Solaris 5.5). - *********************************************************************/ -#ifdef HAVE_MUTEX_SEMA_INIT -#include -typedef sema_t tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_DESTROY(x) sema_destroy(x) -#define MUTEX_INIT(x) (sema_init(x, 1, USYNC_PROCESS, NULL) != 0) -#define MUTEX_SET(x) (sema_wait(x) == 0) -#define MUTEX_UNSET(x) sema_post(x) -#endif -#endif - -/********************************************************************* - * SGI C library functions. 
- *********************************************************************/ -#ifdef HAVE_MUTEX_SGI_INIT_LOCK -#include -typedef abilock_t tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_INIT(x) (init_lock(x) != 0) -#define MUTEX_SET(x) (!acquire_lock(x)) -#define MUTEX_UNSET(x) release_lock(x) -#endif -#endif - -/********************************************************************* - * Solaris C library functions. - * - * !!! - * These are undocumented functions, but they're the only ones that work - * correctly as far as we know. - *********************************************************************/ -#ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY -#include -typedef lock_t tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_INIT(x) 0 -#define MUTEX_SET(x) _lock_try(x) -#define MUTEX_UNSET(x) _lock_clear(x) -#endif -#endif - -/********************************************************************* - * VMS. - *********************************************************************/ -#ifdef HAVE_MUTEX_VMS -#include ; -#include -typedef volatile unsigned char tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#ifdef __ALPHA -#define MUTEX_SET(tsl) (!__TESTBITSSI(tsl, 0)) -#else /* __VAX */ -#define MUTEX_SET(tsl) (!(int)_BBSSI(0, tsl)) -#endif -#define MUTEX_UNSET(tsl) (*(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * VxWorks - * Use basic binary semaphores in VxWorks, as we currently do not need - * any special features. We do need the ability to single-thread the - * entire system, however, because VxWorks doesn't support the open(2) - * flag O_EXCL, the mechanism we normally use to single thread access - * when we're first looking for a DB environment. - *********************************************************************/ -#ifdef HAVE_MUTEX_VXWORKS -#include "taskLib.h" -typedef SEM_ID tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_SET(tsl) (semTake((*tsl), WAIT_FOREVER) == OK) -#define MUTEX_UNSET(tsl) (semGive((*tsl))) -#define MUTEX_INIT(tsl) \ - ((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL) -#define MUTEX_DESTROY(tsl) semDelete(*tsl) -#endif -#endif - -/********************************************************************* - * Win16 - * - * Win16 spinlocks are simple because we cannot possibly be preempted. - * - * !!! - * We should simplify this by always returning a no-need-to-lock lock - * when we initialize the mutex. 
- *********************************************************************/ -#ifdef HAVE_MUTEX_WIN16 -typedef unsigned int tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_INIT(x) 0 -#define MUTEX_SET(tsl) (*(tsl) = 1) -#define MUTEX_UNSET(tsl) (*(tsl) = 0) -#endif -#endif - -/********************************************************************* - * Win32 - *********************************************************************/ -#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC) -#define MUTEX_FIELDS \ - LONG volatile tas; \ - LONG nwaiters; \ - u_int32_t id; /* ID used for creating events */ \ - -#if defined(LOAD_ACTUAL_MUTEX_CODE) -#define MUTEX_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1)) -#define MUTEX_UNSET(tsl) InterlockedExchange((PLONG)tsl, 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) - -/* - * From Intel's performance tuning documentation (and see SR #6975): - * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf - * - * "For this reason, it is highly recommended that you insert the PAUSE - * instruction into all spin-wait code immediately. Using the PAUSE - * instruction does not affect the correctness of programs on existing - * platforms, and it improves performance on Pentium 4 processor platforms." - */ -#ifdef HAVE_MUTEX_WIN32 -#ifndef _WIN64 -#define MUTEX_PAUSE {__asm{_emit 0xf3}; __asm{_emit 0x90}} -#endif -#endif -#ifdef HAVE_MUTEX_WIN32_GCC -#define MUTEX_PAUSE asm volatile ("rep; nop" : : ); -#endif -#endif -#endif - -/********************************************************************* - * 68K/gcc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY -typedef unsigned char tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* gcc/68K: 0 is clear, 1 is set. */ -#define MUTEX_SET(tsl) ({ \ - register tsl_t *__l = (tsl); \ - int __r; \ - asm volatile("tas %1; \n \ - seq %0" \ - : "=dm" (__r), "=m" (*__l) \ - : "1" (*__l) \ - ); \ - __r & 1; \ -}) - -#define MUTEX_UNSET(tsl) (*(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * ALPHA/gcc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY -typedef u_int32_t tsl_t; - -#define MUTEX_ALIGN 4 - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * For gcc/alpha. Should return 0 if could not acquire the lock, 1 if - * lock was acquired properly. - */ -static inline int -MUTEX_SET(tsl_t *tsl) { - register tsl_t *__l = tsl; - register tsl_t __r; - asm volatile( - "1: ldl_l %0,%2\n" - " blbs %0,2f\n" - " or $31,1,%0\n" - " stl_c %0,%1\n" - " beq %0,3f\n" - " mb\n" - " br 3f\n" - "2: xor %0,%0\n" - "3:" - : "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory"); - return __r; -} - -/* - * Unset mutex. Judging by Alpha Architecture Handbook, the mb instruction - * might be necessary before unlocking - */ -static inline int -MUTEX_UNSET(tsl_t *tsl) { - asm volatile(" mb\n"); - return *tsl = 0; -} - -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * Tru64/cc assembly. 
- *********************************************************************/ -#ifdef HAVE_MUTEX_TRU64_CC_ASSEMBLY -typedef volatile u_int32_t tsl_t; - -#define MUTEX_ALIGN 4 - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#include -#define MUTEX_SET(tsl) (__LOCK_LONG_RETRY((tsl), 1) != 0) -#define MUTEX_UNSET(tsl) (__UNLOCK_LONG(tsl)) - -#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0) -#endif -#endif - -/********************************************************************* - * ARM/gcc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY -typedef unsigned char tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* gcc/arm: 0 is clear, 1 is set. */ -#define MUTEX_SET(tsl) ({ \ - int __r; \ - asm volatile( \ - "swpb %0, %1, [%2]\n\t" \ - "eor %0, %0, #1\n\t" \ - : "=&r" (__r) \ - : "r" (1), "r" (tsl) \ - ); \ - __r & 1; \ -}) - -#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * HPPA/gcc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY -typedef u_int32_t tsl_t; - -#define MUTEX_ALIGN 16 - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * The PA-RISC has a "load and clear" instead of a "test and set" instruction. - * The 32-bit word used by that instruction must be 16-byte aligned. We could - * use the "aligned" attribute in GCC but that doesn't work for stack variables. - */ -#define MUTEX_SET(tsl) ({ \ - register tsl_t *__l = (tsl); \ - int __r; \ - asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l)); \ - __r & 1; \ -}) - -#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = -1) -#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0) -#endif -#endif - -/********************************************************************* - * IA64/gcc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY -typedef volatile unsigned char tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* gcc/ia64: 0 is clear, 1 is set. */ -#define MUTEX_SET(tsl) ({ \ - register tsl_t *__l = (tsl); \ - long __r; \ - asm volatile("xchg1 %0=%1,%2" : \ - "=r"(__r), "+m"(*__l) : "r"(1)); \ - __r ^ 1; \ -}) - -/* - * Store through a "volatile" pointer so we get a store with "release" - * semantics. - */ -#define MUTEX_UNSET(tsl) (*(tsl_t *)(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * PowerPC/gcc assembly. - *********************************************************************/ -#if defined(HAVE_MUTEX_PPC_GCC_ASSEMBLY) -typedef u_int32_t tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * The PowerPC does a sort of pseudo-atomic locking. You set up a - * 'reservation' on a chunk of memory containing a mutex by loading the - * mutex value with LWARX. If the mutex has an 'unlocked' (arbitrary) - * value, you then try storing into it with STWCX. If no other process or - * thread broke your 'reservation' by modifying the memory containing the - * mutex, then the STCWX succeeds; otherwise it fails and you try to get - * a reservation again. - * - * While mutexes are explicitly 4 bytes, a 'reservation' applies to an - * entire cache line, normally 32 bytes, aligned naturally. If the mutex - * lives near data that gets changed a lot, there's a chance that you'll - * see more broken reservations than you might otherwise. 
The only - * situation in which this might be a problem is if one processor is - * beating on a variable in the same cache block as the mutex while another - * processor tries to acquire the mutex. That's bad news regardless - * because of the way it bashes caches, but if you can't guarantee that a - * mutex will reside in a relatively quiescent cache line, you might - * consider padding the mutex to force it to live in a cache line by - * itself. No, you aren't guaranteed that cache lines are 32 bytes. Some - * embedded processors use 16-byte cache lines, while some 64-bit - * processors use 128-bit cache lines. But assuming a 32-byte cache line - * won't get you into trouble for now. - * - * If mutex locking is a bottleneck, then you can speed it up by adding a - * regular LWZ load before the LWARX load, so that you can test for the - * common case of a locked mutex without wasting cycles making a reservation. - * - * gcc/ppc: 0 is clear, 1 is set. - */ -static inline int -MUTEX_SET(int *tsl) { - int __r; - asm volatile ( -"0: \n\t" -" lwarx %0,0,%1 \n\t" -" cmpwi %0,0 \n\t" -" bne- 1f \n\t" -" stwcx. %1,0,%1 \n\t" -" isync \n\t" -" beq+ 2f \n\t" -" b 0b \n\t" -"1: \n\t" -" li %1,0 \n\t" -"2: \n\t" - : "=&r" (__r), "+r" (tsl) - : - : "cr0", "memory"); - return (int)tsl; -} - -static inline int -MUTEX_UNSET(tsl_t *tsl) { - asm volatile("sync" : : : "memory"); - return *tsl = 0; -} -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * OS/390 C - *********************************************************************/ -#ifdef HAVE_MUTEX_S390_CC_ASSEMBLY -typedef int tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * cs() is declared in but is built in to the compiler. - * Must use LANGLVL(EXTENDED) to get its declaration. - */ -#define MUTEX_SET(tsl) (!cs(&zero, (tsl), 1)) -#define MUTEX_UNSET(tsl) (*(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * S/390 32-bit assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY -typedef int tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* gcc/S390: 0 is clear, 1 is set. */ -static inline int -MUTEX_SET(tsl_t *tsl) { \ - register tsl_t *__l = (tsl); \ - int __r; \ - asm volatile( \ - " la 1,%1\n" \ - " lhi 0,1\n" \ - " l %0,%1\n" \ - "0: cs %0,0,0(1)\n" \ - " jl 0b" \ - : "=&d" (__r), "+m" (*__l) \ - : : "0", "1", "cc"); \ - return !__r; \ -} - -#define MUTEX_UNSET(tsl) (*(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * SCO/cc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY -typedef unsigned char tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * UnixWare has threads in libthread, but OpenServer doesn't (yet). - * - * cc/x86: 0 is clear, 1 is set. - */ -#if defined(__USLC__) -asm int -_tsl_set(void *tsl) -{ -%mem tsl - movl tsl, %ecx - movl $1, %eax - lock - xchgb (%ecx),%al - xorl $1,%eax -} -#endif - -#define MUTEX_SET(tsl) _tsl_set(tsl) -#define MUTEX_UNSET(tsl) (*(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * Sparc/gcc assembly. 
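[Editor's aside: the PowerPC comment above recommends keeping a mutex in a quiescent cache line so that neighbouring stores do not keep breaking the LWARX/STWCX reservation. A hedged sketch of the usual remedy, padding the lock out to a full line; the 64-byte figure is an assumption, since, as the comment notes, real line sizes range from 16 to 128 bytes.]

#define MY_CACHE_LINE   64              /* assumed line size, tune per target */

struct my_padded_lock {
        volatile unsigned int lock;     /* the test-and-set word itself */
        char pad[MY_CACHE_LINE - sizeof(unsigned int)]; /* keep neighbours out */
};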
- *********************************************************************/ -#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY -typedef unsigned char tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * - * The ldstub instruction takes the location specified by its first argument - * (a register containing a memory address) and loads its contents into its - * second argument (a register) and atomically sets the contents the location - * specified by its first argument to a byte of 1s. (The value in the second - * argument is never read, but only overwritten.) - * - * The stbar is needed for v8, and is implemented as membar #sync on v9, - * so is functional there as well. For v7, stbar may generate an illegal - * instruction and we have no way to tell what we're running on. Some - * operating systems notice and skip this instruction in the fault handler. - * - * gcc/sparc: 0 is clear, 1 is set. - */ -#define MUTEX_SET(tsl) ({ \ - register tsl_t *__l = (tsl); \ - register tsl_t __r; \ - __asm__ volatile \ - ("ldstub [%1],%0; stbar" \ - : "=r"( __r) : "r" (__l)); \ - !__r; \ -}) - -#define MUTEX_UNSET(tsl) (*(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * UTS/cc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY -typedef int tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -#define MUTEX_INIT(x) 0 -#define MUTEX_SET(x) (!uts_lock(x, 1)) -#define MUTEX_UNSET(x) (*(x) = 0) -#endif -#endif - -/********************************************************************* - * MIPS/gcc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_MIPS_GCC_ASSEMBLY -typedef u_int32_t tsl_t; - -#define MUTEX_ALIGN 4 - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* - * For gcc/MIPS. Should return 0 if could not acquire the lock, 1 if - * lock was acquired properly. - */ -static inline int -MUTEX_SET(tsl_t *tsl) { - register tsl_t *__l = tsl; - register tsl_t __r; - __asm__ __volatile__( - " .set push \n" - " .set mips2 \n" - " .set noreorder \n" - " .set nomacro \n" - "1: ll %0,%1 \n" - " bne %0,$0,1f \n" - " xori %0,%0,1 \n" - " sc %0,%1 \n" - " beql %0,$0,1b \n" - " xori %0,1 \n" - "1: .set pop " - : "=&r" (__r), "+R" (*__l)); - return __r; -} - -#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/********************************************************************* - * x86/gcc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY -typedef unsigned char tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* gcc/x86: 0 is clear, 1 is set. */ -#define MUTEX_SET(tsl) ({ \ - register tsl_t *__l = (tsl); \ - int __r; \ - asm volatile("movl $1,%%eax\n" \ - "lock\n" \ - "xchgb %1,%%al\n" \ - "xorl $1,%%eax" \ - : "=&a" (__r), "=m" (*__l) \ - : "m1" (*__l) \ - ); \ - __r & 1; \ -}) - -#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) - -/* - * From Intel's performance tuning documentation (and see SR #6975): - * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf - * - * "For this reason, it is highly recommended that you insert the PAUSE - * instruction into all spin-wait code immediately. Using the PAUSE - * instruction does not affect the correctness of programs on existing - * platforms, and it improves performance on Pentium 4 processor platforms." 
- */ -#define MUTEX_PAUSE asm volatile ("rep; nop" : : ); -#endif -#endif - -/********************************************************************* - * x86_64/gcc assembly. - *********************************************************************/ -#ifdef HAVE_MUTEX_X86_64_GCC_ASSEMBLY -typedef unsigned char tsl_t; - -#ifdef LOAD_ACTUAL_MUTEX_CODE -/* gcc/x86_64: 0 is clear, 1 is set. */ -#define MUTEX_SET(tsl) ({ \ - register tsl_t *__l = (tsl); \ - int __r; \ - asm volatile("mov $1,%%rax\n" \ - "lock\n" \ - "xchgb %1,%%al\n" \ - "xor $1,%%rax" \ - : "=&a" (__r), "=m" (*__l) \ - : "1m" (*__l) \ - ); \ - __r & 1; \ -}) - -#define MUTEX_UNSET(tsl) (*(tsl) = 0) -#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl) -#endif -#endif - -/* - * Mutex alignment defaults to sizeof(unsigned int). - * - * !!! - * Various systems require different alignments for mutexes (the worst we've - * seen so far is 16-bytes on some HP architectures). Malloc(3) is assumed - * to return reasonable alignment, all other mutex users must ensure proper - * alignment locally. - */ -#ifndef MUTEX_ALIGN -#define MUTEX_ALIGN sizeof(unsigned int) -#endif - -/* - * Mutex destruction defaults to a no-op. - */ -#ifndef MUTEX_DESTROY -#define MUTEX_DESTROY(x) -#endif - -/* - * DB_MUTEXMGR -- - * The mutex manager encapsulates the mutex system. - */ -typedef struct __db_mutexmgr { - /* These fields are never updated after creation, so not protected. */ - DB_ENV *dbenv; /* Environment */ - REGINFO reginfo; /* Region information */ - - void *mutex_array; /* Base of the mutex array */ -} DB_MUTEXMGR; - -/* Macros to lock/unlock the mutex region as a whole. */ -#define MUTEX_SYSTEM_LOCK(dbenv) \ - MUTEX_LOCK(dbenv, ((DB_MUTEXREGION *)((DB_MUTEXMGR *) \ - (dbenv)->mutex_handle)->reginfo.primary)->mtx_region) -#define MUTEX_SYSTEM_UNLOCK(dbenv) \ - MUTEX_UNLOCK(dbenv, ((DB_MUTEXREGION *)((DB_MUTEXMGR *) \ - (dbenv)->mutex_handle)->reginfo.primary)->mtx_region) - -/* - * DB_MUTEXREGION -- - * The primary mutex data structure in the shared memory region. - */ -typedef struct __db_mutexregion { - /* These fields are initialized at create time and never modified. */ - roff_t mutex_offset; /* Offset of mutex array */ - size_t mutex_size; /* Size of the aligned mutex */ - roff_t thread_off; /* Offset of the thread area. */ - - db_mutex_t mtx_region; /* Region mutex. */ - - /* Protected using the region mutex. */ - u_int32_t mutex_next; /* Next free mutex */ - - DB_MUTEX_STAT stat; /* Mutex statistics */ -} DB_MUTEXREGION; - -typedef struct __mutex_t { /* Mutex. */ -#ifdef MUTEX_FIELDS - MUTEX_FIELDS -#endif -#if !defined(MUTEX_FIELDS) && !defined(HAVE_MUTEX_FCNTL) - tsl_t tas; /* Test and set. */ -#endif - pid_t pid; /* Process owning mutex */ - db_threadid_t tid; /* Thread owning mutex */ - - u_int32_t mutex_next_link; /* Linked list of free mutexes. */ - -#ifdef HAVE_STATISTICS - int alloc_id; /* Allocation ID. */ - - u_int32_t mutex_set_wait; /* Granted after wait. */ - u_int32_t mutex_set_nowait; /* Granted without waiting. */ -#endif - - /* - * A subset of the flag arguments for __mutex_alloc(). - * - * Flags should be an unsigned integer even if it's not required by - * the possible flags values, getting a single byte on some machines - * is expensive, and the mutex structure is a MP hot spot. - */ - u_int32_t flags; /* MUTEX_XXX */ -} DB_MUTEX; - -/* Macro to get a reference to a specific mutex. 
*/ -#define MUTEXP_SET(indx) \ - (DB_MUTEX *) \ - ((u_int8_t *)mtxmgr->mutex_array + (indx) * mtxregion->mutex_size); - -#endif /* !_DB_MUTEX_INT_H_ */ diff --git a/storage/bdb/dbinc/os.h b/storage/bdb/dbinc/os.h deleted file mode 100644 index 52013630908..00000000000 --- a/storage/bdb/dbinc/os.h +++ /dev/null @@ -1,123 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: os.h,v 12.10 2005/10/31 02:22:24 bostic Exp $ - */ - -#ifndef _DB_OS_H_ -#define _DB_OS_H_ - -#if defined(__cplusplus) -extern "C" { -#endif - -/* Number of times to retry system calls that return EINTR or EBUSY. */ -#define DB_RETRY 100 - -#ifdef __TANDEM -/* - * OSS Tandem problem: fsync can return a Guardian file system error of 70, - * which has no symbolic name in OSS. HP says to retry the fsync. [#12957] - */ -#define RETRY_CHK(op, ret) do { \ - int __retries = DB_RETRY; \ - do { \ - (ret) = (op); \ - } while ((ret) != 0 && (((ret) = __os_get_errno()) == EAGAIN || \ - (ret) == EBUSY || (ret) == EINTR || (ret) == EIO || \ - (ret) == 70) && \ - --__retries > 0); \ -} while (0) -#else -#define RETRY_CHK(op, ret) do { \ - int __retries = DB_RETRY; \ - do { \ - (ret) = (op); \ - } while ((ret) != 0 && (((ret) = __os_get_errno()) == EAGAIN || \ - (ret) == EBUSY || (ret) == EINTR || (ret) == EIO) && \ - --__retries > 0); \ -} while (0) -#endif - -#define RETRY_CHK_EINTR_ONLY(op, ret) do { \ - int __retries = DB_RETRY; \ - do { \ - (ret) = (op); \ - } while ((ret) != 0 && \ - (((ret) = __os_get_errno()) == EINTR) && --__retries > 0); \ -} while (0) - -/* - * Flags understood by __os_open. - */ -#define DB_OSO_ABSMODE 0x0001 /* Absolute mode specified. */ -#define DB_OSO_CREATE 0x0002 /* POSIX: O_CREAT */ -#define DB_OSO_DIRECT 0x0004 /* Don't buffer the file in the OS. */ -#define DB_OSO_DSYNC 0x0008 /* POSIX: O_DSYNC. */ -#define DB_OSO_EXCL 0x0010 /* POSIX: O_EXCL */ -#define DB_OSO_RDONLY 0x0020 /* POSIX: O_RDONLY */ -#define DB_OSO_REGION 0x0040 /* Opening a region file. */ -#define DB_OSO_SEQ 0x0080 /* Expected sequential access. */ -#define DB_OSO_TEMP 0x0100 /* Remove after last close. */ -#define DB_OSO_TRUNC 0x0200 /* POSIX: O_TRUNC */ - -/* - * Seek options understood by __os_seek. - */ -typedef enum { - DB_OS_SEEK_CUR, /* POSIX: SEEK_CUR */ - DB_OS_SEEK_END, /* POSIX: SEEK_END */ - DB_OS_SEEK_SET /* POSIX: SEEK_SET */ -} DB_OS_SEEK; - -/* - * We group certain seek/write calls into a single function so that we - * can use pread(2)/pwrite(2) where they're available. - */ -#define DB_IO_READ 1 -#define DB_IO_WRITE 2 - -/* DB filehandle. */ -struct __fh_t { - /* - * The file-handle mutex is only used to protect the handle/fd - * across seek and read/write pairs, it does not protect the - * the reference count, or any other fields in the structure. - */ - db_mutex_t mtx_fh; /* Mutex to lock. */ - - int ref; /* Reference count. */ - -#if defined(DB_WIN32) - HANDLE handle; /* Windows/32 file handle. */ -#endif - int fd; /* POSIX file descriptor. */ - - char *name; /* File name (ref DB_FH_UNLINK) */ - - /* - * Last seek statistics, used for zero-filling on filesystems - * that don't support it directly. - */ - db_pgno_t pgno; - u_int32_t pgsize; - u_int32_t offset; - -#define DB_FH_NOSYNC 0x01 /* Handle doesn't need to be sync'd. */ -#define DB_FH_OPENED 0x02 /* Handle is valid. */ -#define DB_FH_UNLINK 0x04 /* Unlink on close */ - u_int8_t flags; -}; - -/* Standard 600 mode for __db_omode. 
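[Editor's aside: the RETRY_CHK macros in os.h above retry a failing system call up to DB_RETRY (100) times while errno is EINTR, EBUSY, EAGAIN or EIO. A minimal standalone sketch of the same pattern for read(2), assuming POSIX errno semantics; the function name is illustrative.]

#include <errno.h>
#include <unistd.h>

static ssize_t read_retry(int fd, void *buf, size_t len)
{
        ssize_t n;
        int retries = 100;              /* mirrors DB_RETRY */

        do
                n = read(fd, buf, len); /* retry only the interrupted case here */
        while (n < 0 && errno == EINTR && --retries > 0);
        return n;
}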
*/ -#define OWNER_RW "rw-------" - -#if defined(__cplusplus) -} -#endif - -#include "dbinc_auto/os_ext.h" -#endif /* !_DB_OS_H_ */ diff --git a/storage/bdb/dbinc/qam.h b/storage/bdb/dbinc/qam.h deleted file mode 100644 index fdf1aa96eec..00000000000 --- a/storage/bdb/dbinc/qam.h +++ /dev/null @@ -1,176 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: qam.h,v 12.3 2005/10/16 18:42:40 bostic Exp $ - */ - -#ifndef _DB_QAM_H_ -#define _DB_QAM_H_ - -/* - * QAM data elements: a status field and the data. - */ -typedef struct _qamdata { - u_int8_t flags; /* 00: delete bit. */ -#define QAM_VALID 0x01 -#define QAM_SET 0x02 - u_int8_t data[1]; /* Record. */ -} QAMDATA; - -struct __queue; typedef struct __queue QUEUE; -struct __qcursor; typedef struct __qcursor QUEUE_CURSOR; - -struct __qcursor { - /* struct __dbc_internal */ - __DBC_INTERNAL - - /* Queue private part */ - - /* Per-thread information: queue private. */ - db_recno_t recno; /* Current record number. */ - - u_int32_t flags; -}; - -typedef struct __mpfarray { - u_int32_t n_extent; /* Number of extents in table. */ - u_int32_t low_extent; /* First extent open. */ - u_int32_t hi_extent; /* Last extent open. */ - struct __qmpf { - int pinref; - DB_MPOOLFILE *mpf; - } *mpfarray; /* Array of open extents. */ -} MPFARRAY; - -/* - * The in-memory, per-tree queue data structure. - */ -struct __queue { - db_pgno_t q_meta; /* Database meta-data page. */ - db_pgno_t q_root; /* Database root page. */ - - int re_pad; /* Fixed-length padding byte. */ - u_int32_t re_len; /* Length for fixed-length records. */ - u_int32_t rec_page; /* records per page */ - u_int32_t page_ext; /* Pages per extent */ - MPFARRAY array1, array2; /* File arrays. */ - - /* Extent file configuration: */ - DBT pgcookie; /* Initialized pgcookie. */ - DB_PGINFO pginfo; /* Initialized pginfo struct. */ - - char *path; /* Space allocated to file pathname. */ - char *name; /* The name of the file. */ - char *dir; /* The dir of the file. */ - int mode; /* Mode to open extents. */ -}; - -/* Format for queue extent names. */ -#define QUEUE_EXTENT "%s%c__dbq.%s.%d" -#define QUEUE_EXTENT_HEAD "__dbq.%s." - -typedef struct __qam_filelist { - DB_MPOOLFILE *mpf; - u_int32_t id; -} QUEUE_FILELIST; - -/* - * Calculate the page number of a recno. - * - * Number of records per page = - * Divide the available space on the page by the record len + header. - * - * Page number for record = - * divide the physical record number by the records per page - * add the root page number - * For now the root page will always be 1, but we might want to change - * in the future (e.g. multiple fixed len queues per file). 
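[Editor's aside: a worked sketch of the recno-to-page arithmetic described in the qam.h comment above and encoded in the QAM_RECNO_* macros that follow. The 4096-byte page, 28-byte page header and 100-byte fixed record length are illustrative assumptions, not values taken from any metadata page; the real macros also account for QPAGE_SZ and DB_ALIGN exactly as shown in the diff.]

#include <stdio.h>

#define MY_ALIGN(v, a)  (((v) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned pgsize = 4096, qpage_sz = 28;          /* assumed header size */
        unsigned re_len = 100;                          /* fixed record length */
        unsigned slot = MY_ALIGN(1 + re_len, 4);        /* QAMDATA flag byte + data, aligned */
        unsigned per_page = (pgsize - qpage_sz) / slot; /* records per page: 39 here */
        unsigned q_root = 1, recno = 1000;

        unsigned pgno  = q_root + (recno - 1) / per_page;
        unsigned index = (recno - 1) - per_page * (pgno - q_root);

        printf("recno %u -> page %u, slot index %u\n", recno, pgno, index);
        return 0;
}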
- * - * Index of record on page = - * physical record number, less the logical pno times records/page - */ -#define CALC_QAM_RECNO_PER_PAGE(dbp) \ - (((dbp)->pgsize - QPAGE_SZ(dbp)) / \ - (u_int32_t)DB_ALIGN((uintmax_t)SSZA(QAMDATA, data) + \ - ((QUEUE *)(dbp)->q_internal)->re_len, sizeof(u_int32_t))) - -#define QAM_RECNO_PER_PAGE(dbp) (((QUEUE*)(dbp)->q_internal)->rec_page) - -#define QAM_RECNO_PAGE(dbp, recno) \ - (((QUEUE *)(dbp)->q_internal)->q_root \ - + (((recno) - 1) / QAM_RECNO_PER_PAGE(dbp))) - -#define QAM_PAGE_EXTENT(dbp, pgno) \ - (((pgno) - 1) / ((QUEUE *)(dbp)->q_internal)->page_ext) - -#define QAM_RECNO_EXTENT(dbp, recno) \ - QAM_PAGE_EXTENT(dbp, QAM_RECNO_PAGE(dbp, recno)) - -#define QAM_RECNO_INDEX(dbp, pgno, recno) \ - (((recno) - 1) - (QAM_RECNO_PER_PAGE(dbp) \ - * (pgno - ((QUEUE *)(dbp)->q_internal)->q_root))) - -#define QAM_GET_RECORD(dbp, page, index) \ - ((QAMDATA *)((u_int8_t *)(page) + (QPAGE_SZ(dbp) + \ - (DB_ALIGN((uintmax_t)SSZA(QAMDATA, data) + \ - ((QUEUE *)(dbp)->q_internal)->re_len, sizeof(u_int32_t)) * index)))) - -#define QAM_AFTER_CURRENT(meta, recno) \ - ((recno) > (meta)->cur_recno && \ - ((meta)->first_recno <= (meta)->cur_recno || \ - ((recno) < (meta)->first_recno && \ - (recno) - (meta)->cur_recno < (meta)->first_recno - (recno)))) - -#define QAM_BEFORE_FIRST(meta, recno) \ - ((recno) < (meta)->first_recno && \ - ((meta->first_recno <= (meta)->cur_recno || \ - ((recno) > (meta)->cur_recno && \ - (recno) - (meta)->cur_recno > (meta)->first_recno - (recno))))) - -#define QAM_NOT_VALID(meta, recno) \ - (recno == RECNO_OOB || \ - QAM_BEFORE_FIRST(meta, recno) || QAM_AFTER_CURRENT(meta, recno)) - -/* - * Log opcodes for the mvptr routine. - */ -#define QAM_SETFIRST 0x01 -#define QAM_SETCUR 0x02 -#define QAM_TRUNCATE 0x04 - -/* - * Parameter to __qam_position. - */ -typedef enum { - QAM_READ, - QAM_WRITE, - QAM_CONSUME -} qam_position_mode; - -typedef enum { - QAM_PROBE_GET, - QAM_PROBE_PUT, - QAM_PROBE_MPF -} qam_probe_mode; - -/* - * Ops for __qam_nameop. - */ -typedef enum { - QAM_NAME_DISCARD, - QAM_NAME_RENAME, - QAM_NAME_REMOVE -} qam_name_op; - -#define __qam_fget(dbp, pgnoaddr, flags, addrp) \ - __qam_fprobe(dbp, *pgnoaddr, addrp, QAM_PROBE_GET, flags) - -#define __qam_fput(dbp, pageno, addrp, flags) \ - __qam_fprobe(dbp, pageno, addrp, QAM_PROBE_PUT, flags) - -#include "dbinc_auto/qam_auto.h" -#include "dbinc_auto/qam_ext.h" -#endif /* !_DB_QAM_H_ */ diff --git a/storage/bdb/dbinc/queue.h b/storage/bdb/dbinc/queue.h deleted file mode 100644 index d76f2019f6f..00000000000 --- a/storage/bdb/dbinc/queue.h +++ /dev/null @@ -1,563 +0,0 @@ -/* - * Copyright (c) 1991, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. 
Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)queue.h 8.5 (Berkeley) 8/20/94 - * $FreeBSD: src/sys/sys/queue.h,v 1.54 2002/08/05 05:18:43 alfred Exp $ - */ - -#ifndef _DB_QUEUE_H_ -#define _DB_QUEUE_H_ - -#if defined(__cplusplus) -extern "C" { -#endif - -/* - * This file defines four types of data structures: singly-linked lists, - * singly-linked tail queues, lists and tail queues. - * - * A singly-linked list is headed by a single forward pointer. The elements - * are singly linked for minimum space and pointer manipulation overhead at - * the expense of O(n) removal for arbitrary elements. New elements can be - * added to the list after an existing element or at the head of the list. - * Elements being removed from the head of the list should use the explicit - * macro for this purpose for optimum efficiency. A singly-linked list may - * only be traversed in the forward direction. Singly-linked lists are ideal - * for applications with large datasets and few or no removals or for - * implementing a LIFO queue. - * - * A singly-linked tail queue is headed by a pair of pointers, one to the - * head of the list and the other to the tail of the list. The elements are - * singly linked for minimum space and pointer manipulation overhead at the - * expense of O(n) removal for arbitrary elements. New elements can be added - * to the list after an existing element, at the head of the list, or at the - * end of the list. Elements being removed from the head of the tail queue - * should use the explicit macro for this purpose for optimum efficiency. - * A singly-linked tail queue may only be traversed in the forward direction. - * Singly-linked tail queues are ideal for applications with large datasets - * and few or no removals or for implementing a FIFO queue. - * - * A list is headed by a single forward pointer (or an array of forward - * pointers for a hash table header). The elements are doubly linked - * so that an arbitrary element can be removed without a need to - * traverse the list. New elements can be added to the list before - * or after an existing element or at the head of the list. A list - * may only be traversed in the forward direction. - * - * A tail queue is headed by a pair of pointers, one to the head of the - * list and the other to the tail of the list. The elements are doubly - * linked so that an arbitrary element can be removed without a need to - * traverse the list. New elements can be added to the list before or - * after an existing element, at the head of the list, or at the end of - * the list. 
A tail queue may be traversed in either direction. - * - * For details on the use of these macros, see the queue(3) manual page. - * - * - * SLIST LIST STAILQ TAILQ - * _HEAD + + + + - * _HEAD_INITIALIZER + + + + - * _ENTRY + + + + - * _INIT + + + + - * _EMPTY + + + + - * _FIRST + + + + - * _NEXT + + + + - * _PREV - - - + - * _LAST - - + + - * _FOREACH + + + + - * _FOREACH_REVERSE - - - + - * _INSERT_HEAD + + + + - * _INSERT_BEFORE - + - + - * _INSERT_AFTER + + + + - * _INSERT_TAIL - - + + - * _CONCAT - - + + - * _REMOVE_HEAD + - + - - * _REMOVE + + + + - * - */ - -/* - * XXX - * We #undef all of the macros because there are incompatible versions of this - * file and these macros on various systems. What makes the problem worse is - * they are included and/or defined by system include files which we may have - * already loaded into Berkeley DB before getting here. For example, FreeBSD's - * includes its system , and VxWorks UnixLib.h defines - * several of the LIST_XXX macros. Visual C.NET 7.0 also defines some of these - * same macros in Vc7\PlatformSDK\Include\WinNT.h. Make sure we use ours. - */ -#undef LIST_EMPTY -#undef LIST_ENTRY -#undef LIST_FIRST -#undef LIST_FOREACH -#undef LIST_HEAD -#undef LIST_HEAD_INITIALIZER -#undef LIST_INIT -#undef LIST_INSERT_AFTER -#undef LIST_INSERT_BEFORE -#undef LIST_INSERT_HEAD -#undef LIST_NEXT -#undef LIST_REMOVE -#undef QMD_TRACE_ELEM -#undef QMD_TRACE_HEAD -#undef QUEUE_MACRO_DEBUG -#undef SLIST_EMPTY -#undef SLIST_ENTRY -#undef SLIST_FIRST -#undef SLIST_FOREACH -#undef SLIST_FOREACH_PREVPTR -#undef SLIST_HEAD -#undef SLIST_HEAD_INITIALIZER -#undef SLIST_INIT -#undef SLIST_INSERT_AFTER -#undef SLIST_INSERT_HEAD -#undef SLIST_NEXT -#undef SLIST_REMOVE -#undef SLIST_REMOVE_HEAD -#undef STAILQ_CONCAT -#undef STAILQ_EMPTY -#undef STAILQ_ENTRY -#undef STAILQ_FIRST -#undef STAILQ_FOREACH -#undef STAILQ_HEAD -#undef STAILQ_HEAD_INITIALIZER -#undef STAILQ_INIT -#undef STAILQ_INSERT_AFTER -#undef STAILQ_INSERT_HEAD -#undef STAILQ_INSERT_TAIL -#undef STAILQ_LAST -#undef STAILQ_NEXT -#undef STAILQ_REMOVE -#undef STAILQ_REMOVE_HEAD -#undef STAILQ_REMOVE_HEAD_UNTIL -#undef TAILQ_CONCAT -#undef TAILQ_EMPTY -#undef TAILQ_ENTRY -#undef TAILQ_FIRST -#undef TAILQ_FOREACH -#undef TAILQ_FOREACH_REVERSE -#undef TAILQ_HEAD -#undef TAILQ_HEAD_INITIALIZER -#undef TAILQ_INIT -#undef TAILQ_INSERT_AFTER -#undef TAILQ_INSERT_BEFORE -#undef TAILQ_INSERT_HEAD -#undef TAILQ_INSERT_TAIL -#undef TAILQ_LAST -#undef TAILQ_NEXT -#undef TAILQ_PREV -#undef TAILQ_REMOVE -#undef TRACEBUF -#undef TRASHIT - -#define QUEUE_MACRO_DEBUG 0 -#if QUEUE_MACRO_DEBUG -/* Store the last 2 places the queue element or head was altered */ -struct qm_trace { - char * lastfile; - int lastline; - char * prevfile; - int prevline; -}; - -#define TRACEBUF struct qm_trace trace; -#define TRASHIT(x) do {(x) = (void *)-1;} while (0) - -#define QMD_TRACE_HEAD(head) do { \ - (head)->trace.prevline = (head)->trace.lastline; \ - (head)->trace.prevfile = (head)->trace.lastfile; \ - (head)->trace.lastline = __LINE__; \ - (head)->trace.lastfile = __FILE__; \ -} while (0) - -#define QMD_TRACE_ELEM(elem) do { \ - (elem)->trace.prevline = (elem)->trace.lastline; \ - (elem)->trace.prevfile = (elem)->trace.lastfile; \ - (elem)->trace.lastline = __LINE__; \ - (elem)->trace.lastfile = __FILE__; \ -} while (0) - -#else -#define QMD_TRACE_ELEM(elem) -#define QMD_TRACE_HEAD(head) -#define TRACEBUF -#define TRASHIT(x) -#endif /* QUEUE_MACRO_DEBUG */ - -/* - * Singly-linked List declarations. 
- */ -#define SLIST_HEAD(name, type) \ -struct name { \ - struct type *slh_first; /* first element */ \ -} - -#define SLIST_HEAD_INITIALIZER(head) \ - { NULL } - -#define SLIST_ENTRY(type) \ -struct { \ - struct type *sle_next; /* next element */ \ -} - -/* - * Singly-linked List functions. - */ -#define SLIST_EMPTY(head) ((head)->slh_first == NULL) - -#define SLIST_FIRST(head) ((head)->slh_first) - -#define SLIST_FOREACH(var, head, field) \ - for ((var) = SLIST_FIRST((head)); \ - (var); \ - (var) = SLIST_NEXT((var), field)) - -#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ - for ((varp) = &SLIST_FIRST((head)); \ - ((var) = *(varp)) != NULL; \ - (varp) = &SLIST_NEXT((var), field)) - -#define SLIST_INIT(head) do { \ - SLIST_FIRST((head)) = NULL; \ -} while (0) - -#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ - SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ - SLIST_NEXT((slistelm), field) = (elm); \ -} while (0) - -#define SLIST_INSERT_HEAD(head, elm, field) do { \ - SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ - SLIST_FIRST((head)) = (elm); \ -} while (0) - -#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) - -#define SLIST_REMOVE(head, elm, type, field) do { \ - if (SLIST_FIRST((head)) == (elm)) { \ - SLIST_REMOVE_HEAD((head), field); \ - } \ - else { \ - struct type *curelm = SLIST_FIRST((head)); \ - while (SLIST_NEXT(curelm, field) != (elm)) \ - curelm = SLIST_NEXT(curelm, field); \ - SLIST_NEXT(curelm, field) = \ - SLIST_NEXT(SLIST_NEXT(curelm, field), field); \ - } \ -} while (0) - -#define SLIST_REMOVE_HEAD(head, field) do { \ - SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ -} while (0) - -/* - * Singly-linked Tail queue declarations. - */ -#define STAILQ_HEAD(name, type) \ -struct name { \ - struct type *stqh_first;/* first element */ \ - struct type **stqh_last;/* addr of last next element */ \ -} - -#define STAILQ_HEAD_INITIALIZER(head) \ - { NULL, &(head).stqh_first } - -#define STAILQ_ENTRY(type) \ -struct { \ - struct type *stqe_next; /* next element */ \ -} - -/* - * Singly-linked Tail queue functions. - */ -#define STAILQ_CONCAT(head1, head2) do { \ - if (!STAILQ_EMPTY((head2))) { \ - *(head1)->stqh_last = (head2)->stqh_first; \ - (head1)->stqh_last = (head2)->stqh_last; \ - STAILQ_INIT((head2)); \ - } \ -} while (0) - -#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) - -#define STAILQ_FIRST(head) ((head)->stqh_first) - -#define STAILQ_FOREACH(var, head, field) \ - for ((var) = STAILQ_FIRST((head)); \ - (var); \ - (var) = STAILQ_NEXT((var), field)) - -#define STAILQ_INIT(head) do { \ - STAILQ_FIRST((head)) = NULL; \ - (head)->stqh_last = &STAILQ_FIRST((head)); \ -} while (0) - -#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ - if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ - (head)->stqh_last = &STAILQ_NEXT((elm), field); \ - STAILQ_NEXT((tqelm), field) = (elm); \ -} while (0) - -#define STAILQ_INSERT_HEAD(head, elm, field) do { \ - if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ - (head)->stqh_last = &STAILQ_NEXT((elm), field); \ - STAILQ_FIRST((head)) = (elm); \ -} while (0) - -#define STAILQ_INSERT_TAIL(head, elm, field) do { \ - STAILQ_NEXT((elm), field) = NULL; \ - *(head)->stqh_last = (elm); \ - (head)->stqh_last = &STAILQ_NEXT((elm), field); \ -} while (0) - -#define STAILQ_LAST(head, type, field) \ - (STAILQ_EMPTY((head)) ? 
\ - NULL : \ - ((struct type *) \ - ((char *)((head)->stqh_last) - __offsetof(struct type, field)))) - -#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) - -#define STAILQ_REMOVE(head, elm, type, field) do { \ - if (STAILQ_FIRST((head)) == (elm)) { \ - STAILQ_REMOVE_HEAD((head), field); \ - } \ - else { \ - struct type *curelm = STAILQ_FIRST((head)); \ - while (STAILQ_NEXT(curelm, field) != (elm)) \ - curelm = STAILQ_NEXT(curelm, field); \ - if ((STAILQ_NEXT(curelm, field) = \ - STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\ - (head)->stqh_last = &STAILQ_NEXT((curelm), field);\ - } \ -} while (0) - -#define STAILQ_REMOVE_HEAD(head, field) do { \ - if ((STAILQ_FIRST((head)) = \ - STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ - (head)->stqh_last = &STAILQ_FIRST((head)); \ -} while (0) - -#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ - if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \ - (head)->stqh_last = &STAILQ_FIRST((head)); \ -} while (0) - -/* - * List declarations. - */ -#define LIST_HEAD(name, type) \ -struct name { \ - struct type *lh_first; /* first element */ \ -} - -#define LIST_HEAD_INITIALIZER(head) \ - { NULL } - -#define LIST_ENTRY(type) \ -struct { \ - struct type *le_next; /* next element */ \ - struct type **le_prev; /* address of previous next element */ \ -} - -/* - * List functions. - */ - -#define LIST_EMPTY(head) ((head)->lh_first == NULL) - -#define LIST_FIRST(head) ((head)->lh_first) - -#define LIST_FOREACH(var, head, field) \ - for ((var) = LIST_FIRST((head)); \ - (var); \ - (var) = LIST_NEXT((var), field)) - -#define LIST_INIT(head) do { \ - LIST_FIRST((head)) = NULL; \ -} while (0) - -#define LIST_INSERT_AFTER(listelm, elm, field) do { \ - if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ - LIST_NEXT((listelm), field)->field.le_prev = \ - &LIST_NEXT((elm), field); \ - LIST_NEXT((listelm), field) = (elm); \ - (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ -} while (0) - -#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ - (elm)->field.le_prev = (listelm)->field.le_prev; \ - LIST_NEXT((elm), field) = (listelm); \ - *(listelm)->field.le_prev = (elm); \ - (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ -} while (0) - -#define LIST_INSERT_HEAD(head, elm, field) do { \ - if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ - LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ - LIST_FIRST((head)) = (elm); \ - (elm)->field.le_prev = &LIST_FIRST((head)); \ -} while (0) - -#define LIST_NEXT(elm, field) ((elm)->field.le_next) - -#define LIST_REMOVE(elm, field) do { \ - if (LIST_NEXT((elm), field) != NULL) \ - LIST_NEXT((elm), field)->field.le_prev = \ - (elm)->field.le_prev; \ - *(elm)->field.le_prev = LIST_NEXT((elm), field); \ -} while (0) - -/* - * Tail queue declarations. - */ -#define TAILQ_HEAD(name, type) \ -struct name { \ - struct type *tqh_first; /* first element */ \ - struct type **tqh_last; /* addr of last next element */ \ - TRACEBUF \ -} - -#define TAILQ_HEAD_INITIALIZER(head) \ - { NULL, &(head).tqh_first } - -#define TAILQ_ENTRY(type) \ -struct { \ - struct type *tqe_next; /* next element */ \ - struct type **tqe_prev; /* address of previous next element */ \ - TRACEBUF \ -} - -/* - * Tail queue functions. 
- */ -#define TAILQ_CONCAT(head1, head2, field) do { \ - if (!TAILQ_EMPTY(head2)) { \ - *(head1)->tqh_last = (head2)->tqh_first; \ - (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ - (head1)->tqh_last = (head2)->tqh_last; \ - TAILQ_INIT((head2)); \ - QMD_TRACE_HEAD(head); \ - QMD_TRACE_HEAD(head2); \ - } \ -} while (0) - -#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) - -#define TAILQ_FIRST(head) ((head)->tqh_first) - -#define TAILQ_FOREACH(var, head, field) \ - for ((var) = TAILQ_FIRST((head)); \ - (var); \ - (var) = TAILQ_NEXT((var), field)) - -#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ - for ((var) = TAILQ_LAST((head), headname); \ - (var); \ - (var) = TAILQ_PREV((var), headname, field)) - -#define TAILQ_INIT(head) do { \ - TAILQ_FIRST((head)) = NULL; \ - (head)->tqh_last = &TAILQ_FIRST((head)); \ - QMD_TRACE_HEAD(head); \ -} while (0) - -#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ - if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ - TAILQ_NEXT((elm), field)->field.tqe_prev = \ - &TAILQ_NEXT((elm), field); \ - else { \ - (head)->tqh_last = &TAILQ_NEXT((elm), field); \ - QMD_TRACE_HEAD(head); \ - } \ - TAILQ_NEXT((listelm), field) = (elm); \ - (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ - QMD_TRACE_ELEM(&(elm)->field); \ - QMD_TRACE_ELEM(&listelm->field); \ -} while (0) - -#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ - (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ - TAILQ_NEXT((elm), field) = (listelm); \ - *(listelm)->field.tqe_prev = (elm); \ - (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ - QMD_TRACE_ELEM(&(elm)->field); \ - QMD_TRACE_ELEM(&listelm->field); \ -} while (0) - -#define TAILQ_INSERT_HEAD(head, elm, field) do { \ - if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ - TAILQ_FIRST((head))->field.tqe_prev = \ - &TAILQ_NEXT((elm), field); \ - else \ - (head)->tqh_last = &TAILQ_NEXT((elm), field); \ - TAILQ_FIRST((head)) = (elm); \ - (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ - QMD_TRACE_HEAD(head); \ - QMD_TRACE_ELEM(&(elm)->field); \ -} while (0) - -#define TAILQ_INSERT_TAIL(head, elm, field) do { \ - TAILQ_NEXT((elm), field) = NULL; \ - (elm)->field.tqe_prev = (head)->tqh_last; \ - *(head)->tqh_last = (elm); \ - (head)->tqh_last = &TAILQ_NEXT((elm), field); \ - QMD_TRACE_HEAD(head); \ - QMD_TRACE_ELEM(&(elm)->field); \ -} while (0) - -#define TAILQ_LAST(head, headname) \ - (*(((struct headname *)((head)->tqh_last))->tqh_last)) - -#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) - -#define TAILQ_PREV(elm, headname, field) \ - (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) - -#define TAILQ_REMOVE(head, elm, field) do { \ - if ((TAILQ_NEXT((elm), field)) != NULL) \ - TAILQ_NEXT((elm), field)->field.tqe_prev = \ - (elm)->field.tqe_prev; \ - else { \ - (head)->tqh_last = (elm)->field.tqe_prev; \ - QMD_TRACE_HEAD(head); \ - } \ - *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ - TRASHIT((elm)->field.tqe_next); \ - TRASHIT((elm)->field.tqe_prev); \ - QMD_TRACE_ELEM(&(elm)->field); \ -} while (0) - -#if defined(__cplusplus) -} -#endif -#endif /* !_DB_QUEUE_H_ */ diff --git a/storage/bdb/dbinc/region.h b/storage/bdb/dbinc/region.h deleted file mode 100644 index 5999893962a..00000000000 --- a/storage/bdb/dbinc/region.h +++ /dev/null @@ -1,274 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1998-2005 - * Sleepycat Software. All rights reserved. 
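[Editor's aside: a short usage sketch for the BSD queue macros defined above, TAILQ flavour, showing an embedded entry, insertion at both ends, traversal and removal. It includes the system <sys/queue.h>, which provides the same macro family outside Berkeley DB; struct and field names are illustrative.]

#include <sys/queue.h>

struct item {
        int value;
        TAILQ_ENTRY(item) links;        /* embedded forward/back linkage */
};
TAILQ_HEAD(item_list, item);

static void demo(struct item_list *head, struct item *a, struct item *b)
{
        struct item *ip;

        TAILQ_INIT(head);
        TAILQ_INSERT_TAIL(head, a, links);
        TAILQ_INSERT_HEAD(head, b, links);      /* list is now: b, a */
        TAILQ_FOREACH(ip, head, links)
                ip->value++;
        TAILQ_REMOVE(head, a, links);
}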
- * - * $Id: region.h,v 12.7 2005/10/13 00:53:00 bostic Exp $ - */ - -#ifndef _DB_REGION_H_ -#define _DB_REGION_H_ - -/* - * The DB environment consists of some number of "regions", which are described - * by the following four structures: - * - * REGENV -- shared information about the environment - * REGENV_REF -- file describing system memory version of REGENV - * REGION -- shared information about a single region - * REGINFO -- per-process information about a REGION - * - * There are three types of memory that hold regions: - * per-process heap (malloc) - * file mapped into memory (mmap, MapViewOfFile) - * system memory (shmget, CreateFileMapping) - * - * By default, regions are created in filesystem-backed shared memory. They - * can also be created in system shared memory (DB_SYSTEM_MEM), or, if private - * to a process, in heap memory (DB_PRIVATE). - * - * Regions in the filesystem are named "__db.001", "__db.002" and so on. If - * we're not using a private environment allocated in heap, "__db.001" will - * always exist, as we use it to synchronize on the regions, whether they are - * in filesystem-backed memory or system memory. - * - * The file "__db.001" contains a REGENV structure and an array of REGION - * structures. Each REGION structures describes an underlying chunk of - * shared memory. - * - * __db.001 - * +---------+ - * |REGENV | - * +---------+ +----------+ - * |REGION |-> | __db.002 | - * | | +----------+ - * +---------+ +----------+ - * |REGION |-> | __db.003 | - * | | +----------+ - * +---------+ +----------+ - * |REGION |-> | __db.004 | - * | | +----------+ - * +---------+ - * - * The tricky part about manipulating the regions is creating or joining the - * database environment. We have to be sure only a single thread of control - * creates and/or recovers a database environment. All other threads should - * then join without seeing inconsistent data. - * - * We do this in two parts: first, we use the underlying O_EXCL flag to the - * open system call to serialize creation of the __db.001 file. The thread - * of control creating that file then proceeds to create the remaining - * regions in the environment, including the mutex region. Once the mutex - * region has been created, the creating thread of control fills in the - * __db.001 file's magic number. Other threads of control (the ones that - * didn't create the __db.001 file), wait on the initialization of the - * __db.001 file's magic number. After it has been initialized, all threads - * of control can proceed, using normal shared mutex locking procedures for - * exclusion. - * - * REGIONs are not moved or removed during the life of the environment, and - * so processes can have long-lived references to them. - * - * One of the REGION structures describes the environment region itself. - * - * The REGION array is not locked in any way. It's an array so we don't have - * to manipulate data structures after a crash -- on some systems, we have to - * join and clean up the mutex region after application failure. Using an - * array means we don't have to worry about broken links or other nastiness - * after the failure. - * - * All requests to create or join a region return a REGINFO structure, which - * is held by the caller and used to open and subsequently close the reference - * to the region. The REGINFO structure contains the per-process information - * that we need to access the region. - * - * The one remaining complication. 
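[Editor's aside: the region.h comment above describes serializing environment creation by letting O_EXCL elect a single creator of "__db.001", after which everyone else joins and waits for the creator to publish the magic number. A minimal sketch of that open-or-join step, assuming POSIX open(2); the path and mode are illustrative and none of the magic-number handshaking is shown.]

#include <fcntl.h>

static int open_env_file(const char *path, int *am_creator)
{
        int fd;

        /* Exactly one process can win the O_EXCL create... */
        if ((fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0600)) >= 0) {
                *am_creator = 1;
                return fd;
        }

        /* ...all others simply join the existing file and wait on its magic. */
        *am_creator = 0;
        return open(path, O_RDWR);
}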
If the regions (including the environment - * region) live in system memory, and the system memory isn't "named" somehow - * in the filesystem name space, we need some way of finding it. Do this by - * by writing the REGENV_REF structure into the "__db.001" file. When we find - * a __db.001 file that is too small to be a real, on-disk environment, we use - * the information it contains to redirect to the real "__db.001" file/memory. - * This currently only happens when the REGENV file is in shared system memory. - * - * Although DB does not currently grow regions when they run out of memory, it - * would be possible to do so. To grow a region, allocate a new region of the - * appropriate size, then copy the old region over it and insert the additional - * memory into the already existing shalloc arena. Region users must reset - * their base addresses and any local pointers into the memory, of course. - * This failed in historic versions of DB because the region mutexes lived in - * the mapped memory, and when it was unmapped and remapped (or copied), - * threads could lose track of it. Also, some systems didn't support mutex - * copying, e.g., from OSF1 V4.0: - * - * The address of an msemaphore structure may be significant. If the - * msemaphore structure contains any value copied from an msemaphore - * structure at a different address, the result is undefined. - * - * All mutexes are now maintained in a separate region which is never unmapped, - * so growing regions should be possible. - */ - -#if defined(__cplusplus) -extern "C" { -#endif - -#define DB_REGION_PREFIX "__db" /* DB file name prefix. */ -#define DB_REGION_FMT "__db.%03d" /* Region file name format. */ -#define DB_REGION_ENV "__db.001" /* Primary environment name. */ -#define DB_REGION_NAME_LENGTH 8 /* Length of file names. */ - -#define INVALID_REGION_ID 0 /* Out-of-band region ID. */ -#define REGION_ID_ENV 1 /* Primary environment ID. */ - -typedef enum { - INVALID_REGION_TYPE=0, /* Region type. */ - REGION_TYPE_ENV, - REGION_TYPE_LOCK, - REGION_TYPE_LOG, - REGION_TYPE_MPOOL, - REGION_TYPE_MUTEX, - REGION_TYPE_TXN } reg_type_t; - -#define INVALID_REGION_SEGID -1 /* Segment IDs are either shmget(2) or - * Win16 segment identifiers. They are - * both stored in a "long", and we need - * an out-of-band value. - */ -/* - * Nothing can live at region offset 0, because, in all cases, that's where - * we store *something*. Lots of code needs an out-of-band value for region - * offsets, so we use 0. - */ -#define INVALID_ROFF 0 - -/* Reference describing system memory version of REGENV. */ -typedef struct __db_reg_env_ref { - roff_t size; /* Region size. */ - long segid; /* UNIX shmget ID, VxWorks ID. */ -} REGENV_REF; - -/* Per-environment region information. */ -typedef struct __db_reg_env { - /* - * !!! - * The magic, panic, version and envid fields of the region are fixed - * in size, the timestamp field is the first field which is variable - * length. These fields must never change in order, to guarantee we - * can always read them, no matter what Berkeley DB release we have. - * - * !!! - * The magic and panic fields are NOT protected by any mutex, and for - * this reason cannot be anything more complicated than zero/non-zero. - */ - u_int32_t magic; /* Valid region magic number. */ - u_int32_t panic; /* Environment is dead. */ - - u_int32_t majver; /* Major DB version number. */ - u_int32_t minver; /* Minor DB version number. */ - u_int32_t patchver; /* Patch DB version number. 
*/ - - u_int32_t envid; /* Unique environment ID. */ - - time_t timestamp; /* Creation time. */ - - u_int32_t init_flags; /* Flags environment initialized with.*/ - - /* - * The mtx_regenv mutex protects the environment reference count and - * memory allocation from the primary shared region (the crypto and - * replication implementations allocate memory from the primary shared - * region). The rest of the fields are initialized at creation time, - * and so don't need mutex protection. The flags, op_timestamp and - * rep_timestamp fields are used by replication only and are - * protected * by the replication mutex. The rep_timestamp is - * is not protected when it is used in recovery as that is already - * single threaded. - */ - db_mutex_t mtx_regenv; /* Refcnt, region allocation mutex. */ - u_int32_t refcnt; /* References to the environment. */ - - u_int32_t region_cnt; /* Number of REGIONs. */ - roff_t region_off; /* Offset of region array */ - - roff_t cipher_off; /* Offset of cipher area */ - - roff_t rep_off; /* Offset of the replication area. */ -#define DB_REGENV_REPLOCKED 0x0001 /* Env locked for rep backup. */ - u_int32_t flags; /* Shared environment flags. */ -#define DB_REGENV_TIMEOUT 30 /* Backup timeout. */ - time_t op_timestamp; /* Timestamp for operations. */ - time_t rep_timestamp; /* Timestamp for rep db handles. */ - - size_t pad; /* Guarantee that following memory is - * size_t aligned. This is necessary - * because we're going to store the - * allocation region information there. - */ -} REGENV; - -/* Per-region shared region information. */ -typedef struct __db_region { - u_int32_t id; /* Region id. */ - reg_type_t type; /* Region type. */ - - roff_t size_orig; /* Region size in bytes (original). */ - roff_t size; /* Region size in bytes (adjusted). */ - - roff_t primary; /* Primary data structure offset. */ - - long segid; /* UNIX shmget(2), Win16 segment ID. */ -} REGION; - -/* - * Per-process/per-attachment information about a single region. - */ -struct __db_reginfo_t { /* __db_r_attach IN parameters. */ - DB_ENV *dbenv; /* Enclosing environment. */ - reg_type_t type; /* Region type. */ - u_int32_t id; /* Region id. */ - - /* __db_r_attach OUT parameters. */ - REGION *rp; /* Shared region. */ - - char *name; /* Region file name. */ - - void *addr_orig; /* Region address (original). */ - void *addr; /* Region address (adjusted). */ - void *primary; /* Primary data structure address. */ - - size_t max_alloc; /* Maximum bytes allocated. */ - size_t allocated; /* Bytes allocated. */ - -#ifdef DB_WIN32 - HANDLE wnt_handle; /* Win/NT HANDLE. */ -#endif - -#define REGION_CREATE 0x01 /* Caller created region. */ -#define REGION_CREATE_OK 0x02 /* Caller willing to create region. */ -#define REGION_JOIN_OK 0x04 /* Caller is looking for a match. */ - u_int32_t flags; -}; - -/* - * R_ADDR Return a per-process address for a shared region offset. - * R_OFFSET Return a shared region offset for a per-process address. - */ -#define R_ADDR(reginfop, offset) \ - (F_ISSET((reginfop)->dbenv, DB_ENV_PRIVATE) ? (void *)(offset) :\ - (void *)((u_int8_t *)((reginfop)->addr) + (offset))) -#define R_OFFSET(reginfop, p) \ - (F_ISSET((reginfop)->dbenv, DB_ENV_PRIVATE) ? (roff_t)(p) : \ - (roff_t)((u_int8_t *)(p) - (u_int8_t *)(reginfop)->addr)) - -/* PANIC_CHECK: Check to see if the DB environment is dead. 
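[Editor's aside: R_ADDR and R_OFFSET above translate between per-process addresses and region-relative offsets, with offset 0 reserved as the out-of-band INVALID_ROFF. A hedged sketch of that mapping with a plain base pointer standing in for the attached region; it ignores the DB_ENV_PRIVATE special case handled by the real macros.]

typedef unsigned long my_roff_t;

static void *my_r_addr(void *base, my_roff_t off)
{
        return (unsigned char *)base + off;             /* offset -> pointer */
}

static my_roff_t my_r_offset(void *base, void *p)
{
        return (my_roff_t)((unsigned char *)p - (unsigned char *)base);
}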
*/ -#define PANIC_CHECK(dbenv) \ - if ((dbenv)->reginfo != NULL && ((REGENV *) \ - ((REGINFO *)(dbenv)->reginfo)->primary)->panic != 0 && \ - !F_ISSET((dbenv), DB_ENV_NOPANIC)) \ - return (__db_panic_msg(dbenv)); - -#if defined(__cplusplus) -} -#endif -#endif /* !_DB_REGION_H_ */ diff --git a/storage/bdb/dbinc/rep.h b/storage/bdb/dbinc/rep.h deleted file mode 100644 index effecaba8a1..00000000000 --- a/storage/bdb/dbinc/rep.h +++ /dev/null @@ -1,392 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: rep.h,v 12.22 2005/10/27 13:27:01 bostic Exp $ - */ - -#ifndef _REP_H_ -#define _REP_H_ - -#include "dbinc_auto/rep_auto.h" - -/* - * Message types - */ -#define REP_ALIVE 1 /* I am alive message. */ -#define REP_ALIVE_REQ 2 /* Request for alive messages. */ -#define REP_ALL_REQ 3 /* Request all log records greater than LSN. */ -#define REP_BULK_LOG 4 /* Bulk transfer of log records. */ -#define REP_BULK_PAGE 5 /* Bulk transfer of pages. */ -#define REP_DUPMASTER 6 /* Duplicate master detected; propagate. */ -#define REP_FILE 7 /* Page of a database file. NOTUSED */ -#define REP_FILE_FAIL 8 /* File requested does not exist. */ -#define REP_FILE_REQ 9 /* Request for a database file. NOTUSED */ -#define REP_LOG 10 /* Log record. */ -#define REP_LOG_MORE 11 /* There are more log records to request. */ -#define REP_LOG_REQ 12 /* Request for a log record. */ -#define REP_MASTER_REQ 13 /* Who is the master */ -#define REP_NEWCLIENT 14 /* Announces the presence of a new client. */ -#define REP_NEWFILE 15 /* Announce a log file change. */ -#define REP_NEWMASTER 16 /* Announces who the master is. */ -#define REP_NEWSITE 17 /* Announces that a site has heard from a new - * site; like NEWCLIENT, but indirect. A - * NEWCLIENT message comes directly from the new - * client while a NEWSITE comes indirectly from - * someone who heard about a NEWSITE. - */ -#define REP_PAGE 18 /* Database page. */ -#define REP_PAGE_FAIL 19 /* Requested page does not exist. */ -#define REP_PAGE_MORE 20 /* There are more pages to request. */ -#define REP_PAGE_REQ 21 /* Request for a database page. */ -#define REP_REREQUEST 22 /* Force rerequest. */ -#define REP_UPDATE 23 /* Environment hotcopy information. */ -#define REP_UPDATE_REQ 24 /* Request for hotcopy information. */ -#define REP_VERIFY 25 /* A log record for verification. */ -#define REP_VERIFY_FAIL 26 /* The client is outdated. */ -#define REP_VERIFY_REQ 27 /* Request for a log record to verify. */ -#define REP_VOTE1 28 /* Send out your information for an election. */ -#define REP_VOTE2 29 /* Send a "you are master" vote. */ - -/* - * REP_PRINT_MESSAGE - * A function to print a debugging message. - * - * RPRINT - * A macro for debug printing. Takes as an arg the arg set for __db_msg. - * - * !!! This function assumes a local DB_MSGBUF variable called 'mb'. 
- */ -#ifdef DIAGNOSTIC -#define REP_PRINT_MESSAGE(dbenv, eid, rp, str) \ - __rep_print_message(dbenv, eid, rp, str) -#define RPRINT(e, r, x) do { \ - if (FLD_ISSET((e)->verbose, DB_VERB_REPLICATION)) { \ - DB_MSGBUF_INIT(&mb); \ - if ((e)->db_errpfx == NULL) { \ - if (F_ISSET((r), REP_F_CLIENT)) \ - __db_msgadd((e), &mb, "CLIENT: "); \ - else if (F_ISSET((r), REP_F_MASTER)) \ - __db_msgadd((e), &mb, "MASTER: "); \ - else \ - __db_msgadd((e), &mb, "REP_UNDEF: "); \ - } else \ - __db_msgadd((e), &mb, "%s: ",(e)->db_errpfx); \ - __db_msgadd x; \ - DB_MSGBUF_FLUSH((e), &mb); \ - } \ -} while (0) -#else -#define REP_PRINT_MESSAGE(dbenv, eid, rp, str) -#define RPRINT(e, r, x) -#endif - -/* - * Election gen file name - * The file contains an egen number for an election this client has NOT - * participated in. I.e. it is the number of a future election. We - * create it when we create the rep region, if it doesn't already exist - * and initialize egen to 1. If it does exist, we read it when we create - * the rep region. We write it immediately before sending our VOTE1 in - * an election. That way, if a client has ever sent a vote for any - * election, the file is already going to be updated to reflect a future - * election, should it crash. - */ -#define REP_EGENNAME "__db.rep.egen" - -/* - * Database types for __rep_client_dbinit - */ -typedef enum { - REP_DB, /* Log record database. */ - REP_PG /* Pg database. */ -} repdb_t; - -/* Macros to lock/unlock the replication region as a whole. */ -#define REP_SYSTEM_LOCK(dbenv) \ - MUTEX_LOCK(dbenv, ((DB_REP *) \ - (dbenv)->rep_handle)->region->mtx_region) -#define REP_SYSTEM_UNLOCK(dbenv) \ - MUTEX_UNLOCK(dbenv, ((DB_REP *) \ - (dbenv)->rep_handle)->region->mtx_region) - -/* - * REP -- - * Shared replication structure. - */ -typedef struct __rep { - db_mutex_t mtx_region; /* Region mutex. */ - db_mutex_t mtx_clientdb; /* Client database mutex. */ - roff_t tally_off; /* Offset of the tally region. */ - roff_t v2tally_off; /* Offset of the vote2 tally region. */ - int eid; /* Environment id. */ - int master_id; /* ID of the master site. */ - u_int32_t egen; /* Replication election generation. */ - u_int32_t gen; /* Replication generation number. */ - u_int32_t recover_gen; /* Last generation number in log. */ - int asites; /* Space allocated for sites. */ - int nsites; /* Number of sites in group. */ - int nvotes; /* Number of votes needed. */ - int priority; /* My priority in an election. */ - u_int32_t gbytes; /* Limit on data sent in single... */ - u_int32_t bytes; /* __rep_process_message call. */ -#define DB_REP_REQUEST_GAP 4 -#define DB_REP_MAX_GAP 128 - u_int32_t request_gap; /* # of records to receive before we - * request a missing log record. */ - u_int32_t max_gap; /* Maximum number of records before - * requesting a missing log record. */ - /* Status change information */ - int elect_th; /* A thread is in rep_elect. */ - u_int32_t msg_th; /* Number of callers in rep_proc_msg. */ - int start_th; /* A thread is in rep_start. */ - u_int32_t handle_cnt; /* Count of handles in library. */ - u_int32_t op_cnt; /* Multi-step operation count.*/ - int in_recovery; /* Running recovery now. */ - - /* Backup information. */ - u_int32_t nfiles; /* Number of files we have info on. */ - u_int32_t curfile; /* Current file we're getting. */ - __rep_fileinfo_args *curinfo; /* Current file info ptr. */ - void *finfo; /* Current file info buffer. */ - void *nextinfo; /* Next file info buffer. */ - void *originfo; /* Original file info buffer. 
*/ - DB_LSN first_lsn; /* Earliest LSN we need. */ - DB_LSN last_lsn; /* Latest LSN we need. */ - db_pgno_t ready_pg; /* Next pg expected. */ - db_pgno_t waiting_pg; /* First pg after gap. */ - db_pgno_t max_wait_pg; /* Maximum pg requested. */ - u_int32_t npages; /* Num of pages rcvd for this file. */ - DB_MPOOLFILE *file_mpf; /* Mpoolfile for in-mem database. */ - DB *file_dbp; /* This file's page info. */ - DB *queue_dbp; /* Dbp for a queue file. */ - - /* Vote tallying information. */ - int sites; /* Sites heard from. */ - int winner; /* Current winner. */ - int w_priority; /* Winner priority. */ - u_int32_t w_gen; /* Winner generation. */ - DB_LSN w_lsn; /* Winner LSN. */ - u_int32_t w_tiebreaker; /* Winner tiebreaking value. */ - int votes; /* Number of votes for this site. */ - u_int32_t esec; /* Election start seconds. */ - u_int32_t eusec; /* Election start useconds. */ - - /* Statistics. */ - DB_REP_STAT stat; - - /* Configuration. */ -#define REP_C_BULK 0x00001 /* Bulk transfer. */ -#define REP_C_DELAYCLIENT 0x00002 /* Delay client sync-up. */ -#define REP_C_NOAUTOINIT 0x00004 /* No auto initialization. */ -#define REP_C_NOWAIT 0x00008 /* Immediate error return. */ - u_int32_t config; /* Configuration flags. */ - -#define REP_F_CLIENT 0x00001 /* Client replica. */ -#define REP_F_DELAY 0x00002 /* Delaying client sync-up. */ -#define REP_F_EPHASE1 0x00004 /* In phase 1 of election. */ -#define REP_F_EPHASE2 0x00008 /* In phase 2 of election. */ -#define REP_F_MASTER 0x00010 /* Master replica. */ -#define REP_F_MASTERELECT 0x00020 /* Master elect */ -#define REP_F_NOARCHIVE 0x00040 /* Rep blocks log_archive */ -#define REP_F_READY 0x00080 /* Wait for txn_cnt to be 0. */ -#define REP_F_RECOVER_LOG 0x00100 /* In recovery - log. */ -#define REP_F_RECOVER_PAGE 0x00200 /* In recovery - pages. */ -#define REP_F_RECOVER_UPDATE 0x00400 /* In recovery - files. */ -#define REP_F_RECOVER_VERIFY 0x00800 /* In recovery - verify. */ -#define REP_F_TALLY 0x01000 /* Tallied vote before elect. */ - u_int32_t flags; -} REP; - -/* - * Recovery flag mask to easily check any/all recovery bits. That is - * REP_F_READY and all REP_F_RECOVER*. This must change if the values - * of the flags change. - */ -#define REP_F_RECOVER_MASK \ - (REP_F_READY | REP_F_RECOVER_LOG | REP_F_RECOVER_PAGE | \ - REP_F_RECOVER_UPDATE | REP_F_RECOVER_VERIFY) - -#define IN_ELECTION(R) F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2) -#define IN_ELECTION_TALLY(R) \ - F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2 | REP_F_TALLY) -#define IS_REP_MASTER(dbenv) \ - (REP_ON(dbenv) && ((DB_REP *)(dbenv)->rep_handle)->region && \ - F_ISSET(((REP *)((DB_REP *)(dbenv)->rep_handle)->region), \ - REP_F_MASTER)) - -#define IS_REP_CLIENT(dbenv) \ - (REP_ON(dbenv) && ((DB_REP *)(dbenv)->rep_handle)->region && \ - F_ISSET(((REP *)((DB_REP *)(dbenv)->rep_handle)->region), \ - REP_F_CLIENT)) - -#define IS_CLIENT_PGRECOVER(dbenv) \ - (IS_REP_CLIENT(dbenv) && \ - F_ISSET(((REP *)((DB_REP *)(dbenv)->rep_handle)->region), \ - REP_F_RECOVER_PAGE)) - -/* - * Macros to figure out if we need to do replication pre/post-amble processing. - * Skip for specific DB handles owned by the replication layer, either because - * replication is running recovery or because it's a handle entirely owned by - * the replication code (replication opens its own databases to track state). - */ -#define IS_ENV_REPLICATED(E) (REP_ON(E) && \ - ((DB_REP *)((E)->rep_handle))->region != NULL && \ - ((DB_REP *)((E)->rep_handle))->region->flags != 0) - -/* - * Gap processing flags. 
These provide control over the basic - * gap processing algorithm for some special cases. - */ -#define REP_GAP_FORCE 0x001 /* Force a request for a gap. */ -#define REP_GAP_REREQUEST 0x002 /* Gap request is a forced rerequest. */ - /* REREQUEST is a superset of FORCE. */ - -/* - * Basic pre/post-amble processing. - */ -#define REPLICATION_WRAP(dbenv, func_call, ret) do { \ - int __rep_check, __t_ret; \ - __rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; \ - if (__rep_check && ((ret) = __env_rep_enter(dbenv, 0)) != 0) \ - return ((ret)); \ - (ret) = func_call; \ - if (__rep_check && \ - (__t_ret = __env_db_rep_exit(dbenv)) != 0 && (ret) == 0) \ - (ret) = __t_ret; \ -} while (0) - -/* - * Per-process replication structure. - * - * There are 2 mutexes used in replication. - * 1. mtx_region - This protects the fields of the rep region above. - * 2. mtx_clientdb - This protects the per-process flags, and bookkeeping - * database and all of the components that maintain it. Those - * components include the following fields in the log region (see log.h): - * a. ready_lsn - * b. waiting_lsn - * c. verify_lsn - * d. wait_recs - * e. rcvd_recs - * f. max_wait_lsn - * These fields in the log region are NOT protected by the log region lock at - * all. - * - * Note that the per-process flags should truly be protected by a special - * per-process thread mutex, but it is currently set in so isolated a manner - * that it didn't make sense to do so and in most case we're already holding - * the mtx_clientdb anyway. - * - * The lock ordering protocol is that mtx_clientdb must be acquired first and - * then either REP->mtx_region, or the LOG->mtx_region mutex may be acquired if - * necessary. - */ -struct __db_rep { - DB *rep_db; /* Bookkeeping database. */ - - REP *region; /* In memory structure. */ - u_int8_t *bulk; /* Shared memory bulk area. */ -#define DBREP_OPENFILES 0x0001 /* This handle has opened files. */ - u_int32_t flags; /* per-process flags. */ -}; - -/* - * Control structure for replication communication infrastructure. - * - * Note that the version information should be at the beginning of the - * structure, so that we can rearrange the rest of it while letting the - * version checks continue to work. DB_REPVERSION should be revved any time - * the rest of the structure changes or when the message numbers change. - */ -typedef struct __rep_control { -#define DB_REPVERSION 3 - u_int32_t rep_version; /* Replication version number. */ - u_int32_t log_version; /* Log version number. */ - - DB_LSN lsn; /* Log sequence number. */ - u_int32_t rectype; /* Message type. */ - u_int32_t gen; /* Generation number. */ - u_int32_t flags; /* log_put flag value. */ -} REP_CONTROL; - -/* Election vote information. */ -typedef struct __rep_vote { - u_int32_t egen; /* Election generation. */ - int nsites; /* Number of sites I've been in - * communication with. */ - int nvotes; /* Number of votes needed to win. */ - int priority; /* My site's priority. */ - u_int32_t tiebreaker; /* Tie-breaking quasi-random value. */ -} REP_VOTE_INFO; - -typedef struct __rep_vtally { - u_int32_t egen; /* Voter's election generation. */ - int eid; /* Voter's ID. */ -} REP_VTALLY; - -/* - * The REP_THROTTLE_ONLY flag is used to do throttle processing only. - * If set, it will only allow sending the REP_*_MORE message, but not - * the normal, non-throttled message. It is used to support throttling - * with bulk transfer. - */ -/* Flags for __rep_send_throttle. */ -#define REP_THROTTLE_ONLY 0x0001 /* Send _MORE message only. 
*/ - -/* Throttled message processing information. */ -typedef struct __rep_throttle { - DB_LSN lsn; /* LSN of this record. */ - DBT *data_dbt; /* DBT of this record. */ - u_int32_t gbytes; /* This call's max gbytes sent. */ - u_int32_t bytes; /* This call's max bytes sent. */ - u_int32_t type; /* Record type. */ -} REP_THROTTLE; - -/* Bulk processing information. */ -/* - * !!! - * We use a uintptr_t for the offset. We'd really like to use a ptrdiff_t - * since that really is what it is. But ptrdiff_t is not portable and - * doesn't exist everywhere. - */ -typedef struct __rep_bulk { - u_int8_t *addr; /* Address of bulk buffer. */ - uintptr_t *offp; /* Ptr to current offset into buffer. */ - u_int32_t len; /* Bulk buffer length. */ - u_int32_t type; /* Item type in buffer (log, page). */ - DB_LSN lsn; /* First LSN in buffer. */ - int eid; /* ID of potential recipients. */ -#define BULK_FORCE 0x001 /* Force buffer after this record. */ -#define BULK_XMIT 0x002 /* Buffer in transit. */ - u_int32_t *flagsp; /* Buffer flags. */ -} REP_BULK; - -/* - * This structure takes care of representing a transaction. - * It holds all the records, sorted by page number so that - * we can obtain locks and apply updates in a deadlock free - * order. - */ -typedef struct __lsn_collection { - u_int nlsns; - u_int nalloc; - DB_LSN *array; -} LSN_COLLECTION; - -/* - * This is used by the page-prep routines to do the lock_vec call to - * apply the updates for a single transaction or a collection of - * transactions. - */ -typedef struct _linfo { - int n; - DB_LOCKREQ *reqs; - DBT *objs; -} linfo_t; - -#include "dbinc_auto/rep_ext.h" -#endif /* !_REP_H_ */ diff --git a/storage/bdb/dbinc/shqueue.h b/storage/bdb/dbinc/shqueue.h deleted file mode 100644 index 55cba7fc179..00000000000 --- a/storage/bdb/dbinc/shqueue.h +++ /dev/null @@ -1,347 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: shqueue.h,v 12.2 2005/08/12 13:17:21 bostic Exp $ - */ - -#ifndef _SYS_SHQUEUE_H_ -#define _SYS_SHQUEUE_H_ - -/* - * This file defines two types of data structures: lists and tail queues - * similarly to the include file . - * - * The difference is that this set of macros can be used for structures that - * reside in shared memory that may be mapped at different addresses in each - * process. In most cases, the macros for shared structures exactly mirror - * the normal macros, although the macro calls require an additional type - * parameter, only used by the HEAD and ENTRY macros of the standard macros. - * - * Since we use relative offsets of type ssize_t rather than pointers, 0 - * (aka NULL) is a valid offset and cannot be used to indicate the end - * of a list. Therefore, we use -1 to indicate end of list. - * - * The macros ending in "P" return pointers without checking for end or - * beginning of lists, the others check for end of list and evaluate to - * either a pointer or NULL. - * - * For details on the use of these macros, see the queue(3) manual page. - */ - -#if defined(__cplusplus) -extern "C" { -#endif - -/* - * Shared memory list definitions. - */ -#define SH_LIST_HEAD(name) \ -struct name { \ - ssize_t slh_first; /* first element */ \ -} - -#define SH_LIST_HEAD_INITIALIZER(head) \ - { -1 } - -#define SH_LIST_ENTRY \ -struct { \ - ssize_t sle_next; /* relative offset to next element */ \ - ssize_t sle_prev; /* relative offset of prev element */ \ -} - -/* - * Shared memory list functions. 
- */ - -#define SH_LIST_EMPTY(head) \ - ((head)->slh_first == -1) - -#define SH_LIST_FIRSTP(head, type) \ - ((struct type *)(((u_int8_t *)(head)) + (head)->slh_first)) - -#define SH_LIST_FIRST(head, type) \ - (SH_LIST_EMPTY(head) ? NULL : \ - ((struct type *)(((u_int8_t *)(head)) + (head)->slh_first))) - -#define SH_LIST_NEXTP(elm, field, type) \ - ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.sle_next)) - -#define SH_LIST_NEXT(elm, field, type) \ - ((elm)->field.sle_next == -1 ? NULL : \ - ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.sle_next))) - - /* - *__SH_LIST_PREV_OFF is private API. It calculates the address of - * the elm->field.sle_next member of a SH_LIST structure. All offsets - * between elements are relative to that point in SH_LIST structures. - */ -#define __SH_LIST_PREV_OFF(elm, field) \ - ((ssize_t *)(((u_int8_t *)(elm)) + (elm)->field.sle_prev)) - -#define SH_LIST_PREV(elm, field, type) \ - (struct type *)((ssize_t)elm - (*__SH_LIST_PREV_OFF(elm, field))) - -#define SH_LIST_FOREACH(var, head, field, type) \ - for ((var) = SH_LIST_FIRST((head), type); \ - (var); \ - (var) = SH_LIST_NEXT((var), field, type)) - -#define SH_PTR_TO_OFF(src, dest) \ - ((ssize_t)(((u_int8_t *)(dest)) - ((u_int8_t *)(src)))) - -/* - * Given correct A.next: B.prev = SH_LIST_NEXT_TO_PREV(A) - * in a list [A, B] - * The prev value is always the offset from an element to its preceding - * element's next location, not the beginning of the structure. To get - * to the beginning of an element structure in memory given an element - * do the following: - * A = B - (B.prev + (&B.next - B)) - * Take the element's next pointer and calculate what the corresponding - * Prev pointer should be -- basically it is the negation plus the offset - * of the next field in the structure. - */ -#define SH_LIST_NEXT_TO_PREV(elm, field) \ - (((elm)->field.sle_next == -1 ? 
0 : -(elm)->field.sle_next) + \ - SH_PTR_TO_OFF(elm, &(elm)->field.sle_next)) - -#define SH_LIST_INIT(head) (head)->slh_first = -1 - -#define SH_LIST_INSERT_BEFORE(head, listelm, elm, field, type) do { \ - if (listelm == SH_LIST_FIRST(head, type)) { \ - SH_LIST_INSERT_HEAD(head, elm, field, type); \ - } else { \ - (elm)->field.sle_next = SH_PTR_TO_OFF(elm, listelm); \ - (elm)->field.sle_prev = SH_LIST_NEXT_TO_PREV( \ - SH_LIST_PREV((listelm), field, type), field) + \ - (elm)->field.sle_next; \ - (SH_LIST_PREV(listelm, field, type))->field.sle_next = \ - (SH_PTR_TO_OFF((SH_LIST_PREV(listelm, field, \ - type)), elm)); \ - (listelm)->field.sle_prev = SH_LIST_NEXT_TO_PREV(elm, field); \ - } \ -} while (0) - -#define SH_LIST_INSERT_AFTER(listelm, elm, field, type) do { \ - if ((listelm)->field.sle_next != -1) { \ - (elm)->field.sle_next = SH_PTR_TO_OFF(elm, \ - SH_LIST_NEXTP(listelm, field, type)); \ - SH_LIST_NEXTP(listelm, field, type)->field.sle_prev = \ - SH_LIST_NEXT_TO_PREV(elm, field); \ - } else \ - (elm)->field.sle_next = -1; \ - (listelm)->field.sle_next = SH_PTR_TO_OFF(listelm, elm); \ - (elm)->field.sle_prev = SH_LIST_NEXT_TO_PREV(listelm, field); \ -} while (0) - -#define SH_LIST_INSERT_HEAD(head, elm, field, type) do { \ - if ((head)->slh_first != -1) { \ - (elm)->field.sle_next = \ - (head)->slh_first - SH_PTR_TO_OFF(head, elm); \ - SH_LIST_FIRSTP(head, type)->field.sle_prev = \ - SH_LIST_NEXT_TO_PREV(elm, field); \ - } else \ - (elm)->field.sle_next = -1; \ - (head)->slh_first = SH_PTR_TO_OFF(head, elm); \ - (elm)->field.sle_prev = SH_PTR_TO_OFF(elm, &(head)->slh_first); \ -} while (0) - -#define SH_LIST_REMOVE(elm, field, type) do { \ - if ((elm)->field.sle_next != -1) { \ - SH_LIST_NEXTP(elm, field, type)->field.sle_prev = \ - (elm)->field.sle_prev - (elm)->field.sle_next; \ - *__SH_LIST_PREV_OFF(elm, field) += (elm)->field.sle_next;\ - } else \ - *__SH_LIST_PREV_OFF(elm, field) = -1; \ -} while (0) - -#define SH_LIST_REMOVE_HEAD(head, field, type) do { \ - if (!SH_LIST_EMPTY(head)) { \ - SH_LIST_REMOVE(SH_LIST_FIRSTP(head, type), field, type);\ - } \ -} while (0) - -/* - * Shared memory tail queue definitions. - */ -#define SH_TAILQ_HEAD(name) \ -struct name { \ - ssize_t stqh_first; /* relative offset of first element */ \ - ssize_t stqh_last; /* relative offset of last's next */ \ -} - -#define SH_TAILQ_HEAD_INITIALIZER(head) \ - { -1, 0 } - -#define SH_TAILQ_ENTRY \ -struct { \ - ssize_t stqe_next; /* relative offset of next element */ \ - ssize_t stqe_prev; /* relative offset of prev's next */ \ -} - -/* - * Shared memory tail queue functions. - */ - -#define SH_TAILQ_EMPTY(head) \ - ((head)->stqh_first == -1) - -#define SH_TAILQ_FIRSTP(head, type) \ - ((struct type *)((u_int8_t *)(head) + (head)->stqh_first)) - -#define SH_TAILQ_FIRST(head, type) \ - (SH_TAILQ_EMPTY(head) ? NULL : SH_TAILQ_FIRSTP(head, type)) - -#define SH_TAILQ_NEXTP(elm, field, type) \ - ((struct type *)((u_int8_t *)(elm) + (elm)->field.stqe_next)) - -#define SH_TAILQ_NEXT(elm, field, type) \ - ((elm)->field.stqe_next == -1 ? NULL : \ - ((struct type *)((u_int8_t *)(elm) + (elm)->field.stqe_next))) - - /* - * __SH_TAILQ_PREV_OFF is private API. It calculates the address of - * the elm->field.stqe_next member of a SH_TAILQ structure. All - * offsets between elements are relative to that point in SH_TAILQ - * structures. 
- */ -#define __SH_TAILQ_PREV_OFF(elm, field) \ - ((ssize_t *)(((u_int8_t *)(elm)) + (elm)->field.stqe_prev)) - -#define SH_TAILQ_PREVP(elm, field, type) \ - (struct type *)((ssize_t)elm - (*__SH_TAILQ_PREV_OFF(elm, field))) - -#define SH_TAILQ_PREV(head, elm, field, type) \ - (((elm) == SH_TAILQ_FIRST(head, type)) ? NULL : \ - (struct type *)((ssize_t)elm - (*__SH_TAILQ_PREV_OFF(elm, field)))) - - /* - * __SH_TAILQ_LAST_OFF is private API. It calculates the address of - * the stqe_next member of a SH_TAILQ structure in the last element - * of this list. All offsets between elements are relative to that - * point in SH_TAILQ structures. - */ -#define __SH_TAILQ_LAST_OFF(head) \ - ((ssize_t *)(((u_int8_t *)(head)) + (head)->stqh_last)) - -#define SH_TAILQ_LASTP(head, field, type) \ - ((struct type *)((ssize_t)(head) + \ - ((ssize_t)((head)->stqh_last) - \ - ((ssize_t)SH_PTR_TO_OFF(SH_TAILQ_FIRST(head, type), \ - &(SH_TAILQ_FIRSTP(head, type)->field.stqe_next)))))) - -#define SH_TAILQ_LAST(head, field, type) \ - (SH_TAILQ_EMPTY(head) ? NULL : SH_TAILQ_LASTP(head, field, type)) - -/* - * Given correct A.next: B.prev = SH_TAILQ_NEXT_TO_PREV(A) - * in a list [A, B] - * The prev value is always the offset from an element to its preceding - * element's next location, not the beginning of the structure. To get - * to the beginning of an element structure in memory given an element - * do the following: - * A = B - (B.prev + (&B.next - B)) - */ -#define SH_TAILQ_NEXT_TO_PREV(elm, field) \ - (((elm)->field.stqe_next == -1 ? 0 : \ - (-(elm)->field.stqe_next) + \ - SH_PTR_TO_OFF(elm, &(elm)->field.stqe_next))) - -#define SH_TAILQ_FOREACH(var, head, field, type) \ - for ((var) = SH_TAILQ_FIRST((head), type); \ - (var); \ - (var) = SH_TAILQ_NEXT((var), field, type)) - -#define SH_TAILQ_FOREACH_REVERSE(var, head, field, type) \ - for ((var) = SH_TAILQ_LAST((head), field, type); \ - (var); \ - (var) = SH_TAILQ_PREV((head), (var), field, type)) - -#define SH_TAILQ_INIT(head) { \ - (head)->stqh_first = -1; \ - (head)->stqh_last = SH_PTR_TO_OFF(head, &(head)->stqh_first); \ -} - -#define SH_TAILQ_INSERT_HEAD(head, elm, field, type) do { \ - if ((head)->stqh_first != -1) { \ - (elm)->field.stqe_next = \ - (head)->stqh_first - SH_PTR_TO_OFF(head, elm); \ - SH_TAILQ_FIRSTP(head, type)->field.stqe_prev = \ - SH_TAILQ_NEXT_TO_PREV(elm, field); \ - } else { \ - (head)->stqh_last = \ - SH_PTR_TO_OFF(head, &(elm)->field.stqe_next); \ - (elm)->field.stqe_next = -1; \ - } \ - (head)->stqh_first = SH_PTR_TO_OFF(head, elm); \ - (elm)->field.stqe_prev = \ - SH_PTR_TO_OFF(elm, &(head)->stqh_first); \ -} while (0) - -#define SH_TAILQ_INSERT_TAIL(head, elm, field) do { \ - (elm)->field.stqe_next = -1; \ - (elm)->field.stqe_prev = \ - -SH_PTR_TO_OFF(head, elm) + (head)->stqh_last; \ - if ((head)->stqh_last == \ - SH_PTR_TO_OFF((head), &(head)->stqh_first)) \ - (head)->stqh_first = SH_PTR_TO_OFF(head, elm); \ - else \ - *__SH_TAILQ_LAST_OFF(head) = -(head)->stqh_last + \ - SH_PTR_TO_OFF((elm), &(elm)->field.stqe_next) + \ - SH_PTR_TO_OFF(head, elm); \ - (head)->stqh_last = \ - SH_PTR_TO_OFF(head, &((elm)->field.stqe_next)); \ -} while (0) - -#define SH_TAILQ_INSERT_BEFORE(head, listelm, elm, field, type) do { \ - if (listelm == SH_TAILQ_FIRST(head, type)) { \ - SH_TAILQ_INSERT_HEAD(head, elm, field, type); \ - } else { \ - (elm)->field.stqe_next = SH_PTR_TO_OFF(elm, listelm); \ - (elm)->field.stqe_prev = SH_TAILQ_NEXT_TO_PREV( \ - SH_TAILQ_PREVP((listelm), field, type), field) + \ - (elm)->field.stqe_next; \ - 
(SH_TAILQ_PREVP(listelm, field, type))->field.stqe_next =\ - (SH_PTR_TO_OFF((SH_TAILQ_PREVP(listelm, field, type)), \ - elm)); \ - (listelm)->field.stqe_prev = \ - SH_TAILQ_NEXT_TO_PREV(elm, field); \ - } \ -} while (0) - -#define SH_TAILQ_INSERT_AFTER(head, listelm, elm, field, type) do { \ - if ((listelm)->field.stqe_next != -1) { \ - (elm)->field.stqe_next = (listelm)->field.stqe_next - \ - SH_PTR_TO_OFF(listelm, elm); \ - SH_TAILQ_NEXTP(listelm, field, type)->field.stqe_prev = \ - SH_TAILQ_NEXT_TO_PREV(elm, field); \ - } else { \ - (elm)->field.stqe_next = -1; \ - (head)->stqh_last = \ - SH_PTR_TO_OFF(head, &elm->field.stqe_next); \ - } \ - (listelm)->field.stqe_next = SH_PTR_TO_OFF(listelm, elm); \ - (elm)->field.stqe_prev = SH_TAILQ_NEXT_TO_PREV(listelm, field); \ -} while (0) - -#define SH_TAILQ_REMOVE(head, elm, field, type) do { \ - if ((elm)->field.stqe_next != -1) { \ - SH_TAILQ_NEXTP(elm, field, type)->field.stqe_prev = \ - (elm)->field.stqe_prev + \ - SH_PTR_TO_OFF(SH_TAILQ_NEXTP(elm, \ - field, type), elm); \ - *__SH_TAILQ_PREV_OFF(elm, field) += elm->field.stqe_next;\ - } else { \ - (head)->stqh_last = (elm)->field.stqe_prev + \ - SH_PTR_TO_OFF(head, elm); \ - *__SH_TAILQ_PREV_OFF(elm, field) = -1; \ - } \ -} while (0) - -#if defined(__cplusplus) -} -#endif -#endif /* !_SYS_SHQUEUE_H_ */ diff --git a/storage/bdb/dbinc/tcl_db.h b/storage/bdb/dbinc/tcl_db.h deleted file mode 100644 index 4bc68ba12bb..00000000000 --- a/storage/bdb/dbinc/tcl_db.h +++ /dev/null @@ -1,241 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: tcl_db.h,v 12.4 2005/08/08 14:52:30 bostic Exp $ - */ - -#ifndef _DB_TCL_DB_H_ -#define _DB_TCL_DB_H_ - -#define MSG_SIZE 100 /* Message size */ - -enum INFOTYPE { - I_ENV, I_DB, I_DBC, I_TXN, I_MP, I_PG, I_LOCK, I_LOGC, I_NDBM, I_SEQ}; - -#define MAX_ID 8 /* Maximum number of sub-id's we need */ -#define DBTCL_PREP 64 /* Size of txn_recover preplist */ - -#define DBTCL_DBM 1 -#define DBTCL_NDBM 2 - -/* - * Why use a home grown package over the Tcl_Hash functions? - * - * We could have implemented the stuff below without maintaining our - * own list manipulation, efficiently hashing it with the available - * Tcl functions (Tcl_CreateHashEntry, Tcl_GetHashValue, etc). I chose - * not to do so for these reasons: - * - * We still need the information below. Using the hashing only removes - * us from needing the next/prev pointers. We still need the structure - * itself because we need more than one value associated with a widget. - * We need to keep track of parent pointers for sub-widgets (like cursors) - * so we can correctly close. We need to keep track of individual widget's - * id counters for any sub-widgets they may have. We need to be able to - * associate the name/client data outside the scope of the widget. - * - * So, is it better to use the hashing rather than - * the linear list we have now? I decided against it for the simple reason - * that to access the structure would require two calls. The first is - * Tcl_FindHashEntry(table, key) and then, once we have the entry, we'd - * have to do Tcl_GetHashValue(entry) to get the pointer of the structure. - * - * I believe the number of simultaneous DB widgets in existence at one time - * is not going to be that large (more than several dozen) such that - * linearly searching the list is not going to impact performance in a - * noticeable way. 
Should performance be impacted due to the size of the - * info list, then perhaps it is time to revisit this decision. - */ -typedef struct dbtcl_info { - LIST_ENTRY(dbtcl_info) entries; - Tcl_Interp *i_interp; - char *i_name; - enum INFOTYPE i_type; - union infop { - DB *dbp; - DBC *dbcp; - DB_ENV *envp; - DB_LOCK *lock; - DB_LOGC *logc; - DB_MPOOLFILE *mp; - DB_TXN *txnp; - void *anyp; - } un; - union data { - int anydata; - db_pgno_t pgno; - u_int32_t lockid; - } und; - union data2 { - int anydata; - int pagesz; - DB_COMPACT *c_data; - } und2; - DBT i_lockobj; - FILE *i_err; - char *i_errpfx; - - /* Callbacks--Tcl_Objs containing proc names */ - Tcl_Obj *i_btcompare; - Tcl_Obj *i_dupcompare; - Tcl_Obj *i_hashproc; - Tcl_Obj *i_rep_send; - Tcl_Obj *i_second_call; - - /* Environment ID for the i_rep_send callback. */ - Tcl_Obj *i_rep_eid; - - struct dbtcl_info *i_parent; - int i_otherid[MAX_ID]; -} DBTCL_INFO; - -#define i_anyp un.anyp -#define i_pagep un.anyp -#define i_envp un.envp -#define i_dbp un.dbp -#define i_dbcp un.dbcp -#define i_txnp un.txnp -#define i_mp un.mp -#define i_lock un.lock -#define i_logc un.logc - -#define i_data und.anydata -#define i_pgno und.pgno -#define i_locker und.lockid -#define i_data2 und2.anydata -#define i_pgsz und2.pagesz -#define i_cdata und2.c_data - -#define i_envtxnid i_otherid[0] -#define i_envmpid i_otherid[1] -#define i_envlockid i_otherid[2] -#define i_envlogcid i_otherid[3] - -#define i_mppgid i_otherid[0] - -#define i_dbdbcid i_otherid[0] - -extern int __debug_on, __debug_print, __debug_stop, __debug_test; - -typedef struct dbtcl_global { - LIST_HEAD(infohead, dbtcl_info) g_infohead; -} DBTCL_GLOBAL; -#define __db_infohead __dbtcl_global.g_infohead - -extern DBTCL_GLOBAL __dbtcl_global; - -/* - * Tcl_NewStringObj takes an "int" length argument, when the typical use is to - * call it with a size_t length (for example, returned by strlen). Tcl is in - * the wrong, but that doesn't help us much -- cast the argument. - */ -#define NewStringObj(a, b) \ - Tcl_NewStringObj(a, (int)b) - -#define NAME_TO_DB(name) (DB *)_NameToPtr((name)) -#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name)) -#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((name)) -#define NAME_TO_LOCK(name) (DB_LOCK *)_NameToPtr((name)) -#define NAME_TO_MP(name) (DB_MPOOLFILE *)_NameToPtr((name)) -#define NAME_TO_TXN(name) (DB_TXN *)_NameToPtr((name)) -#define NAME_TO_SEQUENCE(name) (DB_SEQUENCE *)_NameToPtr((name)) - -/* - * MAKE_STAT_LIST appends a {name value} pair to a result list that MUST be - * called 'res' that is a Tcl_Obj * in the local function. This macro also - * assumes a label "error" to go to in the event of a Tcl error. For stat - * functions this will typically go before the "free" function to free the - * stat structure returned by DB. - */ -#define MAKE_STAT_LIST(s, v) do { \ - result = _SetListElemInt(interp, res, (s), (long)(v)); \ - if (result != TCL_OK) \ - goto error; \ -} while (0) - -#define MAKE_WSTAT_LIST(s, v) do { \ - result = _SetListElemWideInt(interp, res, (s), (int64_t)(v)); \ - if (result != TCL_OK) \ - goto error; \ -} while (0) - -/* - * MAKE_STAT_LSN appends a {name {LSNfile LSNoffset}} pair to a result list - * that MUST be called 'res' that is a Tcl_Obj * in the local - * function. This macro also assumes a label "error" to go to - * in the even of a Tcl error. For stat functions this will - * typically go before the "free" function to free the stat structure - * returned by DB. 
- */ -#define MAKE_STAT_LSN(s, lsn) do { \ - myobjc = 2; \ - myobjv[0] = Tcl_NewLongObj((long)(lsn)->file); \ - myobjv[1] = Tcl_NewLongObj((long)(lsn)->offset); \ - lsnlist = Tcl_NewListObj(myobjc, myobjv); \ - myobjc = 2; \ - myobjv[0] = Tcl_NewStringObj((s), (int)strlen(s)); \ - myobjv[1] = lsnlist; \ - thislist = Tcl_NewListObj(myobjc, myobjv); \ - result = Tcl_ListObjAppendElement(interp, res, thislist); \ - if (result != TCL_OK) \ - goto error; \ -} while (0) - -/* - * MAKE_STAT_STRLIST appends a {name string} pair to a result list - * that MUST be called 'res' that is a Tcl_Obj * in the local - * function. This macro also assumes a label "error" to go to - * in the even of a Tcl error. For stat functions this will - * typically go before the "free" function to free the stat structure - * returned by DB. - */ -#define MAKE_STAT_STRLIST(s,s1) do { \ - result = _SetListElem(interp, res, (s), strlen(s), \ - (s1), strlen(s1)); \ - if (result != TCL_OK) \ - goto error; \ -} while (0) - -/* - * FLAG_CHECK checks that the given flag is not set yet. - * If it is, it sets up an error message. - */ -#define FLAG_CHECK(flag) do { \ - if ((flag) != 0) { \ - Tcl_SetResult(interp, \ - " Only 1 policy can be specified.\n", \ - TCL_STATIC); \ - result = TCL_ERROR; \ - break; \ - } \ -} while (0) - -/* - * FLAG_CHECK2 checks that the given flag is not set yet or is - * only set to the given allowed value. - * If it is, it sets up an error message. - */ -#define FLAG_CHECK2(flag, val) do { \ - if (((flag) & ~(val)) != 0) { \ - Tcl_SetResult(interp, \ - " Only 1 policy can be specified.\n", \ - TCL_STATIC); \ - result = TCL_ERROR; \ - break; \ - } \ -} while (0) - -/* - * IS_HELP checks whether the arg we bombed on is -?, which is a help option. - * If it is, we return TCL_OK (but leave the result set to whatever - * Tcl_GetIndexFromObj says, which lists all the valid options. Otherwise - * return TCL_ERROR. - */ -#define IS_HELP(s) \ - (strcmp(Tcl_GetStringFromObj(s,NULL), "-?") == 0) ? TCL_OK : TCL_ERROR - -#include "dbinc_auto/tcl_ext.h" -#endif /* !_DB_TCL_DB_H_ */ diff --git a/storage/bdb/dbinc/txn.h b/storage/bdb/dbinc/txn.h deleted file mode 100644 index 845cdee2349..00000000000 --- a/storage/bdb/dbinc/txn.h +++ /dev/null @@ -1,225 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: txn.h,v 12.7 2005/10/13 00:53:00 bostic Exp $ - */ - -#ifndef _TXN_H_ -#define _TXN_H_ - -#include "dbinc/xa.h" - -/* Operation parameters to the delayed commit processing code. */ -typedef enum { - TXN_CLOSE, /* Close a DB handle whose close had failed. */ - TXN_REMOVE, /* Remove a file. */ - TXN_TRADE, /* Trade lockers. */ - TXN_TRADED /* Already traded; downgrade lock. */ -} TXN_EVENT_T; - -struct __db_txnregion; typedef struct __db_txnregion DB_TXNREGION; -struct __txn_logrec; typedef struct __txn_logrec DB_TXNLOGREC; - -/* - * !!! - * TXN_MINIMUM = (DB_LOCK_MAXID + 1) but this makes compilers complain. - */ -#define TXN_MINIMUM 0x80000000 -#define TXN_MAXIMUM 0xffffffff /* Maximum number of txn ids. */ -#define TXN_INVALID 0 /* Invalid transaction ID. */ - -#define DEF_MAX_TXNS 20 /* Default max transactions. */ - -/* - * Internal data maintained in shared memory for each transaction. 
- */ -typedef struct __txn_detail { - u_int32_t txnid; /* current transaction id - used to link free list also */ - pid_t pid; /* Process owning txn */ - db_threadid_t tid; /* Thread owning txn */ - - DB_LSN last_lsn; /* last lsn written for this txn */ - DB_LSN begin_lsn; /* lsn of begin record */ - roff_t parent; /* Offset of transaction's parent. */ - roff_t name; /* Offset of txn name. */ - - SH_TAILQ_HEAD(__tdkids) kids; /* Linked list of child txn detail. */ - SH_TAILQ_ENTRY klinks; - -#define TXN_RUNNING 1 -#define TXN_ABORTED 2 -#define TXN_PREPARED 3 -#define TXN_COMMITTED 4 - u_int32_t status; /* status of the transaction */ -#define TXN_DTL_COLLECTED 0x1 /* collected during txn_recover */ -#define TXN_DTL_RESTORED 0x2 /* prepared txn restored */ -#define TXN_DTL_INMEMORY 0x4 /* uses in memory logs */ - u_int32_t flags; - - SH_TAILQ_ENTRY links; /* free/active list */ - -#define TXN_XA_ABORTED 1 -#define TXN_XA_DEADLOCKED 2 -#define TXN_XA_ENDED 3 -#define TXN_XA_PREPARED 4 -#define TXN_XA_STARTED 5 -#define TXN_XA_SUSPENDED 6 - u_int32_t xa_status; /* XA status */ - - /* - * XID (xid_t) structure: because these fields are logged, the - * sizes have to be explicit. - */ - u_int8_t xid[XIDDATASIZE]; /* XA global transaction id */ - u_int32_t bqual; /* bqual_length from XID */ - u_int32_t gtrid; /* gtrid_length from XID */ - int32_t format; /* XA format */ -} TXN_DETAIL; - -/* - * DB_TXNMGR -- - * The transaction manager encapsulates the transaction system. - */ -struct __db_txnmgr { - /* - * These fields need to be protected for multi-threaded support. - * - * Lock list of active transactions (including the content of each - * TXN_DETAIL structure on the list). - */ - db_mutex_t mutex; - /* List of active transactions. */ - TAILQ_HEAD(_chain, __db_txn) txn_chain; - - u_int32_t n_discards; /* Number of txns discarded. */ - - /* These fields are never updated after creation, so not protected. */ - DB_ENV *dbenv; /* Environment. */ - REGINFO reginfo; /* Region information. */ -}; - -/* Macros to lock/unlock the transaction region as a whole. */ -#define TXN_SYSTEM_LOCK(dbenv) \ - MUTEX_LOCK(dbenv, ((DB_TXNREGION *)((DB_TXNMGR *) \ - (dbenv)->tx_handle)->reginfo.primary)->mtx_region) -#define TXN_SYSTEM_UNLOCK(dbenv) \ - MUTEX_UNLOCK(dbenv, ((DB_TXNREGION *)((DB_TXNMGR *) \ - (dbenv)->tx_handle)->reginfo.primary)->mtx_region) - -/* - * DB_TXNREGION -- - * The primary transaction data structure in the shared memory region. - */ -struct __db_txnregion { - db_mutex_t mtx_region; /* Region mutex. */ - - u_int32_t maxtxns; /* maximum number of active TXNs */ - u_int32_t last_txnid; /* last transaction id given out */ - u_int32_t cur_maxid; /* current max unused id. */ - - db_mutex_t mtx_ckp; /* Single thread checkpoints. */ - DB_LSN last_ckp; /* lsn of the last checkpoint */ - time_t time_ckp; /* time of last checkpoint */ - - DB_TXN_STAT stat; /* Statistics for txns. */ - -#define TXN_IN_RECOVERY 0x01 /* environment is being recovered */ - u_int32_t flags; - /* active TXN list */ - SH_TAILQ_HEAD(__active) active_txn; -}; - -/* - * DB_TXNLOGREC -- - * An in-memory, linked-list copy of a log record. - */ -struct __txn_logrec { - STAILQ_ENTRY(__txn_logrec) links;/* Linked list. */ - - u_int8_t data[1]; /* Log record. */ -}; - -/* - * Log record types. Note that these are *not* alphabetical. This is - * intentional so that we don't change the meaning of values between - * software upgrades. - * - * EXPECTED, UNEXPECTED, IGNORE, and OK are used in the txnlist functions. 
- * Here is an explanation of how the statuses are used. - * - * TXN_OK - * BEGIN records for transactions found on the txnlist during - * OPENFILES (BEGIN records are those with a prev_lsn of 0,0) - * - * TXN_COMMIT - * Transaction committed and should be rolled forward. - * - * TXN_ABORT - * This transaction's changes must be undone. Either there was - * never a prepare or commit record for this transaction OR there - * was a commit, but we are recovering to a timestamp or particular - * LSN and that point is before this transaction's commit. - * - * TXN_PREPARE - * Prepare record, but no commit record is in the log. - * - * TXN_IGNORE - * Generic meaning is that this transaction should not be - * processed during later recovery passes. We use it in a - * number of different manners: - * - * 1. We never saw its BEGIN record. Therefore, the logs have - * been reclaimed and we *know* that this transaction doesn't - * need to be aborted, because in order for it to be - * reclaimed, there must have been a subsequent checkpoint - * (and any dirty pages for this transaction made it to - * disk). - * - * 2. This is a child transaction that created a database. - * For some reason, we don't want to recreate that database - * (i.e., it already exists or some other database created - * after it exists). - * - * 3. During recovery open of subdatabases, if the master check fails, - * we use a TXN_IGNORE on the create of the subdb in the nested - * transaction. - * - * 4. During a remove, the file with the name being removed isn't - * the file for which we are recovering a remove. - * - * TXN_EXPECTED - * After a successful open during recovery, we update the - * transaction's status to TXN_EXPECTED. The open was done - * in the parent, but in the open log record, we record the - * child transaction's ID if we also did a create. When there - * is a valid ID in that field, we use it and mark the child's - * status as TXN_EXPECTED (indicating that we don't need to redo - * a create for this file). - * - * When recovering a remove, if we don't find or can't open - * the file, the child (which does the remove) gets marked - * EXPECTED (indicating that we don't need to redo the remove). - * - * TXN_UNEXPECTED - * During recovery, we attempted an open that should have succeeded - * and we got ENOENT, so like with the EXPECTED case, we indicate - * in the child that we got the UNEXPECTED return so that we do redo - * the creating/deleting operation. - * - */ -#define TXN_OK 0 -#define TXN_COMMIT 1 -#define TXN_PREPARE 2 -#define TXN_ABORT 3 -#define TXN_IGNORE 4 -#define TXN_EXPECTED 5 -#define TXN_UNEXPECTED 6 - -#include "dbinc_auto/txn_auto.h" -#include "dbinc_auto/txn_ext.h" -#include "dbinc_auto/xa_ext.h" -#endif /* !_TXN_H_ */ diff --git a/storage/bdb/dbinc/xa.h b/storage/bdb/dbinc/xa.h deleted file mode 100644 index 80c4032d20d..00000000000 --- a/storage/bdb/dbinc/xa.h +++ /dev/null @@ -1,179 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1998-2005 - * Sleepycat Software. All rights reserved. 
- * - * $Id: xa.h,v 12.1 2005/06/16 20:21:49 bostic Exp $ - */ -/* - * Start of xa.h header - * - * Define a symbol to prevent multiple inclusions of this header file - */ -#ifndef XA_H -#define XA_H - -/* - * Transaction branch identification: XID and NULLXID: - */ -#define XIDDATASIZE 128 /* size in bytes */ -#define MAXGTRIDSIZE 64 /* maximum size in bytes of gtrid */ -#define MAXBQUALSIZE 64 /* maximum size in bytes of bqual */ - -struct xid_t { - long formatID; /* format identifier */ - long gtrid_length; /* value from 1 through 64 */ - long bqual_length; /* value from 1 through 64 */ - char data[XIDDATASIZE]; -}; -typedef struct xid_t XID; -/* - * A value of -1 in formatID means that the XID is null. - */ - -/* - * Declarations of routines by which RMs call TMs: - */ -extern int ax_reg __P((int, XID *, long)); -extern int ax_unreg __P((int, long)); - -/* - * XA Switch Data Structure - */ -#define RMNAMESZ 32 /* length of resource manager name, */ - /* including the null terminator */ -#define MAXINFOSIZE 256 /* maximum size in bytes of xa_info */ - /* strings, including the null - terminator */ -struct xa_switch_t { - char name[RMNAMESZ]; /* name of resource manager */ - long flags; /* resource manager specific options */ - long version; /* must be 0 */ - int (*xa_open_entry) /* xa_open function pointer */ - __P((char *, int, long)); - int (*xa_close_entry) /* xa_close function pointer */ - __P((char *, int, long)); - int (*xa_start_entry) /* xa_start function pointer */ - __P((XID *, int, long)); - int (*xa_end_entry) /* xa_end function pointer */ - __P((XID *, int, long)); - int (*xa_rollback_entry) /* xa_rollback function pointer */ - __P((XID *, int, long)); - int (*xa_prepare_entry) /* xa_prepare function pointer */ - __P((XID *, int, long)); - int (*xa_commit_entry) /* xa_commit function pointer */ - __P((XID *, int, long)); - int (*xa_recover_entry) /* xa_recover function pointer */ - __P((XID *, long, int, long)); - int (*xa_forget_entry) /* xa_forget function pointer */ - __P((XID *, int, long)); - int (*xa_complete_entry) /* xa_complete function pointer */ - __P((int *, int *, int, long)); -}; - -/* - * Flag definitions for the RM switch - */ -#define TMNOFLAGS 0x00000000L /* no resource manager features - selected */ -#define TMREGISTER 0x00000001L /* resource manager dynamically - registers */ -#define TMNOMIGRATE 0x00000002L /* resource manager does not support - association migration */ -#define TMUSEASYNC 0x00000004L /* resource manager supports - asynchronous operations */ -/* - * Flag definitions for xa_ and ax_ routines - */ -/* use TMNOFLAGGS, defined above, when not specifying other flags */ -#define TMASYNC 0x80000000L /* perform routine asynchronously */ -#define TMONEPHASE 0x40000000L /* caller is using one-phase commit - optimisation */ -#define TMFAIL 0x20000000L /* dissociates caller and marks - transaction branch rollback-only */ -#define TMNOWAIT 0x10000000L /* return if blocking condition - exists */ -#define TMRESUME 0x08000000L /* caller is resuming association with - suspended transaction branch */ -#define TMSUCCESS 0x04000000L /* dissociate caller from transaction - branch */ -#define TMSUSPEND 0x02000000L /* caller is suspending, not ending, - association */ -#define TMSTARTRSCAN 0x01000000L /* start a recovery scan */ -#define TMENDRSCAN 0x00800000L /* end a recovery scan */ -#define TMMULTIPLE 0x00400000L /* wait for any asynchronous - operation */ -#define TMJOIN 0x00200000L /* caller is joining existing - transaction branch */ -#define TMMIGRATE 
0x00100000L /* caller intends to perform - migration */ - -/* - * ax_() return codes (transaction manager reports to resource manager) - */ -#define TM_JOIN 2 /* caller is joining existing - transaction branch */ -#define TM_RESUME 1 /* caller is resuming association with - suspended transaction branch */ -#define TM_OK 0 /* normal execution */ -#define TMER_TMERR -1 /* an error occurred in the transaction - manager */ -#define TMER_INVAL -2 /* invalid arguments were given */ -#define TMER_PROTO -3 /* routine invoked in an improper - context */ - -/* - * xa_() return codes (resource manager reports to transaction manager) - */ -#define XA_RBBASE 100 /* The inclusive lower bound of the - rollback codes */ -#define XA_RBROLLBACK XA_RBBASE /* The rollback was caused by an - unspecified reason */ -#define XA_RBCOMMFAIL XA_RBBASE+1 /* The rollback was caused by a - communication failure */ -#define XA_RBDEADLOCK XA_RBBASE+2 /* A deadlock was detected */ -#define XA_RBINTEGRITY XA_RBBASE+3 /* A condition that violates the - integrity of the resources was - detected */ -#define XA_RBOTHER XA_RBBASE+4 /* The resource manager rolled back the - transaction branch for a reason not - on this list */ -#define XA_RBPROTO XA_RBBASE+5 /* A protocol error occurred in the - resource manager */ -#define XA_RBTIMEOUT XA_RBBASE+6 /* A transaction branch took too long */ -#define XA_RBTRANSIENT XA_RBBASE+7 /* May retry the transaction branch */ -#define XA_RBEND XA_RBTRANSIENT /* The inclusive upper bound of the - rollback codes */ -#define XA_NOMIGRATE 9 /* resumption must occur where - suspension occurred */ -#define XA_HEURHAZ 8 /* the transaction branch may have - been heuristically completed */ -#define XA_HEURCOM 7 /* the transaction branch has been - heuristically committed */ -#define XA_HEURRB 6 /* the transaction branch has been - heuristically rolled back */ -#define XA_HEURMIX 5 /* the transaction branch has been - heuristically committed and rolled - back */ -#define XA_RETRY 4 /* routine returned with no effect and - may be re-issued */ -#define XA_RDONLY 3 /* the transaction branch was read-only - and has been committed */ -#define XA_OK 0 /* normal execution */ -#define XAER_ASYNC -2 /* asynchronous operation already - outstanding */ -#define XAER_RMERR -3 /* a resource manager error occurred in - the transaction branch */ -#define XAER_NOTA -4 /* the XID is not valid */ -#define XAER_INVAL -5 /* invalid arguments were given */ -#define XAER_PROTO -6 /* routine invoked in an improper - context */ -#define XAER_RMFAIL -7 /* resource manager unavailable */ -#define XAER_DUPID -8 /* the XID already exists */ -#define XAER_OUTSIDE -9 /* resource manager doing work outside - transaction */ -#endif /* ifndef XA_H */ -/* - * End of xa.h header - */ diff --git a/storage/bdb/dbinc_auto/.empty b/storage/bdb/dbinc_auto/.empty deleted file mode 100644 index cb45fa6cd5a..00000000000 --- a/storage/bdb/dbinc_auto/.empty +++ /dev/null @@ -1 +0,0 @@ -this is here to force the directory to exist diff --git a/storage/bdb/dbm/dbm.c b/storage/bdb/dbm/dbm.c deleted file mode 100644 index a7f484b4357..00000000000 --- a/storage/bdb/dbm/dbm.c +++ /dev/null @@ -1,517 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993 - * Margo Seltzer. All rights reserved. - */ -/* - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. 
- * - * This code is derived from software contributed to Berkeley by - * Margo Seltzer. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: dbm.c,v 12.2 2005/06/16 20:21:49 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#endif - -#define DB_DBM_HSEARCH 1 -#include "db_int.h" - -/* - * - * This package provides dbm and ndbm compatible interfaces to DB. - * - * EXTERN: #if DB_DBM_HSEARCH != 0 - * - * EXTERN: int __db_ndbm_clearerr __P((DBM *)); - * EXTERN: void __db_ndbm_close __P((DBM *)); - * EXTERN: int __db_ndbm_delete __P((DBM *, datum)); - * EXTERN: int __db_ndbm_dirfno __P((DBM *)); - * EXTERN: int __db_ndbm_error __P((DBM *)); - * EXTERN: datum __db_ndbm_fetch __P((DBM *, datum)); - * EXTERN: datum __db_ndbm_firstkey __P((DBM *)); - * EXTERN: datum __db_ndbm_nextkey __P((DBM *)); - * EXTERN: DBM *__db_ndbm_open __P((const char *, int, int)); - * EXTERN: int __db_ndbm_pagfno __P((DBM *)); - * EXTERN: int __db_ndbm_rdonly __P((DBM *)); - * EXTERN: int __db_ndbm_store __P((DBM *, datum, datum, int)); - * - * EXTERN: int __db_dbm_close __P((void)); - * EXTERN: int __db_dbm_delete __P((datum)); - * EXTERN: datum __db_dbm_fetch __P((datum)); - * EXTERN: datum __db_dbm_firstkey __P((void)); - * EXTERN: int __db_dbm_init __P((char *)); - * EXTERN: datum __db_dbm_nextkey __P((datum)); - * EXTERN: int __db_dbm_store __P((datum, datum)); - * - * EXTERN: #endif - */ - -/* - * The DBM routines, which call the NDBM routines. 
- */ -static DBM *__cur_db; - -static void __db_no_open __P((void)); - -int -__db_dbm_init(file) - char *file; -{ - if (__cur_db != NULL) - dbm_close(__cur_db); - if ((__cur_db = - dbm_open(file, O_CREAT | O_RDWR, __db_omode(OWNER_RW))) != NULL) - return (0); - if ((__cur_db = dbm_open(file, O_RDONLY, 0)) != NULL) - return (0); - return (-1); -} - -int -__db_dbm_close() -{ - if (__cur_db != NULL) { - dbm_close(__cur_db); - __cur_db = NULL; - } - return (0); -} - -datum -__db_dbm_fetch(key) - datum key; -{ - datum item; - - if (__cur_db == NULL) { - __db_no_open(); - item.dptr = NULL; - item.dsize = 0; - return (item); - } - return (dbm_fetch(__cur_db, key)); -} - -datum -__db_dbm_firstkey() -{ - datum item; - - if (__cur_db == NULL) { - __db_no_open(); - item.dptr = NULL; - item.dsize = 0; - return (item); - } - return (dbm_firstkey(__cur_db)); -} - -datum -__db_dbm_nextkey(key) - datum key; -{ - datum item; - - COMPQUIET(key.dsize, 0); - - if (__cur_db == NULL) { - __db_no_open(); - item.dptr = NULL; - item.dsize = 0; - return (item); - } - return (dbm_nextkey(__cur_db)); -} - -int -__db_dbm_delete(key) - datum key; -{ - if (__cur_db == NULL) { - __db_no_open(); - return (-1); - } - return (dbm_delete(__cur_db, key)); -} - -int -__db_dbm_store(key, dat) - datum key, dat; -{ - if (__cur_db == NULL) { - __db_no_open(); - return (-1); - } - return (dbm_store(__cur_db, key, dat, DBM_REPLACE)); -} - -static void -__db_no_open() -{ - (void)fprintf(stderr, "dbm: no open database.\n"); -} - -/* - * This package provides dbm and ndbm compatible interfaces to DB. - * - * The NDBM routines, which call the DB routines. - */ -/* - * Returns: - * *DBM on success - * NULL on failure - */ -DBM * -__db_ndbm_open(file, oflags, mode) - const char *file; - int oflags, mode; -{ - DB *dbp; - DBC *dbc; - int ret; - char path[MAXPATHLEN]; - - /* - * !!! - * Don't use sprintf(3)/snprintf(3) -- the former is dangerous, and - * the latter isn't standard, and we're manipulating strings handed - * us by the application. - */ - if (strlen(file) + strlen(DBM_SUFFIX) + 1 > sizeof(path)) { - __os_set_errno(ENAMETOOLONG); - return (NULL); - } - (void)strcpy(path, file); - (void)strcat(path, DBM_SUFFIX); - if ((ret = db_create(&dbp, NULL, 0)) != 0) { - __os_set_errno(ret); - return (NULL); - } - - /* - * !!! - * The historic ndbm library corrected for opening O_WRONLY. - */ - if (oflags & O_WRONLY) { - oflags &= ~O_WRONLY; - oflags |= O_RDWR; - } - - if ((ret = dbp->set_pagesize(dbp, 4096)) != 0 || - (ret = dbp->set_h_ffactor(dbp, 40)) != 0 || - (ret = dbp->set_h_nelem(dbp, 1)) != 0 || - (ret = dbp->open(dbp, NULL, - path, NULL, DB_HASH, __db_oflags(oflags), mode)) != 0) { - __os_set_errno(ret); - return (NULL); - } - - if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0) { - (void)dbp->close(dbp, 0); - __os_set_errno(ret); - return (NULL); - } - - return ((DBM *)dbc); -} - -/* - * Returns: - * Nothing. 
- */ -void -__db_ndbm_close(dbm) - DBM *dbm; -{ - DBC *dbc; - - dbc = (DBC *)dbm; - - (void)dbc->dbp->close(dbc->dbp, 0); -} - -/* - * Returns: - * DATUM on success - * NULL on failure - */ -datum -__db_ndbm_fetch(dbm, key) - DBM *dbm; - datum key; -{ - DBC *dbc; - DBT _key, _data; - datum data; - int ret; - - dbc = (DBC *)dbm; - - memset(&_key, 0, sizeof(DBT)); - memset(&_data, 0, sizeof(DBT)); - _key.size = (u_int32_t)key.dsize; - _key.data = key.dptr; - - /* - * Note that we can't simply use the dbc we have to do a c_get/SET, - * because that cursor is the one used for sequential iteration and - * it has to remain stable in the face of intervening gets and puts. - */ - if ((ret = dbc->dbp->get(dbc->dbp, NULL, &_key, &_data, 0)) == 0) { - data.dptr = _data.data; - data.dsize = (int)_data.size; - } else { - data.dptr = NULL; - data.dsize = 0; - if (ret == DB_NOTFOUND) - __os_set_errno(ENOENT); - else { - __os_set_errno(ret); - F_SET(dbc->dbp, DB_AM_DBM_ERROR); - } - } - return (data); -} - -/* - * Returns: - * DATUM on success - * NULL on failure - */ -datum -__db_ndbm_firstkey(dbm) - DBM *dbm; -{ - DBC *dbc; - DBT _key, _data; - datum key; - int ret; - - dbc = (DBC *)dbm; - - memset(&_key, 0, sizeof(DBT)); - memset(&_data, 0, sizeof(DBT)); - - if ((ret = dbc->c_get(dbc, &_key, &_data, DB_FIRST)) == 0) { - key.dptr = _key.data; - key.dsize = (int)_key.size; - } else { - key.dptr = NULL; - key.dsize = 0; - if (ret == DB_NOTFOUND) - __os_set_errno(ENOENT); - else { - __os_set_errno(ret); - F_SET(dbc->dbp, DB_AM_DBM_ERROR); - } - } - return (key); -} - -/* - * Returns: - * DATUM on success - * NULL on failure - */ -datum -__db_ndbm_nextkey(dbm) - DBM *dbm; -{ - DBC *dbc; - DBT _key, _data; - datum key; - int ret; - - dbc = (DBC *)dbm; - - memset(&_key, 0, sizeof(DBT)); - memset(&_data, 0, sizeof(DBT)); - - if ((ret = dbc->c_get(dbc, &_key, &_data, DB_NEXT)) == 0) { - key.dptr = _key.data; - key.dsize = (int)_key.size; - } else { - key.dptr = NULL; - key.dsize = 0; - if (ret == DB_NOTFOUND) - __os_set_errno(ENOENT); - else { - __os_set_errno(ret); - F_SET(dbc->dbp, DB_AM_DBM_ERROR); - } - } - return (key); -} - -/* - * Returns: - * 0 on success - * <0 failure - */ -int -__db_ndbm_delete(dbm, key) - DBM *dbm; - datum key; -{ - DBC *dbc; - DBT _key; - int ret; - - dbc = (DBC *)dbm; - - memset(&_key, 0, sizeof(DBT)); - _key.data = key.dptr; - _key.size = (u_int32_t)key.dsize; - - if ((ret = dbc->dbp->del(dbc->dbp, NULL, &_key, 0)) == 0) - return (0); - - if (ret == DB_NOTFOUND) - __os_set_errno(ENOENT); - else { - __os_set_errno(ret); - F_SET(dbc->dbp, DB_AM_DBM_ERROR); - } - return (-1); -} - -/* - * Returns: - * 0 on success - * <0 failure - * 1 if DBM_INSERT and entry exists - */ -int -__db_ndbm_store(dbm, key, data, flags) - DBM *dbm; - datum key, data; - int flags; -{ - DBC *dbc; - DBT _key, _data; - int ret; - - dbc = (DBC *)dbm; - - memset(&_key, 0, sizeof(DBT)); - _key.data = key.dptr; - _key.size = (u_int32_t)key.dsize; - - memset(&_data, 0, sizeof(DBT)); - _data.data = data.dptr; - _data.size = (u_int32_t)data.dsize; - - if ((ret = dbc->dbp->put(dbc->dbp, NULL, - &_key, &_data, flags == DBM_INSERT ? 
DB_NOOVERWRITE : 0)) == 0) - return (0); - - if (ret == DB_KEYEXIST) - return (1); - - __os_set_errno(ret); - F_SET(dbc->dbp, DB_AM_DBM_ERROR); - return (-1); -} - -int -__db_ndbm_error(dbm) - DBM *dbm; -{ - DBC *dbc; - - dbc = (DBC *)dbm; - - return (F_ISSET(dbc->dbp, DB_AM_DBM_ERROR)); -} - -int -__db_ndbm_clearerr(dbm) - DBM *dbm; -{ - DBC *dbc; - - dbc = (DBC *)dbm; - - F_CLR(dbc->dbp, DB_AM_DBM_ERROR); - return (0); -} - -/* - * Returns: - * 1 if read-only - * 0 if not read-only - */ -int -__db_ndbm_rdonly(dbm) - DBM *dbm; -{ - DBC *dbc; - - dbc = (DBC *)dbm; - - return (F_ISSET(dbc->dbp, DB_AM_RDONLY) ? 1 : 0); -} - -/* - * XXX - * We only have a single file descriptor that we can return, not two. Return - * the same one for both files. Hopefully, the user is using it for locking - * and picked one to use at random. - */ -int -__db_ndbm_dirfno(dbm) - DBM *dbm; -{ - return (dbm_pagfno(dbm)); -} - -int -__db_ndbm_pagfno(dbm) - DBM *dbm; -{ - DBC *dbc; - int fd; - - dbc = (DBC *)dbm; - - (void)dbc->dbp->fd(dbc->dbp, &fd); - return (fd); -} diff --git a/storage/bdb/dbreg/dbreg.c b/storage/bdb/dbreg/dbreg.c deleted file mode 100644 index eb3e75cc739..00000000000 --- a/storage/bdb/dbreg/dbreg.c +++ /dev/null @@ -1,736 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: dbreg.c,v 12.12 2005/10/14 14:40:41 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/log.h" -#include "dbinc/txn.h" -#include "dbinc/db_am.h" - -static int __dbreg_push_id __P((DB_ENV *, DB *, int32_t)); -static int __dbreg_pop_id __P((DB_ENV *, int32_t *)); -static int __dbreg_pluck_id __P((DB_ENV *, int32_t)); - -/* - * The dbreg subsystem, as its name implies, registers database handles so - * that we can associate log messages with them without logging a filename - * or a full, unique DB ID. Instead, we assign each dbp an int32_t which is - * easy and cheap to log, and use this subsystem to map back and forth. - * - * Overview of how dbreg ids are managed: - * - * OPEN - * dbreg_setup (Creates FNAME struct.) - * dbreg_new_id (Assigns new ID to dbp and logs it. May be postponed - * until we attempt to log something else using that dbp, if the dbp - * was opened on a replication client.) - * - * CLOSE - * dbreg_close_id (Logs closure of dbp/revocation of ID.) - * dbreg_revoke_id (As name implies, revokes ID.) - * dbreg_teardown (Destroys FNAME.) - * - * RECOVERY - * dbreg_setup - * dbreg_assign_id (Assigns a particular ID we have in the log to a dbp.) - * - * sometimes: dbreg_revoke_id; dbreg_teardown - * other times: normal close path - * - * A note about locking: - * - * FNAME structures are referenced only by their corresponding dbp's - * until they have a valid id. - * - * Once they have a valid id, they must get linked into the log - * region list so they can get logged on checkpoints. - * - * An FNAME that may/does have a valid id must be accessed under - * protection of the mtx_filelist, with the following exception: - * - * We don't want to have to grab the mtx_filelist on every log - * record, and it should be safe not to do so when we're just - * looking at the id, because once allocated, the id should - * not change under a handle until the handle is closed. 
- * - * If a handle is closed during an attempt by another thread to - * log with it, well, the application doing the close deserves to - * go down in flames and a lot else is about to fail anyway. - * - * When in the course of logging we encounter an invalid id - * and go to allocate it lazily, we *do* need to check again - * after grabbing the mutex, because it's possible to race with - * another thread that has also decided that it needs to allocate - * a id lazily. - * - * See SR #5623 for further discussion of the new dbreg design. - */ - -/* - * __dbreg_setup -- - * Allocate and initialize an FNAME structure. The FNAME structures - * live in the log shared region and map one-to-one with open database handles. - * When the handle needs to be logged, the FNAME should have a valid fid - * allocated. If the handle currently isn't logged, it still has an FNAME - * entry. If we later discover that the handle needs to be logged, we can - * allocate a id for it later. (This happens when the handle is on a - * replication client that later becomes a master.) - * - * PUBLIC: int __dbreg_setup __P((DB *, const char *, u_int32_t)); - */ -int -__dbreg_setup(dbp, name, create_txnid) - DB *dbp; - const char *name; - u_int32_t create_txnid; -{ - DB_ENV *dbenv; - DB_LOG *dblp; - FNAME *fnp; - REGINFO *infop; - int ret; - size_t len; - void *namep; - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - infop = &dblp->reginfo; - - fnp = NULL; - namep = NULL; - - /* Allocate an FNAME and, if necessary, a buffer for the name itself. */ - LOG_SYSTEM_LOCK(dbenv); - if ((ret = __db_shalloc(infop, sizeof(FNAME), 0, &fnp)) != 0) - goto err; - memset(fnp, 0, sizeof(FNAME)); - if (name != NULL) { - len = strlen(name) + 1; - if ((ret = __db_shalloc(infop, len, 0, &namep)) != 0) - goto err; - fnp->name_off = R_OFFSET(infop, namep); - memcpy(namep, name, len); - } else - fnp->name_off = INVALID_ROFF; - - LOG_SYSTEM_UNLOCK(dbenv); - - /* - * Fill in all the remaining info that we'll need later to register - * the file, if we use it for logging. - */ - fnp->id = DB_LOGFILEID_INVALID; - fnp->s_type = dbp->type; - memcpy(fnp->ufid, dbp->fileid, DB_FILE_ID_LEN); - fnp->meta_pgno = dbp->meta_pgno; - fnp->create_txnid = create_txnid; - - dbp->log_filename = fnp; - - return (0); - -err: LOG_SYSTEM_UNLOCK(dbenv); - if (ret == ENOMEM) - __db_err(dbenv, - "Logging region out of memory; you may need to increase its size"); - - return (ret); -} - -/* - * __dbreg_teardown -- - * Destroy a DB handle's FNAME struct. - * - * PUBLIC: int __dbreg_teardown __P((DB *)); - */ -int -__dbreg_teardown(dbp) - DB *dbp; -{ - DB_ENV *dbenv; - DB_LOG *dblp; - REGINFO *infop; - FNAME *fnp; - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - infop = &dblp->reginfo; - fnp = dbp->log_filename; - - /* - * We may not have an FNAME if we were never opened. This is not an - * error. - */ - if (fnp == NULL || F_ISSET(fnp, DB_FNAME_NOTLOGGED)) - return (0); - - DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID); - - LOG_SYSTEM_LOCK(dbenv); - if (fnp->name_off != INVALID_ROFF) - __db_shalloc_free(infop, R_ADDR(infop, fnp->name_off)); - __db_shalloc_free(infop, fnp); - LOG_SYSTEM_UNLOCK(dbenv); - - dbp->log_filename = NULL; - - return (0); -} - -/* - * __dbreg_new_id -- - * Get an unused dbreg id to this database handle. - * Used as a wrapper to acquire the mutex and - * only set the id on success. 
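/*
 * [Editorial sketch -- not part of the deleted BDB sources. A minimal,
 * hypothetical illustration of the FNAME/id life cycle described in the
 * overview above (OPEN: setup + new_id, CLOSE: close_id + teardown),
 * assuming the usual db_int.h environment and an already-open handle
 * `dbp` and transaction `txn`. The file name "example.db" is invented.]
 */
static int
example_dbreg_lifecycle(DB *dbp, DB_TXN *txn)
{
	int ret;

	/* OPEN: allocate the FNAME, then assign and log a dbreg id. */
	if ((ret = __dbreg_setup(dbp, "example.db", TXN_INVALID)) != 0)
		return (ret);
	if ((ret = __dbreg_new_id(dbp, txn)) != 0)
		return (ret);

	/* CLOSE: log the close and revoke the id, then free the FNAME. */
	if ((ret = __dbreg_close_id(dbp, txn, DBREG_CLOSE)) != 0)
		return (ret);
	return (__dbreg_teardown(dbp));
}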
- * - * PUBLIC: int __dbreg_new_id __P((DB *, DB_TXN *)); - */ -int -__dbreg_new_id(dbp, txn) - DB *dbp; - DB_TXN *txn; -{ - DB_ENV *dbenv; - DB_LOG *dblp; - FNAME *fnp; - LOG *lp; - int32_t id; - int ret; - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - fnp = dbp->log_filename; - - /* The mtx_filelist protects the FNAME list and id management. */ - MUTEX_LOCK(dbenv, lp->mtx_filelist); - if (fnp->id != DB_LOGFILEID_INVALID) { - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - return (0); - } - if ((ret = __dbreg_get_id(dbp, txn, &id)) == 0) - fnp->id = id; - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - return (ret); -} - -/* - * __dbreg_get_id -- - * Assign an unused dbreg id to this database handle. - * Assume the caller holds the mtx_filelist locked. Assume the - * caller will set the fnp->id field with the id we return. - * - * PUBLIC: int __dbreg_get_id __P((DB *, DB_TXN *, int32_t *)); - */ -int -__dbreg_get_id(dbp, txn, idp) - DB *dbp; - DB_TXN *txn; - int32_t *idp; -{ - DB_ENV *dbenv; - DB_LOG *dblp; - FNAME *fnp; - LOG *lp; - int32_t id; - int ret; - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - fnp = dbp->log_filename; - - /* - * It's possible that after deciding we needed to call this function, - * someone else allocated an ID before we grabbed the lock. Check - * to make sure there was no race and we have something useful to do. - */ - /* Get an unused ID from the free list. */ - if ((ret = __dbreg_pop_id(dbenv, &id)) != 0) - goto err; - - /* If no ID was found, allocate a new one. */ - if (id == DB_LOGFILEID_INVALID) - id = lp->fid_max++; - - /* If the file is durable (i.e., not, not-durable), mark it as such. */ - if (!F_ISSET(dbp, DB_AM_NOT_DURABLE)) - F_SET(fnp, DB_FNAME_DURABLE); - - /* Hook the FNAME into the list of open files. */ - SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname); - - /* - * Log the registry. We should only request a new ID in situations - * where logging is reasonable. - */ - DB_ASSERT(!F_ISSET(dbp, DB_AM_RECOVER)); - - if ((ret = __dbreg_log_id(dbp, txn, id, 0)) != 0) - goto err; - - /* - * Once we log the create_txnid, we need to make sure we never - * log it again (as might happen if this is a replication client - * that later upgrades to a master). - */ - fnp->create_txnid = TXN_INVALID; - - DB_ASSERT(dbp->type == fnp->s_type); - DB_ASSERT(dbp->meta_pgno == fnp->meta_pgno); - - if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0) - goto err; - /* - * If we have a successful call, set the ID. Otherwise - * we have to revoke it and remove it from all the lists - * it has been added to, and return an invalid id. - */ -err: - if (ret != 0 && id != DB_LOGFILEID_INVALID) { - (void)__dbreg_revoke_id(dbp, 1, id); - id = DB_LOGFILEID_INVALID; - } - *idp = id; - return (ret); -} - -/* - * __dbreg_assign_id -- - * Assign a particular dbreg id to this database handle. - * - * PUBLIC: int __dbreg_assign_id __P((DB *, int32_t)); - */ -int -__dbreg_assign_id(dbp, id) - DB *dbp; - int32_t id; -{ - DB *close_dbp; - DB_ENV *dbenv; - DB_LOG *dblp; - FNAME *close_fnp, *fnp; - LOG *lp; - int ret; - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - fnp = dbp->log_filename; - - close_dbp = NULL; - close_fnp = NULL; - - /* The mtx_filelist protects the FNAME list and id management. */ - MUTEX_LOCK(dbenv, lp->mtx_filelist); - - /* We should only call this on DB handles that have no ID. 
*/ - DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID); - - /* - * Make sure there isn't already a file open with this ID. There can - * be in recovery, if we're recovering across a point where an ID got - * reused. - */ - if (__dbreg_id_to_fname(dblp, id, 1, &close_fnp) == 0) { - /* - * We want to save off any dbp we have open with this id. We - * can't safely close it now, because we hold the mtx_filelist, - * but we should be able to rely on it being open in this - * process, and we're running recovery, so no other thread - * should muck with it if we just put off closing it until - * we're ready to return. - * - * Once we have the dbp, revoke its id; we're about to - * reuse it. - */ - ret = __dbreg_id_to_db_int(dbenv, NULL, &close_dbp, id, 0, 0); - if (ret == ENOENT) { - ret = 0; - goto cont; - } else if (ret != 0) - goto err; - - if ((ret = __dbreg_revoke_id(close_dbp, 1, - DB_LOGFILEID_INVALID)) != 0) - goto err; - } - - /* - * Remove this ID from the free list, if it's there, and make sure - * we don't allocate it anew. - */ -cont: if ((ret = __dbreg_pluck_id(dbenv, id)) != 0) - goto err; - if (id >= lp->fid_max) - lp->fid_max = id + 1; - - /* Now go ahead and assign the id to our dbp. */ - fnp->id = id; - /* If the file is durable (i.e., not, not-durable), mark it as such. */ - if (!F_ISSET(dbp, DB_AM_NOT_DURABLE)) - F_SET(fnp, DB_FNAME_DURABLE); - SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname); - - /* - * If we get an error adding the dbentry, revoke the id. - * We void the return value since we want to retain and - * return the original error in ret anyway. - */ - if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0) - (void)__dbreg_revoke_id(dbp, 1, id); - -err: MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - - /* There's nothing useful that our caller can do if this close fails. */ - if (close_dbp != NULL) - (void)__db_close(close_dbp, NULL, DB_NOSYNC); - - return (ret); -} - -/* - * __dbreg_revoke_id -- - * Take a log id away from a dbp, in preparation for closing it, - * but without logging the close. - * - * PUBLIC: int __dbreg_revoke_id __P((DB *, int, int32_t)); - */ -int -__dbreg_revoke_id(dbp, have_lock, force_id) - DB *dbp; - int have_lock; - int32_t force_id; -{ - DB_ENV *dbenv; - DB_LOG *dblp; - FNAME *fnp; - LOG *lp; - int32_t id; - int ret; - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - fnp = dbp->log_filename; - - /* If we lack an ID, this is a null-op. */ - if (fnp == NULL) - return (0); - - /* - * If we have a force_id, we had an error after allocating - * the id, and putting it on the fq list, but before we - * finished setting up fnp. So, if we have a force_id use it. - */ - if (force_id != DB_LOGFILEID_INVALID) - id = force_id; - else if (fnp->id == DB_LOGFILEID_INVALID) - return (0); - else - id = fnp->id; - if (!have_lock) - MUTEX_LOCK(dbenv, lp->mtx_filelist); - - fnp->id = DB_LOGFILEID_INVALID; - - /* Remove the FNAME from the list of open files. */ - SH_TAILQ_REMOVE(&lp->fq, fnp, q, __fname); - - /* - * Remove this id from the dbentry table and push it onto the - * free list. - */ - if ((ret = __dbreg_rem_dbentry(dblp, id)) == 0) { - /* - * If we are not in recovery but the file was opened - * for a recovery operation, then this process aborted - * a transaction for another process and the id may - * still be in use, so don't reuse this id. 
- */ - if (!F_ISSET(dbp, DB_AM_RECOVER) || IS_RECOVERING(dbenv)) - ret = __dbreg_push_id(dbenv, dbp, id); - } - - if (!have_lock) - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - return (ret); -} - -/* - * __dbreg_close_id -- - * Take a dbreg id away from a dbp that we're closing, and log - * the unregistry. - * - * PUBLIC: int __dbreg_close_id __P((DB *, DB_TXN *, u_int32_t)); - */ -int -__dbreg_close_id(dbp, txn, op) - DB *dbp; - DB_TXN *txn; - u_int32_t op; -{ - DBT fid_dbt, r_name, *dbtp; - DB_ENV *dbenv; - DB_LOG *dblp; - DB_LSN r_unused; - FNAME *fnp; - LOG *lp; - int ret; - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - fnp = dbp->log_filename; - - /* If we lack an ID, this is a null-op. */ - if (fnp == NULL || fnp->id == DB_LOGFILEID_INVALID) - return (0); - - MUTEX_LOCK(dbenv, lp->mtx_filelist); - - if (fnp->name_off == INVALID_ROFF) - dbtp = NULL; - else { - memset(&r_name, 0, sizeof(r_name)); - r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off); - r_name.size = - (u_int32_t)strlen((char *)r_name.data) + 1; - dbtp = &r_name; - } - memset(&fid_dbt, 0, sizeof(fid_dbt)); - fid_dbt.data = fnp->ufid; - fid_dbt.size = DB_FILE_ID_LEN; - if ((ret = __dbreg_register_log(dbenv, txn, &r_unused, - F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0, - op, dbtp, &fid_dbt, fnp->id, - fnp->s_type, fnp->meta_pgno, TXN_INVALID)) != 0) { - /* - * We are trying to close, but the log write failed. - * Unfortunately, close needs to plow forward, because - * the application can't do anything with the handle. - * Make the entry in the shared memory region so that - * when we close the environment, we know that this - * happened. Also, make sure we remove this from the - * per-process table, so that we don't try to close it - * later. - */ - F_SET(fnp, DB_FNAME_NOTLOGGED); - (void)__dbreg_rem_dbentry(dblp, fnp->id); - goto err; - } - - ret = __dbreg_revoke_id(dbp, 1, DB_LOGFILEID_INVALID); - -err: MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - return (ret); -} - -/* - * __dbreg_push_id and __dbreg_pop_id -- - * Dbreg ids from closed files are kept on a stack in shared memory - * for recycling. (We want to reuse them as much as possible because each - * process keeps open files in an array by ID.) Push them to the stack and - * pop them from it, managing memory as appropriate. - * - * The stack is protected by the mtx_filelist, and both functions assume it - * is already locked. - */ -static int -__dbreg_push_id(dbenv, dbp, id) - DB_ENV *dbenv; - DB *dbp; - int32_t id; -{ - DB_LOG *dblp; - DB_REP *db_rep; - LOG *lp; - REGINFO *infop; - int32_t *stack, *newstack; - int ret; - - dblp = dbenv->lg_handle; - infop = &dblp->reginfo; - lp = infop->primary; - db_rep = dbenv->rep_handle; - - /* - * If our fid generation in replication has changed, this fid should - * not be pushed back onto the stack. - */ - if (REP_ON(dbenv) && db_rep->region != NULL && - ((REP *)db_rep->region)->gen != dbp->fid_gen) - return (0); - /* Check if we have room on the stack. 
*/ - if (lp->free_fid_stack == INVALID_ROFF || - lp->free_fids_alloced <= lp->free_fids + 1) { - LOG_SYSTEM_LOCK(dbenv); - if ((ret = __db_shalloc(infop, - (lp->free_fids_alloced + 20) * sizeof(u_int32_t), 0, - &newstack)) != 0) { - LOG_SYSTEM_UNLOCK(dbenv); - return (ret); - } - - if (lp->free_fid_stack != INVALID_ROFF) { - stack = R_ADDR(infop, lp->free_fid_stack); - memcpy(newstack, stack, - lp->free_fids_alloced * sizeof(u_int32_t)); - __db_shalloc_free(infop, stack); - } - lp->free_fid_stack = R_OFFSET(infop, newstack); - lp->free_fids_alloced += 20; - LOG_SYSTEM_UNLOCK(dbenv); - } - - stack = R_ADDR(infop, lp->free_fid_stack); - stack[lp->free_fids++] = id; - return (0); -} - -static int -__dbreg_pop_id(dbenv, id) - DB_ENV *dbenv; - int32_t *id; -{ - DB_LOG *dblp; - LOG *lp; - int32_t *stack; - - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - /* Do we have anything to pop? */ - if (lp->free_fid_stack != INVALID_ROFF && lp->free_fids > 0) { - stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack); - *id = stack[--lp->free_fids]; - } else - *id = DB_LOGFILEID_INVALID; - - return (0); -} - -/* - * __dbreg_pluck_id -- - * Remove a particular dbreg id from the stack of free ids. This is - * used when we open a file, as in recovery, with a specific ID that might - * be on the stack. - * - * Returns success whether or not the particular id was found, and like - * push and pop, assumes that the mtx_filelist is locked. - */ -static int -__dbreg_pluck_id(dbenv, id) - DB_ENV *dbenv; - int32_t id; -{ - DB_LOG *dblp; - LOG *lp; - int32_t *stack; - u_int i; - - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - /* Do we have anything to look at? */ - if (lp->free_fid_stack != INVALID_ROFF) { - stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack); - for (i = 0; i < lp->free_fids; i++) - if (id == stack[i]) { - /* - * Found it. Overwrite it with the top - * id (which may harmlessly be itself), - * and shorten the stack by one. - */ - stack[i] = stack[lp->free_fids - 1]; - lp->free_fids--; - return (0); - } - } - - return (0); -} - -/* - * __dbreg_log_id -- - * Used for in-memory named files. They are created in mpool and - * are given id's early in the open process so that we can read and - * create pages in the mpool for the files. However, at the time that - * the mpf is created, the file may not be fully created and/or its - * meta-data may not be fully known, so we can't do a full dbregister. - * This is a routine exported that will log a complete dbregister - * record that will allow for both recovery and replication. - * - * PUBLIC: int __dbreg_log_id __P((DB *, DB_TXN *, int32_t, int)); - */ -int -__dbreg_log_id(dbp, txn, id, needlock) - DB *dbp; - DB_TXN *txn; - int32_t id; - int needlock; -{ - DBT fid_dbt, r_name; - DB_ENV *dbenv; - DB_LOG *dblp; - DB_LSN unused; - FNAME *fnp; - LOG *lp; - u_int32_t op; - int ret; - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - fnp = dbp->log_filename; - - /* Verify that the fnp has been initialized. */ - if (fnp->s_type == DB_UNKNOWN) { - memcpy(fnp->ufid, dbp->fileid, DB_FILE_ID_LEN); - fnp->s_type = dbp->type; - } - - /* - * Log the registry. We should only request a new ID in situations - * where logging is reasonable. 
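/*
 * [Editorial sketch -- not part of the deleted BDB sources. A stand-alone
 * restatement of the id-recycling policy implemented by __dbreg_push_id and
 * __dbreg_pop_id above: ids of closed files are kept on a stack that grows
 * in chunks of 20, and a brand-new id is minted only when the stack is
 * empty. Plain realloc stands in for the shared-region allocator.]
 */
#include <stdint.h>
#include <stdlib.h>

struct id_stack {
	int32_t  *ids;		/* recycled ids */
	unsigned  alloced;	/* slots allocated */
	unsigned  used;		/* slots in use */
	int32_t   next_new;	/* analogue of lp->fid_max */
};

static int
push_free_id(struct id_stack *s, int32_t id)
{
	int32_t *p;

	if (s->used + 1 > s->alloced) {		/* grow by 20, as above */
		if ((p = realloc(s->ids,
		    (s->alloced + 20) * sizeof(int32_t))) == NULL)
			return (-1);
		s->ids = p;
		s->alloced += 20;
	}
	s->ids[s->used++] = id;
	return (0);
}

static int32_t
get_id(struct id_stack *s)
{
	/* Prefer a recycled id; otherwise mint a new one. */
	return (s->used > 0 ? s->ids[--s->used] : s->next_new++);
}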
- */ - memset(&fid_dbt, 0, sizeof(fid_dbt)); - memset(&r_name, 0, sizeof(r_name)); - - if (needlock) - MUTEX_LOCK(dbenv, lp->mtx_filelist); - - if (fnp->name_off != INVALID_ROFF) { - r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off); - r_name.size = (u_int32_t)strlen((char *)r_name.data) + 1; - } - - fid_dbt.data = dbp->fileid; - fid_dbt.size = DB_FILE_ID_LEN; - - op = !F_ISSET(dbp, DB_AM_OPEN_CALLED) ? DBREG_PREOPEN : - (F_ISSET(dbp, DB_AM_INMEM) ? DBREG_REOPEN : DBREG_OPEN); - ret = __dbreg_register_log(dbenv, txn, &unused, - F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0, - op, r_name.size == 0 ? NULL : &r_name, &fid_dbt, id, - fnp->s_type, fnp->meta_pgno, fnp->create_txnid); - - if (needlock) - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - - return (ret); -} diff --git a/storage/bdb/dbreg/dbreg.src b/storage/bdb/dbreg/dbreg.src deleted file mode 100644 index 26793c1b916..00000000000 --- a/storage/bdb/dbreg/dbreg.src +++ /dev/null @@ -1,46 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: dbreg.src,v 12.1 2005/06/16 20:21:49 bostic Exp $ - */ - -PREFIX __dbreg -DBPRIVATE - -INCLUDE #ifndef NO_SYSTEM_INCLUDES -INCLUDE #include -INCLUDE -INCLUDE #include -INCLUDE #include -INCLUDE #endif -INCLUDE -INCLUDE #include "db_int.h" -INCLUDE #include "dbinc/crypto.h" -INCLUDE #include "dbinc/db_page.h" -INCLUDE #include "dbinc/db_dispatch.h" -INCLUDE #include "dbinc/db_am.h" -INCLUDE #include "dbinc/log.h" -INCLUDE #include "dbinc/txn.h" -INCLUDE - -/* - * Used for registering name/id translations at open or close. - * opcode: register or unregister - * name: file name - * fileid: unique file id - * ftype: file type - * ftype: database type - * id: transaction id of the subtransaction that created the fs object - */ -BEGIN register 2 -ARG opcode u_int32_t lu -DBT name DBT s -DBT uid DBT s -ARG fileid int32_t ld -ARG ftype DBTYPE lx -ARG meta_pgno db_pgno_t lu -ARG id u_int32_t lx -END diff --git a/storage/bdb/dbreg/dbreg_rec.c b/storage/bdb/dbreg/dbreg_rec.c deleted file mode 100644 index 44f663b3be9..00000000000 --- a/storage/bdb/dbreg/dbreg_rec.c +++ /dev/null @@ -1,421 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2005 - * Sleepycat Software. All rights reserved. - */ -/* - * Copyright (c) 1995, 1996 - * The President and Fellows of Harvard University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: dbreg_rec.c,v 12.8 2005/11/09 14:20:32 margo Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_shash.h" -#include "dbinc/db_am.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/txn.h" - -static int __dbreg_open_file __P((DB_ENV *, - DB_TXN *, __dbreg_register_args *, void *)); - -/* - * PUBLIC: int __dbreg_register_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -__dbreg_register_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - DB_ENTRY *dbe; - DB_LOG *dblp; - DB *dbp; - __dbreg_register_args *argp; - int do_close, do_open, do_rem, ret, t_ret; - u_int32_t status; - - dblp = dbenv->lg_handle; - dbp = NULL; - -#ifdef DEBUG_RECOVER - REC_PRINT(__dbreg_register_print); -#endif - do_open = do_close = 0; - if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0) - goto out; - - switch (argp->opcode) { - case DBREG_REOPEN: - case DBREG_PREOPEN: - case DBREG_OPEN: - /* - * In general, we redo the open on REDO and abort on UNDO. - * However, a reopen is a second instance of an open of - * in-memory files and we don't want to close them yet - * on abort, so just skip that here. - */ - if ((DB_REDO(op) || - op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES)) - do_open = 1; - else if (argp->opcode != DBREG_REOPEN) - do_close = 1; - break; - case DBREG_CLOSE: - if (DB_UNDO(op)) - do_open = 1; - else - do_close = 1; - break; - case DBREG_RCLOSE: - /* - * DBREG_RCLOSE was generated by recover because a file was - * left open. The POPENFILES pass, which is run to open - * files to abort prepared transactions, may not include the - * open for this file so we open it here. Note that a normal - * CLOSE is not legal before the prepared transaction is - * committed or aborted. - */ - if (DB_UNDO(op) || op == DB_TXN_POPENFILES) - do_open = 1; - else - do_close = 1; - break; - case DBREG_CHKPNT: - if (DB_UNDO(op) || - op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES) - do_open = 1; - break; - default: - DB_ASSERT(0); - ret = EINVAL; - break; - } - - if (do_open) { - /* - * We must open the db even if the meta page is not - * yet written as we may be creating subdatabase. - */ - if (op == DB_TXN_OPENFILES && argp->opcode != DBREG_CHKPNT) - F_SET(dblp, DBLOG_FORCE_OPEN); - - /* - * During an abort or an open pass to recover prepared txns, - * we need to make sure that we use the same locker id on the - * open. We pass the txnid along to ensure this. - */ - ret = __dbreg_open_file(dbenv, - op == DB_TXN_ABORT || op == DB_TXN_POPENFILES ? - argp->txnid : NULL, argp, info); - if (ret == DB_PAGE_NOTFOUND && argp->meta_pgno != PGNO_BASE_MD) - ret = ENOENT; - if (ret == ENOENT || ret == EINVAL) { - /* - * If this is an OPEN while rolling forward, it's - * possible that the file was recreated since last - * time we got here. 
In that case, we've got deleted - * set and probably shouldn't, so we need to check - * for that case and possibly retry. - */ - if (op == DB_TXN_FORWARD_ROLL && - argp->txnid != 0 && - dblp->dbentry[argp->fileid].deleted) { - dblp->dbentry[argp->fileid].deleted = 0; - ret = - __dbreg_open_file(dbenv, NULL, argp, info); - if (ret == DB_PAGE_NOTFOUND && - argp->meta_pgno != PGNO_BASE_MD) - ret = ENOENT; - } - /* - * We treat ENOENT as OK since it's possible that - * the file was renamed or deleted. - * All other errors, we return. - */ - if (ret == ENOENT) - ret = 0; - } - F_CLR(dblp, DBLOG_FORCE_OPEN); - } - - if (do_close) { - /* - * If we are undoing an open, or redoing a close, - * then we need to close the file. If we are simply - * revoking then we just need to grab the DBP and revoke - * the log id. - * - * If the file is deleted, then we can just ignore this close. - * Otherwise, we should usually have a valid dbp we should - * close or whose reference count should be decremented. - * However, if we shut down without closing a file, we may, in - * fact, not have the file open, and that's OK. - */ - do_rem = 0; - MUTEX_LOCK(dbenv, dblp->mtx_dbreg); - if (argp->fileid < dblp->dbentry_cnt) { - /* - * Typically, closes should match an open which means - * that if this is a close, there should be a valid - * entry in the dbentry table when we get here, - * however there are exceptions. 1. If this is an - * OPENFILES pass, then we may have started from - * a log file other than the first, and the - * corresponding open appears in an earlier file. - * 2. If we are undoing an open on an abort or - * recovery, it's possible that we failed after - * the log record, but before we actually entered - * a handle here. - * 3. If we aborted an open, then we wrote a non-txnal - * RCLOSE into the log. During the forward pass, the - * file won't be open, and that's OK. - */ - dbe = &dblp->dbentry[argp->fileid]; - if (dbe->dbp == NULL && !dbe->deleted) { - /* No valid entry here. */ - if ((DB_REDO(op) && - argp->opcode != DBREG_RCLOSE) || - argp->opcode == DBREG_CHKPNT) { - __db_err(dbenv, - "Warning: Improper file close at %lu/%lu", - (u_long)lsnp->file, - (u_long)lsnp->offset); - } - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - goto done; - } - - /* We have either an open entry or a deleted entry. */ - if ((dbp = dbe->dbp) != NULL) { - /* - * If we're a replication client, it's - * possible to get here with a dbp that - * the user opened, but which we later - * assigned a fileid to. Be sure that - * we only close dbps that we opened in - * the recovery code or that were opened - * inside a currently aborting transaction. - */ - do_rem = F_ISSET(dbp, DB_AM_RECOVER) || - op == DB_TXN_ABORT; - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - if (op == DB_TXN_ABORT) - (void)__dbreg_close_id(dbp, - NULL, DBREG_RCLOSE); - else - (void)__dbreg_revoke_id(dbp, 0, - DB_LOGFILEID_INVALID); - } else if (dbe->deleted) { - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - if ((ret = __dbreg_rem_dbentry( - dblp, argp->fileid)) != 0) - goto out; - } - } else - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - - /* - * During recovery, all files are closed. On an abort, we only - * close the file if we opened it during the abort - * (DB_AM_RECOVER set), otherwise we simply do a __db_refresh. - * For the close case, if remove or rename has closed the file, - * don't request a sync, because a NULL mpf would be a problem. - * - * If we are undoing a create we'd better discard any buffers - * from the memory pool. 
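/*
 * [Editorial summary -- not part of the deleted BDB sources. The opcode
 * switch above reduces to roughly this decision table, where "open" and
 * "close" refer to the do_open/do_close flags:
 *
 *	opcode		redo / OPENFILES / POPENFILES	undo (abort)
 *	OPEN, PREOPEN	open				close
 *	REOPEN		open				(skipped: in-memory)
 *	CLOSE		close				open
 *	RCLOSE		close (open on POPENFILES)	open
 *	CHKPNT		open on OPENFILES/POPENFILES	open
 * ]
 */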
We identify creates because the - * argp->id field contains the transaction containing the file - * create; if that id is invalid, we are not creating. - * - * On the backward pass, we need to "undo" opens even if the - * transaction in which they appeared committed, because we have - * already undone the corresponding close. In that case, the - * id will be valid, but we do not want to discard buffers. - */ - if (do_rem && dbp != NULL) { - if (argp->id != TXN_INVALID) { - if ((ret = __db_txnlist_find(dbenv, - info, argp->txnid->txnid, &status)) - != DB_NOTFOUND && ret != 0) - goto out; - if (ret == DB_NOTFOUND || status != TXN_COMMIT) - F_SET(dbp, DB_AM_DISCARD); - ret = 0; - } - - if (op == DB_TXN_ABORT && - !F_ISSET(dbp, DB_AM_RECOVER)) { - if ((t_ret = __db_refresh(dbp, - NULL, DB_NOSYNC, NULL, 0)) != 0 && ret == 0) - ret = t_ret; - } else { - if (op == DB_TXN_APPLY && - (t_ret = __db_sync(dbp)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __db_close( - dbp, NULL, DB_NOSYNC)) != 0 && ret == 0) - ret = t_ret; - } - } - } -done: if (ret == 0) - *lsnp = argp->prev_lsn; -out: if (argp != NULL) - __os_free(dbenv, argp); - return (ret); -} - -/* - * __dbreg_open_file -- - * Called during log_register recovery. Make sure that we have an - * entry in the dbentry table for this ndx. Returns 0 on success, - * non-zero on error. - */ -static int -__dbreg_open_file(dbenv, txn, argp, info) - DB_ENV *dbenv; - DB_TXN *txn; - __dbreg_register_args *argp; - void *info; -{ - DB_ENTRY *dbe; - DB_LOG *dblp; - DB *dbp; - u_int32_t id, status; - int ret; - - dblp = (DB_LOG *)dbenv->lg_handle; - - /* - * When we're opening, we have to check that the name we are opening - * is what we expect. If it's not, then we close the old file and - * open the new one. - */ - MUTEX_LOCK(dbenv, dblp->mtx_dbreg); - if (argp->fileid != DB_LOGFILEID_INVALID && - argp->fileid < dblp->dbentry_cnt) - dbe = &dblp->dbentry[argp->fileid]; - else - dbe = NULL; - - if (dbe != NULL) { - if (dbe->deleted) { - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - return (ENOENT); - } - - /* - * At the end of OPENFILES, we may have a file open. If this - * is a reopen, then we will always close and reopen. If the - * open was part of a committed transaction, so it doesn't - * get undone. However, if the fileid was previously used, - * we'll see a close that may need to get undone. There are - * three ways we can detect this. 1) the meta-pgno in the - * current file does not match that of the open file, 2) the - * file uid of the current file does not match that of the - * previously opened file, 3) the current file is unnamed, in - * which case it should never be opened during recovery. - */ - if ((dbp = dbe->dbp) != NULL) { - if (argp->opcode == DBREG_REOPEN || - dbp->meta_pgno != argp->meta_pgno || - argp->name.size == 0 || - memcmp(dbp->fileid, argp->uid.data, - DB_FILE_ID_LEN) != 0) { - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - (void)__dbreg_revoke_id(dbp, 0, - DB_LOGFILEID_INVALID); - if (F_ISSET(dbp, DB_AM_RECOVER)) - (void)__db_close(dbp, NULL, DB_NOSYNC); - goto reopen; - } - - /* - * We should only get here if we already have the - * dbp from an openfiles pass, in which case, what's - * here had better be the same dbp. - */ - DB_ASSERT(dbe->dbp == dbp); - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - - /* - * This is a successful open. We need to record that - * in the txnlist so that we know how to handle the - * subtransaction that created the file system object. 
- */ - if (argp->id != TXN_INVALID && - (ret = __db_txnlist_update(dbenv, info, - argp->id, TXN_EXPECTED, NULL, &status, 1)) != 0) - return (ret); - return (0); - } - } - - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - -reopen: - /* - * We never re-open temporary files. Temp files are only useful during - * aborts in which case the dbp was entered when the file was - * registered. During recovery, we treat temp files as properly deleted - * files, allowing the open to fail and not reporting any errors when - * recovery fails to get a valid dbp from __dbreg_id_to_db. - */ - if (argp->name.size == 0) { - (void)__dbreg_add_dbentry(dbenv, dblp, NULL, argp->fileid); - return (ENOENT); - } - - /* - * We are about to pass a recovery txn pointer into the main library. - * We need to make sure that any accessed fields are set appropriately. - */ - if (txn != NULL) { - id = txn->txnid; - memset(txn, 0, sizeof(DB_TXN)); - txn->txnid = id; - txn->mgrp = dbenv->tx_handle; - } - - return (__dbreg_do_open(dbenv, - txn, dblp, argp->uid.data, argp->name.data, argp->ftype, - argp->fileid, argp->meta_pgno, info, argp->id, argp->opcode)); -} diff --git a/storage/bdb/dbreg/dbreg_stat.c b/storage/bdb/dbreg/dbreg_stat.c deleted file mode 100644 index bdbb9b2604f..00000000000 --- a/storage/bdb/dbreg/dbreg_stat.c +++ /dev/null @@ -1,126 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: dbreg_stat.c,v 12.5 2005/10/12 15:01:47 margo Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" -#include "dbinc/log.h" -#include "dbinc/txn.h" - -#ifdef HAVE_STATISTICS -static int __dbreg_print_dblist __P((DB_ENV *, u_int32_t)); - -/* - * __dbreg_stat_print -- - * Print the dbreg statistics. - * - * PUBLIC: int __dbreg_stat_print __P((DB_ENV *, u_int32_t)); - */ -int -__dbreg_stat_print(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - int ret; - - if (LF_ISSET(DB_STAT_ALL) && - (ret = __dbreg_print_dblist(dbenv, flags)) != 0) - return (ret); - - return (0); -} - -/* - * __dbreg_print_fname -- - * Display the contents of an FNAME structure. - * - * PUBLIC: void __dbreg_print_fname __P((DB_ENV *, FNAME *)); - */ -void -__dbreg_print_fname(dbenv, fnp) - DB_ENV *dbenv; - FNAME *fnp; -{ - static const FN fn[] = { - { DB_FNAME_DURABLE, "DB_FNAME_DURABLE" }, - { DB_FNAME_NOTLOGGED, "DB_FNAME_NOTLOGGED" }, - { 0, NULL } - }; - - __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); - __db_msg(dbenv, "DB handle FNAME contents:"); - STAT_LONG("log ID", fnp->id); - STAT_ULONG("Meta pgno", fnp->meta_pgno); - __db_print_fileid(dbenv, fnp->ufid, "\tFile ID"); - STAT_ULONG("create txn", fnp->create_txnid); - __db_prflags(dbenv, NULL, fnp->flags, fn, NULL, "\tFlags"); -} - -/* - * __dbreg_print_dblist -- - * Display the DB_ENV's list of files. 
- */ -static int -__dbreg_print_dblist(dbenv, flags) - DB_ENV *dbenv; - u_int32_t flags; -{ - DB *dbp; - DB_LOG *dblp; - FNAME *fnp; - LOG *lp; - int del, first; - char *name; - - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); - __db_msg(dbenv, "LOG FNAME list:"); - __mutex_print_debug_single( - dbenv, "File name mutex", lp->mtx_filelist, flags); - - STAT_LONG("Fid max", lp->fid_max); - - MUTEX_LOCK(dbenv, lp->mtx_filelist); - for (first = 1, fnp = SH_TAILQ_FIRST(&lp->fq, __fname); - fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) { - if (first) { - first = 0; - __db_msg(dbenv, - "ID\tName\tType\tPgno\tTxnid\tDBP-info"); - } - if (fnp->name_off == INVALID_ROFF) - name = ""; - else - name = R_ADDR(&dblp->reginfo, fnp->name_off); - - dbp = fnp->id >= dblp->dbentry_cnt ? NULL : - dblp->dbentry[fnp->id].dbp; - del = fnp->id >= dblp->dbentry_cnt ? 0 : - dblp->dbentry[fnp->id].deleted; - __db_msg(dbenv, "%ld\t%s\t%s\t%lu\t%lx\t%s %d %lx %lx", - (long)fnp->id, name, - __db_dbtype_to_string(fnp->s_type), - (u_long)fnp->meta_pgno, (u_long)fnp->create_txnid, - dbp == NULL ? "No DBP" : "DBP", del, P_TO_ULONG(dbp), - (u_long)(dbp == NULL ? 0 : dbp->flags)); - } - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - - return (0); -} -#endif diff --git a/storage/bdb/dbreg/dbreg_util.c b/storage/bdb/dbreg/dbreg_util.c deleted file mode 100644 index 9c3082b1e9c..00000000000 --- a/storage/bdb/dbreg/dbreg_util.c +++ /dev/null @@ -1,672 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2005 - * Sleepycat Software. All rights reserved. - * - * $Id: dbreg_util.c,v 12.10 2005/10/12 15:01:47 margo Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" -#include "dbinc/db_shash.h" -#include "dbinc/fop.h" -#include "dbinc/log.h" -#include "dbinc/mp.h" -#include "dbinc/txn.h" - -static int __dbreg_check_master __P((DB_ENV *, u_int8_t *, char *)); - -/* - * __dbreg_add_dbentry -- - * Adds a DB entry to the dbreg DB entry table. - * - * PUBLIC: int __dbreg_add_dbentry __P((DB_ENV *, DB_LOG *, DB *, int32_t)); - */ -int -__dbreg_add_dbentry(dbenv, dblp, dbp, ndx) - DB_ENV *dbenv; - DB_LOG *dblp; - DB *dbp; - int32_t ndx; -{ - int32_t i; - int ret; - - ret = 0; - - MUTEX_LOCK(dbenv, dblp->mtx_dbreg); - - /* - * Check if we need to grow the table. Note, ndx is 0-based (the - * index into the DB entry table) an dbentry_cnt is 1-based, the - * number of available slots. - */ - if (dblp->dbentry_cnt <= ndx) { - if ((ret = __os_realloc(dbenv, - (size_t)(ndx + DB_GROW_SIZE) * sizeof(DB_ENTRY), - &dblp->dbentry)) != 0) - goto err; - - /* Initialize the new entries. */ - for (i = dblp->dbentry_cnt; i < ndx + DB_GROW_SIZE; i++) { - dblp->dbentry[i].dbp = NULL; - dblp->dbentry[i].deleted = 0; - } - dblp->dbentry_cnt = i; - } - - DB_ASSERT(dblp->dbentry[ndx].dbp == NULL); - dblp->dbentry[ndx].deleted = dbp == NULL; - dblp->dbentry[ndx].dbp = dbp; - -err: MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - return (ret); -} - -/* - * __dbreg_rem_dbentry - * Remove an entry from the DB entry table. 
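/*
 * [Editorial sketch -- not part of the deleted BDB sources. A stand-alone
 * restatement of the grow-to-fit pattern used by __dbreg_add_dbentry above:
 * the per-process table is indexed directly by dbreg id, so it is extended
 * in fixed-size chunks whenever an id beyond the current count shows up.
 * GROW_CHUNK is an invented stand-in for DB_GROW_SIZE.]
 */
#include <stdint.h>
#include <stdlib.h>

#define	GROW_CHUNK	32		/* hypothetical chunk size */

struct entry {
	void	*dbp;			/* open handle, if any */
	int	 deleted;		/* file known to be deleted */
};

static int
ensure_slot(struct entry **tablep, int32_t *cntp, int32_t ndx)
{
	struct entry *t;
	int32_t i;

	if (ndx < *cntp)
		return (0);		/* slot already present */

	if ((t = realloc(*tablep,
	    (size_t)(ndx + GROW_CHUNK) * sizeof(struct entry))) == NULL)
		return (-1);

	/* Initialize only the newly added slots. */
	for (i = *cntp; i < ndx + GROW_CHUNK; i++) {
		t[i].dbp = NULL;
		t[i].deleted = 0;
	}
	*tablep = t;
	*cntp = ndx + GROW_CHUNK;
	return (0);
}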
- * - * PUBLIC: int __dbreg_rem_dbentry __P((DB_LOG *, int32_t)); - */ -int -__dbreg_rem_dbentry(dblp, ndx) - DB_LOG *dblp; - int32_t ndx; -{ - MUTEX_LOCK(dblp->dbenv, dblp->mtx_dbreg); - if (dblp->dbentry_cnt > ndx) { - dblp->dbentry[ndx].dbp = NULL; - dblp->dbentry[ndx].deleted = 0; - } - MUTEX_UNLOCK(dblp->dbenv, dblp->mtx_dbreg); - - return (0); -} - -/* - * __dbreg_log_files -- - * Put a DBREG_CHKPNT/CLOSE log record for each open database. - * - * PUBLIC: int __dbreg_log_files __P((DB_ENV *)); - */ -int -__dbreg_log_files(dbenv) - DB_ENV *dbenv; -{ - DB_LOG *dblp; - DB_LSN r_unused; - DBT *dbtp, fid_dbt, t; - FNAME *fnp; - LOG *lp; - int ret; - - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - - ret = 0; - - MUTEX_LOCK(dbenv, lp->mtx_filelist); - - for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname); - fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) { - - if (fnp->name_off == INVALID_ROFF) - dbtp = NULL; - else { - memset(&t, 0, sizeof(t)); - t.data = R_ADDR(&dblp->reginfo, fnp->name_off); - t.size = (u_int32_t)strlen(t.data) + 1; - dbtp = &t; - } - memset(&fid_dbt, 0, sizeof(fid_dbt)); - fid_dbt.data = fnp->ufid; - fid_dbt.size = DB_FILE_ID_LEN; - /* - * Output DBREG_CHKPNT records which will be processed during - * the OPENFILES pass of recovery. At the end of recovery we - * want to output the files that were open so a future recovery - * run will have the correct files open during a backward pass. - * For this we output DBREG_RCLOSE records so the files will be - * closed on the forward pass. - */ - if ((ret = __dbreg_register_log(dbenv, - NULL, &r_unused, - F_ISSET(fnp, DB_FNAME_DURABLE) ? 0 : DB_LOG_NOT_DURABLE, - F_ISSET(dblp, DBLOG_RECOVER) ? DBREG_RCLOSE : DBREG_CHKPNT, - dbtp, &fid_dbt, fnp->id, fnp->s_type, fnp->meta_pgno, - TXN_INVALID)) != 0) - break; - } - - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - - return (ret); -} - -/* - * __dbreg_close_files -- - * Remove the id's of open files and actually close those - * files that were opened by the recovery daemon. We sync the - * file, unless its mpf pointer has been NULLed by a db_remove or - * db_rename. We may not have flushed the log_register record that - * closes the file. - * - * PUBLIC: int __dbreg_close_files __P((DB_ENV *)); - */ -int -__dbreg_close_files(dbenv) - DB_ENV *dbenv; -{ - DB_LOG *dblp; - DB *dbp; - int ret, t_ret; - int32_t i; - - /* If we haven't initialized logging, we have nothing to do. */ - if (!LOGGING_ON(dbenv)) - return (0); - - dblp = dbenv->lg_handle; - ret = 0; - MUTEX_LOCK(dbenv, dblp->mtx_dbreg); - for (i = 0; i < dblp->dbentry_cnt; i++) { - /* - * We only want to close dbps that recovery opened. Any - * dbps that weren't opened by recovery but show up here - * are about to be unconditionally removed from the table. - * Before doing so, we need to revoke their log fileids - * so that we don't end up leaving around FNAME entries - * for dbps that shouldn't have them. - * - * Any FNAME entries that were marked NOTLOGGED had the - * log write fail while they were being closed. Since it's - * too late to be logging now we flag that as a failure - * so recovery will be run. This will get returned by - * __dbreg_revoke_id. - */ - if ((dbp = dblp->dbentry[i].dbp) != NULL) { - /* - * It's unsafe to call DB->close or revoke_id - * while holding the thread lock, because - * we'll call __dbreg_rem_dbentry and grab it again. - * - * Just drop it. 
Since dbreg ids go monotonically - * upward, concurrent opens should be safe, and the - * user should have no business closing files while - * we're in this loop anyway--we're in the process of - * making all outstanding dbps invalid. - */ - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - if (F_ISSET(dbp, DB_AM_RECOVER)) - t_ret = __db_close(dbp, - NULL, dbp->mpf == NULL ? DB_NOSYNC : 0); - else - t_ret = __dbreg_revoke_id( - dbp, 0, DB_LOGFILEID_INVALID); - if (ret == 0) - ret = t_ret; - MUTEX_LOCK(dbenv, dblp->mtx_dbreg); - } - - dblp->dbentry[i].deleted = 0; - dblp->dbentry[i].dbp = NULL; - } - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - return (ret); -} - -/* - * __dbreg_id_to_db -- - * Return the DB corresponding to the specified dbreg id. - * - * PUBLIC: int __dbreg_id_to_db __P((DB_ENV *, DB_TXN *, DB **, int32_t, int)); - */ -int -__dbreg_id_to_db(dbenv, txn, dbpp, ndx, inc) - DB_ENV *dbenv; - DB_TXN *txn; - DB **dbpp; - int32_t ndx; - int inc; -{ - return (__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, 1)); -} - -/* - * __dbreg_id_to_db_int -- - * Return the DB corresponding to the specified dbreg id. The internal - * version takes a final parameter that indicates whether we should attempt - * to open the file if no mapping is found. During recovery, the recovery - * routines all want to try to open the file (and this is called from - * __dbreg_id_to_db), however, if we have a multi-process environment where - * some processes may not have the files open (e.g., XA), then we also get - * called from __dbreg_assign_id and it's OK if there is no mapping. - * - * PUBLIC: int __dbreg_id_to_db_int __P((DB_ENV *, - * PUBLIC: DB_TXN *, DB **, int32_t, int, int)); - */ -int -__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen) - DB_ENV *dbenv; - DB_TXN *txn; - DB **dbpp; - int32_t ndx; - int inc, tryopen; -{ - DB_LOG *dblp; - FNAME *fname; - int ret; - char *name; - - ret = 0; - dblp = dbenv->lg_handle; - COMPQUIET(inc, 0); - - MUTEX_LOCK(dbenv, dblp->mtx_dbreg); - - /* - * Under XA, a process different than the one issuing DB operations - * may abort a transaction. In this case, the "recovery" routines - * are run by a process that does not necessarily have the file open, - * so we we must open the file explicitly. - */ - if (ndx >= dblp->dbentry_cnt || - (!dblp->dbentry[ndx].deleted && dblp->dbentry[ndx].dbp == NULL)) { - if (!tryopen || F_ISSET(dblp, DBLOG_RECOVER)) { - ret = ENOENT; - goto err; - } - - /* - * __dbreg_id_to_fname acquires the mtx_filelist mutex, which - * we can't safely acquire while we hold the thread lock. We - * no longer need it anyway--the dbentry table didn't have what - * we needed. - */ - MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - - if (__dbreg_id_to_fname(dblp, ndx, 0, &fname) != 0) - /* - * With transactional opens, we may actually have - * closed this file in the transaction in which - * case this will fail too. Then it's up to the - * caller to reopen the file. - */ - return (ENOENT); - - /* - * Note that we're relying on fname not to change, even though - * we released the mutex that protects it (mtx_filelist) inside - * __dbreg_id_to_fname. This should be a safe assumption, the - * other process that has the file open shouldn't be closing it - * while we're trying to abort. - */ - name = R_ADDR(&dblp->reginfo, fname->name_off); - - /* - * At this point, we are not holding the thread lock, so exit - * directly instead of going through the exit code at the - * bottom. 
If the __dbreg_do_open succeeded, then we don't need - * to do any of the remaining error checking at the end of this - * routine. - * XXX I am sending a NULL txnlist and 0 txnid which may be - * completely broken ;( - */ - if ((ret = __dbreg_do_open(dbenv, txn, dblp, - fname->ufid, name, fname->s_type, - ndx, fname->meta_pgno, NULL, 0, DBREG_OPEN)) != 0) - return (ret); - - *dbpp = dblp->dbentry[ndx].dbp; - return (0); - } - - /* - * Return DB_DELETED if the file has been deleted (it's not an error). - */ - if (dblp->dbentry[ndx].deleted) { - ret = DB_DELETED; - goto err; - } - - /* It's an error if we don't have a corresponding writeable DB. */ - if ((*dbpp = dblp->dbentry[ndx].dbp) == NULL) - ret = ENOENT; - else - /* - * If we are in recovery, then set that the file has - * been written. It is possible to run recovery, - * find all the pages in their post update state - * in the OS buffer pool, put a checkpoint in the log - * and then crash the system without forcing the pages - * to disk. If this is an in-memory file, we may not have - * an mpf yet. - */ - if ((*dbpp)->mpf != NULL && (*dbpp)->mpf->mfp != NULL) - (*dbpp)->mpf->mfp->file_written = 1; - -err: MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg); - return (ret); -} - -/* - * __dbreg_id_to_fname -- - * Traverse the shared-memory region looking for the entry that - * matches the passed dbreg id. Returns 0 on success; -1 on error. - * - * PUBLIC: int __dbreg_id_to_fname __P((DB_LOG *, int32_t, int, FNAME **)); - */ -int -__dbreg_id_to_fname(dblp, id, have_lock, fnamep) - DB_LOG *dblp; - int32_t id; - int have_lock; - FNAME **fnamep; -{ - DB_ENV *dbenv; - FNAME *fnp; - LOG *lp; - int ret; - - dbenv = dblp->dbenv; - lp = dblp->reginfo.primary; - - ret = -1; - - if (!have_lock) - MUTEX_LOCK(dbenv, lp->mtx_filelist); - for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname); - fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) { - if (fnp->id == id) { - *fnamep = fnp; - ret = 0; - break; - } - } - if (!have_lock) - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - - return (ret); -} -/* - * __dbreg_fid_to_fname -- - * Traverse the shared-memory region looking for the entry that - * matches the passed file unique id. Returns 0 on success; -1 on error. - * - * PUBLIC: int __dbreg_fid_to_fname __P((DB_LOG *, u_int8_t *, int, FNAME **)); - */ -int -__dbreg_fid_to_fname(dblp, fid, have_lock, fnamep) - DB_LOG *dblp; - u_int8_t *fid; - int have_lock; - FNAME **fnamep; -{ - DB_ENV *dbenv; - FNAME *fnp; - LOG *lp; - int ret; - - dbenv = dblp->dbenv; - lp = dblp->reginfo.primary; - - ret = -1; - - if (!have_lock) - MUTEX_LOCK(dbenv, lp->mtx_filelist); - for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname); - fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) { - if (memcmp(fnp->ufid, fid, DB_FILE_ID_LEN) == 0) { - *fnamep = fnp; - ret = 0; - break; - } - } - if (!have_lock) - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - - return (ret); -} - -/* - * __dbreg_get_name - * - * Interface to get name of registered files. This is mainly diagnostic - * and the name passed could be transient unless there is something - * ensuring that the file cannot be closed. 
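/*
 * [Editorial sketch -- not part of the deleted BDB sources. A hedged
 * illustration of how a recovery routine might consume the id-to-handle
 * mapping provided by __dbreg_id_to_db above, assuming the usual db_int.h
 * environment: a deleted or unresolvable file is not an error during
 * recovery, it simply means the record can be skipped. `log_fileid` is a
 * hypothetical dbreg id read from a log record.]
 */
static int
example_recover_lookup(DB_ENV *dbenv, DB_TXN *txn, int32_t log_fileid)
{
	DB *file_dbp;
	int ret;

	ret = __dbreg_id_to_db(dbenv, txn, &file_dbp, log_fileid, 0);
	if (ret == DB_DELETED || ret == ENOENT)
		return (0);	/* File is gone: nothing to recover. */
	if (ret != 0)
		return (ret);	/* Real failure: report it. */

	/* ... apply or undo the logged operation against file_dbp ... */
	return (0);
}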
- * - * PUBLIC: int __dbreg_get_name __P((DB_ENV *, u_int8_t *, char **)); - */ -int -__dbreg_get_name(dbenv, fid, namep) - DB_ENV *dbenv; - u_int8_t *fid; - char **namep; -{ - DB_LOG *dblp; - FNAME *fnp; - - dblp = dbenv->lg_handle; - - if (dblp != NULL && __dbreg_fid_to_fname(dblp, fid, 0, &fnp) == 0) { - *namep = R_ADDR(&dblp->reginfo, fnp->name_off); - return (0); - } - - return (-1); -} - -/* - * __dbreg_do_open -- - * Open files referenced in the log. This is the part of the open that - * is not protected by the thread mutex. - * PUBLIC: int __dbreg_do_open __P((DB_ENV *, DB_TXN *, DB_LOG *, u_int8_t *, - * PUBLIC: char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t, - * PUBLIC: u_int32_t)); - */ -int -__dbreg_do_open(dbenv, - txn, lp, uid, name, ftype, ndx, meta_pgno, info, id, opcode) - DB_ENV *dbenv; - DB_TXN *txn; - DB_LOG *lp; - u_int8_t *uid; - char *name; - DBTYPE ftype; - int32_t ndx; - db_pgno_t meta_pgno; - void *info; - u_int32_t id, opcode; -{ - DB *dbp; - u_int32_t cstat, ret_stat; - int ret; - char *dname, *fname; - - cstat = TXN_EXPECTED; - fname = name; - dname = NULL; - if ((ret = db_create(&dbp, lp->dbenv, 0)) != 0) - return (ret); - - /* - * We can open files under a number of different scenarios. - * First, we can open a file during a normal txn_abort, if that file - * was opened and closed during the transaction (as is the master - * database of a sub-database). - * Second, we might be aborting a transaction in XA and not have - * it open in the process that is actually doing the abort. - * Third, we might be in recovery. - * In case 3, there is no locking, so there is no issue. - * In cases 1 and 2, we are guaranteed to already hold any locks - * that we need, since we're still in the same transaction, so by - * setting DB_AM_RECOVER, we guarantee that we don't log and that - * we don't try to acquire locks on behalf of a different locker id. - */ - F_SET(dbp, DB_AM_RECOVER); - if (meta_pgno != PGNO_BASE_MD) { - memcpy(dbp->fileid, uid, DB_FILE_ID_LEN); - dbp->meta_pgno = meta_pgno; - } - if (opcode == DBREG_PREOPEN) { - dbp->type = ftype; - if ((ret = __dbreg_setup(dbp, name, id)) != 0) - goto err; - MAKE_INMEM(dbp); - goto skip_open; - } - - if (opcode == DBREG_REOPEN) { - MAKE_INMEM(dbp); - fname = NULL; - dname = name; - } - - if ((ret = __db_open(dbp, txn, fname, dname, ftype, - DB_DURABLE_UNKNOWN | DB_ODDFILESIZE, - __db_omode(OWNER_RW), meta_pgno)) == 0) { -skip_open: - /* - * Verify that we are opening the same file that we were - * referring to when we wrote this log record. - */ - if ((meta_pgno != PGNO_BASE_MD && - __dbreg_check_master(dbenv, uid, name) != 0) || - memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0) - cstat = TXN_UNEXPECTED; - else - cstat = TXN_EXPECTED; - - /* Assign the specific dbreg id to this dbp. */ - if ((ret = __dbreg_assign_id(dbp, ndx)) != 0) - goto err; - - /* - * If we successfully opened this file, then we need to - * convey that information to the txnlist so that we - * know how to handle the subtransaction that created - * the file system object. - */ - if (id != TXN_INVALID) - ret = __db_txnlist_update(dbenv, - info, id, cstat, NULL, &ret_stat, 1); - -err: if (cstat == TXN_UNEXPECTED) - goto not_right; - return (ret); - } else if (ret == ENOENT) { - /* Record that the open failed in the txnlist. */ - if (id != TXN_INVALID) - ret = __db_txnlist_update(dbenv, info, - id, TXN_UNEXPECTED, NULL, &ret_stat, 1); - } -not_right: - (void)__db_close(dbp, NULL, DB_NOSYNC); - /* Add this file as deleted. 
*/ - (void)__dbreg_add_dbentry(dbenv, lp, NULL, ndx); - return (ret); -} - -static int -__dbreg_check_master(dbenv, uid, name) - DB_ENV *dbenv; - u_int8_t *uid; - char *name; -{ - DB *dbp; - int ret; - - ret = 0; - if ((ret = db_create(&dbp, dbenv, 0)) != 0) - return (ret); - F_SET(dbp, DB_AM_RECOVER); - ret = __db_open(dbp, NULL, - name, NULL, DB_BTREE, 0, __db_omode(OWNER_RW), PGNO_BASE_MD); - - if (ret == 0 && memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0) - ret = EINVAL; - - (void)__db_close(dbp, NULL, 0); - return (ret); -} - -/* - * __dbreg_lazy_id -- - * When a replication client gets upgraded to being a replication master, - * it may have database handles open that have not been assigned an ID, but - * which have become legal to use for logging. - * - * This function lazily allocates a new ID for such a function, in a - * new transaction created for the purpose. We need to do this in a new - * transaction because we definitely wish to commit the dbreg_register, but - * at this point we have no way of knowing whether the log record that incited - * us to call this will be part of a committed transaction. - * - * PUBLIC: int __dbreg_lazy_id __P((DB *)); - */ -int -__dbreg_lazy_id(dbp) - DB *dbp; -{ - DB_ENV *dbenv; - DB_LOG *dblp; - DB_TXN *txn; - FNAME *fnp; - LOG *lp; - int32_t id; - int ret; - - dbenv = dbp->dbenv; - - DB_ASSERT(IS_REP_MASTER(dbenv)); - - dbenv = dbp->dbenv; - dblp = dbenv->lg_handle; - lp = dblp->reginfo.primary; - fnp = dbp->log_filename; - - /* The mtx_filelist protects the FNAME list and id management. */ - MUTEX_LOCK(dbenv, lp->mtx_filelist); - if (fnp->id != DB_LOGFILEID_INVALID) { - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - return (0); - } - id = DB_LOGFILEID_INVALID; - if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) - goto err; - - if ((ret = __dbreg_get_id(dbp, txn, &id)) != 0) { - (void)__txn_abort(txn); - goto err; - } - - if ((ret = __txn_commit(txn, DB_TXN_NOSYNC)) != 0) - goto err; - - /* - * All DB related logging routines check the id value *without* - * holding the mtx_filelist to know whether we need to call - * dbreg_lazy_id to begin with. We must set the ID after a - * *successful* commit so that there is no possibility of a second - * modification call finding a valid ID in the dbp before the - * dbreg_register and commit records are in the log. - * If there was an error, then we call __dbreg_revoke_id to - * remove the entry from the lists. - */ - fnp->id = id; -err: - if (ret != 0 && id != DB_LOGFILEID_INVALID) - (void)__dbreg_revoke_id(dbp, 1, id); - MUTEX_UNLOCK(dbenv, lp->mtx_filelist); - return (ret); -} diff --git a/storage/bdb/dist/Makefile.in b/storage/bdb/dist/Makefile.in deleted file mode 100644 index abd7dd93b17..00000000000 --- a/storage/bdb/dist/Makefile.in +++ /dev/null @@ -1,1919 +0,0 @@ -# $Id: Makefile.in,v 12.33 2005/11/03 17:43:46 bostic Exp $ - -srcdir= @srcdir@/.. -builddir=. - -################################################## -# Installation directories and permissions. -################################################## -prefix= @prefix@ -exec_prefix=@exec_prefix@ -bindir= @bindir@ -includedir=@includedir@ -libdir= @libdir@ -docdir= $(prefix)/docs - -dmode= 755 -emode= 555 -fmode= 444 - -transform=@program_transform_name@ - -################################################## -# Paths for standard user-level commands. 
-################################################## -SHELL= @db_cv_path_sh@ -ar= @db_cv_path_ar@ -chmod= @db_cv_path_chmod@ -cp= @db_cv_path_cp@ -ln= @db_cv_path_ln@ -mkdir= @db_cv_path_mkdir@ -ranlib= @db_cv_path_ranlib@ -rm= @db_cv_path_rm@ -strip= @db_cv_path_strip@ - -################################################## -# General library information. -################################################## -DEF_LIB= @DEFAULT_LIB@ -DEF_LIB_CXX= @DEFAULT_LIB_CXX@ -INSTALLER= @INSTALLER@ -LIBTOOL= @LIBTOOL@ - -POSTLINK= @POSTLINK@ -SOLINK= @MAKEFILE_SOLINK@ -SOFLAGS= @SOFLAGS@ -LIBMAJOR= @DB_VERSION_MAJOR@ -LIBVERSION= @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@ - -CPPFLAGS= -I$(builddir) -I$(srcdir) @CPPFLAGS@ - -################################################## -# C API. -################################################## -CFLAGS= -c $(CPPFLAGS) @CFLAGS@ -CC= @MAKEFILE_CC@ -CCLINK= @MAKEFILE_CCLINK@ @CFLAGS@ - -LDFLAGS= @LDFLAGS@ -LIBS= @LIBS@ -TEST_LIBS= @TEST_LIBS@ -LIBCSO_LIBS= @LIBCSO_LIBS@ @LIBSO_LIBS@ - -libdb_base= libdb -libdb= $(libdb_base).a -libdb_version= $(libdb_base)-$(LIBVERSION).a -libso= $(libdb_base)-$(LIBVERSION)@SOSUFFIX@ -libso_target= $(libdb_base)-$(LIBVERSION).la -libso_default= $(libdb_base)@SOSUFFIX@ -libso_major= $(libdb_base)-$(LIBMAJOR)@SOSUFFIX@ - -################################################## -# C++ API. -# -# C++ support is optional, and can be built with static or shared libraries. -################################################## -CXXFLAGS= -c $(CPPFLAGS) @CXXFLAGS@ -CXX= @MAKEFILE_CXX@ -CXXLINK= @MAKEFILE_CXXLINK@ @CXXFLAGS@ -XSOLINK= @MAKEFILE_XSOLINK@ @CXXFLAGS@ -LIBXSO_LIBS= @LIBXSO_LIBS@ @LIBSO_LIBS@ - -libcxx_base= libdb_cxx -libcxx= $(libcxx_base).a -libcxx_version= $(libcxx_base)-$(LIBVERSION).a -libxso= $(libcxx_base)-$(LIBVERSION)@SOSUFFIX@ -libxso_target= $(libcxx_base)-$(LIBVERSION).la -libxso_default= $(libcxx_base)@SOSUFFIX@ -libxso_major= $(libcxx_base)-$(LIBMAJOR)@SOSUFFIX@ - -################################################## -# Java API. -# -# Java support is optional and requires shared librarires. -################################################## -CLASSPATH= $(JAVA_CLASSTOP) -LIBJSO_LIBS= @LIBJSO_LIBS@ @LIBSO_LIBS@ - -JAR= @JAR@ -JAVAC= env CLASSPATH="$(CLASSPATH)" @JAVAC@ -JAVACFLAGS= @JAVACFLAGS@ -JAVA_CLASSTOP= ./classes -JAVA_RPCCLASSTOP=./classes.rpc -JAVA_EXCLASSTOP=./classes.ex -JAVA_RPCREL= com/sleepycat/db/rpcserver -JAVA_SRCDIR= $(srcdir)/java/src -JAVA_EXDIR= $(srcdir)/examples_java/src -JAVA_RPCDIR= $(srcdir)/rpc_server/java -JAVA_SLEEPYCAT= $(srcdir)/java/src/com/sleepycat - -libj_jarfile= db.jar -libj_exjarfile= dbexamples.jar -rpc_jarfile= dbsvc.jar -libjso_base= libdb_java -libjso= $(libjso_base)-$(LIBVERSION)@JMODSUFFIX@ -libjso_static= $(libjso_base)-$(LIBVERSION).a -libjso_target= $(libjso_base)-$(LIBVERSION).la -libjso_default= $(libjso_base)@JMODSUFFIX@ -libjso_major= $(libjso_base)-$(LIBMAJOR)@JMODSUFFIX@ -libjso_g= $(libjso_base)-$(LIBVERSION)_g@JMODSUFFIX@ - -################################################## -# TCL API. -# -# Tcl support is optional and requires shared libraries. 
-################################################## -TCL_INCLUDE_SPEC= @TCL_INCLUDE_SPEC@ -LIBTSO_LIBS= @LIBTSO_LIBS@ @LIBSO_LIBS@ -libtso_base= libdb_tcl -libtso= $(libtso_base)-$(LIBVERSION)@MODSUFFIX@ -libtso_static= $(libtso_base)-$(LIBVERSION).a -libtso_target= $(libtso_base)-$(LIBVERSION).la -libtso_default= $(libtso_base)@MODSUFFIX@ -libtso_major= $(libtso_base)-$(LIBMAJOR)@MODSUFFIX@ - -################################################## -# db_dump185 UTILITY -# -# The db_dump185 application should be compiled using the system's db.h file -# (which should be a DB 1.85/1.86 include file), and the system's 1.85/1.86 -# object library. To include the right db.h, don't include -I$(builddir) on -# the compile line. You may also need to add a local include directory and -# local libraries, for example. Do that by adding -I options to the DB185INC -# line, and -l options to the DB185LIB line. -################################################## -DB185INC= -c @CFLAGS@ -I$(srcdir) @CPPFLAGS@ -DB185LIB= - -################################################## -# NOTHING BELOW THIS LINE SHOULD EVER NEED TO BE MODIFIED. -################################################## - -################################################## -# Object and utility lists. -################################################## -BTREE_OBJS=\ - bt_compare@o@ bt_conv@o@ bt_curadj@o@ bt_cursor@o@ bt_delete@o@ \ - bt_method@o@ bt_open@o@ bt_put@o@ bt_rec@o@ bt_reclaim@o@ \ - bt_recno@o@ bt_rsearch@o@ bt_search@o@ bt_split@o@ bt_stat@o@ \ - bt_compact@o@ bt_upgrade@o@ btree_auto@o@ -BTREE_VRFY_OBJS=\ - db_ovfl_vrfy@o@ db_vrfy@o@ db_vrfyutil@o@ bt_verify@o@ -HASH_OBJS=\ - hash@o@ hash_auto@o@ hash_conv@o@ hash_dup@o@ hash_meta@o@ \ - hash_method@o@ hash_open@o@ hash_page@o@ hash_rec@o@ \ - hash_reclaim@o@ hash_stat@o@ hash_upgrade@o@ -HASH_VRFY_OBJS=\ - hash_verify@o@ -QUEUE_OBJS=\ - qam@o@ qam_auto@o@ qam_conv@o@ qam_files@o@ qam_method@o@ \ - qam_open@o@ qam_rec@o@ qam_stat@o@ qam_upgrade@o@ -QUEUE_VRFY_OBJS=\ - qam_verify@o@ -REP_OBJS=\ - rep_auto@o@ rep_backup@o@ rep_elect@o@ rep_log@o@ rep_method@o@ \ - rep_record@o@ rep_region@o@ rep_stat@o@ rep_util@o@ rep_verify@o@ -PRINT_OBJS=\ - btree_autop@o@ crdel_autop@o@ db_autop@o@ dbreg_autop@o@ \ - fileops_autop@o@ hash_autop@o@ qam_autop@o@ rep_autop@o@ \ - txn_autop@o@ - -C_OBJS= @ADDITIONAL_OBJS@ @REPLACEMENT_OBJS@ @CRYPTO_OBJS@ @RPC_CLIENT_OBJS@ \ - crdel_auto@o@ crdel_rec@o@ db@o@ db_am@o@ db_auto@o@ \ - db_byteorder@o@ db_cam@o@ db_clock@o@ db_conv@o@ db_dispatch@o@ \ - db_dup@o@ db_err@o@ db_getlong@o@ db_idspace@o@ db_iface@o@ \ - db_join@o@ db_log2@o@ db_meta@o@ db_method@o@ db_open@o@ \ - db_overflow@o@ db_pr@o@ db_rec@o@ db_reclaim@o@ db_rename@o@ \ - db_remove@o@ db_ret@o@ db_salloc@o@ db_setid@o@ db_setlsn@o@ \ - db_shash@o@ db_stati@o@ db_truncate@o@ db_upg@o@ db_upg_opd@o@ \ - dbm@o@ dbreg@o@ dbreg_auto@o@ dbreg_rec@o@ dbreg_stat@o@ \ - dbreg_util@o@ env_failchk@o@ env_file@o@ env_method@o@ \ - env_open@o@ env_recover@o@ env_region@o@ env_register@o@ \ - env_stat@o@ fileops_auto@o@ fop_basic@o@ fop_rec@o@ fop_util@o@ \ - hash_func@o@ hmac@o@ hsearch@o@ lock@o@ lock_deadlock@o@ \ - lock_failchk@o@ lock_id@o@ lock_list@o@ lock_method@o@ \ - lock_region@o@ lock_stat@o@ lock_timer@o@ lock_util@o@ log@o@ \ - log_archive@o@ log_compare@o@ log_debug@o@ log_get@o@ log_method@o@ \ - log_put@o@ log_stat@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ \ - mp_fmethod@o@ mp_fopen@o@ mp_fput@o@ mp_fset@o@ mp_method@o@ \ - mp_region@o@ mp_register@o@ mp_stat@o@ mp_sync@o@ mp_trickle@o@ \ - 
mut_alloc@o@ mut_method@o@ mut_region@o@ \ - mut_stat@o@ os_abs@o@ os_alloc@o@ os_clock@o@ os_config@o@ \ - os_dir@o@ os_errno@o@ os_fid@o@ os_flock@o@ os_fsync@o@ \ - os_handle@o@ os_id@o@ os_map@o@ os_method@o@ os_mkdir@o@ \ - os_oflags@o@ os_open@o@ os_region@o@ os_rename@o@ os_root@o@ \ - os_rpath@o@ os_rw@o@ os_seek@o@ os_sleep@o@ os_spin@o@ \ - os_stat@o@ os_tmpdir@o@ os_truncate@o@ os_unlink@o@ sha1@o@ \ - seq_stat@o@ sequence@o@ snprintf@o@ txn@o@ txn_auto@o@ \ - txn_chkpt@o@ txn_failchk@o@ txn_method@o@ txn_rec@o@ \ - txn_recover@o@ txn_region@o@ txn_stat@o@ txn_util@o@ xa@o@ \ - xa_db@o@ xa_map@o@ - -CXX_OBJS=\ - cxx_db@o@ cxx_dbc@o@ cxx_dbt@o@ cxx_env@o@ cxx_except@o@ cxx_lock@o@ \ - cxx_logc@o@ cxx_mpool@o@ cxx_multi@o@ cxx_seq@o@ cxx_txn@o@ - -CRYPTO_OBJS=\ - aes_method@o@ crypto@o@ mt19937db@o@ rijndael-alg-fst@o@ \ - rijndael-api-fst@o@ - -JAVA_OBJS=\ - db_java_wrap@o@ - -JAVA_DBSRCS=\ - $(JAVA_SLEEPYCAT)/bind/ByteArrayBinding.java \ - $(JAVA_SLEEPYCAT)/bind/EntityBinding.java \ - $(JAVA_SLEEPYCAT)/bind/EntryBinding.java \ - $(JAVA_SLEEPYCAT)/bind/RecordNumberBinding.java \ - $(JAVA_SLEEPYCAT)/bind/serial/ClassCatalog.java \ - $(JAVA_SLEEPYCAT)/bind/serial/SerialBase.java \ - $(JAVA_SLEEPYCAT)/bind/serial/SerialBinding.java \ - $(JAVA_SLEEPYCAT)/bind/serial/SerialInput.java \ - $(JAVA_SLEEPYCAT)/bind/serial/SerialOutput.java \ - $(JAVA_SLEEPYCAT)/bind/serial/SerialSerialBinding.java \ - $(JAVA_SLEEPYCAT)/bind/serial/SerialSerialKeyCreator.java \ - $(JAVA_SLEEPYCAT)/bind/serial/StoredClassCatalog.java \ - $(JAVA_SLEEPYCAT)/bind/serial/TupleSerialBinding.java \ - $(JAVA_SLEEPYCAT)/bind/serial/TupleSerialKeyCreator.java \ - $(JAVA_SLEEPYCAT)/bind/serial/TupleSerialMarshalledBinding.java \ - $(JAVA_SLEEPYCAT)/bind/serial/TupleSerialMarshalledKeyCreator.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/BooleanBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/ByteBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/CharacterBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/DoubleBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/FloatBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/IntegerBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/LongBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/MarshalledTupleEntry.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/MarshalledTupleKeyEntity.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/ShortBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/StringBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleBase.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleInput.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleInputBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleMarshalledBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleOutput.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleTupleBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleTupleKeyCreator.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleTupleMarshalledBinding.java \ - $(JAVA_SLEEPYCAT)/bind/tuple/TupleTupleMarshalledKeyCreator.java \ - $(JAVA_SLEEPYCAT)/collections/CurrentTransaction.java \ - $(JAVA_SLEEPYCAT)/collections/DataCursor.java \ - $(JAVA_SLEEPYCAT)/collections/DataView.java \ - $(JAVA_SLEEPYCAT)/collections/KeyRange.java \ - $(JAVA_SLEEPYCAT)/collections/KeyRangeException.java \ - $(JAVA_SLEEPYCAT)/collections/MapEntryParameter.java \ - $(JAVA_SLEEPYCAT)/collections/PrimaryKeyAssigner.java \ - $(JAVA_SLEEPYCAT)/collections/RangeCursor.java \ - $(JAVA_SLEEPYCAT)/collections/StoredCollection.java \ - $(JAVA_SLEEPYCAT)/collections/StoredCollections.java \ - $(JAVA_SLEEPYCAT)/collections/StoredContainer.java \ - 
$(JAVA_SLEEPYCAT)/collections/StoredEntrySet.java \ - $(JAVA_SLEEPYCAT)/collections/StoredIterator.java \ - $(JAVA_SLEEPYCAT)/collections/StoredKeySet.java \ - $(JAVA_SLEEPYCAT)/collections/StoredList.java \ - $(JAVA_SLEEPYCAT)/collections/StoredMap.java \ - $(JAVA_SLEEPYCAT)/collections/StoredMapEntry.java \ - $(JAVA_SLEEPYCAT)/collections/StoredSortedEntrySet.java \ - $(JAVA_SLEEPYCAT)/collections/StoredSortedKeySet.java \ - $(JAVA_SLEEPYCAT)/collections/StoredSortedMap.java \ - $(JAVA_SLEEPYCAT)/collections/StoredSortedValueSet.java \ - $(JAVA_SLEEPYCAT)/collections/StoredValueSet.java \ - $(JAVA_SLEEPYCAT)/collections/TransactionRunner.java \ - $(JAVA_SLEEPYCAT)/collections/TransactionWorker.java \ - $(JAVA_SLEEPYCAT)/collections/TupleSerialFactory.java \ - $(JAVA_SLEEPYCAT)/compat/DbCompat.java \ - $(JAVA_SLEEPYCAT)/db/BtreePrefixCalculator.java \ - $(JAVA_SLEEPYCAT)/db/BtreeStats.java \ - $(JAVA_SLEEPYCAT)/db/CacheFile.java \ - $(JAVA_SLEEPYCAT)/db/CacheFilePriority.java \ - $(JAVA_SLEEPYCAT)/db/CacheFileStats.java \ - $(JAVA_SLEEPYCAT)/db/CacheStats.java \ - $(JAVA_SLEEPYCAT)/db/CheckpointConfig.java \ - $(JAVA_SLEEPYCAT)/db/CompactConfig.java \ - $(JAVA_SLEEPYCAT)/db/CompactStats.java \ - $(JAVA_SLEEPYCAT)/db/Cursor.java \ - $(JAVA_SLEEPYCAT)/db/CursorConfig.java \ - $(JAVA_SLEEPYCAT)/db/Database.java \ - $(JAVA_SLEEPYCAT)/db/DatabaseConfig.java \ - $(JAVA_SLEEPYCAT)/db/DatabaseEntry.java \ - $(JAVA_SLEEPYCAT)/db/DatabaseException.java \ - $(JAVA_SLEEPYCAT)/db/DatabaseStats.java \ - $(JAVA_SLEEPYCAT)/db/DatabaseType.java \ - $(JAVA_SLEEPYCAT)/db/DeadlockException.java \ - $(JAVA_SLEEPYCAT)/db/Environment.java \ - $(JAVA_SLEEPYCAT)/db/EnvironmentConfig.java \ - $(JAVA_SLEEPYCAT)/db/ErrorHandler.java \ - $(JAVA_SLEEPYCAT)/db/FeedbackHandler.java \ - $(JAVA_SLEEPYCAT)/db/HashStats.java \ - $(JAVA_SLEEPYCAT)/db/Hasher.java \ - $(JAVA_SLEEPYCAT)/db/JoinConfig.java \ - $(JAVA_SLEEPYCAT)/db/JoinCursor.java \ - $(JAVA_SLEEPYCAT)/db/KeyRange.java \ - $(JAVA_SLEEPYCAT)/db/Lock.java \ - $(JAVA_SLEEPYCAT)/db/LockDetectMode.java \ - $(JAVA_SLEEPYCAT)/db/LockMode.java \ - $(JAVA_SLEEPYCAT)/db/LockNotGrantedException.java \ - $(JAVA_SLEEPYCAT)/db/LockOperation.java \ - $(JAVA_SLEEPYCAT)/db/LockRequest.java \ - $(JAVA_SLEEPYCAT)/db/LockRequestMode.java \ - $(JAVA_SLEEPYCAT)/db/LockStats.java \ - $(JAVA_SLEEPYCAT)/db/LogCursor.java \ - $(JAVA_SLEEPYCAT)/db/LogRecordHandler.java \ - $(JAVA_SLEEPYCAT)/db/LogSequenceNumber.java \ - $(JAVA_SLEEPYCAT)/db/LogStats.java \ - $(JAVA_SLEEPYCAT)/db/MemoryException.java \ - $(JAVA_SLEEPYCAT)/db/MessageHandler.java \ - $(JAVA_SLEEPYCAT)/db/MultipleDataEntry.java \ - $(JAVA_SLEEPYCAT)/db/MultipleEntry.java \ - $(JAVA_SLEEPYCAT)/db/MultipleKeyDataEntry.java \ - $(JAVA_SLEEPYCAT)/db/MultipleRecnoDataEntry.java \ - $(JAVA_SLEEPYCAT)/db/MutexStats.java \ - $(JAVA_SLEEPYCAT)/db/OperationStatus.java \ - $(JAVA_SLEEPYCAT)/db/PanicHandler.java \ - $(JAVA_SLEEPYCAT)/db/PreparedTransaction.java \ - $(JAVA_SLEEPYCAT)/db/QueueStats.java \ - $(JAVA_SLEEPYCAT)/db/RecordNumberAppender.java \ - $(JAVA_SLEEPYCAT)/db/RecoveryOperation.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationConfig.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationDuplicateMasterException.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationHandleDeadException.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationHoldElectionException.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationJoinFailureException.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationLockoutException.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationSiteUnavailableException.java \ - 
$(JAVA_SLEEPYCAT)/db/ReplicationStats.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationStatus.java \ - $(JAVA_SLEEPYCAT)/db/ReplicationTransport.java \ - $(JAVA_SLEEPYCAT)/db/RunRecoveryException.java \ - $(JAVA_SLEEPYCAT)/db/SecondaryConfig.java \ - $(JAVA_SLEEPYCAT)/db/SecondaryCursor.java \ - $(JAVA_SLEEPYCAT)/db/SecondaryDatabase.java \ - $(JAVA_SLEEPYCAT)/db/SecondaryKeyCreator.java \ - $(JAVA_SLEEPYCAT)/db/Sequence.java \ - $(JAVA_SLEEPYCAT)/db/SequenceConfig.java \ - $(JAVA_SLEEPYCAT)/db/SequenceStats.java \ - $(JAVA_SLEEPYCAT)/db/StatsConfig.java \ - $(JAVA_SLEEPYCAT)/db/Transaction.java \ - $(JAVA_SLEEPYCAT)/db/TransactionConfig.java \ - $(JAVA_SLEEPYCAT)/db/TransactionStats.java \ - $(JAVA_SLEEPYCAT)/db/VerifyConfig.java \ - $(JAVA_SLEEPYCAT)/db/VersionMismatchException.java \ - $(JAVA_SLEEPYCAT)/db/internal/Db.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbClient.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbConstants.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbEnv.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbLock.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbLogc.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbMpoolFile.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbSequence.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbTxn.java \ - $(JAVA_SLEEPYCAT)/db/internal/DbUtil.java \ - $(JAVA_SLEEPYCAT)/db/internal/Dbc.java \ - $(JAVA_SLEEPYCAT)/db/internal/db_java.java \ - $(JAVA_SLEEPYCAT)/db/internal/db_javaJNI.java \ - $(JAVA_SLEEPYCAT)/util/ExceptionUnwrapper.java \ - $(JAVA_SLEEPYCAT)/util/ExceptionWrapper.java \ - $(JAVA_SLEEPYCAT)/util/FastInputStream.java \ - $(JAVA_SLEEPYCAT)/util/FastOutputStream.java \ - $(JAVA_SLEEPYCAT)/util/IOExceptionWrapper.java \ - $(JAVA_SLEEPYCAT)/util/RuntimeExceptionWrapper.java \ - $(JAVA_SLEEPYCAT)/util/UtfOps.java - -JAVA_EXSRCS=\ - $(JAVA_EXDIR)/collections/access/AccessExample.java \ - $(JAVA_EXDIR)/collections/hello/HelloDatabaseWorld.java \ - $(JAVA_EXDIR)/collections/ship/basic/PartData.java \ - $(JAVA_EXDIR)/collections/ship/basic/PartKey.java \ - $(JAVA_EXDIR)/collections/ship/basic/Sample.java \ - $(JAVA_EXDIR)/collections/ship/basic/SampleDatabase.java \ - $(JAVA_EXDIR)/collections/ship/basic/SampleViews.java \ - $(JAVA_EXDIR)/collections/ship/basic/ShipmentData.java \ - $(JAVA_EXDIR)/collections/ship/basic/ShipmentKey.java \ - $(JAVA_EXDIR)/collections/ship/basic/SupplierData.java \ - $(JAVA_EXDIR)/collections/ship/basic/SupplierKey.java \ - $(JAVA_EXDIR)/collections/ship/basic/Weight.java \ - $(JAVA_EXDIR)/collections/ship/entity/Part.java \ - $(JAVA_EXDIR)/collections/ship/entity/PartData.java \ - $(JAVA_EXDIR)/collections/ship/entity/PartKey.java \ - $(JAVA_EXDIR)/collections/ship/entity/Sample.java \ - $(JAVA_EXDIR)/collections/ship/entity/SampleDatabase.java \ - $(JAVA_EXDIR)/collections/ship/entity/SampleViews.java \ - $(JAVA_EXDIR)/collections/ship/entity/Shipment.java \ - $(JAVA_EXDIR)/collections/ship/entity/ShipmentData.java \ - $(JAVA_EXDIR)/collections/ship/entity/ShipmentKey.java \ - $(JAVA_EXDIR)/collections/ship/entity/Supplier.java \ - $(JAVA_EXDIR)/collections/ship/entity/SupplierData.java \ - $(JAVA_EXDIR)/collections/ship/entity/SupplierKey.java \ - $(JAVA_EXDIR)/collections/ship/entity/Weight.java \ - $(JAVA_EXDIR)/collections/ship/factory/Part.java \ - $(JAVA_EXDIR)/collections/ship/factory/PartKey.java \ - $(JAVA_EXDIR)/collections/ship/factory/Sample.java \ - $(JAVA_EXDIR)/collections/ship/factory/SampleDatabase.java \ - $(JAVA_EXDIR)/collections/ship/factory/SampleViews.java \ - $(JAVA_EXDIR)/collections/ship/factory/Shipment.java \ - 
$(JAVA_EXDIR)/collections/ship/factory/ShipmentKey.java \ - $(JAVA_EXDIR)/collections/ship/factory/Supplier.java \ - $(JAVA_EXDIR)/collections/ship/factory/SupplierKey.java \ - $(JAVA_EXDIR)/collections/ship/factory/Weight.java \ - $(JAVA_EXDIR)/collections/ship/index/PartData.java \ - $(JAVA_EXDIR)/collections/ship/index/PartKey.java \ - $(JAVA_EXDIR)/collections/ship/index/Sample.java \ - $(JAVA_EXDIR)/collections/ship/index/SampleDatabase.java \ - $(JAVA_EXDIR)/collections/ship/index/SampleViews.java \ - $(JAVA_EXDIR)/collections/ship/index/ShipmentData.java \ - $(JAVA_EXDIR)/collections/ship/index/ShipmentKey.java \ - $(JAVA_EXDIR)/collections/ship/index/SupplierData.java \ - $(JAVA_EXDIR)/collections/ship/index/SupplierKey.java \ - $(JAVA_EXDIR)/collections/ship/index/Weight.java \ - $(JAVA_EXDIR)/collections/ship/marshal/MarshalledEnt.java \ - $(JAVA_EXDIR)/collections/ship/marshal/MarshalledKey.java \ - $(JAVA_EXDIR)/collections/ship/marshal/Part.java \ - $(JAVA_EXDIR)/collections/ship/marshal/PartKey.java \ - $(JAVA_EXDIR)/collections/ship/marshal/Sample.java \ - $(JAVA_EXDIR)/collections/ship/marshal/SampleDatabase.java \ - $(JAVA_EXDIR)/collections/ship/marshal/SampleViews.java \ - $(JAVA_EXDIR)/collections/ship/marshal/Shipment.java \ - $(JAVA_EXDIR)/collections/ship/marshal/ShipmentKey.java \ - $(JAVA_EXDIR)/collections/ship/marshal/Supplier.java \ - $(JAVA_EXDIR)/collections/ship/marshal/SupplierKey.java \ - $(JAVA_EXDIR)/collections/ship/marshal/Weight.java \ - $(JAVA_EXDIR)/collections/ship/sentity/Part.java \ - $(JAVA_EXDIR)/collections/ship/sentity/PartKey.java \ - $(JAVA_EXDIR)/collections/ship/sentity/Sample.java \ - $(JAVA_EXDIR)/collections/ship/sentity/SampleDatabase.java \ - $(JAVA_EXDIR)/collections/ship/sentity/SampleViews.java \ - $(JAVA_EXDIR)/collections/ship/sentity/Shipment.java \ - $(JAVA_EXDIR)/collections/ship/sentity/ShipmentKey.java \ - $(JAVA_EXDIR)/collections/ship/sentity/Supplier.java \ - $(JAVA_EXDIR)/collections/ship/sentity/SupplierKey.java \ - $(JAVA_EXDIR)/collections/ship/sentity/Weight.java \ - $(JAVA_EXDIR)/collections/ship/tuple/Part.java \ - $(JAVA_EXDIR)/collections/ship/tuple/PartData.java \ - $(JAVA_EXDIR)/collections/ship/tuple/PartKey.java \ - $(JAVA_EXDIR)/collections/ship/tuple/Sample.java \ - $(JAVA_EXDIR)/collections/ship/tuple/SampleDatabase.java \ - $(JAVA_EXDIR)/collections/ship/tuple/SampleViews.java \ - $(JAVA_EXDIR)/collections/ship/tuple/Shipment.java \ - $(JAVA_EXDIR)/collections/ship/tuple/ShipmentData.java \ - $(JAVA_EXDIR)/collections/ship/tuple/ShipmentKey.java \ - $(JAVA_EXDIR)/collections/ship/tuple/Supplier.java \ - $(JAVA_EXDIR)/collections/ship/tuple/SupplierData.java \ - $(JAVA_EXDIR)/collections/ship/tuple/SupplierKey.java \ - $(JAVA_EXDIR)/collections/ship/tuple/Weight.java \ - $(JAVA_EXDIR)/db/AccessExample.java \ - $(JAVA_EXDIR)/db/BtRecExample.java \ - $(JAVA_EXDIR)/db/BulkAccessExample.java \ - $(JAVA_EXDIR)/db/EnvExample.java \ - $(JAVA_EXDIR)/db/GettingStarted/ExampleDatabaseLoad.java \ - $(JAVA_EXDIR)/db/GettingStarted/ExampleDatabaseRead.java \ - $(JAVA_EXDIR)/db/GettingStarted/Inventory.java \ - $(JAVA_EXDIR)/db/GettingStarted/InventoryBinding.java \ - $(JAVA_EXDIR)/db/GettingStarted/ItemNameKeyCreator.java \ - $(JAVA_EXDIR)/db/GettingStarted/MyDbs.java \ - $(JAVA_EXDIR)/db/GettingStarted/Vendor.java \ - $(JAVA_EXDIR)/db/LockExample.java \ - $(JAVA_EXDIR)/db/RPCExample.java \ - $(JAVA_EXDIR)/db/SequenceExample.java \ - $(JAVA_EXDIR)/db/TpcbExample.java \ - $(JAVA_EXDIR)/db/txn/DBWriter.java \ - 
$(JAVA_EXDIR)/db/txn/PayloadData.java \ - $(JAVA_EXDIR)/db/txn/TxnGuide.java \ - $(JAVA_EXDIR)/db/txn/TxnGuideInMemory.java - -TCL_OBJS=\ - tcl_compat@o@ tcl_db@o@ tcl_db_pkg@o@ tcl_dbcursor@o@ tcl_env@o@ \ - tcl_internal@o@ tcl_lock@o@ tcl_log@o@ tcl_mp@o@ tcl_rep@o@ \ - tcl_seq@o@ tcl_txn@o@ tcl_util@o@ - -RPC_CLIENT_OBJS=\ - client@o@ db_server_clnt@o@ db_server_xdr@o@ gen_client@o@ \ - gen_client_ret@o@ - -RPC_SRV_OBJS=\ - db_server_proc@o@ db_server_svc@o@ db_server_util@o@ \ - gen_db_server@o@ - -RPC_CXXSRV_OBJS=\ - db_server_cxxproc@o@ db_server_cxxutil@o@ db_server_svc@o@ \ - gen_db_server@o@ - -RPC_JAVASRV_SRCS=\ - $(JAVA_RPCDIR)/AssociateCallbacks.java \ - $(JAVA_RPCDIR)/Dispatcher.java \ - $(JAVA_RPCDIR)/FreeList.java \ - $(JAVA_RPCDIR)/JoinCursorAdapter.java \ - $(JAVA_RPCDIR)/LocalIterator.java \ - $(JAVA_RPCDIR)/RpcDb.java \ - $(JAVA_RPCDIR)/RpcDbEnv.java \ - $(JAVA_RPCDIR)/RpcDbTxn.java \ - $(JAVA_RPCDIR)/RpcDbc.java \ - $(JAVA_RPCDIR)/Server.java \ - $(JAVA_RPCDIR)/Timer.java \ - $(JAVA_RPCDIR)/Util.java \ - $(JAVA_RPCDIR)/gen/ServerStubs.java \ - $(JAVA_RPCDIR)/gen/__db_associate_msg.java \ - $(JAVA_RPCDIR)/gen/__db_associate_reply.java \ - $(JAVA_RPCDIR)/gen/__db_close_msg.java \ - $(JAVA_RPCDIR)/gen/__db_close_reply.java \ - $(JAVA_RPCDIR)/gen/__db_create_msg.java \ - $(JAVA_RPCDIR)/gen/__db_create_reply.java \ - $(JAVA_RPCDIR)/gen/__db_cursor_msg.java \ - $(JAVA_RPCDIR)/gen/__db_cursor_reply.java \ - $(JAVA_RPCDIR)/gen/__db_del_msg.java \ - $(JAVA_RPCDIR)/gen/__db_del_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_bt_minkey_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_bt_minkey_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_dbname_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_dbname_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_encrypt_flags_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_encrypt_flags_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_flags_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_flags_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_h_ffactor_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_h_ffactor_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_h_nelem_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_h_nelem_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_lorder_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_lorder_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_open_flags_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_open_flags_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_pagesize_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_pagesize_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_q_extentsize_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_q_extentsize_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_re_delim_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_re_delim_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_re_len_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_re_len_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_re_pad_msg.java \ - $(JAVA_RPCDIR)/gen/__db_get_re_pad_reply.java \ - $(JAVA_RPCDIR)/gen/__db_get_reply.java \ - $(JAVA_RPCDIR)/gen/__db_join_msg.java \ - $(JAVA_RPCDIR)/gen/__db_join_reply.java \ - $(JAVA_RPCDIR)/gen/__db_key_range_msg.java \ - $(JAVA_RPCDIR)/gen/__db_key_range_reply.java \ - $(JAVA_RPCDIR)/gen/__db_open_msg.java \ - $(JAVA_RPCDIR)/gen/__db_open_reply.java \ - $(JAVA_RPCDIR)/gen/__db_pget_msg.java \ - $(JAVA_RPCDIR)/gen/__db_pget_reply.java \ - $(JAVA_RPCDIR)/gen/__db_put_msg.java \ - $(JAVA_RPCDIR)/gen/__db_put_reply.java \ - $(JAVA_RPCDIR)/gen/__db_remove_msg.java \ - $(JAVA_RPCDIR)/gen/__db_remove_reply.java \ - $(JAVA_RPCDIR)/gen/__db_rename_msg.java \ - $(JAVA_RPCDIR)/gen/__db_rename_reply.java 
\ - $(JAVA_RPCDIR)/gen/__db_set_bt_minkey_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_bt_minkey_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_encrypt_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_encrypt_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_flags_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_flags_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_h_ffactor_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_h_ffactor_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_h_nelem_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_h_nelem_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_lorder_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_lorder_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_pagesize_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_pagesize_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_q_extentsize_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_q_extentsize_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_re_delim_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_re_delim_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_re_len_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_re_len_reply.java \ - $(JAVA_RPCDIR)/gen/__db_set_re_pad_msg.java \ - $(JAVA_RPCDIR)/gen/__db_set_re_pad_reply.java \ - $(JAVA_RPCDIR)/gen/__db_stat_msg.java \ - $(JAVA_RPCDIR)/gen/__db_stat_reply.java \ - $(JAVA_RPCDIR)/gen/__db_sync_msg.java \ - $(JAVA_RPCDIR)/gen/__db_sync_reply.java \ - $(JAVA_RPCDIR)/gen/__db_truncate_msg.java \ - $(JAVA_RPCDIR)/gen/__db_truncate_reply.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_close_msg.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_close_reply.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_count_msg.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_count_reply.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_del_msg.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_del_reply.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_dup_msg.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_dup_reply.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_get_msg.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_get_reply.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_pget_msg.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_pget_reply.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_put_msg.java \ - $(JAVA_RPCDIR)/gen/__dbc_c_put_reply.java \ - $(JAVA_RPCDIR)/gen/__env_close_msg.java \ - $(JAVA_RPCDIR)/gen/__env_close_reply.java \ - $(JAVA_RPCDIR)/gen/__env_create_msg.java \ - $(JAVA_RPCDIR)/gen/__env_create_reply.java \ - $(JAVA_RPCDIR)/gen/__env_dbremove_msg.java \ - $(JAVA_RPCDIR)/gen/__env_dbremove_reply.java \ - $(JAVA_RPCDIR)/gen/__env_dbrename_msg.java \ - $(JAVA_RPCDIR)/gen/__env_dbrename_reply.java \ - $(JAVA_RPCDIR)/gen/__env_get_cachesize_msg.java \ - $(JAVA_RPCDIR)/gen/__env_get_cachesize_reply.java \ - $(JAVA_RPCDIR)/gen/__env_get_encrypt_flags_msg.java \ - $(JAVA_RPCDIR)/gen/__env_get_encrypt_flags_reply.java \ - $(JAVA_RPCDIR)/gen/__env_get_flags_msg.java \ - $(JAVA_RPCDIR)/gen/__env_get_flags_reply.java \ - $(JAVA_RPCDIR)/gen/__env_get_home_msg.java \ - $(JAVA_RPCDIR)/gen/__env_get_home_reply.java \ - $(JAVA_RPCDIR)/gen/__env_get_open_flags_msg.java \ - $(JAVA_RPCDIR)/gen/__env_get_open_flags_reply.java \ - $(JAVA_RPCDIR)/gen/__env_open_msg.java \ - $(JAVA_RPCDIR)/gen/__env_open_reply.java \ - $(JAVA_RPCDIR)/gen/__env_remove_msg.java \ - $(JAVA_RPCDIR)/gen/__env_remove_reply.java \ - $(JAVA_RPCDIR)/gen/__env_set_cachesize_msg.java \ - $(JAVA_RPCDIR)/gen/__env_set_cachesize_reply.java \ - $(JAVA_RPCDIR)/gen/__env_set_encrypt_msg.java \ - $(JAVA_RPCDIR)/gen/__env_set_encrypt_reply.java \ - $(JAVA_RPCDIR)/gen/__env_set_flags_msg.java \ - $(JAVA_RPCDIR)/gen/__env_set_flags_reply.java \ - $(JAVA_RPCDIR)/gen/__env_txn_begin_msg.java \ - $(JAVA_RPCDIR)/gen/__env_txn_begin_reply.java \ - $(JAVA_RPCDIR)/gen/__env_txn_recover_msg.java \ - 
$(JAVA_RPCDIR)/gen/__env_txn_recover_reply.java \ - $(JAVA_RPCDIR)/gen/__txn_abort_msg.java \ - $(JAVA_RPCDIR)/gen/__txn_abort_reply.java \ - $(JAVA_RPCDIR)/gen/__txn_commit_msg.java \ - $(JAVA_RPCDIR)/gen/__txn_commit_reply.java \ - $(JAVA_RPCDIR)/gen/__txn_discard_msg.java \ - $(JAVA_RPCDIR)/gen/__txn_discard_reply.java \ - $(JAVA_RPCDIR)/gen/__txn_prepare_msg.java \ - $(JAVA_RPCDIR)/gen/__txn_prepare_reply.java \ - $(JAVA_RPCDIR)/gen/db_server.java - -UTIL_PROGS=\ - @ADDITIONAL_PROGS@ \ - db_archive db_checkpoint db_deadlock db_dump db_hotbackup \ - db_load db_printlog db_recover db_stat db_upgrade db_verify - -################################################## -# List of files installed into the library directory. -################################################## -LIB_INSTALL_FILE_LIST=\ - $(libdb) \ - $(libso) \ - $(libso_default) \ - $(libso_major) \ - $(libdb_version) \ - $(libso_target) \ - $(libcxx) \ - $(libxso) \ - $(libxso_default) \ - $(libxso_major) \ - $(libcxx_version) \ - $(libxso_target) \ - $(libtso) \ - $(libtso_default) \ - $(libtso_major) \ - $(libtso_static) \ - $(libtso_target) \ - $(libjso) \ - $(libjso_default) \ - $(libjso_g) \ - $(libjso_major) \ - $(libjso_static) \ - $(libjso_target) \ - $(libj_exjarfile) \ - $(libj_jarfile) - -################################################## -# Note: "all" must be the first target in the Makefile. -################################################## -all: @BUILD_TARGET@ - -install-strip install: all @INSTALL_TARGET@ - -################################################## -# Library and standard utilities build. -################################################## -library_build: @INSTALL_LIBS@ @ADDITIONAL_LANG@ $(UTIL_PROGS) - -# Static C library named libdb.a. -$(libdb): $(DEF_LIB) - -# Real static C library. -$(libdb_version): $(C_OBJS) - $(ar) cr $@ $(C_OBJS) - test ! -f $(ranlib) || $(ranlib) $@ - $(rm) -f $(libdb) - $(ln) -s $(libdb_version) $(libdb) - -# Shared C library. -$(libso_target): $(C_OBJS) - $(SOLINK) $(SOFLAGS) $(LDFLAGS) -o $@ $(C_OBJS) $(LIBCSO_LIBS) - $(rm) -f $(libdb) - $(ln) -s .libs/$(libdb_version) $(libdb) - -# Static C++ library named libdb_cxx.a. -$(libcxx): $(DEF_LIB_CXX) - -# Real static C++ library. -$(libcxx_version): $(CXX_OBJS) $(C_OBJS) - $(ar) cr $@ $(CXX_OBJS) $(C_OBJS) - test ! -f $(ranlib) || $(ranlib) $@ - $(rm) -f $(libcxx) - $(ln) -s $(libcxx_version) $(libcxx) - -# Shared C++ library. -$(libxso_target): $(CXX_OBJS) $(C_OBJS) - $(XSOLINK) $(SOFLAGS) $(LDFLAGS) \ - -o $@ $(CXX_OBJS) $(C_OBJS) $(LIBXSO_LIBS) - $(rm) -f $(libcxx) - $(ln) -s .libs/$(libcxx_version) $(libcxx) - -# Shared Java library. -$(libjso_target): $(JAVA_OBJS) $(C_OBJS) - $(SOLINK) -shrext @JMODSUFFIX@ $(SOFLAGS) $(LDFLAGS) \ - -o $@ $(JAVA_OBJS) $(C_OBJS) $(LIBJSO_LIBS) - -# Shared Tcl library. -$(libtso_target): $(TCL_OBJS) $(C_OBJS) - $(SOLINK) @LIBTSO_MODULE@ $(SOFLAGS) $(LDFLAGS) \ - -o $@ $(TCL_OBJS) $(C_OBJS) $(LIBTSO_LIBS) - -################################################## -# Creating individual dependencies and actions for building class -# files is possible, but it is very messy and error prone. 
-################################################## -java: $(libj_jarfile) $(libj_exjarfile) - -$(libj_jarfile): $(JAVA_DBSRCS) - @test -d $(JAVA_CLASSTOP) || \ - ($(mkdir) -p $(JAVA_CLASSTOP) && $(chmod) $(dmode) $(JAVA_CLASSTOP)) - $(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_DBSRCS) - cd $(JAVA_CLASSTOP) && $(JAR) cf ../$(libj_jarfile) ./com/sleepycat - -$(libj_exjarfile): $(libj_jarfile) $(JAVA_EXSRCS) - @test -d $(JAVA_EXCLASSTOP) || \ - ($(mkdir) -p $(JAVA_EXCLASSTOP) && \ - $(chmod) $(dmode) $(JAVA_EXCLASSTOP)) - $(JAVAC) -classpath $(libj_jarfile) -d $(JAVA_EXCLASSTOP) \ - $(JAVACFLAGS) $(JAVA_EXSRCS) - cd $(JAVA_EXCLASSTOP) && $(JAR) cf ../$(libj_exjarfile) . - -$(rpc_jarfile): $(libj_jarfile) $(RPC_JAVASRV_SRCS) - @test -d $(JAVA_RPCCLASSTOP) || \ - ($(mkdir) -p $(JAVA_RPCCLASSTOP) && \ - $(chmod) $(dmode) $(JAVA_RPCCLASSTOP)) - env CLASSPATH=$(CLASSPATH):$(JAVA_RPCDIR)/oncrpc.jar \ - @JAVAC@ -d $(JAVA_RPCCLASSTOP) $(JAVACFLAGS) $(RPC_JAVASRV_SRCS) - cd $(JAVA_RPCCLASSTOP) && $(JAR) cf ../$(rpc_jarfile) $(JAVA_RPCREL) - -################################################## -# Utilities -################################################## -berkeley_db_svc: $(RPC_SRV_OBJS) util_log@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - $(RPC_SRV_OBJS) util_log@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -berkeley_db_cxxsvc: $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) \ - $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -berkeley_db_javasvc: $(rpc_jarfile) - echo "#!/bin/sh" > $@ - echo CLASSPATH="$(CLASSPATH):$(rpc_jarfile):$(JAVA_RPCDIR)/oncrpc.jar" >> $@ - echo LD_LIBRARY_PATH=.libs >> $@ - echo export CLASSPATH LD_LIBRARY_PATH >> $@ - echo exec java com.sleepycat.db.rpcserver.Server \$$@ >> $@ - chmod +x $@ - -db_archive: db_archive@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_archive@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_checkpoint: db_checkpoint@o@ util_log@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_checkpoint@o@ util_log@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_deadlock: db_deadlock@o@ util_log@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_deadlock@o@ util_log@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_dump: db_dump@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_dump@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_dump185: db_dump185@o@ @REPLACEMENT_OBJS@ - $(CCLINK) -o $@ $(LDFLAGS) db_dump185@o@ @REPLACEMENT_OBJS@ $(DB185LIB) - $(POSTLINK) $@ - -db_hotbackup: db_hotbackup@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_hotbackup@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_load: db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_printlog: db_printlog@o@ $(PRINT_OBJS) util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_printlog@o@ $(PRINT_OBJS) util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_recover: db_recover@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_recover@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_stat: db_stat@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_stat@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -db_upgrade: db_upgrade@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_upgrade@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) 
$@ - -db_verify: db_verify@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - db_verify@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -################################################## -# Library and standard utilities install. -################################################## -library_install: install_setup -library_install: install_include install_lib install_utilities install_docs - -uninstall: uninstall_include uninstall_lib uninstall_utilities uninstall_docs - -install_setup: - @test -d $(DESTDIR)$(prefix) || \ - ($(mkdir) -p $(DESTDIR)$(prefix) && \ - $(chmod) $(dmode) $(DESTDIR)$(prefix)) - -INCDOT= db.h db_cxx.h @ADDITIONAL_INCS@ -install_include: - @echo "Installing DB include files: $(DESTDIR)$(includedir) ..." - @test -d $(DESTDIR)$(includedir) || \ - ($(mkdir) -p $(DESTDIR)$(includedir) && \ - $(chmod) $(dmode) $(DESTDIR)$(includedir)) - @cd $(DESTDIR)$(includedir) && $(rm) -f $(INCDOT) - @$(cp) -p $(INCDOT) $(DESTDIR)$(includedir) - @cd $(DESTDIR)$(includedir) && $(chmod) $(fmode) $(INCDOT) - -uninstall_include: - @cd $(DESTDIR)$(includedir) && $(rm) -f $(INCDOT) - -install_lib: - @echo "Installing DB library: $(DESTDIR)$(libdir) ..." - @test -d $(DESTDIR)$(libdir) || \ - ($(mkdir) -p $(DESTDIR)$(libdir) && \ - $(chmod) $(dmode) $(DESTDIR)$(libdir)) - @cd $(DESTDIR)$(libdir) && $(rm) -f $(LIB_INSTALL_FILE_LIST) - @$(INSTALLER) @INSTALL_LIBS@ $(DESTDIR)$(libdir) - @(cd $(DESTDIR)$(libdir) && \ - test -f $(libso) && $(ln) -s $(libso) $(libso_default); \ - test -f $(libso) && $(ln) -s $(libso) $(libso_major); \ - test -f $(libxso) && $(ln) -s $(libxso) $(libxso_default); \ - test -f $(libxso) && $(ln) -s $(libxso) $(libxso_major); \ - test -f $(libtso) && $(ln) -s $(libtso) $(libtso_default); \ - test -f $(libtso) && $(ln) -s $(libtso) $(libtso_major); \ - test -f $(libjso) && $(ln) -s $(libjso) $(libjso_default); \ - test -f $(libjso) && $(ln) -s $(libjso) $(libjso_major); \ - test -f $(libjso) && $(ln) -s $(libjso) $(libjso_g)) || true - @(test -f $(libj_jarfile) && \ - $(cp) $(libj_jarfile) $(DESTDIR)$(libdir) && \ - $(chmod) $(fmode) $(DESTDIR)$(libdir)/$(libj_jarfile)) || true - -uninstall_lib: - @cd $(DESTDIR)$(libdir) && $(rm) -f $(LIB_INSTALL_FILE_LIST) - -install_utilities: - @echo "Installing DB utilities: $(DESTDIR)$(bindir) ..." - @test -d $(DESTDIR)$(bindir) || \ - ($(mkdir) -p $(DESTDIR)$(bindir) && \ - $(chmod) $(dmode) $(DESTDIR)$(bindir)) - @for i in $(UTIL_PROGS); do \ - $(rm) -f $(DESTDIR)$(bindir)/$$i $(DESTDIR)$(bindir)/$$i.exe; \ - test -f $$i.exe && i=$$i.exe || true; \ - $(INSTALLER) $$i $(DESTDIR)$(bindir)/$$i; \ - test -f $(strip) && $(strip) $(DESTDIR)$(bindir)/$$i || true; \ - $(chmod) $(emode) $(DESTDIR)$(bindir)/$$i; \ - done - -uninstall_utilities: - @(cd $(DESTDIR)$(bindir); for i in $(UTIL_PROGS); do \ - $(rm) -f $$i $$i.exe; \ - done) - -DOCLIST=api_c api_cxx api_tcl collections gsg gsg_txn images index.html \ - java ref sleepycat utility - -install_docs: - @echo "Installing documentation: $(DESTDIR)$(docdir) ..." - @test -d $(DESTDIR)$(docdir) || \ - ($(mkdir) -p $(DESTDIR)$(docdir) && \ - $(chmod) $(dmode) $(DESTDIR)$(docdir)) - @cd $(DESTDIR)$(docdir) && $(rm) -rf $(DOCLIST) - @cd $(srcdir)/docs && $(cp) -pr $(DOCLIST) $(DESTDIR)$(docdir)/ - -uninstall_docs: - @cd $(DESTDIR)$(docdir) && $(rm) -rf $(DOCLIST) - -################################################## -# Remaining standard Makefile targets. 
-################################################## -CLEAN_LIST=\ - TxnGuide TxnGuideInMemory bench_001 berkeley_db_cxxsvc \ - berkeley_db_javasvc berkeley_db_svc db_dump185 db_perf \ - db_reptest dbs ex_access ex_apprec ex_btrec ex_dbclient ex_env \ - ex_lock ex_mpool ex_repquote ex_sequence ex_thread ex_tpcb \ \ - example_database_load example_database_read excxx_access \ - excxx_btrec excxx_env excxx_example_database_load \ - excxx_example_database_read excxx_lock excxx_mpool \ - excxx_sequence excxx_tpcb txn_guide txn_guide_inmemory - -mostly-clean clean: - $(rm) -rf $(C_OBJS) - $(rm) -rf $(CXX_OBJS) $(JAVA_OBJS) $(TCL_OBJS) - $(rm) -rf $(RPC_CLIENT_OBJS) $(RPC_SRV_OBJS) $(RPC_CXXSRV_OBJS) - $(rm) -rf $(UTIL_PROGS) *.exe $(CLEAN_LIST) - $(rm) -rf $(JAVA_CLASSTOP) $(JAVA_EXCLASSTOP) - $(rm) -rf $(JAVA_RPCCLASSES) $(rpc_jarfile) - $(rm) -rf tags *@o@ *.o *.o.lock *.lo core *.core - $(rm) -rf ALL.OUT.* PARALLEL_TESTDIR.* - $(rm) -rf RUN_LOG RUNQUEUE TESTDIR TESTDIR.A TEST.LIST - $(rm) -rf logtrack_seen.db tm .libs $(LIB_INSTALL_FILE_LIST) - -REALCLEAN_LIST=\ - Makefile confdefs.h config.cache config.log config.status \ - configure.lineno db.h db185_int.h db_185.h db_config.h \ - db_cxx.h db_int.h db_int_def.h include.tcl \ - db_server.h db_server_clnt.c db_server_svc.c db_server_xdr.c \ - gen_db_server.c win_db.h - -distclean maintainer-clean realclean: clean - $(rm) -rf $(REALCLEAN_LIST) - $(rm) -rf libtool - -check depend dvi info obj TAGS: - @echo "$@: make target not supported" && true - -dist rpm rpmbuild: - @echo "$@: make target not supported" && false - -################################################## -# Multi-threaded testers, benchmarks. -################################################## -dbs@o@: $(srcdir)/test_server/dbs.c - $(CC) $(CFLAGS) $? -dbs_am@o@: $(srcdir)/test_server/dbs_am.c - $(CC) $(CFLAGS) $? -dbs_checkpoint@o@: $(srcdir)/test_server/dbs_checkpoint.c - $(CC) $(CFLAGS) $? -dbs_debug@o@: $(srcdir)/test_server/dbs_debug.c - $(CC) $(CFLAGS) $? -dbs_handles@o@: $(srcdir)/test_server/dbs_handles.c - $(CC) $(CFLAGS) $? -dbs_log@o@: $(srcdir)/test_server/dbs_log.c - $(CC) $(CFLAGS) $? -dbs_qam@o@: $(srcdir)/test_server/dbs_qam.c - $(CC) $(CFLAGS) $? -dbs_spawn@o@: $(srcdir)/test_server/dbs_spawn.c - $(CC) $(CFLAGS) $? -dbs_trickle@o@: $(srcdir)/test_server/dbs_trickle.c - $(CC) $(CFLAGS) $? -dbs_util@o@: $(srcdir)/test_server/dbs_util.c - $(CC) $(CFLAGS) $? -dbs_yield@o@: $(srcdir)/test_server/dbs_yield.c - $(CC) $(CFLAGS) $? -DBS_OBJS=\ - dbs@o@ dbs_am@o@ dbs_checkpoint@o@ dbs_debug@o@ dbs_handles@o@ \ - dbs_log@o@ dbs_qam@o@ dbs_spawn@o@ dbs_trickle@o@ dbs_util@o@ \ - dbs_yield@o@ -dbs: $(DBS_OBJS) $(DEF_LIB) - $(CCLINK) -o $@ \ - $(LDFLAGS) $(DBS_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) - $(POSTLINK) $@ - -db_perf@o@: $(srcdir)/test_perf/db_perf.c - $(CC) $(CFLAGS) $? -perf_checkpoint@o@: $(srcdir)/test_perf/perf_checkpoint.c - $(CC) $(CFLAGS) $? -perf_config@o@: $(srcdir)/test_perf/perf_config.c - $(CC) $(CFLAGS) $? -perf_dbs@o@: $(srcdir)/test_perf/perf_dbs.c - $(CC) $(CFLAGS) $? -perf_dead@o@: $(srcdir)/test_perf/perf_dead.c - $(CC) $(CFLAGS) $? -perf_debug@o@: $(srcdir)/test_perf/perf_debug.c - $(CC) $(CFLAGS) $? -perf_file@o@: $(srcdir)/test_perf/perf_file.c - $(CC) $(CFLAGS) $? -perf_key@o@: $(srcdir)/test_perf/perf_key.c - $(CC) $(CFLAGS) $? -perf_log@o@: $(srcdir)/test_perf/perf_log.c - $(CC) $(CFLAGS) $? -perf_misc@o@: $(srcdir)/test_perf/perf_misc.c - $(CC) $(CFLAGS) $? -perf_op@o@: $(srcdir)/test_perf/perf_op.c - $(CC) $(CFLAGS) $? 
-perf_parse@o@: $(srcdir)/test_perf/perf_parse.c - $(CC) $(CFLAGS) $? -perf_rand@o@: $(srcdir)/test_perf/perf_rand.c - $(CC) $(CFLAGS) $? -perf_spawn@o@: $(srcdir)/test_perf/perf_spawn.c - $(CC) $(CFLAGS) $? -perf_stat@o@: $(srcdir)/test_perf/perf_stat.c - $(CC) $(CFLAGS) $? -perf_sync@o@: $(srcdir)/test_perf/perf_sync.c - $(CC) $(CFLAGS) $? -perf_thread@o@: $(srcdir)/test_perf/perf_thread.c - $(CC) $(CFLAGS) $? -perf_trickle@o@: $(srcdir)/test_perf/perf_trickle.c - $(CC) $(CFLAGS) $? -perf_txn@o@: $(srcdir)/test_perf/perf_txn.c - $(CC) $(CFLAGS) $? -perf_util@o@: $(srcdir)/test_perf/perf_util.c - $(CC) $(CFLAGS) $? -perf_vx@o@: $(srcdir)/test_perf/perf_vx.c - $(CC) $(CFLAGS) $? -DBPERF_OBJS=\ - db_perf@o@ perf_checkpoint@o@ perf_config@o@ perf_dbs@o@ \ - perf_dead@o@ perf_debug@o@ perf_file@o@ perf_key@o@ perf_log@o@ \ - perf_misc@o@ perf_op@o@ perf_parse@o@ perf_rand@o@ perf_spawn@o@ \ - perf_stat@o@ perf_sync@o@ perf_thread@o@ perf_trickle@o@ \ - perf_txn@o@ perf_util@o@ perf_vx@o@ - -db_perf: $(DBPERF_OBJS) $(DEF_LIB) - $(CCLINK) -o $@ \ - $(LDFLAGS) $(DBPERF_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) - $(POSTLINK) $@ - -db_reptest@o@: $(srcdir)/test_rep/db_reptest.c - $(CC) $(CFLAGS) $? -reptest_accept@o@: $(srcdir)/test_rep/reptest_accept.c - $(CC) $(CFLAGS) $? -reptest_client@o@: $(srcdir)/test_rep/reptest_client.c - $(CC) $(CFLAGS) $? -reptest_config@o@: $(srcdir)/test_rep/reptest_config.c - $(CC) $(CFLAGS) $? -reptest_dbs@o@: $(srcdir)/test_rep/reptest_dbs.c - $(CC) $(CFLAGS) $? -reptest_debug@o@: $(srcdir)/test_rep/reptest_debug.c - $(CC) $(CFLAGS) $? -reptest_elect@o@: $(srcdir)/test_rep/reptest_elect.c - $(CC) $(CFLAGS) $? -reptest_env@o@: $(srcdir)/test_rep/reptest_env.c - $(CC) $(CFLAGS) $? -reptest_exec@o@: $(srcdir)/test_rep/reptest_exec.c - $(CC) $(CFLAGS) $? -reptest_file@o@: $(srcdir)/test_rep/reptest_file.c - $(CC) $(CFLAGS) $? -reptest_key@o@: $(srcdir)/test_rep/reptest_key.c - $(CC) $(CFLAGS) $? -reptest_master@o@: $(srcdir)/test_rep/reptest_master.c - $(CC) $(CFLAGS) $? -reptest_misc@o@: $(srcdir)/test_rep/reptest_misc.c - $(CC) $(CFLAGS) $? -reptest_msg_thread@o@: $(srcdir)/test_rep/reptest_msg_thread.c - $(CC) $(CFLAGS) $? -reptest_op@o@: $(srcdir)/test_rep/reptest_op.c - $(CC) $(CFLAGS) $? -reptest_parse@o@: $(srcdir)/test_rep/reptest_parse.c - $(CC) $(CFLAGS) $? -reptest_rand@o@: $(srcdir)/test_rep/reptest_rand.c - $(CC) $(CFLAGS) $? -reptest_send@o@: $(srcdir)/test_rep/reptest_send.c - $(CC) $(CFLAGS) $? -reptest_site@o@: $(srcdir)/test_rep/reptest_site.c - $(CC) $(CFLAGS) $? -reptest_socket@o@: $(srcdir)/test_rep/reptest_socket.c - $(CC) $(CFLAGS) $? -reptest_spawn@o@: $(srcdir)/test_rep/reptest_spawn.c - $(CC) $(CFLAGS) $? -reptest_thread@o@: $(srcdir)/test_rep/reptest_thread.c - $(CC) $(CFLAGS) $? -reptest_txn@o@: $(srcdir)/test_rep/reptest_txn.c - $(CC) $(CFLAGS) $? -reptest_util@o@: $(srcdir)/test_rep/reptest_util.c - $(CC) $(CFLAGS) $? -DBREPTEST_OBJS=\ - db_reptest@o@ reptest_accept@o@ reptest_client@o@ reptest_config@o@ \ - reptest_dbs@o@ reptest_debug@o@ reptest_elect@o@ reptest_env@o@ \ - reptest_exec@o@ reptest_file@o@ reptest_key@o@ reptest_master@o@ \ - reptest_misc@o@ reptest_msg_thread@o@ reptest_op@o@ reptest_parse@o@ \ - reptest_rand@o@ reptest_send@o@ reptest_site@o@ reptest_socket@o@ \ - reptest_spawn@o@ reptest_thread@o@ reptest_txn@o@ reptest_util@o@ - -db_reptest: $(DBREPTEST_OBJS) $(DEF_LIB) - $(CCLINK) -o $@ \ - $(LDFLAGS) $(DBREPTEST_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) - $(POSTLINK) $@ - -tm@o@: $(srcdir)/mutex/tm.c - $(CC) $(CFLAGS) $? 
-tm: tm@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) tm@o@ $(DEF_LIB) $(TEST_LIBS) $(LIBS) - $(POSTLINK) $@ - -################################################## -# Example programs for C. -################################################## -bench_001@o@: $(srcdir)/examples_c/bench_001.c - $(CC) $(CFLAGS) $? -bench_001: bench_001@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) bench_001@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -ex_access@o@: $(srcdir)/examples_c/ex_access.c - $(CC) $(CFLAGS) $? -ex_access: ex_access@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) ex_access@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -ex_apprec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec.c - $(CC) $(CFLAGS) $? -ex_apprec_auto@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_auto.c - $(CC) $(CFLAGS) $? -ex_apprec_autop@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_autop.c - $(CC) $(CFLAGS) $? -ex_apprec_rec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_rec.c - $(CC) $(CFLAGS) $? -EX_APPREC_OBJS=\ - ex_apprec@o@ ex_apprec_auto@o@ ex_apprec_autop@o@ ex_apprec_rec@o@ -ex_apprec: $(EX_APPREC_OBJS) $(DEF_LIB) - $(CCLINK) -o $@ \ - $(LDFLAGS) $(EX_APPREC_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) - -ex_btrec@o@: $(srcdir)/examples_c/ex_btrec.c - $(CC) $(CFLAGS) $? -ex_btrec: ex_btrec@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) ex_btrec@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -ex_dbclient@o@: $(srcdir)/examples_c/ex_dbclient.c - $(CC) $(CFLAGS) $? -ex_dbclient: ex_dbclient@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) ex_dbclient@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -ex_env@o@: $(srcdir)/examples_c/ex_env.c - $(CC) $(CFLAGS) $? -ex_env: ex_env@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) ex_env@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -ex_lock@o@: $(srcdir)/examples_c/ex_lock.c - $(CC) $(CFLAGS) $? -ex_lock: ex_lock@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) ex_lock@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -ex_mpool@o@: $(srcdir)/examples_c/ex_mpool.c - $(CC) $(CFLAGS) $? -ex_mpool: ex_mpool@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) ex_mpool@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -ex_rq_client@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_client.c - $(CC) $(CFLAGS) $? -ex_rq_main@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_main.c - $(CC) $(CFLAGS) $? -ex_rq_master@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_master.c - $(CC) $(CFLAGS) $? -ex_rq_net@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_net.c - $(CC) $(CFLAGS) $? -ex_rq_util@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_util.c - $(CC) $(CFLAGS) $? -EX_RQ_OBJS=\ - ex_rq_client@o@ ex_rq_main@o@ ex_rq_master@o@ ex_rq_net@o@ ex_rq_util@o@ -ex_repquote: $(EX_RQ_OBJS) $(DEF_LIB) - $(CCLINK) -o $@ \ - $(LDFLAGS) $(EX_RQ_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS) - $(POSTLINK) $@ - -ex_sequence@o@: $(srcdir)/examples_c/ex_sequence.c - $(CC) $(CFLAGS) $? -ex_sequence: ex_sequence@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) ex_sequence@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -ex_thread@o@: $(srcdir)/examples_c/ex_thread.c - $(CC) $(CFLAGS) $? -ex_thread: ex_thread@o@ $(DEF_LIB) - $(CCLINK) -o $@ \ - $(LDFLAGS) ex_thread@o@ $(DEF_LIB) $(TEST_LIBS) $(LIBS) - $(POSTLINK) $@ - -ex_tpcb@o@: $(srcdir)/examples_c/ex_tpcb.c - $(CC) $(CFLAGS) $? -ex_tpcb: ex_tpcb@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) ex_tpcb@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -gettingstarted_common@o@: \ - $(srcdir)/examples_c/getting_started/gettingstarted_common.c - $(CC) -I $(srcdir)/examples_c/getting_started $(CFLAGS) $? -example_database_load@o@: \ - $(srcdir)/examples_c/getting_started/example_database_load.c - $(CC) $(CFLAGS) $? 
-example_database_read@o@: \ - $(srcdir)/examples_c/getting_started/example_database_read.c - $(CC) $(CFLAGS) $? -example_database_load: example_database_load@o@ gettingstarted_common@o@ \ - $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - example_database_load@o@ gettingstarted_common@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ -example_database_read: example_database_read@o@ gettingstarted_common@o@ \ - $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) \ - example_database_read@o@ gettingstarted_common@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -txn_guide_inmemory@o@: $(srcdir)/examples_c/txn_guide/txn_guide_inmemory.c - $(CC) $(CFLAGS) $? -txn_guide_inmemory: txn_guide_inmemory@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) txn_guide_inmemory@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -txn_guide@o@: $(srcdir)/examples_c/txn_guide/txn_guide.c - $(CC) $(CFLAGS) $? -txn_guide: txn_guide@o@ $(DEF_LIB) - $(CCLINK) -o $@ $(LDFLAGS) txn_guide@o@ $(DEF_LIB) $(LIBS) - $(POSTLINK) $@ - -################################################## -# Example programs for C++. -################################################## -AccessExample@o@: $(srcdir)/examples_cxx/AccessExample.cpp - $(CXX) $(CXXFLAGS) $? -excxx_access: AccessExample@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) AccessExample@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -BtRecExample@o@: $(srcdir)/examples_cxx/BtRecExample.cpp - $(CXX) $(CXXFLAGS) $? -excxx_btrec: BtRecExample@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) BtRecExample@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -EnvExample@o@: $(srcdir)/examples_cxx/EnvExample.cpp - $(CXX) $(CXXFLAGS) $? -excxx_env: EnvExample@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) EnvExample@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -LockExample@o@: $(srcdir)/examples_cxx/LockExample.cpp - $(CXX) $(CXXFLAGS) $? -excxx_lock: LockExample@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) LockExample@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -MpoolExample@o@: $(srcdir)/examples_cxx/MpoolExample.cpp - $(CXX) $(CXXFLAGS) $? -excxx_mpool: MpoolExample@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) MpoolExample@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -SequenceExample@o@: $(srcdir)/examples_cxx/SequenceExample.cpp - $(CXX) $(CXXFLAGS) $? -excxx_sequence: SequenceExample@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) SequenceExample@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -TpcbExample@o@: $(srcdir)/examples_cxx/TpcbExample.cpp - $(CXX) $(CXXFLAGS) $? -excxx_tpcb: TpcbExample@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) TpcbExample@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -excxx_example_database_load@o@: \ - $(srcdir)/examples_cxx/getting_started/excxx_example_database_load.cpp - $(CXX) -I$(srcdir)/examples_cxx/getting_started $(CXXFLAGS) $? -excxx_example_database_read@o@: \ - $(srcdir)/examples_cxx/getting_started/excxx_example_database_read.cpp - $(CXX) -I$(srcdir)/examples_cxx/getting_started $(CXXFLAGS) $? -MyDb@o@: $(srcdir)/examples_cxx/getting_started/MyDb.cpp - $(CXX) -I$(srcdir)/examples_cxx/getting_started $(CXXFLAGS) $? 
-excxx_example_database_load: \ - excxx_example_database_load@o@ MyDb@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) \ - excxx_example_database_load@o@ MyDb@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ -excxx_example_database_read: \ - excxx_example_database_read@o@ MyDb@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) \ - excxx_example_database_read@o@ MyDb@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -TxnGuideInMemory@o@: $(srcdir)/examples_cxx/txn_guide/TxnGuideInMemory.cpp - $(CXX) $(CXXFLAGS) $? -TxnGuideInMemory: TxnGuideInMemory@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) TxnGuideInMemory@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -TxnGuide@o@: $(srcdir)/examples_cxx/txn_guide/TxnGuide.cpp - $(CXX) $(CXXFLAGS) $? -TxnGuide: TxnGuide@o@ $(DEF_LIB_CXX) - $(CXXLINK) -o $@ $(LDFLAGS) TxnGuide@o@ $(DEF_LIB_CXX) $(LIBS) - $(POSTLINK) $@ - -################################################## -# C API build rules. -################################################## -aes_method@o@: $(srcdir)/crypto/aes_method.c - $(CC) $(CFLAGS) $? -bt_compare@o@: $(srcdir)/btree/bt_compare.c - $(CC) $(CFLAGS) $? -bt_conv@o@: $(srcdir)/btree/bt_conv.c - $(CC) $(CFLAGS) $? -bt_curadj@o@: $(srcdir)/btree/bt_curadj.c - $(CC) $(CFLAGS) $? -bt_cursor@o@: $(srcdir)/btree/bt_cursor.c - $(CC) $(CFLAGS) $? -bt_delete@o@: $(srcdir)/btree/bt_delete.c - $(CC) $(CFLAGS) $? -bt_method@o@: $(srcdir)/btree/bt_method.c - $(CC) $(CFLAGS) $? -bt_open@o@: $(srcdir)/btree/bt_open.c - $(CC) $(CFLAGS) $? -bt_put@o@: $(srcdir)/btree/bt_put.c - $(CC) $(CFLAGS) $? -bt_rec@o@: $(srcdir)/btree/bt_rec.c - $(CC) $(CFLAGS) $? -bt_reclaim@o@: $(srcdir)/btree/bt_reclaim.c - $(CC) $(CFLAGS) $? -bt_recno@o@: $(srcdir)/btree/bt_recno.c - $(CC) $(CFLAGS) $? -bt_rsearch@o@: $(srcdir)/btree/bt_rsearch.c - $(CC) $(CFLAGS) $? -bt_search@o@: $(srcdir)/btree/bt_search.c - $(CC) $(CFLAGS) $? -bt_split@o@: $(srcdir)/btree/bt_split.c - $(CC) $(CFLAGS) $? -bt_stat@o@: $(srcdir)/btree/bt_stat.c - $(CC) $(CFLAGS) $? -bt_compact@o@: $(srcdir)/btree/bt_compact.c - $(CC) $(CFLAGS) $? -bt_upgrade@o@: $(srcdir)/btree/bt_upgrade.c - $(CC) $(CFLAGS) $? -bt_verify@o@: $(srcdir)/btree/bt_verify.c - $(CC) $(CFLAGS) $? -btree_auto@o@: $(srcdir)/btree/btree_auto.c - $(CC) $(CFLAGS) $? -btree_autop@o@: $(srcdir)/btree/btree_autop.c - $(CC) $(CFLAGS) $? -crdel_auto@o@: $(srcdir)/db/crdel_auto.c - $(CC) $(CFLAGS) $? -crdel_autop@o@: $(srcdir)/db/crdel_autop.c - $(CC) $(CFLAGS) $? -crdel_rec@o@: $(srcdir)/db/crdel_rec.c - $(CC) $(CFLAGS) $? -crypto@o@: $(srcdir)/crypto/crypto.c - $(CC) $(CFLAGS) $? -crypto_stub@o@: $(srcdir)/common/crypto_stub.c - $(CC) $(CFLAGS) $? -db185@o@: $(srcdir)/db185/db185.c - $(CC) $(CFLAGS) $? -db@o@: $(srcdir)/db/db.c - $(CC) $(CFLAGS) $? -db_am@o@: $(srcdir)/db/db_am.c - $(CC) $(CFLAGS) $? -db_auto@o@: $(srcdir)/db/db_auto.c - $(CC) $(CFLAGS) $? -db_autop@o@: $(srcdir)/db/db_autop.c - $(CC) $(CFLAGS) $? -db_byteorder@o@: $(srcdir)/common/db_byteorder.c - $(CC) $(CFLAGS) $? -db_cam@o@: $(srcdir)/db/db_cam.c - $(CC) $(CFLAGS) $? -db_clock@o@: $(srcdir)/common/db_clock.c - $(CC) $(CFLAGS) $? -db_conv@o@: $(srcdir)/db/db_conv.c - $(CC) $(CFLAGS) $? -db_dispatch@o@: $(srcdir)/db/db_dispatch.c - $(CC) $(CFLAGS) $? -db_dup@o@: $(srcdir)/db/db_dup.c - $(CC) $(CFLAGS) $? -db_err@o@: $(srcdir)/common/db_err.c - $(CC) $(CFLAGS) $? -db_getlong@o@: $(srcdir)/common/db_getlong.c - $(CC) $(CFLAGS) $? -db_idspace@o@: $(srcdir)/common/db_idspace.c - $(CC) $(CFLAGS) $? -db_iface@o@: $(srcdir)/db/db_iface.c - $(CC) $(CFLAGS) $? 
-db_join@o@: $(srcdir)/db/db_join.c - $(CC) $(CFLAGS) $? -db_log2@o@: $(srcdir)/common/db_log2.c - $(CC) $(CFLAGS) $? -db_meta@o@: $(srcdir)/db/db_meta.c - $(CC) $(CFLAGS) $? -db_method@o@: $(srcdir)/db/db_method.c - $(CC) $(CFLAGS) $? -db_open@o@: $(srcdir)/db/db_open.c - $(CC) $(CFLAGS) $? -db_overflow@o@: $(srcdir)/db/db_overflow.c - $(CC) $(CFLAGS) $? -db_ovfl_vrfy@o@: $(srcdir)/db/db_ovfl_vrfy.c - $(CC) $(CFLAGS) $? -db_pr@o@: $(srcdir)/db/db_pr.c - $(CC) $(CFLAGS) $? -db_rec@o@: $(srcdir)/db/db_rec.c - $(CC) $(CFLAGS) $? -db_reclaim@o@: $(srcdir)/db/db_reclaim.c - $(CC) $(CFLAGS) $? -db_rename@o@: $(srcdir)/db/db_rename.c - $(CC) $(CFLAGS) $? -db_remove@o@: $(srcdir)/db/db_remove.c - $(CC) $(CFLAGS) $? -db_ret@o@: $(srcdir)/db/db_ret.c - $(CC) $(CFLAGS) $? -db_setid@o@: $(srcdir)/db/db_setid.c - $(CC) $(CFLAGS) $? -db_setlsn@o@: $(srcdir)/db/db_setlsn.c - $(CC) $(CFLAGS) $? -db_salloc@o@: $(srcdir)/env/db_salloc.c - $(CC) $(CFLAGS) $? -db_shash@o@: $(srcdir)/env/db_shash.c - $(CC) $(CFLAGS) $? -db_stati@o@: $(srcdir)/db/db_stati.c - $(CC) $(CFLAGS) $? -db_truncate@o@: $(srcdir)/db/db_truncate.c - $(CC) $(CFLAGS) $? -db_upg@o@: $(srcdir)/db/db_upg.c - $(CC) $(CFLAGS) $? -db_upg_opd@o@: $(srcdir)/db/db_upg_opd.c - $(CC) $(CFLAGS) $? -db_vrfy@o@: $(srcdir)/db/db_vrfy.c - $(CC) $(CFLAGS) $? -db_vrfyutil@o@: $(srcdir)/db/db_vrfyutil.c - $(CC) $(CFLAGS) $? -db_vrfy_stub@o@: $(srcdir)/db/db_vrfy_stub.c - $(CC) $(CFLAGS) $? -dbm@o@: $(srcdir)/dbm/dbm.c - $(CC) $(CFLAGS) $? -dbreg@o@: $(srcdir)/dbreg/dbreg.c - $(CC) $(CFLAGS) $? -dbreg_auto@o@: $(srcdir)/dbreg/dbreg_auto.c - $(CC) $(CFLAGS) $? -dbreg_autop@o@: $(srcdir)/dbreg/dbreg_autop.c - $(CC) $(CFLAGS) $? -dbreg_rec@o@: $(srcdir)/dbreg/dbreg_rec.c - $(CC) $(CFLAGS) $? -dbreg_stat@o@: $(srcdir)/dbreg/dbreg_stat.c - $(CC) $(CFLAGS) $? -dbreg_util@o@: $(srcdir)/dbreg/dbreg_util.c - $(CC) $(CFLAGS) $? -env_failchk@o@: $(srcdir)/env/env_failchk.c - $(CC) $(CFLAGS) $? -env_file@o@: $(srcdir)/env/env_file.c - $(CC) $(CFLAGS) $? -env_method@o@: $(srcdir)/env/env_method.c - $(CC) $(CFLAGS) $? -env_open@o@: $(srcdir)/env/env_open.c - $(CC) $(CFLAGS) $? -env_recover@o@: $(srcdir)/env/env_recover.c - $(CC) $(CFLAGS) $? -env_region@o@: $(srcdir)/env/env_region.c - $(CC) $(CFLAGS) $? -env_register@o@: $(srcdir)/env/env_register.c - $(CC) $(CFLAGS) $? -env_stat@o@: $(srcdir)/env/env_stat.c - $(CC) $(CFLAGS) $? -fileops_auto@o@: $(srcdir)/fileops/fileops_auto.c - $(CC) $(CFLAGS) $? -fileops_autop@o@: $(srcdir)/fileops/fileops_autop.c - $(CC) $(CFLAGS) $? -fop_basic@o@: $(srcdir)/fileops/fop_basic.c - $(CC) $(CFLAGS) $? -fop_rec@o@: $(srcdir)/fileops/fop_rec.c - $(CC) $(CFLAGS) $? -fop_util@o@: $(srcdir)/fileops/fop_util.c - $(CC) $(CFLAGS) $? -hash@o@: $(srcdir)/hash/hash.c - $(CC) $(CFLAGS) $? -hash_auto@o@: $(srcdir)/hash/hash_auto.c - $(CC) $(CFLAGS) $? -hash_autop@o@: $(srcdir)/hash/hash_autop.c - $(CC) $(CFLAGS) $? -hash_conv@o@: $(srcdir)/hash/hash_conv.c - $(CC) $(CFLAGS) $? -hash_dup@o@: $(srcdir)/hash/hash_dup.c - $(CC) $(CFLAGS) $? -hash_func@o@: $(srcdir)/hash/hash_func.c - $(CC) $(CFLAGS) $? -hash_meta@o@: $(srcdir)/hash/hash_meta.c - $(CC) $(CFLAGS) $? -hash_method@o@: $(srcdir)/hash/hash_method.c - $(CC) $(CFLAGS) $? -hash_open@o@: $(srcdir)/hash/hash_open.c - $(CC) $(CFLAGS) $? -hash_page@o@: $(srcdir)/hash/hash_page.c - $(CC) $(CFLAGS) $? -hash_rec@o@: $(srcdir)/hash/hash_rec.c - $(CC) $(CFLAGS) $? -hash_reclaim@o@: $(srcdir)/hash/hash_reclaim.c - $(CC) $(CFLAGS) $? -hash_stat@o@: $(srcdir)/hash/hash_stat.c - $(CC) $(CFLAGS) $? 
-hash_stub@o@: $(srcdir)/hash/hash_stub.c - $(CC) $(CFLAGS) $? -hash_upgrade@o@: $(srcdir)/hash/hash_upgrade.c - $(CC) $(CFLAGS) $? -hash_verify@o@: $(srcdir)/hash/hash_verify.c - $(CC) $(CFLAGS) $? -hmac@o@: $(srcdir)/hmac/hmac.c - $(CC) $(CFLAGS) $? -hsearch@o@: $(srcdir)/hsearch/hsearch.c - $(CC) $(CFLAGS) $? -lock@o@: $(srcdir)/lock/lock.c - $(CC) $(CFLAGS) $? -lock_deadlock@o@:$(srcdir)/lock/lock_deadlock.c - $(CC) $(CFLAGS) $? -lock_failchk@o@:$(srcdir)/lock/lock_failchk.c - $(CC) $(CFLAGS) $? -lock_id@o@:$(srcdir)/lock/lock_id.c - $(CC) $(CFLAGS) $? -lock_list@o@:$(srcdir)/lock/lock_list.c - $(CC) $(CFLAGS) $? -lock_method@o@:$(srcdir)/lock/lock_method.c - $(CC) $(CFLAGS) $? -lock_region@o@:$(srcdir)/lock/lock_region.c - $(CC) $(CFLAGS) $? -lock_stat@o@:$(srcdir)/lock/lock_stat.c - $(CC) $(CFLAGS) $? -lock_timer@o@:$(srcdir)/lock/lock_timer.c - $(CC) $(CFLAGS) $? -lock_util@o@:$(srcdir)/lock/lock_util.c - $(CC) $(CFLAGS) $? -log@o@: $(srcdir)/log/log.c - $(CC) $(CFLAGS) $? -log_archive@o@: $(srcdir)/log/log_archive.c - $(CC) $(CFLAGS) $? -log_compare@o@: $(srcdir)/log/log_compare.c - $(CC) $(CFLAGS) $? -log_debug@o@: $(srcdir)/log/log_debug.c - $(CC) $(CFLAGS) $? -log_get@o@: $(srcdir)/log/log_get.c - $(CC) $(CFLAGS) $? -log_method@o@: $(srcdir)/log/log_method.c - $(CC) $(CFLAGS) $? -log_put@o@: $(srcdir)/log/log_put.c - $(CC) $(CFLAGS) $? -log_stat@o@: $(srcdir)/log/log_stat.c - $(CC) $(CFLAGS) $? -mp_alloc@o@: $(srcdir)/mp/mp_alloc.c - $(CC) $(CFLAGS) $? -mp_bh@o@: $(srcdir)/mp/mp_bh.c - $(CC) $(CFLAGS) $? -mp_fget@o@: $(srcdir)/mp/mp_fget.c - $(CC) $(CFLAGS) $? -mp_fmethod@o@: $(srcdir)/mp/mp_fmethod.c - $(CC) $(CFLAGS) $? -mp_fopen@o@: $(srcdir)/mp/mp_fopen.c - $(CC) $(CFLAGS) $? -mp_fput@o@: $(srcdir)/mp/mp_fput.c - $(CC) $(CFLAGS) $? -mp_fset@o@: $(srcdir)/mp/mp_fset.c - $(CC) $(CFLAGS) $? -mp_method@o@: $(srcdir)/mp/mp_method.c - $(CC) $(CFLAGS) $? -mp_region@o@: $(srcdir)/mp/mp_region.c - $(CC) $(CFLAGS) $? -mp_register@o@: $(srcdir)/mp/mp_register.c - $(CC) $(CFLAGS) $? -mp_stat@o@: $(srcdir)/mp/mp_stat.c - $(CC) $(CFLAGS) $? -mp_sync@o@: $(srcdir)/mp/mp_sync.c - $(CC) $(CFLAGS) $? -mp_trickle@o@: $(srcdir)/mp/mp_trickle.c - $(CC) $(CFLAGS) $? -mt19937db@o@: $(srcdir)/crypto/mersenne/mt19937db.c - $(CC) $(CFLAGS) $? -mut_alloc@o@: $(srcdir)/mutex/mut_alloc.c - $(CC) $(CFLAGS) $? -mut_fcntl@o@: $(srcdir)/mutex/mut_fcntl.c - $(CC) $(CFLAGS) $? -mut_method@o@: $(srcdir)/mutex/mut_method.c - $(CC) $(CFLAGS) $? -mut_pthread@o@: $(srcdir)/mutex/mut_pthread.c - $(CC) $(CFLAGS) $? -mut_region@o@: $(srcdir)/mutex/mut_region.c - $(CC) $(CFLAGS) $? -mut_stat@o@: $(srcdir)/mutex/mut_stat.c - $(CC) $(CFLAGS) $? -mut_tas@o@: $(srcdir)/mutex/mut_tas.c - $(CC) $(CFLAGS) $? -mut_win32@o@: $(srcdir)/mutex/mut_win32.c - $(CC) $(CFLAGS) $? -os_abs@o@: $(srcdir)/@OSDIR@/os_abs.c - $(CC) $(CFLAGS) $? -os_alloc@o@: $(srcdir)/os/os_alloc.c - $(CC) $(CFLAGS) $? -os_clock@o@: $(srcdir)/@OSDIR@/os_clock.c - $(CC) $(CFLAGS) $? -os_config@o@: $(srcdir)/@OSDIR@/os_config.c - $(CC) $(CFLAGS) $? -os_dir@o@: $(srcdir)/@OSDIR@/os_dir.c - $(CC) $(CFLAGS) $? -os_errno@o@: $(srcdir)/@OSDIR@/os_errno.c - $(CC) $(CFLAGS) $? -os_fid@o@: $(srcdir)/@OSDIR@/os_fid.c - $(CC) $(CFLAGS) $? -os_flock@o@: $(srcdir)/@OSDIR@/os_flock.c - $(CC) $(CFLAGS) $? -os_fsync@o@: $(srcdir)/@OSDIR@/os_fsync.c - $(CC) $(CFLAGS) $? -os_id@o@: $(srcdir)/os/os_id.c - $(CC) $(CFLAGS) $? -os_handle@o@: $(srcdir)/@OSDIR@/os_handle.c - $(CC) $(CFLAGS) $? -os_map@o@: $(srcdir)/@OSDIR@/os_map.c - $(CC) $(CFLAGS) $? 
-os_method@o@: $(srcdir)/os/os_method.c - $(CC) $(CFLAGS) $? -os_mkdir@o@: $(srcdir)/os/os_mkdir.c - $(CC) $(CFLAGS) $? -os_oflags@o@: $(srcdir)/os/os_oflags.c - $(CC) $(CFLAGS) $? -os_open@o@: $(srcdir)/@OSDIR@/os_open.c - $(CC) $(CFLAGS) $? -os_region@o@: $(srcdir)/os/os_region.c - $(CC) $(CFLAGS) $? -os_rename@o@: $(srcdir)/@OSDIR@/os_rename.c - $(CC) $(CFLAGS) $? -os_root@o@: $(srcdir)/os/os_root.c - $(CC) $(CFLAGS) $? -os_rpath@o@: $(srcdir)/os/os_rpath.c - $(CC) $(CFLAGS) $? -os_rw@o@: $(srcdir)/@OSDIR@/os_rw.c - $(CC) $(CFLAGS) $? -os_seek@o@: $(srcdir)/@OSDIR@/os_seek.c - $(CC) $(CFLAGS) $? -os_sleep@o@: $(srcdir)/@OSDIR@/os_sleep.c - $(CC) $(CFLAGS) $? -os_spin@o@: $(srcdir)/@OSDIR@/os_spin.c - $(CC) $(CFLAGS) $? -os_stat@o@: $(srcdir)/@OSDIR@/os_stat.c - $(CC) $(CFLAGS) $? -os_tmpdir@o@: $(srcdir)/os/os_tmpdir.c - $(CC) $(CFLAGS) $? -os_truncate@o@: $(srcdir)/@OSDIR@/os_truncate.c - $(CC) $(CFLAGS) $? -os_unlink@o@: $(srcdir)/os/os_unlink.c - $(CC) $(CFLAGS) $? -qam@o@: $(srcdir)/qam/qam.c - $(CC) $(CFLAGS) $? -qam_auto@o@: $(srcdir)/qam/qam_auto.c - $(CC) $(CFLAGS) $? -qam_autop@o@: $(srcdir)/qam/qam_autop.c - $(CC) $(CFLAGS) $? -qam_conv@o@: $(srcdir)/qam/qam_conv.c - $(CC) $(CFLAGS) $? -qam_files@o@: $(srcdir)/qam/qam_files.c - $(CC) $(CFLAGS) $? -qam_method@o@: $(srcdir)/qam/qam_method.c - $(CC) $(CFLAGS) $? -qam_open@o@: $(srcdir)/qam/qam_open.c - $(CC) $(CFLAGS) $? -qam_rec@o@: $(srcdir)/qam/qam_rec.c - $(CC) $(CFLAGS) $? -qam_stat@o@: $(srcdir)/qam/qam_stat.c - $(CC) $(CFLAGS) $? -qam_stub@o@: $(srcdir)/qam/qam_stub.c - $(CC) $(CFLAGS) $? -qam_upgrade@o@: $(srcdir)/qam/qam_upgrade.c - $(CC) $(CFLAGS) $? -qam_verify@o@: $(srcdir)/qam/qam_verify.c - $(CC) $(CFLAGS) $? -rep_auto@o@: $(srcdir)/rep/rep_auto.c - $(CC) $(CFLAGS) $? -rep_autop@o@: $(srcdir)/rep/rep_autop.c - $(CC) $(CFLAGS) $? -rep_backup@o@: $(srcdir)/rep/rep_backup.c - $(CC) $(CFLAGS) $? -rep_elect@o@: $(srcdir)/rep/rep_elect.c - $(CC) $(CFLAGS) $? -rep_log@o@: $(srcdir)/rep/rep_log.c - $(CC) $(CFLAGS) $? -rep_method@o@: $(srcdir)/rep/rep_method.c - $(CC) $(CFLAGS) $? -rep_record@o@: $(srcdir)/rep/rep_record.c - $(CC) $(CFLAGS) $? -rep_region@o@: $(srcdir)/rep/rep_region.c - $(CC) $(CFLAGS) $? -rep_stub@o@: $(srcdir)/rep/rep_stub.c - $(CC) $(CFLAGS) $? -rep_stat@o@: $(srcdir)/rep/rep_stat.c - $(CC) $(CFLAGS) $? -rep_util@o@: $(srcdir)/rep/rep_util.c - $(CC) $(CFLAGS) $? -rep_verify@o@: $(srcdir)/rep/rep_verify.c - $(CC) $(CFLAGS) $? -rijndael-alg-fst@o@: $(srcdir)/crypto/rijndael/rijndael-alg-fst.c - $(CC) $(CFLAGS) $? -rijndael-api-fst@o@: $(srcdir)/crypto/rijndael/rijndael-api-fst.c - $(CC) $(CFLAGS) $? -seq_stat@o@: $(srcdir)/sequence/seq_stat.c - $(CC) $(CFLAGS) $? -sequence@o@: $(srcdir)/sequence/sequence.c - $(CC) $(CFLAGS) $? -sha1@o@: $(srcdir)/hmac/sha1.c - $(CC) $(CFLAGS) $? -stat_stub@o@: $(srcdir)/common/stat_stub.c - $(CC) $(CFLAGS) $? -txn@o@: $(srcdir)/txn/txn.c - $(CC) $(CFLAGS) $? -txn_auto@o@: $(srcdir)/txn/txn_auto.c - $(CC) $(CFLAGS) $? -txn_autop@o@: $(srcdir)/txn/txn_autop.c - $(CC) $(CFLAGS) $? -txn_chkpt@o@: $(srcdir)/txn/txn_chkpt.c - $(CC) $(CFLAGS) $? -txn_failchk@o@: $(srcdir)/txn/txn_failchk.c - $(CC) $(CFLAGS) $? -txn_method@o@: $(srcdir)/txn/txn_method.c - $(CC) $(CFLAGS) $? -txn_rec@o@: $(srcdir)/txn/txn_rec.c - $(CC) $(CFLAGS) $? -txn_recover@o@: $(srcdir)/txn/txn_recover.c - $(CC) $(CFLAGS) $? -txn_region@o@: $(srcdir)/txn/txn_region.c - $(CC) $(CFLAGS) $? -txn_stat@o@: $(srcdir)/txn/txn_stat.c - $(CC) $(CFLAGS) $? -txn_util@o@: $(srcdir)/txn/txn_util.c - $(CC) $(CFLAGS) $? 
-util_cache@o@: $(srcdir)/common/util_cache.c
-	$(CC) $(CFLAGS) $?
-util_log@o@: $(srcdir)/common/util_log.c
-	$(CC) $(CFLAGS) $?
-util_sig@o@: $(srcdir)/common/util_sig.c
-	$(CC) $(CFLAGS) $?
-uts4_cc@o@: $(srcdir)/mutex/uts4_cc.s
-	$(AS) $(ASFLAGS) -o $@ $?
-xa@o@: $(srcdir)/xa/xa.c
-	$(CC) $(CFLAGS) $?
-xa_db@o@: $(srcdir)/xa/xa_db.c
-	$(CC) $(CFLAGS) $?
-xa_map@o@: $(srcdir)/xa/xa_map.c
-	$(CC) $(CFLAGS) $?
-
-##################################################
-# C++ API build rules.
-##################################################
-cxx_db@o@: $(srcdir)/cxx/cxx_db.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_dbc@o@: $(srcdir)/cxx/cxx_dbc.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_dbt@o@: $(srcdir)/cxx/cxx_dbt.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_env@o@: $(srcdir)/cxx/cxx_env.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_except@o@: $(srcdir)/cxx/cxx_except.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_lock@o@: $(srcdir)/cxx/cxx_lock.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_logc@o@: $(srcdir)/cxx/cxx_logc.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_mpool@o@: $(srcdir)/cxx/cxx_mpool.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_multi@o@: $(srcdir)/cxx/cxx_multi.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_seq@o@: $(srcdir)/cxx/cxx_seq.cpp
-	$(CXX) $(CXXFLAGS) $?
-cxx_txn@o@: $(srcdir)/cxx/cxx_txn.cpp
-	$(CXX) $(CXXFLAGS) $?
-
-##################################################
-# Java API build rules.
-##################################################
-db_java_wrap@o@: $(srcdir)/libdb_java/db_java_wrap.c
-	$(CC) $(CFLAGS) $?
-
-##################################################
-# Tcl API build rules.
-##################################################
-tcl_compat@o@: $(srcdir)/tcl/tcl_compat.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_db@o@: $(srcdir)/tcl/tcl_db.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_db_pkg@o@: $(srcdir)/tcl/tcl_db_pkg.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_dbcursor@o@: $(srcdir)/tcl/tcl_dbcursor.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_env@o@: $(srcdir)/tcl/tcl_env.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_internal@o@: $(srcdir)/tcl/tcl_internal.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_lock@o@: $(srcdir)/tcl/tcl_lock.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_log@o@: $(srcdir)/tcl/tcl_log.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_mp@o@: $(srcdir)/tcl/tcl_mp.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_rep@o@: $(srcdir)/tcl/tcl_rep.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_seq@o@: $(srcdir)/tcl/tcl_seq.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_txn@o@: $(srcdir)/tcl/tcl_txn.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-tcl_util@o@: $(srcdir)/tcl/tcl_util.c
-	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
-
-##################################################
-# RPC build rules.
-##################################################
-# RPC client files
-client@o@: $(srcdir)/rpc_client/client.c
-	$(CC) $(CFLAGS) $?
-db_server_clnt@o@: db_server_clnt.c
-	$(CC) $(CFLAGS) $?
-gen_client@o@: $(srcdir)/rpc_client/gen_client.c
-	$(CC) $(CFLAGS) $?
-gen_client_ret@o@: $(srcdir)/rpc_client/gen_client_ret.c
-	$(CC) $(CFLAGS) $?
-
-# RPC server files
-db_server_proc@o@: $(srcdir)/rpc_server/c/db_server_proc.c
-	$(CC) $(CFLAGS) $?
-db_server_svc@o@: db_server_svc.c
-	$(CC) $(CFLAGS) $?
-db_server_util@o@: $(srcdir)/rpc_server/c/db_server_util.c
-	$(CC) $(CFLAGS) $?
-db_server_xdr@o@: db_server_xdr.c
-	$(CC) $(CFLAGS) $?
-gen_db_server@o@: gen_db_server.c
-	$(CC) $(CFLAGS) $?
-db_server_cxxproc@o@: $(srcdir)/rpc_server/cxx/db_server_cxxproc.cpp
-	$(CXX) $(CXXFLAGS) $?
-db_server_cxxutil@o@: $(srcdir)/rpc_server/cxx/db_server_cxxutil.cpp
-	$(CXX) $(CXXFLAGS) $?
-
-##################################################
-# Utility build rules.
-##################################################
-db_archive@o@: $(srcdir)/db_archive/db_archive.c
-	$(CC) $(CFLAGS) $?
-db_checkpoint@o@: $(srcdir)/db_checkpoint/db_checkpoint.c
-	$(CC) $(CFLAGS) $?
-db_deadlock@o@: $(srcdir)/db_deadlock/db_deadlock.c
-	$(CC) $(CFLAGS) $?
-db_dump@o@: $(srcdir)/db_dump/db_dump.c
-	$(CC) $(CFLAGS) $?
-db_dump185@o@: $(srcdir)/db_dump185/db_dump185.c
-	$(CC) $(DB185INC) $?
-db_hotbackup@o@: $(srcdir)/db_hotbackup/db_hotbackup.c
-	$(CC) $(CFLAGS) $?
-db_load@o@: $(srcdir)/db_load/db_load.c
-	$(CC) $(CFLAGS) $?
-db_printlog@o@: $(srcdir)/db_printlog/db_printlog.c
-	$(CC) $(CFLAGS) $?
-db_recover@o@: $(srcdir)/db_recover/db_recover.c
-	$(CC) $(CFLAGS) $?
-db_stat@o@: $(srcdir)/db_stat/db_stat.c
-	$(CC) $(CFLAGS) $?
-db_upgrade@o@: $(srcdir)/db_upgrade/db_upgrade.c
-	$(CC) $(CFLAGS) $?
-db_verify@o@: $(srcdir)/db_verify/db_verify.c
-	$(CC) $(CFLAGS) $?
-
-##################################################
-# C library replacement files.
-##################################################
-getcwd@o@: $(srcdir)/clib/getcwd.c
-	$(CC) $(CFLAGS) $?
-getopt@o@: $(srcdir)/clib/getopt.c
-	$(CC) $(CFLAGS) $?
-memcmp@o@: $(srcdir)/clib/memcmp.c
-	$(CC) $(CFLAGS) $?
-memcpy@o@: $(srcdir)/clib/memmove.c
-	$(CC) -DMEMCOPY $(CFLAGS) $? -o $@
-memmove@o@: $(srcdir)/clib/memmove.c
-	$(CC) -DMEMMOVE $(CFLAGS) $?
-raise@o@: $(srcdir)/clib/raise.c
-	$(CC) $(CFLAGS) $?
-strcasecmp@o@: $(srcdir)/clib/strcasecmp.c
-	$(CC) $(CFLAGS) $?
-strdup@o@: $(srcdir)/clib/strdup.c
-	$(CC) $(CFLAGS) $?
-snprintf@o@: $(srcdir)/clib/snprintf.c
-	$(CC) $(CFLAGS) $?
-strerror@o@: $(srcdir)/clib/strerror.c
-	$(CC) $(CFLAGS) $?
-strtol@o@: $(srcdir)/clib/strtol.c
-	$(CC) $(CFLAGS) $?
-strtoul@o@: $(srcdir)/clib/strtoul.c
-	$(CC) $(CFLAGS) $?
diff --git a/storage/bdb/dist/RELEASE b/storage/bdb/dist/RELEASE
deleted file mode 100644
index e20f91edeb2..00000000000
--- a/storage/bdb/dist/RELEASE
+++ /dev/null
@@ -1,11 +0,0 @@
-# $Id: RELEASE,v 12.17 2005/11/12 17:43:39 bostic Exp $
-
-DB_VERSION_MAJOR=4
-DB_VERSION_MINOR=4
-DB_VERSION_PATCH=16
-DB_VERSION="$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH"
-
-DB_VERSION_UNIQUE_NAME=`printf "_%d%03d" $DB_VERSION_MAJOR $DB_VERSION_MINOR`
-
-DB_RELEASE_DATE=`date "+%B %e, %Y"`
-DB_VERSION_STRING="Sleepycat Software: Berkeley DB $DB_VERSION: ($DB_RELEASE_DATE)"
diff --git a/storage/bdb/dist/aclocal/config.ac b/storage/bdb/dist/aclocal/config.ac
deleted file mode 100644
index 717a0becc2f..00000000000
--- a/storage/bdb/dist/aclocal/config.ac
+++ /dev/null
@@ -1,62 +0,0 @@
-# Features we don't test for, but want the #defines to exist for
-# other ports.
-AH_TEMPLATE(DB_WIN32,
-    [We use DB_WIN32 much as one would use _WIN32 -- to specify that
-    we're using an operating system environment that supports Win32
-    calls and semantics. We don't use _WIN32 because Cygwin/GCC also
-    defines _WIN32, even though Cygwin/GCC closely emulates the Unix
-    environment.])
-
-AH_TEMPLATE(HAVE_VXWORKS, [Define to 1 if building VxWorks.])
-
-AH_TEMPLATE(HAVE_FILESYSTEM_NOTZERO,
-    [Define to 1 if allocated filesystem blocks are not zeroed.])
-
-AH_TEMPLATE(HAVE_UNLINK_WITH_OPEN_FAILURE,
-    [Define to 1 if unlink of file with open file descriptors will fail.])
-
-AH_BOTTOM([/*
- * Exit success/failure macros.
- */
-#ifndef HAVE_EXIT_SUCCESS
-#define EXIT_FAILURE 1
-#define EXIT_SUCCESS 0
-#endif
-
-/*
- * Don't step on the namespace. Other libraries may have their own
- * implementations of these functions, we don't want to use their
- * implementations or force them to use ours based on the load order.
- */
-#ifndef HAVE_GETCWD
-#define getcwd __db_Cgetcwd
-#endif
-#ifndef HAVE_MEMCMP
-#define memcmp __db_Cmemcmp
-#endif
-#ifndef HAVE_MEMCPY
-#define memcpy __db_Cmemcpy
-#endif
-#ifndef HAVE_MEMMOVE
-#define memmove __db_Cmemmove
-#endif
-#ifndef HAVE_RAISE
-#define raise __db_Craise
-#endif
-#ifndef HAVE_SNPRINTF
-#define snprintf __db_Csnprintf
-#endif
-#ifndef HAVE_STRCASECMP
-#define strcasecmp __db_Cstrcasecmp
-#define strncasecmp __db_Cstrncasecmp
-#endif
-#ifndef HAVE_STRERROR
-#define strerror __db_Cstrerror
-#endif
-#ifndef HAVE_VSNPRINTF
-#define vsnprintf __db_Cvsnprintf
-#endif
-
-#ifdef DB_WIN32
-#include "win_db.h"
-#endif])
diff --git a/storage/bdb/dist/aclocal/cxx.ac b/storage/bdb/dist/aclocal/cxx.ac
deleted file mode 100644
index 49103cc661a..00000000000
--- a/storage/bdb/dist/aclocal/cxx.ac
+++ /dev/null
@@ -1,17 +0,0 @@
-# C++ checks to determine what style of headers to use and
-# whether to use "using" clauses.
-
-AC_DEFUN(AC_CXX_HAVE_STDHEADERS, [
-AC_SUBST(cxx_have_stdheaders)
-AC_CACHE_CHECK([whether C++ supports the ISO C++ standard includes],
-db_cv_cxx_have_stdheaders,
-[AC_LANG_SAVE
- AC_LANG_CPLUSPLUS
- AC_TRY_COMPILE([#include <iostream>
-],[std::ostream *o; return 0;],
- db_cv_cxx_have_stdheaders=yes, db_cv_cxx_have_stdheaders=no)
- AC_LANG_RESTORE
-])
-if test "$db_cv_cxx_have_stdheaders" = yes; then
- cxx_have_stdheaders="#define HAVE_CXX_STDHEADERS 1"
-fi])
diff --git a/storage/bdb/dist/aclocal/gcc.ac b/storage/bdb/dist/aclocal/gcc.ac
deleted file mode 100644
index 0949d982f17..00000000000
--- a/storage/bdb/dist/aclocal/gcc.ac
+++ /dev/null
@@ -1,36 +0,0 @@
-# Version 2.96 of gcc (shipped with RedHat Linux 7.[01] and Mandrake) had
-# serious problems.
-AC_DEFUN(AC_GCC_CONFIG1, [
-AC_CACHE_CHECK([whether we are using gcc version 2.96],
-db_cv_gcc_2_96, [
-db_cv_gcc_2_96=no
-if test "$GCC" = "yes"; then
-	GCC_VERSION=`${MAKEFILE_CC} --version`
-	case ${GCC_VERSION} in
-	2.96*)
-		db_cv_gcc_2_96=yes;;
-	esac
-fi])
-if test "$db_cv_gcc_2_96" = "yes"; then
-	CFLAGS=`echo "$CFLAGS" | sed 's/-O2/-O/'`
-	CXXFLAGS=`echo "$CXXFLAGS" | sed 's/-O2/-O/'`
-	AC_MSG_WARN([INSTALLED GCC COMPILER HAS SERIOUS BUGS; PLEASE UPGRADE.])
-	AC_MSG_WARN([GCC OPTIMIZATION LEVEL SET TO -O.])
-fi])
-
-# Versions of g++ up to 2.8.0 required -fhandle-exceptions, but it is
-# renamed as -fexceptions and is the default in versions 2.8.0 and after.
-AC_DEFUN(AC_GCC_CONFIG2, [
-AC_CACHE_CHECK([whether g++ requires -fhandle-exceptions],
-db_cv_gxx_except, [
-db_cv_gxx_except=no;
-if test "$GXX" = "yes"; then
-	GXX_VERSION=`${MAKEFILE_CXX} --version`
-	case ${GXX_VERSION} in
-	1.*|2.[[01234567]].*|*-1.*|*-2.[[01234567]].*)
-		db_cv_gxx_except=yes;;
-	esac
-fi])
-if test "$db_cv_gxx_except" = "yes"; then
-	CXXFLAGS="$CXXFLAGS -fhandle-exceptions"
-fi])
diff --git a/storage/bdb/dist/aclocal/libtool.ac b/storage/bdb/dist/aclocal/libtool.ac
deleted file mode 100644
index 771b86f32dd..00000000000
--- a/storage/bdb/dist/aclocal/libtool.ac
+++ /dev/null
@@ -1,6184 +0,0 @@
-# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
-## Copyright 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005
-## Free Software Foundation, Inc.
-## Originally by Gordon Matzigkeit , 1996 -## -## This file is free software; the Free Software Foundation gives -## unlimited permission to copy and/or distribute it, with or without -## modifications, as long as this notice is preserved. - -# serial 47 AC_PROG_LIBTOOL - - -# AC_PROVIDE_IFELSE(MACRO-NAME, IF-PROVIDED, IF-NOT-PROVIDED) -# ----------------------------------------------------------- -# If this macro is not defined by Autoconf, define it here. -m4_ifdef([AC_PROVIDE_IFELSE], - [], - [m4_define([AC_PROVIDE_IFELSE], - [m4_ifdef([AC_PROVIDE_$1], - [$2], [$3])])]) - - -# AC_PROG_LIBTOOL -# --------------- -AC_DEFUN([AC_PROG_LIBTOOL], -[AC_REQUIRE([_AC_PROG_LIBTOOL])dnl -dnl If AC_PROG_CXX has already been expanded, run AC_LIBTOOL_CXX -dnl immediately, otherwise, hook it in at the end of AC_PROG_CXX. - AC_PROVIDE_IFELSE([AC_PROG_CXX], - [AC_LIBTOOL_CXX], - [define([AC_PROG_CXX], defn([AC_PROG_CXX])[AC_LIBTOOL_CXX - ])]) -dnl And a similar setup for Fortran 77 support - AC_PROVIDE_IFELSE([AC_PROG_F77], - [AC_LIBTOOL_F77], - [define([AC_PROG_F77], defn([AC_PROG_F77])[AC_LIBTOOL_F77 -])]) - -dnl Quote A][M_PROG_GCJ so that aclocal doesn't bring it in needlessly. -dnl If either AC_PROG_GCJ or A][M_PROG_GCJ have already been expanded, run -dnl AC_LIBTOOL_GCJ immediately, otherwise, hook it in at the end of both. - AC_PROVIDE_IFELSE([AC_PROG_GCJ], - [AC_LIBTOOL_GCJ], - [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], - [AC_LIBTOOL_GCJ], - [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ], - [AC_LIBTOOL_GCJ], - [ifdef([AC_PROG_GCJ], - [define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[AC_LIBTOOL_GCJ])]) - ifdef([A][M_PROG_GCJ], - [define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[AC_LIBTOOL_GCJ])]) - ifdef([LT_AC_PROG_GCJ], - [define([LT_AC_PROG_GCJ], - defn([LT_AC_PROG_GCJ])[AC_LIBTOOL_GCJ])])])]) -])])# AC_PROG_LIBTOOL - - -# _AC_PROG_LIBTOOL -# ---------------- -AC_DEFUN([_AC_PROG_LIBTOOL], -[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl -AC_BEFORE([$0],[AC_LIBTOOL_CXX])dnl -AC_BEFORE([$0],[AC_LIBTOOL_F77])dnl -AC_BEFORE([$0],[AC_LIBTOOL_GCJ])dnl - -# This can be used to rebuild libtool when needed -LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" - -# Always use our own libtool. -LIBTOOL='$(SHELL) $(top_builddir)/libtool' -AC_SUBST(LIBTOOL)dnl - -# Prevent multiple expansion -define([AC_PROG_LIBTOOL], []) -])# _AC_PROG_LIBTOOL - - -# AC_LIBTOOL_SETUP -# ---------------- -AC_DEFUN([AC_LIBTOOL_SETUP], -[AC_PREREQ(2.50)dnl -AC_REQUIRE([AC_ENABLE_SHARED])dnl -AC_REQUIRE([AC_ENABLE_STATIC])dnl -AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl -AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_CANONICAL_BUILD])dnl -AC_REQUIRE([AC_PROG_CC])dnl -AC_REQUIRE([AC_PROG_LD])dnl -AC_REQUIRE([AC_PROG_LD_RELOAD_FLAG])dnl -AC_REQUIRE([AC_PROG_NM])dnl - -AC_REQUIRE([AC_PROG_LN_S])dnl -AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl -# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! -AC_REQUIRE([AC_OBJEXT])dnl -AC_REQUIRE([AC_EXEEXT])dnl -dnl - -AC_LIBTOOL_SYS_MAX_CMD_LEN -AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE -AC_LIBTOOL_OBJDIR - -AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl -_LT_AC_PROG_ECHO_BACKSLASH - -case $host_os in -aix3*) - # AIX sometimes has problems with the GCC collect2 program. For some - # reason, if we set the COLLECT_NAMES environment variable, the problems - # vanish in a puff of smoke. - if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES - fi - ;; -esac - -# Sed substitution that helps us do robust quoting. It backslashifies -# metacharacters that are still active within double-quoted strings. 
-Xsed='sed -e 1s/^X//' -[sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g'] - -# Same as above, but do not quote variable references. -[double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g'] - -# Sed substitution to delay expansion of an escaped shell variable in a -# double_quote_subst'ed string. -delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' - -# Sed substitution to avoid accidental globbing in evaled expressions -no_glob_subst='s/\*/\\\*/g' - -# Constants: -rm="rm -f" - -# Global variables: -default_ofile=libtool -can_build_shared=yes - -# All known linkers require a `.a' archive for static linking (except MSVC, -# which needs '.lib'). -libext=a -ltmain="$ac_aux_dir/ltmain.sh" -ofile="$default_ofile" -with_gnu_ld="$lt_cv_prog_gnu_ld" - -AC_CHECK_TOOL(AR, ar, false) -AC_CHECK_TOOL(RANLIB, ranlib, :) -AC_CHECK_TOOL(STRIP, strip, :) - -old_CC="$CC" -old_CFLAGS="$CFLAGS" - -# Set sane defaults for various variables -test -z "$AR" && AR=ar -test -z "$AR_FLAGS" && AR_FLAGS=cru -test -z "$AS" && AS=as -test -z "$CC" && CC=cc -test -z "$LTCC" && LTCC=$CC -test -z "$DLLTOOL" && DLLTOOL=dlltool -test -z "$LD" && LD=ld -test -z "$LN_S" && LN_S="ln -s" -test -z "$MAGIC_CMD" && MAGIC_CMD=file -test -z "$NM" && NM=nm -test -z "$SED" && SED=sed -test -z "$OBJDUMP" && OBJDUMP=objdump -test -z "$RANLIB" && RANLIB=: -test -z "$STRIP" && STRIP=: -test -z "$ac_objext" && ac_objext=o - -# Determine commands to create old-style static archives. -old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs' -old_postinstall_cmds='chmod 644 $oldlib' -old_postuninstall_cmds= - -if test -n "$RANLIB"; then - case $host_os in - openbsd*) - old_postinstall_cmds="\$RANLIB -t \$oldlib~$old_postinstall_cmds" - ;; - *) - old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds" - ;; - esac - old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" -fi - -_LT_CC_BASENAME([$compiler]) - -# Only perform the check for file, if the check method requires it -case $deplibs_check_method in -file_magic*) - if test "$file_magic_cmd" = '$MAGIC_CMD'; then - AC_PATH_MAGIC - fi - ;; -esac - -AC_PROVIDE_IFELSE([AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no) -AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], -enable_win32_dll=yes, enable_win32_dll=no) - -AC_ARG_ENABLE([libtool-lock], - [AC_HELP_STRING([--disable-libtool-lock], - [avoid locking (might break parallel builds)])]) -test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes - -AC_ARG_WITH([pic], - [AC_HELP_STRING([--with-pic], - [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], - [pic_mode="$withval"], - [pic_mode=default]) -test -z "$pic_mode" && pic_mode=default - -# Use C for the default configuration in the libtool script -tagname= -AC_LIBTOOL_LANG_C_CONFIG -_LT_AC_TAGCONFIG -])# AC_LIBTOOL_SETUP - - -# _LT_AC_SYS_COMPILER -# ------------------- -AC_DEFUN([_LT_AC_SYS_COMPILER], -[AC_REQUIRE([AC_PROG_CC])dnl - -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} - -# Allow CC to be a program name with arguments. -compiler=$CC -])# _LT_AC_SYS_COMPILER - - -# _LT_CC_BASENAME(CC) -# ------------------- -# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
-AC_DEFUN([_LT_CC_BASENAME], -[for cc_temp in $1""; do - case $cc_temp in - compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; - distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` -]) - - -# _LT_COMPILER_BOILERPLATE -# ------------------------ -# Check for compiler boilerplate output or warnings with -# the simple compiler test code. -AC_DEFUN([_LT_COMPILER_BOILERPLATE], -[ac_outfile=conftest.$ac_objext -printf "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$rm conftest* -])# _LT_COMPILER_BOILERPLATE - - -# _LT_LINKER_BOILERPLATE -# ---------------------- -# Check for linker boilerplate output or warnings with -# the simple link test code. -AC_DEFUN([_LT_LINKER_BOILERPLATE], -[ac_outfile=conftest.$ac_objext -printf "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$rm conftest* -])# _LT_LINKER_BOILERPLATE - - -# _LT_AC_SYS_LIBPATH_AIX -# ---------------------- -# Links a minimal program and checks the executable -# for the system default hardcoded library path. In most cases, -# this is /usr/lib:/lib, but when the MPI compilers are used -# the location of the communication and MPI libs are included too. -# If we don't find anything, use the default library path according -# to the aix ld manual. -AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX], -[AC_LINK_IFELSE(AC_LANG_PROGRAM,[ -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } -}'` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } -}'`; fi],[]) -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -])# _LT_AC_SYS_LIBPATH_AIX - - -# _LT_AC_SHELL_INIT(ARG) -# ---------------------- -AC_DEFUN([_LT_AC_SHELL_INIT], -[ifdef([AC_DIVERSION_NOTICE], - [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], - [AC_DIVERT_PUSH(NOTICE)]) -$1 -AC_DIVERT_POP -])# _LT_AC_SHELL_INIT - - -# _LT_AC_PROG_ECHO_BACKSLASH -# -------------------------- -# Add some code to the start of the generated configure script which -# will find an echo command which doesn't interpret backslashes. -AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH], -[_LT_AC_SHELL_INIT([ -# Check that we are running under the correct shell. -SHELL=${CONFIG_SHELL-/bin/sh} - -case X$ECHO in -X*--fallback-echo) - # Remove one level of quotation (which was required for Make). - ECHO=`echo "$ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` - ;; -esac - -echo=${ECHO-echo} -if test "X[$]1" = X--no-reexec; then - # Discard the --no-reexec flag, and continue. - shift -elif test "X[$]1" = X--fallback-echo; then - # Avoid inline document here, it may be left over - : -elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then - # Yippee, $echo works! - : -else - # Restart under the correct shell. 
- exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} -fi - -if test "X[$]1" = X--fallback-echo; then - # used as fallback echo - shift - cat </dev/null 2>&1 && unset CDPATH - -if test -z "$ECHO"; then -if test "X${echo_test_string+set}" != Xset; then -# find a string as large as possible, as long as the shell can cope with it - for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do - # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... - if (echo_test_string=`eval $cmd`) 2>/dev/null && - echo_test_string=`eval $cmd` && - (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null - then - break - fi - done -fi - -if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - : -else - # The Solaris, AIX, and Digital Unix default echo programs unquote - # backslashes. This makes it impossible to quote backslashes using - # echo "$something" | sed 's/\\/\\\\/g' - # - # So, first we look for a working echo in the user's PATH. - - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for dir in $PATH /usr/ucb; do - IFS="$lt_save_ifs" - if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && - test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - echo="$dir/echo" - break - fi - done - IFS="$lt_save_ifs" - - if test "X$echo" = Xecho; then - # We didn't find a better echo, so look for alternatives. - if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - # This shell has a builtin print -r that does the trick. - echo='print -r' - elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) && - test "X$CONFIG_SHELL" != X/bin/ksh; then - # If we have ksh, try running configure again with it. - ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} - export ORIGINAL_CONFIG_SHELL - CONFIG_SHELL=/bin/ksh - export CONFIG_SHELL - exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} - else - # Try using printf. - echo='printf %s\n' - if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - # Cool, printf works - : - elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && - test "X$echo_testing_string" = 'X\t' && - echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL - export CONFIG_SHELL - SHELL="$CONFIG_SHELL" - export SHELL - echo="$CONFIG_SHELL [$]0 --fallback-echo" - elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && - test "X$echo_testing_string" = 'X\t' && - echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - echo="$CONFIG_SHELL [$]0 --fallback-echo" - else - # maybe with a smaller string... 
- prev=: - - for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do - if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null - then - break - fi - prev="$cmd" - done - - if test "$prev" != 'sed 50q "[$]0"'; then - echo_test_string=`eval $prev` - export echo_test_string - exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} - else - # Oops. We lost completely, so just stick with echo. - echo=echo - fi - fi - fi - fi -fi -fi - -# Copy echo and quote the copy suitably for passing to libtool from -# the Makefile, instead of quoting the original, which is used later. -ECHO=$echo -if test "X$ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then - ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" -fi - -AC_SUBST(ECHO) -])])# _LT_AC_PROG_ECHO_BACKSLASH - - -# _LT_AC_LOCK -# ----------- -AC_DEFUN([_LT_AC_LOCK], -[AC_ARG_ENABLE([libtool-lock], - [AC_HELP_STRING([--disable-libtool-lock], - [avoid locking (might break parallel builds)])]) -test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes - -# Some flags need to be propagated to the compiler or linker for good -# libtool support. -case $host in -ia64-*-hpux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.$ac_objext` in - *ELF-32*) - HPUX_IA64_MODE="32" - ;; - *ELF-64*) - HPUX_IA64_MODE="64" - ;; - esac - fi - rm -rf conftest* - ;; -*-*-irix6*) - # Find out which ABI we are using. - echo '[#]line __oline__ "configure"' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - if test "$lt_cv_prog_gnu_ld" = yes; then - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -melf32bsmip" - ;; - *N32*) - LD="${LD-ld} -melf32bmipn32" - ;; - *64-bit*) - LD="${LD-ld} -melf64bmip" - ;; - esac - else - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -32" - ;; - *N32*) - LD="${LD-ld} -n32" - ;; - *64-bit*) - LD="${LD-ld} -64" - ;; - esac - fi - fi - rm -rf conftest* - ;; - -x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.o` in - *32-bit*) - case $host in - x86_64-*linux*) - LD="${LD-ld} -m elf_i386" - ;; - ppc64-*linux*|powerpc64-*linux*) - LD="${LD-ld} -m elf32ppclinux" - ;; - s390x-*linux*) - LD="${LD-ld} -m elf_s390" - ;; - sparc64-*linux*) - LD="${LD-ld} -m elf32_sparc" - ;; - esac - ;; - *64-bit*) - case $host in - x86_64-*linux*) - LD="${LD-ld} -m elf_x86_64" - ;; - ppc*-*linux*|powerpc*-*linux*) - LD="${LD-ld} -m elf64ppc" - ;; - s390*-*linux*) - LD="${LD-ld} -m elf64_s390" - ;; - sparc*-*linux*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; - -*-*-sco3.2v5*) - # On SCO OpenServer 5, we need -belf to get full-featured binaries. 
- SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -belf" - AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, - [AC_LANG_PUSH(C) - AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) - AC_LANG_POP]) - if test x"$lt_cv_cc_needs_belf" != x"yes"; then - # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf - CFLAGS="$SAVE_CFLAGS" - fi - ;; -AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], -[*-*-cygwin* | *-*-mingw* | *-*-pw32*) - AC_CHECK_TOOL(DLLTOOL, dlltool, false) - AC_CHECK_TOOL(AS, as, false) - AC_CHECK_TOOL(OBJDUMP, objdump, false) - ;; - ]) -esac - -need_locks="$enable_libtool_lock" - -])# _LT_AC_LOCK - - -# AC_LIBTOOL_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, -# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) -# ---------------------------------------------------------------- -# Check whether the given compiler option works -AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], -[AC_REQUIRE([LT_AC_PROG_SED]) -AC_CACHE_CHECK([$1], [$2], - [$2=no - ifelse([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) - printf "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$3" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&AS_MESSAGE_LOG_FD - echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp - $SED '/^$/d' conftest.err >conftest.er2 - if test ! -s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then - $2=yes - fi - fi - $rm conftest* -]) - -if test x"[$]$2" = xyes; then - ifelse([$5], , :, [$5]) -else - ifelse([$6], , :, [$6]) -fi -])# AC_LIBTOOL_COMPILER_OPTION - - -# AC_LIBTOOL_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, -# [ACTION-SUCCESS], [ACTION-FAILURE]) -# ------------------------------------------------------------ -# Check whether the given compiler option works -AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], -[AC_CACHE_CHECK([$1], [$2], - [$2=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $3" - printf "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. 
- cat conftest.err 1>&AS_MESSAGE_LOG_FD - $echo "X$_lt_linker_boilerplate" | $Xsed > conftest.exp - $SED '/^$/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - $2=yes - fi - else - $2=yes - fi - fi - $rm conftest* - LDFLAGS="$save_LDFLAGS" -]) - -if test x"[$]$2" = xyes; then - ifelse([$4], , :, [$4]) -else - ifelse([$5], , :, [$5]) -fi -])# AC_LIBTOOL_LINKER_OPTION - - -# AC_LIBTOOL_SYS_MAX_CMD_LEN -# -------------------------- -AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], -[# find the maximum length of command line arguments -AC_MSG_CHECKING([the maximum length of command line arguments]) -AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl - i=0 - teststring="ABCD" - - case $build_os in - msdosdjgpp*) - # On DJGPP, this test can blow up pretty badly due to problems in libc - # (any single argument exceeding 2000 bytes causes a buffer overrun - # during glob expansion). Even if it were fixed, the result of this - # check would be larger than it should be. - lt_cv_sys_max_cmd_len=12288; # 12K is about right - ;; - - gnu*) - # Under GNU Hurd, this test is not required because there is - # no limit to the length of command line arguments. - # Libtool will interpret -1 as no limit whatsoever - lt_cv_sys_max_cmd_len=-1; - ;; - - cygwin* | mingw*) - # On Win9x/ME, this test blows up -- it succeeds, but takes - # about 5 minutes as the teststring grows exponentially. - # Worse, since 9x/ME are not pre-emptively multitasking, - # you end up with a "frozen" computer, even though with patience - # the test eventually succeeds (with a max line length of 256k). - # Instead, let's just punt: use the minimum linelength reported by - # all of the supported platforms: 8192 (on NT/2K/XP). - lt_cv_sys_max_cmd_len=8192; - ;; - - amigaos*) - # On AmigaOS with pdksh, this test takes hours, literally. - # So we just punt and use a minimum line length of 8192. - lt_cv_sys_max_cmd_len=8192; - ;; - - netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) - # This has been around since 386BSD, at least. Likely further. - if test -x /sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` - elif test -x /usr/sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` - else - lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs - fi - # And add a safety zone - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - ;; - osf*) - # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure - # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not - # nice to cause kernel panics so lets avoid the loop below. - # First set a reasonable default. - lt_cv_sys_max_cmd_len=16384 - # - if test -x /sbin/sysconfig; then - case `/sbin/sysconfig -q proc exec_disable_arg_limit` in - *1*) lt_cv_sys_max_cmd_len=-1 ;; - esac - fi - ;; - *) - # If test is not a shell built-in, we'll probably end up computing a - # maximum length that is only half of the actual maximum length, but - # we can't tell. 
- SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} - while (test "X"`$SHELL [$]0 --fallback-echo "X$teststring" 2>/dev/null` \ - = "XX$teststring") >/dev/null 2>&1 && - new_result=`expr "X$teststring" : ".*" 2>&1` && - lt_cv_sys_max_cmd_len=$new_result && - test $i != 17 # 1/2 MB should be enough - do - i=`expr $i + 1` - teststring=$teststring$teststring - done - teststring= - # Add a significant safety factor because C++ compilers can tack on massive - # amounts of additional arguments before passing them to the linker. - # It appears as though 1/2 is a usable value. - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` - ;; - esac -]) -if test -n $lt_cv_sys_max_cmd_len ; then - AC_MSG_RESULT($lt_cv_sys_max_cmd_len) -else - AC_MSG_RESULT(none) -fi -])# AC_LIBTOOL_SYS_MAX_CMD_LEN - - -# _LT_AC_CHECK_DLFCN -# -------------------- -AC_DEFUN([_LT_AC_CHECK_DLFCN], -[AC_CHECK_HEADERS(dlfcn.h)dnl -])# _LT_AC_CHECK_DLFCN - - -# _LT_AC_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, -# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) -# ------------------------------------------------------------------ -AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF], -[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl -if test "$cross_compiling" = yes; then : - [$4] -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext < -#endif - -#include - -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif - -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. */ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif - -#ifdef __cplusplus -extern "C" void exit (int); -#endif - -void fnord() { int i=42;} -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - /* dlclose (self); */ - } - - exit (status); -}] -EOF - if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then - (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null - lt_status=$? 
- case x$lt_status in - x$lt_dlno_uscore) $1 ;; - x$lt_dlneed_uscore) $2 ;; - x$lt_unknown|x*) $3 ;; - esac - else : - # compilation failed - $3 - fi -fi -rm -fr conftest* -])# _LT_AC_TRY_DLOPEN_SELF - - -# AC_LIBTOOL_DLOPEN_SELF -# ------------------- -AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], -[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl -if test "x$enable_dlopen" != xyes; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown -else - lt_cv_dlopen=no - lt_cv_dlopen_libs= - - case $host_os in - beos*) - lt_cv_dlopen="load_add_on" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ;; - - mingw* | pw32*) - lt_cv_dlopen="LoadLibrary" - lt_cv_dlopen_libs= - ;; - - cygwin*) - lt_cv_dlopen="dlopen" - lt_cv_dlopen_libs= - ;; - - darwin*) - # if libdl is installed we need to link against it - AC_CHECK_LIB([dl], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ - lt_cv_dlopen="dyld" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ]) - ;; - - *) - AC_CHECK_FUNC([shl_load], - [lt_cv_dlopen="shl_load"], - [AC_CHECK_LIB([dld], [shl_load], - [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"], - [AC_CHECK_FUNC([dlopen], - [lt_cv_dlopen="dlopen"], - [AC_CHECK_LIB([dl], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], - [AC_CHECK_LIB([svld], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], - [AC_CHECK_LIB([dld], [dld_link], - [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"]) - ]) - ]) - ]) - ]) - ]) - ;; - esac - - if test "x$lt_cv_dlopen" != xno; then - enable_dlopen=yes - else - enable_dlopen=no - fi - - case $lt_cv_dlopen in - dlopen) - save_CPPFLAGS="$CPPFLAGS" - test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" - - save_LDFLAGS="$LDFLAGS" - eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" - - save_LIBS="$LIBS" - LIBS="$lt_cv_dlopen_libs $LIBS" - - AC_CACHE_CHECK([whether a program can dlopen itself], - lt_cv_dlopen_self, [dnl - _LT_AC_TRY_DLOPEN_SELF( - lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, - lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) - ]) - - if test "x$lt_cv_dlopen_self" = xyes; then - LDFLAGS="$LDFLAGS $link_static_flag" - AC_CACHE_CHECK([whether a statically linked program can dlopen itself], - lt_cv_dlopen_self_static, [dnl - _LT_AC_TRY_DLOPEN_SELF( - lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, - lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) - ]) - fi - - CPPFLAGS="$save_CPPFLAGS" - LDFLAGS="$save_LDFLAGS" - LIBS="$save_LIBS" - ;; - esac - - case $lt_cv_dlopen_self in - yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; - *) enable_dlopen_self=unknown ;; - esac - - case $lt_cv_dlopen_self_static in - yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; - *) enable_dlopen_self_static=unknown ;; - esac -fi -])# AC_LIBTOOL_DLOPEN_SELF - - -# AC_LIBTOOL_PROG_CC_C_O([TAGNAME]) -# --------------------------------- -# Check to see if options -c and -o are simultaneously supported by compiler -AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O], -[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl -AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], - [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)], - [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no - $rm -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - printf "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. 
- # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&AS_MESSAGE_LOG_FD - echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $echo "X$_lt_compiler_boilerplate" | $Xsed > out/conftest.exp - $SED '/^$/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.err || diff out/conftest.exp out/conftest.er2 >/dev/null; then - _LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes - fi - fi - chmod u+w . 2>&AS_MESSAGE_LOG_FD - $rm conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files - $rm out/* && rmdir out - cd .. - rmdir conftest - $rm conftest* -]) -])# AC_LIBTOOL_PROG_CC_C_O - - -# AC_LIBTOOL_SYS_HARD_LINK_LOCKS([TAGNAME]) -# ----------------------------------------- -# Check to see if we can do hard links to lock some files if needed -AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], -[AC_REQUIRE([_LT_AC_LOCK])dnl - -hard_links="nottested" -if test "$_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then - # do not overwrite the value of need_locks provided by the user - AC_MSG_CHECKING([if we can lock with hard links]) - hard_links=yes - $rm conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - AC_MSG_RESULT([$hard_links]) - if test "$hard_links" = no; then - AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) - need_locks=warn - fi -else - need_locks=no -fi -])# AC_LIBTOOL_SYS_HARD_LINK_LOCKS - - -# AC_LIBTOOL_OBJDIR -# ----------------- -AC_DEFUN([AC_LIBTOOL_OBJDIR], -[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], -[rm -f .libs 2>/dev/null -mkdir .libs 2>/dev/null -if test -d .libs; then - lt_cv_objdir=.libs -else - # MS-DOS does not allow filenames that begin with a dot. - lt_cv_objdir=_libs -fi -rmdir .libs 2>/dev/null]) -objdir=$lt_cv_objdir -])# AC_LIBTOOL_OBJDIR - - -# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH([TAGNAME]) -# ---------------------------------------------- -# Check hardcoding attributes. -AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], -[AC_MSG_CHECKING([how to hardcode library paths into programs]) -_LT_AC_TAGVAR(hardcode_action, $1)= -if test -n "$_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)" || \ - test -n "$_LT_AC_TAGVAR(runpath_var, $1)" || \ - test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then - - # We can hardcode non-existant directories. - if test "$_LT_AC_TAGVAR(hardcode_direct, $1)" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)" != no && - test "$_LT_AC_TAGVAR(hardcode_minus_L, $1)" != no; then - # Linking always hardcodes the temporary library directory. 
- _LT_AC_TAGVAR(hardcode_action, $1)=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - _LT_AC_TAGVAR(hardcode_action, $1)=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - _LT_AC_TAGVAR(hardcode_action, $1)=unsupported -fi -AC_MSG_RESULT([$_LT_AC_TAGVAR(hardcode_action, $1)]) - -if test "$_LT_AC_TAGVAR(hardcode_action, $1)" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi -])# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH - - -# AC_LIBTOOL_SYS_LIB_STRIP -# ------------------------ -AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP], -[striplib= -old_striplib= -AC_MSG_CHECKING([whether stripping libraries is possible]) -if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - AC_MSG_RESULT([yes]) -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) -fi - ;; - *) - AC_MSG_RESULT([no]) - ;; - esac -fi -])# AC_LIBTOOL_SYS_LIB_STRIP - - -# AC_LIBTOOL_SYS_DYNAMIC_LINKER -# ----------------------------- -# PORTME Fill in your ld.so characteristics -AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER], -[AC_MSG_CHECKING([dynamic linker characteristics]) -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -if test "$GCC" = yes; then - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi -need_lib_prefix=unknown -hardcode_into_libs=no - -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown - -case $host_os in -aix3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' - shlibpath_var=LIBPATH - - # AIX 3 has no versioning support, so we append a major version to the name. 
- soname_spec='${libname}${release}${shared_ext}$major' - ;; - -aix4* | aix5*) - version_type=linux - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test "$host_cpu" = ia64; then - # AIX 5 supports IA64 - library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line `#! .'. This would cause the generated library to - # depend on `.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. - case $host_os in - aix4 | aix4.[[01]] | aix4.[[01]].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # AIX (on Power*) has no versioning support, so currently we can not hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - if test "$aix_use_runtimelinking" = yes; then - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - else - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='${libname}${release}.a $libname.a' - soname_spec='${libname}${release}${shared_ext}$major' - fi - shlibpath_var=LIBPATH - fi - ;; - -amigaos*) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; - -beos*) - library_names_spec='${libname}${shared_ext}' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; - -bsdi[[45]]*) - version_type=linux - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; - -cygwin* | mingw* | pw32*) - version_type=windows - shrext_cmds=".dll" - need_version=no - need_lib_prefix=no - - case $GCC,$host_os in - yes,cygwin* | yes,mingw* | yes,pw32*) - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ - dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $rm \$dlpath' - shlibpath_overrides_runpath=yes - - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" - ;; - mingw*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | [grep ';[c-zC-Z]:/' >/dev/null]; then - # It is most probably a Windows format PATH printed by - # mingw gcc, but we are running on Cygwin. Gcc prints its search - # path with ; separators, and with drive letters. We can handle the - # drive letters (cygwin fileutils understands them), so leave them, - # especially as we might pass files found there to a mingw objdump, - # which wouldn't understand a cygwinified path. Ahh. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - ;; - esac - ;; - - *) - library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' - ;; - esac - dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH - ;; - -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' - soname_spec='${libname}${release}${major}$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' - # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
- if test "$GCC" = yes; then - sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` - else - sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' - fi - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; - -dgux*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -freebsd1*) - dynamic_linker=no - ;; - -kfreebsd*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='GNU ld.so' - ;; - -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. - if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[[123]]*) objformat=aout ;; - *) objformat=elf ;; - esac - fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[[01]]* | freebsdelf3.[[01]]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - *) # from 3.2 on - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - esac - ;; - -gnu*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - ;; - -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. - version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - if test "X$HPUX_IA64_MODE" = X32; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - fi - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
- library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555. - postinstall_cmds='chmod 555 $lib' - ;; - -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test "$lt_cv_prog_gnu_ld" = yes; then - version_type=linux - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" - sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" - hardcode_into_libs=yes - ;; - -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; - -# This must be Linux ELF. -linux*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. - hardcode_into_libs=yes - - # Append ld.so.conf contents to the search path - if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" - fi - - # We used to test for /lib/ld.so.1 and disable shared libraries on - # powerpc, because MkLinux only supported shared libraries with the - # GNU dynamic linker. Since this was broken with cross compilers, - # most powerpc-linux boxes support dynamic linking these days and - # people can always --disable-shared, the test was removed, and we - # assume the GNU/Linux dynamic linker is in use. 
- dynamic_linker='GNU/Linux ld.so' - ;; - -knetbsd*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='GNU ld.so' - ;; - -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - -newsos6) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -nto-qnx*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -openbsd*) - version_type=sunos - need_lib_prefix=no - # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
- case $host_os in - openbsd3.3 | openbsd3.3.*) need_version=yes ;; - *) need_version=no ;; - esac - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - case $host_os in - openbsd2.[[89]] | openbsd2.[[89]].*) - shlibpath_overrides_runpath=no - ;; - *) - shlibpath_overrides_runpath=yes - ;; - esac - else - shlibpath_overrides_runpath=yes - fi - ;; - -os2*) - libname_spec='$name' - shrext_cmds=".dll" - need_lib_prefix=no - library_names_spec='$libname${shared_ext} $libname.a' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=LIBPATH - ;; - -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" - ;; - -sco3.2v5*) - version_type=osf - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - ;; - -solaris*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; - -sunos4*) - version_type=sunos - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if test "$with_gnu_ld" = yes; then - need_lib_prefix=no - fi - need_version=yes - ;; - -sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - export_dynamic_flag_spec='${wl}-Blargedynsym' - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; - esac - ;; - -sysv4*MP*) - if test -d /usr/nec ;then - version_type=linux - library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' - soname_spec='$libname${shared_ext}.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; - -uts4*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -*) - dynamic_linker=no - ;; -esac -AC_MSG_RESULT([$dynamic_linker]) -test "$dynamic_linker" = no && 
can_build_shared=no -])# AC_LIBTOOL_SYS_DYNAMIC_LINKER - - -# _LT_AC_TAGCONFIG -# ---------------- -AC_DEFUN([_LT_AC_TAGCONFIG], -[AC_ARG_WITH([tags], - [AC_HELP_STRING([--with-tags@<:@=TAGS@:>@], - [include additional configurations @<:@automatic@:>@])], - [tagnames="$withval"]) - -if test -f "$ltmain" && test -n "$tagnames"; then - if test ! -f "${ofile}"; then - AC_MSG_WARN([output file `$ofile' does not exist]) - fi - - if test -z "$LTCC"; then - eval "`$SHELL ${ofile} --config | grep '^LTCC='`" - if test -z "$LTCC"; then - AC_MSG_WARN([output file `$ofile' does not look like a libtool script]) - else - AC_MSG_WARN([using `LTCC=$LTCC', extracted from `$ofile']) - fi - fi - - # Extract list of available tagged configurations in $ofile. - # Note that this assumes the entire list is on one line. - available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` - - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for tagname in $tagnames; do - IFS="$lt_save_ifs" - # Check whether tagname contains only valid characters - case `$echo "X$tagname" | $Xsed -e 's:[[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]]::g'` in - "") ;; - *) AC_MSG_ERROR([invalid tag name: $tagname]) - ;; - esac - - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null - then - AC_MSG_ERROR([tag name \"$tagname\" already exists]) - fi - - # Update the list of available tags. - if test -n "$tagname"; then - echo appending configuration tag \"$tagname\" to $ofile - - case $tagname in - CXX) - if test -n "$CXX" && ( test "X$CXX" != "Xno" && - ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || - (test "X$CXX" != "Xg++"))) ; then - AC_LIBTOOL_LANG_CXX_CONFIG - else - tagname="" - fi - ;; - - F77) - if test -n "$F77" && test "X$F77" != "Xno"; then - AC_LIBTOOL_LANG_F77_CONFIG - else - tagname="" - fi - ;; - - GCJ) - if test -n "$GCJ" && test "X$GCJ" != "Xno"; then - AC_LIBTOOL_LANG_GCJ_CONFIG - else - tagname="" - fi - ;; - - RC) - AC_LIBTOOL_LANG_RC_CONFIG - ;; - - *) - AC_MSG_ERROR([Unsupported tag name: $tagname]) - ;; - esac - - # Append the new tag name to the list of available tags. - if test -n "$tagname" ; then - available_tags="$available_tags $tagname" - fi - fi - done - IFS="$lt_save_ifs" - - # Now substitute the updated list of available tags. - if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then - mv "${ofile}T" "$ofile" - chmod +x "$ofile" - else - rm -f "${ofile}T" - AC_MSG_ERROR([unable to update list of available tagged configurations.]) - fi -fi -])# _LT_AC_TAGCONFIG - - -# AC_LIBTOOL_DLOPEN -# ----------------- -# enable checks for dlopen support -AC_DEFUN([AC_LIBTOOL_DLOPEN], - [AC_BEFORE([$0],[AC_LIBTOOL_SETUP]) -])# AC_LIBTOOL_DLOPEN - - -# AC_LIBTOOL_WIN32_DLL -# -------------------- -# declare package support for building win32 DLLs -AC_DEFUN([AC_LIBTOOL_WIN32_DLL], -[AC_BEFORE([$0], [AC_LIBTOOL_SETUP]) -])# AC_LIBTOOL_WIN32_DLL - - -# AC_ENABLE_SHARED([DEFAULT]) -# --------------------------- -# implement the --enable-shared flag -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
-AC_DEFUN([AC_ENABLE_SHARED], -[define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl -AC_ARG_ENABLE([shared], - [AC_HELP_STRING([--enable-shared@<:@=PKGS@:>@], - [build shared libraries @<:@default=]AC_ENABLE_SHARED_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_shared=yes ;; - no) enable_shared=no ;; - *) - enable_shared=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_shared=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_shared=]AC_ENABLE_SHARED_DEFAULT) -])# AC_ENABLE_SHARED - - -# AC_DISABLE_SHARED -# ----------------- -#- set the default shared flag to --disable-shared -AC_DEFUN([AC_DISABLE_SHARED], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl -AC_ENABLE_SHARED(no) -])# AC_DISABLE_SHARED - - -# AC_ENABLE_STATIC([DEFAULT]) -# --------------------------- -# implement the --enable-static flag -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. -AC_DEFUN([AC_ENABLE_STATIC], -[define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl -AC_ARG_ENABLE([static], - [AC_HELP_STRING([--enable-static@<:@=PKGS@:>@], - [build static libraries @<:@default=]AC_ENABLE_STATIC_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_static=yes ;; - no) enable_static=no ;; - *) - enable_static=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_static=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_static=]AC_ENABLE_STATIC_DEFAULT) -])# AC_ENABLE_STATIC - - -# AC_DISABLE_STATIC -# ----------------- -# set the default static flag to --disable-static -AC_DEFUN([AC_DISABLE_STATIC], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl -AC_ENABLE_STATIC(no) -])# AC_DISABLE_STATIC - - -# AC_ENABLE_FAST_INSTALL([DEFAULT]) -# --------------------------------- -# implement the --enable-fast-install flag -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. -AC_DEFUN([AC_ENABLE_FAST_INSTALL], -[define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl -AC_ARG_ENABLE([fast-install], - [AC_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], - [optimize for fast installation @<:@default=]AC_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_fast_install=yes ;; - no) enable_fast_install=no ;; - *) - enable_fast_install=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_fast_install=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_fast_install=]AC_ENABLE_FAST_INSTALL_DEFAULT) -])# AC_ENABLE_FAST_INSTALL - - -# AC_DISABLE_FAST_INSTALL -# ----------------------- -# set the default to --disable-fast-install -AC_DEFUN([AC_DISABLE_FAST_INSTALL], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl -AC_ENABLE_FAST_INSTALL(no) -])# AC_DISABLE_FAST_INSTALL - - -# AC_LIBTOOL_PICMODE([MODE]) -# -------------------------- -# implement the --with-pic flag -# MODE is either `yes' or `no'. If omitted, it defaults to `both'. 
-AC_DEFUN([AC_LIBTOOL_PICMODE], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl -pic_mode=ifelse($#,1,$1,default) -])# AC_LIBTOOL_PICMODE - - -# AC_PROG_EGREP -# ------------- -# This is predefined starting with Autoconf 2.54, so this conditional -# definition can be removed once we require Autoconf 2.54 or later. -m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP], -[AC_CACHE_CHECK([for egrep], [ac_cv_prog_egrep], - [if echo a | (grep -E '(a|b)') >/dev/null 2>&1 - then ac_cv_prog_egrep='grep -E' - else ac_cv_prog_egrep='egrep' - fi]) - EGREP=$ac_cv_prog_egrep - AC_SUBST([EGREP]) -])]) - - -# AC_PATH_TOOL_PREFIX -# ------------------- -# find a file program which can recognise shared library -AC_DEFUN([AC_PATH_TOOL_PREFIX], -[AC_REQUIRE([AC_PROG_EGREP])dnl -AC_MSG_CHECKING([for $1]) -AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, -[case $MAGIC_CMD in -[[\\/*] | ?:[\\/]*]) - lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD="$MAGIC_CMD" - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR -dnl $ac_dummy forces splitting on constant user-supplied paths. -dnl POSIX.2 word splitting is done only on the output of word expansions, -dnl not every word. This closes a longstanding sh security hole. - ac_dummy="ifelse([$2], , $PATH, [$2])" - for ac_dir in $ac_dummy; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - if test -f $ac_dir/$1; then - lt_cv_path_MAGIC_CMD="$ac_dir/$1" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD="$lt_cv_path_MAGIC_CMD" - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org - -EOF - fi ;; - esac - fi - break - fi - done - IFS="$lt_save_ifs" - MAGIC_CMD="$lt_save_MAGIC_CMD" - ;; -esac]) -MAGIC_CMD="$lt_cv_path_MAGIC_CMD" -if test -n "$MAGIC_CMD"; then - AC_MSG_RESULT($MAGIC_CMD) -else - AC_MSG_RESULT(no) -fi -])# AC_PATH_TOOL_PREFIX - - -# AC_PATH_MAGIC -# ------------- -# find a file program which can recognise a shared library -AC_DEFUN([AC_PATH_MAGIC], -[AC_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) -if test -z "$lt_cv_path_MAGIC_CMD"; then - if test -n "$ac_tool_prefix"; then - AC_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) - else - MAGIC_CMD=: - fi -fi -])# AC_PATH_MAGIC - - -# AC_PROG_LD -# ---------- -# find the pathname to the GNU or non-GNU linker -AC_DEFUN([AC_PROG_LD], -[AC_ARG_WITH([gnu-ld], - [AC_HELP_STRING([--with-gnu-ld], - [assume the C compiler uses GNU ld @<:@default=no@:>@])], - [test "$withval" = no || with_gnu_ld=yes], - [with_gnu_ld=no]) -AC_REQUIRE([LT_AC_PROG_SED])dnl -AC_REQUIRE([AC_PROG_CC])dnl -AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_CANONICAL_BUILD])dnl -ac_prog=ld -if test "$GCC" = yes; then - # Check if gcc -print-prog-name=ld gives a path. 
- AC_MSG_CHECKING([for ld used by $CC]) - case $host in - *-*-mingw*) - # gcc leaves a trailing carriage return which upsets mingw - ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; - *) - ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; - esac - case $ac_prog in - # Accept absolute paths. - [[\\/]]* | ?:[[\\/]]*) - re_direlt='/[[^/]][[^/]]*/\.\./' - # Canonicalize the pathname of ld - ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` - while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do - ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` - done - test -z "$LD" && LD="$ac_prog" - ;; - "") - # If it fails, then pretend we aren't using GCC. - ac_prog=ld - ;; - *) - # If it is relative, then search for the first ld in PATH. - with_gnu_ld=unknown - ;; - esac -elif test "$with_gnu_ld" = yes; then - AC_MSG_CHECKING([for GNU ld]) -else - AC_MSG_CHECKING([for non-GNU ld]) -fi -AC_CACHE_VAL(lt_cv_path_LD, -[if test -z "$LD"; then - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in $PATH; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then - lt_cv_path_LD="$ac_dir/$ac_prog" - # Check to see if the program is GNU ld. I'd rather use --version, - # but apparently some variants of GNU ld only accept -v. - # Break only if it was the GNU/non-GNU ld that we prefer. - case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null; then - case $host_cpu in - i*86 ) - # Not sure whether the presence of OpenBSD here was a mistake. - # Let's accept both of them until this is cleared up. - lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` - ;; - esac - else - lt_cv_deplibs_check_method=pass_all - fi - ;; - -gnu*) - lt_cv_deplibs_check_method=pass_all - ;; - -hpux10.20* | hpux11*) - lt_cv_file_magic_cmd=/usr/bin/file - case $host_cpu in - ia64*) - lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' - lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so - ;; - hppa*64*) - [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'] - lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl - ;; - *) - lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library' - lt_cv_file_magic_test_file=/usr/lib/libc.sl - ;; - esac - ;; - -irix5* | irix6* | nonstopux*) - case $LD in - *-32|*"-32 ") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - lt_cv_deplibs_check_method=pass_all - ;; - -# This must be Linux ELF. 
-linux*) - lt_cv_deplibs_check_method=pass_all - ;; - -netbsd*) - if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' - fi - ;; - -newos6*) - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=/usr/lib/libnls.so - ;; - -nto-qnx*) - lt_cv_deplibs_check_method=unknown - ;; - -openbsd*) - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' - fi - ;; - -osf3* | osf4* | osf5*) - lt_cv_deplibs_check_method=pass_all - ;; - -sco3.2v5*) - lt_cv_deplibs_check_method=pass_all - ;; - -solaris*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) - case $host_vendor in - motorola) - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` - ;; - ncr) - lt_cv_deplibs_check_method=pass_all - ;; - sequent) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' - ;; - sni) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" - lt_cv_file_magic_test_file=/lib/libc.so - ;; - siemens) - lt_cv_deplibs_check_method=pass_all - ;; - esac - ;; - -sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[[78]]* | unixware7* | sysv4*uw2*) - lt_cv_deplibs_check_method=pass_all - ;; -esac -]) -file_magic_cmd=$lt_cv_file_magic_cmd -deplibs_check_method=$lt_cv_deplibs_check_method -test -z "$deplibs_check_method" && deplibs_check_method=unknown -])# AC_DEPLIBS_CHECK_METHOD - - -# AC_PROG_NM -# ---------- -# find the pathname to a BSD-compatible name lister -AC_DEFUN([AC_PROG_NM], -[AC_CACHE_CHECK([for BSD-compatible nm], lt_cv_path_NM, -[if test -n "$NM"; then - # Let the user override the test. - lt_cv_path_NM="$NM" -else - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - tmp_nm="$ac_dir/${ac_tool_prefix}nm" - if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then - # Check to see if the nm accepts a BSD-compat flag. 
- # Adding the `sed 1q' prevents false positives on HP-UX, which says: - # nm: unknown option "B" ignored - # Tru64's nm complains that /dev/null is an invalid object file - case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in - */dev/null* | *'Invalid file or object type'*) - lt_cv_path_NM="$tmp_nm -B" - break - ;; - *) - case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in - */dev/null*) - lt_cv_path_NM="$tmp_nm -p" - break - ;; - *) - lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but - continue # so that we can try to find one that supports BSD flags - ;; - esac - esac - fi - done - IFS="$lt_save_ifs" - test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm -fi]) -NM="$lt_cv_path_NM" -])# AC_PROG_NM - - -# AC_CHECK_LIBM -# ------------- -# check for math library -AC_DEFUN([AC_CHECK_LIBM], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -LIBM= -case $host in -*-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*) - # These system don't have libm, or don't need it - ;; -*-ncr-sysv4.3*) - AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") - AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") - ;; -*) - AC_CHECK_LIB(m, cos, LIBM="-lm") - ;; -esac -])# AC_CHECK_LIBM - - -# AC_LIBLTDL_CONVENIENCE([DIRECTORY]) -# ----------------------------------- -# sets LIBLTDL to the link flags for the libltdl convenience library and -# LTDLINCL to the include flags for the libltdl header and adds -# --enable-ltdl-convenience to the configure arguments. Note that -# AC_CONFIG_SUBDIRS is not called here. If DIRECTORY is not provided, -# it is assumed to be `libltdl'. LIBLTDL will be prefixed with -# '${top_builddir}/' and LTDLINCL will be prefixed with '${top_srcdir}/' -# (note the single quotes!). If your package is not flat and you're not -# using automake, define top_builddir and top_srcdir appropriately in -# the Makefiles. -AC_DEFUN([AC_LIBLTDL_CONVENIENCE], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl - case $enable_ltdl_convenience in - no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;; - "") enable_ltdl_convenience=yes - ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;; - esac - LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la - LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) - # For backwards non-gettext consistent compatibility... - INCLTDL="$LTDLINCL" -])# AC_LIBLTDL_CONVENIENCE - - -# AC_LIBLTDL_INSTALLABLE([DIRECTORY]) -# ----------------------------------- -# sets LIBLTDL to the link flags for the libltdl installable library and -# LTDLINCL to the include flags for the libltdl header and adds -# --enable-ltdl-install to the configure arguments. Note that -# AC_CONFIG_SUBDIRS is not called here. If DIRECTORY is not provided, -# and an installed libltdl is not found, it is assumed to be `libltdl'. -# LIBLTDL will be prefixed with '${top_builddir}/'# and LTDLINCL with -# '${top_srcdir}/' (note the single quotes!). If your package is not -# flat and you're not using automake, define top_builddir and top_srcdir -# appropriately in the Makefiles. -# In the future, this macro may have to be called after AC_PROG_LIBTOOL. 
-AC_DEFUN([AC_LIBLTDL_INSTALLABLE], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl - AC_CHECK_LIB(ltdl, lt_dlinit, - [test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no], - [if test x"$enable_ltdl_install" = xno; then - AC_MSG_WARN([libltdl not installed, but installation disabled]) - else - enable_ltdl_install=yes - fi - ]) - if test x"$enable_ltdl_install" = x"yes"; then - ac_configure_args="$ac_configure_args --enable-ltdl-install" - LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la - LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) - else - ac_configure_args="$ac_configure_args --enable-ltdl-install=no" - LIBLTDL="-lltdl" - LTDLINCL= - fi - # For backwards non-gettext consistent compatibility... - INCLTDL="$LTDLINCL" -])# AC_LIBLTDL_INSTALLABLE - - -# AC_LIBTOOL_CXX -# -------------- -# enable support for C++ libraries -AC_DEFUN([AC_LIBTOOL_CXX], -[AC_REQUIRE([_LT_AC_LANG_CXX]) -])# AC_LIBTOOL_CXX - - -# _LT_AC_LANG_CXX -# --------------- -AC_DEFUN([_LT_AC_LANG_CXX], -[AC_REQUIRE([AC_PROG_CXX]) -AC_REQUIRE([_LT_AC_PROG_CXXCPP]) -_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}CXX]) -])# _LT_AC_LANG_CXX - -# _LT_AC_PROG_CXXCPP -# --------------- -AC_DEFUN([_LT_AC_PROG_CXXCPP], -[ -AC_REQUIRE([AC_PROG_CXX]) -if test -n "$CXX" && ( test "X$CXX" != "Xno" && - ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || - (test "X$CXX" != "Xg++"))) ; then - AC_PROG_CXXCPP -fi -])# _LT_AC_PROG_CXXCPP - -# AC_LIBTOOL_F77 -# -------------- -# enable support for Fortran 77 libraries -AC_DEFUN([AC_LIBTOOL_F77], -[AC_REQUIRE([_LT_AC_LANG_F77]) -])# AC_LIBTOOL_F77 - - -# _LT_AC_LANG_F77 -# --------------- -AC_DEFUN([_LT_AC_LANG_F77], -[AC_REQUIRE([AC_PROG_F77]) -_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}F77]) -])# _LT_AC_LANG_F77 - - -# AC_LIBTOOL_GCJ -# -------------- -# enable support for GCJ libraries -AC_DEFUN([AC_LIBTOOL_GCJ], -[AC_REQUIRE([_LT_AC_LANG_GCJ]) -])# AC_LIBTOOL_GCJ - - -# _LT_AC_LANG_GCJ -# --------------- -AC_DEFUN([_LT_AC_LANG_GCJ], -[AC_PROVIDE_IFELSE([AC_PROG_GCJ],[], - [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],[], - [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ],[], - [ifdef([AC_PROG_GCJ],[AC_REQUIRE([AC_PROG_GCJ])], - [ifdef([A][M_PROG_GCJ],[AC_REQUIRE([A][M_PROG_GCJ])], - [AC_REQUIRE([A][C_PROG_GCJ_OR_A][M_PROG_GCJ])])])])])]) -_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}GCJ]) -])# _LT_AC_LANG_GCJ - - -# AC_LIBTOOL_RC -# -------------- -# enable support for Windows resource files -AC_DEFUN([AC_LIBTOOL_RC], -[AC_REQUIRE([LT_AC_PROG_RC]) -_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}RC]) -])# AC_LIBTOOL_RC - - -# AC_LIBTOOL_LANG_C_CONFIG -# ------------------------ -# Ensure that the configuration vars for the C compiler are -# suitably defined. Those variables are subsequently used by -# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. -AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG], [_LT_AC_LANG_C_CONFIG]) -AC_DEFUN([_LT_AC_LANG_C_CONFIG], -[lt_save_CC="$CC" -AC_LANG_PUSH(C) - -# Source file extension for C test sources. -ac_ext=c - -# Object file extension for compiled C test sources. -objext=o -_LT_AC_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;\n" - -# Code to be used in simple link tests -lt_simple_link_test_code='int main(){return(0);}\n' - -_LT_AC_SYS_COMPILER - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -# -# Check for any special shared library compilation flags. 
-# -_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)= -if test "$GCC" = no; then - case $host_os in - sco3.2v5*) - _LT_AC_TAGVAR(lt_prog_cc_shlib, $1)='-belf' - ;; - esac -fi -if test -n "$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)"; then - AC_MSG_WARN([`$CC' requires `$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)' to build shared libraries]) - if echo "$old_CC $old_CFLAGS " | grep "[[ ]]$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)[[ ]]" >/dev/null; then : - else - AC_MSG_WARN([add `$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)' to the CC or CFLAGS env variable and reconfigure]) - _LT_AC_TAGVAR(lt_cv_prog_cc_can_build_shared, $1)=no - fi -fi - - -# -# Check to make sure the static flag actually works. -# -AC_LIBTOOL_LINKER_OPTION([if $compiler static flag $_LT_AC_TAGVAR(lt_prog_compiler_static, $1) works], - _LT_AC_TAGVAR(lt_prog_compiler_static_works, $1), - $_LT_AC_TAGVAR(lt_prog_compiler_static, $1), - [], - [_LT_AC_TAGVAR(lt_prog_compiler_static, $1)=]) - - -## CAVEAT EMPTOR: -## There is no encapsulation within the following macros, do not change -## the running order or otherwise move them around unless you know exactly -## what you are doing... -AC_LIBTOOL_PROG_COMPILER_NO_RTTI($1) -AC_LIBTOOL_PROG_COMPILER_PIC($1) -AC_LIBTOOL_PROG_CC_C_O($1) -AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) -AC_LIBTOOL_PROG_LD_SHLIBS($1) -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) -AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) -AC_LIBTOOL_SYS_LIB_STRIP -AC_LIBTOOL_DLOPEN_SELF($1) - -# Report which librarie types wil actually be built -AC_MSG_CHECKING([if libtool supports shared libraries]) -AC_MSG_RESULT([$can_build_shared]) - -AC_MSG_CHECKING([whether to build shared libraries]) -test "$can_build_shared" = "no" && enable_shared=no - -# On AIX, shared libraries and static libraries use the same namespace, and -# are all built from PIC. -case $host_os in -aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - -aix4* | aix5*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no - fi - ;; -esac -AC_MSG_RESULT([$enable_shared]) - -AC_MSG_CHECKING([whether to build static libraries]) -# Make sure either enable_shared or enable_static is yes. -test "$enable_shared" = yes || enable_static=yes -AC_MSG_RESULT([$enable_static]) - -AC_LIBTOOL_CONFIG($1) - -AC_LANG_POP -CC="$lt_save_CC" -])# AC_LIBTOOL_LANG_C_CONFIG - - -# AC_LIBTOOL_LANG_CXX_CONFIG -# -------------------------- -# Ensure that the configuration vars for the C compiler are -# suitably defined. Those variables are subsequently used by -# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. 
-AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG], [_LT_AC_LANG_CXX_CONFIG(CXX)]) -AC_DEFUN([_LT_AC_LANG_CXX_CONFIG], -[AC_LANG_PUSH(C++) -AC_REQUIRE([AC_PROG_CXX]) -AC_REQUIRE([_LT_AC_PROG_CXXCPP]) - -_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_AC_TAGVAR(allow_undefined_flag, $1)= -_LT_AC_TAGVAR(always_export_symbols, $1)=no -_LT_AC_TAGVAR(archive_expsym_cmds, $1)= -_LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_AC_TAGVAR(hardcode_direct, $1)=no -_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= -_LT_AC_TAGVAR(hardcode_libdir_separator, $1)= -_LT_AC_TAGVAR(hardcode_minus_L, $1)=no -_LT_AC_TAGVAR(hardcode_automatic, $1)=no -_LT_AC_TAGVAR(module_cmds, $1)= -_LT_AC_TAGVAR(module_expsym_cmds, $1)= -_LT_AC_TAGVAR(link_all_deplibs, $1)=unknown -_LT_AC_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_AC_TAGVAR(no_undefined_flag, $1)= -_LT_AC_TAGVAR(whole_archive_flag_spec, $1)= -_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no - -# Dependencies to place before and after the object being linked: -_LT_AC_TAGVAR(predep_objects, $1)= -_LT_AC_TAGVAR(postdep_objects, $1)= -_LT_AC_TAGVAR(predeps, $1)= -_LT_AC_TAGVAR(postdeps, $1)= -_LT_AC_TAGVAR(compiler_lib_search_path, $1)= - -# Source file extension for C++ test sources. -ac_ext=cpp - -# Object file extension for compiled C++ test sources. -objext=o -_LT_AC_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;\n" - -# Code to be used in simple link tests -lt_simple_link_test_code='int main(int, char *[]) { return(0); }\n' - -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_AC_SYS_COMPILER - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -# Allow CC to be a program name with arguments. -lt_save_CC=$CC -lt_save_LD=$LD -lt_save_GCC=$GCC -GCC=$GXX -lt_save_with_gnu_ld=$with_gnu_ld -lt_save_path_LD=$lt_cv_path_LD -if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then - lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx -else - unset lt_cv_prog_gnu_ld -fi -if test -n "${lt_cv_path_LDCXX+set}"; then - lt_cv_path_LD=$lt_cv_path_LDCXX -else - unset lt_cv_path_LD -fi -test -z "${LDCXX+set}" || LD=$LDCXX -CC=${CXX-"c++"} -compiler=$CC -_LT_AC_TAGVAR(compiler, $1)=$CC -_LT_CC_BASENAME([$compiler]) - -# We don't want -fno-exception wen compiling C++ code, so set the -# no_builtin_flag separately -if test "$GXX" = yes; then - _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' -else - _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= -fi - -if test "$GXX" = yes; then - # Set up default GNU C++ configuration - - AC_PROG_LD - - # Check if GNU C++ uses GNU ld as the underlying linker, since the - # archiving commands below assume that GNU ld is being used. - if test "$with_gnu_ld" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - - # If archive_cmds runs LD, not CC, wlarc should be empty - # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to - # investigate it a little bit more. 
(MM) - wlarc='${wl}' - - # ancient GNU ld didn't support --whole-archive et. al. - if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ - grep 'no-whole-archive' > /dev/null; then - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= - fi - else - with_gnu_ld=no - wlarc= - - # A generic and very simple default shared library creation - # command for GNU C++ for the case where it uses the native - # linker, instead of GNU ld. If possible, this setting should - # overridden to take advantage of the native linker features on - # the platform it is being used on. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - fi - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' - -else - GXX=no - with_gnu_ld=no - wlarc= -fi - -# PORTME: fill in a description of your system's C++ link characteristics -AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) -_LT_AC_TAGVAR(ld_shlibs, $1)=yes -case $host_os in - aix3*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - aix4* | aix5*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) - for ld_flag in $LDFLAGS; do - case $ld_flag in - *-brtl*) - aix_use_runtimelinking=yes - break - ;; - esac - done - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - _LT_AC_TAGVAR(archive_cmds, $1)='' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - - if test "$GXX" = yes; then - case $host_os in aix4.[[012]]|aix4.[[012]].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - else - # We have old collect2 - _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. 
Setting hardcode_minus_L - # to unsupported forces relinking - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= - fi - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - _LT_AC_TAGVAR(always_export_symbols, $1)=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an empty executable. - _LT_AC_SYS_LIBPATH_AIX - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' - _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an empty executable. - _LT_AC_SYS_LIBPATH_AIX - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' - # -bexpall does not export symbols beginning with underscore (_) - _LT_AC_TAGVAR(always_export_symbols, $1)=yes - # Exported symbols can be pulled into shared objects from archives - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)=' ' - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes - # This is similar to how AIX traditionally builds its shared libraries. - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; - chorus*) - case $cc_basename in - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - - cygwin* | mingw* | pw32*) - # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, - # as there is no search path for DLLs. 
- _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_AC_TAGVAR(always_export_symbols, $1)=no - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_automatic, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - - if test "$GXX" = yes ; then - lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o 
$deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - case $cc_basename in - xlc*) - output_verbose_link_cmd='echo' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - ;; - *) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - fi - ;; - - dgux*) - case $cc_basename in - ec++*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - ghcx*) - # Green Hills C++ Compiler - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - freebsd[[12]]*) - # C++ shared libraries reported to be fairly broken before switch to ELF - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - freebsd-elf*) - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - ;; - freebsd* | kfreebsd*-gnu | dragonfly*) - # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF - # conventions - _LT_AC_TAGVAR(ld_shlibs, $1)=yes - ;; - gnu*) - ;; - hpux9*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - # but as the default - # location of the library. - - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - aCC*) - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[[-]]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - hpux10*|hpux11*) - if test $with_gnu_ld = no; then - case $host_cpu in - hppa*64*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - ia64*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - ;; - *) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - ;; - esac - fi - case $host_cpu in - hppa*64*) - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - ia64*) - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - # but as the default - # location of the library. - ;; - *) - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - # but as the default - # location of the library. - ;; - esac - - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - aCC*) - case $host_cpu in - hppa*64*|ia64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $linker_flags $libobjs $deplibs' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes; then - if test $with_gnu_ld = no; then - case $host_cpu in - ia64*|hppa*64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $linker_flags $libobjs $deplibs' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - fi - else - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - irix5* | irix6*) - case $cc_basename in - CC*) - # SGI C++ - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - - # Archives containing C++ object files must be created using - # "CC -ar", where "CC" is the IRIX C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' - ;; - *) - if test "$GXX" = yes; then - if test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' - fi - fi - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - ;; - esac - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - linux*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath,$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - - # Archives containing C++ object files must be created using - # "CC -Bstatic", where "CC" is the KAI C++ compiler. - _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' - ;; - icpc*) - # Intel C++ - with_gnu_ld=yes - # version 8.0 and above of icpc choke on multiply defined symbols - # if we add $predep_objects and $postdep_objects, however 7.1 and - # earlier do not add the objects themselves. - case `$CC -V 2>&1` in - *"Version 7."*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - ;; - *) # Version 8.0 or newer - tmp_idyn= - case $host_cpu in - ia64*) tmp_idyn=' -i_dynamic';; - esac - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - ;; - esac - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' - ;; - pgCC*) - # Portland Group C++ compiler - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - ;; - cxx*) - # Compaq C++ - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' - - runpath_var=LD_RUN_PATH - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - esac - ;; - lynxos*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - m88k*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - mvs*) - case $cc_basename in - cxx*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - netbsd*) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' - wlarc= - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - fi - # Workaround some broken pre-1.5 toolchains - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' - ;; - openbsd2*) - # C++ shared libraries are fairly broken - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - openbsd*) - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - fi - output_verbose_link_cmd='echo' - ;; - osf3*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Archives containing C++ object files must be created using - # "CC -Bstatic", where "CC" is the KAI C++ compiler. 
- _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' - - ;; - RCC*) - # Rational C++ 2.4.1 - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - cxx*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' - - else - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - osf4* | osf5*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Archives containing C++ object files must be created using - # the KAI C++ compiler. 
- _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' - ;; - RCC*) - # Rational C++ 2.4.1 - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - cxx*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ - echo "-hidden">> $lib.exp~ - $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ - $rm $lib.exp' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' - - else - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - psos*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - sco*) - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - lcc*) - # Lucid - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - solaris*) - case $cc_basename in - CC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_AC_TAGVAR(archive_cmds_need_lc,$1)=yes - _LT_AC_TAGVAR(no_undefined_flag, $1)=' -zdefs' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - # The C++ compiler is used as linker so we must use $wl - # flag to pass the commands to the underlying system - # linker. We must also pass each convience library through - # to the system linker between allextract/defaultextract. - # The C++ compiler will combine linker options so we - # cannot just pass the convience library names through - # without $wl. - # Supported since Solaris 2.6 (maybe 2.5.1?) - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' - ;; - esac - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - - output_verbose_link_cmd='echo' - - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' - ;; - gcx*) - # Green Hills C++ Compiler - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - - # The C++ compiler must be used to create the archive. 
- _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' - ;; - *) - # GNU C++ compiler with Solaris linker - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' - if $CC --version | grep -v '^2\.7' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" - else - # g++ 2.7 appears to require `-G' NOT `-shared' on this - # platform. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" - fi - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' - fi - ;; - esac - ;; - sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[[78]]* | unixware7*) - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - ;; - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - vxworks*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; -esac -AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) -test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no - -_LT_AC_TAGVAR(GCC, $1)="$GXX" -_LT_AC_TAGVAR(LD, $1)="$LD" - -## CAVEAT EMPTOR: -## There is no encapsulation within the following macros, do not change -## the running order or otherwise move them around unless you know exactly -## what you are doing... 
-AC_LIBTOOL_POSTDEP_PREDEP($1) -AC_LIBTOOL_PROG_COMPILER_PIC($1) -AC_LIBTOOL_PROG_CC_C_O($1) -AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) -AC_LIBTOOL_PROG_LD_SHLIBS($1) -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) -AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) -AC_LIBTOOL_SYS_LIB_STRIP -AC_LIBTOOL_DLOPEN_SELF($1) - -AC_LIBTOOL_CONFIG($1) - -AC_LANG_POP -CC=$lt_save_CC -LDCXX=$LD -LD=$lt_save_LD -GCC=$lt_save_GCC -with_gnu_ldcxx=$with_gnu_ld -with_gnu_ld=$lt_save_with_gnu_ld -lt_cv_path_LDCXX=$lt_cv_path_LD -lt_cv_path_LD=$lt_save_path_LD -lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld -lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld -])# AC_LIBTOOL_LANG_CXX_CONFIG - -# AC_LIBTOOL_POSTDEP_PREDEP([TAGNAME]) -# ------------------------ -# Figure out "hidden" library dependencies from verbose -# compiler output when linking a shared library. -# Parse the compiler output and extract the necessary -# objects, libraries and library flags. -AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP],[ -dnl we can't use the lt_simple_compile_test_code here, -dnl because it contains code intended for an executable, -dnl not a library. It's possible we should let each -dnl tag define a new lt_????_link_test_code variable, -dnl but it's only used here... -ifelse([$1],[],[cat > conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext <> "$cfgfile" -ifelse([$1], [], -[#! $SHELL - -# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. -# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) -# NOTE: Changes made to this file will be lost: look at ltmain.sh. -# -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001 -# Free Software Foundation, Inc. -# -# This file is part of GNU Libtool: -# Originally by Gordon Matzigkeit , 1996 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -# A sed program that does not truncate output. -SED=$lt_SED - -# Sed that helps us avoid accidentally triggering echo(1) options like -n. -Xsed="$SED -e 1s/^X//" - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# The names of the tagged configurations supported by this script. -available_tags= - -# ### BEGIN LIBTOOL CONFIG], -[# ### BEGIN LIBTOOL TAG CONFIG: $tagname]) - -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: - -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL - -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared - -# Whether or not to build static libraries. 
-build_old_libs=$enable_static - -# Whether or not to add -lc for building shared libraries. -build_libtool_need_lc=$_LT_AC_TAGVAR(archive_cmds_need_lc, $1) - -# Whether or not to disallow shared libs when runtime libs are static -allow_libtool_libs_with_static_runtimes=$_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1) - -# Whether or not to optimize for fast installation. -fast_install=$enable_fast_install - -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os - -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os - -# An echo program that does not interpret backslashes. -echo=$lt_echo - -# The archiver. -AR=$lt_AR -AR_FLAGS=$lt_AR_FLAGS - -# A C compiler. -LTCC=$lt_LTCC - -# A language-specific compiler. -CC=$lt_[]_LT_AC_TAGVAR(compiler, $1) - -# Is the compiler the GNU C compiler? -with_gcc=$_LT_AC_TAGVAR(GCC, $1) - -# An ERE matcher. -EGREP=$lt_EGREP - -# The linker used to build libraries. -LD=$lt_[]_LT_AC_TAGVAR(LD, $1) - -# Whether we need hard or soft links. -LN_S=$lt_LN_S - -# A BSD-compatible nm program. -NM=$lt_NM - -# A symbol stripping program -STRIP=$lt_STRIP - -# Used to examine libraries when file_magic_cmd begins "file" -MAGIC_CMD=$MAGIC_CMD - -# Used on cygwin: DLL creation program. -DLLTOOL="$DLLTOOL" - -# Used on cygwin: object dumper. -OBJDUMP="$OBJDUMP" - -# Used on cygwin: assembler. -AS="$AS" - -# The name of the directory that contains temporary libtool files. -objdir=$objdir - -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds - -# How to pass a linker flag through the compiler. -wl=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) - -# Object file suffix (normally "o"). -objext="$ac_objext" - -# Old archive suffix (normally "a"). -libext="$libext" - -# Shared library suffix (normally ".so"). -shrext_cmds='$shrext_cmds' - -# Executable file suffix (normally ""). -exeext="$exeext" - -# Additional compiler flags for building library objects. -pic_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) -pic_mode=$pic_mode - -# What is the maximum length of a command? -max_cmd_len=$lt_cv_sys_max_cmd_len - -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_[]_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1) - -# Must we lock files when doing compilation? -need_locks=$lt_need_locks - -# Do we need the lib prefix for modules? -need_lib_prefix=$need_lib_prefix - -# Do we need a version for libraries? -need_version=$need_version - -# Whether dlopen is supported. -dlopen_support=$enable_dlopen - -# Whether dlopen of programs is supported. -dlopen_self=$enable_dlopen_self - -# Whether dlopen of statically linked programs is supported. -dlopen_self_static=$enable_dlopen_self_static - -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_static, $1) - -# Compiler flag to turn off builtin functions. -no_builtin_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) - -# Compiler flag to allow reflexive dlopens. -export_dynamic_flag_spec=$lt_[]_LT_AC_TAGVAR(export_dynamic_flag_spec, $1) - -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_[]_LT_AC_TAGVAR(whole_archive_flag_spec, $1) - -# Compiler flag to generate thread-safe objects. -thread_safe_flag_spec=$lt_[]_LT_AC_TAGVAR(thread_safe_flag_spec, $1) - -# Library versioning type. -version_type=$version_type - -# Format of library name prefix. -libname_spec=$lt_libname_spec - -# List of archive names. 
First name is the real one, the rest are links. -# The last name is the one that the linker finds with -lNAME. -library_names_spec=$lt_library_names_spec - -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec - -# Commands used to build and install an old-style archive. -RANLIB=$lt_RANLIB -old_archive_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_cmds, $1) -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds - -# Create an old-style archive from a shared archive. -old_archive_from_new_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_new_cmds, $1) - -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1) - -# Commands used to build and install a shared archive. -archive_cmds=$lt_[]_LT_AC_TAGVAR(archive_cmds, $1) -archive_expsym_cmds=$lt_[]_LT_AC_TAGVAR(archive_expsym_cmds, $1) -postinstall_cmds=$lt_postinstall_cmds -postuninstall_cmds=$lt_postuninstall_cmds - -# Commands used to build a loadable module (assumed same as above if empty) -module_cmds=$lt_[]_LT_AC_TAGVAR(module_cmds, $1) -module_expsym_cmds=$lt_[]_LT_AC_TAGVAR(module_expsym_cmds, $1) - -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib - -# Dependencies to place before the objects being linked to create a -# shared library. -predep_objects=$lt_[]_LT_AC_TAGVAR(predep_objects, $1) - -# Dependencies to place after the objects being linked to create a -# shared library. -postdep_objects=$lt_[]_LT_AC_TAGVAR(postdep_objects, $1) - -# Dependencies to place before the objects being linked to create a -# shared library. -predeps=$lt_[]_LT_AC_TAGVAR(predeps, $1) - -# Dependencies to place after the objects being linked to create a -# shared library. -postdeps=$lt_[]_LT_AC_TAGVAR(postdeps, $1) - -# The library search path used internally by the compiler when linking -# a shared library. -compiler_lib_search_path=$lt_[]_LT_AC_TAGVAR(compiler_lib_search_path, $1) - -# Method to check whether dependent libraries are shared objects. -deplibs_check_method=$lt_deplibs_check_method - -# Command to use when deplibs_check_method == file_magic. -file_magic_cmd=$lt_file_magic_cmd - -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_[]_LT_AC_TAGVAR(allow_undefined_flag, $1) - -# Flag that forces no undefined symbols. -no_undefined_flag=$lt_[]_LT_AC_TAGVAR(no_undefined_flag, $1) - -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds - -# Same as above, but a single script fragment to be evaled but not shown. -finish_eval=$lt_finish_eval - -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe - -# Transform the output of nm in a proper C declaration -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl - -# Transform the output of nm in a C name address pair -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - -# This is the shared library runtime path variable. -runpath_var=$runpath_var - -# This is the shared library path variable. -shlibpath_var=$shlibpath_var - -# Is shlibpath searched before the hard-coded library search path? -shlibpath_overrides_runpath=$shlibpath_overrides_runpath - -# How to hardcode a shared library path into an executable. 
-hardcode_action=$_LT_AC_TAGVAR(hardcode_action, $1) - -# Whether we should hardcode library paths into libraries. -hardcode_into_libs=$hardcode_into_libs - -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist. -hardcode_libdir_flag_spec=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) - -# If ld is used when linking, flag to hardcode \$libdir into -# a binary during linking. This must work even if \$libdir does -# not exist. -hardcode_libdir_flag_spec_ld=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1) - -# Whether we need a single -rpath flag with a separated argument. -hardcode_libdir_separator=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_separator, $1) - -# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the -# resulting binary. -hardcode_direct=$_LT_AC_TAGVAR(hardcode_direct, $1) - -# Set to yes if using the -LDIR flag during linking hardcodes DIR into the -# resulting binary. -hardcode_minus_L=$_LT_AC_TAGVAR(hardcode_minus_L, $1) - -# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into -# the resulting binary. -hardcode_shlibpath_var=$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1) - -# Set to yes if building a shared library automatically hardcodes DIR into the library -# and all subsequent libraries and executables linked against it. -hardcode_automatic=$_LT_AC_TAGVAR(hardcode_automatic, $1) - -# Variables whose values should be saved in libtool wrapper scripts and -# restored at relink time. -variables_saved_for_relink="$variables_saved_for_relink" - -# Whether libtool must link a program against all its dependency libraries. -link_all_deplibs=$_LT_AC_TAGVAR(link_all_deplibs, $1) - -# Compile-time system search path for libraries -sys_lib_search_path_spec=$lt_sys_lib_search_path_spec - -# Run-time system search path for libraries -sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec - -# Fix the shell variable \$srcfile for the compiler. -fix_srcfile_path="$_LT_AC_TAGVAR(fix_srcfile_path, $1)" - -# Set to yes if exported symbols are required. -always_export_symbols=$_LT_AC_TAGVAR(always_export_symbols, $1) - -# The commands to list exported symbols. -export_symbols_cmds=$lt_[]_LT_AC_TAGVAR(export_symbols_cmds, $1) - -# The commands to extract the exported symbol list from a shared archive. -extract_expsyms_cmds=$lt_extract_expsyms_cmds - -# Symbols that should not be listed in the preloaded symbols. -exclude_expsyms=$lt_[]_LT_AC_TAGVAR(exclude_expsyms, $1) - -# Symbols that must always be exported. -include_expsyms=$lt_[]_LT_AC_TAGVAR(include_expsyms, $1) - -ifelse([$1],[], -[# ### END LIBTOOL CONFIG], -[# ### END LIBTOOL TAG CONFIG: $tagname]) - -__EOF__ - -ifelse([$1],[], [ - case $host_os in - aix3*) - cat <<\EOF >> "$cfgfile" - -# AIX sometimes has problems with the GCC collect2 program. For some -# reason, if we set the COLLECT_NAMES environment variable, the problems -# vanish in a puff of smoke. -if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES -fi -EOF - ;; - esac - - # We use sed instead of cat because bash on DJGPP gets confused if - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? 
- sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) - - mv -f "$cfgfile" "$ofile" || \ - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" -]) -else - # If there is no Makefile yet, we rely on a make rule to execute - # `config.status --recheck' to rerun these tests and create the - # libtool script then. - ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` - if test -f "$ltmain_in"; then - test -f Makefile && make "$ltmain" - fi -fi -])# AC_LIBTOOL_CONFIG - - -# AC_LIBTOOL_PROG_COMPILER_NO_RTTI([TAGNAME]) -# ------------------------------------------- -AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], -[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl - -_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= - -if test "$GCC" = yes; then - _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' - - AC_LIBTOOL_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], - lt_cv_prog_compiler_rtti_exceptions, - [-fno-rtti -fno-exceptions], [], - [_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) -fi -])# AC_LIBTOOL_PROG_COMPILER_NO_RTTI - - -# AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE -# --------------------------------- -AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], -[AC_REQUIRE([AC_CANONICAL_HOST]) -AC_REQUIRE([AC_PROG_NM]) -AC_REQUIRE([AC_OBJEXT]) -# Check for command to grab the raw symbol name followed by C symbol from nm. -AC_MSG_CHECKING([command to parse $NM output from $compiler object]) -AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], -[ -# These are sane defaults that work on at least a few old systems. -# [They come from Ultrix. What could be older than Ultrix?!! ;)] - -# Character class describing NM global symbol codes. -symcode='[[BCDEGRST]]' - -# Regexp to match symbols that can be accessed directly from C. -sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' - -# Transform an extracted symbol line into a proper C declaration -lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern int \1;/p'" - -# Transform an extracted symbol line into symbol name and symbol address -lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" - -# Define system-specific variables. 
-case $host_os in -aix*) - symcode='[[BCDT]]' - ;; -cygwin* | mingw* | pw32*) - symcode='[[ABCDGISTW]]' - ;; -hpux*) # Its linker distinguishes data from code symbols - if test "$host_cpu" = ia64; then - symcode='[[ABCDEGRST]]' - fi - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" - ;; -linux*) - if test "$host_cpu" = ia64; then - symcode='[[ABCDGIRSTW]]' - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" - fi - ;; -irix* | nonstopux*) - symcode='[[BCDEGRST]]' - ;; -osf*) - symcode='[[BCDEGQRST]]' - ;; -solaris* | sysv5*) - symcode='[[BDRT]]' - ;; -sysv4) - symcode='[[DFNSTU]]' - ;; -esac - -# Handle CRLF in mingw tool chain -opt_cr= -case $build_os in -mingw*) - opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp - ;; -esac - -# If we're using GNU nm, then use its standard symbol codes. -case `$NM -V 2>&1` in -*GNU* | *'with BFD'*) - symcode='[[ABCDGIRSTW]]' ;; -esac - -# Try without a prefix undercore, then with it. -for ac_symprfx in "" "_"; do - - # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. - symxfrm="\\1 $ac_symprfx\\2 \\2" - - # Write the raw and C identifiers. - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - - # Check to see that the pipe works correctly. - pipe_works=no - - rm -f conftest* - cat > conftest.$ac_ext < $nlist) && test -s "$nlist"; then - # Try sorting and uniquifying the output. - if sort "$nlist" | uniq > "$nlist"T; then - mv -f "$nlist"T "$nlist" - else - rm -f "$nlist"T - fi - - # Make sure that we snagged all the symbols we need. - if grep ' nm_test_var$' "$nlist" >/dev/null; then - if grep ' nm_test_func$' "$nlist" >/dev/null; then - cat < conftest.$ac_ext -#ifdef __cplusplus -extern "C" { -#endif - -EOF - # Now generate the symbol file. - eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext' - - cat <> conftest.$ac_ext -#if defined (__STDC__) && __STDC__ -# define lt_ptr_t void * -#else -# define lt_ptr_t char * -# define const -#endif - -/* The mapping between symbol names and symbols. */ -const struct { - const char *name; - lt_ptr_t address; -} -lt_preloaded_symbols[[]] = -{ -EOF - $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext - cat <<\EOF >> conftest.$ac_ext - {0, (lt_ptr_t) 0} -}; - -#ifdef __cplusplus -} -#endif -EOF - # Now try linking the two files. 
- mv conftest.$ac_objext conftstm.$ac_objext - lt_save_LIBS="$LIBS" - lt_save_CFLAGS="$CFLAGS" - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" - if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then - pipe_works=yes - fi - LIBS="$lt_save_LIBS" - CFLAGS="$lt_save_CFLAGS" - else - echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD - fi - else - echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD - fi - else - echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD - fi - else - echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD - cat conftest.$ac_ext >&5 - fi - rm -f conftest* conftst* - - # Do not use the global_symbol_pipe unless it works. - if test "$pipe_works" = yes; then - break - else - lt_cv_sys_global_symbol_pipe= - fi -done -]) -if test -z "$lt_cv_sys_global_symbol_pipe"; then - lt_cv_sys_global_symbol_to_cdecl= -fi -if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then - AC_MSG_RESULT(failed) -else - AC_MSG_RESULT(ok) -fi -]) # AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE - - -# AC_LIBTOOL_PROG_COMPILER_PIC([TAGNAME]) -# --------------------------------------- -AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC], -[_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)= -_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= -_LT_AC_TAGVAR(lt_prog_compiler_static, $1)= - -AC_MSG_CHECKING([for $compiler option to produce PIC]) - ifelse([$1],[CXX],[ - # C++ specific cases for pic, static, wl, etc. - if test "$GXX" = yes; then - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; - amigaos*) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' - ;; - beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - mingw* | os2* | pw32*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' - ;; - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - ;; - *djgpp*) - # DJGPP does not support shared libraries at all - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - ;; - sysv4*MP*) - if test -d /usr/nec; then - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic - fi - ;; - hpux*) - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - else - case $host_os in - aix4* | aix5*) - # All AIX code is PIC. 
- if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - else - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' - fi - ;; - chorus*) - case $cc_basename in - cxch68*) - # Green Hills C++ Compiler - # _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" - ;; - esac - ;; - darwin*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - case $cc_basename in - xlc*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - ;; - esac - ;; - dgux*) - case $cc_basename in - ec++*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - ;; - ghcx*) - # Green Hills C++ Compiler - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - *) - ;; - esac - ;; - freebsd* | kfreebsd*-gnu | dragonfly*) - # FreeBSD uses GNU C++ - ;; - hpux9* | hpux10* | hpux11*) - case $cc_basename in - CC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive" - if test "$host_cpu" != ia64; then - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - fi - ;; - aCC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive" - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - ;; - esac - ;; - *) - ;; - esac - ;; - irix5* | irix6* | nonstopux*) - case $cc_basename in - CC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - # CC pic flag -KPIC is the default. - ;; - *) - ;; - esac - ;; - linux*) - case $cc_basename in - KCC*) - # KAI C++ Compiler - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - icpc* | ecpc*) - # Intel C++ - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - pgCC*) - # Portland Group C++ compiler. - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - cxx*) - # Compaq C++ - # Make sure the PIC flag is empty. It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - *) - ;; - esac - ;; - lynxos*) - ;; - m88k*) - ;; - mvs*) - case $cc_basename in - cxx*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' - ;; - *) - ;; - esac - ;; - netbsd*) - ;; - osf3* | osf4* | osf5*) - case $cc_basename in - KCC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' - ;; - RCC*) - # Rational C++ 2.4.1 - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - cxx*) - # Digital/Compaq C++ - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # Make sure the PIC flag is empty. It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. 
- _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - *) - ;; - esac - ;; - psos*) - ;; - sco*) - case $cc_basename in - CC*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - *) - ;; - esac - ;; - solaris*) - case $cc_basename in - CC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - ;; - gcx*) - # Green Hills C++ Compiler - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - ;; - *) - ;; - esac - ;; - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - lcc*) - # Lucid - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - *) - ;; - esac - ;; - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - ;; - *) - ;; - esac - ;; - unixware*) - ;; - vxworks*) - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - esac - fi -], -[ - if test "$GCC" = yes; then - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; - - amigaos*) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' - ;; - - beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - - mingw* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' - ;; - - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - ;; - - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - enable_shared=no - ;; - - sysv4*MP*) - if test -d /usr/nec; then - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic - fi - ;; - - hpux*) - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - ;; - - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. 
- case $host_os in - aix*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - else - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' - fi - ;; - darwin*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - case $cc_basename in - xlc*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - ;; - esac - ;; - - mingw* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' - ;; - - hpux9* | hpux10* | hpux11*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' - ;; - - irix5* | irix6* | nonstopux*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # PIC (with -KPIC) is the default. - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - - newsos6) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - linux*) - case $cc_basename in - icc* | ecc*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - pgcc* | pgf77* | pgf90* | pgf95*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - ccc*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # All Alpha code is PIC. - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - esac - ;; - - osf3* | osf4* | osf5*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # All OSF/1 code is PIC. 
- _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - - sco3.2v5*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-Kpic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-dn' - ;; - - solaris*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - case $cc_basename in - f77* | f90* | f95*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; - *) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; - esac - ;; - - sunos4*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - sysv4*MP*) - if test -d /usr/nec ;then - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; - - unicos*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - - uts4*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - *) - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - esac - fi -]) -AC_MSG_RESULT([$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)]) - -# -# Check to make sure the PIC flag actually works. -# -if test -n "$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)"; then - AC_LIBTOOL_COMPILER_OPTION([if $compiler PIC flag $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) works], - _LT_AC_TAGVAR(lt_prog_compiler_pic_works, $1), - [$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])], [], - [case $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) in - "" | " "*) ;; - *) _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)" ;; - esac], - [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) -fi -case $host_os in - # For platforms which do not support PIC, -DPIC is meaningless: - *djgpp*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])" - ;; -esac -]) - - -# AC_LIBTOOL_PROG_LD_SHLIBS([TAGNAME]) -# ------------------------------------ -# See if the linker supports building shared libraries. -AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS], -[AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) -ifelse([$1],[CXX],[ - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - case $host_os in - aix4* | aix5*) - # If we're using GNU nm, then we don't want the "-C" option. 
- # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | grep 'GNU' > /dev/null; then - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' - else - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' - fi - ;; - pw32*) - _LT_AC_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" - ;; - cygwin* | mingw*) - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/;/^.* __nm__/s/^.* __nm__\([[^ ]]*\) [[^ ]]*/\1 DATA/;/^I /d;/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols' - ;; - *) - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - ;; - esac -],[ - runpath_var= - _LT_AC_TAGVAR(allow_undefined_flag, $1)= - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no - _LT_AC_TAGVAR(archive_cmds, $1)= - _LT_AC_TAGVAR(archive_expsym_cmds, $1)= - _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)= - _LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1)= - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= - _LT_AC_TAGVAR(thread_safe_flag_spec, $1)= - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_minus_L, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_AC_TAGVAR(link_all_deplibs, $1)=unknown - _LT_AC_TAGVAR(hardcode_automatic, $1)=no - _LT_AC_TAGVAR(module_cmds, $1)= - _LT_AC_TAGVAR(module_expsym_cmds, $1)= - _LT_AC_TAGVAR(always_export_symbols, $1)=no - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - _LT_AC_TAGVAR(include_expsyms, $1)= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ` (' and `)$', so one must not match beginning or - # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', - # as well as any symbol that contains `d'. - _LT_AC_TAGVAR(exclude_expsyms, $1)="_GLOBAL_OFFSET_TABLE_" - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - extract_expsyms_cmds= - # Just being paranoid about ensuring that cc_basename is set. - _LT_CC_BASENAME([$compiler]) - case $host_os in - cygwin* | mingw* | pw32*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. 
- if test "$GCC" != yes; then - with_gnu_ld=no - fi - ;; - openbsd*) - with_gnu_ld=no - ;; - esac - - _LT_AC_TAGVAR(ld_shlibs, $1)=yes - if test "$with_gnu_ld" = yes; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='${wl}' - - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. - if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= - fi - supports_anon_versioning=no - case `$LD -v 2>/dev/null` in - *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac - - # See if GNU ld supports shared libraries. - case $host_os in - aix3* | aix4* | aix5*) - # On AIX/PPC, the GNU linker is very broken - if test "$host_cpu" != ia64; then - _LT_AC_TAGVAR(ld_shlibs, $1)=no - cat <&2 - -*** Warning: the GNU linker, at least up to release 2.9.1, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to modify your PATH -*** so that a non-GNU linker is found, and then restart. - -EOF - fi - ;; - - amigaos*) - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - - # Samuel A. Falvo II reports - # that the semantics of dynamic libraries on AmigaOS, at least up - # to version 4, is to share data among multiple programs linked - # with the same dynamic library. Since this doesn't match the - # behavior of shared libraries on other platforms, we can't use - # them. - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - - beos*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - cygwin* | mingw* | pw32*) - # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, - # as there is no search path for DLLs. 
- _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_AC_TAGVAR(always_export_symbols, $1)=no - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols' - - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - linux*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - tmp_addflag= - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - esac - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - - if test $supports_anon_versioning = yes; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - $echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' - fi - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - netbsd*) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - - solaris* | sysv5*) - if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then - _LT_AC_TAGVAR(ld_shlibs, $1)=no - cat <&2 - -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create 
shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -EOF - elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - sunos4*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - - if test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no; then - runpath_var= - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_AC_TAGVAR(always_export_symbols, $1)=yes - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - if test "$GCC" = yes && test -z "$link_static_flag"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported - fi - ;; - - aix4* | aix5*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | grep 'GNU' > /dev/null; then - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' - else - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. 
- case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) - for ld_flag in $LDFLAGS; do - if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then - aix_use_runtimelinking=yes - break - fi - done - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - _LT_AC_TAGVAR(archive_cmds, $1)='' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - - if test "$GCC" = yes; then - case $host_os in aix4.[[012]]|aix4.[[012]].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - else - # We have old collect2 - _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= - fi - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - _LT_AC_TAGVAR(always_export_symbols, $1)=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an empty executable. - _LT_AC_SYS_LIBPATH_AIX - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' - _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an empty executable. 
- _LT_AC_SYS_LIBPATH_AIX - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' - # -bexpall does not export symbols beginning with underscore (_) - _LT_AC_TAGVAR(always_export_symbols, $1)=yes - # Exported symbols can be pulled into shared objects from archives - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)=' ' - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes - # This is similar to how AIX traditionally builds its shared libraries. - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; - - amigaos*) - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - # see comment about different semantics on the GNU ld section - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - - bsdi[[45]]*) - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic - ;; - - cygwin* | mingw* | pw32*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=".dll" - # FIXME: Setting linknames here is a bad hack. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='true' - # FIXME: Should let the user specify the lib program. 
- _LT_AC_TAGVAR(old_archive_cmds, $1)='lib /OUT:$oldlib$oldobjs$old_deplibs' - _LT_AC_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - ;; - - darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_automatic, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - if test "$GCC" = yes ; then - output_verbose_link_cmd='echo' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - case $cc_basename in - xlc*) - output_verbose_link_cmd='echo' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - ;; - *) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - fi - ;; - - dgux*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - freebsd1*) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - - # 
FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). - freebsd2.2*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd* | kfreebsd*-gnu | dragonfly*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - hpux9*) - if test "$GCC" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - ;; - - hpux10* | hpux11*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - case $host_cpu in - hppa*64*|ia64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*|ia64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $libobjs $deplibs $linker_flags' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - ;; - esac - fi - if test "$with_gnu_ld" = no; then - case $host_cpu in - hppa*64*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - ia64*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - ;; - *) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - ;; - esac - fi - ;; - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - ;; - - netbsd*) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - newsos6) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - openbsd*) - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - else - case $host_os in - openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - ;; - esac - fi - ;; - - os2*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_AC_TAGVAR(archive_cmds, $1)='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs 
$compiler_flags $output_objdir/$libname.def' - _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' - ;; - - osf3*) - if test "$GCC" = yes; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - else - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ - $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' - - # Both c and cxx compiler support -rpath directly - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - fi - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - - sco3.2v5*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - ;; - - solaris*) - _LT_AC_TAGVAR(no_undefined_flag, $1)=' -z text' - if test "$GCC" = yes; then - wlarc='${wl}' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' - else - wlarc='' - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' - fi - 
_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - # The compiler driver will combine linker options so we - # cannot just pass the convience library names through - # without $wl, iff we do not link with $LD. - # Luckily, gcc supports the same syntax we need for Sun Studio. - # Supported since Solaris 2.6 (maybe 2.5.1?) - case $wlarc in - '') - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; - *) - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; - esac ;; - esac - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - ;; - - sunos4*) - if test "x$host_vendor" = xsequent; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - sysv4) - case $host_vendor in - sni) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' - _LT_AC_TAGVAR(hardcode_direct, $1)=no - ;; - motorola) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - sysv4.3*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - _LT_AC_TAGVAR(ld_shlibs, $1)=yes - fi - ;; - - sysv4.2uw2*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - hardcode_runpath_var=yes - runpath_var=LD_RUN_PATH - ;; - - sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[[78]]* | unixware7*) - _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z ${wl}text' - if test "$GCC" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - runpath_var='LD_RUN_PATH' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - sysv5*) - _LT_AC_TAGVAR(no_undefined_flag, $1)=' -z text' - # $CC -shared 
without GNU ld will not create a library from C++ - # object files and a static libstdc++, better avoid it by now - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var='LD_RUN_PATH' - ;; - - uts4*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - *) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - fi -]) -AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) -test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no - -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test "$GCC" = yes; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi - -# -# Do we need to explicitly link libc? -# -case "x$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)" in -x|xyes) - # Assume -lc should be added - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes - - if test "$enable_shared" = yes && test "$GCC" = yes; then - case $_LT_AC_TAGVAR(archive_cmds, $1) in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - AC_MSG_CHECKING([whether -lc should be explicitly linked in]) - $rm conftest* - printf "$lt_simple_compile_test_code" > conftest.$ac_ext - - if AC_TRY_EVAL(ac_compile) 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$_LT_AC_TAGVAR(allow_undefined_flag, $1) - _LT_AC_TAGVAR(allow_undefined_flag, $1)= - if AC_TRY_EVAL(_LT_AC_TAGVAR(archive_cmds, $1) 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) - then - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - else - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes - fi - _LT_AC_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $rm conftest* - AC_MSG_RESULT([$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)]) - ;; - esac - fi - ;; -esac -])# AC_LIBTOOL_PROG_LD_SHLIBS - - -# _LT_AC_FILE_LTDLL_C -# ------------------- -# Be careful that the start marker always follows a newline. 
-AC_DEFUN([_LT_AC_FILE_LTDLL_C], [ -# /* ltdll.c starts here */ -# #define WIN32_LEAN_AND_MEAN -# #include -# #undef WIN32_LEAN_AND_MEAN -# #include -# -# #ifndef __CYGWIN__ -# # ifdef __CYGWIN32__ -# # define __CYGWIN__ __CYGWIN32__ -# # endif -# #endif -# -# #ifdef __cplusplus -# extern "C" { -# #endif -# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved); -# #ifdef __cplusplus -# } -# #endif -# -# #ifdef __CYGWIN__ -# #include -# DECLARE_CYGWIN_DLL( DllMain ); -# #endif -# HINSTANCE __hDllInstance_base; -# -# BOOL APIENTRY -# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved) -# { -# __hDllInstance_base = hInst; -# return TRUE; -# } -# /* ltdll.c ends here */ -])# _LT_AC_FILE_LTDLL_C - - -# _LT_AC_TAGVAR(VARNAME, [TAGNAME]) -# --------------------------------- -AC_DEFUN([_LT_AC_TAGVAR], [ifelse([$2], [], [$1], [$1_$2])]) - - -# old names -AC_DEFUN([AM_PROG_LIBTOOL], [AC_PROG_LIBTOOL]) -AC_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) -AC_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) -AC_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) -AC_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) -AC_DEFUN([AM_PROG_LD], [AC_PROG_LD]) -AC_DEFUN([AM_PROG_NM], [AC_PROG_NM]) - -# This is just to silence aclocal about the macro not being used -ifelse([AC_DISABLE_FAST_INSTALL]) - -AC_DEFUN([LT_AC_PROG_GCJ], -[AC_CHECK_TOOL(GCJ, gcj, no) - test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" - AC_SUBST(GCJFLAGS) -]) - -AC_DEFUN([LT_AC_PROG_RC], -[AC_CHECK_TOOL(RC, windres, no) -]) - -############################################################ -# NOTE: This macro has been submitted for inclusion into # -# GNU Autoconf as AC_PROG_SED. When it is available in # -# a released version of Autoconf we should remove this # -# macro and use it instead. # -############################################################ -# LT_AC_PROG_SED -# -------------- -# Check for a fully-functional sed program, that truncates -# as few characters as possible. Prefer GNU sed if found. -AC_DEFUN([LT_AC_PROG_SED], -[AC_MSG_CHECKING([for a sed that does not truncate output]) -AC_CACHE_VAL(lt_cv_path_SED, -[# Loop through the user's path and test for sed and gsed. -# Then use that list of sed's as ones to test for truncation. -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for lt_ac_prog in sed gsed; do - for ac_exec_ext in '' $ac_executable_extensions; do - if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then - lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" - fi - done - done -done -lt_ac_max=0 -lt_ac_count=0 -# Add /usr/xpg4/bin/sed as it is typically found on Solaris -# along with /bin/sed that truncates output. -for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do - test ! -f $lt_ac_sed && continue - cat /dev/null > conftest.in - lt_ac_count=0 - echo $ECHO_N "0123456789$ECHO_C" >conftest.in - # Check for GNU sed and select it if it is found. 
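(Editorial aside, not part of the removed macro.) The LT_AC_PROG_SED loop being deleted here hunts for a sed that does not truncate long lines: it takes any candidate that identifies itself as GNU sed, and otherwise feeds each candidate an input that doubles in size on every pass until the candidate stops reproducing it. A rough standalone sketch of that doubling check, assuming a POSIX shell; the candidate path and the final echo are illustrative, not taken from the patch:

candidate=/usr/bin/sed                         # the sed under test
printf '0123456789' > conftest.in
count=0
while :; do
  cat conftest.in conftest.in > conftest.tmp   # double the input each pass
  mv conftest.tmp conftest.in
  cp conftest.in conftest.nl
  echo >> conftest.nl                          # give sed a terminated line
  # a non-truncating sed must reproduce its input unchanged
  "$candidate" -e 's/a$//' < conftest.nl > conftest.out || break
  cmp -s conftest.out conftest.nl || break
  count=$((count + 1))
  test "$count" -gt 10 && break                # ~10000 characters is plenty
done
echo "$candidate survived $count doublings"
rm -f conftest.in conftest.nl conftest.out conftest.tmp

The removed loop continues directly below with the GNU-sed shortcut and this same doubling test, keeping whichever candidate survives the most passes.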
- if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then - lt_cv_path_SED=$lt_ac_sed - break - fi - while true; do - cat conftest.in conftest.in >conftest.tmp - mv conftest.tmp conftest.in - cp conftest.in conftest.nl - echo >>conftest.nl - $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break - cmp -s conftest.out conftest.nl || break - # 10000 chars as input seems more than enough - test $lt_ac_count -gt 10 && break - lt_ac_count=`expr $lt_ac_count + 1` - if test $lt_ac_count -gt $lt_ac_max; then - lt_ac_max=$lt_ac_count - lt_cv_path_SED=$lt_ac_sed - fi - done -done -]) -SED=$lt_cv_path_SED -AC_MSG_RESULT([$SED]) -]) diff --git a/storage/bdb/dist/aclocal/mutex.ac b/storage/bdb/dist/aclocal/mutex.ac deleted file mode 100644 index 149bda737b2..00000000000 --- a/storage/bdb/dist/aclocal/mutex.ac +++ /dev/null @@ -1,706 +0,0 @@ -# $Id: mutex.ac,v 12.6 2005/11/04 20:19:29 bostic Exp $ - -# POSIX pthreads tests: inter-process safe and intra-process only. -AC_DEFUN(AM_PTHREADS_SHARED, [ -AC_TRY_RUN([ -#include -main() { - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || - pthread_mutexattr_init(&mutexattr) || - pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); -}], [db_cv_mutex="$1"],, -AC_TRY_LINK([ -#include ],[ - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) || - pthread_mutexattr_init(&mutexattr) || - pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); -], [db_cv_mutex="$1"]))]) -AC_DEFUN(AM_PTHREADS_PRIVATE, [ -AC_TRY_RUN([ -#include -main() { - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_mutexattr_init(&mutexattr) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); -}], [db_cv_mutex="$1"],, -AC_TRY_LINK([ -#include ],[ - pthread_cond_t cond; - pthread_mutex_t mutex; - pthread_condattr_t condattr; - pthread_mutexattr_t mutexattr; - exit ( - pthread_condattr_init(&condattr) || - pthread_mutexattr_init(&mutexattr) || - pthread_cond_init(&cond, &condattr) || - pthread_mutex_init(&mutex, &mutexattr) || - pthread_mutex_lock(&mutex) || - pthread_mutex_unlock(&mutex) || - pthread_mutex_destroy(&mutex) || - pthread_cond_destroy(&cond) || - pthread_condattr_destroy(&condattr) || - pthread_mutexattr_destroy(&mutexattr)); -], 
[db_cv_mutex="$1"]))]) - -# Figure out mutexes for this compiler/architecture. -AC_DEFUN(AM_DEFINE_MUTEXES, [ - -# Mutexes we don't test for, but want the #defines to exist for -# other ports. -AH_TEMPLATE(HAVE_MUTEX_VMS, [Define to 1 to use VMS mutexes.]) -AH_TEMPLATE(HAVE_MUTEX_VXWORKS, [Define to 1 to use VxWorks mutexes.]) - -AC_CACHE_CHECK([for mutexes], db_cv_mutex, [ -db_cv_mutex=no - -orig_libs=$LIBS - -# User-specified POSIX or UI mutexes. -# -# There are two different reasons to specify mutexes: First, the application -# is already using one type of mutex and doesn't want to mix-and-match (for -# example, on Solaris, which has POSIX, UI and LWP mutexes). Second, the -# applications POSIX pthreads mutexes don't support inter-process locking, -# but the application wants to use them anyway (for example, some Linux and -# *BSD systems). -# -# Test for LWP threads before testing for UI/POSIX threads, we prefer them -# on Solaris. There's a bug in SunOS 5.7 where applications get pwrite, not -# pwrite64, if they load the C library before the appropriate threads library, -# e.g., tclsh using dlopen to load the DB library. By using LWP threads we -# avoid answering lots of user questions, not to mention the bugs. -# -# Otherwise, test for POSIX threads before UI threads. There are Linux systems -# that support a UI compatibility mode, and applications are more likely to be -# written for POSIX threads than UI threads. -# -# Try and link with a threads library if possible. The problem is the Solaris -# C library has UI/POSIX interface stubs, but they're broken, configuring them -# for inter-process mutexes doesn't return an error, but it doesn't work either. -if test "$db_cv_posixmutexes" = yes; then - db_cv_mutex="posix_only"; -fi -if test "$db_cv_uimutexes" = yes; then - db_cv_mutex="ui_only"; -fi - -# User-specified Win32 mutexes (MinGW build) -if test "$db_cv_mingw" = "yes"; then - db_cv_mutex=win32/gcc -fi - -# LWP threads: _lwp_XXX -if test "$db_cv_mutex" = no; then -AC_TRY_LINK([ -#include ],[ - static lwp_mutex_t mi = SHAREDMUTEX; - static lwp_cond_t ci = SHAREDCV; - lwp_mutex_t mutex = mi; - lwp_cond_t cond = ci; - exit ( - _lwp_mutex_lock(&mutex) || - _lwp_mutex_unlock(&mutex)); -], [db_cv_mutex="Solaris/lwp"]) -fi - -# POSIX.1 pthreads: pthread_XXX -# -# If the user specified we use POSIX pthreads mutexes, and we fail to find the -# full interface, try and configure for just intra-process support. 
-if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then - LIBS="$LIBS -lpthread" - AM_PTHREADS_SHARED("POSIX/pthreads/library") - LIBS="$orig_libs" -fi -if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then - AM_PTHREADS_SHARED("POSIX/pthreads") -fi -if test "$db_cv_mutex" = "posix_only"; then - AM_PTHREADS_PRIVATE("POSIX/pthreads/private") -fi -if test "$db_cv_mutex" = "posix_only"; then - LIBS="$LIBS -lpthread" - AM_PTHREADS_PRIVATE("POSIX/pthreads/library/private") - LIBS="$orig_libs" -fi -if test "$db_cv_mutex" = "posix_only"; then - AC_MSG_ERROR([unable to find POSIX 1003.1 mutex interfaces]) -fi - -# UI threads: thr_XXX -if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then -LIBS="$LIBS -lthread" -AC_TRY_LINK([ -#include -#include ],[ - mutex_t mutex; - cond_t cond; - int type = USYNC_PROCESS; - exit ( - mutex_init(&mutex, type, NULL) || - cond_init(&cond, type, NULL) || - mutex_lock(&mutex) || - mutex_unlock(&mutex)); -], [db_cv_mutex="UI/threads/library"]) -LIBS="$orig_libs" -fi -if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then -AC_TRY_LINK([ -#include -#include ],[ - mutex_t mutex; - cond_t cond; - int type = USYNC_PROCESS; - exit ( - mutex_init(&mutex, type, NULL) || - cond_init(&cond, type, NULL) || - mutex_lock(&mutex) || - mutex_unlock(&mutex)); -], [db_cv_mutex="UI/threads"]) -fi -if test "$db_cv_mutex" = "ui_only"; then - AC_MSG_ERROR([unable to find UI mutex interfaces]) -fi - -# msemaphore: HPPA only -# Try HPPA before general msem test, it needs special alignment. -if test "$db_cv_mutex" = no; then -AC_TRY_LINK([ -#include ],[ -#if defined(__hppa) - typedef msemaphore tsl_t; - msemaphore x; - msem_init(&x, 0); - msem_lock(&x, 0); - msem_unlock(&x, 0); - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="HP/msem_init"]) -fi - -# msemaphore: AIX, OSF/1 -if test "$db_cv_mutex" = no; then -AC_TRY_LINK([ -#include -#include ],[ - typedef msemaphore tsl_t; - msemaphore x; - msem_init(&x, 0); - msem_lock(&x, 0); - msem_unlock(&x, 0); - exit(0); -], [db_cv_mutex="UNIX/msem_init"]) -fi - -# ReliantUNIX -if test "$db_cv_mutex" = no; then -LIBS="$LIBS -lmproc" -AC_TRY_LINK([ -#include ],[ - typedef spinlock_t tsl_t; - spinlock_t x; - initspin(&x, 1); - cspinlock(&x); - spinunlock(&x); -], [db_cv_mutex="ReliantUNIX/initspin"]) -LIBS="$orig_libs" -fi - -# SCO: UnixWare has threads in libthread, but OpenServer doesn't. -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(__USLC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="SCO/x86/cc-assembly"]) -fi - -# abilock_t: SGI -if test "$db_cv_mutex" = no; then -AC_TRY_LINK([ -#include ],[ - typedef abilock_t tsl_t; - abilock_t x; - init_lock(&x); - acquire_lock(&x); - release_lock(&x); -], [db_cv_mutex="SGI/init_lock"]) -fi - -# sema_t: Solaris -# The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever -# turn this test on, unless we find some other platform that uses the old -# POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.) -if test "$db_cv_mutex" = DOESNT_WORK; then -AC_TRY_LINK([ -#include ],[ - typedef sema_t tsl_t; - sema_t x; - sema_init(&x, 1, USYNC_PROCESS, NULL); - sema_wait(&x); - sema_post(&x); -], [db_cv_mutex="UNIX/sema_init"]) -fi - -# _lock_try/_lock_clear: Solaris -# On Solaris systems without Pthread or UI mutex interfaces, DB uses the -# undocumented _lock_try _lock_clear function calls instead of either the -# sema_trywait(3T) or sema_wait(3T) function calls. 
This is because of -# problems in those interfaces in some releases of the Solaris C library. -if test "$db_cv_mutex" = no; then -AC_TRY_LINK([ -#include ],[ - typedef lock_t tsl_t; - lock_t x; - _lock_try(&x); - _lock_clear(&x); -], [db_cv_mutex="Solaris/_lock_try"]) -fi - -# _check_lock/_clear_lock: AIX -if test "$db_cv_mutex" = no; then -AC_TRY_LINK([ -#include ],[ - int x; - _check_lock(&x,0,1); - _clear_lock(&x,0); -], [db_cv_mutex="AIX/_check_lock"]) -fi - -# _spin_lock_try/_spin_unlock: Apple/Darwin -if test "$db_cv_mutex" = no; then -AC_TRY_LINK(,[ - int x; - _spin_lock_try(&x); - _spin_unlock(&x); -], [db_cv_mutex="Darwin/_spin_lock_try"]) -fi - -# Tru64/cc -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(__alpha) && defined(__DECC) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="Tru64/cc-assembly"]) -fi - -# Alpha/gcc -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(__alpha) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="ALPHA/gcc-assembly"]) -fi - -# ARM/gcc: Linux -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(__arm__) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="ARM/gcc-assembly"]) -fi - -# MIPS/gcc: Linux -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if (defined(__mips) || defined(__mips__)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="MIPS/gcc-assembly"]) -fi - -# MIPS/gcc: Linux -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if (defined(__mips) || defined(__mips__)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="MIPS/gcc-assembly"]) -fi - -# PaRisc/gcc: HP/UX -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if (defined(__hppa) || defined(__hppa__)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="HPPA/gcc-assembly"]) -fi - -# PPC/gcc: -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="PPC/gcc-assembly"]) -fi - -# Sparc/gcc: SunOS, Solaris -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(__sparc__) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="Sparc/gcc-assembly"]) -fi - -# 68K/gcc: SunOS -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if (defined(mc68020) || defined(sun3)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="68K/gcc-assembly"]) -fi - -# x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if (defined(i386) || defined(__i386__)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="x86/gcc-assembly"]) -fi - -# x86_64/gcc: FreeBSD, NetBSD, BSD/OS, Linux -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if (defined(x86_64) || defined(__x86_64__)) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="x86_64/gcc-assembly"]) -fi - -# S390/cc: IBM OS/390 Unix -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(__MVS__) && defined(__IBMC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="S390/cc-assembly"]) -fi - -# S390/gcc: Linux -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(__s390__) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif 
-], [db_cv_mutex="S390/gcc-assembly"]) -fi - -# ia64/gcc: Linux -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(__ia64) && defined(__GNUC__) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="ia64/gcc-assembly"]) -fi - -# uts/cc: UTS -if test "$db_cv_mutex" = no; then -AC_TRY_COMPILE(,[ -#if defined(_UTS) - exit(0); -#else - FAIL TO COMPILE/LINK -#endif -], [db_cv_mutex="UTS/cc-assembly"]) -fi - -# default to UNIX fcntl system call mutexes. -if test "$db_cv_mutex" = no; then - db_cv_mutex="UNIX/fcntl" -fi -]) - -AC_SUBST(thread_h_decl) -AC_SUBST(db_threadid_t_decl) -db_threadid_t_decl=notset - -case "$db_cv_mutex" in -68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_68K_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_68K_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and 68K assembly language mutexes.]);; -AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_AIX_CHECK_LOCK) - AH_TEMPLATE(HAVE_MUTEX_AIX_CHECK_LOCK, - [Define to 1 to use the AIX _check_lock mutexes.]);; -Darwin/_spin_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY) - AH_TEMPLATE(HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY, - [Define to 1 to use the Apple/Darwin _spin_lock_try mutexes.]);; -ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and Alpha assembly language mutexes.]);; -ARM/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_ARM_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_ARM_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and ARM assembly language mutexes.]);; -HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_HPPA_MSEM_INIT) - AH_TEMPLATE(HAVE_MUTEX_HPPA_MSEM_INIT, - [Define to 1 to use the msem_XXX mutexes on HP-UX.]);; -HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.]);; -ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_IA64_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_IA64_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and IA64 assembly language mutexes.]);; -POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - thread_h_decl="#include " - db_threadid_t_decl="typedef pthread_t db_threadid_t;" - AC_DEFINE(HAVE_MUTEX_PTHREADS) - AH_TEMPLATE(HAVE_MUTEX_PTHREADS, - [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);; -POSIX/pthreads/private) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - thread_h_decl="#include " - db_threadid_t_decl="typedef pthread_t db_threadid_t;" - AC_DEFINE(HAVE_MUTEX_PTHREADS) - AH_TEMPLATE(HAVE_MUTEX_PTHREADS, - [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]) - AC_DEFINE(HAVE_MUTEX_THREAD_ONLY) - AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY, - [Define to 1 to configure mutexes intra-process only.]);; -POSIX/pthreads/library) LIBS="$LIBS -lpthread" - LIBSO_LIBS="$LIBSO_LIBS -lpthread" - ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - thread_h_decl="#include " - db_threadid_t_decl="typedef pthread_t db_threadid_t;" - AC_DEFINE(HAVE_MUTEX_PTHREADS) - AH_TEMPLATE(HAVE_MUTEX_PTHREADS, - [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);; -POSIX/pthreads/library/private) - LIBS="$LIBS -lpthread" 
- LIBSO_LIBS="$LIBSO_LIBS -lpthread" - ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - thread_h_decl="#include " - db_threadid_t_decl="typedef pthread_t db_threadid_t;" - AC_DEFINE(HAVE_MUTEX_PTHREADS) - AH_TEMPLATE(HAVE_MUTEX_PTHREADS, - [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]) - AC_DEFINE(HAVE_MUTEX_THREAD_ONLY) - AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY, - [Define to 1 to configure mutexes intra-process only.]);; -PPC/gcc-assembly) - ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_PPC_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_PPC_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and PowerPC assembly language mutexes.]);; -ReliantUNIX/initspin) LIBS="$LIBS -lmproc" - ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_RELIANTUNIX_INITSPIN) - AH_TEMPLATE(HAVE_MUTEX_RELIANTUNIX_INITSPIN, - [Define to 1 to use Reliant UNIX initspin mutexes.]);; -S390/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_S390_CC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_S390_CC_ASSEMBLY, - [Define to 1 to use the IBM C compiler and S/390 assembly language mutexes.]);; -S390/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_S390_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_S390_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and S/390 assembly language mutexes.]);; -SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY, - [Define to 1 to use the SCO compiler and x86 assembly language mutexes.]);; -SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_SGI_INIT_LOCK) - AH_TEMPLATE(HAVE_MUTEX_SGI_INIT_LOCK, - [Define to 1 to use the SGI XXX_lock mutexes.]);; -Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_SOLARIS_LOCK_TRY) - AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LOCK_TRY, - [Define to 1 to use the Solaris _lock_XXX mutexes.]);; -Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - thread_h_decl="#include " - db_threadid_t_decl="typedef pthread_t db_threadid_t;" - AC_DEFINE(HAVE_MUTEX_SOLARIS_LWP) - AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LWP, - [Define to 1 to use the Solaris lwp threads mutexes.]);; -Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and Sparc assembly language mutexes.]);; -Tru64/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_TRU64_CC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_TRU64_CC_ASSEMBLY, - [Define to 1 to use the CC compiler and Tru64 assembly language mutexes.]);; -UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - thread_h_decl="#include " - db_threadid_t_decl="typedef thread_t db_threadid_t;" - AC_DEFINE(HAVE_MUTEX_UI_THREADS) - AH_TEMPLATE(HAVE_MUTEX_UI_THREADS, - [Define to 1 to use the UNIX International mutexes.]);; -UI/threads/library) LIBS="$LIBS -lthread" - LIBSO_LIBS="$LIBSO_LIBS -lthread" - ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS" - thread_h_decl="#include " - db_threadid_t_decl="typedef thread_t db_threadid_t;" - AC_DEFINE(HAVE_MUTEX_UI_THREADS) - AH_TEMPLATE(HAVE_MUTEX_UI_THREADS, - [Define to 1 to use the UNIX International mutexes.]);; -UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_MSEM_INIT) - AH_TEMPLATE(HAVE_MUTEX_MSEM_INIT, - [Define to 1 to use the msem_XXX 
mutexes on systems other than HP-UX.]);; -UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_SEMA_INIT) - AH_TEMPLATE(HAVE_MUTEX_SEMA_INIT, - [Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes.]);; -UTS/cc-assembly) ADDITIONAL_OBJS="uts4.cc${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_UTS_CC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_UTS_CC_ASSEMBLY, - [Define to 1 to use the UTS compiler and assembly language mutexes.]);; -win32) ADDITIONAL_OBJS="mut_win32${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_WIN32) - AH_TEMPLATE(HAVE_MUTEX_WIN32, [Define to 1 to use the MSVC compiler and Windows mutexes.]);; -win32/gcc) ADDITIONAL_OBJS="mut_win32${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_WIN32_GCC) - AH_TEMPLATE(HAVE_MUTEX_WIN32_GCC, [Define to 1 to use the GCC compiler and Windows mutexes.]);; -MIPS/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_MIPS_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_MIPS_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and MIPS assembly language mutexes.]);; -x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_X86_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_X86_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and x86 assembly language mutexes.]);; -x86_64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_X86_64_GCC_ASSEMBLY) - AH_TEMPLATE(HAVE_MUTEX_X86_64_GCC_ASSEMBLY, - [Define to 1 to use the GCC compiler and amd64 assembly language mutexes.]);; -UNIX/fcntl) AC_MSG_WARN( - [NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE.]) - ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS" - AC_DEFINE(HAVE_MUTEX_FCNTL) - AH_TEMPLATE(HAVE_MUTEX_FCNTL, - [Define to 1 to use the UNIX fcntl system call mutexes.]);; -*) AC_MSG_ERROR([Unknown mutex interface: $db_cv_mutex]);; -esac - -# The mutex selection often tells us what kind of thread package we're using. -# We need to know if the thread ID type will fit into an integral type and we -# can compare it for equality and generally treat it like an int, or if it's a -# non-integral type and we have to treat it like a structure or other untyped -# block of bytes. For example, MVS typedef's pthread_t to a structure. -AH_TEMPLATE(HAVE_INTEGRAL_THREAD_TYPE, - [Define to 1 if thread identifier type db_threadid_t is integral.]) -if test "$db_threadid_t_decl" = "notset"; then - db_threadid_t_decl="typedef uintmax_t db_threadid_t;" - AC_DEFINE(HAVE_INTEGRAL_THREAD_TYPE) -else - AC_TRY_COMPILE( - #include - $thread_h_decl, [ - $db_threadid_t_decl - db_threadid_t a; - a = 0; - ], AC_DEFINE(HAVE_INTEGRAL_THREAD_TYPE)) -fi - -# There are 3 classes of mutexes: -# -# 1: Mutexes requiring no cleanup, for example, test-and-set mutexes. -# 2: Mutexes that must be destroyed, but which don't hold permanent system -# resources, for example, pthread mutexes on MVS aka OS/390 aka z/OS. -# 3: Mutexes that must be destroyed, even after the process is gone, for -# example, pthread mutexes on QNX and binary semaphores on VxWorks. -# -# DB cannot currently distinguish between #2 and #3 because DB does not know -# if the application is running environment recovery as part of startup and -# does not need to do cleanup, or if the environment is being removed and/or -# recovered in a loop in the application, and so does need to clean up. If -# we get it wrong, we're going to call the mutex destroy routine on a random -# piece of memory, which usually works, but just might drop core. 
For now, -# we group #2 and #3 into the HAVE_MUTEX_SYSTEM_RESOURCES define, until we -# have a better solution or reason to solve this in a general way -- so far, -# the places we've needed to handle this are few. -AH_TEMPLATE(HAVE_MUTEX_SYSTEM_RESOURCES, - [Define to 1 if mutexes hold system resources.]) - -case "$host_os$db_cv_mutex" in -*qnx*POSIX/pthread*|openedition*POSIX/pthread*) - AC_DEFINE(HAVE_MUTEX_SYSTEM_RESOURCES);; -esac]) diff --git a/storage/bdb/dist/aclocal/options.ac b/storage/bdb/dist/aclocal/options.ac deleted file mode 100644 index 2697c030b86..00000000000 --- a/storage/bdb/dist/aclocal/options.ac +++ /dev/null @@ -1,292 +0,0 @@ -# $Id: options.ac,v 12.2 2005/10/12 14:45:42 bostic Exp $ - -# Process user-specified options. -AC_DEFUN(AM_OPTIONS_SET, [ - -# --enable-bigfile was the configuration option that Berkeley DB used before -# autoconf 2.50 was released (which had --enable-largefile integrated in). -AC_ARG_ENABLE(bigfile, - [AC_HELP_STRING([--disable-bigfile], - [Obsolete; use --disable-largefile instead.])], - [AC_MSG_ERROR( - [--enable-bigfile no longer supported, use --enable-largefile])]) - -AC_MSG_CHECKING(if --disable-cryptography option specified) -AC_ARG_ENABLE(cryptography, - AC_HELP_STRING([--disable-cryptography], - [Do not build database cryptography support.]),, enableval="yes") -db_cv_build_cryptography="$enableval" -case "$enableval" in - no) AC_MSG_RESULT(yes);; -yes) AC_MSG_RESULT(no);; -esac - -AC_MSG_CHECKING(if --disable-hash option specified) -AC_ARG_ENABLE(hash, - AC_HELP_STRING([--disable-hash], - [Do not build Hash access method.]),, enableval="yes") -db_cv_build_hash="$enableval" -case "$enableval" in - no) AC_MSG_RESULT(yes);; -yes) AC_MSG_RESULT(no);; -esac - -AC_MSG_CHECKING(if --disable-queue option specified) -AC_ARG_ENABLE(queue, - AC_HELP_STRING([--disable-queue], - [Do not build Queue access method.]),, enableval="yes") -db_cv_build_queue="$enableval" -case "$enableval" in - no) AC_MSG_RESULT(yes);; -yes) AC_MSG_RESULT(no);; -esac - -AC_MSG_CHECKING(if --disable-replication option specified) -AC_ARG_ENABLE(replication, - AC_HELP_STRING([--disable-replication], - [Do not build database replication support.]),, enableval="yes") -db_cv_build_replication="$enableval" -case "$enableval" in - no) AC_MSG_RESULT(yes);; -yes) AC_MSG_RESULT(no);; -esac - -AC_MSG_CHECKING(if --disable-statistics option specified) -AC_ARG_ENABLE(statistics, - AC_HELP_STRING([--disable-statistics], - [Do not build statistics support.]),, enableval="yes") -db_cv_build_statistics="$enableval" -case "$enableval" in - no) AC_MSG_RESULT(yes);; -yes) AC_MSG_RESULT(no);; -esac - -AC_MSG_CHECKING(if --disable-verify option specified) -AC_ARG_ENABLE(verify, - AC_HELP_STRING([--disable-verify], - [Do not build database verification support.]),, enableval="yes") -db_cv_build_verify="$enableval" -case "$enableval" in - no) AC_MSG_RESULT(yes);; -yes) AC_MSG_RESULT(no);; -esac - -AC_MSG_CHECKING(if --enable-compat185 option specified) -AC_ARG_ENABLE(compat185, - [AC_HELP_STRING([--enable-compat185], - [Build DB 1.85 compatibility API.])], - [db_cv_compat185="$enable_compat185"], [db_cv_compat185="no"]) -AC_MSG_RESULT($db_cv_compat185) - -AC_MSG_CHECKING(if --enable-cxx option specified) -AC_ARG_ENABLE(cxx, - [AC_HELP_STRING([--enable-cxx], - [Build C++ API.])], - [db_cv_cxx="$enable_cxx"], [db_cv_cxx="no"]) -AC_MSG_RESULT($db_cv_cxx) - -AC_MSG_CHECKING(if --enable-debug option specified) -AC_ARG_ENABLE(debug, - [AC_HELP_STRING([--enable-debug], - [Build a debugging 
version.])], - [db_cv_debug="$enable_debug"], [db_cv_debug="no"]) -AC_MSG_RESULT($db_cv_debug) - -AC_MSG_CHECKING(if --enable-debug_rop option specified) -AC_ARG_ENABLE(debug_rop, - [AC_HELP_STRING([--enable-debug_rop], - [Build a version that logs read operations.])], - [db_cv_debug_rop="$enable_debug_rop"], [db_cv_debug_rop="no"]) -AC_MSG_RESULT($db_cv_debug_rop) - -AC_MSG_CHECKING(if --enable-debug_wop option specified) -AC_ARG_ENABLE(debug_wop, - [AC_HELP_STRING([--enable-debug_wop], - [Build a version that logs write operations.])], - [db_cv_debug_wop="$enable_debug_wop"], [db_cv_debug_wop="no"]) -AC_MSG_RESULT($db_cv_debug_wop) - -AC_MSG_CHECKING(if --enable-diagnostic option specified) -AC_ARG_ENABLE(diagnostic, - [AC_HELP_STRING([--enable-diagnostic], - [Build a version with run-time diagnostics.])], - [db_cv_diagnostic="$enable_diagnostic"], [db_cv_diagnostic="no"]) -if test "$db_cv_diagnostic" = "yes"; then - AC_MSG_RESULT($db_cv_diagnostic) -fi -if test "$db_cv_diagnostic" = "no" -a "$db_cv_debug_rop" = "yes"; then - db_cv_diagnostic="yes" - AC_MSG_RESULT([by --enable-debug_rop]) -fi -if test "$db_cv_diagnostic" = "no" -a "$db_cv_debug_wop" = "yes"; then - db_cv_diagnostic="yes" - AC_MSG_RESULT([by --enable-debug_wop]) -fi -if test "$db_cv_diagnostic" = "no"; then - AC_MSG_RESULT($db_cv_diagnostic) -fi - -AC_MSG_CHECKING(if --enable-dump185 option specified) -AC_ARG_ENABLE(dump185, - [AC_HELP_STRING([--enable-dump185], - [Build db_dump185(1) to dump 1.85 databases.])], - [db_cv_dump185="$enable_dump185"], [db_cv_dump185="no"]) -AC_MSG_RESULT($db_cv_dump185) - -AC_MSG_CHECKING(if --enable-java option specified) -AC_ARG_ENABLE(java, - [AC_HELP_STRING([--enable-java], - [Build Java API.])], - [db_cv_java="$enable_java"], [db_cv_java="no"]) -AC_MSG_RESULT($db_cv_java) - -AC_MSG_CHECKING(if --enable-mingw option specified) -AC_ARG_ENABLE(mingw, - [AC_HELP_STRING([--enable-mingw], - [Build Berkeley DB for MinGW.])], - [db_cv_mingw="$enable_mingw"], [db_cv_mingw="no"]) -AC_MSG_RESULT($db_cv_mingw) - -AC_MSG_CHECKING(if --enable-o_direct option specified) -AC_ARG_ENABLE(o_direct, - [AC_HELP_STRING([--enable-o_direct], - [Enable the O_DIRECT flag for direct I/O.])], - [db_cv_o_direct="$enable_o_direct"], [db_cv_o_direct="no"]) -AC_MSG_RESULT($db_cv_o_direct) - -AC_MSG_CHECKING(if --enable-posixmutexes option specified) -AC_ARG_ENABLE(posixmutexes, - [AC_HELP_STRING([--enable-posixmutexes], - [Force use of POSIX standard mutexes.])], - [db_cv_posixmutexes="$enable_posixmutexes"], [db_cv_posixmutexes="no"]) -AC_MSG_RESULT($db_cv_posixmutexes) - -AC_MSG_CHECKING(if --enable-pthread_self option specified) -AC_ARG_ENABLE(pthread_self, - [AC_HELP_STRING([--enable-pthread_self], - [Force use of pthread_self to identify threads.])], - [db_cv_pthread_self="$enable_pthread_self"], [db_cv_pthread_self="no"]) -AC_MSG_RESULT($db_cv_pthread_self) - -AC_MSG_CHECKING(if --enable-rpc option specified) -AC_ARG_ENABLE(rpc, - [AC_HELP_STRING([--enable-rpc], - [Build RPC client/server.])], - [db_cv_rpc="$enable_rpc"], [db_cv_rpc="no"]) -AC_MSG_RESULT($db_cv_rpc) - -AC_MSG_CHECKING(if --enable-smallbuild option specified) -AC_ARG_ENABLE(smallbuild, - [AC_HELP_STRING([--enable-smallbuild], - [Build small footprint version of the library.])], - [db_cv_smallbuild="$enable_smallbuild"], [db_cv_smallbuild="no"]) -if test "$db_cv_smallbuild" = "yes"; then - db_cv_build_cryptography="no" - db_cv_build_hash="no" - db_cv_build_queue="no" - db_cv_build_replication="no" - db_cv_build_statistics="no" - 
db_cv_build_verify="no" -fi -AC_MSG_RESULT($db_cv_smallbuild) - -AC_MSG_CHECKING(if --enable-tcl option specified) -AC_ARG_ENABLE(tcl, - [AC_HELP_STRING([--enable-tcl], - [Build Tcl API.])], - [db_cv_tcl="$enable_tcl"], [db_cv_tcl="no"]) -AC_MSG_RESULT($db_cv_tcl) - -AC_MSG_CHECKING(if --enable-test option specified) -AC_ARG_ENABLE(test, - [AC_HELP_STRING([--enable-test], - [Configure to run the test suite.])], - [db_cv_test="$enable_test"], [db_cv_test="no"]) -AC_MSG_RESULT($db_cv_test) - -AC_MSG_CHECKING(if --enable-uimutexes option specified) -AC_ARG_ENABLE(uimutexes, - [AC_HELP_STRING([--enable-uimutexes], - [Force use of Unix International mutexes.])], - [db_cv_uimutexes="$enable_uimutexes"], [db_cv_uimutexes="no"]) -AC_MSG_RESULT($db_cv_uimutexes) - -AC_MSG_CHECKING(if --enable-umrw option specified) -AC_ARG_ENABLE(umrw, - [AC_HELP_STRING([--enable-umrw], - [Mask harmless uninitialized memory read/writes.])], - [db_cv_umrw="$enable_umrw"], [db_cv_umrw="no"]) -AC_MSG_RESULT($db_cv_umrw) - -AC_MSG_CHECKING(if --with-mutex=MUTEX option specified) -AC_ARG_WITH(mutex, - [AC_HELP_STRING([--with-mutex=MUTEX], - [Selection of non-standard mutexes.])], - [with_mutex="$withval"], [with_mutex="no"]) -if test "$with_mutex" = "yes"; then - AC_MSG_ERROR([--with-mutex requires a mutex name argument]) -fi -if test "$with_mutex" != "no"; then - db_cv_mutex="$with_mutex" -fi -AC_MSG_RESULT($with_mutex) - -# --with-mutexalign=ALIGNMENT was the configuration option that Berkeley DB -# used before the DbEnv::mutex_set_align method was added. -AC_ARG_WITH(mutexalign, - [AC_HELP_STRING([--with-mutexalign=ALIGNMENT], - [Obsolete; use DbEnv::mutex_set_align instead.])], - [AC_MSG_ERROR( - [--with-mutexalign no longer supported, use DbEnv::mutex_set_align])]) - -AC_MSG_CHECKING([if --with-tcl=DIR option specified]) -AC_ARG_WITH(tcl, - [AC_HELP_STRING([--with-tcl=DIR], - [Directory location of tclConfig.sh.])], - [with_tclconfig="$withval"], [with_tclconfig="no"]) -AC_MSG_RESULT($with_tclconfig) -if test "$with_tclconfig" != "no"; then - db_cv_tcl="yes" -fi - -AC_MSG_CHECKING([if --with-uniquename=NAME option specified]) -AC_ARG_WITH(uniquename, - [AC_HELP_STRING([--with-uniquename=NAME], - [Build a uniquely named library.])], - [with_uniquename="$withval"], [with_uniquename="no"]) -if test "$with_uniquename" = "no"; then - db_cv_uniquename="no" - DB_VERSION_UNIQUE_NAME="" - AC_MSG_RESULT($with_uniquename) -else - db_cv_uniquename="yes" - if test "$with_uniquename" = "yes"; then - DB_VERSION_UNIQUE_NAME="__EDIT_DB_VERSION_UNIQUE_NAME__" - else - DB_VERSION_UNIQUE_NAME="$with_uniquename" - fi - AC_MSG_RESULT($DB_VERSION_UNIQUE_NAME) -fi - -# Test requires Tcl -if test "$db_cv_test" = "yes"; then - if test "$db_cv_tcl" = "no"; then - AC_MSG_ERROR([--enable-test requires --enable-tcl]) - fi -fi - -# Uniquename excludes C++, Java, RPC. 
-if test "$db_cv_uniquename" = "yes"; then - if test "$db_cv_rpc" = "yes"; then - AC_MSG_ERROR( - [--with-uniquename is not compatible with --enable-rpc]) - fi - if test "$db_cv_cxx" = "yes"; then - AC_MSG_ERROR( - [--with-uniquename is not compatible with --enable-cxx]) - fi - if test "$db_cv_java" = "yes"; then - AC_MSG_ERROR( - [--with-uniquename is not compatible with --enable-java]) - fi -fi]) diff --git a/storage/bdb/dist/aclocal/programs.ac b/storage/bdb/dist/aclocal/programs.ac deleted file mode 100644 index 76ce0ded66a..00000000000 --- a/storage/bdb/dist/aclocal/programs.ac +++ /dev/null @@ -1,76 +0,0 @@ -# $Id: programs.ac,v 12.1 2005/04/07 06:47:03 mjc Exp $ - -# Check for programs used in building/installation. -AC_DEFUN(AM_PROGRAMS_SET, [ - -AC_CHECK_TOOL(db_cv_path_ar, ar, missing_ar) -if test "$db_cv_path_ar" = missing_ar; then - AC_MSG_ERROR([No ar utility found.]) -fi - -AC_CHECK_TOOL(db_cv_path_chmod, chmod, missing_chmod) -if test "$db_cv_path_chmod" = missing_chmod; then - AC_MSG_ERROR([No chmod utility found.]) -fi - -AC_CHECK_TOOL(db_cv_path_cp, cp, missing_cp) -if test "$db_cv_path_cp" = missing_cp; then - AC_MSG_ERROR([No cp utility found.]) -fi - -AC_CHECK_TOOL(db_cv_path_ln, ln, missing_ln) -if test "$db_cv_path_ln" = missing_ln; then - AC_MSG_ERROR([No ln utility found.]) -fi - -AC_CHECK_TOOL(db_cv_path_mkdir, mkdir, missing_mkdir) -if test "$db_cv_path_mkdir" = missing_mkdir; then - AC_MSG_ERROR([No mkdir utility found.]) -fi - -# We need a complete path for ranlib, because it doesn't exist on some -# architectures because the ar utility packages the library itself. -AC_CHECK_TOOL(path_ranlib, ranlib, missing_ranlib) -AC_PATH_PROG(db_cv_path_ranlib, $path_ranlib, missing_ranlib) - -AC_CHECK_TOOL(db_cv_path_rm, rm, missing_rm) -if test "$db_cv_path_rm" = missing_rm; then - AC_MSG_ERROR([No rm utility found.]) -fi - -if test "$db_cv_rpc" = "yes"; then - AC_CHECK_TOOL(db_cv_path_rpcgen, rpcgen, missing_rpcgen) - if test "$db_cv_path_rpcgen" = missing_rpcgen; then - AC_MSG_ERROR([No rpcgen utility found.]) - fi -fi - -# We need a complete path for sh, because some implementations of make -# get upset if SHELL is set to just the command name. -AC_CHECK_TOOL(path_sh, sh, missing_sh) -AC_PATH_PROG(db_cv_path_sh, $path_sh, missing_sh) -if test "$db_cv_path_sh" = missing_sh; then - AC_MSG_ERROR([No sh utility found.]) -fi - -AC_CHECK_TOOL(db_cv_path_true, true, missing_true) -if test "$db_cv_path_true" = missing_true; then - AC_MSG_ERROR([No true utility found.]) -fi - -# Don't strip the binaries if --enable-debug was specified. -if test "$db_cv_debug" = yes; then - db_cv_path_strip=debug_build_no_strip -else - AC_CHECK_TOOL(path_strip, strip, missing_strip) - AC_PATH_PROG(db_cv_path_strip, $path_strip, missing_strip) -fi - -if test "$db_cv_test" = "yes"; then - AC_CHECK_TOOL(db_cv_path_kill, kill, missing_kill) - if test "$db_cv_path_kill" = missing_kill; then - AC_MSG_ERROR([No kill utility found.]) - fi -fi - -]) diff --git a/storage/bdb/dist/aclocal/rpc.ac b/storage/bdb/dist/aclocal/rpc.ac deleted file mode 100644 index 7e7198bc0fe..00000000000 --- a/storage/bdb/dist/aclocal/rpc.ac +++ /dev/null @@ -1,83 +0,0 @@ -# $Id: rpc.ac,v 12.0 2004/11/17 03:43:37 bostic Exp $ - -# Try and configure RPC support. -AC_DEFUN(AM_RPC_CONFIGURE, [ - AC_DEFINE(HAVE_RPC) - AH_TEMPLATE(HAVE_RPC, [Define to 1 if building RPC client/server.]) - - # We use the target's rpcgen utility because it may be architecture - # specific, for example, 32- or 64-bit specific. 
- XDR_FILE=$srcdir/../rpc_server/db_server.x - - # Prefer the -C option to rpcgen which generates ANSI C-conformant - # code. - RPCGEN="rpcgen -C" - AC_MSG_CHECKING(["$RPCGEN" build of db_server.h]) - $RPCGEN -h $XDR_FILE > db_server.h 2>/dev/null - if test $? -ne 0; then - AC_MSG_RESULT([no]) - - # Try rpcgen without the -C option. - RPCGEN="rpcgen" - AC_MSG_CHECKING(["$RPCGEN" build of db_server.h]) - $RPCGEN -h $XDR_FILE > db_server.h 2>/dev/null - if test $? -ne 0; then - AC_MSG_RESULT([no]) - AC_MSG_ERROR( - [Unable to build RPC support: $RPCGEN failed.]) - fi - fi - - # Some rpcgen programs generate a set of client stubs called something - # like __db_env_create_4003 and functions on the server to handle the - # request called something like __db_env_create_4003_svc. Others - # expect client and server stubs to both be called __db_env_create_4003. - # - # We have to generate code in whichever format rpcgen expects, and the - # only reliable way to do that is to check what is in the db_server.h - # file we just created. - if grep "env_create_[[0-9]]*_svc" db_server.h >/dev/null 2>&1 ; then - sed 's/__SVCSUFFIX__/_svc/' \ - < $srcdir/../rpc_server/c/gen_db_server.c > gen_db_server.c - else - sed 's/__SVCSUFFIX__//' \ - < $srcdir/../rpc_server/c/gen_db_server.c > gen_db_server.c - fi - - AC_MSG_RESULT([yes]) - - $RPCGEN -l $XDR_FILE | - sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ - -e '1,/^#include/s/^#include/#include "db_config.h"\ -&/' > db_server_clnt.c - - $RPCGEN -s tcp $XDR_FILE | - sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ - -e 's/^main *()/__dbsrv_main()/' \ - -e 's/^main *(.*argc.*argv.*)/__dbsrv_main(int argc, char *argv[])/' \ - -e '/^db_rpc_serverprog/,/^}/{' \ - -e 's/return;//' \ - -e 's/^}/__dbsrv_timeout(0);}/' \ - -e '}' \ - -e '1,/^#include/s/^#include/#include "db_config.h"\ -&/' > db_server_svc.c - - $RPCGEN -c $XDR_FILE | - sed -e 's/^#include.*db_server.h.*/#include "db_server.h"/' \ - -e '1,/^#include/s/^#include/#include "db_config.h"\ -&/' > db_server_xdr.c - - RPC_SERVER_H=db_server.h - RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)" - ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS" - - case "$host_os" in - hpux*) - AC_CHECK_FUNC(svc_run,, - AC_CHECK_LIB(nsl, svc_run, - LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"; - LIBJSO_LIBS="-lnsl $LIBJSO_LIBS"));; - solaris*) - AC_CHECK_FUNC(svc_run,, AC_CHECK_LIB(nsl, svc_run));; - esac -]) diff --git a/storage/bdb/dist/aclocal/sequence.ac b/storage/bdb/dist/aclocal/sequence.ac deleted file mode 100644 index 5c491eeb1cf..00000000000 --- a/storage/bdb/dist/aclocal/sequence.ac +++ /dev/null @@ -1,95 +0,0 @@ -# $Id: sequence.ac,v 12.2 2005/11/03 17:46:14 bostic Exp $ - -# Try and configure sequence support. -AC_DEFUN(AM_SEQUENCE_CONFIGURE, [ - AC_MSG_CHECKING([for 64-bit integral type support for sequences]) - - db_cv_build_sequence="yes" - - # Have to have found 64-bit types to support sequences. If we don't - # find the native types, we try and create our own. - if test "$ac_cv_type_int64_t" = "no" -a -z "$int64_decl"; then - db_cv_build_sequence="no" - fi - if test "$ac_cv_type_uint64_t" = "no" -a -z "$u_int64_decl"; then - db_cv_build_sequence="no" - fi - - # Figure out what type is the right size, and set the format. 
- AC_SUBST(INT64_FMT) - AC_SUBST(UINT64_FMT) - db_cv_seq_type="no" - if test "$db_cv_build_sequence" = "yes" -a\ - "$ac_cv_sizeof_long" -eq "8"; then - db_cv_seq_type="long" - db_cv_seq_fmt='"%ld"' - db_cv_seq_ufmt='"%lu"' - INT64_FMT='#define INT64_FMT "%ld"' - UINT64_FMT='#define UINT64_FMT "%lu"' - else if test "$db_cv_build_sequence" = "yes" -a\ - "$ac_cv_sizeof_long_long" -eq "8"; then - db_cv_seq_type="long long" - db_cv_seq_fmt='"%lld"' - db_cv_seq_ufmt='"%llu"' - INT64_FMT='#define INT64_FMT "%lld"' - UINT64_FMT='#define UINT64_FMT "%llu"' - else - db_cv_build_sequence="no" - fi - fi - - # Test to see if we can declare variables of the appropriate size - # and format them. If we're cross-compiling, all we get is a link - # test, which won't test for the appropriate printf format strings. - if test "$db_cv_build_sequence" = "yes"; then - AC_TRY_RUN([ - main() { - $db_cv_seq_type l; - unsigned $db_cv_seq_type u; - char buf@<:@100@:>@; - - buf@<:@0@:>@ = 'a'; - l = 9223372036854775807LL; - (void)snprintf(buf, sizeof(buf), $db_cv_seq_fmt, l); - if (strcmp(buf, "9223372036854775807")) - return (1); - u = 18446744073709551615ULL; - (void)snprintf(buf, sizeof(buf), $db_cv_seq_ufmt, u); - if (strcmp(buf, "18446744073709551615")) - return (1); - return (0); - }],, [db_cv_build_sequence="no"], - AC_TRY_LINK(,[ - $db_cv_seq_type l; - unsigned $db_cv_seq_type u; - char buf@<:@100@:>@; - - buf@<:@0@:>@ = 'a'; - l = 9223372036854775807LL; - (void)snprintf(buf, sizeof(buf), $db_cv_seq_fmt, l); - if (strcmp(buf, "9223372036854775807")) - return (1); - u = 18446744073709551615ULL; - (void)snprintf(buf, sizeof(buf), $db_cv_seq_ufmt, u); - if (strcmp(buf, "18446744073709551615")) - return (1); - return (0); - ],, [db_cv_build_sequence="no"])) - fi - if test "$db_cv_build_sequence" = "yes"; then - AC_DEFINE(HAVE_SEQUENCE) - AH_TEMPLATE(HAVE_SEQUENCE, - [Define to 1 if building sequence support.]) - - AC_SUBST(db_seq_decl) - db_seq_decl="typedef int64_t db_seq_t;"; - - AC_DEFINE(HAVE_64BIT_TYPES) - AH_TEMPLATE(HAVE_64BIT_TYPES, - [Define to 1 if 64-bit types are available.]) - else - # It still has to compile, but it won't run. - db_seq_decl="typedef int db_seq_t;"; - fi - AC_MSG_RESULT($db_cv_build_sequence) -]) diff --git a/storage/bdb/dist/aclocal/sosuffix.ac b/storage/bdb/dist/aclocal/sosuffix.ac deleted file mode 100644 index bd391e248a0..00000000000 --- a/storage/bdb/dist/aclocal/sosuffix.ac +++ /dev/null @@ -1,76 +0,0 @@ -# $Id: sosuffix.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -# Determine shared object suffixes. -# -# Our method is to use the libtool variable $library_names_spec, -# set by using AC_PROG_LIBTOOL. This variable is a snippet of shell -# defined in terms of $versuffix, $release, $libname and $module -# We want to eval it and grab the suffix used for shared objects. -# By setting $module to yes/no, we obtain the suffixes -# used to create dlloadable, or java loadable modules. -# On many (*nix) systems, these all evaluate to .so, but there -# are some notable exceptions. -# Before calling this macro, $LIBTOOL_PROG must be set to -# the correct method of invoking libtool (e.g. $SHELL ./libtool) - -# This macro is used internally to discover the suffix for the current -# settings of $module. The result is stored in $_SOSUFFIX. 
-AC_DEFUN(_SOSUFFIX_INTERNAL, [ - versuffix="" - release="" - libname=libfoo - eval _SOSUFFIX=\"$shrext_cmds\" - if test "$_SOSUFFIX" = "" ; then - _SOSUFFIX=".so" - if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then - if test "$_SOSUFFIX_MESSAGE" = ""; then - _SOSUFFIX_MESSAGE=yes - AC_MSG_WARN([libtool may not know about this architecture.]) - AC_MSG_WARN([assuming $_SUFFIX suffix for dynamic libraries.]) - fi - fi - fi -]) - -# SOSUFFIX_CONFIG will set the variable SOSUFFIX to be the -# shared library extension used for general linking, not dlopen. -AC_DEFUN(SOSUFFIX_CONFIG, [ - AC_MSG_CHECKING([SOSUFFIX from libtool]) - module=no - _SOSUFFIX_INTERNAL - SOSUFFIX=$_SOSUFFIX - AC_MSG_RESULT($SOSUFFIX) - AC_SUBST(SOSUFFIX) -]) - -# MODSUFFIX_CONFIG will set the variable MODSUFFIX to be the -# shared library extension used for dlopen'ed modules. -# To discover this, we set $module, simulating libtool's -module option. -AC_DEFUN(MODSUFFIX_CONFIG, [ - AC_MSG_CHECKING([MODSUFFIX from libtool]) - module=yes - _SOSUFFIX_INTERNAL - MODSUFFIX=$_SOSUFFIX - AC_MSG_RESULT($MODSUFFIX) - AC_SUBST(MODSUFFIX) -]) - -# JMODSUFFIX_CONFIG will set the variable JMODSUFFIX to be the -# shared library extension used JNI modules opened by Java. -# To discover this, we set $jnimodule, simulating libtool's -shrext option. -########################################################################## -# Robert Boehne: Not much point in this macro any more because apparently -# Darwin is the only OS that wants or needs the .jnilib extension. -########################################################################## -AC_DEFUN(JMODSUFFIX_CONFIG, [ - AC_MSG_CHECKING([JMODSUFFIX from libtool]) - module=yes - _SOSUFFIX_INTERNAL - if test `uname` = "Darwin"; then - JMODSUFFIX=".jnilib" - else - JMODSUFFIX=$_SOSUFFIX - fi - AC_MSG_RESULT($JMODSUFFIX) - AC_SUBST(JMODSUFFIX) -]) - diff --git a/storage/bdb/dist/aclocal/tcl.ac b/storage/bdb/dist/aclocal/tcl.ac deleted file mode 100644 index 360cf62b185..00000000000 --- a/storage/bdb/dist/aclocal/tcl.ac +++ /dev/null @@ -1,135 +0,0 @@ -# $Id: tcl.ac,v 12.2 2005/06/28 20:45:25 gmf Exp $ - -# The SC_* macros in this file are from the unix/tcl.m4 files in the Tcl -# 8.3.0 distribution, with some minor changes. For this reason, license -# terms for the Berkeley DB distribution dist/aclocal/tcl.m4 file are as -# follows (copied from the license.terms file in the Tcl 8.3 distribution): -# -# This software is copyrighted by the Regents of the University of -# California, Sun Microsystems, Inc., Scriptics Corporation, -# and other parties. The following terms apply to all files associated -# with the software unless explicitly disclaimed in individual files. -# -# The authors hereby grant permission to use, copy, modify, distribute, -# and license this software and its documentation for any purpose, provided -# that existing copyright notices are retained in all copies and that this -# notice is included verbatim in any distributions. No written agreement, -# license, or royalty fee is required for any of the authorized uses. -# Modifications to this software may be copyrighted by their authors -# and need not follow the licensing terms described here, provided that -# the new terms are clearly indicated on the first page of each file where -# they apply. 
-# -# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY -# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES -# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY -# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# -# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, -# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE -# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE -# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR -# MODIFICATIONS. -# -# GOVERNMENT USE: If you are acquiring this software on behalf of the -# U.S. government, the Government shall have only "Restricted Rights" -# in the software and related documentation as defined in the Federal -# Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you -# are acquiring the software on behalf of the Department of Defense, the -# software shall be classified as "Commercial Computer Software" and the -# Government shall have only "Restricted Rights" as defined in Clause -# 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the -# authors grant the U.S. Government and others acting in its behalf -# permission to use and distribute the software in accordance with the -# terms specified in this license. - -AC_DEFUN(SC_PATH_TCLCONFIG, [ - AC_CACHE_VAL(ac_cv_c_tclconfig,[ - - # First check to see if --with-tclconfig was specified. - if test "${with_tclconfig}" != no; then - if test -f "${with_tclconfig}/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` - else - AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh]) - fi - fi - - # check in a few common install locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in `ls -d /usr/local/lib 2>/dev/null` ; do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i; pwd)` - break - fi - done - fi - - ]) - - if test x"${ac_cv_c_tclconfig}" = x ; then - TCL_BIN_DIR="# no Tcl configs found" - AC_MSG_ERROR(can't find Tcl configuration definitions) - else - TCL_BIN_DIR=${ac_cv_c_tclconfig} - fi -]) - -AC_DEFUN(SC_LOAD_TCLCONFIG, [ - AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh]) - - if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then - AC_MSG_RESULT([loading]) - . $TCL_BIN_DIR/tclConfig.sh - else - AC_MSG_RESULT([file not found]) - fi - - # DB requires at least version 8.4. - if test ${TCL_MAJOR_VERSION} -lt 8 \ - -o ${TCL_MAJOR_VERSION} -eq 8 -a ${TCL_MINOR_VERSION} -lt 4; then - AC_MSG_ERROR([Berkeley DB requires Tcl version 8.4 or better.]) - fi - - # The eval is required to do substitution (for example, the TCL_DBGX - # substitution in the TCL_LIB_FILE variable. - eval "TCL_INCLUDE_SPEC=\"${TCL_INCLUDE_SPEC}\"" - eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" - eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" - eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" - - # - # If the DB Tcl library isn't loaded with the Tcl spec and library - # flags on AIX, the resulting libdb_tcl-X.Y.so.0 will drop core at - # load time. [#4843] Furthermore, with Tcl 8.3, the link flags - # given by the Tcl spec are insufficient for our use. 
[#5779] - # - case "$host_os" in - aix4.[[2-9]].*) - LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG" - LIBTSO_LIBS="$LIBTSO_LIBS -L$TCL_EXEC_PREFIX/lib -ltcl$TCL_VERSION";; - aix*) - LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG";; - esac - AC_SUBST(TCL_BIN_DIR) - AC_SUBST(TCL_INCLUDE_SPEC) - AC_SUBST(TCL_LIB_FILE) - AC_SUBST(TCL_SRC_DIR) - - AC_SUBST(TCL_TCLSH) - TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}" -]) - -# Optional Tcl API. -AC_DEFUN(AM_TCL_LOAD, [ - if test `$LIBTOOL_PROG --config | grep build_libtool_libs | grep no` 2>/dev/null; then - AC_MSG_ERROR([Tcl requires shared libraries]) - fi - - SC_PATH_TCLCONFIG - SC_LOAD_TCLCONFIG - - INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)" -]) diff --git a/storage/bdb/dist/aclocal/types.ac b/storage/bdb/dist/aclocal/types.ac deleted file mode 100644 index f9291386dc3..00000000000 --- a/storage/bdb/dist/aclocal/types.ac +++ /dev/null @@ -1,167 +0,0 @@ -# $Id: types.ac,v 12.3 2005/11/03 17:46:14 bostic Exp $ - -# Check the sizes we know about, and see if any of them match what's needed. -# -# Prefer ints to anything else, because read, write and others historically -# returned an int. -AC_DEFUN(AM_SEARCH_USIZES, [ - case "$3" in - "$ac_cv_sizeof_unsigned_int") - $1="typedef unsigned int $2;";; - "$ac_cv_sizeof_unsigned_char") - $1="typedef unsigned char $2;";; - "$ac_cv_sizeof_unsigned_short") - $1="typedef unsigned short $2;";; - "$ac_cv_sizeof_unsigned_long") - $1="typedef unsigned long $2;";; - "$ac_cv_sizeof_unsigned_long_long") - $1="typedef unsigned long long $2;";; - *) - if test "$4" != "notfatal"; then - AC_MSG_ERROR([No unsigned $3-byte integral type]) - fi;; - esac]) -AC_DEFUN(AM_SEARCH_SSIZES, [ - case "$3" in - "$ac_cv_sizeof_int") - $1="typedef int $2;";; - "$ac_cv_sizeof_char") - $1="typedef char $2;";; - "$ac_cv_sizeof_short") - $1="typedef short $2;";; - "$ac_cv_sizeof_long") - $1="typedef long $2;";; - "$ac_cv_sizeof_long_long") - $1="typedef long long $2;";; - *) - if test "$4" != "notfatal"; then - AC_MSG_ERROR([No signed $3-byte integral type]) - fi;; - esac]) - -# Check for the standard system types. -AC_DEFUN(AM_TYPES, [ - -# db.h includes and , not the other default includes -# autoconf usually includes. For that reason, we specify a set of includes -# for all type checking tests. [#5060] -# -# C99 says types should be in ; include if it exists. -# -# Some systems have types in ; include if it exists. -# -# IBM's OS/390 and z/OS releases have types in not also found -# in ; include if it exists. -db_includes="#include " -AC_SUBST(inttypes_h_decl) -AC_CHECK_HEADER(inttypes.h, [ - db_includes="$db_includes -#include " - inttypes_h_decl="#include "]) -AC_SUBST(stdint_h_decl) -AC_CHECK_HEADER(stdint.h, [ - db_includes="$db_includes -#include " - stdint_h_decl="#include "]) -AC_SUBST(stddef_h_decl) -AC_CHECK_HEADER(stddef.h, [ - db_includes="$db_includes -#include " - stddef_h_decl="#include "]) -AC_SUBST(unistd_h_decl) -AC_CHECK_HEADER(unistd.h, [ - db_includes="$db_includes -#include " - unistd_h_decl="#include "]) -db_includes="$db_includes -#include " - -# We require off_t and size_t, and we don't try to substitute our own -# if we can't find them. -AC_CHECK_TYPE(off_t,, AC_MSG_ERROR([No off_t type.]), $db_includes) -AC_CHECK_TYPE(size_t,, AC_MSG_ERROR([No size_t type.]), $db_includes) - -# We need to know the sizes of various objects on this system. 
-AC_CHECK_SIZEOF(char,, $db_includes) -AC_CHECK_SIZEOF(unsigned char,, $db_includes) -AC_CHECK_SIZEOF(short,, $db_includes) -AC_CHECK_SIZEOF(unsigned short,, $db_includes) -AC_CHECK_SIZEOF(int,, $db_includes) -AC_CHECK_SIZEOF(unsigned int,, $db_includes) -AC_CHECK_SIZEOF(long,, $db_includes) -AC_CHECK_SIZEOF(unsigned long,, $db_includes) -AC_CHECK_SIZEOF(long long,, $db_includes) -AC_CHECK_SIZEOF(unsigned long long,, $db_includes) -AC_CHECK_SIZEOF(size_t,, $db_includes) -AC_CHECK_SIZEOF(char *,, $db_includes) - -# We look for u_char, u_short, u_int, u_long -- if we can't find them, -# we create our own. -AC_SUBST(u_char_decl) -AC_CHECK_TYPE(u_char,, - [u_char_decl="typedef unsigned char u_char;"], $db_includes) - -AC_SUBST(u_short_decl) -AC_CHECK_TYPE(u_short,, - [u_short_decl="typedef unsigned short u_short;"], $db_includes) - -AC_SUBST(u_int_decl) -AC_CHECK_TYPE(u_int,, - [u_int_decl="typedef unsigned int u_int;"], $db_includes) - -AC_SUBST(u_long_decl) -AC_CHECK_TYPE(u_long,, - [u_long_decl="typedef unsigned long u_long;"], $db_includes) - -# We look for fixed-size variants of u_char, u_short, u_int, u_long as well. -AC_SUBST(u_int8_decl) -AC_CHECK_TYPE(u_int8_t,, - [AM_SEARCH_USIZES(u_int8_decl, u_int8_t, 1)], $db_includes) - -AC_SUBST(u_int16_decl) -AC_CHECK_TYPE(u_int16_t,, - [AM_SEARCH_USIZES(u_int16_decl, u_int16_t, 2)], $db_includes) - -AC_SUBST(int16_decl) -AC_CHECK_TYPE(int16_t,, - [AM_SEARCH_SSIZES(int16_decl, int16_t, 2)], $db_includes) - -AC_SUBST(u_int32_decl) -AC_CHECK_TYPE(u_int32_t,, - [AM_SEARCH_USIZES(u_int32_decl, u_int32_t, 4)], $db_includes) - -AC_SUBST(int32_decl) -AC_CHECK_TYPE(int32_t,, - [AM_SEARCH_SSIZES(int32_decl, int32_t, 4)], $db_includes) - -AC_SUBST(u_int64_decl) -AC_CHECK_TYPE(u_int64_t,, - [AM_SEARCH_USIZES(u_int64_decl, u_int64_t, 8, notfatal)], $db_includes) - -AC_SUBST(int64_decl) -AC_CHECK_TYPE(int64_t,, - [AM_SEARCH_SSIZES(int64_decl, int64_t, 8, notfatal)], $db_includes) - -# Check for ssize_t -- if none exists, find a signed integral type that's -# the same size as a size_t. -AC_SUBST(ssize_t_decl) -AC_CHECK_TYPE(ssize_t,, - [AM_SEARCH_SSIZES(ssize_t_decl, ssize_t, $ac_cv_sizeof_size_t)], - $db_includes) - -# So far, no autoconf'd systems lack pid_t. -AC_SUBST(pid_t_decl) - -# Check for uintmax_t -- if none exists, first the largest unsigned integral -# type available. -AC_SUBST(uintmax_t_decl) -AC_CHECK_TYPE(uintmax_t,, [AC_CHECK_TYPE(unsigned long long, - [uintmax_t_decl="typedef unsigned long long uintmax_t;"], - [uintmax_t_decl="typedef unsigned long uintmax_t;"], $db_includes)]) - -# Check for uintptr_t -- if none exists, find an integral type which is -# the same size as a pointer. -AC_SUBST(uintptr_t_decl) -AC_CHECK_TYPE(uintptr_t,, - [AM_SEARCH_USIZES(uintptr_t_decl, uintptr_t, $ac_cv_sizeof_char_p)]) -]) diff --git a/storage/bdb/dist/aclocal_java/ac_check_class.ac b/storage/bdb/dist/aclocal_java/ac_check_class.ac deleted file mode 100644 index b12e7f02f9a..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_check_class.ac +++ /dev/null @@ -1,107 +0,0 @@ -dnl @synopsis AC_CHECK_CLASS -dnl -dnl AC_CHECK_CLASS tests the existence of a given Java class, either in -dnl a jar or in a '.class' file. -dnl -dnl *Warning*: its success or failure can depend on a proper setting of the -dnl CLASSPATH env. variable. -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. 
Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. -dnl -dnl @author Stephane Bortzmeyer -dnl @version $Id: ac_check_class.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_CHECK_CLASS],[ -AC_REQUIRE([AC_PROG_JAVA]) -ac_var_name=`echo $1 | sed 's/\./_/g'` -dnl Normaly I'd use a AC_CACHE_CHECK here but since the variable name is -dnl dynamic I need an extra level of extraction -AC_MSG_CHECKING([for $1 class]) -AC_CACHE_VAL(ac_cv_class_$ac_var_name, [ -if test x$ac_cv_prog_uudecode_base64 = xyes; then -dnl /** -dnl * Test.java: used to test dynamicaly if a class exists. -dnl */ -dnl public class Test -dnl { -dnl -dnl public static void -dnl main( String[] argv ) -dnl { -dnl Class lib; -dnl if (argv.length < 1) -dnl { -dnl System.err.println ("Missing argument"); -dnl System.exit (77); -dnl } -dnl try -dnl { -dnl lib = Class.forName (argv[0]); -dnl } -dnl catch (ClassNotFoundException e) -dnl { -dnl System.exit (1); -dnl } -dnl lib = null; -dnl System.exit (0); -dnl } -dnl -dnl } -cat << \EOF > Test.uue -begin-base64 644 Test.class -yv66vgADAC0AKQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE -bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51 -bWJlclRhYmxlDAAKAAsBAANlcnIBABVMamF2YS9pby9QcmludFN0cmVhbTsJ -AA0ACQcADgEAEGphdmEvbGFuZy9TeXN0ZW0IABABABBNaXNzaW5nIGFyZ3Vt -ZW50DAASABMBAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWCgAV -ABEHABYBABNqYXZhL2lvL1ByaW50U3RyZWFtDAAYABkBAARleGl0AQAEKEkp -VgoADQAXDAAcAB0BAAdmb3JOYW1lAQAlKExqYXZhL2xhbmcvU3RyaW5nOylM -amF2YS9sYW5nL0NsYXNzOwoAHwAbBwAgAQAPamF2YS9sYW5nL0NsYXNzBwAi -AQAgamF2YS9sYW5nL0NsYXNzTm90Rm91bmRFeGNlcHRpb24BAAY8aW5pdD4B -AAMoKVYMACMAJAoAAwAlAQAKU291cmNlRmlsZQEACVRlc3QuamF2YQAhAAEA -AwAAAAAAAgAJAAUABgABAAcAAABtAAMAAwAAACkqvgSiABCyAAwSD7YAFBBN -uAAaKgMyuAAeTKcACE0EuAAaAUwDuAAasQABABMAGgAdACEAAQAIAAAAKgAK -AAAACgAAAAsABgANAA4ADgATABAAEwASAB4AFgAiABgAJAAZACgAGgABACMA -JAABAAcAAAAhAAEAAQAAAAUqtwAmsQAAAAEACAAAAAoAAgAAAAQABAAEAAEA -JwAAAAIAKA== -==== -EOF - if uudecode$EXEEXT Test.uue; then - : - else - echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC - echo "configure: failed file was:" >&AC_FD_CC - cat Test.uue >&AC_FD_CC - ac_cv_prog_uudecode_base64=no - fi - rm -f Test.uue - if AC_TRY_COMMAND($JAVA $JAVAFLAGS Test $1) >/dev/null 2>&1; then - eval "ac_cv_class_$ac_var_name=yes" - else - eval "ac_cv_class_$ac_var_name=no" - fi - rm -f Test.class -else - AC_TRY_COMPILE_JAVA([$1], , [eval "ac_cv_class_$ac_var_name=yes"], - [eval "ac_cv_class_$ac_var_name=no"]) -fi -eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`" -eval "HAVE_$ac_var_name=$`echo ac_cv_class_$ac_var_val`" -HAVE_LAST_CLASS=$ac_var_val -if test x$ac_var_val = xyes; then - ifelse([$2], , :, [$2]) -else - ifelse([$3], , :, [$3]) -fi -]) -dnl for some reason the above statment didn't fall though here? -dnl do scripts have variable scoping? -eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`" -AC_MSG_RESULT($ac_var_val) -]) diff --git a/storage/bdb/dist/aclocal_java/ac_check_classpath.ac b/storage/bdb/dist/aclocal_java/ac_check_classpath.ac deleted file mode 100644 index b18d479b3f1..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_check_classpath.ac +++ /dev/null @@ -1,23 +0,0 @@ -dnl @synopsis AC_CHECK_CLASSPATH -dnl -dnl AC_CHECK_CLASSPATH just displays the CLASSPATH, for the edification -dnl of the user. 
-dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. -dnl -dnl @author Stephane Bortzmeyer -dnl @version $Id: ac_check_classpath.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_CHECK_CLASSPATH],[ -if test "x$CLASSPATH" = x; then - echo "You have no CLASSPATH, I hope it is good" -else - echo "You have CLASSPATH $CLASSPATH, hope it is correct" -fi -]) diff --git a/storage/bdb/dist/aclocal_java/ac_check_junit.ac b/storage/bdb/dist/aclocal_java/ac_check_junit.ac deleted file mode 100644 index cc02e327662..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_check_junit.ac +++ /dev/null @@ -1,54 +0,0 @@ -dnl @synopsis AC_CHECK_JUNIT -dnl -dnl AC_CHECK_JUNIT tests the availability of the Junit testing -dnl framework, and set some variables for conditional compilation -dnl of the test suite by automake. -dnl -dnl If available, JUNIT is set to a command launching the text -dnl based user interface of Junit, @JAVA_JUNIT@ is set to $JAVA_JUNIT -dnl and @TESTS_JUNIT@ is set to $TESTS_JUNIT, otherwise they are set -dnl to empty values. -dnl -dnl You can use these variables in your Makefile.am file like this : -dnl -dnl # Some of the following classes are built only if junit is available -dnl JAVA_JUNIT = Class1Test.java Class2Test.java AllJunitTests.java -dnl -dnl noinst_JAVA = Example1.java Example2.java @JAVA_JUNIT@ -dnl -dnl EXTRA_JAVA = $(JAVA_JUNIT) -dnl -dnl TESTS_JUNIT = AllJunitTests -dnl -dnl TESTS = StandaloneTest1 StandaloneTest2 @TESTS_JUNIT@ -dnl -dnl EXTRA_TESTS = $(TESTS_JUNIT) -dnl -dnl AllJunitTests : -dnl echo "#! /bin/sh" > $@ -dnl echo "exec @JUNIT@ my.package.name.AllJunitTests" >> $@ -dnl chmod +x $@ -dnl -dnl @author Luc Maisonobe -dnl @version $Id: ac_check_junit.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_CHECK_JUNIT],[ -AC_CACHE_VAL(ac_cv_prog_JUNIT,[ -AC_CHECK_CLASS(junit.textui.TestRunner) -if test x"`eval 'echo $ac_cv_class_junit_textui_TestRunner'`" != xno ; then - ac_cv_prog_JUNIT='$(CLASSPATH_ENV) $(JAVA) $(JAVAFLAGS) junit.textui.TestRunner' -fi]) -AC_MSG_CHECKING([for junit]) -if test x"`eval 'echo $ac_cv_prog_JUNIT'`" != x ; then - JUNIT="$ac_cv_prog_JUNIT" - JAVA_JUNIT='$(JAVA_JUNIT)' - TESTS_JUNIT='$(TESTS_JUNIT)' -else - JUNIT= - JAVA_JUNIT= - TESTS_JUNIT= -fi -AC_MSG_RESULT($JAVA_JUNIT) -AC_SUBST(JUNIT) -AC_SUBST(JAVA_JUNIT) -AC_SUBST(TESTS_JUNIT)]) diff --git a/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac b/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac deleted file mode 100644 index c7c26b87741..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac +++ /dev/null @@ -1,26 +0,0 @@ -dnl @synopsis AC_CHECK_RQRD_CLASS -dnl -dnl AC_CHECK_RQRD_CLASS tests the existence of a given Java class, either in -dnl a jar or in a '.class' file and fails if it doesn't exist. -dnl Its success or failure can depend on a proper setting of the -dnl CLASSPATH env. variable. -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. 
Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. -dnl -dnl @author Stephane Bortzmeyer -dnl @version $Id: ac_check_rqrd_class.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl - -AC_DEFUN([AC_CHECK_RQRD_CLASS],[ -CLASS=`echo $1|sed 's/\./_/g'` -AC_CHECK_CLASS($1) -if test "$HAVE_LAST_CLASS" = "no"; then - AC_MSG_ERROR([Required class $1 missing, exiting.]) -fi -]) diff --git a/storage/bdb/dist/aclocal_java/ac_java_options.ac b/storage/bdb/dist/aclocal_java/ac_java_options.ac deleted file mode 100644 index e71adfe68b5..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_java_options.ac +++ /dev/null @@ -1,32 +0,0 @@ -dnl @synopsis AC_JAVA_OPTIONS -dnl -dnl AC_JAVA_OPTIONS adds configure command line options used for Java m4 -dnl macros. This Macro is optional. -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. -dnl -dnl @author Devin Weaver -dnl @version $Id: ac_java_options.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_JAVA_OPTIONS],[ -AC_ARG_WITH(java-prefix, - [ --with-java-prefix=PFX prefix where Java runtime is installed (optional)]) -AC_ARG_WITH(javac-flags, - [ --with-javac-flags=FLAGS flags to pass to the Java compiler (optional)]) -AC_ARG_WITH(java-flags, - [ --with-java-flags=FLAGS flags to pass to the Java VM (optional)]) -JAVAPREFIX=$with_java_prefix -JAVACFLAGS=$with_javac_flags -JAVAFLAGS=$with_java_flags -AC_SUBST(JAVAPREFIX)dnl -AC_SUBST(JAVACFLAGS)dnl -AC_SUBST(JAVAFLAGS)dnl -AC_SUBST(JAVA)dnl -AC_SUBST(JAVAC)dnl -]) diff --git a/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac b/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac deleted file mode 100644 index 35cdda383c3..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac +++ /dev/null @@ -1,114 +0,0 @@ -dnl @synopsis AC_JNI_INCLUDE_DIR -dnl -dnl AC_JNI_INCLUDE_DIR finds include directories needed -dnl for compiling programs using the JNI interface. -dnl -dnl JNI include directories are usually in the java distribution -dnl This is deduced from the value of JAVAC. When this macro -dnl completes, a list of directories is left in the variable -dnl JNI_INCLUDE_DIRS. -dnl -dnl Example usage follows: -dnl -dnl AC_JNI_INCLUDE_DIR -dnl -dnl for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS -dnl do -dnl CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR" -dnl done -dnl -dnl If you want to force a specific compiler: -dnl -dnl - at the configure.in level, set JAVAC=yourcompiler before calling -dnl AC_JNI_INCLUDE_DIR -dnl -dnl - at the configure level, setenv JAVAC -dnl -dnl Note: This macro can work with the autoconf M4 macros for Java programs. -dnl This particular macro is not part of the original set of macros. -dnl -dnl @author Don Anderson -dnl @version $Id: ac_jni_include_dirs.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN(AC_JNI_INCLUDE_DIR,[ - -JNI_INCLUDE_DIRS="" - -test "x$JAVAC" = x && AC_MSG_ERROR(['$JAVAC' undefined]) -AC_PATH_PROG(_ACJNI_JAVAC, $JAVAC, $JAVAC) -test ! 
-x "$_ACJNI_JAVAC" && AC_MSG_ERROR([$JAVAC could not be found in path]) -AC_MSG_CHECKING(absolute path of $JAVAC) -case "$_ACJNI_JAVAC" in -/*) AC_MSG_RESULT($_ACJNI_JAVAC);; -*) AC_MSG_ERROR([$_ACJNI_JAVAC is not an absolute path name]);; -esac - -_ACJNI_FOLLOW_SYMLINKS("$_ACJNI_JAVAC") -_JTOPDIR=`echo "$_ACJNI_FOLLOWED" | sed -e 's://*:/:g' -e 's:/[[^/]]*$::'` -case "$host_os" in - darwin*) _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'` - _JINC="$_JTOPDIR/Headers";; - *) _JINC="$_JTOPDIR/include";; -esac - -# If we find jni.h in /usr/include, then it's not a java-only tree, so -# don't add /usr/include or subdirectories to the list of includes. -# An extra -I/usr/include can foul things up with newer gcc's. -# -# If we don't find jni.h, just keep going. Hopefully javac knows where -# to find its include files, even if we can't. -if test -r "$_JINC/jni.h"; then - if test "$_JINC" != "/usr/include"; then - JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JINC" - fi -else - _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'` - if test -r "$_JTOPDIR/include/jni.h"; then - if test "$_JTOPDIR" != "/usr"; then - JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include" - fi - fi -fi - -# get the likely subdirectories for system specific java includes -if test "$_JTOPDIR" != "/usr"; then - case "$host_os" in - aix*) _JNI_INC_SUBDIRS="aix";; - bsdi*) _JNI_INC_SUBDIRS="bsdos";; - freebsd*) _JNI_INC_SUBDIRS="freebsd";; - hp*) _JNI_INC_SUBDIRS="hp-ux";; - linux*) _JNI_INC_SUBDIRS="linux genunix";; - osf*) _JNI_INC_SUBDIRS="alpha";; - solaris*) _JNI_INC_SUBDIRS="solaris";; - *) _JNI_INC_SUBDIRS="genunix";; - esac -fi - -# add any subdirectories that are present -for _JINCSUBDIR in $_JNI_INC_SUBDIRS -do - if test -d "$_JTOPDIR/include/$_JINCSUBDIR"; then - JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include/$_JINCSUBDIR" - fi -done -]) - -# _ACJNI_FOLLOW_SYMLINKS -# Follows symbolic links on , -# finally setting variable _ACJNI_FOLLOWED -# -------------------- -AC_DEFUN(_ACJNI_FOLLOW_SYMLINKS,[ -# find the include directory relative to the javac executable -_cur="$1" -while ls -ld "$_cur" 2>/dev/null | grep " -> " >/dev/null; do - AC_MSG_CHECKING(symlink for $_cur) - _slink=`ls -ld "$_cur" | sed 's/.* -> //'` - case "$_slink" in - /*) _cur="$_slink";; - # 'X' avoids triggering unwanted echo options. - *) _cur=`echo "X$_cur" | sed -e 's/^X//' -e 's:[[^/]]*$::'`"$_slink";; - esac - AC_MSG_RESULT($_cur) -done -_ACJNI_FOLLOWED="$_cur" -])# _ACJNI diff --git a/storage/bdb/dist/aclocal_java/ac_prog_jar.ac b/storage/bdb/dist/aclocal_java/ac_prog_jar.ac deleted file mode 100644 index c60a79a859d..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_prog_jar.ac +++ /dev/null @@ -1,36 +0,0 @@ -dnl @synopsis AC_PROG_JAR -dnl -dnl AC_PROG_JAR tests for an existing jar program. It uses the environment -dnl variable JAR then tests in sequence various common jar programs. -dnl -dnl If you want to force a specific compiler: -dnl -dnl - at the configure.in level, set JAR=yourcompiler before calling -dnl AC_PROG_JAR -dnl -dnl - at the configure level, setenv JAR -dnl -dnl You can use the JAR variable in your Makefile.in, with @JAR@. -dnl -dnl Note: This macro depends on the autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download that whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. 
-dnl -dnl The general documentation of those macros, as well as the sample -dnl configure.in, is included in the AC_PROG_JAVA macro. -dnl -dnl @author Egon Willighagen -dnl @version $Id: ac_prog_jar.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_PROG_JAR],[ -AC_REQUIRE([AC_EXEEXT])dnl -if test "x$JAVAPREFIX" = x; then - test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar$EXEEXT) -else - test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar, $JAVAPREFIX) -fi -test "x$JAR" = x && AC_MSG_ERROR([no acceptable jar program found in \$PATH]) -AC_PROVIDE([$0])dnl -]) diff --git a/storage/bdb/dist/aclocal_java/ac_prog_java.ac b/storage/bdb/dist/aclocal_java/ac_prog_java.ac deleted file mode 100644 index a011b0a9f5a..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_prog_java.ac +++ /dev/null @@ -1,79 +0,0 @@ -dnl @synopsis AC_PROG_JAVA -dnl -dnl Here is a summary of the main macros: -dnl -dnl AC_PROG_JAVAC: finds a Java compiler. -dnl -dnl AC_PROG_JAVA: finds a Java virtual machine. -dnl -dnl AC_CHECK_CLASS: finds if we have the given class (beware of CLASSPATH!). -dnl -dnl AC_CHECK_RQRD_CLASS: finds if we have the given class and stops otherwise. -dnl -dnl AC_TRY_COMPILE_JAVA: attempt to compile user given source. -dnl -dnl AC_TRY_RUN_JAVA: attempt to compile and run user given source. -dnl -dnl AC_JAVA_OPTIONS: adds Java configure options. -dnl -dnl AC_PROG_JAVA tests an existing Java virtual machine. It uses the -dnl environment variable JAVA then tests in sequence various common Java -dnl virtual machines. For political reasons, it starts with the free ones. -dnl You *must* call [AC_PROG_JAVAC] before. -dnl -dnl If you want to force a specific VM: -dnl -dnl - at the configure.in level, set JAVA=yourvm before calling AC_PROG_JAVA -dnl (but after AC_INIT) -dnl -dnl - at the configure level, setenv JAVA -dnl -dnl You can use the JAVA variable in your Makefile.in, with @JAVA@. -dnl -dnl *Warning*: its success or failure can depend on a proper setting of the -dnl CLASSPATH env. variable. -dnl -dnl TODO: allow to exclude virtual machines (rationale: most Java programs -dnl cannot run with some VM like kaffe). -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl -dnl A Web page, with a link to the latest CVS snapshot is at -dnl . -dnl -dnl This is a sample configure.in -dnl Process this file with autoconf to produce a configure script. -dnl -dnl AC_INIT(UnTag.java) -dnl -dnl dnl Checks for programs. -dnl AC_CHECK_CLASSPATH -dnl AC_PROG_JAVAC -dnl AC_PROG_JAVA -dnl -dnl dnl Checks for classes -dnl AC_CHECK_RQRD_CLASS(org.xml.sax.Parser) -dnl AC_CHECK_RQRD_CLASS(com.jclark.xml.sax.Driver) -dnl -dnl AC_OUTPUT(Makefile) -dnl -dnl @author Stephane Bortzmeyer -dnl @version $Id: ac_prog_java.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -dnl Note: Modified by dda@sleepycat.com to prefer java over kaffe. 
[#8059] -dnl -AC_DEFUN([AC_PROG_JAVA],[ -AC_REQUIRE([AC_EXEEXT])dnl -if test x$JAVAPREFIX = x; then - test x$JAVA = x && AC_CHECK_PROGS(JAVA, java$EXEEXT kaffe$EXEEXT) -else - test x$JAVA = x && AC_CHECK_PROGS(JAVA, java$EXEEXT kaffe$EXEEXT, $JAVAPREFIX) -fi -test x$JAVA = x && AC_MSG_ERROR([no acceptable Java virtual machine found in \$PATH]) -AC_PROG_JAVA_WORKS -AC_PROVIDE([$0])dnl -]) diff --git a/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac b/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac deleted file mode 100644 index f0ff8c57f2f..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac +++ /dev/null @@ -1,97 +0,0 @@ -dnl @synopsis AC_PROG_JAVA_WORKS -dnl -dnl Internal use ONLY. -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. -dnl -dnl @author Stephane Bortzmeyer -dnl @version $Id: ac_prog_java_works.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_PROG_JAVA_WORKS], [ -AC_CHECK_PROG(uudecode, uudecode$EXEEXT, yes) -if test x$uudecode = xyes; then -AC_CACHE_CHECK([if uudecode can decode base 64 file], ac_cv_prog_uudecode_base64, [ -dnl /** -dnl * Test.java: used to test if java compiler works. -dnl */ -dnl public class Test -dnl { -dnl -dnl public static void -dnl main( String[] argv ) -dnl { -dnl System.exit (0); -dnl } -dnl -dnl } -cat << \EOF > Test.uue -begin-base64 644 Test.class -yv66vgADAC0AFQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE -bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51 -bWJlclRhYmxlDAAKAAsBAARleGl0AQAEKEkpVgoADQAJBwAOAQAQamF2YS9s -YW5nL1N5c3RlbQEABjxpbml0PgEAAygpVgwADwAQCgADABEBAApTb3VyY2VG -aWxlAQAJVGVzdC5qYXZhACEAAQADAAAAAAACAAkABQAGAAEABwAAACEAAQAB -AAAABQO4AAyxAAAAAQAIAAAACgACAAAACgAEAAsAAQAPABAAAQAHAAAAIQAB -AAEAAAAFKrcAErEAAAABAAgAAAAKAAIAAAAEAAQABAABABMAAAACABQ= -==== -EOF -if uudecode$EXEEXT Test.uue; then - ac_cv_prog_uudecode_base64=yes -else - echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC - echo "configure: failed file was:" >&AC_FD_CC - cat Test.uue >&AC_FD_CC - ac_cv_prog_uudecode_base64=no -fi -rm -f Test.uue]) -fi -if test x$ac_cv_prog_uudecode_base64 != xyes; then - rm -f Test.class - AC_MSG_WARN([I have to compile Test.class from scratch]) - if test x$ac_cv_prog_javac_works = xno; then - AC_MSG_ERROR([Cannot compile java source. 
$JAVAC does not work properly]) - fi - if test x$ac_cv_prog_javac_works = x; then - AC_PROG_JAVAC - fi -fi -AC_CACHE_CHECK(if $JAVA works, ac_cv_prog_java_works, [ -JAVA_TEST=Test.java -CLASS_TEST=Test.class -TEST=Test -changequote(, )dnl -cat << \EOF > $JAVA_TEST -/* [#]line __oline__ "configure" */ -public class Test { -public static void main (String args[]) { - System.exit (0); -} } -EOF -changequote([, ])dnl -if test x$ac_cv_prog_uudecode_base64 != xyes; then - if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) && test -s $CLASS_TEST; then - : - else - echo "configure: failed program was:" >&AC_FD_CC - cat $JAVA_TEST >&AC_FD_CC - AC_MSG_ERROR(The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)) - fi -fi -if AC_TRY_COMMAND($JAVA $JAVAFLAGS $TEST) >/dev/null 2>&1; then - ac_cv_prog_java_works=yes -else - echo "configure: failed program was:" >&AC_FD_CC - cat $JAVA_TEST >&AC_FD_CC - AC_MSG_ERROR(The Java VM $JAVA failed (see config.log, check the CLASSPATH?)) -fi -rm -fr $JAVA_TEST $CLASS_TEST Test.uue -]) -AC_PROVIDE([$0])dnl -] -) diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javac.ac b/storage/bdb/dist/aclocal_java/ac_prog_javac.ac deleted file mode 100644 index b3607dcf842..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_prog_javac.ac +++ /dev/null @@ -1,43 +0,0 @@ -dnl @synopsis AC_PROG_JAVAC -dnl -dnl AC_PROG_JAVAC tests an existing Java compiler. It uses the environment -dnl variable JAVAC then tests in sequence various common Java compilers. For -dnl political reasons, it starts with the free ones. -dnl -dnl If you want to force a specific compiler: -dnl -dnl - at the configure.in level, set JAVAC=yourcompiler before calling -dnl AC_PROG_JAVAC -dnl -dnl - at the configure level, setenv JAVAC -dnl -dnl You can use the JAVAC variable in your Makefile.in, with @JAVAC@. -dnl -dnl *Warning*: its success or failure can depend on a proper setting of the -dnl CLASSPATH env. variable. -dnl -dnl TODO: allow to exclude compilers (rationale: most Java programs cannot compile -dnl with some compilers like guavac). -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. -dnl -dnl @author Stephane Bortzmeyer -dnl @version $Id: ac_prog_javac.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_PROG_JAVAC],[ -AC_REQUIRE([AC_EXEEXT])dnl -if test "x$JAVAPREFIX" = x; then - test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT) -else - test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT, $JAVAPREFIX) -fi -test "x$JAVAC" = x && AC_MSG_ERROR([no acceptable Java compiler found in \$PATH]) -AC_PROG_JAVAC_WORKS -AC_PROVIDE([$0])dnl -]) diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac b/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac deleted file mode 100644 index 0cfd1f2137f..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac +++ /dev/null @@ -1,35 +0,0 @@ -dnl @synopsis AC_PROG_JAVAC_WORKS -dnl -dnl Internal use ONLY. -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. 
-dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. -dnl -dnl @author Stephane Bortzmeyer -dnl @version $Id: ac_prog_javac_works.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_PROG_JAVAC_WORKS],[ -AC_CACHE_CHECK([if $JAVAC works], ac_cv_prog_javac_works, [ -JAVA_TEST=Test.java -CLASS_TEST=Test.class -cat << \EOF > $JAVA_TEST -/* [#]line __oline__ "configure" */ -public class Test { -} -EOF -if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) >/dev/null 2>&1; then - ac_cv_prog_javac_works=yes -else - AC_MSG_ERROR([The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)]) - echo "configure: failed program was:" >&AC_FD_CC - cat $JAVA_TEST >&AC_FD_CC -fi -rm -f $JAVA_TEST $CLASS_TEST -]) -AC_PROVIDE([$0])dnl -]) diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac b/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac deleted file mode 100644 index 36b95bd00a3..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac +++ /dev/null @@ -1,37 +0,0 @@ -dnl @synopsis AC_PROG_JAVADOC -dnl -dnl AC_PROG_JAVADOC tests for an existing javadoc generator. It uses the environment -dnl variable JAVADOC then tests in sequence various common javadoc generator. -dnl -dnl If you want to force a specific compiler: -dnl -dnl - at the configure.in level, set JAVADOC=yourgenerator before calling -dnl AC_PROG_JAVADOC -dnl -dnl - at the configure level, setenv JAVADOC -dnl -dnl You can use the JAVADOC variable in your Makefile.in, with @JAVADOC@. -dnl -dnl Note: This macro depends on the autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download that whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl -dnl The general documentation of those macros, as well as the sample -dnl configure.in, is included in the AC_PROG_JAVA macro. -dnl -dnl @author Egon Willighagen -dnl @version $Id: ac_prog_javadoc.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_PROG_JAVADOC],[ -AC_REQUIRE([AC_EXEEXT])dnl -if test "x$JAVAPREFIX" = x; then - test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc$EXEEXT) -else - test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc, $JAVAPREFIX) -fi -test "x$JAVADOC" = x && AC_MSG_ERROR([no acceptable javadoc generator found in \$PATH]) -AC_PROVIDE([$0])dnl -]) - diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javah.ac b/storage/bdb/dist/aclocal_java/ac_prog_javah.ac deleted file mode 100644 index 7563036c091..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_prog_javah.ac +++ /dev/null @@ -1,26 +0,0 @@ -dnl @synopsis AC_PROG_JAVAH -dnl -dnl AC_PROG_JAVAH tests the availability of the javah header generator -dnl and looks for the jni.h header file. If available, JAVAH is set to -dnl the full path of javah and CPPFLAGS is updated accordingly. 
-dnl -dnl @author Luc Maisonobe -dnl @version $Id: ac_prog_javah.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_PROG_JAVAH],[ -AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl -AC_REQUIRE([AC_PROG_CPP])dnl -AC_PATH_PROG(JAVAH,javah) -if test x"`eval 'echo $ac_cv_path_JAVAH'`" != x ; then - AC_TRY_CPP([#include ],,[ - ac_save_CPPFLAGS="$CPPFLAGS" -changequote(, )dnl - ac_dir=`echo $ac_cv_path_JAVAH | sed 's,\(.*\)/[^/]*/[^/]*$,\1/include,'` - ac_machdep=`echo $build_os | sed 's,[-0-9].*,,'` -changequote([, ])dnl - CPPFLAGS="$ac_save_CPPFLAGS -I$ac_dir -I$ac_dir/$ac_machdep" - AC_TRY_CPP([#include ], - ac_save_CPPFLAGS="$CPPFLAGS", - AC_MSG_WARN([unable to include ])) - CPPFLAGS="$ac_save_CPPFLAGS"]) -fi]) diff --git a/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac b/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac deleted file mode 100644 index d22aeab42f1..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac +++ /dev/null @@ -1,39 +0,0 @@ -dnl @synopsis AC_TRY_COMPILE_JAVA -dnl -dnl AC_TRY_COMPILE_JAVA attempt to compile user given source. -dnl -dnl *Warning*: its success or failure can depend on a proper setting of the -dnl CLASSPATH env. variable. -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. -dnl -dnl @author Devin Weaver -dnl @version $Id: ac_try_compile_java.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_TRY_COMPILE_JAVA],[ -AC_REQUIRE([AC_PROG_JAVAC])dnl -cat << \EOF > Test.java -/* [#]line __oline__ "configure" */ -ifelse([$1], , , [import $1;]) -public class Test { -[$2] -} -EOF -if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class -then -dnl Don't remove the temporary files here, so they can be examined. - ifelse([$3], , :, [$3]) -else - echo "configure: failed program was:" >&AC_FD_CC - cat Test.java >&AC_FD_CC -ifelse([$4], , , [ rm -fr Test* - $4 -])dnl -fi -rm -fr Test*]) diff --git a/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac b/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac deleted file mode 100644 index 01249358883..00000000000 --- a/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac +++ /dev/null @@ -1,40 +0,0 @@ -dnl @synopsis AC_TRY_RUN_JAVA -dnl -dnl AC_TRY_RUN_JAVA attempt to compile and run user given source. -dnl -dnl *Warning*: its success or failure can depend on a proper setting of the -dnl CLASSPATH env. variable. -dnl -dnl Note: This is part of the set of autoconf M4 macros for Java programs. -dnl It is VERY IMPORTANT that you download the whole set, some -dnl macros depend on other. Unfortunately, the autoconf archive does not -dnl support the concept of set of macros, so I had to break it for -dnl submission. -dnl The general documentation, as well as the sample configure.in, is -dnl included in the AC_PROG_JAVA macro. 
-dnl -dnl @author Devin Weaver -dnl @version $Id: ac_try_run_javac.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $ -dnl -AC_DEFUN([AC_TRY_RUN_JAVA],[ -AC_REQUIRE([AC_PROG_JAVAC])dnl -AC_REQUIRE([AC_PROG_JAVA])dnl -cat << \EOF > Test.java -/* [#]line __oline__ "configure" */ -ifelse([$1], , , [include $1;]) -public class Test { -[$2] -} -EOF -if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class && ($JAVA $JAVAFLAGS Test; exit) 2>/dev/null -then -dnl Don't remove the temporary files here, so they can be examined. - ifelse([$3], , :, [$3]) -else - echo "configure: failed program was:" >&AC_FD_CC - cat Test.java >&AC_FD_CC -ifelse([$4], , , [ rm -fr Test* - $4 -])dnl -fi -rm -fr Test*]) diff --git a/storage/bdb/dist/buildrel b/storage/bdb/dist/buildrel deleted file mode 100644 index 3d4121f902a..00000000000 --- a/storage/bdb/dist/buildrel +++ /dev/null @@ -1,128 +0,0 @@ -# $Id: buildrel,v 12.1 2005/10/25 00:27:35 bostic Exp $ -# -# Build the distribution package. -# -# A set of commands intended to be cut and pasted into a csh window. - -# Development tree, release home. -setenv D `pwd` - -# Update the release number. -cd $D/dist -cvs -q update RELEASE -vi RELEASE -setenv VERSION `sh -c '. RELEASE; echo $DB_VERSION'` -echo "Version: $VERSION" - -# Make sure the source tree is up-to-date -cd $D && cvs -q update - -# Build auto-generated files. -cd $D/dist && sh s_all - -# Commit all of the changes. -cd $D && cvs -q commit - -# Copy a development tree into a release tree. -setenv R /var/tmp/db-$VERSION -rm -rf $R && mkdir -p $R -cd $D && cvs -q status | \ - grep "Repository revision" | \ - sed -e 's;.*CVSROOT/db/;;' \ - -e 's;.*CVSROOT/;;' \ - -e 's;,v$;;' | pax -rw $R/ - -# Build the documentation, copy it into place. -cd db_docs && cvs -q update -cd db_docs && sh build $D clean && sh build $D |& sed '/.html$/d' -cd je/docs_src && sh build db ../../db -rm -rf $R/docs && cp -r $D/docs $R/docs - -# Remove source directories we don't distribute. -cd $R && rm -rf docs_src docs/api_java -cd $R && rm -rf test/TODO test/upgrade test_perf test_purify -cd $R && rm -rf test_rep test_server test_thread test_vxworks test_xa -cd $R && rm -rf java/src/com/sleepycat/xa - -# Fix symbolic links and permissions. -cd $R/dist && sh s_perm -cd $R/dist && sh s_symlink - -# Build a version and smoke test. -cd $R && rm -rf build_run && mkdir build_run -cd $R/build_run && ~bostic/bin/dbconf && make >& mklog -cd $R/build_run && make ex_access && ./ex_access - -# Check the install -cd $R/build_run && make prefix=`pwd`/BDB install - -# Build a small-footprint version and smoke test. -cd $R && rm -rf build_run && mkdir build_run -cd $R/build_run && ../dist/configure --enable-smallbuild && make >& mklog -cd $R/build_run && make ex_access && ./ex_access - -# Remove the build directory -cd $R && rm -rf build_run - -# ACQUIRE ROOT PRIVILEGES -cd $R && find . -type d | xargs chmod 775 -cd $R && find . -type f | xargs chmod 444 -cd $R && chmod 664 build_win32/*.dsp -cd $R/dist && sh s_perm -chown -R 100 $R -chgrp -R 100 $R -# DISCARD ROOT PRIVILEGES - -# Check for file names differing only in case. -cd $R && find . | sort -f | uniq -ic | sed '/1 /d' - -# Create the crypto tar archive release. -setenv T "$R/../db-$VERSION.tar.gz" -cd $R/.. && tar cf - db-$VERSION | gzip --best > $T -chmod 444 $T - -# Check the path length. -gzcat $T | tar tf - |\ -awk '{ if (length() > 99) print "Path length: " length() " bytes: " $0;}' - -# Create the non-crypto tree. 
-setenv RNC "$R/../db-$VERSION.NC" -rm -rf $RNC $R/../__TMP && mkdir $R/../__TMP -cd $R/../__TMP && gzcat $T | tar xpf - && mv -i db-$VERSION $RNC -cd $R && rm -rf $R/../__TMP -cd $RNC/dist && sh s_crypto - -# ACQUIRE ROOT PRIVILEGES -cd $RNC && find . -type d | xargs chmod 775 -cd $RNC && find . -type f | xargs chmod 444 -cd $RNC && chmod 664 build_win32/*.dsp -cd $RNC/dist && sh s_perm -chown -R 100 $RNC -chgrp -R 100 $RNC -# DISCARD ROOT PRIVILEGES - -# Create the non-crypto tar archive release. -setenv T "$R/../db-$VERSION.NC.tar.gz" -cd $RNC/.. && tar cf - db-$VERSION.NC | gzip --best > $T -chmod 444 $T - -# Check the path length. -gzcat $T | tar tf - |\ -awk '{ if (length() > 99) print "Path length: " length() " bytes: " $0;}' - -# Remove tags files. They're large and we don't want to store symbolic links -# in the zip archive for portability reasons. -# ACQUIRE ROOT PRIVILEGES -cd $R && rm -f `find . -name 'tags'` -cd $RNC && rm -f `find . -name 'tags'` -# DISCARD ROOT PRIVILEGES - -# Create the crypto zip archive release. -setenv T "$R/../db-$VERSION.zip" -cd $R/.. && zip -r - db-$VERSION > $T -chmod 444 $T - -# Create the non-crypto zip archive release. -setenv T "$R/../db-$VERSION.NC.zip" -cd $RNC/.. && zip -r - db-$VERSION.NC > $T -chmod 444 $T diff --git a/storage/bdb/dist/config.guess b/storage/bdb/dist/config.guess deleted file mode 100755 index d0d57f6945f..00000000000 --- a/storage/bdb/dist/config.guess +++ /dev/null @@ -1,1465 +0,0 @@ -#! /bin/sh -# Attempt to guess a canonical system name. -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. - -timestamp='2005-09-19' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA -# 02110-1301, USA. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - - -# Originally written by Per Bothner . -# Please send patches to . Submit a context -# diff and a properly formatted ChangeLog entry. -# -# This script attempts to guess a canonical system name similar to -# config.sub. If it succeeds, it prints the system name on stdout, and -# exits with 0. Otherwise, it exits with 1. -# -# The plan is that this can be called by configure scripts if you -# don't specify an explicit build system type. - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] - -Output the configuration name of the system \`$me' is run on. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.guess ($timestamp) - -Originally written by Per Bothner. 
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 -Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" >&2 - exit 1 ;; - * ) - break ;; - esac -done - -if test $# != 0; then - echo "$me: too many arguments$help" >&2 - exit 1 -fi - -trap 'exit 1' 1 2 15 - -# CC_FOR_BUILD -- compiler used by this script. Note that the use of a -# compiler to aid in system detection is discouraged as it requires -# temporary files to be created and, as you can see below, it is a -# headache to deal with in a portable fashion. - -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. - -# Portable tmp directory creation inspired by the Autoconf team. - -set_cc_for_build=' -trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; -trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; -: ${TMPDIR=/tmp} ; - { tmp=`(umask 077 && mktemp -d -q "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || - { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || - { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || - { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; -dummy=$tmp/dummy ; -tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; - for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then - CC_FOR_BUILD="$c"; break ; - fi ; - done ; - if test x"$CC_FOR_BUILD" = x ; then - CC_FOR_BUILD=no_compiler_found ; - fi - ;; - ,,*) CC_FOR_BUILD=$CC ;; - ,*,*) CC_FOR_BUILD=$HOST_CC ;; -esac ; set_cc_for_build= ;' - -# This is needed to find uname on a Pyramid OSx when run in the BSD universe. -# (ghazi@noc.rutgers.edu 1994-08-24) -if (test -f /.attbin/uname) >/dev/null 2>&1 ; then - PATH=$PATH:/.attbin ; export PATH -fi - -UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown -UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown -UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown - -# Note: order is significant - the case branches are not exclusive. - -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in - *:NetBSD:*:*) - # NetBSD (nbsd) targets should (where applicable) match one or - # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*, - # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently - # switched to ELF, *-*-netbsd* would select the old - # object file format. This provides both forward - # compatibility and a consistent mechanism for selecting the - # object file format. - # - # Note: NetBSD doesn't particularly care about the vendor - # portion of the name. We always set it to "unknown". 
- sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in - armeb) machine=armeb-unknown ;; - arm*) machine=arm-unknown ;; - sh3el) machine=shl-unknown ;; - sh3eb) machine=sh-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; - esac - # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in - arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build - if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep __ELF__ >/dev/null - then - # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). - # Return netbsd for either. FIX? - os=netbsd - else - os=netbsdelf - fi - ;; - *) - os=netbsd - ;; - esac - # The OS release - # Debian GNU/NetBSD machines have a different userland, and - # thus, need a distinct triplet. However, they do not need - # kernel version information, so it can be replaced with a - # suitable tag, in the style of linux-gnu. - case "${UNAME_VERSION}" in - Debian*) - release='-gnu' - ;; - *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` - ;; - esac - # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: - # contains redundant information, the shorter form: - # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. - echo "${machine}-${os}${release}" - exit ;; - *:OpenBSD:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} - exit ;; - *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} - exit ;; - macppc:MirBSD:*:*) - echo powerppc-unknown-mirbsd${UNAME_RELEASE} - exit ;; - *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} - exit ;; - alpha:OSF1:*:*) - case $UNAME_RELEASE in - *4.0) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` - ;; - *5.*) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` - ;; - esac - # According to Compaq, /usr/sbin/psrinfo has been available on - # OSF/1 and Tru64 systems produced since 1995. I hope that - # covers most systems running today. This code pipes the CPU - # types through head -n 1, so we only detect the type of CPU 0. - ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in - "EV4 (21064)") - UNAME_MACHINE="alpha" ;; - "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; - "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; - "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; - "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; - "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; - "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; - "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; - "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; - "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; - "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; - "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; - "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; - esac - # A Pn.n version is a patched version. - # A Vn.n version is a released version. - # A Tn.n version is a released field test version. - # A Xn.n version is an unreleased experimental baselevel. - # 1.2 uses "1.2" for uname -r. 
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - exit ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? - echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; - Amiga*:UNIX_System_V:4.0:*) - echo m68k-unknown-sysv4 - exit ;; - *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos - exit ;; - *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos - exit ;; - *:OS/390:*:*) - echo i370-ibm-openedition - exit ;; - *:z/VM:*:*) - echo s390-ibm-zvmoe - exit ;; - *:OS400:*:*) - echo powerpc-ibm-os400 - exit ;; - arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} - exit ;; - arm:riscos:*:*|arm:RISCOS:*:*) - echo arm-unknown-riscos - exit ;; - SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) - echo hppa1.1-hitachi-hiuxmpp - exit ;; - Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) - # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. - if test "`(/bin/universe) 2>/dev/null`" = att ; then - echo pyramid-pyramid-sysv3 - else - echo pyramid-pyramid-bsd - fi - exit ;; - NILE*:*:*:dcosx) - echo pyramid-pyramid-svr4 - exit ;; - DRS?6000:unix:4.0:6*) - echo sparc-icl-nx6 - exit ;; - DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) - case `/usr/bin/uname -p` in - sparc) echo sparc-icl-nx7; exit ;; - esac ;; - sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - i86pc:SunOS:5.*:*) - echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:6*:*) - # According to config.sub, this is the proper way to canonicalize - # SunOS6. Hard to guess exactly what SunOS6 will be like, but - # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in - Series*|S4*) - UNAME_RELEASE=`uname -v` - ;; - esac - # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` - exit ;; - sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} - exit ;; - sun*:*:4.2BSD:*) - UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 - case "`/bin/arch`" in - sun3) - echo m68k-sun-sunos${UNAME_RELEASE} - ;; - sun4) - echo sparc-sun-sunos${UNAME_RELEASE} - ;; - esac - exit ;; - aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} - exit ;; - # The situation for MiNT is a little confusing. The machine name - # can be virtually everything (everything which is not - # "atarist" or "atariste" at least should have a processor - # > m68000). The system name ranges from "MiNT" over "FreeMiNT" - # to the lowercase version "mint" (or "freemint"). Finally - # the system name "TOS" denotes a system which is actually not - # MiNT. But MiNT is downward compatible to TOS, so this should - # be no problem. 
- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} - exit ;; - hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} - exit ;; - *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} - exit ;; - m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} - exit ;; - powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} - exit ;; - RISC*:Mach:*:*) - echo mips-dec-mach_bsd4.3 - exit ;; - RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} - exit ;; - VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} - exit ;; - 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} - exit ;; - mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c -#ifdef __cplusplus -#include /* for printf() prototype */ - int main (int argc, char *argv[]) { -#else - int main (argc, argv) int argc; char *argv[]; { -#endif - #if defined (host_mips) && defined (MIPSEB) - #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); - #endif - #endif - exit (-1); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && - { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} - exit ;; - Motorola:PowerMAX_OS:*:*) - echo powerpc-motorola-powermax - exit ;; - Motorola:*:4.3:PL8-*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:Power_UNIX:*:*) - echo powerpc-harris-powerunix - exit ;; - m88k:CX/UX:7*:*) - echo m88k-harris-cxux7 - exit ;; - m88k:*:4*:R4*) - echo m88k-motorola-sysv4 - exit ;; - m88k:*:3*:R3*) - echo m88k-motorola-sysv3 - exit ;; - AViiON:dgux:*:*) - # DG/UX returns AViiON for all architectures - UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] - then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] - then - echo m88k-dg-dgux${UNAME_RELEASE} - else - echo m88k-dg-dguxbcs${UNAME_RELEASE} - fi - else - echo i586-dg-dgux${UNAME_RELEASE} - fi - exit ;; - M88*:DolphinOS:*:*) # DolphinOS (SVR3) - echo m88k-dolphin-sysv3 - exit ;; - M88*:*:R3*:*) - # Delta 88k system running SVR3 - echo m88k-motorola-sysv3 - exit ;; - XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) - echo m88k-tektronix-sysv3 - exit ;; - Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) - echo m68k-tektronix-bsd - exit ;; - *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` - exit ;; - ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' - i*86:AIX:*:*) - echo i386-ibm-aix - exit ;; - ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} - exit ;; - *:AIX:2:3) - if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - - main() - { - if (!__power_pc()) - exit(1); - puts("powerpc-ibm-aix3.2.5"); - exit(0); - } -EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` - then - echo "$SYSTEM_NAME" - else - echo rs6000-ibm-aix3.2.5 - fi - elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then - echo rs6000-ibm-aix3.2.4 - else - echo rs6000-ibm-aix3.2 - fi - exit ;; - *:AIX:*:[45]) - IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then - IBM_ARCH=rs6000 - else - IBM_ARCH=powerpc - fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} - exit ;; - *:AIX:*:*) - echo rs6000-ibm-aix - exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) - echo romp-ibm-bsd4.4 - exit ;; - ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to - exit ;; # report: romp-ibm BSD 4.3 - *:BOSX:*:*) - echo rs6000-bull-bosx - exit ;; - DPX/2?00:B.O.S.:*:*) - echo m68k-bull-sysv3 - exit ;; - 9000/[34]??:4.3bsd:1.*:*) - echo m68k-hp-bsd - exit ;; - hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) - echo m68k-hp-bsd4.4 - exit ;; - 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? ) HP_ARCH=m68k ;; - 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then - sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` - sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 - 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 - esac ;; - esac - fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - - #define _HPUX_SOURCE - #include - #include - - int main () - { - #if defined(_SC_KERNEL_BITS) - long bits = sysconf(_SC_KERNEL_BITS); - #endif - long cpu = sysconf (_SC_CPU_VERSION); - - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1"); break; - case CPU_PA_RISC2_0: - #if defined(_SC_KERNEL_BITS) - switch (bits) - { - case 64: puts ("hppa2.0w"); break; - case 32: puts ("hppa2.0n"); break; - default: puts ("hppa2.0"); break; - } break; - #else /* !defined(_SC_KERNEL_BITS) */ - puts ("hppa2.0"); break; - #endif - default: puts ("hppa1.0"); break; - } - exit (0); - } -EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` - test -z "$HP_ARCH" && HP_ARCH=hppa - fi ;; - esac - if [ ${HP_ARCH} = "hppa2.0w" ] - then - eval $set_cc_for_build - - # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating - # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler - # generating 64-bit code. 
GNU and HP use different nomenclature: - # - # $ CC_FOR_BUILD=cc ./config.guess - # => hppa2.0w-hp-hpux11.23 - # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess - # => hppa64-hp-hpux11.23 - - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | - grep __LP64__ >/dev/null - then - HP_ARCH="hppa2.0w" - else - HP_ARCH="hppa64" - fi - fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} - exit ;; - ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} - exit ;; - 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - int - main () - { - long cpu = sysconf (_SC_CPU_VERSION); - /* The order matters, because CPU_IS_HP_MC68K erroneously returns - true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct - results, however. */ - if (CPU_IS_PA_RISC (cpu)) - { - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; - case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; - default: puts ("hppa-hitachi-hiuxwe2"); break; - } - } - else if (CPU_IS_HP_MC68K (cpu)) - puts ("m68k-hitachi-hiuxwe2"); - else puts ("unknown-hitachi-hiuxwe2"); - exit (0); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - echo unknown-hitachi-hiuxwe2 - exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) - echo hppa1.1-hp-bsd - exit ;; - 9000/8??:4.3bsd:*:*) - echo hppa1.0-hp-bsd - exit ;; - *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) - echo hppa1.0-hp-mpeix - exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) - echo hppa1.1-hp-osf - exit ;; - hp8??:OSF1:*:*) - echo hppa1.0-hp-osf - exit ;; - i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk - else - echo ${UNAME_MACHINE}-unknown-osf1 - fi - exit ;; - parisc*:Lites*:*:*) - echo hppa1.1-hp-lites - exit ;; - C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) - echo c1-convex-bsd - exit ;; - C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) - echo c34-convex-bsd - exit ;; - C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) - echo c38-convex-bsd - exit ;; - C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) - echo c4-convex-bsd - exit ;; - CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ - | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ - -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ - -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` - echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 
's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` - echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} - exit ;; - sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:FreeBSD:*:*) - echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` - exit ;; - i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin - exit ;; - i*:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 - exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 - exit ;; - i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 - exit ;; - x86:Interix*:[34]*) - echo i586-pc-interix${UNAME_RELEASE}|sed -e 's/\..*//' - exit ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? - echo i586-pc-interix - exit ;; - i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin - exit ;; - amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-unknown-cygwin - exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; - prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - *:GNU:*:*) - # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` - exit ;; - *:GNU/*:*:*) - # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu - exit ;; - i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix - exit ;; - arm*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - cris:Linux:*:*) - echo cris-axis-linux-gnu - exit ;; - crisv32:Linux:*:*) - echo crisv32-axis-linux-gnu - exit ;; - frv:Linux:*:*) - echo frv-unknown-linux-gnu - exit ;; - ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - mips:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #undef CPU - #undef mips - #undef mipsel - #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) - CPU=mipsel - #else - #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) - CPU=mips - #else - CPU= - #endif - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } - ;; - mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #undef CPU - #undef mips64 - #undef mips64el - #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) - CPU=mips64el - #else - #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) - CPU=mips64 - #else - CPU= - #endif - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } - ;; - or32:Linux:*:*) - echo 
or32-unknown-linux-gnu - exit ;; - ppc:Linux:*:*) - echo powerpc-unknown-linux-gnu - exit ;; - ppc64:Linux:*:*) - echo powerpc64-unknown-linux-gnu - exit ;; - alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in - EV5) UNAME_MACHINE=alphaev5 ;; - EV56) UNAME_MACHINE=alphaev56 ;; - PCA56) UNAME_MACHINE=alphapca56 ;; - PCA57) UNAME_MACHINE=alphapca56 ;; - EV6) UNAME_MACHINE=alphaev6 ;; - EV67) UNAME_MACHINE=alphaev67 ;; - EV68*) UNAME_MACHINE=alphaev68 ;; - esac - objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null - if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi - echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} - exit ;; - parisc:Linux:*:* | hppa:Linux:*:*) - # Look for CPU level - case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-gnu ;; - PA8*) echo hppa2.0-unknown-linux-gnu ;; - *) echo hppa-unknown-linux-gnu ;; - esac - exit ;; - parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-gnu - exit ;; - s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux - exit ;; - sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - x86_64:Linux:*:*) - echo x86_64-unknown-linux-gnu - exit ;; - i*86:Linux:*:*) - # The BFD linker knows what the default object file format is, so - # first see if it will tell us. cd to the root directory to prevent - # problems with other programs or directories called `ld' in the path. - # Set LC_ALL=C to ensure ld outputs messages in English. - ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \ - | sed -ne '/supported targets:/!d - s/[ ][ ]*/ /g - s/.*supported targets: *// - s/ .*// - p'` - case "$ld_supported_targets" in - elf32-i386) - TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu" - ;; - a.out-i386-linux) - echo "${UNAME_MACHINE}-pc-linux-gnuaout" - exit ;; - coff-i386) - echo "${UNAME_MACHINE}-pc-linux-gnucoff" - exit ;; - "") - # Either a pre-BFD a.out linker (linux-gnuoldld) or - # one that does not give us useful --help. - echo "${UNAME_MACHINE}-pc-linux-gnuoldld" - exit ;; - esac - # Determine whether the default compiler is a.out or elf - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - #ifdef __ELF__ - # ifdef __GLIBC__ - # if __GLIBC__ >= 2 - LIBC=gnu - # else - LIBC=gnulibc1 - # endif - # else - LIBC=gnulibc1 - # endif - #else - #ifdef __INTEL_COMPILER - LIBC=gnu - #else - LIBC=gnuaout - #endif - #endif - #ifdef __dietlibc__ - LIBC=dietlibc - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=` - test x"${LIBC}" != x && { - echo "${UNAME_MACHINE}-pc-linux-${LIBC}" - exit - } - test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; } - ;; - i*86:DYNIX/ptx:4*:*) - # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. - # earlier versions are messed up and put the nodename in both - # sysname and nodename. - echo i386-sequent-sysv4 - exit ;; - i*86:UNIX_SV:4.2MP:2.*) - # Unixware is an offshoot of SVR4, but it has its own version - # number series starting with 2... - # I am not positive that other SVR4 systems won't match this, - # I just have to hope. -- rms. - # Use sysv4.2uw... so that sysv4* matches it. - echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} - exit ;; - i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility - # is probably installed. 
- echo ${UNAME_MACHINE}-pc-os2-emx - exit ;; - i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop - exit ;; - i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos - exit ;; - i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable - exit ;; - i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} - exit ;; - i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp - exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` - if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} - else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} - fi - exit ;; - i*86:*:5:[678]*) - # UnixWare 7.x, OpenUNIX and OpenServer 6. - case `/bin/uname -X | grep "^Machine"` in - *486*) UNAME_MACHINE=i486 ;; - *Pentium) UNAME_MACHINE=i586 ;; - *Pent*|*Celeron) UNAME_MACHINE=i686 ;; - esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} - exit ;; - i*86:*:3.2:*) - if test -f /usr/options/cb.name; then - UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then - UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` - (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 - (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ - && UNAME_MACHINE=i586 - (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ - && UNAME_MACHINE=i686 - (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ - && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL - else - echo ${UNAME_MACHINE}-pc-sysv32 - fi - exit ;; - pc:*:*:*) - # Left here for compatibility: - # uname -m prints for DJGPP always 'pc', but it prints nothing about - # the processor, so we play safe by assuming i386. - echo i386-pc-msdosdjgpp - exit ;; - Intel:Mach:3*:*) - echo i386-pc-mach3 - exit ;; - paragon:*:*:*) - echo i860-intel-osf1 - exit ;; - i860:*:4.*:*) # i860-SVR4 - if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 - else # Add other i860-SVR4 vendors below as they are discovered. 
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 - fi - exit ;; - mini*:CTIX:SYS*5:*) - # "miniframe" - echo m68010-convergent-sysv - exit ;; - mc68k:UNIX:SYSTEM5:3.51m) - echo m68k-convergent-sysv - exit ;; - M680?0:D-NIX:5.3:*) - echo m68k-diab-dnix - exit ;; - M68*:*:R3V[5678]*:*) - test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; - 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) - OS_REL='' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4; exit; } ;; - m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} - exit ;; - mc68030:UNIX_System_V:4.*:*) - echo m68k-atari-sysv4 - exit ;; - TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} - exit ;; - rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} - exit ;; - PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} - exit ;; - SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} - exit ;; - RM*:ReliantUNIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - RM*:SINIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - *:SINIX-*:*:*) - if uname -p 2>/dev/null >/dev/null ; then - UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 - else - echo ns32k-sni-sysv - fi - exit ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort - # says - echo i586-unisys-sysv4 - exit ;; - *:UNIX_System_V:4*:FTX*) - # From Gerald Hewes . - # How about differentiating between stratus architectures? -djm - echo hppa1.1-stratus-sysv4 - exit ;; - *:*:*:FTX*) - # From seanf@swdc.stratus.com. - echo i860-stratus-sysv4 - exit ;; - i*86:VOS:*:*) - # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos - exit ;; - *:VOS:*:*) - # From Paul.Green@stratus.com. - echo hppa1.1-stratus-vos - exit ;; - mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} - exit ;; - news*:NEWS-OS:6*:*) - echo mips-sony-newsos6 - exit ;; - R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} - else - echo mips-unknown-sysv${UNAME_RELEASE} - fi - exit ;; - BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. - echo powerpc-be-beos - exit ;; - BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. - echo powerpc-apple-beos - exit ;; - BePC:BeOS:*:*) # BeOS running on Intel PC compatible. 
- echo i586-pc-beos - exit ;; - SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} - exit ;; - SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} - exit ;; - SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} - exit ;; - Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Darwin:*:*) - UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - case $UNAME_PROCESSOR in - unknown) UNAME_PROCESSOR=powerpc ;; - esac - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} - exit ;; - *:procnto*:*:* | *:QNX:[0123456789]*:*) - UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then - UNAME_PROCESSOR=i386 - UNAME_MACHINE=pc - fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} - exit ;; - *:QNX:*:4*) - echo i386-pc-qnx - exit ;; - NSE-?:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} - exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} - exit ;; - *:NonStop-UX:*:*) - echo mips-compaq-nonstopux - exit ;; - BS2000:POSIX*:*:*) - echo bs2000-siemens-sysv - exit ;; - DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} - exit ;; - *:Plan9:*:*) - # "uname -m" is not consistent, so use $cputype instead. 386 - # is converted to i386 for consistency with other x86 - # operating systems. - if test "$cputype" = "386"; then - UNAME_MACHINE=i386 - else - UNAME_MACHINE="$cputype" - fi - echo ${UNAME_MACHINE}-unknown-plan9 - exit ;; - *:TOPS-10:*:*) - echo pdp10-unknown-tops10 - exit ;; - *:TENEX:*:*) - echo pdp10-unknown-tenex - exit ;; - KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) - echo pdp10-dec-tops20 - exit ;; - XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) - echo pdp10-xkl-tops20 - exit ;; - *:TOPS-20:*:*) - echo pdp10-unknown-tops20 - exit ;; - *:ITS:*:*) - echo pdp10-unknown-its - exit ;; - SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} - exit ;; - *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` - exit ;; - *:*VMS:*:*) - UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in - A*) echo alpha-dec-vms ; exit ;; - I*) echo ia64-dec-vms ; exit ;; - V*) echo vax-dec-vms ; exit ;; - esac ;; - *:XENIX:*:SysV) - echo i386-pc-xenix - exit ;; - i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' - exit ;; -esac - -#echo '(No uname command or uname output not recognized.)' 1>&2 -#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 - -eval $set_cc_for_build -cat >$dummy.c < -# include -#endif -main () -{ -#if defined (sony) -#if defined (MIPSEB) - /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, - I don't know.... 
*/ - printf ("mips-sony-bsd\n"); exit (0); -#else -#include - printf ("m68k-sony-newsos%s\n", -#ifdef NEWSOS4 - "4" -#else - "" -#endif - ); exit (0); -#endif -#endif - -#if defined (__arm) && defined (__acorn) && defined (__unix) - printf ("arm-acorn-riscix\n"); exit (0); -#endif - -#if defined (hp300) && !defined (hpux) - printf ("m68k-hp-bsd\n"); exit (0); -#endif - -#if defined (NeXT) -#if !defined (__ARCHITECTURE__) -#define __ARCHITECTURE__ "m68k" -#endif - int version; - version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; - if (version < 4) - printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); - else - printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); - exit (0); -#endif - -#if defined (MULTIMAX) || defined (n16) -#if defined (UMAXV) - printf ("ns32k-encore-sysv\n"); exit (0); -#else -#if defined (CMU) - printf ("ns32k-encore-mach\n"); exit (0); -#else - printf ("ns32k-encore-bsd\n"); exit (0); -#endif -#endif -#endif - -#if defined (__386BSD__) - printf ("i386-pc-bsd\n"); exit (0); -#endif - -#if defined (sequent) -#if defined (i386) - printf ("i386-sequent-dynix\n"); exit (0); -#endif -#if defined (ns32000) - printf ("ns32k-sequent-dynix\n"); exit (0); -#endif -#endif - -#if defined (_SEQUENT_) - struct utsname un; - - uname(&un); - - if (strncmp(un.version, "V2", 2) == 0) { - printf ("i386-sequent-ptx2\n"); exit (0); - } - if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ - printf ("i386-sequent-ptx1\n"); exit (0); - } - printf ("i386-sequent-ptx\n"); exit (0); - -#endif - -#if defined (vax) -# if !defined (ultrix) -# include -# if defined (BSD) -# if BSD == 43 - printf ("vax-dec-bsd4.3\n"); exit (0); -# else -# if BSD == 199006 - printf ("vax-dec-bsd4.3reno\n"); exit (0); -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# endif -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# else - printf ("vax-dec-ultrix\n"); exit (0); -# endif -#endif - -#if defined (alliant) && defined (i860) - printf ("i860-alliant-bsd\n"); exit (0); -#endif - - exit (1); -} -EOF - -$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - -# Apollos put the system type in the environment. - -test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } - -# Convex versions that predate uname can use getsysinfo(1) - -if [ -x /usr/convex/getsysinfo ] -then - case `getsysinfo -f cpu_type` in - c1*) - echo c1-convex-bsd - exit ;; - c2*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - c34*) - echo c34-convex-bsd - exit ;; - c38*) - echo c38-convex-bsd - exit ;; - c4*) - echo c4-convex-bsd - exit ;; - esac -fi - -cat >&2 < in order to provide the needed -information to handle your system. 
- -config.guess timestamp = $timestamp - -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null` - -hostinfo = `(hostinfo) 2>/dev/null` -/bin/universe = `(/bin/universe) 2>/dev/null` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` -/bin/arch = `(/bin/arch) 2>/dev/null` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` - -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} -EOF - -exit 1 - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/storage/bdb/dist/config.sub b/storage/bdb/dist/config.sub deleted file mode 100755 index 1c366dfde9a..00000000000 --- a/storage/bdb/dist/config.sub +++ /dev/null @@ -1,1579 +0,0 @@ -#! /bin/sh -# Configuration validation subroutine script. -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. - -timestamp='2005-07-08' - -# This file is (in principle) common to ALL GNU software. -# The presence of a machine in this file suggests that SOME GNU software -# can handle that machine. It does not imply ALL GNU software can. -# -# This file is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA -# 02110-1301, USA. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - - -# Please send patches to . Submit a context -# diff and a properly formatted ChangeLog entry. -# -# Configuration subroutine to validate and canonicalize a configuration type. -# Supply the specified configuration type as an argument. -# If it is invalid, we print an error message on stderr and exit with code 1. -# Otherwise, we print the canonical config type on stdout and succeed. - -# This file is supposed to be the same for all GNU packages -# and recognize all the CPU types, system types and aliases -# that are meaningful with *any* GNU software. -# Each package is responsible for reporting which valid configurations -# it does not support. The user should be able to distinguish -# a failure to support a valid configuration from a meaningless -# configuration. 
- -# The goal of this file is to map all the various variations of a given -# machine specification into a single specification in the form: -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or in some cases, the newer four-part form: -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# It is wrong to echo any other type of specification. - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS - -Canonicalize a configuration name. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.sub ($timestamp) - -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 -Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" - exit 1 ;; - - *local*) - # First pass through any local machine types. - echo $1 - exit ;; - - * ) - break ;; - esac -done - -case $# in - 0) echo "$me: missing argument$help" >&2 - exit 1;; - 1) ;; - *) echo "$me: too many arguments$help" >&2 - exit 1;; -esac - -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. -maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-dietlibc | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | \ - kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac - -### Let's recognize common machines as not being operating systems so -### that things like config.sub decstation-3100 work. We also -### recognize some manufacturers as not being operating systems, so we -### can provide default operating systems below. -case $os in - -sun*os*) - # Prevent following clause from handling this invalid input. 
- ;; - -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ - -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ - -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ - -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ - -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ - -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray) - os= - basic_machine=$1 - ;; - -sim | -cisco | -oki | -wec | -winbond) - os= - basic_machine=$1 - ;; - -scout) - ;; - -wrs) - os=-vxworks - basic_machine=$1 - ;; - -chorusos*) - os=-chorusos - basic_machine=$1 - ;; - -chorusrdb) - os=-chorusrdb - basic_machine=$1 - ;; - -hiux*) - os=-hiuxwe2 - ;; - -sco5) - os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco4) - os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2v[4-9]*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco*) - os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -isc) - os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -clix*) - basic_machine=clipper-intergraph - ;; - -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -lynx*) - os=-lynxos - ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` - ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` - ;; - -psos*) - os=-psos - ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; -esac - -# Decode aliases for certain CPU-COMPANY combinations. -case $basic_machine in - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. 
- 1750a | 580 \ - | a29k \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \ - | bfin \ - | c4x | clipper \ - | d10v | d30v | dlx | dsp16xx \ - | fr30 | frv \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | i370 | i860 | i960 | ia64 \ - | ip2k | iq2000 \ - | m32r | m32rle | m68000 | m68k | m88k | maxq | mcore \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64vr | mips64vrel \ - | mips64orion | mips64orionel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | ms1 \ - | msp430 \ - | ns16k | ns32k \ - | or32 \ - | pdp10 | pdp11 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \ - | pyramid \ - | sh | sh[1234] | sh[24]a | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b \ - | strongarm \ - | tahoe | thumb | tic4x | tic80 | tron \ - | v850 | v850e \ - | we32k \ - | x86 | xscale | xscalee[bl] | xstormy16 | xtensa \ - | z8k) - basic_machine=$basic_machine-unknown - ;; - m32c) - basic_machine=$basic_machine-unknown - ;; - m6811 | m68hc11 | m6812 | m68hc12) - # Motorola 68HC11/12. - basic_machine=$basic_machine-unknown - os=-none - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) - ;; - - # We use `pc' rather than `unknown' - # because (1) that's what they normally are, and - # (2) the word "unknown" tends to confuse beginning users. - i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Object if more than one company name word. - *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; - # Recognize the basic CPU types with company name. 
- 580-* \ - | a29k-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ - | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ - | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | i*86-* | i860-* | i960-* | ia64-* \ - | ip2k-* | iq2000-* \ - | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64sb1-* | mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipstx39-* | mipstx39el-* \ - | mmix-* \ - | ms1-* \ - | msp430-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ - | pyramid-* \ - | romp-* | rs6000-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[23]e-* | sh[34]eb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \ - | tahoe-* | thumb-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tron-* \ - | v850-* | v850e-* | vax-* \ - | we32k-* \ - | x86-* | x86_64-* | xps100-* | xscale-* | xscalee[bl]-* \ - | xstormy16-* | xtensa-* \ - | ymp-* \ - | z8k-*) - ;; - m32c-*) - ;; - # Recognize the various machine names and aliases which stand - # for a CPU type and a company and sometimes even an OS. 
- 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; - 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att - ;; - 3b*) - basic_machine=we32k-att - ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - c90) - basic_machine=c90-cray - os=-unicos - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp - ;; - cr16c) - basic_machine=cr16c-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=-elf - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec - ;; - decsystem10* | dec10*) - basic_machine=pdp10-dec - os=-tops10 - ;; - decsystem20* | dec20*) - basic_machine=pdp10-dec - os=-tops20 - ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=-bosx - ;; - dpx2* | dpx2*-bull) - basic_machine=m68k-bull - os=-sysv3 - ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd - ;; - encore | umax | mmax) - basic_machine=ns32k-encore - ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose - ;; - fx2800) - basic_machine=i860-alliant - ;; - genix) - basic_machine=ns32k-ns - ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; - h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 - ;; - hp300-*) - basic_machine=m68k-hp - ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; - hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp - ;; - hp9k3[2-9][0-9]) - basic_machine=m68k-hp - ;; - hp9k6[0-9][0-9] | hp6[0-9][0-9]) - 
basic_machine=hppa1.0-hp - ;; - hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp - ;; - hp9k78[0-9] | hp78[0-9]) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; -# I'm not sure what "Sysv32" means. Should this be sysv3.2? - i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 - ;; - i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 - ;; - i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv - ;; - i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach - ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta - ;; - iris | iris4d) - basic_machine=mips-sgi - case $os in - -irix*) - ;; - *) - os=-irix4 - ;; - esac - ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - basic_machine=ns32k-utek - os=-sysv - ;; - mingw32) - basic_machine=i386-pc - os=-mingw32 - ;; - miniframe) - basic_machine=m68000-convergent - ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; - mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - msdos) - basic_machine=i386-pc - os=-msdos - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos - ;; - news-3600 | risc-news) - basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) - basic_machine=m68k-next - case $os in - -nextstep* ) - ;; - -ns2*) - os=-nextstep2 - ;; - *) - os=-nextstep3 - ;; - esac - ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; - np1) - basic_machine=np1-gould - ;; - nsr-tandem) - basic_machine=nsr-tandem - ;; - op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=-proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - basic_machine=m68k-none - os=-os68k - ;; - pa-hitachi) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - pbd) - basic_machine=sparc-tti - ;; - pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - 
;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc - ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pn) - basic_machine=pn-gould - ;; - power) basic_machine=power-ibm - ;; - ppc) basic_machine=powerpc-unknown - ;; - ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle | ppc-le | powerpc-little) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ps2) - basic_machine=i386-ibm - ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; - rm[46]00) - basic_machine=mips-siemens - ;; - rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm - ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; - sb1) - basic_machine=mipsisa64sb1-unknown - ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown - ;; - sei) - basic_machine=mips-sei - os=-seiux - ;; - sequent) - basic_machine=i386-sequent - ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) - basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 - ;; - spur) - basic_machine=spur-unknown - ;; - st2000) - basic_machine=m68k-tandem - ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; - sun2) - basic_machine=m68000-sun - ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 - ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; - sun3 | sun3-*) - basic_machine=m68k-sun - ;; - sun4) - basic_machine=sparc-sun - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - basic_machine=t90-cray - os=-unicos - ;; - tic54x | c54x*) - basic_machine=tic54x-unknown - os=-coff - ;; - tic55x | c55x*) - basic_machine=tic55x-unknown - os=-coff - ;; - tic6x | c6x*) - basic_machine=tic6x-unknown - os=-coff - ;; - tx39) - basic_machine=mipstx39-unknown - ;; - tx39el) - basic_machine=mipstx39el-unknown - ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; - tower | tower-32) - basic_machine=m68k-ncr - ;; - 
tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; - vpp*|vx|vx-*) - basic_machine=f301-fujitsu - ;; - vxworks960) - basic_machine=i960-wrs - os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; - w65*) - basic_machine=w65-wdc - os=-none - ;; - w89k-*) - basic_machine=hppa1.1-winbond - os=-proelf - ;; - xbox) - basic_machine=i686-pc - os=-mingw32 - ;; - xps | xps100) - basic_machine=xps100-honeywell - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - none) - basic_machine=none-none - os=-none - ;; - -# Here we handle the default manufacturer of certain CPU types. It is in -# some cases the only manufacturer, in others, it is the most popular. - w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele) - basic_machine=sh-unknown - ;; - sparc | sparcv8 | sparcv9 | sparcv9b) - basic_machine=sparc-sun - ;; - cydra) - basic_machine=cydra-cydrome - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - *-unknown) - # Make sure to match an already-canonicalized machine name. - ;; - *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; -esac - -# Here we canonicalize certain aliases for manufacturers. -case $basic_machine in - *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` - ;; - *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` - ;; - *) - ;; -esac - -# Decode manufacturer-specific aliases for certain operating systems. - -if [ x"$os" != x"" ] -then -case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` - ;; - -solaris) - os=-solaris2 - ;; - -svr4*) - os=-sysv4 - ;; - -unixware*) - os=-sysv4.2uw - ;; - -gnu/linux*) - os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` - ;; - # First accept the basic system types. - # The portable systems comes first. - # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. 
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* | -openbsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* \ - | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -linux-gnu* | -linux-uclibc* | -uxpv* | -beos* | -mpeix* | -udk* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku*) - # Remember, each alternative MUST END IN *, to match a version number. - ;; - -qnx*) - case $basic_machine in - x86-* | i*86-*) - ;; - *) - os=-nto$os - ;; - esac - ;; - -nto-qnx*) - ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` - ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) - ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` - ;; - -linux-dietlibc) - os=-linux-dietlibc - ;; - -linux*) - os=`echo $os | sed -e 's|linux|linux-gnu|'` - ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` - ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` - ;; - -opened*) - os=-openedition - ;; - -os400*) - os=-os400 - ;; - -wince*) - os=-wince - ;; - -osfrose*) - os=-osfrose - ;; - -osf*) - os=-osf - ;; - -utek*) - os=-bsd - ;; - -dynix*) - os=-bsd - ;; - -acis*) - os=-aos - ;; - -atheos*) - os=-atheos - ;; - -syllable*) - os=-syllable - ;; - -386bsd) - os=-bsd - ;; - -ctix* | -uts*) - os=-sysv - ;; - -nova*) - os=-rtmk-nova - ;; - -ns2 ) - os=-nextstep2 - ;; - -nsk*) - os=-nsk - ;; - # Preserve the version number of sinix5. - -sinix5.*) - os=`echo $os | sed -e 's|sinix|sysv|'` - ;; - -sinix*) - os=-sysv4 - ;; - -tpf*) - os=-tpf - ;; - -triton*) - os=-sysv3 - ;; - -oss*) - os=-sysv3 - ;; - -svr4) - os=-sysv4 - ;; - -svr3) - os=-sysv3 - ;; - -sysvr4) - os=-sysv4 - ;; - # This must come after -sysvr4. - -sysv*) - ;; - -ose*) - os=-ose - ;; - -es1800*) - os=-ose - ;; - -xenix) - os=-xenix - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint - ;; - -aros*) - os=-aros - ;; - -kaos*) - os=-kaos - ;; - -zvmoe) - os=-zvmoe - ;; - -none) - ;; - *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 - exit 1 - ;; -esac -else - -# Here we handle the default operating systems that come with various machines. -# The value should be what the vendor currently ships out the door with their -# machine or put another way, the most popular os provided with the machine. 
- -# Note that if you're going to try to match "-MANUFACTURER" here (say, -# "-sun"), then you have to tell the case statement up towards the top -# that MANUFACTURER isn't an operating system. Otherwise, code above -# will signal an error saying that MANUFACTURER isn't an operating -# system, and we'll never get to this point. - -case $basic_machine in - *-acorn) - os=-riscix1.2 - ;; - arm*-rebel) - os=-linux - ;; - arm*-semi) - os=-aout - ;; - c4x-* | tic4x-*) - os=-coff - ;; - # This must come before the *-dec entry. - pdp10-*) - os=-tops20 - ;; - pdp11-*) - os=-none - ;; - *-dec | vax-*) - os=-ultrix4.2 - ;; - m68*-apollo) - os=-domain - ;; - i386-sun) - os=-sunos4.0.2 - ;; - m68000-sun) - os=-sunos3 - # This also exists in the configure program, but was not the - # default. - # os=-sunos4 - ;; - m68*-cisco) - os=-aout - ;; - mips*-cisco) - os=-elf - ;; - mips*-*) - os=-elf - ;; - or32-*) - os=-coff - ;; - *-tti) # must be before sparc entry or we get the wrong os. - os=-sysv3 - ;; - sparc-* | *-sun) - os=-sunos4.1.1 - ;; - *-be) - os=-beos - ;; - *-haiku) - os=-haiku - ;; - *-ibm) - os=-aix - ;; - *-knuth) - os=-mmixware - ;; - *-wec) - os=-proelf - ;; - *-winbond) - os=-proelf - ;; - *-oki) - os=-proelf - ;; - *-hp) - os=-hpux - ;; - *-hitachi) - os=-hiux - ;; - i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv - ;; - *-cbm) - os=-amigaos - ;; - *-dg) - os=-dgux - ;; - *-dolphin) - os=-sysv3 - ;; - m68k-ccur) - os=-rtu - ;; - m88k-omron*) - os=-luna - ;; - *-next ) - os=-nextstep - ;; - *-sequent) - os=-ptx - ;; - *-crds) - os=-unos - ;; - *-ns) - os=-genix - ;; - i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 - ;; - *-gould) - os=-sysv - ;; - *-highlevel) - os=-bsd - ;; - *-encore) - os=-bsd - ;; - *-sgi) - os=-irix - ;; - *-siemens) - os=-sysv4 - ;; - *-masscomp) - os=-rtu - ;; - f30[01]-fujitsu | f700-fujitsu) - os=-uxpv - ;; - *-rom68k) - os=-coff - ;; - *-*bug) - os=-coff - ;; - *-apple) - os=-macos - ;; - *-atari*) - os=-mint - ;; - *) - os=-none - ;; -esac -fi - -# Here we handle the case where we know the os, and the CPU type, but not the -# manufacturer. We pick the logical manufacturer. -vendor=unknown -case $basic_machine in - *-unknown) - case $os in - -riscix*) - vendor=acorn - ;; - -sunos*) - vendor=sun - ;; - -aix*) - vendor=ibm - ;; - -beos*) - vendor=be - ;; - -hpux*) - vendor=hp - ;; - -mpeix*) - vendor=hp - ;; - -hiux*) - vendor=hitachi - ;; - -unos*) - vendor=crds - ;; - -dgux*) - vendor=dg - ;; - -luna*) - vendor=omron - ;; - -genix*) - vendor=ns - ;; - -mvs* | -opened*) - vendor=ibm - ;; - -os400*) - vendor=ibm - ;; - -ptx*) - vendor=sequent - ;; - -tpf*) - vendor=ibm - ;; - -vxsim* | -vxworks* | -windiss*) - vendor=wrs - ;; - -aux*) - vendor=apple - ;; - -hms*) - vendor=hitachi - ;; - -mpw* | -macos*) - vendor=apple - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - vendor=atari - ;; - -vos*) - vendor=stratus - ;; - esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` - ;; -esac - -echo $basic_machine$os -exit - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/storage/bdb/dist/configure.ac b/storage/bdb/dist/configure.ac deleted file mode 100644 index d9045434513..00000000000 --- a/storage/bdb/dist/configure.ac +++ /dev/null @@ -1,754 +0,0 @@ -# $Id: configure.ac,v 12.9 2005/10/14 20:52:29 bostic Exp $ -# Process this file with autoconf to produce a configure script. 
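The file deleted above is the stock GNU config.sub that shipped inside storage/bdb/dist: it canonicalizes a user-supplied machine alias into a full cpu-vendor-os triplet by walking the case statements shown. A minimal sketch of what that looks like in practice, assuming the deleted script is saved as ./config.sub and made executable; the expected triplets follow directly from the case arms above (sun4sol2 sets sparc-sun and -solaris2, hp9k780 sets hppa1.1-hp and picks up -hpux from the *-hp default).

# Illustrative invocations of the deleted canonicalization script.
./config.sub sun4sol2    # prints: sparc-sun-solaris2
./config.sub hp9k780     # prints: hppa1.1-hp-hpux (the FIXME above notes it is really hppa2.0)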
- -PACKAGE=db -AC_INIT(Berkeley DB, - __EDIT_DB_VERSION__, support@sleepycat.com, db-__EDIT_DB_VERSION__) -AC_CONFIG_SRCDIR([../db/db.c]) -AC_CONFIG_HEADERS([db_config.h:config.hin]) - -# Configure setup. -AC_CANONICAL_HOST() -AC_ARG_PROGRAM() - -# Don't build in the top-level or dist directories. -AC_MSG_CHECKING(if building in the top-level or dist directories) -if [ test -d db_archive -o -f configure.ac ] ; then - AC_MSG_RESULT(yes) - AC_MSG_ERROR( - [Berkeley DB should not be built in the top-level or dist directories.]) -fi -AC_MSG_RESULT(no) - -# Substitution variables. -AC_SUBST(ADDITIONAL_INCS) -AC_SUBST(ADDITIONAL_LANG) -AC_SUBST(ADDITIONAL_OBJS) -AC_SUBST(ADDITIONAL_PROGS) -AC_SUBST(BUILD_TARGET) -AC_SUBST(CFLAGS) -AC_SUBST(CONFIGURATION_ARGS) -AC_SUBST(CONFIGURATION_PATH) -AC_SUBST(CPPFLAGS) -AC_SUBST(CRYPTO_OBJS) -AC_SUBST(CXX) -AC_SUBST(CXXFLAGS) -AC_SUBST(DB_PROTO1) -AC_SUBST(DB_PROTO2) -AC_SUBST(DEFAULT_LIB) -AC_SUBST(DEFAULT_LIB_CXX) -AC_SUBST(INSTALLER) -AC_SUBST(INSTALL_LIBS) -AC_SUBST(INSTALL_TARGET) -AC_SUBST(JAR) -AC_SUBST(JAVACFLAGS) -AC_SUBST(LDFLAGS) -AC_SUBST(LIBCSO_LIBS) -AC_SUBST(LIBJSO_LIBS) -AC_SUBST(LIBS) -AC_SUBST(LIBSO_LIBS) -AC_SUBST(LIBTOOL) -AC_SUBST(LIBTSO_LIBS) -AC_SUBST(LIBTSO_MODSUFFIX) -AC_SUBST(LIBTSO_MODULE) -AC_SUBST(LIBXSO_LIBS) -AC_SUBST(MAKEFILE_CC) -AC_SUBST(MAKEFILE_CCLINK) -AC_SUBST(MAKEFILE_CXX) -AC_SUBST(MAKEFILE_CXXLINK) -AC_SUBST(MAKEFILE_SOLINK) -AC_SUBST(MAKEFILE_XSOLINK) -AC_SUBST(OSDIR) -AC_SUBST(PATH_SEPARATOR) -AC_SUBST(POSTLINK) -AC_SUBST(REPLACEMENT_OBJS) -AC_SUBST(RPC_CLIENT_OBJS) -AC_SUBST(RPC_SERVER_H) -AC_SUBST(SOFLAGS) -AC_SUBST(TEST_LIBS) -AC_SUBST(db_cv_build_type) -AC_SUBST(db_int_def) -AC_SUBST(o) - -# Set the default installation location. -AC_PREFIX_DEFAULT(/usr/local/BerkeleyDB.__EDIT_DB_VERSION_MAJOR__.__EDIT_DB_VERSION_MINOR__) - -# Configure the version information. -AC_SUBST(DB_VERSION_MAJOR) -DB_VERSION_MAJOR="__EDIT_DB_VERSION_MAJOR__" -AC_SUBST(DB_VERSION_MINOR) -DB_VERSION_MINOR="__EDIT_DB_VERSION_MINOR__" -AC_SUBST(DB_VERSION_PATCH) -DB_VERSION_PATCH="__EDIT_DB_VERSION_PATCH__" -AC_SUBST(DB_VERSION_STRING) -DB_VERSION_STRING='"__EDIT_DB_VERSION_STRING__"' -AC_SUBST(DB_VERSION_UNIQUE_NAME) - -# Process all options before using them. -AM_OPTIONS_SET - -# Set some #defines based on configuration options. -if test "$db_cv_diagnostic" = "yes"; then - AC_DEFINE(DIAGNOSTIC) - AH_TEMPLATE(DIAGNOSTIC, - [Define to 1 if you want a version with run-time diagnostic checking.]) -fi -if test "$db_cv_debug_rop" = "yes"; then - AC_DEFINE(DEBUG_ROP) - AH_TEMPLATE(DEBUG_ROP, - [Define to 1 if you want a version that logs read operations.]) -fi -if test "$db_cv_debug_wop" = "yes"; then - AC_DEFINE(DEBUG_WOP) - AH_TEMPLATE(DEBUG_WOP, - [Define to 1 if you want a version that logs write operations.]) -fi -if test "$db_cv_umrw" = "yes"; then - AC_DEFINE(UMRW) - AH_TEMPLATE(UMRW, - [Define to 1 to mask harmless uninitialized memory read/writes.]) - -fi -if test "$db_cv_test" = "yes"; then - AC_DEFINE(CONFIG_TEST) - AH_TEMPLATE(CONFIG_TEST, - [Define to 1 if you want to build a version for running the test suite.]) -fi - -# Check for programs used in building and installation. -AM_PROGRAMS_SET -AC_PROG_INSTALL - -BUILD_TARGET="library_build" -INSTALL_TARGET="library_install" - -# This is where we handle stuff that autoconf can't handle: compiler, -# preprocessor and load flags, libraries that the standard tests don't -# look for. -# -# There are additional libraries we need for some compiler/architecture -# combinations. 
-# -# Some architectures require DB to be compiled with special flags and/or -# libraries for threaded applications -# -# The makefile CC may be different than the CC used in config testing, -# because the makefile CC may be set to use $(LIBTOOL). -# -# Don't override anything if it's already set from the environment. -optimize_debug="-O" -case "$host_os" in -aix4.3.*|aix5*) - optimize_debug="-O2" - CC=${CC-"xlc_r"} - CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE" - LDFLAGS="$LDFLAGS -Wl,-brtl";; -bsdi3*) CC=${CC-"shlicc2"} - LIBS="$LIBS -lipc";; -cygwin*) - CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";; -freebsd*) - CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE" - LDFLAGS="$LDFLAGS -pthread";; -gnu*|k*bsd*-gnu|linux*) - CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";; -hpux*) CPPFLAGS="$CPPFLAGS -D_REENTRANT";; -irix*) optimize_debug="-O2" - CPPFLAGS="$CPPFLAGS -D_SGI_MP_SOURCE";; -mpeix*) CPPFLAGS="$CPPFLAGS -D_POSIX_SOURCE -D_SOCKET_SOURCE" - LIBS="$LIBS -lsocket -lsvipc";; -osf*) CPPFLAGS="$CPPFLAGS -pthread";; -*qnx*) AC_DEFINE(HAVE_QNX) - AH_TEMPLATE(HAVE_QNX, [Define to 1 if building on QNX.]);; -solaris*) - CPPFLAGS="$CPPFLAGS -D_REENTRANT";; -esac - -# If the user wants a debugging environment, change any compiler optimization -# flags to -g. We used to add -g to the -O compiler flags, but compilers are -# good enough at code re-organization that debugging with -O no longer works. -# If you want to compile with a different set of flags, specify CFLAGS in the -# environment before configuring. -if test "$db_cv_debug" = "yes"; then - AC_DEFINE(DEBUG) - AH_TEMPLATE(DEBUG, [Define to 1 if you want a debugging version.]) - - optimize_debug="-g" -fi - -# Set CFLAGS/CXXFLAGS. We MUST set the flags before we call autoconf -# compiler configuration macros, because if we don't, they set CFLAGS -# to no optimization and -g, which isn't what we want. -CFLAGS=${CFLAGS-$optimize_debug} -CXXFLAGS=${CXXFLAGS-"$CFLAGS"} - -# The default compiler is cc (NOT gcc), the default CFLAGS is as specified -# above, NOT what is set by AC_PROG_CC, as it won't set optimization flags -# for any compiler other than gcc. -AC_PROG_CC(cc gcc) - -# We know what compiler we're going to use, now. Set per-compiler flags. -if test "$GCC" = "yes"; then - # We want -O2 if we're using gcc. - CFLAGS="$CFLAGS " - CFLAGS=`echo "$CFLAGS" | sed 's/-O /-O2 /g'` -else - case "$host_os" in - hpux11.0*) ;; - hpux11*) CPPFLAGS="$CPPFLAGS -mt";; - esac -fi - -# Checks for compiler characteristics. -DB_PROTO1="#undef __P" - -# AC_PROG_CC_STDC only sets ac_cv_prog_cc_stdc if the test fails, so -# check for "no", not "yes". -if test "$ac_cv_prog_cc_stdc" = "no"; then - DB_PROTO2="#define __P(protos) ()" -else - DB_PROTO2="#define __P(protos) protos" -fi - -AC_C_CONST -AC_SUBST(DB_CONST) -if test "$ac_cv_c_const" != "yes"; then - DB_CONST="#define const" -fi - -# Because of shared library building, the ${CC} used for config tests -# may be different than the ${CC} we want to put in the Makefile. -# The latter is known as ${MAKEFILE_CC} in this script. -MAKEFILE_CC="${CC}" -MAKEFILE_CCLINK="${CC}" -MAKEFILE_CXX="nocxx" -MAKEFILE_CXXLINK="nocxx" - -# See if we need the C++ compiler at all. If so, we'd like to find one that -# interoperates with the C compiler we chose. Since we prefered cc over gcc, -# we'll also prefer the vendor's compiler over g++/gcc. If we're wrong, the -# user can set CC and CXX in their environment before running configure. -# -# AC_PROG_CXX sets CXX, but it uses $CXX and $CCC (in that order) as its -# first choices. 
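As the comments above spell out, the compiler and optimization settings chosen here are only fallbacks: anything exported in the environment before configure runs takes precedence, and the vendor cc / vendor C++ preference can be bypassed the same way. A hedged sketch of overriding them, with flag values that are purely illustrative; the build_unix/../dist/configure layout is the same one the spec file later in this patch uses, and --enable-cxx/--enable-debug are the switches that feed the db_cv_cxx/db_cv_debug variables tested above.

# Run from a fresh build directory; CC/CXX/CFLAGS from the environment win
# over the cc/-O defaults picked by the configure script above.
cd build_unix
env CC=gcc CXX=g++ CFLAGS="-g -O0" \
    ../dist/configure --enable-cxx --enable-debug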
-if test "$db_cv_cxx" = "yes"; then - if test "$GCC" != "yes"; then - case "$host_os" in - aix*) AC_CHECK_TOOL(CCC, xlC_r) - LIBXSO_LIBS="-lC_r $LIBXSO_LIBS" - LIBS="-lC_r $LIBS";; - hpux*) AC_CHECK_TOOL(CCC, aCC);; - irix*) AC_CHECK_TOOL(CCC, CC);; - osf*) AC_CHECK_TOOL(CCC, cxx);; - solaris*) AC_CHECK_TOOL(CCC, CC);; - esac - fi - AC_PROG_CXX - ###### WORKAROUND: SEE SR #7938 - AC_PROG_CXXCPP - ############################### - AC_CXX_HAVE_STDHEADERS - MAKEFILE_CXX="${CXX}" - MAKEFILE_CXXLINK="${CXX}" -fi - -# Do some gcc specific configuration. -AC_GCC_CONFIG1 -AC_GCC_CONFIG2 - -# We need the -Kthread/-pthread flag when compiling on SCO/Caldera's UnixWare -# and OpenUNIX releases. We can't make the test until we know which compiler -# we're using. -case "$host_os" in -sysv5UnixWare*|sysv5OpenUNIX8*) - if test "$GCC" == "yes"; then - CPPFLAGS="$CPPFLAGS -pthread" - LDFLAGS="$LDFLAGS -pthread" - else - CPPFLAGS="$CPPFLAGS -Kthread" - LDFLAGS="$LDFLAGS -Kthread" - fi;; -esac - -# Export our compiler preferences for the libtool configuration. -export CC CCC -CCC=CXX - -# Libtool configuration. -AC_PROG_LIBTOOL - -SOFLAGS="-rpath \$(libdir)" -LIBTOOL_PROG="${SHELL} ./libtool" - -# Set SOSUFFIX and friends -SOSUFFIX_CONFIG -MODSUFFIX_CONFIG -JMODSUFFIX_CONFIG - -INSTALLER="\$(LIBTOOL) --mode=install cp -p" - -MAKEFILE_CC="\$(LIBTOOL) --mode=compile ${MAKEFILE_CC}" -MAKEFILE_SOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK} -avoid-version" -MAKEFILE_CCLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK}" -MAKEFILE_CXX="\$(LIBTOOL) --mode=compile ${MAKEFILE_CXX}" -MAKEFILE_XSOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK} -avoid-version" -MAKEFILE_CXXLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK}" - -LIBTOOL="\$(SHELL) ./libtool" - -case "$host_os" in -cygwin* | mingw*) - MAKEFILE_SOLINK="$MAKEFILE_SOLINK -no-undefined" - MAKEFILE_XSOLINK="$MAKEFILE_XSOLINK -no-undefined";; -esac - -# Configure for shared libraries, static libraries, or both. If both are -# configured, build the utilities and example programs with shared versions. -# -# $o is set to ".o" or ".lo", and is the file suffix used in the Makefile -# instead of .o -if test `$LIBTOOL_PROG --config | - grep build_libtool_libs | grep no` 2>/dev/null; then - enable_shared="no" -else - enable_shared="yes" -fi -if test `$LIBTOOL_PROG --config | - grep build_old_libs | grep no` 2>/dev/null; then - enable_static="no" -else - enable_static="yes" -fi - -case "$host_os" in - darwin*) - LIBTSO_MODULE="" - LIBTSO_MODSUFFIX=".dylib" - ;; - *) - LIBTSO_MODULE="-module" - LIBTSO_MODSUFFIX=@MODSUFFIX@ - ;; -esac - -# C API. -if test "$enable_shared" = "no"; then - DEFAULT_LIB="\$(libdb_version)" - POSTLINK=": " - o=".o" -else - DEFAULT_LIB="\$(libso_target)" - POSTLINK="\$(LIBTOOL) --mode=execute true" - o=".lo" -fi -INSTALL_LIBS="$DEFAULT_LIB" -if test "$enable_static" = "yes"; then - INSTALL_LIBS="$INSTALL_LIBS \$(libdb)" -fi - -# Optional C++ API. -if test "$db_cv_cxx" = "yes"; then - if test "$enable_shared" = "no"; then - DEFAULT_LIB_CXX="\$(libcxx_version)" - fi - if test "$enable_shared" = "yes"; then - DEFAULT_LIB_CXX="\$(libxso_target)" - fi - INSTALL_LIBS="$INSTALL_LIBS $DEFAULT_LIB_CXX" - if test "$enable_static" = "yes"; then - INSTALL_LIBS="$INSTALL_LIBS \$(libcxx)" - fi -fi - -# We split DbConstants.java into debug and release versions so Windows -# developers don't need to do anything special to use the Debug DLL. 
-if test "$db_cv_debug" = "yes"; then - db_cv_build_type=debug -else - db_cv_build_type=release -fi - -# Optional Java API. -if test "$db_cv_java" = "yes"; then - # Java requires shared libraries. - if test "$enable_shared" = "no"; then - AC_MSG_ERROR([Java requires shared libraries]) - fi - - # A classpath that includes . is needed to check for Java - CLASSPATH=".:$CLASSPATH" - export CLASSPATH - AC_PROG_JAVAC - AC_PROG_JAR - AC_PROG_JAVA - AC_JNI_INCLUDE_DIR - - AC_MSG_CHECKING(java version) - case "$JAVA" in - *kaffe* ) - JAVA_VERSION=`$JAVA -version 2>&1 | - sed -e '/Java Version:/!d' -e 's/.*Java Version: \([[^ ]]*\)[[ ]]*/\1/'` ;; - * ) JAVA_VERSION=`$JAVA -version 2>&1 | - sed -e '/ version /!d' -e 's/.*"\(.*\)".*/\1/'` ;; - esac - AC_MSG_RESULT($JAVA_VERSION) - case "$JAVA_VERSION" in - 1.[[3456789]]* | 1.[[1-9]][[0-9]]* | [[23456789]]* ) ;; - * ) - AC_MSG_ERROR([Java version 1.3 or higher required, got $JAVA_VERSION]) ;; - esac - - for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS - do - CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR" - done - - ADDITIONAL_LANG="$ADDITIONAL_LANG java" - INSTALL_LIBS="$INSTALL_LIBS \$(libjso_target)" -else - JAVAC=nojavac -fi - -# MinGW support. -if test "$db_cv_mingw" = "yes"; then - OSDIR=os_win32 - PATH_SEPARATOR="\\\\/:" - - AC_DEFINE(DB_WIN32) - AC_DEFINE(STDC_HEADERS) -else - OSDIR=os - PATH_SEPARATOR="/" -fi - -# Checks for include files, structures, C types. -AC_HEADER_STAT -AC_HEADER_TIME -AC_HEADER_DIRENT -AC_CHECK_HEADERS(sys/select.h sys/time.h sys/fcntl.h) -AC_CHECK_MEMBERS([struct stat.st_blksize]) -AM_TYPES - -AC_CACHE_CHECK([for ANSI C exit success/failure values], db_cv_exit_defines, [ -AC_TRY_COMPILE([#include ], return (EXIT_SUCCESS);, - [db_cv_exit_defines=yes], [db_cv_exit_defines=no])]) -if test "$db_cv_exit_defines" = "yes"; then - AC_DEFINE(HAVE_EXIT_SUCCESS) - AH_TEMPLATE(HAVE_EXIT_SUCCESS, - [Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines.]) -fi - -# Test for various functions/libraries -- do tests that change library values -# first. -# -# The Berkeley DB library calls fdatasync, and it's only available in -lrt on -# Solaris. See if we can find it either without additional libraries or in -# -lrt. If fdatasync is found in -lrt, add -lrt to the shared library links. -AC_SEARCH_LIBS(fdatasync, rt, [dnl - if test "$ac_cv_search_fdatasync" != "none required" ; then - LIBSO_LIBS="$LIBSO_LIBS -lrt"; - fi]) - -# The test and example programs use the sched_yield function, taken from -lrt -# on Solaris. -AC_SEARCH_LIBS(sched_yield, rt) - -# !!! -# We can't check for pthreads in the same way we did the test for sched_yield -# because the Solaris C library includes pthread interfaces which are not -# inter-process safe. For that reason we always add -lpthread if we find a -# pthread library. -# -# We can't depend on any specific call existing (pthread_create, for example), -# as it may be #defined in an include file -- OSF/1 (Tru64) has this problem. -AC_HAVE_LIBRARY(pthread, TEST_LIBS="$TEST_LIBS -lpthread") - -# !!! -# We could be more exact about whether these libraries are needed, but don't -# bother -- if they exist, we load them, it's only the test programs anyway. -AC_HAVE_LIBRARY(m, TEST_LIBS="$TEST_LIBS -lm") -AC_HAVE_LIBRARY(socket, TEST_LIBS="$TEST_LIBS -lsocket") -AC_HAVE_LIBRARY(nsl, TEST_LIBS="$TEST_LIBS -lnsl") - -# Check for mutexes. -# We do this here because it changes $LIBS. -AM_DEFINE_MUTEXES - -# Checks for system functions for which we have replacements. 
-# -# XXX -# The only portable getcwd call is getcwd(char *, size_t), where the -# buffer is non-NULL -- Solaris can't handle a NULL buffer, and they -# deleted getwd(). -AC_REPLACE_FUNCS(getcwd getopt memcmp memcpy memmove raise) -AC_REPLACE_FUNCS(strcasecmp strdup strerror strtol strtoul) - -# Check for system functions we optionally use. -AC_CHECK_FUNCS(\ - _fstati64 clock_gettime directio fchmod fcntl fdatasync ftruncate\ - getrusage gettimeofday getuid pstat_getdynamic rand sched_yield\ - select snprintf srand sysconf vsnprintf yield) - -# Pthread_self. -# The use of pthread_self to identify threads can be forced. -if test "$db_cv_pthread_self" = "yes"; then - AC_CHECK_FUNCS(pthread_self) -fi - -# Pread/pwrite. -# HP-UX has pread/pwrite, but it doesn't work with largefile support. -# NCR's version of System V R 4.3 has pread/pwrite symbols, but no support. -case "$host_os-$host_vendor" in -hpux*|sysv4.3*-ncr) - AC_MSG_WARN( - [pread/pwrite interfaces ignored on $host_os-$host_vendor.]);; -*) - AC_CHECK_FUNCS(pread pwrite);; -esac - -# Check for fcntl(2) to deny child process access to file descriptors. -AC_CACHE_CHECK([for fcntl/F_SETFD], db_cv_fcntl_f_setfd, [ -AC_TRY_LINK([ -#include -#include ], [ - fcntl(1, F_SETFD, 1); -], [db_cv_fcntl_f_setfd=yes], [db_cv_fcntl_f_setfd=no])]) -if test "$db_cv_fcntl_f_setfd" = "yes"; then - AC_DEFINE(HAVE_FCNTL_F_SETFD) - AH_TEMPLATE(HAVE_FCNTL_F_SETFD, - [Define to 1 if fcntl/F_SETFD denies child access to file descriptors.]) -fi - -# A/UX has a broken getopt(3). -case "$host_os" in -aux*) AC_LIBOBJ([getopt]);; -esac - -# Linux has a broken O_DIRECT flag, but you can't detect it at configure time. -# Linux and SGI require buffer alignment we may not match, otherwise writes -# will fail. Default to not using the O_DIRECT flag. -if test "$db_cv_o_direct" = "yes"; then - AC_CACHE_CHECK([for open/O_DIRECT], db_cv_open_o_direct, [ - AC_TRY_LINK([ - #include - #include ], [ - open("a", O_RDONLY | O_DIRECT, 0); - ], [db_cv_open_o_direct=yes], [db_cv_open_o_direct=no])]) - if test \ - "$db_cv_o_direct" = "yes" -a "$db_cv_open_o_direct" = "yes"; then - AC_DEFINE(HAVE_O_DIRECT) - AH_TEMPLATE(HAVE_O_DIRECT, - [Define to 1 if you have the O_DIRECT flag.]) - fi -fi - -# Check for largefile support. -AC_SYS_LARGEFILE - -# Figure out how to create shared regions. -# -# First, we look for mmap. -# -# BSD/OS has mlock(2), but it doesn't work until the 4.1 release. -# -# Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol -# is defined in the C library) but does not support munmap(2). Don't -# try to use mmap if we can't find munmap. -# -# Ultrix has mmap(2), but it doesn't work. -mmap_ok=no -case "$host_os" in -bsdi3*|bsdi4.0) - AC_MSG_WARN([mlock(2) interface ignored on $host_os-$host_vendor.]) - mmap_ok=yes - AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);; -ultrix*) - AC_MSG_WARN([mmap(2) interface ignored on $host_os-$host_vendor.]);; -*) - mmap_ok=yes - AC_CHECK_FUNCS(mlock munlock) - AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);; -esac - -# Second, we look for shmget. -# -# SunOS has the shmget(2) interfaces, but there appears to be a missing -# #include file, so we ignore them. -shmget_ok=no -case "$host_os" in -sunos*) - AC_MSG_WARN([shmget(2) interface ignored on $host_os-$host_vendor.]);; -*) - shmget_ok=yes - AC_CHECK_FUNCS(shmget, , shmget_ok=no);; -esac - -# We require either mmap/munmap(2) or shmget(2). 
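The fcntl/F_SETFD and O_DIRECT probes above follow the same pattern: AC_TRY_LINK writes a throwaway program, tries to build it, and caches the answer in a db_cv_* variable that later drives an AC_DEFINE. A rough plain-sh equivalent of the F_SETFD probe, handy for checking a host by hand; the conftest names are illustrative and the headers are <sys/types.h> and <fcntl.h>.

# Hand-rolled version of the fcntl/F_SETFD link test above.
cat > conftest.c <<'EOF'
#include <sys/types.h>
#include <fcntl.h>
int main(void) { fcntl(1, F_SETFD, 1); return (0); }
EOF
if cc conftest.c -o conftest >/dev/null 2>&1; then
	db_cv_fcntl_f_setfd=yes
else
	db_cv_fcntl_f_setfd=no
fi
rm -f conftest.c conftest
echo "fcntl/F_SETFD: $db_cv_fcntl_f_setfd"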
-if test "$mmap_ok" = "no" -a "$shmget_ok" = "no"; then - AC_MSG_WARN([Neither mmap/munmap(2) or shmget(2) library functions.]) -fi - -# Optional RPC client/server. -if test "$db_cv_rpc" = "yes"; then - AM_RPC_CONFIGURE -fi - -# Optional Tcl support. -if test "$db_cv_tcl" = "yes"; then - AM_TCL_LOAD -fi - -# Optional sequence code. -AM_SEQUENCE_CONFIGURE - -# Optional DB 1.85 compatibility API. -if test "$db_cv_compat185" = "yes"; then - ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS" - - ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS" -fi - -# Optional utilities. -if test "$db_cv_dump185" = "yes"; then - ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS" -fi - -# You can disable pieces of functionality to save space. -# -# Btree is always configured: it is the standard method, and Hash off-page -# duplicates require it. -ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_OBJS)" - -# Hash can be disabled. -if test "$db_cv_build_hash" = "yes"; then - AC_DEFINE(HAVE_HASH) - AH_TEMPLATE(HAVE_HASH, [Define to 1 if building Hash access method.]) - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_OBJS)" - if test "$db_cv_build_verify" = "yes"; then - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(HASH_VRFY_OBJS)" - fi -else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS hash_stub${o}" -fi - -# Queue can be disabled. -if test "$db_cv_build_queue" = "yes"; then - AC_DEFINE(HAVE_QUEUE) - AH_TEMPLATE(HAVE_QUEUE, [Define to 1 if building Queue access method.]) - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_OBJS)" - if test "$db_cv_build_verify" = "yes"; then - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(QUEUE_VRFY_OBJS)" - fi -else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS qam_stub${o}" -fi - -# Replication can be disabled. -if test "$db_cv_build_replication" = "yes"; then - AC_DEFINE(HAVE_REPLICATION) - AH_TEMPLATE(HAVE_REPLICATION, - [Define to 1 if building replication support.]) - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(REP_OBJS)" -else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS rep_stub${o}" -fi - -# The statistics code can be disabled. -if test "$db_cv_build_statistics" = "yes"; then - AC_DEFINE(HAVE_STATISTICS) - AH_TEMPLATE(HAVE_STATISTICS, - [Define to 1 if building statistics support.]) -fi - -# The verification code can be disabled. -if test "$db_cv_build_verify" = "yes"; then - AC_DEFINE(HAVE_VERIFY) - AH_TEMPLATE(HAVE_VERIFY, - [Define to 1 if building access method verification support.]) - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(BTREE_VRFY_OBJS)" -else - ADDITIONAL_OBJS="$ADDITIONAL_OBJS db_vrfy_stub${o}" -fi - -# The crypto code can be disabled. -if test -d "$srcdir/../crypto" -a "$db_cv_build_cryptography" = "yes"; then - AC_DEFINE(HAVE_CRYPTO) - AH_TEMPLATE(HAVE_CRYPTO, - [Define to 1 if Berkeley DB release includes strong cryptography.]) - - CRYPTO_OBJS="\$(CRYPTO_OBJS)" -else - CRYPTO_OBJS="crypto_stub${o}" -fi - -# If DIAGNOSTIC is defined, include the log print routines in the library -# itself, various diagnostic modes use them. -if test "$db_cv_diagnostic" = "yes"; then - ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(PRINT_OBJS)" -fi - -# We need to add the additional object files into the Makefile with the correct -# suffix. We can't use $LTLIBOBJS itself, because that variable has $U encoded -# in it for automake, and that's not what we want. See SR #7227 for additional -# information. -# -# XXX: I'm not sure this is correct. -REPLACEMENT_OBJS=`echo "$LIB@&t@OBJS" | - sed "s,\.[[^.]]* ,$o ,g;s,\.[[^.]]*$,$o,"` - -# This is necessary so that .o files in LIBOBJS are also built via -# the ANSI2KNR-filtering rules. 
-LIB@&t@OBJS=`echo "$LIB@&t@OBJS" | - sed 's,\.[[^.]]* ,$U&,g;s,\.[[^.]]*$,$U&,'` -LTLIBOBJS=`echo "$LIB@&t@OBJS" | - sed 's,\.[[^.]]* ,.lo ,g;s,\.[[^.]]*$,.lo,'` -AC_SUBST(LTLIBOBJS) - -# Initial output file list. -CREATE_LIST="Makefile - db_cxx.h:$srcdir/../dbinc/db_cxx.in - db_int.h:$srcdir/../dbinc/db_int.in" - -# MinGW needs win_db.h. -if test "$db_cv_mingw" = "yes"; then -CREATE_LIST="$CREATE_LIST - win_db.h:$srcdir/win_db.in" -fi - -# Create the db.h file from a source file, a list of global function -# prototypes, and, if configured for unique names, a list of #defines -# to do DB_VERSION_UNIQUE_NAME substitution. -if test "$db_cv_uniquename" = "yes"; then - CREATE_LIST="$CREATE_LIST - db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in" -else - CREATE_LIST="$CREATE_LIST - db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/ext_prot.in" -fi - -# If configured for unique names, create the db_int_uext.h file (which -# does the DB_VERSION_UNIQUE_NAME substitution), which is included by -# the db_int.h file. -if test "$db_cv_uniquename" = "yes"; then - CREATE_LIST="$CREATE_LIST - db_int_def.h:$srcdir/../dbinc_auto/int_def.in" - db_int_def='#include "db_int_def.h"' -fi - -# Create the db_185.h and db185_int.h files from source files, a list of -# global function prototypes, and, if configured for unique names, a list -# of #defines to do DB_VERSION_UNIQUE_NAME substitution. -if test "$db_cv_compat185" = "yes"; then - if test "$db_cv_uniquename" = "yes"; then - CREATE_LIST="$CREATE_LIST - db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in - db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in" - else - CREATE_LIST="$CREATE_LIST - db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_prot.in - db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_prot.in" - fi -fi - -AC_CONFIG_FILES($CREATE_LIST) -AC_OUTPUT diff --git a/storage/bdb/dist/db.ecd.in b/storage/bdb/dist/db.ecd.in deleted file mode 100644 index 92a6a090716..00000000000 --- a/storage/bdb/dist/db.ecd.in +++ /dev/null @@ -1,64 +0,0 @@ -# Embedix Componenet Description (ECD) file for BerkeleyDB. -# -# $Id: db.ecd.in,v 11.1 2001/04/04 14:06:13 bostic Exp $ - - - - - SRPM=db - - - Berkeley DB is Sleepycat Software's programmatic database toolkit. - - - TYPE=bool - DEFAULT_VALUE=1 - PROMPT=Include BerkeleyDB library? - - /usr/lib/libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so - /usr/include/db.h - /usr/lib/libdb.so - - - libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so - - - ld-linux.so.2 - libc.so.6 - - STATIC_SIZE=0 - STARTUP_TIME=0 - - @EMBEDIX_ECD_CXX@ - - - - - - diff --git a/storage/bdb/dist/db.spec.in b/storage/bdb/dist/db.spec.in deleted file mode 100644 index ef253bcfcf4..00000000000 --- a/storage/bdb/dist/db.spec.in +++ /dev/null @@ -1,52 +0,0 @@ -# Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ - -Summary: Sleepycat Berkeley DB database library -Name: db -Version: @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ -Release: 1 -Copyright: Freely redistributable, see LICENSE for details. 
-Source: http://www.sleepycat.com/update/@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@.tar.gz -URL: http://www.sleepycat.com -Group: System Environment/Libraries -BuildRoot: @CONFIGURATION_PATH@/RPM_INSTALL - -%description -Berkeley DB is a programmatic toolkit that provides fast, reliable, -mission-critical, and scalable built-in database support for software -ranging from embedded applications running on hand-held appliances to -enterprise-scale servers. - -The Berkeley DB access methods include B+tree, Extended Linear Hashing, -Fixed and Variable-length records, and Persistent Queues. Berkeley DB -provides full transactional support, database recovery, online backups, -and separate access to locking, logging and shared memory caching -subsystems. - -Berkeley DB supports C, C++, Java, Tcl, Perl, and Python APIs. The -software is available for Linux, a wide variety of UNIX platforms, -Windows 95/98, Windows/NT, Windows 2000, VxWorks and QNX. - -%prep -%setup - -%build -cd build_unix -CFLAGS="$RPM_OPT_FLAGS" ../dist/configure @CONFIGURATION_ARGS@ -make library_build - -%install -cd build_unix -make prefix=@CONFIGURATION_PATH@/RPM_INSTALL@EMBEDIX_ROOT@ install - -@RPM_POST_INSTALL@ - -@RPM_POST_UNINSTALL@ - -%files -%defattr(-,root,root) -%dir @EMBEDIX_ROOT@/bin -%dir @EMBEDIX_ROOT@/docs -%dir @EMBEDIX_ROOT@/include -%dir @EMBEDIX_ROOT@/lib - -%changelog diff --git a/storage/bdb/dist/gen_inc.awk b/storage/bdb/dist/gen_inc.awk deleted file mode 100644 index d48d02bb1d2..00000000000 --- a/storage/bdb/dist/gen_inc.awk +++ /dev/null @@ -1,73 +0,0 @@ -# This awk script parses C input files looking for lines marked "PUBLIC:" -# and "EXTERN:". (PUBLIC lines are DB internal function prototypes and -# #defines, EXTERN are DB external function prototypes and #defines.) -# -# PUBLIC lines are put into two versions of per-directory include files: -# one file that contains the prototypes, and one file that contains a -# #define for the name to be processed during configuration when creating -# unique names for every global symbol in the DB library. -# -# The EXTERN lines are put into two files: one of which contains prototypes -# which are always appended to the db.h file, and one of which contains a -# #define list for use when creating unique symbol names. -# -# Four arguments: -# e_dfile list of EXTERN #defines -# e_pfile include file that contains EXTERN prototypes -# i_dfile list of internal (PUBLIC) #defines -# i_pfile include file that contains internal (PUBLIC) prototypes -/PUBLIC:/ { - sub("^.*PUBLIC:[ ][ ]*", "") - if ($0 ~ "^#if|^#ifdef|^#ifndef|^#else|^#endif") { - print $0 >> i_pfile - print $0 >> i_dfile - next - } - pline = sprintf("%s %s", pline, $0) - if (pline ~ "\\)\\);") { - sub("^[ ]*", "", pline) - print pline >> i_pfile - if (pline !~ db_version_unique_name) { - gsub("[ ][ ]*__P.*", "", pline) - sub("^.*[ ][*]*", "", pline) - printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n", - pline, pline) >> i_dfile - } - pline = "" - } -} - -# When we switched to methods in 4.0, we guessed txn_{abort,begin,commit} -# were the interfaces applications would likely use and not be willing to -# change, due to the sheer volume of the calls. Provide wrappers -- we -# could do txn_abort and txn_commit using macros, but not txn_begin, as -# the name of the field is txn_begin, we didn't want to modify it. -# -# The issue with txn_begin hits us in another way. 
If configured with the -# --with-uniquename option, we use #defines to re-define DB's interfaces -# to unique names. We can't do that for these functions because txn_begin -# is also a field name in the DB_ENV structure, and the #defines we use go -# at the end of the db.h file -- we get control too late to #define a field -# name. So, modify the script that generates the unique names #defines to -# not generate them for these three functions, and don't include the three -# functions in libraries built with that configuration option. -/EXTERN:/ { - sub("^.*EXTERN:[ ][ ]*", "") - if ($0 ~ "^#if|^#ifdef|^#ifndef|^#else|^#endif") { - print $0 >> e_pfile - print $0 >> e_dfile - next - } - eline = sprintf("%s %s", eline, $0) - if (eline ~ "\\)\\);") { - sub("^[ ]*", "", eline) - print eline >> e_pfile - if (eline !~ db_version_unique_name && eline !~ "^int txn_") { - gsub("[ ][ ]*__P.*", "", eline) - sub("^.*[ ][*]*", "", eline) - printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n", - eline, eline) >> e_dfile - } - eline = "" - } -} diff --git a/storage/bdb/dist/gen_rec.awk b/storage/bdb/dist/gen_rec.awk deleted file mode 100644 index bfb972fbac7..00000000000 --- a/storage/bdb/dist/gen_rec.awk +++ /dev/null @@ -1,981 +0,0 @@ -#!/bin/sh - -# -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2005 -# Sleepycat Software. All rights reserved. -# -# $Id: gen_rec.awk,v 12.6 2005/10/12 18:48:44 ubell Exp $ -# - -# This awk script generates all the log, print, and read routines for the DB -# logging. It also generates a template for the recovery functions (these -# functions must still be edited, but are highly stylized and the initial -# template gets you a fair way along the path). -# -# For a given file prefix.src, we generate a file prefix_auto.c, and a file -# prefix_auto.h that contains: -# -# external declarations for the file's functions -# defines for the physical record types -# (logical types are defined in each subsystem manually) -# structures to contain the data unmarshalled from the log. -# -# This awk script requires that four variables be set when it is called: -# -# source_file -- the C source file being created -# header_file -- the C #include file being created -# template_file -- the template file being created -# -# And stdin must be the input file that defines the recovery setup. -# -# Within each file prefix.src, we use a number of public keywords (documented -# in the reference guide) as well as the following ones which are private to -# DB: -# DBPRIVATE Indicates that a file will be built as part of DB, -# rather than compiled independently, and so can use -# DB-private interfaces (such as DB_LOG_NOCOPY). -# DB A DB handle. Logs the dbreg fileid for that handle, -# and makes the *_log interface take a DB * instead of a -# DB_ENV *. -# PGDBT Just like DBT, only we know it stores a page or page -# header, so we can byte-swap it (once we write the -# byte-swapping code, which doesn't exist yet). -# LOCKS Just like DBT, but uses a print function for locks. 
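The header comment above describes the generator's inputs: the record descriptions arrive on stdin and the generated file names are passed in as awk variables. A hedged sketch of an invocation (file names are illustrative, and the real build drives this from a helper script under dist/); note that besides the three variables listed in the comment, the usage message and the PFILE assignment in the BEGIN block below expect a fourth, print_file.

# Illustrative run of the deleted generator against a btree record description.
awk -f gen_rec.awk \
    -v source_file=btree_auto.c \
    -v header_file=btree_auto.h \
    -v print_file=btree_autop.c \
    -v template_file=rec_btree_template \
    < btree.src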
- -BEGIN { - if (source_file == "" || - header_file == "" || template_file == "") { - print "Usage: gen_rec.awk requires three variables to be set:" - print "\theader_file\t-- the recover #include file being created" - print "\tprint_file\t-- the print source file being created" - print "\tsource_file\t-- the recover source file being created" - print "\ttemplate_file\t-- the template file being created" - exit - } - FS="[\t ][\t ]*" - CFILE=source_file - HFILE=header_file - PFILE=print_file - TFILE=template_file - dbprivate = 0 - buf_only = 1; -} -/^[ ]*DBPRIVATE/ { - dbprivate = 1 -} -/^[ ]*PREFIX/ { - prefix = $2 - num_funcs = 0; - - # Start .c files. - printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \ - > CFILE - printf("#include \"db_config.h\"\n\n") >> CFILE - printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \ - > PFILE - printf("#include \"db_config.h\"\n\n") >> PFILE - if (prefix == "__ham") - printf("#ifdef HAVE_HASH\n") >> PFILE - if (prefix == "__qam") - printf("#ifdef HAVE_QUEUE\n") >> PFILE - - # Start .h file, make the entire file conditional. - printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \ - > HFILE - printf("#ifndef\t%s_AUTO_H\n#define\t%s_AUTO_H\n", prefix, prefix) \ - >> HFILE; - - # Write recovery template file headers - # This assumes we're doing DB recovery. - printf("#include \"db_config.h\"\n\n") > TFILE - printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE - printf("#include \n\n") >> TFILE - printf("#include \n") >> TFILE - printf("#endif\n\n") >> TFILE - printf("#include \"db_int.h\"\n") >> TFILE - printf("#include \"dbinc/db_page.h\"\n") >> TFILE - printf("#include \"dbinc/%s.h\"\n", prefix) >> TFILE - printf("#include \"dbinc/log.h\"\n\n") >> TFILE -} -/^[ ]*INCLUDE/ { - for (i = 2; i < NF; i++) - printf("%s ", $i) >> CFILE - printf("%s\n", $i) >> CFILE - for (i = 2; i < NF; i++) - printf("%s ", $i) >> PFILE - printf("%s\n", $i) >> PFILE -} -/^[ ]*(BEGIN|IGNORED|BEGIN_BUF)/ { - if (in_begin) { - print "Invalid format: missing END statement" - exit - } - in_begin = 1; - is_dbt = 0; - has_dbp = 0; - is_uint = 0; - need_log_function = ($1 == "BEGIN") || ($1 == "BEGIN_BUF"); - not_buf = ($1 == "BEGIN") || ($1 == "IGNORED"); - if (not_buf) - buf_only = 0; - nvars = 0; - - thisfunc = $2; - funcname = sprintf("%s_%s", prefix, $2); - - if (not_buf) - rectype = $3; - - funcs[num_funcs] = funcname; - ++num_funcs; -} -/^[ ]*(DB|ARG|DBT|LOCKS|PGDBT|POINTER|TIME)/ { - vars[nvars] = $2; - types[nvars] = $3; - atypes[nvars] = $1; - modes[nvars] = $1; - formats[nvars] = $NF; - for (i = 4; i < NF; i++) - types[nvars] = sprintf("%s %s", types[nvars], $i); - - if ($1 == "DB") { - has_dbp = 1; - } - - if ($1 == "DB" || $1 == "ARG" || $1 == "TIME") { - sizes[nvars] = sprintf("sizeof(u_int32_t)"); - is_uint = 1; - } else if ($1 == "POINTER") - sizes[nvars] = sprintf("sizeof(*%s)", $2); - else { # DBT, PGDBT - sizes[nvars] = \ - sprintf("sizeof(u_int32_t) + (%s == NULL ? 0 : %s->size)", \ - $2, $2); - is_dbt = 1; - } - nvars++; -} -/^[ ]*END/ { - if (!in_begin) { - print "Invalid format: missing BEGIN statement" - exit; - } - - # Declare the record type. - if (not_buf) { - printf("#define\tDB_%s\t%d\n", funcname, rectype) >> HFILE - } - - # Structure declaration. 
- printf("typedef struct _%s_args {\n", funcname) >> HFILE - - # Here are the required fields for every structure - if (not_buf) { - printf("\tu_int32_t type;\n\tDB_TXN *txnid;\n") >> HFILE - printf("\tDB_LSN prev_lsn;\n") >>HFILE - } - - # Here are the specified fields. - for (i = 0; i < nvars; i++) { - t = types[i]; - if (modes[i] == "POINTER") { - ndx = index(t, "*"); - t = substr(types[i], 1, ndx - 2); - } - printf("\t%s\t%s;\n", t, vars[i]) >> HFILE - } - printf("} %s_args;\n\n", funcname) >> HFILE - - # Output the log, print and read functions. - if (need_log_function) { - log_function(); - } - if (not_buf) { - print_function(); - } - read_function(); - - # Recovery template - if (not_buf) { - cmd = sprintf(\ - "sed -e s/PREF/%s/ -e s/FUNC/%s/ < template/rec_ctemp >> %s", - prefix, thisfunc, TFILE) - system(cmd); - } - - # Done writing stuff, reset and continue. - in_begin = 0; -} - -END { - # End the conditional for the HFILE - printf("#endif\n") >> HFILE; - - if (buf_only == 1) - exit - - # Print initialization routine; function prototype - p[1] = sprintf("int %s_init_print %s%s", prefix, - "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ", - "db_recops, void *), size_t *));"); - p[2] = ""; - proto_format(p, PFILE); - - # Create the routine to call __db_add_recovery(print_fn, id) - printf("int\n%s_init_print(dbenv, dtabp, dtabsizep)\n", \ - prefix) >> PFILE; - printf("\tDB_ENV *dbenv;\n") >> PFILE;; - printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> PFILE; - printf(" db_recops, void *));\n") >> PFILE; - printf("\tsize_t *dtabsizep;\n{\n") >> PFILE; - # If application-specific, the user will need a prototype for - # __db_add_recovery, since they won't have DB's. - if (!dbprivate) { - printf("\tint __db_add_recovery __P((DB_ENV *,\n") >> PFILE; - printf(\ -"\t int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),\n") >> PFILE; - printf("\t size_t *,\n") >> PFILE; - printf(\ -"\t int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));\n") \ - >> PFILE; - } - - printf("\tint ret;\n\n") >> PFILE; - for (i = 0; i < num_funcs; i++) { - printf("\tif ((ret = __db_add_recovery(dbenv, ") >> PFILE; - printf("dtabp, dtabsizep,\n") >> PFILE; - printf("\t %s_print, DB_%s)) != 0)\n", \ - funcs[i], funcs[i]) >> PFILE; - printf("\t\treturn (ret);\n") >> PFILE; - } - printf("\treturn (0);\n}\n") >> PFILE; - if (prefix == "__ham") - printf("#endif /* HAVE_HASH */\n") >> PFILE - if (prefix == "__qam") - printf("#endif /* HAVE_QUEUE */\n") >> PFILE - - # We only want to generate *_init_recover functions if this is a - # DB-private, rather than application-specific, set of recovery - # functions. Application-specific recovery functions should be - # dispatched using the DB_ENV->set_app_dispatch callback rather - # than a DB dispatch table ("dtab"). 
- if (!dbprivate) - exit - - # Recover initialization routine - p[1] = sprintf("int %s_init_recover %s%s", prefix, - "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ", - "db_recops, void *), size_t *));"); - p[2] = ""; - proto_format(p, CFILE); - - # Create the routine to call db_add_recovery(func, id) - printf("int\n%s_init_recover(dbenv, dtabp, dtabsizep)\n", \ - prefix) >> CFILE; - printf("\tDB_ENV *dbenv;\n") >> CFILE; - printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE; - printf(" db_recops, void *));\n") >> CFILE; - printf("\tsize_t *dtabsizep;\n{\n\tint ret;\n\n") >> CFILE; - for (i = 0; i < num_funcs; i++) { - printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE; - printf("dtabp, dtabsizep,\n") >> CFILE; - printf("\t %s_recover, DB_%s)) != 0)\n", \ - funcs[i], funcs[i]) >> CFILE; - printf("\t\treturn (ret);\n") >> CFILE; - } - printf("\treturn (0);\n}\n") >> CFILE; -} - -function log_function() -{ - # Write the log function; function prototype - pi = 1; - if (not_buf) { - p[pi++] = sprintf("int %s_log", funcname); - p[pi++] = " "; - if (has_dbp == 1) { - p[pi++] = "__P((DB *"; - } else { - p[pi++] = "__P((DB_ENV *"; - } - p[pi++] = ", DB_TXN *, DB_LSN *, u_int32_t"; - } else { - p[pi++] = sprintf("int %s_buf", funcname); - p[pi++] = " "; - p[pi++] = "__P((u_int8_t *, size_t, size_t *"; - } - for (i = 0; i < nvars; i++) { - if (modes[i] == "DB") - continue; - p[pi++] = ", "; - p[pi++] = sprintf("%s%s%s", - (modes[i] == "DBT" || modes[i] == "LOCKS" || - modes[i] == "PGDBT") ? "const " : "", types[i], - (modes[i] == "DBT" || modes[i] == "LOCKS" || - modes[i] == "PGDBT") ? " *" : ""); - } - p[pi++] = ""; - p[pi++] = "));"; - p[pi++] = ""; - proto_format(p, CFILE); - - # Function declaration - if (not_buf && has_dbp == 1) { - printf("int\n%s_log(dbp, txnid, ret_lsnp, flags", \ - funcname) >> CFILE; - } else if (not_buf) { - printf("int\n%s_log(dbenv, txnid, ret_lsnp, flags", \ - funcname) >> CFILE; - } else { - printf("int\n%s_buf(buf, max, lenp", funcname) >> CFILE; - } - for (i = 0; i < nvars; i++) { - if (modes[i] == "DB") { - # We pass in fileids on the dbp, so if this is one, - # skip it. - continue; - } - printf(",") >> CFILE; - if ((i % 6) == 0) - printf("\n ") >> CFILE; - else - printf(" ") >> CFILE; - printf("%s", vars[i]) >> CFILE; - } - printf(")\n") >> CFILE; - - # Now print the parameters - if (not_buf) { - if (has_dbp == 1) { - printf("\tDB *dbp;\n") >> CFILE; - } else { - printf("\tDB_ENV *dbenv;\n") >> CFILE; - } - printf("\tDB_TXN *txnid;\n\tDB_LSN *ret_lsnp;\n") >> CFILE; - printf("\tu_int32_t flags;\n") >> CFILE; - } else { - printf("\tu_int8_t *buf;\n") >> CFILE; - printf("\tsize_t max, *lenp;\n") >> CFILE; - } - for (i = 0; i < nvars; i++) { - # We just skip for modes == DB. 
- if (modes[i] == "DBT" || - modes[i] == "LOCKS" || modes[i] == "PGDBT") - printf("\tconst %s *%s;\n", types[i], vars[i]) >> CFILE; - else if (modes[i] != "DB") - printf("\t%s %s;\n", types[i], vars[i]) >> CFILE; - } - - # Function body and local decls - printf("{\n") >> CFILE; - if (not_buf) { - printf("\tDBT logrec;\n") >> CFILE; - if (has_dbp == 1) - printf("\tDB_ENV *dbenv;\n") >> CFILE; - if (dbprivate) - printf("\tDB_TXNLOGREC *lr;\n") >> CFILE; - printf("\tDB_LSN *lsnp, null_lsn, *rlsnp;\n") >> CFILE; - printf("\tu_int32_t ") >> CFILE; - if (is_dbt == 1) - printf("zero, ") >> CFILE; - if (is_uint == 1) - printf("uinttmp, ") >> CFILE; - printf("rectype, txn_num;\n") >> CFILE; - printf("\tu_int npad;\n") >> CFILE; - } else { - if (is_dbt == 1) - printf("\tu_int32_t zero;\n") >> CFILE; - if (is_uint == 1) - printf("\tu_int32_t uinttmp;\n") >> CFILE; - printf("\tu_int8_t *endbuf;\n") >> CFILE; - } - printf("\tu_int8_t *bp;\n") >> CFILE; - printf("\tint ") >> CFILE; - if (dbprivate && not_buf) { - printf("is_durable, ") >> CFILE; - } - printf("ret;\n\n") >> CFILE; - - # Initialization - if (not_buf) { - if (has_dbp == 1) - printf("\tdbenv = dbp->dbenv;\n") >> CFILE; - if (dbprivate) - printf("\tCOMPQUIET(lr, NULL);\n\n") >> CFILE; - printf("\trectype = DB_%s;\n", funcname) >> CFILE; - printf("\tnpad = 0;\n") >> CFILE; - printf("\trlsnp = ret_lsnp;\n\n") >> CFILE; - } - printf("\tret = 0;\n\n") >> CFILE; - - if (not_buf) { - if (dbprivate) { - printf("\tif (LF_ISSET(DB_LOG_NOT_DURABLE)") \ - >> CFILE; - if (has_dbp == 1) { - printf(" ||\n\t ") >> CFILE; - printf("F_ISSET(dbp, DB_AM_NOT_DURABLE)) {\n") \ - >> CFILE; - } else { - printf(") {\n") >> CFILE; - printf("\t\tif (txnid == NULL)\n") >> CFILE; - printf("\t\t\treturn (0);\n") >> CFILE; - } - printf("\t\tis_durable = 0;\n") >> CFILE; - printf("\t} else\n") >> CFILE; - printf("\t\tis_durable = 1;\n\n") >> CFILE; - } - printf("\tif (txnid == NULL) {\n") >> CFILE; - printf("\t\ttxn_num = 0;\n") >> CFILE; - printf("\t\tlsnp = &null_lsn;\n") >> CFILE; - printf("\t\tnull_lsn.file = null_lsn.offset = 0;\n") >> CFILE; - printf("\t} else {\n") >> CFILE; - if (dbprivate && funcname != "__db_debug") { - printf(\ - "\t\tif (TAILQ_FIRST(&txnid->kids) != NULL &&\n") >> CFILE; - printf("\t\t (ret = __txn_activekids(") >> CFILE; - printf("dbenv, rectype, txnid)) != 0)\n") >> CFILE; - printf("\t\t\treturn (ret);\n") >> CFILE; - } - printf("\t\t/*\n\t\t * We need to assign begin_lsn while ") \ - >> CFILE; - printf("holding region mutex.\n") >> CFILE; - printf("\t\t * That assignment is done inside the ") >> CFILE; - printf("DbEnv->log_put call,\n\t\t * ") >> CFILE; - printf("so pass in the appropriate memory location to be ") \ - >> CFILE; - printf("filled\n\t\t * in by the log_put code.\n\t\t */\n") \ - >> CFILE; - printf("\t\tDB_SET_TXN_LSNP(txnid, &rlsnp, &lsnp);\n") >> CFILE; - printf("\t\ttxn_num = txnid->txnid;\n") >> CFILE; - printf("\t}\n\n") >> CFILE; - - # If we're logging a DB handle, make sure we have a log - # file ID for it. 
- db_handle_id_function(modes, nvars); - - # Malloc - printf("\tlogrec.size = ") >> CFILE; - printf("sizeof(rectype) + ") >> CFILE; - printf("sizeof(txn_num) + sizeof(DB_LSN)") >> CFILE; - for (i = 0; i < nvars; i++) - printf("\n\t + %s", sizes[i]) >> CFILE; - printf(";\n") >> CFILE - if (dbprivate) { - printf("\tif (CRYPTO_ON(dbenv)) {\n") >> CFILE; - printf("\t\tnpad =\n") >> CFILE; - printf("\t\t ((DB_CIPHER *)dbenv->crypto_handle)") \ - >> CFILE; - printf("->adj_size(logrec.size);\n") >> CFILE; - printf("\t\tlogrec.size += npad;\n\t}\n\n") >> CFILE - - printf("\tif (is_durable || txnid == NULL) {\n") \ - >> CFILE; - printf("\t\tif ((ret =\n\t\t __os_malloc(dbenv, ") \ - >> CFILE; - printf("logrec.size, &logrec.data)) != 0)\n") >> CFILE; - printf("\t\t\treturn (ret);\n") >> CFILE; - printf("\t} else {\n") >> CFILE; - write_malloc("\t\t", - "lr", "logrec.size + sizeof(DB_TXNLOGREC)", CFILE) - printf("#ifdef DIAGNOSTIC\n") >> CFILE; - printf("\t\tif ((ret =\n\t\t __os_malloc(dbenv, ") \ - >> CFILE; - printf("logrec.size, &logrec.data)) != 0) {\n") \ - >> CFILE; - printf("\t\t\t__os_free(dbenv, lr);\n") >> CFILE; - printf("\t\t\treturn (ret);\n") >> CFILE; - printf("\t\t}\n") >> CFILE; - printf("#else\n") >> CFILE; - printf("\t\tlogrec.data = lr->data;\n") >> CFILE; - printf("#endif\n") >> CFILE; - printf("\t}\n") >> CFILE; - } else { - write_malloc("\t", "logrec.data", "logrec.size", CFILE) - printf("\tbp = logrec.data;\n\n") >> CFILE; - } - printf("\tif (npad > 0)\n") >> CFILE; - printf("\t\tmemset((u_int8_t *)logrec.data + logrec.size ") \ - >> CFILE; - printf("- npad, 0, npad);\n\n") >> CFILE; - printf("\tbp = logrec.data;\n\n") >> CFILE; - - # Copy args into buffer - printf("\tmemcpy(bp, &rectype, sizeof(rectype));\n") >> CFILE; - printf("\tbp += sizeof(rectype);\n\n") >> CFILE; - printf("\tmemcpy(bp, &txn_num, sizeof(txn_num));\n") >> CFILE; - printf("\tbp += sizeof(txn_num);\n\n") >> CFILE; - printf("\tmemcpy(bp, lsnp, sizeof(DB_LSN));\n") >> CFILE; - printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE; - } else { - # If we're logging a DB handle, make sure we have a log - # file ID for it. 
- db_handle_id_function(modes, nvars); - - printf("\tbp = buf;\n") >> CFILE; - printf("\tendbuf = bp + max;\n\n") >> CFILE - } - - for (i = 0; i < nvars; i++) { - if (modes[i] == "ARG" || modes[i] == "TIME") { - printf("\tuinttmp = (u_int32_t)%s;\n", \ - vars[i]) >> CFILE; - if (!not_buf) { - printf(\ - "\tif (bp + sizeof(uinttmp) > endbuf)\n") \ - >> CFILE; - printf("\t\treturn (ENOMEM);\n") >> CFILE; - } - printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \ - >> CFILE; - printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE; - } else if (modes[i] == "DBT" || \ - modes[i] == "LOCKS" || modes[i] == "PGDBT") { - printf("\tif (%s == NULL) {\n", vars[i]) >> CFILE; - printf("\t\tzero = 0;\n") >> CFILE; - if (!not_buf) { - printf(\ - "\t\tif (bp + sizeof(u_int32_t) > endbuf)\n") \ - >> CFILE; - printf("\t\t\treturn (ENOMEM);\n") >> CFILE; - } - printf("\t\tmemcpy(bp, &zero, sizeof(u_int32_t));\n") \ - >> CFILE; - printf("\t\tbp += sizeof(u_int32_t);\n") >> CFILE; - printf("\t} else {\n") >> CFILE; - if (!not_buf) { - printf(\ - "\t\tif (bp + sizeof(%s->size) > endbuf)\n", \ - vars[i]) >> CFILE; - printf("\t\t\treturn (ENOMEM);\n") >> CFILE; - } - printf("\t\tmemcpy(bp, &%s->size, ", vars[i]) >> CFILE; - printf("sizeof(%s->size));\n", vars[i]) >> CFILE; - printf("\t\tbp += sizeof(%s->size);\n", vars[i]) \ - >> CFILE; - if (!not_buf) { - printf("\t\tif (bp + %s->size > endbuf)\n", \ - vars[i]) >> CFILE; - printf("\t\t\treturn (ENOMEM);\n") >> CFILE; - } - printf("\t\tmemcpy(bp, %s->data, %s->size);\n", \ - vars[i], vars[i]) >> CFILE; - printf("\t\tbp += %s->size;\n\t}\n\n", \ - vars[i]) >> CFILE; - } else if (modes[i] == "DB") { - printf("\tuinttmp = ") >> CFILE; - printf("(u_int32_t)dbp->log_filename->id;\n") >> CFILE; - printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \ - >> CFILE; - printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE; - } else { # POINTER - if (!not_buf) { - printf("\tif (bp + %s > endbuf)\n", \ - sizes[i]) >> CFILE; - printf("\t\treturn (ENOMEM);\n") >> CFILE; - } - printf("\tif (%s != NULL)\n", vars[i]) >> CFILE; - printf("\t\tmemcpy(bp, %s, %s);\n", vars[i], \ - sizes[i]) >> CFILE; - printf("\telse\n") >> CFILE; - printf("\t\tmemset(bp, 0, %s);\n", sizes[i]) >> CFILE; - printf("\tbp += %s;\n\n", sizes[i]) >> CFILE; - } - } - - # Error checking. User code won't have DB_ASSERT available, but - # this is a pretty unlikely assertion anyway, so we just leave it out - # rather than requiring assert.h. - if (not_buf) { - if (dbprivate) { - printf("\tDB_ASSERT((u_int32_t)") >> CFILE; - printf("(bp - (u_int8_t *)logrec.data) ") >> CFILE; - printf("<= logrec.size);\n\n") >> CFILE; - # Save the log record off in the txn's linked list, - # or do log call. - # We didn't call the crypto alignment function when - # we created this log record (because we don't have - # the right header files to find the function), so - # we have to copy the log record to make sure the - # alignment is correct. - printf("\tif (is_durable || txnid == NULL) {\n") \ - >> CFILE; - # Output the log record and update the return LSN. 
- printf("\t\tif ((ret = __log_put(dbenv, rlsnp,") \ - >> CFILE; - printf("(DBT *)&logrec,\n") >> CFILE; - printf("\t\t flags | DB_LOG_NOCOPY)) == 0") >> CFILE; - printf(" && txnid != NULL) {\n") >> CFILE; - printf("\t\t\t*lsnp = *rlsnp;\n") >> CFILE; - - printf("\t\t\tif (rlsnp != ret_lsnp)\n") >> CFILE; - printf("\t\t\t\t *ret_lsnp = *rlsnp;\n") >> CFILE; - printf("\t\t}\n\t} else {\n") >> CFILE; - printf("#ifdef DIAGNOSTIC\n") >> CFILE; - - # Add the debug bit if we are logging a ND record. - printf("\t\t/*\n") >> CFILE; - printf("\t\t * Set the debug bit if we are") >> CFILE; - printf(" going to log non-durable\n") >> CFILE; - printf("\t\t * transactions so they will be ignored") \ - >> CFILE; - printf(" by recovery.\n") >> CFILE; - printf("\t\t */\n") >> CFILE; - printf("\t\tmemcpy(lr->data, logrec.data, ") >> CFILE - printf("logrec.size);\n") >> CFILE; - printf("\t\trectype |= DB_debug_FLAG;\n") >> CFILE; - printf("\t\tmemcpy(") >> CFILE - printf("logrec.data, &rectype, sizeof(rectype));\n\n") \ - >> CFILE; - # Output the log record. - printf("\t\tret = __log_put(dbenv,\n") >> CFILE; - printf("\t\t rlsnp, (DBT *)&logrec, ") >> CFILE; - printf("flags | DB_LOG_NOCOPY);\n") >> CFILE; - printf("#else\n") >> CFILE; - printf("\t\tret = 0;\n") >> CFILE; - printf("#endif\n") >> CFILE; - # Add a ND record to the txn list. - printf("\t\tSTAILQ_INSERT_HEAD(&txnid") >> CFILE; - printf("->logs, lr, links);\n") >> CFILE; - printf("\t\tF_SET((TXN_DETAIL *)") >> CFILE; - printf("txnid->td, TXN_DTL_INMEMORY);\n") >> CFILE; - # Update the return LSN. - printf("\t\tLSN_NOT_LOGGED(*ret_lsnp);\n") >> CFILE; - printf("\t}\n\n") >> CFILE; - } else { - printf("\tif ((ret = dbenv->log_put(dbenv, rlsnp,") >> CFILE; - printf(" (DBT *)&logrec,\n") >> CFILE; - printf("\t flags | DB_LOG_NOCOPY)) == 0") >> CFILE; - printf(" && txnid != NULL) {\n") >> CFILE; - - # Update the transactions last_lsn. - printf("\t\t*lsnp = *rlsnp;\n") >> CFILE; - printf("\t\tif (rlsnp != ret_lsnp)\n") >> CFILE; - printf("\t\t\t *ret_lsnp = *rlsnp;\n") >> CFILE; - printf("\t}\n") >> CFILE; - - } - # If out of disk space log writes may fail. If we are debugging - # that print out which records did not make it to disk. - printf("#ifdef LOG_DIAGNOSTIC\n") >> CFILE - printf("\tif (ret != 0)\n") >> CFILE; - printf("\t\t(void)%s_print(dbenv,\n", funcname) >> CFILE; - printf("\t\t (DBT *)&logrec, ret_lsnp, ") >> CFILE - printf("DB_TXN_PRINT, NULL);\n#endif\n\n") >> CFILE - # Free and return - if (dbprivate) { - printf("#ifdef DIAGNOSTIC\n") >> CFILE - write_free("\t", "logrec.data", CFILE) - printf("#else\n") >> CFILE - printf("\tif (is_durable || txnid == NULL)\n") >> CFILE; - write_free("\t\t", "logrec.data", CFILE) - printf("#endif\n") >> CFILE - } else { - write_free("\t", "logrec.data", CFILE) - } - } else { - printf("\t*lenp = (u_int32_t)(bp - buf);\n\n") >> CFILE - } - - printf("\treturn (ret);\n}\n\n") >> CFILE; -} - -# If we're logging a DB handle, make sure we have a log -# file ID for it. -function db_handle_id_function(modes, n) -{ - for (i = 0; i < n; i++) - if (modes[i] == "DB") { - # We actually log the DB handle's fileid; from - # that ID we're able to acquire an open handle - # at recovery time. 
- printf(\ - "\tDB_ASSERT(dbp->log_filename != NULL);\n") \ - >> CFILE; - printf("\tif (dbp->log_filename->id == ") \ - >> CFILE; - printf("DB_LOGFILEID_INVALID &&\n\t ") \ - >> CFILE - printf("(ret = __dbreg_lazy_id(dbp)) != 0)\n") \ - >> CFILE - printf("\t\treturn (ret);\n\n") >> CFILE; - break; - } -} - -function print_function() -{ - # Write the print function; function prototype - p[1] = sprintf("int %s_print", funcname); - p[2] = " "; - p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));"; - p[4] = ""; - proto_format(p, PFILE); - - # Function declaration - printf("int\n%s_print(dbenv, ", funcname) >> PFILE; - printf("dbtp, lsnp, notused2, notused3)\n") >> PFILE; - printf("\tDB_ENV *dbenv;\n") >> PFILE; - printf("\tDBT *dbtp;\n") >> PFILE; - printf("\tDB_LSN *lsnp;\n") >> PFILE; - printf("\tdb_recops notused2;\n\tvoid *notused3;\n{\n") >> PFILE; - - # Locals - printf("\t%s_args *argp;\n", funcname) >> PFILE; - for (i = 0; i < nvars; i ++) - if (modes[i] == "TIME") { - printf("\tstruct tm *lt;\n") >> PFILE - printf("\ttime_t timeval;\n") >> PFILE - break; - } - for (i = 0; i < nvars; i ++) - if (modes[i] == "DBT" || modes[i] == "PGDBT") { - printf("\tu_int32_t i;\n") >> PFILE - printf("\tint ch;\n") >> PFILE - break; - } - printf("\tint ret;\n\n") >> PFILE; - - # Get rid of complaints about unused parameters. - printf("\tnotused2 = DB_TXN_PRINT;\n\tnotused3 = NULL;\n\n") >> PFILE; - - # Call read routine to initialize structure - printf("\tif ((ret = %s_read(dbenv, dbtp->data, &argp)) != 0)\n", \ - funcname) >> PFILE; - printf("\t\treturn (ret);\n") >> PFILE; - - # Print values in every record - printf("\t(void)printf(\n\t \"[%%lu][%%lu]%s%%s: ",\ - funcname) >> PFILE; - printf("rec: %%lu txnid %%lx ") >> PFILE; - printf("prevlsn [%%lu][%%lu]\\n\",\n") >> PFILE; - printf("\t (u_long)lsnp->file,\n") >> PFILE; - printf("\t (u_long)lsnp->offset,\n") >> PFILE; - printf("\t (argp->type & DB_debug_FLAG) ? \"_debug\" : \"\",\n") \ - >> PFILE; - printf("\t (u_long)argp->type,\n") >> PFILE; - printf("\t (u_long)argp->txnid->txnid,\n") >> PFILE; - printf("\t (u_long)argp->prev_lsn.file,\n") >> PFILE; - printf("\t (u_long)argp->prev_lsn.offset);\n") >> PFILE; - - # Now print fields of argp - for (i = 0; i < nvars; i ++) { - if (modes[i] == "TIME") { - printf("\ttimeval = (time_t)argp->%s;\n", - vars[i]) >> PFILE; - printf("\tlt = localtime(&timeval);\n") >> PFILE; - printf("\t(void)printf(\n\t \"\\t%s: ", - vars[i]) >> PFILE; - } else - printf("\t(void)printf(\"\\t%s: ", vars[i]) >> PFILE; - - if (modes[i] == "DBT" || modes[i] == "PGDBT") { - printf("\");\n") >> PFILE; - printf("\tfor (i = 0; i < ") >> PFILE; - printf("argp->%s.size; i++) {\n", vars[i]) >> PFILE; - printf("\t\tch = ((u_int8_t *)argp->%s.data)[i];\n", \ - vars[i]) >> PFILE; - printf("\t\tprintf(isprint(ch) || ch == 0x0a") >> PFILE; - printf(" ? \"%%c\" : \"%%#x \", ch);\n") >> PFILE; - printf("\t}\n\t(void)printf(\"\\n\");\n") >> PFILE; - } else if (types[i] == "DB_LSN *") { - printf("[%%%s][%%%s]\\n\",\n", \ - formats[i], formats[i]) >> PFILE; - printf("\t (u_long)argp->%s.file,", \ - vars[i]) >> PFILE; - printf(" (u_long)argp->%s.offset);\n", \ - vars[i]) >> PFILE; - } else if (modes[i] == "TIME") { - # Time values are displayed in two ways: the standard - # string returned by ctime, and in the input format - # expected by db_recover -t. 
- printf(\ - "%%%s (%%.24s, 20%%02lu%%02lu%%02lu%%02lu%%02lu.%%02lu)\\n\",\n", \ - formats[i]) >> PFILE; - printf("\t (long)argp->%s, ", vars[i]) >> PFILE; - printf("ctime(&timeval),", vars[i]) >> PFILE; - printf("\n\t (u_long)lt->tm_year - 100, ") >> PFILE; - printf("(u_long)lt->tm_mon+1,") >> PFILE; - printf("\n\t (u_long)lt->tm_mday, ") >> PFILE; - printf("(u_long)lt->tm_hour,") >> PFILE; - printf("\n\t (u_long)lt->tm_min, ") >> PFILE; - printf("(u_long)lt->tm_sec);\n") >> PFILE; - } else if (modes[i] == "LOCKS") { - printf("\\n\");\n") >> PFILE; - printf("\t__lock_list_print(dbenv, &argp->locks);\n") \ - >> PFILE; - } else { - if (formats[i] == "lx") - printf("0x") >> PFILE; - printf("%%%s\\n\", ", formats[i]) >> PFILE; - if (formats[i] == "lx" || formats[i] == "lu") - printf("(u_long)") >> PFILE; - if (formats[i] == "ld") - printf("(long)") >> PFILE; - printf("argp->%s);\n", vars[i]) >> PFILE; - } - } - printf("\t(void)printf(\"\\n\");\n") >> PFILE; - write_free("\t", "argp", PFILE); - printf("\treturn (0);\n") >> PFILE; - printf("}\n\n") >> PFILE; -} - -function read_function() -{ - # Write the read function; function prototype - if (not_buf) - p[1] = sprintf("int %s_read __P((DB_ENV *, void *,", funcname); - else - p[1] = sprintf("int %s_read __P((DB_ENV *, void *, void **,", \ - funcname); - p[2] = " "; - p[3] = sprintf("%s_args **));", funcname); - p[4] = ""; - proto_format(p, CFILE); - - # Function declaration - if (not_buf) - printf("int\n%s_read(dbenv, recbuf, argpp)\n", funcname) \ - >> CFILE; - else - printf(\ - "int\n%s_read(dbenv, recbuf, nextp, argpp)\n", funcname) \ - >> CFILE; - - # Now print the parameters - printf("\tDB_ENV *dbenv;\n") >> CFILE; - printf("\tvoid *recbuf;\n") >> CFILE; - if (!not_buf) - printf("\tvoid **nextp;\n") >> CFILE; - printf("\t%s_args **argpp;\n", funcname) >> CFILE; - - # Function body and local decls - printf("{\n\t%s_args *argp;\n", funcname) >> CFILE; - if (is_uint == 1) - printf("\tu_int32_t uinttmp;\n") >> CFILE; - printf("\tu_int8_t *bp;\n") >> CFILE; - - - if (dbprivate) { - # We only use dbenv and ret in the private malloc case. - printf("\tint ret;\n\n") >> CFILE; - } else { - printf("\t/* Keep the compiler quiet. */\n") >> CFILE; - printf("\n\tdbenv = NULL;\n") >> CFILE; - } - - if (not_buf) { - malloc_size = sprintf("sizeof(%s_args) + sizeof(DB_TXN)", \ - funcname) - } else { - malloc_size = sprintf("sizeof(%s_args)", funcname) - } - write_malloc("\t", "argp", malloc_size, CFILE) - - # Set up the pointers to the txnid. - printf("\tbp = recbuf;\n") >> CFILE; - - if (not_buf) { - printf("\targp->txnid = (DB_TXN *)&argp[1];\n\n") >> CFILE; - - # First get the record type, prev_lsn, and txnid fields. - - printf("\tmemcpy(&argp->type, bp, sizeof(argp->type));\n") \ - >> CFILE; - printf("\tbp += sizeof(argp->type);\n\n") >> CFILE; - printf("\tmemcpy(&argp->txnid->txnid, bp, ") >> CFILE; - printf("sizeof(argp->txnid->txnid));\n") >> CFILE; - printf("\tbp += sizeof(argp->txnid->txnid);\n\n") >> CFILE; - printf("\tmemcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));\n") \ - >> CFILE; - printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE; - } - - # Now get rest of data. 
- for (i = 0; i < nvars; i ++) { - if (modes[i] == "DBT" || \ - modes[i] == "LOCKS" || modes[i] == "PGDBT") { - printf("\tmemset(&argp->%s, 0, sizeof(argp->%s));\n", \ - vars[i], vars[i]) >> CFILE; - printf("\tmemcpy(&argp->%s.size, ", vars[i]) >> CFILE; - printf("bp, sizeof(u_int32_t));\n") >> CFILE; - printf("\tbp += sizeof(u_int32_t);\n") >> CFILE; - printf("\targp->%s.data = bp;\n", vars[i]) >> CFILE; - printf("\tbp += argp->%s.size;\n", vars[i]) >> CFILE; - } else if (modes[i] == "ARG" || modes[i] == "TIME" || - modes[i] == "DB") { - printf("\tmemcpy(&uinttmp, bp, sizeof(uinttmp));\n") \ - >> CFILE; - printf("\targp->%s = (%s)uinttmp;\n", vars[i], \ - types[i]) >> CFILE; - printf("\tbp += sizeof(uinttmp);\n") >> CFILE; - } else { # POINTER - printf("\tmemcpy(&argp->%s, bp, ", vars[i]) >> CFILE; - printf(" sizeof(argp->%s));\n", vars[i]) >> CFILE; - printf("\tbp += sizeof(argp->%s);\n", vars[i]) >> CFILE; - } - printf("\n") >> CFILE; - } - - # Free and return - if (!not_buf) - printf("\t*nextp = bp;\n") >> CFILE; - printf("\t*argpp = argp;\n") >> CFILE; - printf("\treturn (0);\n}\n\n") >> CFILE; -} - -# proto_format -- -# Pretty-print a function prototype. -function proto_format(p, fp) -{ - printf("/*\n") >> fp; - - s = ""; - for (i = 1; i in p; ++i) - s = s p[i]; - - t = " * PUBLIC: " - if (length(s) + length(t) < 80) - printf("%s%s", t, s) >> fp; - else { - split(s, p, "__P"); - len = length(t) + length(p[1]); - printf("%s%s", t, p[1]) >> fp - - n = split(p[2], comma, ","); - comma[1] = "__P" comma[1]; - for (i = 1; i <= n; i++) { - if (len + length(comma[i]) > 70) { - printf("\n * PUBLIC: ") >> fp; - len = 0; - } - printf("%s%s", comma[i], i == n ? "" : ",") >> fp; - len += length(comma[i]) + 2; - } - } - printf("\n */\n") >> fp; - delete p; -} - -function write_malloc(tab, ptr, size, file) -{ - if (dbprivate) { - print(tab "if ((ret = __os_malloc(dbenv,") >> file - print(tab " " size ", &" ptr ")) != 0)") >> file - print(tab "\treturn (ret);") >> file; - } else { - print(tab "if ((" ptr " = malloc(" size ")) == NULL)") >> file - print(tab "\treturn (ENOMEM);") >> file - } -} - -function write_free(tab, ptr, file) -{ - if (dbprivate) { - print(tab "__os_free(dbenv, " ptr ");") >> file - } else { - print(tab "free(" ptr ");") >> file - } -} diff --git a/storage/bdb/dist/gen_rpc.awk b/storage/bdb/dist/gen_rpc.awk deleted file mode 100644 index ac29648ea6a..00000000000 --- a/storage/bdb/dist/gen_rpc.awk +++ /dev/null @@ -1,1197 +0,0 @@ -# -# $Id: gen_rpc.awk,v 12.4 2005/07/21 18:21:20 bostic Exp $ -# Awk script for generating client/server RPC code. -# -# This awk script generates most of the RPC routines for DB client/server -# use. It also generates a template for server and client procedures. These -# functions must still be edited, but are highly stylized and the initial -# template gets you a fair way along the path). -# -# This awk script requires that these variables be set when it is called: -# -# major -- Major version number -# minor -- Minor version number -# xidsize -- size of GIDs -# client_file -- the C source file being created for client code -# ctmpl_file -- the C template file being created for client code -# server_file -- the C source file being created for server code -# stmpl_file -- the C template file being created for server code -# xdr_file -- the XDR message file created -# -# And stdin must be the input file that defines the RPC setup. 
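The header just above lists everything the deleted generator needed: eight awk variables naming the generated client/server/XDR output files, plus the RPC description file on stdin. As a point of reference, here is a minimal sketch of how such a generator might be driven from a build script. The version numbers, output file names, and the use of rpc.src as the description file are illustrative assumptions only; the real invocation lived elsewhere in the BDB dist machinery that this change also deletes, and is not recorded in this diff.

    # Hypothetical invocation of the deleted generator. Each -v assignment
    # supplies one of the variables validated in the script's BEGIN block;
    # the RPC description file is fed on stdin, as the header requires.
    awk -f gen_rpc.awk \
        -v major=4 -v minor=3 -v xidsize=128 \
        -v client_file=gen_client.c -v ctmpl_file=client_tmpl.c \
        -v server_file=gen_db_server.c -v stmpl_file=server_tmpl.c \
        -v xdr_file=db_server.x \
        < rpc.src

Everything method-specific (the ARG/RET/BEGIN/END records) comes from the description file itself, which is why the script aborts in its BEGIN block, shown next, if any of these variables is left unset.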
-BEGIN { - if (major == "" || minor == "" || xidsize == "" || - client_file == "" || ctmpl_file == "" || - server_file == "" || stmpl_file == "" || xdr_file == "") { - print "Usage: gen_rpc.awk requires these variables be set:" - print "\tmajor\t-- Major version number" - print "\tminor\t-- Minor version number" - print "\txidsize\t-- GID size" - print "\tclient_file\t-- the client C source file being created" - print "\tctmpl_file\t-- the client template file being created" - print "\tserver_file\t-- the server C source file being created" - print "\tstmpl_file\t-- the server template file being created" - print "\txdr_file\t-- the XDR message file being created" - error = 1; exit - } - - FS="\t\t*" - CFILE=client_file - printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \ - > CFILE - - TFILE = ctmpl_file - printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \ - > TFILE - - SFILE = server_file - printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \ - > SFILE - - # Server procedure template. - PFILE = stmpl_file - XFILE = xdr_file - printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \ - > XFILE - nendlist = 1; - - # Output headers - general_headers() - - # Put out the actual illegal and no-server functions. - illegal_functions(CFILE) -} -END { - if (error == 0) { - printf("program DB_RPC_SERVERPROG {\n") >> XFILE - printf("\tversion DB_RPC_SERVERVERS {\n") >> XFILE - - for (i = 1; i < nendlist; ++i) - printf("\t\t%s;\n", endlist[i]) >> XFILE - - printf("\t} = %d%03d;\n", major, minor) >> XFILE - printf("} = 351457;\n") >> XFILE - - obj_init("DB", "dbp", obj_db, CFILE) - obj_init("DBC", "dbc", obj_dbc, CFILE) - obj_init("DB_ENV", "dbenv", obj_dbenv, CFILE) - obj_init("DB_TXN", "txn", obj_txn, CFILE) - } -} - -/^[ ]*LOCAL/ { - # LOCAL methods are ones where we don't override the handle - # method for RPC, nor is it illegal -- it's just satisfied - # locally. - next; -} -/^[ ]*NOFUNC/ { - ++obj_indx; - - # NOFUNC methods are illegal on the RPC client. 
- if ($2 ~ "^db_") - obj_illegal(obj_db, "dbp", $2, $3) - else if ($2 ~ "^dbc_") - obj_illegal(obj_dbc, "dbc", $2, $3) - else if ($2 ~ "^env_") - obj_illegal(obj_dbenv, "dbenv", $2, $3) - else if ($2 ~ "^txn_") - obj_illegal(obj_txn, "txn", $2, $3) - else { - print "unexpected handle prefix: " $2 - error = 1; exit - } - next; -} -/^[ ]*BEGIN/ { - ++obj_indx; - - name = $2; - link_only = ret_code = 0 - if ($3 == "LINKONLY") - link_only = 1 - else if ($3 == "RETCODE") - ret_code = 1 - - funcvars = 0; - newvars = 0; - nvars = 0; - rvars = 0; - xdr_free = 0; - - db_handle = 0; - dbc_handle = 0; - dbt_handle = 0; - env_handle = 0; - mp_handle = 0; - txn_handle = 0; -} -/^[ ]*ARG/ { - rpc_type[nvars] = $2; - c_type[nvars] = $3; - pr_type[nvars] = $3; - args[nvars] = $4; - func_arg[nvars] = 0; - if (rpc_type[nvars] == "LIST") { - list_type[nvars] = $5; - } else - list_type[nvars] = 0; - - if (c_type[nvars] == "DBT *") - dbt_handle = 1; - else if (c_type[nvars] == "DB_ENV *") { - ctp_type[nvars] = "CT_ENV"; - env_handle = 1; - env_idx = nvars; - - if (nvars == 0) - obj_func("dbenv", obj_dbenv); - } else if (c_type[nvars] == "DB *") { - ctp_type[nvars] = "CT_DB"; - if (db_handle != 1) { - db_handle = 1; - db_idx = nvars; - } - - if (nvars == 0) - obj_func("dbp", obj_db); - } else if (c_type[nvars] == "DBC *") { - ctp_type[nvars] = "CT_CURSOR"; - dbc_handle = 1; - dbc_idx = nvars; - - if (nvars == 0) - obj_func("dbc", obj_dbc); - } else if (c_type[nvars] == "DB_TXN *") { - ctp_type[nvars] = "CT_TXN"; - txn_handle = 1; - txn_idx = nvars; - - if (nvars == 0) - obj_func("txn", obj_txn); - } - - ++nvars; -} -/^[ ]*FUNCPROT/ { - pr_type[nvars] = $2; -} -/^[ ]*FUNCARG/ { - rpc_type[nvars] = "IGNORE"; - c_type[nvars] = $2; - args[nvars] = sprintf("func%d", funcvars); - func_arg[nvars] = 1; - ++funcvars; - ++nvars; -} -/^[ ]*RET/ { - ret_type[rvars] = $2; - retc_type[rvars] = $3; - retargs[rvars] = $4; - if (ret_type[rvars] == "LIST" || ret_type[rvars] == "DBT") { - xdr_free = 1; - } - if (ret_type[rvars] == "LIST") { - retlist_type[rvars] = $5; - } else - retlist_type[rvars] = 0; - ret_isarg[rvars] = 0; - - ++rvars; -} -/^[ ]*ARET/ { - ret_type[rvars] = $2; - rpc_type[nvars] = "IGNORE"; - retc_type[rvars] = $3; - c_type[nvars] = sprintf("%s *", $3); - pr_type[nvars] = c_type[nvars]; - retargs[rvars] = $4; - args[nvars] = sprintf("%sp", $4); - if (ret_type[rvars] == "LIST" || ret_type[rvars] == "DBT") { - xdr_free = 1; - } - func_arg[nvars] = 0; - if (ret_type[nvars] == "LIST") { - retlist_type[rvars] = $5; - list_type[nvars] = $5; - } else { - retlist_type[rvars] = 0; - list_type[nvars] = 0; - } - ret_isarg[rvars] = 1; - - ++nvars; - ++rvars; -} -/^[ ]*END/ { - # - # ===================================================== - # LINKONLY -- just reference the function, that's all. - # - if (link_only) - next; - - # - # ===================================================== - # XDR messages. 
- # - printf("\n") >> XFILE - printf("struct __%s_msg {\n", name) >> XFILE - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "LIST") { - if (list_type[i] == "GID") { - printf("\topaque %s<>;\n", args[i]) >> XFILE - } else { - printf("\tunsigned int %s<>;\n", args[i]) >> XFILE - } - } - if (rpc_type[i] == "ID") { - printf("\tunsigned int %scl_id;\n", args[i]) >> XFILE - } - if (rpc_type[i] == "STRING") { - printf("\tstring %s<>;\n", args[i]) >> XFILE - } - if (rpc_type[i] == "GID") { - printf("\topaque %s[%d];\n", args[i], xidsize) >> XFILE - } - if (rpc_type[i] == "INT") { - printf("\tunsigned int %s;\n", args[i]) >> XFILE - } - if (rpc_type[i] == "DBT") { - printf("\tunsigned int %sdlen;\n", args[i]) >> XFILE - printf("\tunsigned int %sdoff;\n", args[i]) >> XFILE - printf("\tunsigned int %sulen;\n", args[i]) >> XFILE - printf("\tunsigned int %sflags;\n", args[i]) >> XFILE - printf("\topaque %sdata<>;\n", args[i]) >> XFILE - } - } - printf("};\n") >> XFILE - - printf("\n") >> XFILE - # - # Generate the reply message - # - printf("struct __%s_reply {\n", name) >> XFILE - printf("\t/* num return vars: %d */\n", rvars) >> XFILE - printf("\tint status;\n") >> XFILE - for (i = 0; i < rvars; ++i) { - if (ret_type[i] == "ID") { - printf("\tunsigned int %scl_id;\n", retargs[i]) >> XFILE - } - if (ret_type[i] == "STRING") { - printf("\tstring %s<>;\n", retargs[i]) >> XFILE - } - if (ret_type[i] == "INT") { - printf("\tunsigned int %s;\n", retargs[i]) >> XFILE - } - if (ret_type[i] == "DBL") { - printf("\tdouble %s;\n", retargs[i]) >> XFILE - } - if (ret_type[i] == "DBT") { - printf("\topaque %sdata<>;\n", retargs[i]) >> XFILE - } - if (ret_type[i] == "LIST") { - if (retlist_type[i] == "GID") { - printf("\topaque %s<>;\n", retargs[i]) >> XFILE - } else { - printf("\tunsigned int %s<>;\n", retargs[i]) >> XFILE - } - } - } - printf("};\n") >> XFILE - - endlist[nendlist] = \ - sprintf("__%s_reply __DB_%s(__%s_msg) = %d", \ - name, name, name, nendlist); - nendlist++; - # - # ===================================================== - # Server functions. - # - # First spit out PUBLIC prototypes for server functions. - # - printf("__%s_reply *\n", name) >> SFILE - printf("__db_%s_%d%03d__SVCSUFFIX__(msg, req)\n", \ - name, major, minor) >> SFILE - printf("\t__%s_msg *msg;\n", name) >> SFILE; - printf("\tstruct svc_req *req;\n", name) >> SFILE; - printf("{\n") >> SFILE - printf("\tstatic __%s_reply reply; /* must be static */\n", \ - name) >> SFILE - if (xdr_free) { - printf("\tstatic int __%s_free = 0; /* must be static */\n\n", \ - name) >> SFILE - } - printf("\tCOMPQUIET(req, NULL);\n", name) >> SFILE - if (xdr_free) { - printf("\tif (__%s_free)\n", name) >> SFILE - printf("\t\txdr_free((xdrproc_t)xdr___%s_reply, (void *)&reply);\n", \ - name) >> SFILE - printf("\t__%s_free = 0;\n", name) >> SFILE - printf("\n\t/* Reinitialize allocated fields */\n") >> SFILE - for (i = 0; i < rvars; ++i) { - if (ret_type[i] == "LIST") { - printf("\treply.%s.%s_val = NULL;\n", \ - retargs[i], retargs[i]) >> SFILE - } - if (ret_type[i] == "DBT") { - printf("\treply.%sdata.%sdata_val = NULL;\n", \ - retargs[i], retargs[i]) >> SFILE - } - } - } - - need_out = 0; - # - # Compose server proc to call. Decompose message components as args. - # - printf("\n\t__%s_proc(", name) >> SFILE - sep = ""; - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "IGNORE") { - continue; - } - if (rpc_type[i] == "ID") { - printf("%smsg->%scl_id", sep, args[i]) >> SFILE - } - if (rpc_type[i] == "STRING") { - printf("%s(*msg->%s == '\\0') ? 
NULL : msg->%s", \ - sep, args[i], args[i]) >> SFILE - } - if (rpc_type[i] == "GID") { - printf("%s(u_int8_t *)msg->%s", sep, args[i]) >> SFILE - } - if (rpc_type[i] == "INT") { - printf("%smsg->%s", sep, args[i]) >> SFILE - } - if (rpc_type[i] == "LIST") { - printf("%smsg->%s.%s_val", \ - sep, args[i], args[i]) >> SFILE - printf("%smsg->%s.%s_len", \ - sep, args[i], args[i]) >> SFILE - } - if (rpc_type[i] == "DBT") { - printf("%smsg->%sdlen", sep, args[i]) >> SFILE - sep = ",\n\t "; - printf("%smsg->%sdoff", sep, args[i]) >> SFILE - printf("%smsg->%sulen", sep, args[i]) >> SFILE - printf("%smsg->%sflags", sep, args[i]) >> SFILE - printf("%smsg->%sdata.%sdata_val", \ - sep, args[i], args[i]) >> SFILE - printf("%smsg->%sdata.%sdata_len", \ - sep, args[i], args[i]) >> SFILE - } - sep = ",\n\t "; - } - printf("%s&reply", sep) >> SFILE - if (xdr_free) - printf("%s&__%s_free);\n", sep, name) >> SFILE - else - printf(");\n\n") >> SFILE - if (need_out) { - printf("\nout:\n") >> SFILE - } - printf("\treturn (&reply);\n") >> SFILE - printf("}\n\n") >> SFILE - - # - # ===================================================== - # Generate Procedure Template Server code - # - # Spit out comment, prototype, function name and arg list. - printf("/* BEGIN __%s_proc */\n", name) >> PFILE - delete p; - pi = 1; - p[pi++] = sprintf("void __%s_proc __P((", name); - p[pi++] = ""; - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "IGNORE") - continue; - if (rpc_type[i] == "ID") { - p[pi++] = "long"; - p[pi++] = ", "; - } - if (rpc_type[i] == "STRING") { - p[pi++] = "char *"; - p[pi++] = ", "; - } - if (rpc_type[i] == "GID") { - p[pi++] = "u_int8_t *"; - p[pi++] = ", "; - } - if (rpc_type[i] == "INT") { - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - } - if (rpc_type[i] == "INTRET") { - p[pi++] = "u_int32_t *"; - p[pi++] = ", "; - } - if (rpc_type[i] == "LIST" && list_type[i] == "GID") { - p[pi++] = "u_int8_t *"; - p[pi++] = ", "; - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - } - if (rpc_type[i] == "LIST" && list_type[i] == "INT") { - p[pi++] = "u_int32_t *"; - p[pi++] = ", "; - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - } - if (rpc_type[i] == "LIST" && list_type[i] == "ID") { - p[pi++] = "u_int32_t *"; - p[pi++] = ", "; - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - } - if (rpc_type[i] == "DBT") { - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - p[pi++] = "void *"; - p[pi++] = ", "; - p[pi++] = "u_int32_t"; - p[pi++] = ", "; - } - } - p[pi++] = sprintf("__%s_reply *", name); - if (xdr_free) { - p[pi++] = ", "; - p[pi++] = "int *));"; - } else { - p[pi++] = ""; - p[pi++] = "));"; - } - p[pi++] = ""; - - printf("void\n") >> PFILE - printf("__%s_proc(", name) >> PFILE - sep = ""; - argcount = 0; - for (i = 0; i < nvars; ++i) { - argcount++; - split_lines(); - if (argcount == 0) { - sep = ""; - } - if (rpc_type[i] == "IGNORE") - continue; - if (rpc_type[i] == "ID") { - printf("%s%scl_id", sep, args[i]) >> PFILE - } - if (rpc_type[i] == "STRING") { - printf("%s%s", sep, args[i]) >> PFILE - } - if (rpc_type[i] == "GID") { - printf("%s%s", sep, args[i]) >> PFILE - } - if (rpc_type[i] == "INT") { - printf("%s%s", sep, args[i]) >> PFILE - } - if (rpc_type[i] == "INTRET") { - printf("%s%s", sep, args[i]) >> PFILE - } - if (rpc_type[i] == "LIST") { - printf("%s%s", sep, args[i]) >> PFILE - argcount++; - split_lines(); - if (argcount == 0) { - sep = ""; - } else { - sep = ", "; - } - printf("%s%slen", sep, 
args[i]) >> PFILE - } - if (rpc_type[i] == "DBT") { - printf("%s%sdlen", sep, args[i]) >> PFILE - sep = ", "; - argcount++; - split_lines(); - if (argcount == 0) { - sep = ""; - } else { - sep = ", "; - } - printf("%s%sdoff", sep, args[i]) >> PFILE - argcount++; - split_lines(); - if (argcount == 0) { - sep = ""; - } else { - sep = ", "; - } - printf("%s%sulen", sep, args[i]) >> PFILE - argcount++; - split_lines(); - if (argcount == 0) { - sep = ""; - } else { - sep = ", "; - } - printf("%s%sflags", sep, args[i]) >> PFILE - argcount++; - split_lines(); - if (argcount == 0) { - sep = ""; - } else { - sep = ", "; - } - printf("%s%sdata", sep, args[i]) >> PFILE - argcount++; - split_lines(); - if (argcount == 0) { - sep = ""; - } else { - sep = ", "; - } - printf("%s%ssize", sep, args[i]) >> PFILE - } - sep = ", "; - } - printf("%sreplyp",sep) >> PFILE - if (xdr_free) { - printf("%sfreep)\n",sep) >> PFILE - } else { - printf(")\n") >> PFILE - } - # - # Spit out arg types/names; - # - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "ID") { - printf("\tunsigned int %scl_id;\n", args[i]) >> PFILE - } - if (rpc_type[i] == "STRING") { - printf("\tchar *%s;\n", args[i]) >> PFILE - } - if (rpc_type[i] == "GID") { - printf("\tu_int8_t *%s;\n", args[i]) >> PFILE - } - if (rpc_type[i] == "INT") { - printf("\tu_int32_t %s;\n", args[i]) >> PFILE - } - if (rpc_type[i] == "LIST" && list_type[i] == "GID") { - printf("\tu_int8_t * %s;\n", args[i]) >> PFILE - } - if (rpc_type[i] == "LIST" && list_type[i] == "INT") { - printf("\tu_int32_t * %s;\n", args[i]) >> PFILE - printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE - } - if (rpc_type[i] == "LIST" && list_type[i] == "ID") { - printf("\tu_int32_t * %s;\n", args[i]) >> PFILE - } - if (rpc_type[i] == "LIST") { - printf("\tu_int32_t %slen;\n", args[i]) >> PFILE - } - if (rpc_type[i] == "DBT") { - printf("\tu_int32_t %sdlen;\n", args[i]) >> PFILE - printf("\tu_int32_t %sdoff;\n", args[i]) >> PFILE - printf("\tu_int32_t %sulen;\n", args[i]) >> PFILE - printf("\tu_int32_t %sflags;\n", args[i]) >> PFILE - printf("\tvoid *%sdata;\n", args[i]) >> PFILE - printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE - } - } - printf("\t__%s_reply *replyp;\n",name) >> PFILE - if (xdr_free) { - printf("\tint * freep;\n") >> PFILE - } - - printf("/* END __%s_proc */\n", name) >> PFILE - - # - # Function body - # - printf("{\n") >> PFILE - printf("\tint ret;\n") >> PFILE - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "ID") { - printf("\t%s %s;\n", c_type[i], args[i]) >> PFILE - printf("\tct_entry *%s_ctp;\n", args[i]) >> PFILE - } - } - printf("\n") >> PFILE - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "ID") { - printf("\tACTIVATE_CTP(%s_ctp, %scl_id, %s);\n", \ - args[i], args[i], ctp_type[i]) >> PFILE - printf("\t%s = (%s)%s_ctp->ct_anyp;\n", \ - args[i], c_type[i], args[i]) >> PFILE - } - } - printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> PFILE - printf("\treplyp->status = ret;\n") >> PFILE - printf("\treturn;\n") >> PFILE - printf("}\n\n") >> PFILE - - # - # ===================================================== - # Generate Client code - # - # Spit out PUBLIC prototypes. - # - delete p; - pi = 1; - p[pi++] = sprintf("int __dbcl_%s __P((", name); - p[pi++] = ""; - for (i = 0; i < nvars; ++i) { - p[pi++] = pr_type[i]; - p[pi++] = ", "; - } - p[pi - 1] = ""; - p[pi] = "));"; - proto_format(p, CFILE); - - # - # Spit out function name/args. 
- # - printf("int\n") >> CFILE - printf("__dbcl_%s(", name) >> CFILE - sep = ""; - for (i = 0; i < nvars; ++i) { - printf("%s%s", sep, args[i]) >> CFILE - sep = ", "; - } - printf(")\n") >> CFILE - - for (i = 0; i < nvars; ++i) - if (func_arg[i] == 0) - printf("\t%s %s;\n", c_type[i], args[i]) >> CFILE - else - printf("\t%s;\n", c_type[i]) >> CFILE - - printf("{\n") >> CFILE - printf("\tCLIENT *cl;\n") >> CFILE - printf("\t__%s_msg msg;\n", name) >> CFILE - printf("\t__%s_reply *replyp = NULL;\n", name) >> CFILE; - printf("\tint ret;\n") >> CFILE - if (!env_handle) - printf("\tDB_ENV *dbenv;\n") >> CFILE - # - # If we are managing a list, we need a few more vars. - # - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "LIST") { - printf("\t%s %sp;\n", c_type[i], args[i]) >> CFILE - printf("\tint %si;\n", args[i]) >> CFILE - if (list_type[i] == "GID") - printf("\tu_int8_t ** %sq;\n", args[i]) >> CFILE - else - printf("\tu_int32_t * %sq;\n", args[i]) >> CFILE - } - } - - printf("\n") >> CFILE - printf("\tret = 0;\n") >> CFILE - if (!env_handle) { - if (db_handle) - printf("\tdbenv = %s->dbenv;\n", args[db_idx]) >> CFILE - else if (dbc_handle) - printf("\tdbenv = %s->dbp->dbenv;\n", \ - args[dbc_idx]) >> CFILE - else if (txn_handle) - printf("\tdbenv = %s->mgrp->dbenv;\n", \ - args[txn_idx]) >> CFILE - else - printf("\tdbenv = NULL;\n") >> CFILE - printf("\tif (dbenv == NULL || !RPC_ON(dbenv))\n") >> CFILE - printf("\t\treturn (__dbcl_noserver(NULL));\n") >> CFILE - } else { - printf("\tif (%s == NULL || !RPC_ON(%s))\n", \ - args[env_idx], args[env_idx]) >> CFILE - printf("\t\treturn (__dbcl_noserver(%s));\n", \ - args[env_idx]) >> CFILE - } - printf("\n") >> CFILE - - printf("\tcl = (CLIENT *)%s->cl_handle;\n\n", \ - env_handle ? args[env_idx] : "dbenv") >> CFILE - - # - # If there is a function arg, check that it is NULL - # - for (i = 0; i < nvars; ++i) { - if (func_arg[i] != 1) - continue; - printf("\tif (%s != NULL) {\n", args[i]) >> CFILE - if (!env_handle) { - printf("\t\t__db_err(dbenv, ") >> CFILE - } else { - printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE - } - printf("\"User functions not supported in RPC\");\n") >> CFILE - printf("\t\treturn (EINVAL);\n\t}\n") >> CFILE - } - - # - # Compose message components - # - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "ID") { - # We don't need to check for a NULL DB_ENV *, because - # we already checked for it. I frankly couldn't care - # less, but lint gets all upset at the wasted cycles. 
- if (c_type[i] != "DB_ENV *") { - printf("\tif (%s == NULL)\n", args[i]) >> CFILE - printf("\t\tmsg.%scl_id = 0;\n\telse\n", \ - args[i]) >> CFILE - indent = "\t\t"; - } else - indent = "\t"; - if (c_type[i] == "DB_TXN *") { - printf("%smsg.%scl_id = %s->txnid;\n", \ - indent, args[i], args[i]) >> CFILE - } else { - printf("%smsg.%scl_id = %s->cl_id;\n", \ - indent, args[i], args[i]) >> CFILE - } - } - if (rpc_type[i] == "GID") { - printf("\tmemcpy(msg.%s, %s, %d);\n", \ - args[i], args[i], xidsize) >> CFILE - } - if (rpc_type[i] == "INT") { - printf("\tmsg.%s = %s;\n", args[i], args[i]) >> CFILE - } - if (rpc_type[i] == "STRING") { - printf("\tif (%s == NULL)\n", args[i]) >> CFILE - printf("\t\tmsg.%s = \"\";\n", args[i]) >> CFILE - printf("\telse\n") >> CFILE - printf("\t\tmsg.%s = (char *)%s;\n", \ - args[i], args[i]) >> CFILE - } - if (rpc_type[i] == "DBT") { - printf("\tmsg.%sdlen = %s->dlen;\n", \ - args[i], args[i]) >> CFILE - printf("\tmsg.%sdoff = %s->doff;\n", \ - args[i], args[i]) >> CFILE - printf("\tmsg.%sulen = %s->ulen;\n", \ - args[i], args[i]) >> CFILE - printf("\tmsg.%sflags = %s->flags;\n", \ - args[i], args[i]) >> CFILE - printf("\tmsg.%sdata.%sdata_val = %s->data;\n", \ - args[i], args[i], args[i]) >> CFILE - printf("\tmsg.%sdata.%sdata_len = %s->size;\n", \ - args[i], args[i], args[i]) >> CFILE - } - if (rpc_type[i] == "LIST") { - printf("\tfor (%si = 0, %sp = %s; *%sp != 0; ", \ - args[i], args[i], args[i], args[i]) >> CFILE - printf(" %si++, %sp++)\n\t\t;\n", args[i], args[i]) \ - >> CFILE - - # - # If we are an array of ints, *_len is how many - # elements. If we are a GID, *_len is total bytes. - # - printf("\tmsg.%s.%s_len = %si",args[i], args[i], \ - args[i]) >> CFILE - if (list_type[i] == "GID") - printf(" * %d;\n", xidsize) >> CFILE - else - printf(";\n") >> CFILE - printf("\tif ((ret = __os_calloc(") >> CFILE - if (!env_handle) - printf("dbenv,\n") >> CFILE - else - printf("%s,\n", args[env_idx]) >> CFILE - printf("\t msg.%s.%s_len,", \ - args[i], args[i]) >> CFILE - if (list_type[i] == "GID") - printf(" 1,") >> CFILE - else - printf(" sizeof(u_int32_t),") >> CFILE - printf(" &msg.%s.%s_val)) != 0)\n",\ - args[i], args[i], args[i], args[i]) >> CFILE - printf("\t\treturn (ret);\n") >> CFILE - printf("\tfor (%sq = msg.%s.%s_val, %sp = %s; ", \ - args[i], args[i], args[i], \ - args[i], args[i]) >> CFILE - printf("%si--; %sq++, %sp++)\n", \ - args[i], args[i], args[i]) >> CFILE - printf("\t\t*%sq = ", args[i]) >> CFILE - if (list_type[i] == "GID") - printf("*%sp;\n", args[i]) >> CFILE - if (list_type[i] == "ID") - printf("(*%sp)->cl_id;\n", args[i]) >> CFILE - if (list_type[i] == "INT") - printf("*%sp;\n", args[i]) >> CFILE - } - } - - printf("\n") >> CFILE - printf("\treplyp = __db_%s_%d%03d(&msg, cl);\n", name, major, minor) \ - >> CFILE - for (i = 0; i < nvars; ++i) { - if (rpc_type[i] == "LIST") { - printf("\t__os_free(") >> CFILE - if (!env_handle) - printf("dbenv, ") >> CFILE - else - printf("%s, ", args[env_idx]) >> CFILE - printf("msg.%s.%s_val);\n", args[i], args[i]) >> CFILE - } - } - printf("\tif (replyp == NULL) {\n") >> CFILE - if (!env_handle) { - printf("\t\t__db_err(dbenv, ") >> CFILE - printf("clnt_sperror(cl, \"Berkeley DB\"));\n") >> CFILE - } else { - printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE - printf("clnt_sperror(cl, \"Berkeley DB\"));\n") >> CFILE - } - printf("\t\tret = DB_NOSERVER;\n") >> CFILE - printf("\t\tgoto out;\n") >> CFILE - printf("\t}\n") >> CFILE - - if (ret_code == 0) { - printf("\tret = replyp->status;\n") >> CFILE 
- - # - # Set any arguments that are returned - # - for (i = 0; i < rvars; ++i) { - if (ret_isarg[i]) { - printf("\tif (%sp != NULL)\n", \ - retargs[i]) >> CFILE; - printf("\t\t*%sp = replyp->%s;\n", \ - retargs[i], retargs[i]) >> CFILE; - } - } - } else { - printf("\tret = __dbcl_%s_ret(", name) >> CFILE - sep = ""; - for (i = 0; i < nvars; ++i) { - printf("%s%s", sep, args[i]) >> CFILE - sep = ", "; - } - printf("%sreplyp);\n", sep) >> CFILE - } - printf("out:\n") >> CFILE - # - # Free reply if there was one. - # - printf("\tif (replyp != NULL)\n") >> CFILE - printf("\t\txdr_free((xdrproc_t)xdr___%s_reply,",name) >> CFILE - printf(" (void *)replyp);\n") >> CFILE - printf("\treturn (ret);\n") >> CFILE - printf("}\n\n") >> CFILE - - # - # Generate Client Template code - # - if (ret_code) { - # - # If we are doing a list, write prototypes - # - delete p; - pi = 1; - p[pi++] = sprintf("int __dbcl_%s_ret __P((", name); - p[pi++] = ""; - for (i = 0; i < nvars; ++i) { - p[pi++] = pr_type[i]; - p[pi++] = ", "; - } - p[pi] = sprintf("__%s_reply *));", name); - proto_format(p, TFILE); - - printf("int\n") >> TFILE - printf("__dbcl_%s_ret(", name) >> TFILE - sep = ""; - for (i = 0; i < nvars; ++i) { - printf("%s%s", sep, args[i]) >> TFILE - sep = ", "; - } - printf("%sreplyp)\n",sep) >> TFILE - - for (i = 0; i < nvars; ++i) - if (func_arg[i] == 0) - printf("\t%s %s;\n", c_type[i], args[i]) \ - >> TFILE - else - printf("\t%s;\n", c_type[i]) >> TFILE - printf("\t__%s_reply *replyp;\n", name) >> TFILE; - printf("{\n") >> TFILE - printf("\tint ret;\n") >> TFILE - # - # Local vars in template - # - for (i = 0; i < rvars; ++i) { - if (ret_type[i] == "ID" || ret_type[i] == "STRING" || - ret_type[i] == "INT" || ret_type[i] == "DBL") { - printf("\t%s %s;\n", \ - retc_type[i], retargs[i]) >> TFILE - } else if (ret_type[i] == "LIST") { - if (retlist_type[i] == "GID") - printf("\tu_int8_t *__db_%s;\n", \ - retargs[i]) >> TFILE - if (retlist_type[i] == "ID" || - retlist_type[i] == "INT") - printf("\tu_int32_t *__db_%s;\n", \ - retargs[i]) >> TFILE - } else { - printf("\t/* %s %s; */\n", \ - ret_type[i], retargs[i]) >> TFILE - } - } - # - # Client return code - # - printf("\n") >> TFILE - printf("\tif (replyp->status != 0)\n") >> TFILE - printf("\t\treturn (replyp->status);\n") >> TFILE - for (i = 0; i < rvars; ++i) { - varname = ""; - if (ret_type[i] == "ID") { - varname = sprintf("%scl_id", retargs[i]); - } - if (ret_type[i] == "STRING") { - varname = retargs[i]; - } - if (ret_type[i] == "INT" || ret_type[i] == "DBL") { - varname = retargs[i]; - } - if (ret_type[i] == "DBT") { - varname = sprintf("%sdata", retargs[i]); - } - if (ret_type[i] == "ID" || ret_type[i] == "STRING" || - ret_type[i] == "INT" || ret_type[i] == "DBL") { - printf("\t%s = replyp->%s;\n", \ - retargs[i], varname) >> TFILE - } else if (ret_type[i] == "LIST") { - printf("\n\t/*\n") >> TFILE - printf("\t * XXX Handle list\n") >> TFILE - printf("\t */\n\n") >> TFILE - } else { - printf("\t/* Handle replyp->%s; */\n", \ - varname) >> TFILE - } - } - printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> TFILE - printf("\treturn (replyp->status);\n") >> TFILE - printf("}\n\n") >> TFILE - } -} - -function general_headers() -{ - printf("#include \"db_config.h\"\n") >> CFILE - printf("\n") >> CFILE - printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE - printf("#include \n") >> CFILE - printf("\n") >> CFILE - printf("#include \n") >> CFILE - printf("\n") >> CFILE - printf("#include \n") >> CFILE - printf("#endif\n") >> CFILE - printf("\n") >> CFILE - 
printf("#include \"db_server.h\"\n") >> CFILE - printf("\n") >> CFILE - printf("#include \"db_int.h\"\n") >> CFILE - printf("#include \"dbinc/txn.h\"\n") >> CFILE - printf("#include \"dbinc_auto/rpc_client_ext.h\"\n") >> CFILE - printf("\n") >> CFILE - - printf("#include \"db_config.h\"\n") >> TFILE - printf("\n") >> TFILE - printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE - printf("#include \n") >> TFILE - printf("\n") >> TFILE - printf("#include \n") >> TFILE - printf("#endif\n") >> TFILE - printf("#include \"db_int.h\"\n") >> TFILE - printf("#include \"dbinc/txn.h\"\n") >> TFILE - printf("\n") >> TFILE - - printf("#include \"db_config.h\"\n") >> SFILE - printf("\n") >> SFILE - printf("#ifndef NO_SYSTEM_INCLUDES\n") >> SFILE - printf("#include \n") >> SFILE - printf("\n") >> SFILE - printf("#include \n") >> SFILE - printf("\n") >> SFILE - printf("#include \n") >> SFILE - printf("#endif\n") >> SFILE - printf("\n") >> SFILE - printf("#include \"db_server.h\"\n") >> SFILE - printf("\n") >> SFILE - printf("#include \"db_int.h\"\n") >> SFILE - printf("#include \"dbinc/db_server_int.h\"\n") >> SFILE - printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> SFILE - printf("\n") >> SFILE - - printf("#include \"db_config.h\"\n") >> PFILE - printf("\n") >> PFILE - printf("#ifndef NO_SYSTEM_INCLUDES\n") >> PFILE - printf("#include \n") >> PFILE - printf("\n") >> PFILE - printf("#include \n") >> PFILE - printf("\n") >> PFILE - printf("#include \n") >> PFILE - printf("#endif\n") >> PFILE - printf("\n") >> PFILE - printf("#include \"db_server.h\"\n") >> PFILE - printf("\n") >> PFILE - printf("#include \"db_int.h\"\n") >> PFILE - printf("#include \"dbinc/db_server_int.h\"\n") >> PFILE - printf("\n") >> PFILE -} - -# -# illegal_functions -- -# Output general illegal-call functions -function illegal_functions(OUTPUT) -{ - printf("static int __dbcl_dbp_illegal __P((DB *));\n") >> OUTPUT - printf("static int __dbcl_noserver __P((DB_ENV *));\n") >> OUTPUT - printf("static int __dbcl_txn_illegal __P((DB_TXN *));\n") >> OUTPUT - printf("\n") >> OUTPUT - - printf("static int\n") >> OUTPUT - printf("__dbcl_noserver(dbenv)\n") >> OUTPUT - printf("\tDB_ENV *dbenv;\n") >> OUTPUT - printf("{\n\t__db_err(dbenv,") >> OUTPUT - printf(" \"No Berkeley DB RPC server environment\");\n") >> OUTPUT - printf("\treturn (DB_NOSERVER);\n") >> OUTPUT - printf("}\n\n") >> OUTPUT - - printf("/*\n") >> OUTPUT - printf(" * __dbcl_dbenv_illegal --\n") >> OUTPUT - printf(" * DB_ENV method not supported under RPC.\n") >> OUTPUT - printf(" *\n") >> OUTPUT - printf(" * PUBLIC: int __dbcl_dbenv_illegal __P((DB_ENV *));\n")\ - >> OUTPUT - printf(" */\n") >> OUTPUT - printf("int\n") >> OUTPUT - printf("__dbcl_dbenv_illegal(dbenv)\n") >> OUTPUT - printf("\tDB_ENV *dbenv;\n") >> OUTPUT - printf("{\n\t__db_err(dbenv,") >> OUTPUT - printf("\n\t \"Interface not supported by ") >> OUTPUT - printf("Berkeley DB RPC client environments\");\n") >> OUTPUT - printf("\treturn (DB_OPNOTSUP);\n") >> OUTPUT - printf("}\n\n") >> OUTPUT - printf("/*\n") >> OUTPUT - printf(" * __dbcl_dbp_illegal --\n") >> OUTPUT - printf(" * DB method not supported under RPC.\n") >> OUTPUT - printf(" */\n") >> OUTPUT - printf("static int\n") >> OUTPUT - printf("__dbcl_dbp_illegal(dbp)\n") >> OUTPUT - printf("\tDB *dbp;\n") >> OUTPUT - printf("{\n\treturn (__dbcl_dbenv_illegal(dbp->dbenv));\n") >> OUTPUT - printf("}\n\n") >> OUTPUT - printf("/*\n") >> OUTPUT - printf(" * __dbcl_txn_illegal --\n") >> OUTPUT - printf(" * DB_TXN method not supported under RPC.\n") >> OUTPUT - 
printf(" */\n") >> OUTPUT - printf("static int\n__dbcl_txn_illegal(txn)\n") >> OUTPUT - printf("\tDB_TXN *txn;\n") >> OUTPUT - printf("{\n\treturn (__dbcl_dbenv_illegal(txn->mgrp->dbenv));\n")\ - >> OUTPUT - printf("}\n\n") >> OUTPUT -} - -function obj_func(v, l) -{ - # Ignore db_create -- there's got to be something cleaner, but I - # don't want to rewrite rpc.src right now. - if (name == "db_create") - return; - if (name == "env_create") - return; - - # Strip off the leading prefix for the method name -- there's got to - # be something cleaner, but I don't want to rewrite rpc.src right now. - len = length(name); - i = index(name, "_"); - l[obj_indx] = sprintf("\t%s->%s = __dbcl_%s;", - v, substr(name, i + 1, len - i), name); -} - -function obj_illegal(l, handle, method, proto) -{ - # All of the functions return an int, with one exception. Hack - # to make that work. - type = method == "db_get_mpf" ? "DB_MPOOLFILE *" : "int" - - # Strip off the leading prefix for the method name -- there's got to - # be something cleaner, but I don't want to rewrite rpc.src right now. - len = length(method); - i = index(method, "_"); - - l[obj_indx] =\ - sprintf("\t%s->%s =\n\t (%s (*)(",\ - handle, substr(method, i + 1, len - i), type)\ - proto\ - sprintf("))\n\t __dbcl_%s_illegal;", handle); -} - -function obj_init(obj, v, list, OUTPUT) { - printf("/*\n") >> OUTPUT - printf(" * __dbcl_%s_init --\n", v) >> OUTPUT - printf(" *\tInitialize %s handle methods.\n", obj) >> OUTPUT - printf(" *\n") >> OUTPUT - printf(\ - " * PUBLIC: void __dbcl_%s_init __P((%s *));\n", v, obj) >> OUTPUT - printf(" */\n") >> OUTPUT - printf("void\n") >> OUTPUT - printf("__dbcl_%s_init(%s)\n", v, v) >> OUTPUT - printf("\t%s *%s;\n", obj, v) >> OUTPUT - printf("{\n") >> OUTPUT - for (i = 1; i < obj_indx; ++i) { - if (i in list) - print list[i] >> OUTPUT - } - printf("\treturn;\n}\n\n") >> OUTPUT -} - -# -# split_lines -- -# Add line separators to pretty-print the output. -function split_lines() { - if (argcount > 3) { - # Reset the counter, remove any trailing whitespace from - # the separator. - argcount = 0; - sub("[ ]$", "", sep) - - printf("%s\n\t\t", sep) >> PFILE - } -} - -# proto_format -- -# Pretty-print a function prototype. -function proto_format(p, OUTPUT) -{ - printf("/*\n") >> OUTPUT; - - s = ""; - for (i = 1; i in p; ++i) - s = s p[i]; - - t = " * PUBLIC: " - if (length(s) + length(t) < 80) - printf("%s%s", t, s) >> OUTPUT; - else { - split(s, p, "__P"); - len = length(t) + length(p[1]); - printf("%s%s", t, p[1]) >> OUTPUT - - n = split(p[2], comma, ","); - comma[1] = "__P" comma[1]; - for (i = 1; i <= n; i++) { - if (len + length(comma[i]) > 75) { - printf("\n * PUBLIC: ") >> OUTPUT; - len = 0; - } - printf("%s%s", comma[i], i == n ? "" : ",") >> OUTPUT; - len += length(comma[i]); - } - } - printf("\n */\n") >> OUTPUT; -} diff --git a/storage/bdb/dist/install-sh b/storage/bdb/dist/install-sh deleted file mode 100755 index b41a2459161..00000000000 --- a/storage/bdb/dist/install-sh +++ /dev/null @@ -1,251 +0,0 @@ -#!/bin/sh -# -# install - install a program, script, or datafile -# This comes from X11R5 (mit/util/scripts/install.sh). 
-# -# Copyright 1991 by the Massachusetts Institute of Technology -# -# Permission to use, copy, modify, distribute, and sell this software and its -# documentation for any purpose is hereby granted without fee, provided that -# the above copyright notice appear in all copies and that both that -# copyright notice and this permission notice appear in supporting -# documentation, and that the name of M.I.T. not be used in advertising or -# publicity pertaining to distribution of the software without specific, -# written prior permission. M.I.T. makes no representations about the -# suitability of this software for any purpose. It is provided "as is" -# without express or implied warranty. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# `make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. It can only install one file at a time, a restriction -# shared with many OS's install programs. - - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit="${DOITPROG-}" - - -# put in absolute paths if you don't have them in your path; or use env. vars. - -mvprog="${MVPROG-mv}" -cpprog="${CPPROG-cp}" -chmodprog="${CHMODPROG-chmod}" -chownprog="${CHOWNPROG-chown}" -chgrpprog="${CHGRPPROG-chgrp}" -stripprog="${STRIPPROG-strip}" -rmprog="${RMPROG-rm}" -mkdirprog="${MKDIRPROG-mkdir}" - -transformbasename="" -transform_arg="" -instcmd="$mvprog" -chmodcmd="$chmodprog 0755" -chowncmd="" -chgrpcmd="" -stripcmd="" -rmcmd="$rmprog -f" -mvcmd="$mvprog" -src="" -dst="" -dir_arg="" - -while [ x"$1" != x ]; do - case $1 in - -c) instcmd="$cpprog" - shift - continue;; - - -d) dir_arg=true - shift - continue;; - - -m) chmodcmd="$chmodprog $2" - shift - shift - continue;; - - -o) chowncmd="$chownprog $2" - shift - shift - continue;; - - -g) chgrpcmd="$chgrpprog $2" - shift - shift - continue;; - - -s) stripcmd="$stripprog" - shift - continue;; - - -t=*) transformarg=`echo $1 | sed 's/-t=//'` - shift - continue;; - - -b=*) transformbasename=`echo $1 | sed 's/-b=//'` - shift - continue;; - - *) if [ x"$src" = x ] - then - src=$1 - else - # this colon is to work around a 386BSD /bin/sh bug - : - dst=$1 - fi - shift - continue;; - esac -done - -if [ x"$src" = x ] -then - echo "install: no input file specified" - exit 1 -else - true -fi - -if [ x"$dir_arg" != x ]; then - dst=$src - src="" - - if [ -d $dst ]; then - instcmd=: - chmodcmd="" - else - instcmd=$mkdirprog - fi -else - -# Waiting for this to be detected by the "$instcmd $src $dsttmp" command -# might cause directories to be created, which would be especially bad -# if $src (and thus $dsttmp) contains '*'. - - if [ -f $src -o -d $src ] - then - true - else - echo "install: $src does not exist" - exit 1 - fi - - if [ x"$dst" = x ] - then - echo "install: no destination specified" - exit 1 - else - true - fi - -# If destination is a directory, append the input filename; if your system -# does not like double slashes in filenames, you may need to add some logic - - if [ -d $dst ] - then - dst="$dst"/`basename $src` - else - true - fi -fi - -## this sed command emulates the dirname command -dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` - -# Make sure that the destination directory exists. -# this part is taken from Noah Friedman's mkinstalldirs script - -# Skip lots of stat calls in the usual case. -if [ ! 
-d "$dstdir" ]; then -defaultIFS=' - ' -IFS="${IFS-${defaultIFS}}" - -oIFS="${IFS}" -# Some sh's can't handle IFS=/ for some reason. -IFS='%' -set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` -IFS="${oIFS}" - -pathcomp='' - -while [ $# -ne 0 ] ; do - pathcomp="${pathcomp}${1}" - shift - - if [ ! -d "${pathcomp}" ] ; - then - $mkdirprog "${pathcomp}" - else - true - fi - - pathcomp="${pathcomp}/" -done -fi - -if [ x"$dir_arg" != x ] -then - $doit $instcmd $dst && - - if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && - if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && - if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && - if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi -else - -# If we're going to rename the final executable, determine the name now. - - if [ x"$transformarg" = x ] - then - dstfile=`basename $dst` - else - dstfile=`basename $dst $transformbasename | - sed $transformarg`$transformbasename - fi - -# don't allow the sed command to completely eliminate the filename - - if [ x"$dstfile" = x ] - then - dstfile=`basename $dst` - else - true - fi - -# Make a temp file name in the proper directory. - - dsttmp=$dstdir/#inst.$$# - -# Move or copy the file name to the temp name - - $doit $instcmd $src $dsttmp && - - trap "rm -f ${dsttmp}" 0 && - -# and set any options; do chmod last to preserve setuid bits - -# If any of these fail, we abort the whole thing. If we want to -# ignore errors from any of these, just make sure not to ignore -# errors from the above "$doit $instcmd $src $dsttmp" command. - - if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && - if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && - if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && - if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && - -# Now rename the file to the real destination. - - $doit $rmcmd -f $dstdir/$dstfile && - $doit $mvcmd $dsttmp $dstdir/$dstfile - -fi && - - -exit 0 diff --git a/storage/bdb/dist/ltmain.sh b/storage/bdb/dist/ltmain.sh deleted file mode 100644 index 8915481a41b..00000000000 --- a/storage/bdb/dist/ltmain.sh +++ /dev/null @@ -1,6558 +0,0 @@ -# ltmain.sh - Provide generalized library-building support services. -# NOTE: Changing this file will not affect anything until you rerun configure. -# -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005 -# Free Software Foundation, Inc. -# Originally by Gordon Matzigkeit , 1996 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. 
- -basename="s,^.*/,,g" - -# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh -# is ksh but when the shell is invoked as "sh" and the current value of -# the _XPG environment variable is not equal to 1 (one), the special -# positional parameter $0, within a function call, is the name of the -# function. -progpath="$0" - -# The name of this program: -progname=`echo "$progpath" | $SED $basename` -modename="$progname" - -# Global variables: -EXIT_SUCCESS=0 -EXIT_FAILURE=1 - -PROGRAM=ltmain.sh -PACKAGE=libtool -VERSION=1.5.20 -TIMESTAMP=" (1.1220.2.287 2005/08/31 18:54:15)" - -# See if we are running on zsh, and set the options which allow our -# commands through without removal of \ escapes. -if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST -fi - -# Check that we have a working $echo. -if test "X$1" = X--no-reexec; then - # Discard the --no-reexec flag, and continue. - shift -elif test "X$1" = X--fallback-echo; then - # Avoid inline document here, it may be left over - : -elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then - # Yippee, $echo works! - : -else - # Restart under the correct shell, and then maybe $echo will work. - exec $SHELL "$progpath" --no-reexec ${1+"$@"} -fi - -if test "X$1" = X--fallback-echo; then - # used as fallback echo - shift - cat <&2 - $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 - exit $EXIT_FAILURE -fi - -# Global variables. -mode=$default_mode -nonopt= -prev= -prevopt= -run= -show="$echo" -show_help= -execute_dlfiles= -lo2o="s/\\.lo\$/.${objext}/" -o2lo="s/\\.${objext}\$/.lo/" - -##################################### -# Shell function definitions: -# This seems to be the best place for them - -# func_win32_libid arg -# return the library type of file 'arg' -# -# Need a lot of goo to handle *both* DLLs and import libs -# Has to be a shell function in order to 'eat' the argument -# that is supplied when $file_magic_command is called. -func_win32_libid () -{ - win32_libid_type="unknown" - win32_fileres=`file -L $1 2>/dev/null` - case $win32_fileres in - *ar\ archive\ import\ library*) # definitely import - win32_libid_type="x86 archive import" - ;; - *ar\ archive*) # could be an import, or static - if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \ - $EGREP -e 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then - win32_nmres=`eval $NM -f posix -A $1 | \ - sed -n -e '1,100{/ I /{x;/import/!{s/^/import/;h;p;};x;};}'` - if test "X$win32_nmres" = "Ximport" ; then - win32_libid_type="x86 archive import" - else - win32_libid_type="x86 archive static" - fi - fi - ;; - *DLL*) - win32_libid_type="x86 DLL" - ;; - *executable*) # but shell scripts are "executable" too... - case $win32_fileres in - *MS\ Windows\ PE\ Intel*) - win32_libid_type="x86 DLL" - ;; - esac - ;; - esac - $echo $win32_libid_type -} - - -# func_infer_tag arg -# Infer tagged configuration to use if any are available and -# if one wasn't chosen via the "--tag" command line option. -# Only attempt this if the compiler in the base compile -# command doesn't match the default compiler. -# arg is usually of the form 'gcc ...' -func_infer_tag () -{ - if test -n "$available_tags" && test -z "$tagname"; then - CC_quoted= - for arg in $CC; do - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - CC_quoted="$CC_quoted $arg" - done - case $@ in - # Blanks in the command may have been stripped by the calling shell, - # but not from the CC environment variable when configure was run. 
- " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;; - # Blanks at the start of $base_compile will cause this to fail - # if we don't check for them as well. - *) - for z in $available_tags; do - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then - # Evaluate the configuration. - eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" - CC_quoted= - for arg in $CC; do - # Double-quote args containing other shell metacharacters. - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - CC_quoted="$CC_quoted $arg" - done - case "$@ " in - " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) - # The compiler in the base compile command matches - # the one in the tagged configuration. - # Assume this is the tagged configuration we want. - tagname=$z - break - ;; - esac - fi - done - # If $tagname still isn't set, then no tagged configuration - # was found and let the user know that the "--tag" command - # line option must be used. - if test -z "$tagname"; then - $echo "$modename: unable to infer tagged configuration" - $echo "$modename: specify a tag with \`--tag'" 1>&2 - exit $EXIT_FAILURE -# else -# $echo "$modename: using $tagname tagged configuration" - fi - ;; - esac - fi -} - - -# func_extract_an_archive dir oldlib -func_extract_an_archive () -{ - f_ex_an_ar_dir="$1"; shift - f_ex_an_ar_oldlib="$1" - - $show "(cd $f_ex_an_ar_dir && $AR x $f_ex_an_ar_oldlib)" - $run eval "(cd \$f_ex_an_ar_dir && $AR x \$f_ex_an_ar_oldlib)" || exit $? - if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then - : - else - $echo "$modename: ERROR: object name conflicts: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" 1>&2 - exit $EXIT_FAILURE - fi -} - -# func_extract_archives gentop oldlib ... -func_extract_archives () -{ - my_gentop="$1"; shift - my_oldlibs=${1+"$@"} - my_oldobjs="" - my_xlib="" - my_xabs="" - my_xdir="" - my_status="" - - $show "${rm}r $my_gentop" - $run ${rm}r "$my_gentop" - $show "$mkdir $my_gentop" - $run $mkdir "$my_gentop" - my_status=$? - if test "$my_status" -ne 0 && test ! -d "$my_gentop"; then - exit $my_status - fi - - for my_xlib in $my_oldlibs; do - # Extract the objects. - case $my_xlib in - [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; - *) my_xabs=`pwd`"/$my_xlib" ;; - esac - my_xlib=`$echo "X$my_xlib" | $Xsed -e 's%^.*/%%'` - my_xdir="$my_gentop/$my_xlib" - - $show "${rm}r $my_xdir" - $run ${rm}r "$my_xdir" - $show "$mkdir $my_xdir" - $run $mkdir "$my_xdir" - status=$? - if test "$status" -ne 0 && test ! -d "$my_xdir"; then - exit $status - fi - case $host in - *-darwin*) - $show "Extracting $my_xabs" - # Do not bother doing anything if just a dry run - if test -z "$run"; then - darwin_orig_dir=`pwd` - cd $my_xdir || exit $? 
- darwin_archive=$my_xabs - darwin_curdir=`pwd` - darwin_base_archive=`$echo "X$darwin_archive" | $Xsed -e 's%^.*/%%'` - darwin_arches=`lipo -info "$darwin_archive" 2>/dev/null | $EGREP Architectures 2>/dev/null` - if test -n "$darwin_arches"; then - darwin_arches=`echo "$darwin_arches" | $SED -e 's/.*are://'` - darwin_arch= - $show "$darwin_base_archive has multiple architectures $darwin_arches" - for darwin_arch in $darwin_arches ; do - mkdir -p "unfat-$$/${darwin_base_archive}-${darwin_arch}" - lipo -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" - cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" - func_extract_an_archive "`pwd`" "${darwin_base_archive}" - cd "$darwin_curdir" - $rm "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" - done # $darwin_arches - ## Okay now we have a bunch of thin objects, gotta fatten them up :) - darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print| xargs basename | sort -u | $NL2SP` - darwin_file= - darwin_files= - for darwin_file in $darwin_filelist; do - darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` - lipo -create -output "$darwin_file" $darwin_files - done # $darwin_filelist - ${rm}r unfat-$$ - cd "$darwin_orig_dir" - else - cd "$darwin_orig_dir" - func_extract_an_archive "$my_xdir" "$my_xabs" - fi # $darwin_arches - fi # $run - ;; - *) - func_extract_an_archive "$my_xdir" "$my_xabs" - ;; - esac - my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` - done - func_extract_archives_result="$my_oldobjs" -} -# End of Shell function definitions -##################################### - -# Darwin sucks -eval std_shrext=\"$shrext_cmds\" - -# Parse our command line options once, thoroughly. -while test "$#" -gt 0 -do - arg="$1" - shift - - case $arg in - -*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;; - *) optarg= ;; - esac - - # If the previous option needs an argument, assign it. - if test -n "$prev"; then - case $prev in - execute_dlfiles) - execute_dlfiles="$execute_dlfiles $arg" - ;; - tag) - tagname="$arg" - preserve_args="${preserve_args}=$arg" - - # Check whether tagname contains only valid characters - case $tagname in - *[!-_A-Za-z0-9,/]*) - $echo "$progname: invalid tag name: $tagname" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - case $tagname in - CC) - # Don't test for the "default" C tag, as we know, it's there, but - # not specially marked. - ;; - *) - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then - taglist="$taglist $tagname" - # Evaluate the configuration. - eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`" - else - $echo "$progname: ignoring unknown tag $tagname" 1>&2 - fi - ;; - esac - ;; - *) - eval "$prev=\$arg" - ;; - esac - - prev= - prevopt= - continue - fi - - # Have we seen a non-optional argument yet? - case $arg in - --help) - show_help=yes - ;; - - --version) - $echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP" - $echo - $echo "Copyright (C) 2005 Free Software Foundation, Inc." - $echo "This is free software; see the source for copying conditions. There is NO" - $echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - exit $? - ;; - - --config) - ${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath - # Now print the configurations for the tags. 
- for tagname in $taglist; do - ${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath" - done - exit $? - ;; - - --debug) - $echo "$progname: enabling shell trace mode" - set -x - preserve_args="$preserve_args $arg" - ;; - - --dry-run | -n) - run=: - ;; - - --features) - $echo "host: $host" - if test "$build_libtool_libs" = yes; then - $echo "enable shared libraries" - else - $echo "disable shared libraries" - fi - if test "$build_old_libs" = yes; then - $echo "enable static libraries" - else - $echo "disable static libraries" - fi - exit $? - ;; - - --finish) mode="finish" ;; - - --mode) prevopt="--mode" prev=mode ;; - --mode=*) mode="$optarg" ;; - - --preserve-dup-deps) duplicate_deps="yes" ;; - - --quiet | --silent) - show=: - preserve_args="$preserve_args $arg" - ;; - - --tag) prevopt="--tag" prev=tag ;; - --tag=*) - set tag "$optarg" ${1+"$@"} - shift - prev=tag - preserve_args="$preserve_args --tag" - ;; - - -dlopen) - prevopt="-dlopen" - prev=execute_dlfiles - ;; - - -*) - $echo "$modename: unrecognized option \`$arg'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; - - *) - nonopt="$arg" - break - ;; - esac -done - -if test -n "$prevopt"; then - $echo "$modename: option \`$prevopt' requires an argument" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE -fi - -# If this variable is set in any of the actions, the command in it -# will be execed at the end. This prevents here-documents from being -# left over by shells. -exec_cmd= - -if test -z "$show_help"; then - - # Infer the operation mode. - if test -z "$mode"; then - $echo "*** Warning: inferring the mode of operation is deprecated." 1>&2 - $echo "*** Future versions of Libtool will require --mode=MODE be specified." 1>&2 - case $nonopt in - *cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc*) - mode=link - for arg - do - case $arg in - -c) - mode=compile - break - ;; - esac - done - ;; - *db | *dbx | *strace | *truss) - mode=execute - ;; - *install*|cp|mv) - mode=install - ;; - *rm) - mode=uninstall - ;; - *) - # If we have no mode, but dlfiles were specified, then do execute mode. - test -n "$execute_dlfiles" && mode=execute - - # Just use the default operation mode. - if test -z "$mode"; then - if test -n "$nonopt"; then - $echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2 - else - $echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2 - fi - fi - ;; - esac - fi - - # Only execute mode is allowed to have -dlopen flags. - if test -n "$execute_dlfiles" && test "$mode" != execute; then - $echo "$modename: unrecognized option \`-dlopen'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - # Change the help message to a mode-specific one. - generic_help="$help" - help="Try \`$modename --help --mode=$mode' for more information." - - # These modes are in order of execution frequency so that they run quickly. - case $mode in - # libtool compile mode - compile) - modename="$modename: compile" - # Get the compilation command and the source file. - base_compile= - srcfile="$nonopt" # always keep a non-empty value in "srcfile" - suppress_opt=yes - suppress_output= - arg_mode=normal - libobj= - later= - - for arg - do - case $arg_mode in - arg ) - # do not "continue". Instead, add this to base_compile - lastarg="$arg" - arg_mode=normal - ;; - - target ) - libobj="$arg" - arg_mode=normal - continue - ;; - - normal ) - # Accept any command-line options. 
- case $arg in - -o) - if test -n "$libobj" ; then - $echo "$modename: you cannot specify \`-o' more than once" 1>&2 - exit $EXIT_FAILURE - fi - arg_mode=target - continue - ;; - - -static | -prefer-pic | -prefer-non-pic) - later="$later $arg" - continue - ;; - - -no-suppress) - suppress_opt=no - continue - ;; - - -Xcompiler) - arg_mode=arg # the next one goes into the "base_compile" arg list - continue # The current "srcfile" will either be retained or - ;; # replaced later. I would guess that would be a bug. - - -Wc,*) - args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"` - lastarg= - save_ifs="$IFS"; IFS=',' - for arg in $args; do - IFS="$save_ifs" - - # Double-quote args containing other shell metacharacters. - # Many Bourne shells cannot handle close brackets correctly - # in scan sets, so we specify it separately. - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - lastarg="$lastarg $arg" - done - IFS="$save_ifs" - lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"` - - # Add the arguments to base_compile. - base_compile="$base_compile $lastarg" - continue - ;; - - * ) - # Accept the current argument as the source file. - # The previous "srcfile" becomes the current argument. - # - lastarg="$srcfile" - srcfile="$arg" - ;; - esac # case $arg - ;; - esac # case $arg_mode - - # Aesthetically quote the previous argument. - lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"` - - case $lastarg in - # Double-quote args containing other shell metacharacters. - # Many Bourne shells cannot handle close brackets correctly - # in scan sets, and some SunOS ksh mistreat backslash-escaping - # in scan sets (worked around with variable expansion), - # and furthermore cannot handle '|' '&' '(' ')' in scan sets - # at all, so we specify them separately. - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - lastarg="\"$lastarg\"" - ;; - esac - - base_compile="$base_compile $lastarg" - done # for arg - - case $arg_mode in - arg) - $echo "$modename: you must specify an argument for -Xcompile" - exit $EXIT_FAILURE - ;; - target) - $echo "$modename: you must specify a target with \`-o'" 1>&2 - exit $EXIT_FAILURE - ;; - *) - # Get the name of the library object. - [ -z "$libobj" ] && libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'` - ;; - esac - - # Recognize several different file suffixes. 
- # If the user specifies -o file.o, it is replaced with file.lo - xform='[cCFSifmso]' - case $libobj in - *.ada) xform=ada ;; - *.adb) xform=adb ;; - *.ads) xform=ads ;; - *.asm) xform=asm ;; - *.c++) xform=c++ ;; - *.cc) xform=cc ;; - *.ii) xform=ii ;; - *.class) xform=class ;; - *.cpp) xform=cpp ;; - *.cxx) xform=cxx ;; - *.f90) xform=f90 ;; - *.for) xform=for ;; - *.java) xform=java ;; - esac - - libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"` - - case $libobj in - *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;; - *) - $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - func_infer_tag $base_compile - - for arg in $later; do - case $arg in - -static) - build_old_libs=yes - continue - ;; - - -prefer-pic) - pic_mode=yes - continue - ;; - - -prefer-non-pic) - pic_mode=no - continue - ;; - esac - done - - qlibobj=`$echo "X$libobj" | $Xsed -e "$sed_quote_subst"` - case $qlibobj in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - qlibobj="\"$qlibobj\"" ;; - esac - test "X$libobj" != "X$qlibobj" \ - && $echo "X$libobj" | grep '[]~#^*{};<>?"'"'"' &()|`$[]' \ - && $echo "$modename: libobj name \`$libobj' may not contain shell special characters." - objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` - xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$obj"; then - xdir= - else - xdir=$xdir/ - fi - lobj=${xdir}$objdir/$objname - - if test -z "$base_compile"; then - $echo "$modename: you must specify a compilation command" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - # Delete any leftover library objects. - if test "$build_old_libs" = yes; then - removelist="$obj $lobj $libobj ${libobj}T" - else - removelist="$lobj $libobj ${libobj}T" - fi - - $run $rm $removelist - trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 - - # On Cygwin there's no "real" PIC flag so we must build both object types - case $host_os in - cygwin* | mingw* | pw32* | os2*) - pic_mode=default - ;; - esac - if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then - # non-PIC code in shared libraries is not supported - pic_mode=default - fi - - # Calculate the filename of the output object if compiler does - # not support -o with -c - if test "$compiler_c_o" = no; then - output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} - lockfile="$output_obj.lock" - removelist="$removelist $output_obj $lockfile" - trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 - else - output_obj= - need_locks=no - lockfile= - fi - - # Lock this critical section if it is needed - # We use this script file to make the link, it avoids creating a new file - if test "$need_locks" = yes; then - until $run ln "$progpath" "$lockfile" 2>/dev/null; do - $show "Waiting for $lockfile to be removed" - sleep 2 - done - elif test "$need_locks" = warn; then - if test -f "$lockfile"; then - $echo "\ -*** ERROR, $lockfile exists and contains: -`cat $lockfile 2>/dev/null` - -This indicates that another process is trying to use the same -temporary object file, and libtool could not work around it because -your compiler does not support \`-c' and \`-o' together. If you -repeat this compilation, it may succeed, by chance, but you had better -avoid parallel builds (make -j) in this platform, or get a better -compiler." 
- - $run $rm $removelist - exit $EXIT_FAILURE - fi - $echo "$srcfile" > "$lockfile" - fi - - if test -n "$fix_srcfile_path"; then - eval srcfile=\"$fix_srcfile_path\" - fi - qsrcfile=`$echo "X$srcfile" | $Xsed -e "$sed_quote_subst"` - case $qsrcfile in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - qsrcfile="\"$qsrcfile\"" ;; - esac - - $run $rm "$libobj" "${libobj}T" - - # Create a libtool object file (analogous to a ".la" file), - # but don't create it if we're doing a dry run. - test -z "$run" && cat > ${libobj}T </dev/null`" != "X$srcfile"; then - $echo "\ -*** ERROR, $lockfile contains: -`cat $lockfile 2>/dev/null` - -but it should contain: -$srcfile - -This indicates that another process is trying to use the same -temporary object file, and libtool could not work around it because -your compiler does not support \`-c' and \`-o' together. If you -repeat this compilation, it may succeed, by chance, but you had better -avoid parallel builds (make -j) in this platform, or get a better -compiler." - - $run $rm $removelist - exit $EXIT_FAILURE - fi - - # Just move the object if needed, then go on to compile the next one - if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then - $show "$mv $output_obj $lobj" - if $run $mv $output_obj $lobj; then : - else - error=$? - $run $rm $removelist - exit $error - fi - fi - - # Append the name of the PIC object to the libtool object file. - test -z "$run" && cat >> ${libobj}T <> ${libobj}T </dev/null`" != "X$srcfile"; then - $echo "\ -*** ERROR, $lockfile contains: -`cat $lockfile 2>/dev/null` - -but it should contain: -$srcfile - -This indicates that another process is trying to use the same -temporary object file, and libtool could not work around it because -your compiler does not support \`-c' and \`-o' together. If you -repeat this compilation, it may succeed, by chance, but you had better -avoid parallel builds (make -j) in this platform, or get a better -compiler." - - $run $rm $removelist - exit $EXIT_FAILURE - fi - - # Just move the object if needed - if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then - $show "$mv $output_obj $obj" - if $run $mv $output_obj $obj; then : - else - error=$? - $run $rm $removelist - exit $error - fi - fi - - # Append the name of the non-PIC object the libtool object file. - # Only append if the libtool object file exists. - test -z "$run" && cat >> ${libobj}T <> ${libobj}T <&2 - fi - if test -n "$link_static_flag"; then - dlopen_self=$dlopen_self_static - fi - else - if test -z "$pic_flag" && test -n "$link_static_flag"; then - dlopen_self=$dlopen_self_static - fi - fi - build_libtool_libs=no - build_old_libs=yes - prefer_static_libs=yes - break - ;; - esac - done - - # See if our shared archives depend on static archives. - test -n "$old_archive_from_new_cmds" && build_old_libs=yes - - # Go through the arguments, transforming them on the way. - while test "$#" -gt 0; do - arg="$1" - shift - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test - ;; - *) qarg=$arg ;; - esac - libtool_args="$libtool_args $qarg" - - # If the previous option needs an argument, assign it. - if test -n "$prev"; then - case $prev in - output) - compile_command="$compile_command @OUTPUT@" - finalize_command="$finalize_command @OUTPUT@" - ;; - esac - - case $prev in - dlfiles|dlprefiles) - if test "$preload" = no; then - # Add the symbol object into the linking commands. 
- compile_command="$compile_command @SYMFILE@" - finalize_command="$finalize_command @SYMFILE@" - preload=yes - fi - case $arg in - *.la | *.lo) ;; # We handle these cases below. - force) - if test "$dlself" = no; then - dlself=needless - export_dynamic=yes - fi - prev= - continue - ;; - self) - if test "$prev" = dlprefiles; then - dlself=yes - elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then - dlself=yes - else - dlself=needless - export_dynamic=yes - fi - prev= - continue - ;; - *) - if test "$prev" = dlfiles; then - dlfiles="$dlfiles $arg" - else - dlprefiles="$dlprefiles $arg" - fi - prev= - continue - ;; - esac - ;; - expsyms) - export_symbols="$arg" - if test ! -f "$arg"; then - $echo "$modename: symbol file \`$arg' does not exist" - exit $EXIT_FAILURE - fi - prev= - continue - ;; - expsyms_regex) - export_symbols_regex="$arg" - prev= - continue - ;; - inst_prefix) - inst_prefix_dir="$arg" - prev= - continue - ;; - precious_regex) - precious_files_regex="$arg" - prev= - continue - ;; - release) - release="-$arg" - prev= - continue - ;; - objectlist) - if test -f "$arg"; then - save_arg=$arg - moreargs= - for fil in `cat $save_arg` - do -# moreargs="$moreargs $fil" - arg=$fil - # A libtool-controlled object. - - # Check to see that this really is a libtool object. - if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - pic_object= - non_pic_object= - - # Read the .lo file - # If there is no directory component, then add one. - case $arg in - */* | *\\*) . $arg ;; - *) . ./$arg ;; - esac - - if test -z "$pic_object" || \ - test -z "$non_pic_object" || - test "$pic_object" = none && \ - test "$non_pic_object" = none; then - $echo "$modename: cannot find name of object for \`$arg'" 1>&2 - exit $EXIT_FAILURE - fi - - # Extract subdirectory from the argument. - xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$arg"; then - xdir= - else - xdir="$xdir/" - fi - - if test "$pic_object" != none; then - # Prepend the subdirectory the object is found in. - pic_object="$xdir$pic_object" - - if test "$prev" = dlfiles; then - if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then - dlfiles="$dlfiles $pic_object" - prev= - continue - else - # If libtool objects are unsupported, then we need to preload. - prev=dlprefiles - fi - fi - - # CHECK ME: I think I busted this. -Ossama - if test "$prev" = dlprefiles; then - # Preload the old-style object. - dlprefiles="$dlprefiles $pic_object" - prev= - fi - - # A PIC object. - libobjs="$libobjs $pic_object" - arg="$pic_object" - fi - - # Non-PIC object. - if test "$non_pic_object" != none; then - # Prepend the subdirectory the object is found in. - non_pic_object="$xdir$non_pic_object" - - # A standard non-PIC object - non_pic_objects="$non_pic_objects $non_pic_object" - if test -z "$pic_object" || test "$pic_object" = none ; then - arg="$non_pic_object" - fi - fi - else - # Only an error if not doing a dry-run. - if test -z "$run"; then - $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 - exit $EXIT_FAILURE - else - # Dry-run case. - - # Extract subdirectory from the argument. 
- xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$arg"; then - xdir= - else - xdir="$xdir/" - fi - - pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` - non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` - libobjs="$libobjs $pic_object" - non_pic_objects="$non_pic_objects $non_pic_object" - fi - fi - done - else - $echo "$modename: link input file \`$save_arg' does not exist" - exit $EXIT_FAILURE - fi - arg=$save_arg - prev= - continue - ;; - rpath | xrpath) - # We need an absolute path. - case $arg in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - $echo "$modename: only absolute run-paths are allowed" 1>&2 - exit $EXIT_FAILURE - ;; - esac - if test "$prev" = rpath; then - case "$rpath " in - *" $arg "*) ;; - *) rpath="$rpath $arg" ;; - esac - else - case "$xrpath " in - *" $arg "*) ;; - *) xrpath="$xrpath $arg" ;; - esac - fi - prev= - continue - ;; - xcompiler) - compiler_flags="$compiler_flags $qarg" - prev= - compile_command="$compile_command $qarg" - finalize_command="$finalize_command $qarg" - continue - ;; - xlinker) - linker_flags="$linker_flags $qarg" - compiler_flags="$compiler_flags $wl$qarg" - prev= - compile_command="$compile_command $wl$qarg" - finalize_command="$finalize_command $wl$qarg" - continue - ;; - xcclinker) - linker_flags="$linker_flags $qarg" - compiler_flags="$compiler_flags $qarg" - prev= - compile_command="$compile_command $qarg" - finalize_command="$finalize_command $qarg" - continue - ;; - shrext) - shrext_cmds="$arg" - prev= - continue - ;; - darwin_framework) - compiler_flags="$compiler_flags $arg" - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - prev= - continue - ;; - *) - eval "$prev=\"\$arg\"" - prev= - continue - ;; - esac - fi # test -n "$prev" - - prevarg="$arg" - - case $arg in - -all-static) - if test -n "$link_static_flag"; then - compile_command="$compile_command $link_static_flag" - finalize_command="$finalize_command $link_static_flag" - fi - continue - ;; - - -allow-undefined) - # FIXME: remove this flag sometime in the future. - $echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2 - continue - ;; - - -avoid-version) - avoid_version=yes - continue - ;; - - -dlopen) - prev=dlfiles - continue - ;; - - -dlpreopen) - prev=dlprefiles - continue - ;; - - -export-dynamic) - export_dynamic=yes - continue - ;; - - -export-symbols | -export-symbols-regex) - if test -n "$export_symbols" || test -n "$export_symbols_regex"; then - $echo "$modename: more than one -exported-symbols argument is not allowed" - exit $EXIT_FAILURE - fi - if test "X$arg" = "X-export-symbols"; then - prev=expsyms - else - prev=expsyms_regex - fi - continue - ;; - - -framework|-arch) - prev=darwin_framework - compiler_flags="$compiler_flags $arg" - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - continue - ;; - - -inst-prefix-dir) - prev=inst_prefix - continue - ;; - - # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* - # so, if we see these flags be careful not to treat them like -L - -L[A-Z][A-Z]*:*) - case $with_gcc/$host in - no/*-*-irix* | /*-*-irix*) - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - ;; - esac - continue - ;; - - -L*) - dir=`$echo "X$arg" | $Xsed -e 's/^-L//'` - # We need an absolute path. 
- case $dir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - absdir=`cd "$dir" && pwd` - if test -z "$absdir"; then - $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2 - exit $EXIT_FAILURE - fi - dir="$absdir" - ;; - esac - case "$deplibs " in - *" -L$dir "*) ;; - *) - deplibs="$deplibs -L$dir" - lib_search_path="$lib_search_path $dir" - ;; - esac - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) - case :$dllsearchpath: in - *":$dir:"*) ;; - *) dllsearchpath="$dllsearchpath:$dir";; - esac - ;; - esac - continue - ;; - - -l*) - if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then - case $host in - *-*-cygwin* | *-*-pw32* | *-*-beos*) - # These systems don't actually have a C or math library (as such) - continue - ;; - *-*-mingw* | *-*-os2*) - # These systems don't actually have a C library (as such) - test "X$arg" = "X-lc" && continue - ;; - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) - # Do not include libc due to us having libc/libc_r. - test "X$arg" = "X-lc" && continue - ;; - *-*-rhapsody* | *-*-darwin1.[012]) - # Rhapsody C and math libraries are in the System framework - deplibs="$deplibs -framework System" - continue - esac - elif test "X$arg" = "X-lc_r"; then - case $host in - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) - # Do not include libc_r directly, use -pthread flag. - continue - ;; - esac - fi - deplibs="$deplibs $arg" - continue - ;; - - # Tru64 UNIX uses -model [arg] to determine the layout of C++ - # classes, name mangling, and exception handling. - -model) - compile_command="$compile_command $arg" - compiler_flags="$compiler_flags $arg" - finalize_command="$finalize_command $arg" - prev=xcompiler - continue - ;; - - -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) - compiler_flags="$compiler_flags $arg" - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - continue - ;; - - -module) - module=yes - continue - ;; - - ################################################################ - #### Local edit for Sleepycat SR #8705 - #### Some cases separated below. - ################################################################ - # -64, -mips[0-9] enable 64-bit mode on the SGI compiler - # -r[0-9][0-9]* specifies the processor on the SGI compiler - # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler - # +DA*, +DD* enable 64-bit mode on the HP compiler - # -q* pass through compiler args for the IBM compiler - # -m* pass through architecture-specific compiler args for GCC - -r[0-9][0-9]*|-xtarget=*|+DA*|+DD*|-q*|-m*) - - # Unknown arguments in both finalize_command and compile_command need - # to be aesthetically quoted because they are evaled later. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - if test "$with_gcc" = "yes" ; then - compiler_flags="$compiler_flags $arg" - fi - continue - ;; - - ################################################################ - #### Local edit for Sleepycat SR #8705 - #### This case was given to us by Albert Chin, and we expect - #### this to be included in future versions of libtool, - #### though we must verify that before upgrading. - #### Note that libtool 1.5.20 at least, incorporates similar - #### code, but it got refactored incorrectly. 
- ################################################################ - # Flags for IRIX and Solaris compiler - -64|-mips[0-9]|-xarch=*) - # Unknown arguments in both finalize_command and compile_command need - # to be aesthetically quoted because they are evaled later. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - compiler_flags="$compiler_flags $arg" - continue - ;; - - -shrext) - prev=shrext - continue - ;; - - -no-fast-install) - fast_install=no - continue - ;; - - -no-install) - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) - # The PATH hackery in wrapper scripts is required on Windows - # in order for the loader to find any dlls it needs. - $echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2 - $echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2 - fast_install=no - ;; - *) no_install=yes ;; - esac - continue - ;; - - -no-undefined) - allow_undefined=no - continue - ;; - - -objectlist) - prev=objectlist - continue - ;; - - -o) prev=output ;; - - -precious-files-regex) - prev=precious_regex - continue - ;; - - -release) - prev=release - continue - ;; - - -rpath) - prev=rpath - continue - ;; - - -R) - prev=xrpath - continue - ;; - - -R*) - dir=`$echo "X$arg" | $Xsed -e 's/^-R//'` - # We need an absolute path. - case $dir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - $echo "$modename: only absolute run-paths are allowed" 1>&2 - exit $EXIT_FAILURE - ;; - esac - case "$xrpath " in - *" $dir "*) ;; - *) xrpath="$xrpath $dir" ;; - esac - continue - ;; - - -static) - # The effects of -static are defined in a previous loop. - # We used to do the same as -all-static on platforms that - # didn't have a PIC flag, but the assumption that the effects - # would be equivalent was wrong. It would break on at least - # Digital Unix and AIX. - continue - ;; - - -thread-safe) - thread_safe=yes - continue - ;; - - -version-info) - prev=vinfo - continue - ;; - -version-number) - prev=vinfo - vinfo_number=yes - continue - ;; - - -Wc,*) - args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'` - arg= - save_ifs="$IFS"; IFS=',' - for flag in $args; do - IFS="$save_ifs" - case $flag in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - flag="\"$flag\"" - ;; - esac - arg="$arg $wl$flag" - compiler_flags="$compiler_flags $flag" - done - IFS="$save_ifs" - arg=`$echo "X$arg" | $Xsed -e "s/^ //"` - ;; - - -Wl,*) - args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'` - arg= - save_ifs="$IFS"; IFS=',' - for flag in $args; do - IFS="$save_ifs" - case $flag in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - flag="\"$flag\"" - ;; - esac - arg="$arg $wl$flag" - compiler_flags="$compiler_flags $wl$flag" - linker_flags="$linker_flags $flag" - done - IFS="$save_ifs" - arg=`$echo "X$arg" | $Xsed -e "s/^ //"` - ;; - - -Xcompiler) - prev=xcompiler - continue - ;; - - -Xlinker) - prev=xlinker - continue - ;; - - -XCClinker) - prev=xcclinker - continue - ;; - - # Some other compiler flag. - -* | +*) - # Unknown arguments in both finalize_command and compile_command need - # to be aesthetically quoted because they are evaled later. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - ;; - - *.$objext) - # A standard object. 
- objs="$objs $arg" - ;; - - *.lo) - # A libtool-controlled object. - - # Check to see that this really is a libtool object. - if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - pic_object= - non_pic_object= - - # Read the .lo file - # If there is no directory component, then add one. - case $arg in - */* | *\\*) . $arg ;; - *) . ./$arg ;; - esac - - if test -z "$pic_object" || \ - test -z "$non_pic_object" || - test "$pic_object" = none && \ - test "$non_pic_object" = none; then - $echo "$modename: cannot find name of object for \`$arg'" 1>&2 - exit $EXIT_FAILURE - fi - - # Extract subdirectory from the argument. - xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$arg"; then - xdir= - else - xdir="$xdir/" - fi - - if test "$pic_object" != none; then - # Prepend the subdirectory the object is found in. - pic_object="$xdir$pic_object" - - if test "$prev" = dlfiles; then - if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then - dlfiles="$dlfiles $pic_object" - prev= - continue - else - # If libtool objects are unsupported, then we need to preload. - prev=dlprefiles - fi - fi - - # CHECK ME: I think I busted this. -Ossama - if test "$prev" = dlprefiles; then - # Preload the old-style object. - dlprefiles="$dlprefiles $pic_object" - prev= - fi - - # A PIC object. - libobjs="$libobjs $pic_object" - arg="$pic_object" - fi - - # Non-PIC object. - if test "$non_pic_object" != none; then - # Prepend the subdirectory the object is found in. - non_pic_object="$xdir$non_pic_object" - - # A standard non-PIC object - non_pic_objects="$non_pic_objects $non_pic_object" - if test -z "$pic_object" || test "$pic_object" = none ; then - arg="$non_pic_object" - fi - fi - else - # Only an error if not doing a dry-run. - if test -z "$run"; then - $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 - exit $EXIT_FAILURE - else - # Dry-run case. - - # Extract subdirectory from the argument. - xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$arg"; then - xdir= - else - xdir="$xdir/" - fi - - pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` - non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` - libobjs="$libobjs $pic_object" - non_pic_objects="$non_pic_objects $non_pic_object" - fi - fi - ;; - - *.$libext) - # An archive. - deplibs="$deplibs $arg" - old_deplibs="$old_deplibs $arg" - continue - ;; - - *.la) - # A libtool-controlled library. - - if test "$prev" = dlfiles; then - # This library was specified with -dlopen. - dlfiles="$dlfiles $arg" - prev= - elif test "$prev" = dlprefiles; then - # The library was specified with -dlpreopen. - dlprefiles="$dlprefiles $arg" - prev= - else - deplibs="$deplibs $arg" - fi - continue - ;; - - # Some other compiler argument. - *) - # Unknown arguments in both finalize_command and compile_command need - # to be aesthetically quoted because they are evaled later. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - ;; - esac # arg - - # Now actually substitute the argument into the commands. 
- if test -n "$arg"; then - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - fi - done # argument parsing loop - - if test -n "$prev"; then - $echo "$modename: the \`$prevarg' option requires an argument" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then - eval arg=\"$export_dynamic_flag_spec\" - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - fi - - oldlibs= - # calculate the name of the file, without its directory - outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'` - libobjs_save="$libobjs" - - if test -n "$shlibpath_var"; then - # get the directories listed in $shlibpath_var - eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` - else - shlib_search_path= - fi - eval sys_lib_search_path=\"$sys_lib_search_path_spec\" - eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" - - output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'` - if test "X$output_objdir" = "X$output"; then - output_objdir="$objdir" - else - output_objdir="$output_objdir/$objdir" - fi - # Create the object directory. - if test ! -d "$output_objdir"; then - $show "$mkdir $output_objdir" - $run $mkdir $output_objdir - status=$? - if test "$status" -ne 0 && test ! -d "$output_objdir"; then - exit $status - fi - fi - - # Determine the type of output - case $output in - "") - $echo "$modename: you must specify an output file" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; - *.$libext) linkmode=oldlib ;; - *.lo | *.$objext) linkmode=obj ;; - *.la) linkmode=lib ;; - *) linkmode=prog ;; # Anything else should be a program. - esac - - case $host in - *cygwin* | *mingw* | *pw32*) - # don't eliminate duplications in $postdeps and $predeps - duplicate_compiler_generated_deps=yes - ;; - *) - duplicate_compiler_generated_deps=$duplicate_deps - ;; - esac - specialdeplibs= - - libs= - # Find all interdependent deplibs by searching for libraries - # that are linked more than once (e.g. -la -lb -la) - for deplib in $deplibs; do - if test "X$duplicate_deps" = "Xyes" ; then - case "$libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - libs="$libs $deplib" - done - - if test "$linkmode" = lib; then - libs="$predeps $libs $compiler_lib_search_path $postdeps" - - # Compute libraries that are listed more than once in $predeps - # $postdeps and mark them as special (i.e., whose duplicates are - # not to be eliminated). 
- pre_post_deps= - if test "X$duplicate_compiler_generated_deps" = "Xyes" ; then - for pre_post_dep in $predeps $postdeps; do - case "$pre_post_deps " in - *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; - esac - pre_post_deps="$pre_post_deps $pre_post_dep" - done - fi - pre_post_deps= - fi - - deplibs= - newdependency_libs= - newlib_search_path= - need_relink=no # whether we're linking any uninstalled libtool libraries - notinst_deplibs= # not-installed libtool libraries - notinst_path= # paths that contain not-installed libtool libraries - case $linkmode in - lib) - passes="conv link" - for file in $dlfiles $dlprefiles; do - case $file in - *.la) ;; - *) - $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2 - exit $EXIT_FAILURE - ;; - esac - done - ;; - prog) - compile_deplibs= - finalize_deplibs= - alldeplibs=no - newdlfiles= - newdlprefiles= - passes="conv scan dlopen dlpreopen link" - ;; - *) passes="conv" - ;; - esac - for pass in $passes; do - if test "$linkmode,$pass" = "lib,link" || - test "$linkmode,$pass" = "prog,scan"; then - libs="$deplibs" - deplibs= - fi - if test "$linkmode" = prog; then - case $pass in - dlopen) libs="$dlfiles" ;; - dlpreopen) libs="$dlprefiles" ;; - link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; - esac - fi - if test "$pass" = dlopen; then - # Collect dlpreopened libraries - save_deplibs="$deplibs" - deplibs= - fi - for deplib in $libs; do - lib= - found=no - case $deplib in - -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - compiler_flags="$compiler_flags $deplib" - fi - continue - ;; - -l*) - if test "$linkmode" != lib && test "$linkmode" != prog; then - $echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2 - continue - fi - name=`$echo "X$deplib" | $Xsed -e 's/^-l//'` - for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do - for search_ext in .la $std_shrext .so .a; do - # Search the libtool library - lib="$searchdir/lib${name}${search_ext}" - if test -f "$lib"; then - if test "$search_ext" = ".la"; then - found=yes - else - found=no - fi - break 2 - fi - done - done - if test "$found" != yes; then - # deplib doesn't seem to be a libtool library - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - deplibs="$deplib $deplibs" - test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" - fi - continue - else # deplib is a libtool library - # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, - # We need to do some special things here, and not later. - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $deplib "*) - if (${SED} -e '2q' $lib | - grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - library_names= - old_library= - case $lib in - */* | *\\*) . $lib ;; - *) . ./$lib ;; - esac - for l in $old_library $library_names; do - ll="$l" - done - if test "X$ll" = "X$old_library" ; then # only static version available - found=no - ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` - test "X$ladir" = "X$lib" && ladir="." 
- lib=$ladir/$old_library - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - deplibs="$deplib $deplibs" - test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" - fi - continue - fi - fi - ;; - *) ;; - esac - fi - fi - ;; # -l - -L*) - case $linkmode in - lib) - deplibs="$deplib $deplibs" - test "$pass" = conv && continue - newdependency_libs="$deplib $newdependency_libs" - newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` - ;; - prog) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - continue - fi - if test "$pass" = scan; then - deplibs="$deplib $deplibs" - else - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - fi - newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` - ;; - *) - $echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2 - ;; - esac # linkmode - continue - ;; # -L - -R*) - if test "$pass" = link; then - dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'` - # Make sure the xrpath contains only unique directories. - case "$xrpath " in - *" $dir "*) ;; - *) xrpath="$xrpath $dir" ;; - esac - fi - deplibs="$deplib $deplibs" - continue - ;; - *.la) lib="$deplib" ;; - *.$libext) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - continue - fi - case $linkmode in - lib) - valid_a_lib=no - case $deplibs_check_method in - match_pattern*) - set dummy $deplibs_check_method - match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` - if eval $echo \"$deplib\" 2>/dev/null \ - | $SED 10q \ - | $EGREP "$match_pattern_regex" > /dev/null; then - valid_a_lib=yes - fi - ;; - pass_all) - valid_a_lib=yes - ;; - esac - if test "$valid_a_lib" != yes; then - $echo - $echo "*** Warning: Trying to link with static lib archive $deplib." - $echo "*** I have the capability to make that library automatically link in when" - $echo "*** you link to this library. But I can only do this if you have a" - $echo "*** shared version of the library, which you do not appear to have" - $echo "*** because the file extensions .$libext of this argument makes me believe" - $echo "*** that it is just a static archive that I should not used here." - else - $echo - $echo "*** Warning: Linking the shared library $output against the" - $echo "*** static library $deplib is not portable!" - deplibs="$deplib $deplibs" - fi - continue - ;; - prog) - if test "$pass" != link; then - deplibs="$deplib $deplibs" - else - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - fi - continue - ;; - esac # linkmode - ;; # *.$libext - *.lo | *.$objext) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - elif test "$linkmode" = prog; then - if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then - # If there is no dlopen support or we're linking statically, - # we need to preload. - newdlprefiles="$newdlprefiles $deplib" - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - newdlfiles="$newdlfiles $deplib" - fi - fi - continue - ;; - %DEPLIBS%) - alldeplibs=yes - continue - ;; - esac # case $deplib - if test "$found" = yes || test -f "$lib"; then : - else - $echo "$modename: cannot find the library \`$lib'" 1>&2 - exit $EXIT_FAILURE - fi - - # Check to see that this really is a libtool archive. 
- if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : - else - $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - - ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` - test "X$ladir" = "X$lib" && ladir="." - - dlname= - dlopen= - dlpreopen= - libdir= - library_names= - old_library= - # If the library was installed with an old release of libtool, - # it will not redefine variables installed, or shouldnotlink - installed=yes - shouldnotlink=no - avoidtemprpath= - - - # Read the .la file - case $lib in - */* | *\\*) . $lib ;; - *) . ./$lib ;; - esac - - if test "$linkmode,$pass" = "lib,link" || - test "$linkmode,$pass" = "prog,scan" || - { test "$linkmode" != prog && test "$linkmode" != lib; }; then - test -n "$dlopen" && dlfiles="$dlfiles $dlopen" - test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" - fi - - if test "$pass" = conv; then - # Only check for convenience libraries - deplibs="$lib $deplibs" - if test -z "$libdir"; then - if test -z "$old_library"; then - $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 - exit $EXIT_FAILURE - fi - # It is a libtool convenience library, so add in its objects. - convenience="$convenience $ladir/$objdir/$old_library" - old_convenience="$old_convenience $ladir/$objdir/$old_library" - tmp_libs= - for deplib in $dependency_libs; do - deplibs="$deplib $deplibs" - if test "X$duplicate_deps" = "Xyes" ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done - elif test "$linkmode" != prog && test "$linkmode" != lib; then - $echo "$modename: \`$lib' is not a convenience library" 1>&2 - exit $EXIT_FAILURE - fi - continue - fi # $pass = conv - - - # Get the name of the library we link against. - linklib= - for l in $old_library $library_names; do - linklib="$l" - done - if test -z "$linklib"; then - $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 - exit $EXIT_FAILURE - fi - - # This library was specified with -dlopen. - if test "$pass" = dlopen; then - if test -z "$libdir"; then - $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2 - exit $EXIT_FAILURE - fi - if test -z "$dlname" || - test "$dlopen_support" != yes || - test "$build_libtool_libs" = no; then - # If there is no dlname, no dlopen support or we're linking - # statically, we need to preload. We also need to preload any - # dependent libraries so libltdl's deplib preloader doesn't - # bomb out in the load deplibs phase. - dlprefiles="$dlprefiles $lib $dependency_libs" - else - newdlfiles="$newdlfiles $lib" - fi - continue - fi # $pass = dlopen - - # We need an absolute path. - case $ladir in - [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; - *) - abs_ladir=`cd "$ladir" && pwd` - if test -z "$abs_ladir"; then - $echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2 - $echo "$modename: passing it literally to the linker, although it might fail" 1>&2 - abs_ladir="$ladir" - fi - ;; - esac - laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` - - # Find the relevant object directory and library name. - if test "X$installed" = Xyes; then - if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then - $echo "$modename: warning: library \`$lib' was moved." 1>&2 - dir="$ladir" - absdir="$abs_ladir" - libdir="$abs_ladir" - else - dir="$libdir" - absdir="$libdir" - fi - test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes - else - if test ! 
-f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then - dir="$ladir" - absdir="$abs_ladir" - # Remove this search path later - notinst_path="$notinst_path $abs_ladir" - else - dir="$ladir/$objdir" - absdir="$abs_ladir/$objdir" - # Remove this search path later - notinst_path="$notinst_path $abs_ladir" - fi - fi # $installed = yes - name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` - - # This library was specified with -dlpreopen. - if test "$pass" = dlpreopen; then - if test -z "$libdir"; then - $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2 - exit $EXIT_FAILURE - fi - # Prefer using a static library (so that no silly _DYNAMIC symbols - # are required to link). - if test -n "$old_library"; then - newdlprefiles="$newdlprefiles $dir/$old_library" - # Otherwise, use the dlname, so that lt_dlopen finds it. - elif test -n "$dlname"; then - newdlprefiles="$newdlprefiles $dir/$dlname" - else - newdlprefiles="$newdlprefiles $dir/$linklib" - fi - fi # $pass = dlpreopen - - if test -z "$libdir"; then - # Link the convenience library - if test "$linkmode" = lib; then - deplibs="$dir/$old_library $deplibs" - elif test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$dir/$old_library $compile_deplibs" - finalize_deplibs="$dir/$old_library $finalize_deplibs" - else - deplibs="$lib $deplibs" # used for prog,scan pass - fi - continue - fi - - - if test "$linkmode" = prog && test "$pass" != link; then - newlib_search_path="$newlib_search_path $ladir" - deplibs="$lib $deplibs" - - linkalldeplibs=no - if test "$link_all_deplibs" != no || test -z "$library_names" || - test "$build_libtool_libs" = no; then - linkalldeplibs=yes - fi - - tmp_libs= - for deplib in $dependency_libs; do - case $deplib in - -L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test - esac - # Need to link against all dependency_libs? - if test "$linkalldeplibs" = yes; then - deplibs="$deplib $deplibs" - else - # Need to hardcode shared library paths - # or/and link against static libraries - newdependency_libs="$deplib $newdependency_libs" - fi - if test "X$duplicate_deps" = "Xyes" ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done # for deplib - continue - fi # $linkmode = prog... - - if test "$linkmode,$pass" = "prog,link"; then - if test -n "$library_names" && - { test "$prefer_static_libs" = no || test -z "$old_library"; }; then - # We need to hardcode the library path - if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then - # Make sure the rpath contains only unique directories. - case "$temp_rpath " in - *" $dir "*) ;; - *" $absdir "*) ;; - *) temp_rpath="$temp_rpath $absdir" ;; - esac - fi - - # Hardcode the library path. - # Skip directories that are in the system default run-time - # search path. - case " $sys_lib_dlsearch_path " in - *" $absdir "*) ;; - *) - case "$compile_rpath " in - *" $absdir "*) ;; - *) compile_rpath="$compile_rpath $absdir" - esac - ;; - esac - case " $sys_lib_dlsearch_path " in - *" $libdir "*) ;; - *) - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" - esac - ;; - esac - fi # $linkmode,$pass = prog,link... 
- - if test "$alldeplibs" = yes && - { test "$deplibs_check_method" = pass_all || - { test "$build_libtool_libs" = yes && - test -n "$library_names"; }; }; then - # We only need to search for static libraries - continue - fi - fi - - link_static=no # Whether the deplib will be linked statically - if test -n "$library_names" && - { test "$prefer_static_libs" = no || test -z "$old_library"; }; then - if test "$installed" = no; then - notinst_deplibs="$notinst_deplibs $lib" - need_relink=yes - fi - # This is a shared library - - # Warn about portability, can't link against -module's on - # some systems (darwin) - if test "$shouldnotlink" = yes && test "$pass" = link ; then - $echo - if test "$linkmode" = prog; then - $echo "*** Warning: Linking the executable $output against the loadable module" - else - $echo "*** Warning: Linking the shared library $output against the loadable module" - fi - $echo "*** $linklib is not portable!" - fi - if test "$linkmode" = lib && - test "$hardcode_into_libs" = yes; then - # Hardcode the library path. - # Skip directories that are in the system default run-time - # search path. - case " $sys_lib_dlsearch_path " in - *" $absdir "*) ;; - *) - case "$compile_rpath " in - *" $absdir "*) ;; - *) compile_rpath="$compile_rpath $absdir" - esac - ;; - esac - case " $sys_lib_dlsearch_path " in - *" $libdir "*) ;; - *) - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" - esac - ;; - esac - fi - - if test -n "$old_archive_from_expsyms_cmds"; then - # figure out the soname - set dummy $library_names - realname="$2" - shift; shift - libname=`eval \\$echo \"$libname_spec\"` - # use dlname if we got it. it's perfectly good, no? - if test -n "$dlname"; then - soname="$dlname" - elif test -n "$soname_spec"; then - # bleh windows - case $host in - *cygwin* | mingw*) - major=`expr $current - $age` - versuffix="-$major" - ;; - esac - eval soname=\"$soname_spec\" - else - soname="$realname" - fi - - # Make a new name for the extract_expsyms_cmds to use - soroot="$soname" - soname=`$echo $soroot | ${SED} -e 's/^.*\///'` - newlib="libimp-`$echo $soname | ${SED} 's/^lib//;s/\.dll$//'`.a" - - # If the library has no export list, then create one now - if test -f "$output_objdir/$soname-def"; then : - else - $show "extracting exported symbol list from \`$soname'" - save_ifs="$IFS"; IFS='~' - cmds=$extract_expsyms_cmds - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - fi - - # Create $newlib - if test -f "$output_objdir/$newlib"; then :; else - $show "generating import library for \`$soname'" - save_ifs="$IFS"; IFS='~' - cmds=$old_archive_from_expsyms_cmds - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? 
- done - IFS="$save_ifs" - fi - # make sure the library variables are pointing to the new library - dir=$output_objdir - linklib=$newlib - fi # test -n "$old_archive_from_expsyms_cmds" - - if test "$linkmode" = prog || test "$mode" != relink; then - add_shlibpath= - add_dir= - add= - lib_linked=yes - case $hardcode_action in - immediate | unsupported) - if test "$hardcode_direct" = no; then - add="$dir/$linklib" - case $host in - *-*-sco3.2v5* ) add_dir="-L$dir" ;; - *-*-darwin* ) - # if the lib is a module then we can not link against - # it, someone is ignoring the new warnings I added - if /usr/bin/file -L $add 2> /dev/null | $EGREP "bundle" >/dev/null ; then - $echo "** Warning, lib $linklib is a module, not a shared library" - if test -z "$old_library" ; then - $echo - $echo "** And there doesn't seem to be a static archive available" - $echo "** The link will probably fail, sorry" - else - add="$dir/$old_library" - fi - fi - esac - elif test "$hardcode_minus_L" = no; then - case $host in - *-*-sunos*) add_shlibpath="$dir" ;; - esac - add_dir="-L$dir" - add="-l$name" - elif test "$hardcode_shlibpath_var" = no; then - add_shlibpath="$dir" - add="-l$name" - else - lib_linked=no - fi - ;; - relink) - if test "$hardcode_direct" = yes; then - add="$dir/$linklib" - elif test "$hardcode_minus_L" = yes; then - add_dir="-L$dir" - # Try looking first in the location we're being installed to. - if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) - add_dir="$add_dir -L$inst_prefix_dir$libdir" - ;; - esac - fi - add="-l$name" - elif test "$hardcode_shlibpath_var" = yes; then - add_shlibpath="$dir" - add="-l$name" - else - lib_linked=no - fi - ;; - *) lib_linked=no ;; - esac - - if test "$lib_linked" != yes; then - $echo "$modename: configuration error: unsupported hardcode properties" - exit $EXIT_FAILURE - fi - - if test -n "$add_shlibpath"; then - case :$compile_shlibpath: in - *":$add_shlibpath:"*) ;; - *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; - esac - fi - if test "$linkmode" = prog; then - test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" - test -n "$add" && compile_deplibs="$add $compile_deplibs" - else - test -n "$add_dir" && deplibs="$add_dir $deplibs" - test -n "$add" && deplibs="$add $deplibs" - if test "$hardcode_direct" != yes && \ - test "$hardcode_minus_L" != yes && \ - test "$hardcode_shlibpath_var" = yes; then - case :$finalize_shlibpath: in - *":$libdir:"*) ;; - *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; - esac - fi - fi - fi - - if test "$linkmode" = prog || test "$mode" = relink; then - add_shlibpath= - add_dir= - add= - # Finalize command for both is simple: just hardcode it. - if test "$hardcode_direct" = yes; then - add="$libdir/$linklib" - elif test "$hardcode_minus_L" = yes; then - add_dir="-L$libdir" - add="-l$name" - elif test "$hardcode_shlibpath_var" = yes; then - case :$finalize_shlibpath: in - *":$libdir:"*) ;; - *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; - esac - add="-l$name" - elif test "$hardcode_automatic" = yes; then - if test -n "$inst_prefix_dir" && - test -f "$inst_prefix_dir$libdir/$linklib" ; then - add="$inst_prefix_dir$libdir/$linklib" - else - add="$libdir/$linklib" - fi - else - # We cannot seem to hardcode it, guess we'll fake it. - add_dir="-L$libdir" - # Try looking first in the location we're being installed to. 
- if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) - add_dir="$add_dir -L$inst_prefix_dir$libdir" - ;; - esac - fi - add="-l$name" - fi - - if test "$linkmode" = prog; then - test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" - test -n "$add" && finalize_deplibs="$add $finalize_deplibs" - else - test -n "$add_dir" && deplibs="$add_dir $deplibs" - test -n "$add" && deplibs="$add $deplibs" - fi - fi - elif test "$linkmode" = prog; then - # Here we assume that one of hardcode_direct or hardcode_minus_L - # is not unsupported. This is valid on all known static and - # shared platforms. - if test "$hardcode_direct" != unsupported; then - test -n "$old_library" && linklib="$old_library" - compile_deplibs="$dir/$linklib $compile_deplibs" - finalize_deplibs="$dir/$linklib $finalize_deplibs" - else - compile_deplibs="-l$name -L$dir $compile_deplibs" - finalize_deplibs="-l$name -L$dir $finalize_deplibs" - fi - elif test "$build_libtool_libs" = yes; then - # Not a shared library - if test "$deplibs_check_method" != pass_all; then - # We're trying link a shared library against a static one - # but the system doesn't support it. - - # Just print a warning and add the library to dependency_libs so - # that the program can be linked against the static library. - $echo - $echo "*** Warning: This system can not link to static lib archive $lib." - $echo "*** I have the capability to make that library automatically link in when" - $echo "*** you link to this library. But I can only do this if you have a" - $echo "*** shared version of the library, which you do not appear to have." - if test "$module" = yes; then - $echo "*** But as you try to build a module library, libtool will still create " - $echo "*** a static module, that should work as long as the dlopening application" - $echo "*** is linked with the -dlopen flag to resolve symbols at runtime." - if test -z "$global_symbol_pipe"; then - $echo - $echo "*** However, this would only work if libtool was able to extract symbol" - $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" - $echo "*** not find such a program. So, this module is probably useless." - $echo "*** \`nm' from GNU binutils and a full rebuild may help." - fi - if test "$build_old_libs" = no; then - build_libtool_libs=module - build_old_libs=yes - else - build_libtool_libs=no - fi - fi - else - deplibs="$dir/$old_library $deplibs" - link_static=yes - fi - fi # link shared/static library? - - if test "$linkmode" = lib; then - if test -n "$dependency_libs" && - { test "$hardcode_into_libs" != yes || - test "$build_old_libs" = yes || - test "$link_static" = yes; }; then - # Extract -R from dependency_libs - temp_deplibs= - for libdir in $dependency_libs; do - case $libdir in - -R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'` - case " $xrpath " in - *" $temp_xrpath "*) ;; - *) xrpath="$xrpath $temp_xrpath";; - esac;; - *) temp_deplibs="$temp_deplibs $libdir";; - esac - done - dependency_libs="$temp_deplibs" - fi - - newlib_search_path="$newlib_search_path $absdir" - # Link against this library - test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" - # ... 
and its dependency_libs - tmp_libs= - for deplib in $dependency_libs; do - newdependency_libs="$deplib $newdependency_libs" - if test "X$duplicate_deps" = "Xyes" ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done - - if test "$link_all_deplibs" != no; then - # Add the search paths of all dependency libraries - for deplib in $dependency_libs; do - case $deplib in - -L*) path="$deplib" ;; - *.la) - dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'` - test "X$dir" = "X$deplib" && dir="." - # We need an absolute path. - case $dir in - [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; - *) - absdir=`cd "$dir" && pwd` - if test -z "$absdir"; then - $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2 - absdir="$dir" - fi - ;; - esac - if grep "^installed=no" $deplib > /dev/null; then - path="$absdir/$objdir" - else - eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` - if test -z "$libdir"; then - $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - if test "$absdir" != "$libdir"; then - $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2 - fi - path="$absdir" - fi - depdepl= - case $host in - *-*-darwin*) - # we do not want to link against static libs, - # but need to link against shared - eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` - if test -n "$deplibrary_names" ; then - for tmp in $deplibrary_names ; do - depdepl=$tmp - done - if test -f "$path/$depdepl" ; then - depdepl="$path/$depdepl" - fi - # do not add paths which are already there - case " $newlib_search_path " in - *" $path "*) ;; - *) newlib_search_path="$newlib_search_path $path";; - esac - fi - path="" - ;; - *) - path="-L$path" - ;; - esac - ;; - -l*) - case $host in - *-*-darwin*) - # Again, we only want to link against shared libraries - eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"` - for tmp in $newlib_search_path ; do - if test -f "$tmp/lib$tmp_libs.dylib" ; then - eval depdepl="$tmp/lib$tmp_libs.dylib" - break - fi - done - path="" - ;; - *) continue ;; - esac - ;; - *) continue ;; - esac - case " $deplibs " in - *" $path "*) ;; - *) deplibs="$path $deplibs" ;; - esac - case " $deplibs " in - *" $depdepl "*) ;; - *) deplibs="$depdepl $deplibs" ;; - esac - done - fi # link_all_deplibs != no - fi # linkmode = lib - done # for deplib in $libs - dependency_libs="$newdependency_libs" - if test "$pass" = dlpreopen; then - # Link the dlpreopened libraries before other libraries - for deplib in $save_deplibs; do - deplibs="$deplib $deplibs" - done - fi - if test "$pass" != dlopen; then - if test "$pass" != conv; then - # Make sure lib_search_path contains only unique directories. 
- lib_search_path= - for dir in $newlib_search_path; do - case "$lib_search_path " in - *" $dir "*) ;; - *) lib_search_path="$lib_search_path $dir" ;; - esac - done - newlib_search_path= - fi - - if test "$linkmode,$pass" != "prog,link"; then - vars="deplibs" - else - vars="compile_deplibs finalize_deplibs" - fi - for var in $vars dependency_libs; do - # Add libraries to $var in reverse order - eval tmp_libs=\"\$$var\" - new_libs= - for deplib in $tmp_libs; do - # FIXME: Pedantically, this is the right thing to do, so - # that some nasty dependency loop isn't accidentally - # broken: - #new_libs="$deplib $new_libs" - # Pragmatically, this seems to cause very few problems in - # practice: - case $deplib in - -L*) new_libs="$deplib $new_libs" ;; - -R*) ;; - *) - # And here is the reason: when a library appears more - # than once as an explicit dependence of a library, or - # is implicitly linked in more than once by the - # compiler, it is considered special, and multiple - # occurrences thereof are not removed. Compare this - # with having the same library being listed as a - # dependency of multiple other libraries: in this case, - # we know (pedantically, we assume) the library does not - # need to be listed more than once, so we keep only the - # last copy. This is not always right, but it is rare - # enough that we require users that really mean to play - # such unportable linking tricks to link the library - # using -Wl,-lname, so that libtool does not consider it - # for duplicate removal. - case " $specialdeplibs " in - *" $deplib "*) new_libs="$deplib $new_libs" ;; - *) - case " $new_libs " in - *" $deplib "*) ;; - *) new_libs="$deplib $new_libs" ;; - esac - ;; - esac - ;; - esac - done - tmp_libs= - for deplib in $new_libs; do - case $deplib in - -L*) - case " $tmp_libs " in - *" $deplib "*) ;; - *) tmp_libs="$tmp_libs $deplib" ;; - esac - ;; - *) tmp_libs="$tmp_libs $deplib" ;; - esac - done - eval $var=\"$tmp_libs\" - done # for var - fi - # Last step: remove runtime libs from dependency_libs - # (they stay in deplibs) - tmp_libs= - for i in $dependency_libs ; do - case " $predeps $postdeps $compiler_lib_search_path " in - *" $i "*) - i="" - ;; - esac - if test -n "$i" ; then - tmp_libs="$tmp_libs $i" - fi - done - dependency_libs=$tmp_libs - done # for pass - if test "$linkmode" = prog; then - dlfiles="$newdlfiles" - dlprefiles="$newdlprefiles" - fi - - case $linkmode in - oldlib) - if test -n "$deplibs"; then - $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2 - fi - - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2 - fi - - if test -n "$rpath"; then - $echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2 - fi - - if test -n "$xrpath"; then - $echo "$modename: warning: \`-R' is ignored for archives" 1>&2 - fi - - if test -n "$vinfo"; then - $echo "$modename: warning: \`-version-info/-version-number' is ignored for archives" 1>&2 - fi - - if test -n "$release"; then - $echo "$modename: warning: \`-release' is ignored for archives" 1>&2 - fi - - if test -n "$export_symbols" || test -n "$export_symbols_regex"; then - $echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2 - fi - - # Now set the variables for building old libraries. - build_libtool_libs=no - oldlibs="$output" - objs="$objs$old_deplibs" - ;; - - lib) - # Make sure we only generate libraries of the form `libNAME.la'. 
- case $outputname in - lib*) - name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` - eval shared_ext=\"$shrext_cmds\" - eval libname=\"$libname_spec\" - ;; - *) - if test "$module" = no; then - $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - if test "$need_lib_prefix" != no; then - # Add the "lib" prefix for modules if required - name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` - eval shared_ext=\"$shrext_cmds\" - eval libname=\"$libname_spec\" - else - libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` - fi - ;; - esac - - if test -n "$objs"; then - if test "$deplibs_check_method" != pass_all; then - $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1 - exit $EXIT_FAILURE - else - $echo - $echo "*** Warning: Linking the shared library $output against the non-libtool" - $echo "*** objects $objs is not portable!" - libobjs="$libobjs $objs" - fi - fi - - if test "$dlself" != no; then - $echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2 - fi - - set dummy $rpath - if test "$#" -gt 2; then - $echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2 - fi - install_libdir="$2" - - oldlibs= - if test -z "$rpath"; then - if test "$build_libtool_libs" = yes; then - # Building a libtool convenience library. - # Some compilers have problems with a `.al' extension so - # convenience libraries should have the same extension an - # archive normally would. - oldlibs="$output_objdir/$libname.$libext $oldlibs" - build_libtool_libs=convenience - build_old_libs=yes - fi - - if test -n "$vinfo"; then - $echo "$modename: warning: \`-version-info/-version-number' is ignored for convenience libraries" 1>&2 - fi - - if test -n "$release"; then - $echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2 - fi - else - - # Parse the version information argument. - save_ifs="$IFS"; IFS=':' - set dummy $vinfo 0 0 0 - IFS="$save_ifs" - - if test -n "$8"; then - $echo "$modename: too many parameters to \`-version-info'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - # convert absolute version numbers to libtool ages - # this retains compatibility with .la files and attempts - # to make the code below a bit more comprehensible - - case $vinfo_number in - yes) - number_major="$2" - number_minor="$3" - number_revision="$4" - # - # There are really only two kinds -- those that - # use the current revision as the major version - # and those that subtract age and use age as - # a minor version. But, then there is irix - # which has an extra 1 added just for fun - # - case $version_type in - darwin|linux|osf|windows) - current=`expr $number_major + $number_minor` - age="$number_minor" - revision="$number_revision" - ;; - freebsd-aout|freebsd-elf|sunos) - current="$number_major" - revision="$number_minor" - age="0" - ;; - irix|nonstopux) - current=`expr $number_major + $number_minor - 1` - age="$number_minor" - revision="$number_minor" - ;; - esac - ;; - no) - current="$2" - revision="$3" - age="$4" - ;; - esac - - # Check that each of the things are valid numbers. 
- case $current in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; - *) - $echo "$modename: CURRENT \`$current' must be a nonnegative integer" 1>&2 - $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - case $revision in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; - *) - $echo "$modename: REVISION \`$revision' must be a nonnegative integer" 1>&2 - $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - case $age in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; - *) - $echo "$modename: AGE \`$age' must be a nonnegative integer" 1>&2 - $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - if test "$age" -gt "$current"; then - $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2 - $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit $EXIT_FAILURE - fi - - # Calculate the version variables. - major= - versuffix= - verstring= - case $version_type in - none) ;; - - darwin) - # Like Linux, but with the current version available in - # verstring for coding it into the library header - major=.`expr $current - $age` - versuffix="$major.$age.$revision" - # Darwin ld doesn't like 0 for these options... - minor_current=`expr $current + 1` - verstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" - ;; - - freebsd-aout) - major=".$current" - versuffix=".$current.$revision"; - ;; - - freebsd-elf) - major=".$current" - versuffix=".$current"; - ;; - - irix | nonstopux) - major=`expr $current - $age + 1` - - case $version_type in - nonstopux) verstring_prefix=nonstopux ;; - *) verstring_prefix=sgi ;; - esac - verstring="$verstring_prefix$major.$revision" - - # Add in all the interfaces that we are compatible with. - loop=$revision - while test "$loop" -ne 0; do - iface=`expr $revision - $loop` - loop=`expr $loop - 1` - verstring="$verstring_prefix$major.$iface:$verstring" - done - - # Before this point, $major must not contain `.'. - major=.$major - versuffix="$major.$revision" - ;; - - linux) - major=.`expr $current - $age` - versuffix="$major.$age.$revision" - ;; - - osf) - major=.`expr $current - $age` - versuffix=".$current.$age.$revision" - verstring="$current.$age.$revision" - - # Add in all the interfaces that we are compatible with. - loop=$age - while test "$loop" -ne 0; do - iface=`expr $current - $loop` - loop=`expr $loop - 1` - verstring="$verstring:${iface}.0" - done - - # Make executables depend on our current version. - verstring="$verstring:${current}.0" - ;; - - sunos) - major=".$current" - versuffix=".$current.$revision" - ;; - - windows) - # Use '-' rather than '.', since we only want one - # extension on DOS 8.3 filesystems. - major=`expr $current - $age` - versuffix="-$major" - ;; - - *) - $echo "$modename: unknown library version type \`$version_type'" 1>&2 - $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 - exit $EXIT_FAILURE - ;; - esac - - # Clear the version info if we defaulted, and they specified a release. 
- if test -z "$vinfo" && test -n "$release"; then - major= - case $version_type in - darwin) - # we can't check for "0.0" in archive_cmds due to quoting - # problems, so we reset it completely - verstring= - ;; - *) - verstring="0.0" - ;; - esac - if test "$need_version" = no; then - versuffix= - else - versuffix=".0.0" - fi - fi - - # Remove version info from name if versioning should be avoided - if test "$avoid_version" = yes && test "$need_version" = no; then - major= - versuffix= - verstring="" - fi - - # Check to see if the archive will have undefined symbols. - if test "$allow_undefined" = yes; then - if test "$allow_undefined_flag" = unsupported; then - $echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2 - build_libtool_libs=no - build_old_libs=yes - fi - else - # Don't allow undefined symbols. - allow_undefined_flag="$no_undefined_flag" - fi - fi - - if test "$mode" != relink; then - # Remove our outputs, but don't remove object files since they - # may have been created when compiling PIC objects. - removelist= - tempremovelist=`$echo "$output_objdir/*"` - for p in $tempremovelist; do - case $p in - *.$objext) - ;; - $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) - if test "X$precious_files_regex" != "X"; then - if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 - then - continue - fi - fi - removelist="$removelist $p" - ;; - *) ;; - esac - done - if test -n "$removelist"; then - $show "${rm}r $removelist" - $run ${rm}r $removelist - fi - fi - - # Now set the variables for building old libraries. - if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then - oldlibs="$oldlibs $output_objdir/$libname.$libext" - - # Transform .lo files to .o files. - oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` - fi - - # Eliminate all temporary directories. - for path in $notinst_path; do - lib_search_path=`$echo "$lib_search_path " | ${SED} -e 's% $path % %g'` - deplibs=`$echo "$deplibs " | ${SED} -e 's% -L$path % %g'` - dependency_libs=`$echo "$dependency_libs " | ${SED} -e 's% -L$path % %g'` - done - - if test -n "$xrpath"; then - # If the user specified any rpath flags, then add them. - temp_xrpath= - for libdir in $xrpath; do - temp_xrpath="$temp_xrpath -R$libdir" - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" ;; - esac - done - if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then - dependency_libs="$temp_xrpath $dependency_libs" - fi - fi - - # Make sure dlfiles contains only unique files that won't be dlpreopened - old_dlfiles="$dlfiles" - dlfiles= - for lib in $old_dlfiles; do - case " $dlprefiles $dlfiles " in - *" $lib "*) ;; - *) dlfiles="$dlfiles $lib" ;; - esac - done - - # Make sure dlprefiles contains only unique files - old_dlprefiles="$dlprefiles" - dlprefiles= - for lib in $old_dlprefiles; do - case "$dlprefiles " in - *" $lib "*) ;; - *) dlprefiles="$dlprefiles $lib" ;; - esac - done - - if test "$build_libtool_libs" = yes; then - if test -n "$rpath"; then - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*) - # these systems don't actually have a c library (as such)! - ;; - *-*-rhapsody* | *-*-darwin1.[012]) - # Rhapsody C library is in the System framework - deplibs="$deplibs -framework System" - ;; - *-*-netbsd*) - # Don't link with libc until the a.out ld.so is fixed. 
- ;; - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) - # Do not include libc due to us having libc/libc_r. - test "X$arg" = "X-lc" && continue - ;; - *) - # Add libc to deplibs on all other systems if necessary. - if test "$build_libtool_need_lc" = "yes"; then - deplibs="$deplibs -lc" - fi - ;; - esac - fi - - # Transform deplibs into only deplibs that can be linked in shared. - name_save=$name - libname_save=$libname - release_save=$release - versuffix_save=$versuffix - major_save=$major - # I'm not sure if I'm treating the release correctly. I think - # release should show up in the -l (ie -lgmp5) so we don't want to - # add it in twice. Is that correct? - release="" - versuffix="" - major="" - newdeplibs= - droppeddeps=no - case $deplibs_check_method in - pass_all) - # Don't check for shared/static. Everything works. - # This might be a little naive. We might want to check - # whether the library exists or not. But this is on - # osf3 & osf4 and I'm not really sure... Just - # implementing what was already the behavior. - newdeplibs=$deplibs - ;; - test_compile) - # This code stresses the "libraries are programs" paradigm to its - # limits. Maybe even breaks it. We compile a program, linking it - # against the deplibs as a proxy for the library. Then we can check - # whether they linked in statically or dynamically with ldd. - $rm conftest.c - cat > conftest.c </dev/null` - for potent_lib in $potential_libs; do - # Follow soft links. - if ls -lLd "$potent_lib" 2>/dev/null \ - | grep " -> " >/dev/null; then - continue - fi - # The statement above tries to avoid entering an - # endless loop below, in case of cyclic links. - # We might still enter an endless loop, since a link - # loop can be closed while we follow links, - # but so what? - potlib="$potent_lib" - while test -h "$potlib" 2>/dev/null; do - potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` - case $potliblink in - [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; - *) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; - esac - done - if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \ - | ${SED} 10q \ - | $EGREP "$file_magic_regex" > /dev/null; then - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - break 2 - fi - done - done - fi - if test -n "$a_deplib" ; then - droppeddeps=yes - $echo - $echo "*** Warning: linker path does not have real file for library $a_deplib." - $echo "*** I have the capability to make that library automatically link in when" - $echo "*** you link to this library. But I can only do this if you have a" - $echo "*** shared version of the library, which you do not appear to have" - $echo "*** because I did check the linker path looking for a file starting" - if test -z "$potlib" ; then - $echo "*** with $libname but no candidates were found. (...for file magic test)" - else - $echo "*** with $libname and none of the candidates passed a file format test" - $echo "*** using a file magic. Last file checked: $potlib" - fi - fi - else - # Add a -L argument. - newdeplibs="$newdeplibs $a_deplib" - fi - done # Gone through all deplibs. - ;; - match_pattern*) - set dummy $deplibs_check_method - match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` - for a_deplib in $deplibs; do - name=`expr $a_deplib : '-l\(.*\)'` - # If $name is empty we are operating on a -L argument. 
- if test -n "$name" && test "$name" != "0"; then - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $a_deplib "*) - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - ;; - esac - fi - if test -n "$a_deplib" ; then - libname=`eval \\$echo \"$libname_spec\"` - for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do - potential_libs=`ls $i/$libname[.-]* 2>/dev/null` - for potent_lib in $potential_libs; do - potlib="$potent_lib" # see symlink-check above in file_magic test - if eval $echo \"$potent_lib\" 2>/dev/null \ - | ${SED} 10q \ - | $EGREP "$match_pattern_regex" > /dev/null; then - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - break 2 - fi - done - done - fi - if test -n "$a_deplib" ; then - droppeddeps=yes - $echo - $echo "*** Warning: linker path does not have real file for library $a_deplib." - $echo "*** I have the capability to make that library automatically link in when" - $echo "*** you link to this library. But I can only do this if you have a" - $echo "*** shared version of the library, which you do not appear to have" - $echo "*** because I did check the linker path looking for a file starting" - if test -z "$potlib" ; then - $echo "*** with $libname but no candidates were found. (...for regex pattern test)" - else - $echo "*** with $libname and none of the candidates passed a file format test" - $echo "*** using a regex pattern. Last file checked: $potlib" - fi - fi - else - # Add a -L argument. - newdeplibs="$newdeplibs $a_deplib" - fi - done # Gone through all deplibs. - ;; - none | unknown | *) - newdeplibs="" - tmp_deplibs=`$echo "X $deplibs" | $Xsed -e 's/ -lc$//' \ - -e 's/ -[LR][^ ]*//g'` - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - for i in $predeps $postdeps ; do - # can't use Xsed below, because $i might contain '/' - tmp_deplibs=`$echo "X $tmp_deplibs" | ${SED} -e "1s,^X,," -e "s,$i,,"` - done - fi - if $echo "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' \ - | grep . >/dev/null; then - $echo - if test "X$deplibs_check_method" = "Xnone"; then - $echo "*** Warning: inter-library dependencies are not supported in this platform." - else - $echo "*** Warning: inter-library dependencies are not known to be supported." - fi - $echo "*** All declared inter-library dependencies are being dropped." - droppeddeps=yes - fi - ;; - esac - versuffix=$versuffix_save - major=$major_save - release=$release_save - libname=$libname_save - name=$name_save - - case $host in - *-*-rhapsody* | *-*-darwin1.[012]) - # On Rhapsody replace the C library is the System framework - newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'` - ;; - esac - - if test "$droppeddeps" = yes; then - if test "$module" = yes; then - $echo - $echo "*** Warning: libtool could not satisfy all declared inter-library" - $echo "*** dependencies of module $libname. Therefore, libtool will create" - $echo "*** a static module, that should work as long as the dlopening" - $echo "*** application is linked with the -dlopen flag." - if test -z "$global_symbol_pipe"; then - $echo - $echo "*** However, this would only work if libtool was able to extract symbol" - $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" - $echo "*** not find such a program. So, this module is probably useless." - $echo "*** \`nm' from GNU binutils and a full rebuild may help." 
- fi - if test "$build_old_libs" = no; then - oldlibs="$output_objdir/$libname.$libext" - build_libtool_libs=module - build_old_libs=yes - else - build_libtool_libs=no - fi - else - $echo "*** The inter-library dependencies that have been dropped here will be" - $echo "*** automatically added whenever a program is linked with this library" - $echo "*** or is declared to -dlopen it." - - if test "$allow_undefined" = no; then - $echo - $echo "*** Since this library must not contain undefined symbols," - $echo "*** because either the platform does not support them or" - $echo "*** it was explicitly requested with -no-undefined," - $echo "*** libtool will only create a static version of it." - if test "$build_old_libs" = no; then - oldlibs="$output_objdir/$libname.$libext" - build_libtool_libs=module - build_old_libs=yes - else - build_libtool_libs=no - fi - fi - fi - fi - # Done checking deplibs! - deplibs=$newdeplibs - fi - - # All the library-specific variables (install_libdir is set above). - library_names= - old_library= - dlname= - - # Test again, we may have decided not to build it any more - if test "$build_libtool_libs" = yes; then - if test "$hardcode_into_libs" = yes; then - # Hardcode the library paths - hardcode_libdirs= - dep_rpath= - rpath="$finalize_rpath" - test "$mode" != relink && rpath="$compile_rpath$rpath" - for libdir in $rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else - # Just accumulate the unique libdirs. - case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) - hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" - ;; - esac - fi - else - eval flag=\"$hardcode_libdir_flag_spec\" - dep_rpath="$dep_rpath $flag" - fi - elif test -n "$runpath_var"; then - case "$perm_rpath " in - *" $libdir "*) ;; - *) perm_rpath="$perm_rpath $libdir" ;; - esac - fi - done - # Substitute the hardcoded libdirs into the rpath. - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - if test -n "$hardcode_libdir_flag_spec_ld"; then - eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" - else - eval dep_rpath=\"$hardcode_libdir_flag_spec\" - fi - fi - if test -n "$runpath_var" && test -n "$perm_rpath"; then - # We should set the runpath_var. - rpath= - for dir in $perm_rpath; do - rpath="$rpath$dir:" - done - eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" - fi - test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" - fi - - shlibpath="$finalize_shlibpath" - test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" - if test -n "$shlibpath"; then - eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" - fi - - # Get the real and link names of the library. 
- eval shared_ext=\"$shrext_cmds\" - eval library_names=\"$library_names_spec\" - set dummy $library_names - realname="$2" - shift; shift - - if test -n "$soname_spec"; then - eval soname=\"$soname_spec\" - else - soname="$realname" - fi - if test -z "$dlname"; then - dlname=$soname - fi - - lib="$output_objdir/$realname" - for link - do - linknames="$linknames $link" - done - - # Use standard objects if they are pic - test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` - - # Prepare the list of exported symbols - if test -z "$export_symbols"; then - if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then - $show "generating symbol list for \`$libname.la'" - export_symbols="$output_objdir/$libname.exp" - $run $rm $export_symbols - cmds=$export_symbols_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - if len=`expr "X$cmd" : ".*"` && - test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then - $show "$cmd" - $run eval "$cmd" || exit $? - skipped_export=false - else - # The command line is too long to execute in one step. - $show "using reloadable object file for export list..." - skipped_export=: - # Break out early, otherwise skipped_export may be - # set to false by a later but shorter cmd. - break - fi - done - IFS="$save_ifs" - if test -n "$export_symbols_regex"; then - $show "$EGREP -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\"" - $run eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' - $show "$mv \"${export_symbols}T\" \"$export_symbols\"" - $run eval '$mv "${export_symbols}T" "$export_symbols"' - fi - fi - fi - - if test -n "$export_symbols" && test -n "$include_expsyms"; then - $run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"' - fi - - tmp_deplibs= - for test_deplib in $deplibs; do - case " $convenience " in - *" $test_deplib "*) ;; - *) - tmp_deplibs="$tmp_deplibs $test_deplib" - ;; - esac - done - deplibs="$tmp_deplibs" - - if test -n "$convenience"; then - if test -n "$whole_archive_flag_spec"; then - save_libobjs=$libobjs - eval libobjs=\"\$libobjs $whole_archive_flag_spec\" - else - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" - - func_extract_archives $gentop $convenience - libobjs="$libobjs $func_extract_archives_result" - fi - fi - - if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then - eval flag=\"$thread_safe_flag_spec\" - linker_flags="$linker_flags $flag" - fi - - # Make a backup of the uninstalled library when relinking - if test "$mode" = relink; then - $run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $? - fi - - # Do each of the archive commands. - if test "$module" = yes && test -n "$module_cmds" ; then - if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then - eval test_cmds=\"$module_expsym_cmds\" - cmds=$module_expsym_cmds - else - eval test_cmds=\"$module_cmds\" - cmds=$module_cmds - fi - else - if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then - eval test_cmds=\"$archive_expsym_cmds\" - cmds=$archive_expsym_cmds - else - eval test_cmds=\"$archive_cmds\" - cmds=$archive_cmds - fi - fi - - if test "X$skipped_export" != "X:" && - len=`expr "X$test_cmds" : ".*" 2>/dev/null` && - test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then - : - else - # The command line is too long to link in one step, link piecewise. 
- $echo "creating reloadable object files..." - - # Save the value of $output and $libobjs because we want to - # use them later. If we have whole_archive_flag_spec, we - # want to use save_libobjs as it was before - # whole_archive_flag_spec was expanded, because we can't - # assume the linker understands whole_archive_flag_spec. - # This may have to be revisited, in case too many - # convenience libraries get linked in and end up exceeding - # the spec. - if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then - save_libobjs=$libobjs - fi - save_output=$output - output_la=`$echo "X$output" | $Xsed -e "$basename"` - - # Clear the reloadable object creation command queue and - # initialize k to one. - test_cmds= - concat_cmds= - objlist= - delfiles= - last_robj= - k=1 - output=$output_objdir/$output_la-${k}.$objext - # Loop over the list of objects to be linked. - for obj in $save_libobjs - do - eval test_cmds=\"$reload_cmds $objlist $last_robj\" - if test "X$objlist" = X || - { len=`expr "X$test_cmds" : ".*" 2>/dev/null` && - test "$len" -le "$max_cmd_len"; }; then - objlist="$objlist $obj" - else - # The command $test_cmds is almost too long, add a - # command to the queue. - if test "$k" -eq 1 ; then - # The first file doesn't have a previous command to add. - eval concat_cmds=\"$reload_cmds $objlist $last_robj\" - else - # All subsequent reloadable object files will link in - # the last one created. - eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\" - fi - last_robj=$output_objdir/$output_la-${k}.$objext - k=`expr $k + 1` - output=$output_objdir/$output_la-${k}.$objext - objlist=$obj - len=1 - fi - done - # Handle the remaining objects by creating one last - # reloadable object file. All subsequent reloadable object - # files will link in the last one created. - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ - eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" - - if ${skipped_export-false}; then - $show "generating symbol list for \`$libname.la'" - export_symbols="$output_objdir/$libname.exp" - $run $rm $export_symbols - libobjs=$output - # Append the command to create the export file. - eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\" - fi - - # Set up a command to remove the reloadable object files - # after they are used. - i=0 - while test "$i" -lt "$k" - do - i=`expr $i + 1` - delfiles="$delfiles $output_objdir/$output_la-${i}.$objext" - done - - $echo "creating a temporary reloadable object file: $output" - - # Loop through the commands generated above and execute them. - save_ifs="$IFS"; IFS='~' - for cmd in $concat_cmds; do - IFS="$save_ifs" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - - libobjs=$output - # Restore the value of output. - output=$save_output - - if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then - eval libobjs=\"\$libobjs $whole_archive_flag_spec\" - fi - # Expand the library linking commands again to reset the - # value of $libobjs for piecewise linking. - - # Do each of the archive commands. - if test "$module" = yes && test -n "$module_cmds" ; then - if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then - cmds=$module_expsym_cmds - else - cmds=$module_cmds - fi - else - if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then - cmds=$archive_expsym_cmds - else - cmds=$archive_cmds - fi - fi - - # Append the command to remove the reloadable object files - # to the just-reset $cmds. 
- eval cmds=\"\$cmds~\$rm $delfiles\" - fi - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || { - lt_exit=$? - - # Restore the uninstalled library and exit - if test "$mode" = relink; then - $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' - fi - - exit $lt_exit - } - done - IFS="$save_ifs" - - # Restore the uninstalled library and exit - if test "$mode" = relink; then - $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $? - - if test -n "$convenience"; then - if test -z "$whole_archive_flag_spec"; then - $show "${rm}r $gentop" - $run ${rm}r "$gentop" - fi - fi - - exit $EXIT_SUCCESS - fi - - # Create links to the real library. - for linkname in $linknames; do - if test "$realname" != "$linkname"; then - $show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)" - $run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $? - fi - done - - # If -module or -export-dynamic was specified, set the dlname. - if test "$module" = yes || test "$export_dynamic" = yes; then - # On all known operating systems, these are identical. - dlname="$soname" - fi - fi - ;; - - obj) - if test -n "$deplibs"; then - $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2 - fi - - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2 - fi - - if test -n "$rpath"; then - $echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2 - fi - - if test -n "$xrpath"; then - $echo "$modename: warning: \`-R' is ignored for objects" 1>&2 - fi - - if test -n "$vinfo"; then - $echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2 - fi - - if test -n "$release"; then - $echo "$modename: warning: \`-release' is ignored for objects" 1>&2 - fi - - case $output in - *.lo) - if test -n "$objs$old_deplibs"; then - $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2 - exit $EXIT_FAILURE - fi - libobj="$output" - obj=`$echo "X$output" | $Xsed -e "$lo2o"` - ;; - *) - libobj= - obj="$output" - ;; - esac - - # Delete the old objects. - $run $rm $obj $libobj - - # Objects from convenience libraries. This assumes - # single-version convenience libraries. Whenever we create - # different ones for PIC/non-PIC, this we'll have to duplicate - # the extraction. - reload_conv_objs= - gentop= - # reload_cmds runs $LD directly, so let us get rid of - # -Wl from whole_archive_flag_spec - wl= - - if test -n "$convenience"; then - if test -n "$whole_archive_flag_spec"; then - eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\" - else - gentop="$output_objdir/${obj}x" - generated="$generated $gentop" - - func_extract_archives $gentop $convenience - reload_conv_objs="$reload_objs $func_extract_archives_result" - fi - fi - - # Create the old-style object. - reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test - - output="$obj" - cmds=$reload_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - - # Exit if we aren't doing a library object file. 
- if test -z "$libobj"; then - if test -n "$gentop"; then - $show "${rm}r $gentop" - $run ${rm}r $gentop - fi - - exit $EXIT_SUCCESS - fi - - if test "$build_libtool_libs" != yes; then - if test -n "$gentop"; then - $show "${rm}r $gentop" - $run ${rm}r $gentop - fi - - # Create an invalid libtool object if no PIC, so that we don't - # accidentally link it into a program. - # $show "echo timestamp > $libobj" - # $run eval "echo timestamp > $libobj" || exit $? - exit $EXIT_SUCCESS - fi - - if test -n "$pic_flag" || test "$pic_mode" != default; then - # Only do commands if we really have different PIC objects. - reload_objs="$libobjs $reload_conv_objs" - output="$libobj" - cmds=$reload_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - fi - - if test -n "$gentop"; then - $show "${rm}r $gentop" - $run ${rm}r $gentop - fi - - exit $EXIT_SUCCESS - ;; - - prog) - case $host in - *cygwin*) output=`$echo $output | ${SED} -e 's,.exe$,,;s,$,.exe,'` ;; - esac - if test -n "$vinfo"; then - $echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2 - fi - - if test -n "$release"; then - $echo "$modename: warning: \`-release' is ignored for programs" 1>&2 - fi - - if test "$preload" = yes; then - if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown && - test "$dlopen_self_static" = unknown; then - $echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support." - fi - fi - - case $host in - *-*-rhapsody* | *-*-darwin1.[012]) - # On Rhapsody replace the C library is the System framework - compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'` - finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'` - ;; - esac - - case $host in - *darwin*) - # Don't allow lazy linking, it breaks C++ global constructors - if test "$tagname" = CXX ; then - compile_command="$compile_command ${wl}-bind_at_load" - finalize_command="$finalize_command ${wl}-bind_at_load" - fi - ;; - esac - - compile_command="$compile_command $compile_deplibs" - finalize_command="$finalize_command $finalize_deplibs" - - if test -n "$rpath$xrpath"; then - # If the user specified any rpath flags, then add them. - for libdir in $rpath $xrpath; do - # This is the magic to use -rpath. - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" ;; - esac - done - fi - - # Now hardcode the library paths - rpath= - hardcode_libdirs= - for libdir in $compile_rpath $finalize_rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else - # Just accumulate the unique libdirs. - case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) - hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" - ;; - esac - fi - else - eval flag=\"$hardcode_libdir_flag_spec\" - rpath="$rpath $flag" - fi - elif test -n "$runpath_var"; then - case "$perm_rpath " in - *" $libdir "*) ;; - *) perm_rpath="$perm_rpath $libdir" ;; - esac - fi - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) - case :$dllsearchpath: in - *":$libdir:"*) ;; - *) dllsearchpath="$dllsearchpath:$libdir";; - esac - ;; - esac - done - # Substitute the hardcoded libdirs into the rpath. 
- if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - eval rpath=\" $hardcode_libdir_flag_spec\" - fi - compile_rpath="$rpath" - - rpath= - hardcode_libdirs= - for libdir in $finalize_rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else - # Just accumulate the unique libdirs. - case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) - hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" - ;; - esac - fi - else - eval flag=\"$hardcode_libdir_flag_spec\" - rpath="$rpath $flag" - fi - elif test -n "$runpath_var"; then - case "$finalize_perm_rpath " in - *" $libdir "*) ;; - *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; - esac - fi - done - # Substitute the hardcoded libdirs into the rpath. - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - eval rpath=\" $hardcode_libdir_flag_spec\" - fi - finalize_rpath="$rpath" - - if test -n "$libobjs" && test "$build_old_libs" = yes; then - # Transform all the library objects into standard objects. - compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` - finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` - fi - - dlsyms= - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - if test -n "$NM" && test -n "$global_symbol_pipe"; then - dlsyms="${outputname}S.c" - else - $echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2 - fi - fi - - if test -n "$dlsyms"; then - case $dlsyms in - "") ;; - *.c) - # Discover the nlist of each of the dlfiles. - nlist="$output_objdir/${outputname}.nm" - - $show "$rm $nlist ${nlist}S ${nlist}T" - $run $rm "$nlist" "${nlist}S" "${nlist}T" - - # Parse the name list into a source file. - $show "creating $output_objdir/$dlsyms" - - test -z "$run" && $echo > "$output_objdir/$dlsyms" "\ -/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */ -/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */ - -#ifdef __cplusplus -extern \"C\" { -#endif - -/* Prevent the only kind of declaration conflicts we can make. */ -#define lt_preloaded_symbols some_other_symbol - -/* External symbol declarations for the compiler. */\ -" - - if test "$dlself" = yes; then - $show "generating symbol list for \`$output'" - - test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist" - - # Add our own program objects to the symbol list. 
- progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` - for arg in $progfiles; do - $show "extracting global C symbols from \`$arg'" - $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'" - done - - if test -n "$exclude_expsyms"; then - $run eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' - $run eval '$mv "$nlist"T "$nlist"' - fi - - if test -n "$export_symbols_regex"; then - $run eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' - $run eval '$mv "$nlist"T "$nlist"' - fi - - # Prepare the list of exported symbols - if test -z "$export_symbols"; then - export_symbols="$output_objdir/$outputname.exp" - $run $rm $export_symbols - $run eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' - else - $run eval "${SED} -e 's/\([ ][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' - $run eval 'grep -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' - $run eval 'mv "$nlist"T "$nlist"' - fi - fi - - for arg in $dlprefiles; do - $show "extracting global C symbols from \`$arg'" - name=`$echo "$arg" | ${SED} -e 's%^.*/%%'` - $run eval '$echo ": $name " >> "$nlist"' - $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'" - done - - if test -z "$run"; then - # Make sure we have at least an empty file. - test -f "$nlist" || : > "$nlist" - - if test -n "$exclude_expsyms"; then - $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T - $mv "$nlist"T "$nlist" - fi - - # Try sorting and uniquifying the output. - if grep -v "^: " < "$nlist" | - if sort -k 3 </dev/null >/dev/null 2>&1; then - sort -k 3 - else - sort +2 - fi | - uniq > "$nlist"S; then - : - else - grep -v "^: " < "$nlist" > "$nlist"S - fi - - if test -f "$nlist"S; then - eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"' - else - $echo '/* NONE */' >> "$output_objdir/$dlsyms" - fi - - $echo >> "$output_objdir/$dlsyms" "\ - -#undef lt_preloaded_symbols - -#if defined (__STDC__) && __STDC__ -# define lt_ptr void * -#else -# define lt_ptr char * -# define const -#endif - -/* The mapping between symbol names and symbols. */ -" - - case $host in - *cygwin* | *mingw* ) - $echo >> "$output_objdir/$dlsyms" "\ -/* DATA imports from DLLs on WIN32 can't be const, because - runtime relocations are performed -- see ld's documentation - on pseudo-relocs */ -struct { -" - ;; - * ) - $echo >> "$output_objdir/$dlsyms" "\ -const struct { -" - ;; - esac - - - $echo >> "$output_objdir/$dlsyms" "\ - const char *name; - lt_ptr address; -} -lt_preloaded_symbols[] = -{\ -" - - eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms" - - $echo >> "$output_objdir/$dlsyms" "\ - {0, (lt_ptr) 0} -}; - -/* This works around a problem in FreeBSD linker */ -#ifdef FREEBSD_WORKAROUND -static const void *lt_preloaded_setup() { - return lt_preloaded_symbols; -} -#endif - -#ifdef __cplusplus -} -#endif\ -" - fi - - pic_flag_for_symtable= - case $host in - # compiling the symbol table file with pic_flag works around - # a FreeBSD bug that causes programs to crash when -lm is - # linked before any other PIC object. But we must not use - # pic_flag when linking with -static. The problem exists in - # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
- *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) - case "$compile_command " in - *" -static "*) ;; - *) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";; - esac;; - *-*-hpux*) - case "$compile_command " in - *" -static "*) ;; - *) pic_flag_for_symtable=" $pic_flag";; - esac - esac - - # Now compile the dynamic symbol file. - $show "(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")" - $run eval '(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $? - - # Clean up the generated files. - $show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T" - $run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T" - - # Transform the symbol file into the correct name. - compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` - finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` - ;; - *) - $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2 - exit $EXIT_FAILURE - ;; - esac - else - # We keep going just in case the user didn't refer to - # lt_preloaded_symbols. The linker will fail if global_symbol_pipe - # really was required. - - # Nullify the symbol file. - compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"` - finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"` - fi - - if test "$need_relink" = no || test "$build_libtool_libs" != yes; then - # Replace the output file specification. - compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` - link_command="$compile_command$compile_rpath" - - # We have no uninstalled library dependencies, so finalize right now. - $show "$link_command" - $run eval "$link_command" - status=$? - - # Delete the generated files. - if test -n "$dlsyms"; then - $show "$rm $output_objdir/${outputname}S.${objext}" - $run $rm "$output_objdir/${outputname}S.${objext}" - fi - - exit $status - fi - - if test -n "$shlibpath_var"; then - # We should set the shlibpath_var - rpath= - for dir in $temp_rpath; do - case $dir in - [\\/]* | [A-Za-z]:[\\/]*) - # Absolute path. - rpath="$rpath$dir:" - ;; - *) - # Relative path: add a thisdir entry. - rpath="$rpath\$thisdir/$dir:" - ;; - esac - done - temp_rpath="$rpath" - fi - - if test -n "$compile_shlibpath$finalize_shlibpath"; then - compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" - fi - if test -n "$finalize_shlibpath"; then - finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" - fi - - compile_var= - finalize_var= - if test -n "$runpath_var"; then - if test -n "$perm_rpath"; then - # We should set the runpath_var. - rpath= - for dir in $perm_rpath; do - rpath="$rpath$dir:" - done - compile_var="$runpath_var=\"$rpath\$$runpath_var\" " - fi - if test -n "$finalize_perm_rpath"; then - # We should set the runpath_var. - rpath= - for dir in $finalize_perm_rpath; do - rpath="$rpath$dir:" - done - finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " - fi - fi - - if test "$no_install" = yes; then - # We don't need to create a wrapper script. - link_command="$compile_var$compile_command$compile_rpath" - # Replace the output file specification. - link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` - # Delete the old output file. - $run $rm $output - # Link the executable and exit - $show "$link_command" - $run eval "$link_command" || exit $? 
- exit $EXIT_SUCCESS - fi - - if test "$hardcode_action" = relink; then - # Fast installation is not supported - link_command="$compile_var$compile_command$compile_rpath" - relink_command="$finalize_var$finalize_command$finalize_rpath" - - $echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2 - $echo "$modename: \`$output' will be relinked during installation" 1>&2 - else - if test "$fast_install" != no; then - link_command="$finalize_var$compile_command$finalize_rpath" - if test "$fast_install" = yes; then - relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'` - else - # fast_install is set to needless - relink_command= - fi - else - link_command="$compile_var$compile_command$compile_rpath" - relink_command="$finalize_var$finalize_command$finalize_rpath" - fi - fi - - # Replace the output file specification. - link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` - - # Delete the old output files. - $run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname - - $show "$link_command" - $run eval "$link_command" || exit $? - - # Now create the wrapper script. - $show "creating $output" - - # Quote the relink command for shipping. - if test -n "$relink_command"; then - # Preserve any variables that may affect compiler behavior - for var in $variables_saved_for_relink; do - if eval test -z \"\${$var+set}\"; then - relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" - elif eval var_value=\$$var; test -z "$var_value"; then - relink_command="$var=; export $var; $relink_command" - else - var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` - relink_command="$var=\"$var_value\"; export $var; $relink_command" - fi - done - relink_command="(cd `pwd`; $relink_command)" - relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` - fi - - # Quote $echo for shipping. - if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then - case $progpath in - [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; - *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; - esac - qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"` - else - qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"` - fi - - # Only actually do things if our run command is non-null. - if test -z "$run"; then - # win32 will think the script is a binary if it has - # a .exe suffix, so we strip it off here. 
- case $output in - *.exe) output=`$echo $output|${SED} 's,.exe$,,'` ;; - esac - # test for cygwin because mv fails w/o .exe extensions - case $host in - *cygwin*) - exeext=.exe - outputname=`$echo $outputname|${SED} 's,.exe$,,'` ;; - *) exeext= ;; - esac - case $host in - *cygwin* | *mingw* ) - cwrappersource=`$echo ${objdir}/lt-${outputname}.c` - cwrapper=`$echo ${output}.exe` - $rm $cwrappersource $cwrapper - trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 - - cat > $cwrappersource <> $cwrappersource<<"EOF" -#include -#include -#include -#include -#include -#include - -#if defined(PATH_MAX) -# define LT_PATHMAX PATH_MAX -#elif defined(MAXPATHLEN) -# define LT_PATHMAX MAXPATHLEN -#else -# define LT_PATHMAX 1024 -#endif - -#ifndef DIR_SEPARATOR -#define DIR_SEPARATOR '/' -#endif - -#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ - defined (__OS2__) -#define HAVE_DOS_BASED_FILE_SYSTEM -#ifndef DIR_SEPARATOR_2 -#define DIR_SEPARATOR_2 '\\' -#endif -#endif - -#ifndef DIR_SEPARATOR_2 -# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) -#else /* DIR_SEPARATOR_2 */ -# define IS_DIR_SEPARATOR(ch) \ - (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) -#endif /* DIR_SEPARATOR_2 */ - -#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) -#define XFREE(stale) do { \ - if (stale) { free ((void *) stale); stale = 0; } \ -} while (0) - -const char *program_name = NULL; - -void * xmalloc (size_t num); -char * xstrdup (const char *string); -char * basename (const char *name); -char * fnqualify(const char *path); -char * strendzap(char *str, const char *pat); -void lt_fatal (const char *message, ...); - -int -main (int argc, char *argv[]) -{ - char **newargz; - int i; - - program_name = (char *) xstrdup ((char *) basename (argv[0])); - newargz = XMALLOC(char *, argc+2); -EOF - - cat >> $cwrappersource <> $cwrappersource <<"EOF" - newargz[1] = fnqualify(argv[0]); - /* we know the script has the same name, without the .exe */ - /* so make sure newargz[1] doesn't end in .exe */ - strendzap(newargz[1],".exe"); - for (i = 1; i < argc; i++) - newargz[i+1] = xstrdup(argv[i]); - newargz[argc+1] = NULL; -EOF - - cat >> $cwrappersource <> $cwrappersource <<"EOF" - return 127; -} - -void * -xmalloc (size_t num) -{ - void * p = (void *) malloc (num); - if (!p) - lt_fatal ("Memory exhausted"); - - return p; -} - -char * -xstrdup (const char *string) -{ - return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL -; -} - -char * -basename (const char *name) -{ - const char *base; - -#if defined (HAVE_DOS_BASED_FILE_SYSTEM) - /* Skip over the disk name in MSDOS pathnames. */ - if (isalpha (name[0]) && name[1] == ':') - name += 2; -#endif - - for (base = name; *name; name++) - if (IS_DIR_SEPARATOR (*name)) - base = name + 1; - return (char *) base; -} - -char * -fnqualify(const char *path) -{ - size_t size; - char *p; - char tmp[LT_PATHMAX + 1]; - - assert(path != NULL); - - /* Is it qualified already? 
*/ -#if defined (HAVE_DOS_BASED_FILE_SYSTEM) - if (isalpha (path[0]) && path[1] == ':') - return xstrdup (path); -#endif - if (IS_DIR_SEPARATOR (path[0])) - return xstrdup (path); - - /* prepend the current directory */ - /* doesn't handle '~' */ - if (getcwd (tmp, LT_PATHMAX) == NULL) - lt_fatal ("getcwd failed"); - size = strlen(tmp) + 1 + strlen(path) + 1; /* +2 for '/' and '\0' */ - p = XMALLOC(char, size); - sprintf(p, "%s%c%s", tmp, DIR_SEPARATOR, path); - return p; -} - -char * -strendzap(char *str, const char *pat) -{ - size_t len, patlen; - - assert(str != NULL); - assert(pat != NULL); - - len = strlen(str); - patlen = strlen(pat); - - if (patlen <= len) - { - str += len - patlen; - if (strcmp(str, pat) == 0) - *str = '\0'; - } - return str; -} - -static void -lt_error_core (int exit_status, const char * mode, - const char * message, va_list ap) -{ - fprintf (stderr, "%s: %s: ", program_name, mode); - vfprintf (stderr, message, ap); - fprintf (stderr, ".\n"); - - if (exit_status >= 0) - exit (exit_status); -} - -void -lt_fatal (const char *message, ...) -{ - va_list ap; - va_start (ap, message); - lt_error_core (EXIT_FAILURE, "FATAL", message, ap); - va_end (ap); -} -EOF - # we should really use a build-platform specific compiler - # here, but OTOH, the wrappers (shell script and this C one) - # are only useful if you want to execute the "real" binary. - # Since the "real" binary is built for $host, then this - # wrapper might as well be built for $host, too. - $run $LTCC -s -o $cwrapper $cwrappersource - ;; - esac - $rm $output - trap "$rm $output; exit $EXIT_FAILURE" 1 2 15 - - $echo > $output "\ -#! $SHELL - -# $output - temporary wrapper script for $objdir/$outputname -# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP -# -# The $output program cannot be directly executed until all the libtool -# libraries that it depends on are installed. -# -# This wrapper script should never be moved out of the build directory. -# If it is, it will not operate correctly. - -# Sed substitution that helps us do robust quoting. It backslashifies -# metacharacters that are still active within double-quoted strings. -Xsed='${SED} -e 1s/^X//' -sed_quote_subst='$sed_quote_subst' - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -relink_command=\"$relink_command\" - -# This environment variable determines our operation mode. -if test \"\$libtool_install_magic\" = \"$magic\"; then - # install mode needs the following variable: - notinst_deplibs='$notinst_deplibs' -else - # When we are sourced in execute mode, \$file and \$echo are already set. - if test \"\$libtool_execute_magic\" != \"$magic\"; then - echo=\"$qecho\" - file=\"\$0\" - # Make sure echo works. - if test \"X\$1\" = X--no-reexec; then - # Discard the --no-reexec flag, and continue. - shift - elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then - # Yippee, \$echo works! - : - else - # Restart under the correct shell, and then maybe \$echo will work. - exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} - fi - fi\ -" - $echo >> $output "\ - - # Find the directory that this script lives in. - thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` - test \"x\$thisdir\" = \"x\$file\" && thisdir=. - - # Follow symbolic links until we get to the real thisdir. 
- file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` - while test -n \"\$file\"; do - destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` - - # If there was a directory component, then change thisdir. - if test \"x\$destdir\" != \"x\$file\"; then - case \"\$destdir\" in - [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; - *) thisdir=\"\$thisdir/\$destdir\" ;; - esac - fi - - file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\` - file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` - done - - # Try to get the absolute directory name. - absdir=\`cd \"\$thisdir\" && pwd\` - test -n \"\$absdir\" && thisdir=\"\$absdir\" -" - - if test "$fast_install" = yes; then - $echo >> $output "\ - program=lt-'$outputname'$exeext - progdir=\"\$thisdir/$objdir\" - - if test ! -f \"\$progdir/\$program\" || \\ - { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ - test \"X\$file\" != \"X\$progdir/\$program\"; }; then - - file=\"\$\$-\$program\" - - if test ! -d \"\$progdir\"; then - $mkdir \"\$progdir\" - else - $rm \"\$progdir/\$file\" - fi" - - $echo >> $output "\ - - # relink executable if necessary - if test -n \"\$relink_command\"; then - if relink_command_output=\`eval \$relink_command 2>&1\`; then : - else - $echo \"\$relink_command_output\" >&2 - $rm \"\$progdir/\$file\" - exit $EXIT_FAILURE - fi - fi - - $mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || - { $rm \"\$progdir/\$program\"; - $mv \"\$progdir/\$file\" \"\$progdir/\$program\"; } - $rm \"\$progdir/\$file\" - fi" - else - $echo >> $output "\ - program='$outputname' - progdir=\"\$thisdir/$objdir\" -" - fi - - $echo >> $output "\ - - if test -f \"\$progdir/\$program\"; then" - - # Export our shlibpath_var if we have one. - if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then - $echo >> $output "\ - # Add our own library path to $shlibpath_var - $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" - - # Some systems cannot cope with colon-terminated $shlibpath_var - # The second colon is a workaround for a bug in BeOS R4 sed - $shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` - - export $shlibpath_var -" - fi - - # fixup the dll searchpath if we need to. - if test -n "$dllsearchpath"; then - $echo >> $output "\ - # Add the dll search path components to the executable PATH - PATH=$dllsearchpath:\$PATH -" - fi - - $echo >> $output "\ - if test \"\$libtool_execute_magic\" != \"$magic\"; then - # Run the actual program with our arguments. -" - case $host in - # Backslashes separate directories on plain windows - *-*-mingw | *-*-os2*) - $echo >> $output "\ - exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} -" - ;; - - *) - $echo >> $output "\ - exec \"\$progdir/\$program\" \${1+\"\$@\"} -" - ;; - esac - $echo >> $output "\ - \$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\" - exit $EXIT_FAILURE - fi - else - # The program doesn't exist. - \$echo \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 - \$echo \"This script is just a wrapper for \$program.\" 1>&2 - $echo \"See the $PACKAGE documentation for more information.\" 1>&2 - exit $EXIT_FAILURE - fi -fi\ -" - chmod +x $output - fi - exit $EXIT_SUCCESS - ;; - esac - - # See if we need to build an old-fashioned archive. 
- for oldlib in $oldlibs; do - - if test "$build_libtool_libs" = convenience; then - oldobjs="$libobjs_save" - addlibs="$convenience" - build_libtool_libs=no - else - if test "$build_libtool_libs" = module; then - oldobjs="$libobjs_save" - build_libtool_libs=no - else - oldobjs="$old_deplibs $non_pic_objects" - fi - addlibs="$old_convenience" - fi - - if test -n "$addlibs"; then - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" - - func_extract_archives $gentop $addlibs - oldobjs="$oldobjs $func_extract_archives_result" - fi - - # Do each command in the archive commands. - if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then - cmds=$old_archive_from_new_cmds - else - # POSIX demands no paths to be encoded in archives. We have - # to avoid creating archives with duplicate basenames if we - # might have to extract them afterwards, e.g., when creating a - # static archive out of a convenience library, or when linking - # the entirety of a libtool archive into another (currently - # not supported by libtool). - if (for obj in $oldobjs - do - $echo "X$obj" | $Xsed -e 's%^.*/%%' - done | sort | sort -uc >/dev/null 2>&1); then - : - else - $echo "copying selected object files to avoid basename conflicts..." - - if test -z "$gentop"; then - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" - - $show "${rm}r $gentop" - $run ${rm}r "$gentop" - $show "$mkdir $gentop" - $run $mkdir "$gentop" - status=$? - if test "$status" -ne 0 && test ! -d "$gentop"; then - exit $status - fi - fi - - save_oldobjs=$oldobjs - oldobjs= - counter=1 - for obj in $save_oldobjs - do - objbase=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` - case " $oldobjs " in - " ") oldobjs=$obj ;; - *[\ /]"$objbase "*) - while :; do - # Make sure we don't pick an alternate name that also - # overlaps. - newobj=lt$counter-$objbase - counter=`expr $counter + 1` - case " $oldobjs " in - *[\ /]"$newobj "*) ;; - *) if test ! -f "$gentop/$newobj"; then break; fi ;; - esac - done - $show "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" - $run ln "$obj" "$gentop/$newobj" || - $run cp "$obj" "$gentop/$newobj" - oldobjs="$oldobjs $gentop/$newobj" - ;; - *) oldobjs="$oldobjs $obj" ;; - esac - done - fi - - eval cmds=\"$old_archive_cmds\" - - if len=`expr "X$cmds" : ".*"` && - test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then - cmds=$old_archive_cmds - else - # the command line is too long to link in one step, link in parts - $echo "using piecewise archive linking..." - save_RANLIB=$RANLIB - RANLIB=: - objlist= - concat_cmds= - save_oldobjs=$oldobjs - - # Is there a better way of finding the last object in the list? 
- for obj in $save_oldobjs - do - last_oldobj=$obj - done - for obj in $save_oldobjs - do - oldobjs="$objlist $obj" - objlist="$objlist $obj" - eval test_cmds=\"$old_archive_cmds\" - if len=`expr "X$test_cmds" : ".*" 2>/dev/null` && - test "$len" -le "$max_cmd_len"; then - : - else - # the above command should be used before it gets too long - oldobjs=$objlist - if test "$obj" = "$last_oldobj" ; then - RANLIB=$save_RANLIB - fi - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ - eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" - objlist= - fi - done - RANLIB=$save_RANLIB - oldobjs=$objlist - if test "X$oldobjs" = "X" ; then - eval cmds=\"\$concat_cmds\" - else - eval cmds=\"\$concat_cmds~\$old_archive_cmds\" - fi - fi - fi - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - eval cmd=\"$cmd\" - IFS="$save_ifs" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - done - - if test -n "$generated"; then - $show "${rm}r$generated" - $run ${rm}r$generated - fi - - # Now create the libtool archive. - case $output in - *.la) - old_library= - test "$build_old_libs" = yes && old_library="$libname.$libext" - $show "creating $output" - - # Preserve any variables that may affect compiler behavior - for var in $variables_saved_for_relink; do - if eval test -z \"\${$var+set}\"; then - relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" - elif eval var_value=\$$var; test -z "$var_value"; then - relink_command="$var=; export $var; $relink_command" - else - var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` - relink_command="$var=\"$var_value\"; export $var; $relink_command" - fi - done - # Quote the link command for shipping. - relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" - relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` - if test "$hardcode_automatic" = yes ; then - relink_command= - fi - - - # Only create the output if not a dry run. 
- if test -z "$run"; then - for installed in no yes; do - if test "$installed" = yes; then - if test -z "$install_libdir"; then - break - fi - output="$output_objdir/$outputname"i - # Replace all uninstalled libtool libraries with the installed ones - newdependency_libs= - for deplib in $dependency_libs; do - case $deplib in - *.la) - name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'` - eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` - if test -z "$libdir"; then - $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - newdependency_libs="$newdependency_libs $libdir/$name" - ;; - *) newdependency_libs="$newdependency_libs $deplib" ;; - esac - done - dependency_libs="$newdependency_libs" - newdlfiles= - for lib in $dlfiles; do - name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` - eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` - if test -z "$libdir"; then - $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - newdlfiles="$newdlfiles $libdir/$name" - done - dlfiles="$newdlfiles" - newdlprefiles= - for lib in $dlprefiles; do - name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` - eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` - if test -z "$libdir"; then - $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - newdlprefiles="$newdlprefiles $libdir/$name" - done - dlprefiles="$newdlprefiles" - else - newdlfiles= - for lib in $dlfiles; do - case $lib in - [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; - *) abs=`pwd`"/$lib" ;; - esac - newdlfiles="$newdlfiles $abs" - done - dlfiles="$newdlfiles" - newdlprefiles= - for lib in $dlprefiles; do - case $lib in - [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; - *) abs=`pwd`"/$lib" ;; - esac - newdlprefiles="$newdlprefiles $abs" - done - dlprefiles="$newdlprefiles" - fi - $rm $output - # place dlname in correct position for cygwin - tdlname=$dlname - case $host,$output,$installed,$module,$dlname in - *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;; - esac - $echo > $output "\ -# $outputname - a libtool library file -# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP -# -# Please DO NOT delete this file! -# It is necessary for linking the library. - -# The name that we can dlopen(3). -dlname='$tdlname' - -# Names of this library. -library_names='$library_names' - -# The name of the static archive. -old_library='$old_library' - -# Libraries that this one depends upon. -dependency_libs='$dependency_libs' - -# Version information for $libname. -current=$current -age=$age -revision=$revision - -# Is this an already installed library? -installed=$installed - -# Should we warn about portability when linking against -modules? -shouldnotlink=$module - -# Files to dlopen/dlpreopen -dlopen='$dlfiles' -dlpreopen='$dlprefiles' - -# Directory that this library needs to be installed in: -libdir='$install_libdir'" - if test "$installed" = no && test "$need_relink" = yes; then - $echo >> $output "\ -relink_command=\"$relink_command\"" - fi - done - fi - - # Do a symbolic link so that the libtool archive can be found in - # LD_LIBRARY_PATH before the program is installed. - $show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)" - $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $? 
- ;; - esac - exit $EXIT_SUCCESS - ;; - - # libtool install mode - install) - modename="$modename: install" - - # There may be an optional sh(1) argument at the beginning of - # install_prog (especially on Windows NT). - if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || - # Allow the use of GNU shtool's install command. - $echo "X$nonopt" | grep shtool > /dev/null; then - # Aesthetically quote it. - arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - install_prog="$arg " - arg="$1" - shift - else - install_prog= - arg=$nonopt - fi - - # The real first argument should be the name of the installation program. - # Aesthetically quote it. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - install_prog="$install_prog$arg" - - # We need to accept at least all the BSD install flags. - dest= - files= - opts= - prev= - install_type= - isdir=no - stripme= - for arg - do - if test -n "$dest"; then - files="$files $dest" - dest=$arg - continue - fi - - case $arg in - -d) isdir=yes ;; - -f) - case " $install_prog " in - *[\\\ /]cp\ *) ;; - *) prev=$arg ;; - esac - ;; - -g | -m | -o) prev=$arg ;; - -s) - stripme=" -s" - continue - ;; - -*) - ;; - *) - # If the previous option needed an argument, then skip it. - if test -n "$prev"; then - prev= - else - dest=$arg - continue - fi - ;; - esac - - # Aesthetically quote the argument. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - install_prog="$install_prog $arg" - done - - if test -z "$install_prog"; then - $echo "$modename: you must specify an install program" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - if test -n "$prev"; then - $echo "$modename: the \`$prev' option requires an argument" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - if test -z "$files"; then - if test -z "$dest"; then - $echo "$modename: no file or destination specified" 1>&2 - else - $echo "$modename: you must specify a destination" 1>&2 - fi - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - # Strip any trailing slash from the destination. - dest=`$echo "X$dest" | $Xsed -e 's%/$%%'` - - # Check to see that the destination is a directory. - test -d "$dest" && isdir=yes - if test "$isdir" = yes; then - destdir="$dest" - destname= - else - destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'` - test "X$destdir" = "X$dest" && destdir=. - destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'` - - # Not a directory, so check to see that there is only one file specified. - set dummy $files - if test "$#" -gt 2; then - $echo "$modename: \`$dest' is not a directory" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - fi - case $destdir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - for file in $files; do - case $file in - *.lo) ;; - *) - $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; - esac - done - ;; - esac - - # This variable tells wrapper scripts just to set variables rather - # than running their programs. - libtool_install_magic="$magic" - - staticlibs= - future_libdirs= - current_libdirs= - for file in $files; do - - # Do each installation. - case $file in - *.$libext) - # Do the static libraries later. - staticlibs="$staticlibs $file" - ;; - - *.la) - # Check to see that this really is a libtool archive. 
- if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : - else - $echo "$modename: \`$file' is not a valid libtool archive" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - library_names= - old_library= - relink_command= - # If there is no directory component, then add one. - case $file in - */* | *\\*) . $file ;; - *) . ./$file ;; - esac - - # Add the libdir to current_libdirs if it is the destination. - if test "X$destdir" = "X$libdir"; then - case "$current_libdirs " in - *" $libdir "*) ;; - *) current_libdirs="$current_libdirs $libdir" ;; - esac - else - # Note the libdir as a future libdir. - case "$future_libdirs " in - *" $libdir "*) ;; - *) future_libdirs="$future_libdirs $libdir" ;; - esac - fi - - dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/ - test "X$dir" = "X$file/" && dir= - dir="$dir$objdir" - - if test -n "$relink_command"; then - # Determine the prefix the user has applied to our future dir. - inst_prefix_dir=`$echo "$destdir" | $SED "s%$libdir\$%%"` - - # Don't allow the user to place us outside of our expected - # location b/c this prevents finding dependent libraries that - # are installed to the same prefix. - # At present, this check doesn't affect windows .dll's that - # are installed into $libdir/../bin (currently, that works fine) - # but it's something to keep an eye on. - if test "$inst_prefix_dir" = "$destdir"; then - $echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2 - exit $EXIT_FAILURE - fi - - if test -n "$inst_prefix_dir"; then - # Stick the inst_prefix_dir data into the link command. - relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` - else - relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%%"` - fi - - $echo "$modename: warning: relinking \`$file'" 1>&2 - $show "$relink_command" - if $run eval "$relink_command"; then : - else - $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 - exit $EXIT_FAILURE - fi - fi - - # See the names of the shared library. - set dummy $library_names - if test -n "$2"; then - realname="$2" - shift - shift - - srcname="$realname" - test -n "$relink_command" && srcname="$realname"T - - # Install the shared library and build the symlinks. - $show "$install_prog $dir/$srcname $destdir/$realname" - $run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $? - if test -n "$stripme" && test -n "$striplib"; then - $show "$striplib $destdir/$realname" - $run eval "$striplib $destdir/$realname" || exit $? - fi - - if test "$#" -gt 0; then - # Delete the old symlinks, and create new ones. - # Try `ln -sf' first, because the `ln' binary might depend on - # the symlink we replace! Solaris /bin/ln does not understand -f, - # so we also need to try rm && ln -s. - for linkname - do - if test "$linkname" != "$realname"; then - $show "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" - $run eval "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" - fi - done - fi - - # Do each command in the postinstall commands. - lib="$destdir/$realname" - cmds=$postinstall_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || { - lt_exit=$? 
- - # Restore the uninstalled library and exit - if test "$mode" = relink; then - $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' - fi - - exit $lt_exit - } - done - IFS="$save_ifs" - fi - - # Install the pseudo-library for information purposes. - name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` - instname="$dir/$name"i - $show "$install_prog $instname $destdir/$name" - $run eval "$install_prog $instname $destdir/$name" || exit $? - - # Maybe install the static library, too. - test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" - ;; - - *.lo) - # Install (i.e. copy) a libtool object. - - # Figure out destination file name, if it wasn't already specified. - if test -n "$destname"; then - destfile="$destdir/$destname" - else - destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` - destfile="$destdir/$destfile" - fi - - # Deduce the name of the destination old-style object file. - case $destfile in - *.lo) - staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"` - ;; - *.$objext) - staticdest="$destfile" - destfile= - ;; - *) - $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - # Install the libtool object if requested. - if test -n "$destfile"; then - $show "$install_prog $file $destfile" - $run eval "$install_prog $file $destfile" || exit $? - fi - - # Install the old object if enabled. - if test "$build_old_libs" = yes; then - # Deduce the name of the old-style object file. - staticobj=`$echo "X$file" | $Xsed -e "$lo2o"` - - $show "$install_prog $staticobj $staticdest" - $run eval "$install_prog \$staticobj \$staticdest" || exit $? - fi - exit $EXIT_SUCCESS - ;; - - *) - # Figure out destination file name, if it wasn't already specified. - if test -n "$destname"; then - destfile="$destdir/$destname" - else - destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` - destfile="$destdir/$destfile" - fi - - # If the file is missing, and there is a .exe on the end, strip it - # because it is most likely a libtool script we actually want to - # install - stripped_ext="" - case $file in - *.exe) - if test ! -f "$file"; then - file=`$echo $file|${SED} 's,.exe$,,'` - stripped_ext=".exe" - fi - ;; - esac - - # Do a test to see if this is really a libtool program. - case $host in - *cygwin*|*mingw*) - wrapper=`$echo $file | ${SED} -e 's,.exe$,,'` - ;; - *) - wrapper=$file - ;; - esac - if (${SED} -e '4q' $wrapper | grep "^# Generated by .*$PACKAGE")>/dev/null 2>&1; then - notinst_deplibs= - relink_command= - - # Note that it is not necessary on cygwin/mingw to append a dot to - # foo even if both foo and FILE.exe exist: automatic-append-.exe - # behavior happens only for exec(3), not for open(2)! Also, sourcing - # `FILE.' does not work on cygwin managed mounts. - # - # If there is no directory component, then add one. - case $wrapper in - */* | *\\*) . ${wrapper} ;; - *) . ./${wrapper} ;; - esac - - # Check the variables that should have been set. - if test -z "$notinst_deplibs"; then - $echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2 - exit $EXIT_FAILURE - fi - - finalize=yes - for lib in $notinst_deplibs; do - # Check to see that each library is installed. - libdir= - if test -f "$lib"; then - # If there is no directory component, then add one. - case $lib in - */* | *\\*) . $lib ;; - *) . ./$lib ;; - esac - fi - libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test - if test -n "$libdir" && test ! 
-f "$libfile"; then - $echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2 - finalize=no - fi - done - - relink_command= - # Note that it is not necessary on cygwin/mingw to append a dot to - # foo even if both foo and FILE.exe exist: automatic-append-.exe - # behavior happens only for exec(3), not for open(2)! Also, sourcing - # `FILE.' does not work on cygwin managed mounts. - # - # If there is no directory component, then add one. - case $wrapper in - */* | *\\*) . ${wrapper} ;; - *) . ./${wrapper} ;; - esac - - outputname= - if test "$fast_install" = no && test -n "$relink_command"; then - if test "$finalize" = yes && test -z "$run"; then - tmpdir="/tmp" - test -n "$TMPDIR" && tmpdir="$TMPDIR" - tmpdir="$tmpdir/libtool-$$" - save_umask=`umask` - umask 0077 - if $mkdir "$tmpdir"; then - umask $save_umask - else - umask $save_umask - $echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2 - continue - fi - file=`$echo "X$file$stripped_ext" | $Xsed -e 's%^.*/%%'` - outputname="$tmpdir/$file" - # Replace the output file specification. - relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'` - - $show "$relink_command" - if $run eval "$relink_command"; then : - else - $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 - ${rm}r "$tmpdir" - continue - fi - file="$outputname" - else - $echo "$modename: warning: cannot relink \`$file'" 1>&2 - fi - else - # Install the binary that we compiled earlier. - file=`$echo "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` - fi - fi - - # remove .exe since cygwin /usr/bin/install will append another - # one anyway - case $install_prog,$host in - */usr/bin/install*,*cygwin*) - case $file:$destfile in - *.exe:*.exe) - # this is ok - ;; - *.exe:*) - destfile=$destfile.exe - ;; - *:*.exe) - destfile=`$echo $destfile | ${SED} -e 's,.exe$,,'` - ;; - esac - ;; - esac - $show "$install_prog$stripme $file $destfile" - $run eval "$install_prog\$stripme \$file \$destfile" || exit $? - test -n "$outputname" && ${rm}r "$tmpdir" - ;; - esac - done - - for file in $staticlibs; do - name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` - - # Set up the ranlib parameters. - oldlib="$destdir/$name" - - $show "$install_prog $file $oldlib" - $run eval "$install_prog \$file \$oldlib" || exit $? - - if test -n "$stripme" && test -n "$old_striplib"; then - $show "$old_striplib $oldlib" - $run eval "$old_striplib $oldlib" || exit $? - fi - - # Do each command in the postinstall commands. - cmds=$old_postinstall_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - done - - if test -n "$future_libdirs"; then - $echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2 - fi - - if test -n "$current_libdirs"; then - # Maybe just do a dry run. - test -n "$run" && current_libdirs=" -n$current_libdirs" - exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' - else - exit $EXIT_SUCCESS - fi - ;; - - # libtool finish mode - finish) - modename="$modename: finish" - libdirs="$nonopt" - admincmds= - - if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then - for dir - do - libdirs="$libdirs $dir" - done - - for libdir in $libdirs; do - if test -n "$finish_cmds"; then - # Do each command in the finish commands. 
- cmds=$finish_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || admincmds="$admincmds - $cmd" - done - IFS="$save_ifs" - fi - if test -n "$finish_eval"; then - # Do the single finish_eval. - eval cmds=\"$finish_eval\" - $run eval "$cmds" || admincmds="$admincmds - $cmds" - fi - done - fi - - # Exit here if they wanted silent mode. - test "$show" = : && exit $EXIT_SUCCESS - - $echo "----------------------------------------------------------------------" - $echo "Libraries have been installed in:" - for libdir in $libdirs; do - $echo " $libdir" - done - $echo - $echo "If you ever happen to want to link against installed libraries" - $echo "in a given directory, LIBDIR, you must either use libtool, and" - $echo "specify the full pathname of the library, or use the \`-LLIBDIR'" - $echo "flag during linking and do at least one of the following:" - if test -n "$shlibpath_var"; then - $echo " - add LIBDIR to the \`$shlibpath_var' environment variable" - $echo " during execution" - fi - if test -n "$runpath_var"; then - $echo " - add LIBDIR to the \`$runpath_var' environment variable" - $echo " during linking" - fi - if test -n "$hardcode_libdir_flag_spec"; then - libdir=LIBDIR - eval flag=\"$hardcode_libdir_flag_spec\" - - $echo " - use the \`$flag' linker flag" - fi - if test -n "$admincmds"; then - $echo " - have your system administrator run these commands:$admincmds" - fi - if test -f /etc/ld.so.conf; then - $echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" - fi - $echo - $echo "See any operating system documentation about shared libraries for" - $echo "more information, such as the ld(1) and ld.so(8) manual pages." - $echo "----------------------------------------------------------------------" - exit $EXIT_SUCCESS - ;; - - # libtool execute mode - execute) - modename="$modename: execute" - - # The first argument is the command name. - cmd="$nonopt" - if test -z "$cmd"; then - $echo "$modename: you must specify a COMMAND" 1>&2 - $echo "$help" - exit $EXIT_FAILURE - fi - - # Handle -dlopen flags immediately. - for file in $execute_dlfiles; do - if test ! -f "$file"; then - $echo "$modename: \`$file' is not a file" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - dir= - case $file in - *.la) - # Check to see that this really is a libtool archive. - if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : - else - $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - # Read the libtool library. - dlname= - library_names= - - # If there is no directory component, then add one. - case $file in - */* | *\\*) . $file ;; - *) . ./$file ;; - esac - - # Skip this library if it cannot be dlopened. - if test -z "$dlname"; then - # Warn if it was a shared library. - test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'" - continue - fi - - dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` - test "X$dir" = "X$file" && dir=. - - if test -f "$dir/$objdir/$dlname"; then - dir="$dir/$objdir" - else - $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2 - exit $EXIT_FAILURE - fi - ;; - - *.lo) - # Just add the directory containing the .lo file. - dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` - test "X$dir" = "X$file" && dir=. 
- ;; - - *) - $echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2 - continue - ;; - esac - - # Get the absolute pathname. - absdir=`cd "$dir" && pwd` - test -n "$absdir" && dir="$absdir" - - # Now add the directory to shlibpath_var. - if eval "test -z \"\$$shlibpath_var\""; then - eval "$shlibpath_var=\"\$dir\"" - else - eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" - fi - done - - # This variable tells wrapper scripts just to set shlibpath_var - # rather than running their programs. - libtool_execute_magic="$magic" - - # Check if any of the arguments is a wrapper script. - args= - for file - do - case $file in - -*) ;; - *) - # Do a test to see if this is really a libtool program. - if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - # If there is no directory component, then add one. - case $file in - */* | *\\*) . $file ;; - *) . ./$file ;; - esac - - # Transform arg to wrapped name. - file="$progdir/$program" - fi - ;; - esac - # Quote arguments (to preserve shell metacharacters). - file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"` - args="$args \"$file\"" - done - - if test -z "$run"; then - if test -n "$shlibpath_var"; then - # Export the shlibpath_var. - eval "export $shlibpath_var" - fi - - # Restore saved environment variables - if test "${save_LC_ALL+set}" = set; then - LC_ALL="$save_LC_ALL"; export LC_ALL - fi - if test "${save_LANG+set}" = set; then - LANG="$save_LANG"; export LANG - fi - - # Now prepare to actually exec the command. - exec_cmd="\$cmd$args" - else - # Display what would be done. - if test -n "$shlibpath_var"; then - eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\"" - $echo "export $shlibpath_var" - fi - $echo "$cmd$args" - exit $EXIT_SUCCESS - fi - ;; - - # libtool clean and uninstall mode - clean | uninstall) - modename="$modename: $mode" - rm="$nonopt" - files= - rmforce= - exit_status=0 - - # This variable tells wrapper scripts just to set variables rather - # than running their programs. - libtool_install_magic="$magic" - - for arg - do - case $arg in - -f) rm="$rm $arg"; rmforce=yes ;; - -*) rm="$rm $arg" ;; - *) files="$files $arg" ;; - esac - done - - if test -z "$rm"; then - $echo "$modename: you must specify an RM program" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - - rmdirs= - - origobjdir="$objdir" - for file in $files; do - dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` - if test "X$dir" = "X$file"; then - dir=. - objdir="$origobjdir" - else - objdir="$dir/$origobjdir" - fi - name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` - test "$mode" = uninstall && objdir="$dir" - - # Remember objdir for removal later, being careful to avoid duplicates - if test "$mode" = clean; then - case " $rmdirs " in - *" $objdir "*) ;; - *) rmdirs="$rmdirs $objdir" ;; - esac - fi - - # Don't error if the file doesn't exist and rm -f was used. - if (test -L "$file") >/dev/null 2>&1 \ - || (test -h "$file") >/dev/null 2>&1 \ - || test -f "$file"; then - : - elif test -d "$file"; then - exit_status=1 - continue - elif test "$rmforce" = yes; then - continue - fi - - rmfiles="$file" - - case $name in - *.la) - # Possibly a libtool archive, so verify it. - if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - . $dir/$name - - # Delete the libtool libraries and symlinks. 
- for n in $library_names; do - rmfiles="$rmfiles $objdir/$n" - done - test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" - test "$mode" = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" - - if test "$mode" = uninstall; then - if test -n "$library_names"; then - # Do each command in the postuninstall commands. - cmds=$postuninstall_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" - if test "$?" -ne 0 && test "$rmforce" != yes; then - exit_status=1 - fi - done - IFS="$save_ifs" - fi - - if test -n "$old_library"; then - # Do each command in the old_postuninstall commands. - cmds=$old_postuninstall_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" - if test "$?" -ne 0 && test "$rmforce" != yes; then - exit_status=1 - fi - done - IFS="$save_ifs" - fi - # FIXME: should reinstall the best remaining shared library. - fi - fi - ;; - - *.lo) - # Possibly a libtool object, so verify it. - if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - - # Read the .lo file - . $dir/$name - - # Add PIC object to the list of files to remove. - if test -n "$pic_object" \ - && test "$pic_object" != none; then - rmfiles="$rmfiles $dir/$pic_object" - fi - - # Add non-PIC object to the list of files to remove. - if test -n "$non_pic_object" \ - && test "$non_pic_object" != none; then - rmfiles="$rmfiles $dir/$non_pic_object" - fi - fi - ;; - - *) - if test "$mode" = clean ; then - noexename=$name - case $file in - *.exe) - file=`$echo $file|${SED} 's,.exe$,,'` - noexename=`$echo $name|${SED} 's,.exe$,,'` - # $file with .exe has already been added to rmfiles, - # add $file without .exe - rmfiles="$rmfiles $file" - ;; - esac - # Do a test to see if this is a libtool program. - if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - relink_command= - . $dir/$noexename - - # note $name still contains .exe if it was in $file originally - # as does the version of $file that was added into $rmfiles - rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" - if test "$fast_install" = yes && test -n "$relink_command"; then - rmfiles="$rmfiles $objdir/lt-$name" - fi - if test "X$noexename" != "X$name" ; then - rmfiles="$rmfiles $objdir/lt-${noexename}.c" - fi - fi - fi - ;; - esac - $show "$rm $rmfiles" - $run $rm $rmfiles || exit_status=1 - done - objdir="$origobjdir" - - # Try to remove the ${objdir}s in the directories where we deleted files - for dir in $rmdirs; do - if test -d "$dir"; then - $show "rmdir $dir" - $run rmdir $dir >/dev/null 2>&1 - fi - done - - exit $exit_status - ;; - - "") - $echo "$modename: you must specify a MODE" 1>&2 - $echo "$generic_help" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - if test -z "$exec_cmd"; then - $echo "$modename: invalid operation mode \`$mode'" 1>&2 - $echo "$generic_help" 1>&2 - exit $EXIT_FAILURE - fi -fi # test -z "$show_help" - -if test -n "$exec_cmd"; then - eval exec $exec_cmd - exit $EXIT_FAILURE -fi - -# We need to display help for each of the modes. -case $mode in -"") $echo \ -"Usage: $modename [OPTION]... [MODE-ARG]... - -Provide generalized library-building support services. 
- - --config show all configuration variables - --debug enable verbose shell tracing --n, --dry-run display commands without modifying any files - --features display basic configuration information and exit - --finish same as \`--mode=finish' - --help display this help message and exit - --mode=MODE use operation mode MODE [default=inferred from MODE-ARGS] - --quiet same as \`--silent' - --silent don't print informational messages - --tag=TAG use configuration variables from tag TAG - --version print version information - -MODE must be one of the following: - - clean remove files from the build directory - compile compile a source file into a libtool object - execute automatically set library path, then run a program - finish complete the installation of libtool libraries - install install libraries or executables - link create a library or an executable - uninstall remove libraries from an installed directory - -MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for -a more detailed description of MODE. - -Report bugs to ." - exit $EXIT_SUCCESS - ;; - -clean) - $echo \ -"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE... - -Remove files from the build directory. - -RM is the name of the program to use to delete files associated with each FILE -(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed -to RM. - -If FILE is a libtool library, object or program, all the files associated -with it are deleted. Otherwise, only FILE itself is deleted using RM." - ;; - -compile) - $echo \ -"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE - -Compile a source file into a libtool library object. - -This mode accepts the following additional options: - - -o OUTPUT-FILE set the output file name to OUTPUT-FILE - -prefer-pic try to building PIC objects only - -prefer-non-pic try to building non-PIC objects only - -static always build a \`.o' file suitable for static linking - -COMPILE-COMMAND is a command to be used in creating a \`standard' object file -from the given SOURCEFILE. - -The output file name is determined by removing the directory component from -SOURCEFILE, then substituting the C source code suffix \`.c' with the -library object suffix, \`.lo'." - ;; - -execute) - $echo \ -"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]... - -Automatically set library path, then run a program. - -This mode accepts the following additional options: - - -dlopen FILE add the directory containing FILE to the library path - -This mode sets the library path environment variable according to \`-dlopen' -flags. - -If any of the ARGS are libtool executable wrappers, then they are translated -into their corresponding uninstalled binary, and any of their required library -directories are added to the library path. - -Then, COMMAND is executed, with ARGS as arguments." - ;; - -finish) - $echo \ -"Usage: $modename [OPTION]... --mode=finish [LIBDIR]... - -Complete the installation of libtool libraries. - -Each LIBDIR is a directory that contains libtool libraries. - -The commands that this mode executes may require superuser privileges. Use -the \`--dry-run' option if you just want to see what would be executed." - ;; - -install) - $echo \ -"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND... - -Install executables or libraries. - -INSTALL-COMMAND is the installation command. The first component should be -either the \`install' or \`cp' program. 
- -The rest of the components are interpreted as arguments to that command (only -BSD-compatible install options are recognized)." - ;; - -link) - $echo \ -"Usage: $modename [OPTION]... --mode=link LINK-COMMAND... - -Link object files or libraries together to form another library, or to -create an executable program. - -LINK-COMMAND is a command using the C compiler that you would use to create -a program from several object files. - -The following components of LINK-COMMAND are treated specially: - - -all-static do not do any dynamic linking at all - -avoid-version do not add a version suffix if possible - -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime - -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols - -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) - -export-symbols SYMFILE - try to export only the symbols listed in SYMFILE - -export-symbols-regex REGEX - try to export only the symbols matching REGEX - -LLIBDIR search LIBDIR for required installed libraries - -lNAME OUTPUT-FILE requires the installed library libNAME - -module build a library that can dlopened - -no-fast-install disable the fast-install mode - -no-install link a not-installable executable - -no-undefined declare that a library does not refer to external symbols - -o OUTPUT-FILE create OUTPUT-FILE from the specified objects - -objectlist FILE Use a list of object files found in FILE to specify objects - -precious-files-regex REGEX - don't remove output files matching REGEX - -release RELEASE specify package release information - -rpath LIBDIR the created library will eventually be installed in LIBDIR - -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries - -static do not do any dynamic linking of libtool libraries - -version-info CURRENT[:REVISION[:AGE]] - specify library version info [each variable defaults to 0] - -All other options (arguments beginning with \`-') are ignored. - -Every other argument is treated as a filename. Files ending in \`.la' are -treated as uninstalled libtool libraries, other files are standard or library -object files. - -If the OUTPUT-FILE ends in \`.la', then a libtool library is created, -only library objects (\`.lo' files) may be specified, and \`-rpath' is -required, except when creating a convenience library. - -If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created -using \`ar' and \`ranlib', or on Windows using \`lib'. - -If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file -is created, otherwise an executable program is created." - ;; - -uninstall) - $echo \ -"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... - -Remove libraries from an installation directory. - -RM is the name of the program to use to delete files associated with each FILE -(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed -to RM. - -If FILE is a libtool library, all the files associated with it are deleted. -Otherwise, only FILE itself is deleted using RM." - ;; - -*) - $echo "$modename: invalid operation mode \`$mode'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; -esac - -$echo -$echo "Try \`$modename --help' for more information about other modes." - -exit $? - -# The TAGs below are defined such that we never get into a situation -# in which we disable both kinds of libraries. 
Given conflicting -# choices, we go for a static library, that is the most portable, -# since we can't tell whether shared libraries were disabled because -# the user asked for that or because the platform doesn't support -# them. This is particularly important on AIX, because we don't -# support having both static and shared libraries enabled at the same -# time on that platform, so we default to a shared-only configuration. -# If a disable-shared tag is given, we'll fallback to a static-only -# configuration. But we'll never go from static-only to shared-only. - -# ### BEGIN LIBTOOL TAG CONFIG: disable-shared -build_libtool_libs=no -build_old_libs=yes -# ### END LIBTOOL TAG CONFIG: disable-shared - -# ### BEGIN LIBTOOL TAG CONFIG: disable-static -build_old_libs=`case $build_libtool_libs in yes) $echo no;; *) $echo yes;; esac` -# ### END LIBTOOL TAG CONFIG: disable-static - -# Local Variables: -# mode:shell-script -# sh-indentation:2 -# End: diff --git a/storage/bdb/dist/pubdef.in b/storage/bdb/dist/pubdef.in deleted file mode 100644 index b0873839d7d..00000000000 --- a/storage/bdb/dist/pubdef.in +++ /dev/null @@ -1,412 +0,0 @@ -# $Id: pubdef.in,v 12.18 2005/11/08 03:25:00 bostic Exp $ -# -# Name -# D == documentation -# I == include file -# J == Java constant -# N == wrapped by the Java native layer -DB_AFTER D I J -DB_AGGRESSIVE D I J -DB_ALREADY_ABORTED * I * -DB_AM_CHKSUM * I * -DB_AM_CL_WRITER * I * -DB_AM_COMPENSATE * I * -DB_AM_CREATED * I * -DB_AM_CREATED_MSTR * I * -DB_AM_DBM_ERROR * I * -DB_AM_DELIMITER * I * -DB_AM_DISCARD * I * -DB_AM_DUP * I * -DB_AM_DUPSORT * I * -DB_AM_ENCRYPT * I * -DB_AM_FIXEDLEN * I * -DB_AM_INMEM * I * -DB_AM_INORDER * I * -DB_AM_IN_RENAME * I * -DB_AM_NOT_DURABLE * I * -DB_AM_OPEN_CALLED * I * -DB_AM_PAD * I * -DB_AM_PGDEF * I * -DB_AM_RDONLY * I * -DB_AM_READ_UNCOMMITTED * I * -DB_AM_RECNUM * I * -DB_AM_RECOVER * I * -DB_AM_RENUMBER * I * -DB_AM_REVSPLITOFF * I * -DB_AM_SECONDARY * I * -DB_AM_SNAPSHOT * I * -DB_AM_SUBDB * I * -DB_AM_SWAP * I * -DB_AM_TXN * I * -DB_AM_VERIFYING * I * -DB_APPEND D I J -DB_ARCH_ABS D I J -DB_ARCH_DATA D I J -DB_ARCH_LOG D I J -DB_ARCH_REMOVE D I J -DB_ASSOC_IMMUTABLE_KEY * I * -DB_AUTO_COMMIT D I J -DB_BEFORE D I J -DB_BTREE D I J -DB_BTREEMAGIC * I * -DB_BTREEOLDVER * I * -DB_BTREEVERSION * I * -DB_BUFFER_SMALL D I N -DB_CACHED_COUNTS * I * -DB_CDB_ALLDB D I J -DB_CHKSUM D I J -DB_COMPACT_FLAGS * I * -DB_CONFIG D * * -DB_CONSUME D I J -DB_CONSUME_WAIT D I J -DB_CREATE D I J -DB_CURRENT D I J -DB_CXX_NO_EXCEPTIONS D I * -DB_DBM_HSEARCH * I * -DB_DBT_APPMALLOC D I N -DB_DBT_DUPOK * I * -DB_DBT_ISSET * I * -DB_DBT_MALLOC D I J -DB_DBT_PARTIAL D I J -DB_DBT_REALLOC D I N -DB_DBT_USERMEM D I J -DB_DEGREE_2 * I * -DB_DELETED * I * -DB_DIRECT D I * -DB_DIRECT_DB D I J -DB_DIRECT_LOG D I J -DB_DIRTY_READ * I * -DB_DONOTINDEX D I J -DB_DSYNC_DB D I J -DB_DSYNC_LOG D I J -DB_DUP D I J -DB_DUPSORT D I J -DB_DURABLE_UNKNOWN * I * -DB_EID_BROADCAST D I J -DB_EID_INVALID D I J -DB_ENCRYPT D I J -DB_ENCRYPT_AES D I J -DB_ENV_AUTO_COMMIT * I * -DB_ENV_CDB * I * -DB_ENV_CDB_ALLDB * I * -DB_ENV_CREATE * I * -DB_ENV_DBLOCAL * I * -DB_ENV_DIRECT_DB * I * -DB_ENV_DIRECT_LOG * I * -DB_ENV_DSYNC_DB * I * -DB_ENV_DSYNC_LOG * I * -DB_ENV_FATAL * I * -DB_ENV_LOCKDOWN * I * -DB_ENV_LOG_AUTOREMOVE * I * -DB_ENV_LOG_INMEMORY * I * -DB_ENV_NOLOCKING * I * -DB_ENV_NOMMAP * I * -DB_ENV_NOPANIC * I * -DB_ENV_OPEN_CALLED * I * -DB_ENV_OVERWRITE * I * -DB_ENV_PRIVATE * I * -DB_ENV_REGION_INIT * I * -DB_ENV_RPCCLIENT * I * -DB_ENV_RPCCLIENT_GIVEN * I * 
-DB_ENV_SYSTEM_MEM * I * -DB_ENV_THREAD * I * -DB_ENV_TIME_NOTGRANTED * I * -DB_ENV_TXN_NOSYNC * I * -DB_ENV_TXN_WRITE_NOSYNC * I * -DB_ENV_YIELDCPU * I * -DB_EXCL D I J -DB_EXTENT * I * -DB_FAST_STAT D I J -DB_FCNTL_LOCKING * I * -DB_FILE_ID_LEN * I * -DB_FIRST D I J -DB_FLUSH D I J -DB_FORCE D I J -DB_FREELIST_ONLY D I J -DB_FREE_SPACE D I J -DB_GET_BOTH D I J -DB_GET_BOTHC * I * -DB_GET_BOTH_RANGE D I J -DB_GET_RECNO D I J -DB_HANDLE_LOCK * I * -DB_HASH D I J -DB_HASHMAGIC * I * -DB_HASHOLDVER * I * -DB_HASHVERSION * I * -DB_HOME D * * -DB_IMMUTABLE_KEY D I J -DB_INIT_CDB D I J -DB_INIT_LOCK D I J -DB_INIT_LOG D I J -DB_INIT_MPOOL D I J -DB_INIT_REP D I J -DB_INIT_TXN D I J -DB_INORDER D I J -DB_JOINENV * I J -DB_JOIN_ITEM D I J -DB_JOIN_NOSORT D I J -DB_KEYEMPTY D I J -DB_KEYEXIST D I J -DB_KEYFIRST D I J -DB_KEYLAST D I J -DB_LAST D I J -DB_LOCKDOWN D I J -DB_LOCKVERSION * I * -DB_LOCK_ABORT * I * -DB_LOCK_DEADLOCK D I J -DB_LOCK_DEFAULT D I J -DB_LOCK_DUMP * I * -DB_LOCK_EXPIRE D I J -DB_LOCK_GET D I J -DB_LOCK_GET_TIMEOUT D I J -DB_LOCK_INHERIT * I * -DB_LOCK_IREAD D I J -DB_LOCK_IWR D I J -DB_LOCK_IWRITE D I J -DB_LOCK_MAXLOCKS D I J -DB_LOCK_MAXWRITE D I J -DB_LOCK_MINLOCKS D I J -DB_LOCK_MINWRITE D I J -DB_LOCK_NG * I * -DB_LOCK_NORUN * I * -DB_LOCK_NOTGRANTED D I J -DB_LOCK_NOWAIT D I J -DB_LOCK_OLDEST D I J -DB_LOCK_PUT D I J -DB_LOCK_PUT_ALL D I J -DB_LOCK_PUT_OBJ D I J -DB_LOCK_PUT_READ * I * -DB_LOCK_RANDOM D I J -DB_LOCK_READ D I J -DB_LOCK_READ_UNCOMMITTED * I * -DB_LOCK_RECORD * I * -DB_LOCK_SET_TIMEOUT * I * -DB_LOCK_SWITCH * I * -DB_LOCK_TIMEOUT D I J -DB_LOCK_TRADE * I * -DB_LOCK_UPGRADE * I * -DB_LOCK_UPGRADE_WRITE * I * -DB_LOCK_WAIT * I * -DB_LOCK_WRITE D I J -DB_LOCK_WWRITE * I * -DB_LOCK_YOUNGEST D I J -DB_LOGC_BUF_SIZE * I * -DB_LOGFILEID_INVALID * I * -DB_LOGMAGIC * I * -DB_LOGOLDVER * I * -DB_LOGVERSION * I * -DB_LOG_AUTOREMOVE D I J -DB_LOG_BUFFER_FULL D I * -DB_LOG_CHKPNT * I * -DB_LOG_COMMIT * I * -DB_LOG_DISK * I * -DB_LOG_INMEMORY D I J -DB_LOG_LOCKED * I * -DB_LOG_NOCOPY * I * -DB_LOG_NOT_DURABLE * I * -DB_LOG_PERM * I * -DB_LOG_RESEND * I * -DB_LOG_SILENT_ERR * I * -DB_LOG_WRNOSYNC * I * -DB_LSTAT_ABORTED * I * -DB_LSTAT_EXPIRED * I * -DB_LSTAT_FREE * I * -DB_LSTAT_HELD * I * -DB_LSTAT_PENDING * I * -DB_LSTAT_WAITING * I * -DB_MAX_PAGES * I * -DB_MAX_RECORDS * I * -DB_MPOOL_CLEAN D I * -DB_MPOOL_CREATE D I * -DB_MPOOL_DIRTY D I * -DB_MPOOL_DISCARD D I * -DB_MPOOL_FREE * I * -DB_MPOOL_LAST D I * -DB_MPOOL_NEW D I * -DB_MPOOL_NOFILE D I J -DB_MPOOL_UNLINK D I J -DB_MULTIPLE D I J -DB_MULTIPLE_INIT D I * -DB_MULTIPLE_KEY D I J -DB_MULTIPLE_KEY_NEXT D I * -DB_MULTIPLE_NEXT D I * -DB_MULTIPLE_RECNO_NEXT D I * -DB_MUTEX_ALLOCATED * I * -DB_MUTEX_LOCKED * I * -DB_MUTEX_LOGICAL_LOCK * I * -DB_MUTEX_SELF_BLOCK D I * -DB_MUTEX_THREAD * I * -DB_NEEDSPLIT * I * -DB_NEXT D I J -DB_NEXT_DUP D I J -DB_NEXT_NODUP D I J -DB_NODUPDATA D I J -DB_NOLOCKING D I J -DB_NOMMAP D I J -DB_NOORDERCHK D I J -DB_NOOVERWRITE D I J -DB_NOPANIC D I J -DB_NOSERVER D I * -DB_NOSERVER_HOME D I J -DB_NOSERVER_ID D I J -DB_NOSYNC D I J -DB_NOTFOUND D I J -DB_NO_AUTO_COMMIT * I * -DB_ODDFILESIZE D I * -DB_OK_BTREE * I * -DB_OK_HASH * I * -DB_OK_QUEUE * I * -DB_OK_RECNO * I * -DB_OLD_VERSION D I * -DB_OPFLAGS_MASK * I * -DB_ORDERCHKONLY D I J -DB_OVERWRITE D I J -DB_PAGE_LOCK * I * -DB_PAGE_NOTFOUND D I * -DB_PANIC_ENVIRONMENT D I J -DB_POSITION D I J -DB_PREV D I J -DB_PREV_NODUP D I J -DB_PRINTABLE D I J -DB_PRIORITY_DEFAULT D I J -DB_PRIORITY_HIGH D I J -DB_PRIORITY_LOW D I J 
-DB_PRIORITY_VERY_HIGH D I J -DB_PRIORITY_VERY_LOW D I J -DB_PRIVATE D I J -DB_PR_PAGE * I * -DB_PR_RECOVERYTEST * I * -DB_QAMMAGIC * I * -DB_QAMOLDVER * I * -DB_QAMVERSION * I * -DB_QUEUE D I J -DB_RDONLY D I J -DB_RDWRMASTER * I * -DB_READ_COMMITTED D I J -DB_READ_UNCOMMITTED D I J -DB_RECNO D I J -DB_RECNUM D I J -DB_RECORDCOUNT * I * -DB_RECORD_LOCK * I * -DB_RECOVER D I J -DB_RECOVER_FATAL D I J -DB_REDO * I * -DB_REGION_INIT D I J -DB_REGION_MAGIC * I * -DB_REGISTER D I J -DB_RENAMEMAGIC * I * -DB_RENUMBER D I J -DB_REP_ANYWHERE D I J -DB_REP_BULKOVF * I * -DB_REP_CLIENT D I J -DB_REP_CONF_BULK D I J -DB_REP_CONF_DELAYCLIENT D I J -DB_REP_CONF_NOAUTOINIT D I J -DB_REP_CONF_NOWAIT D I J -DB_REP_DUPMASTER D I N -DB_REP_EGENCHG * I * -DB_REP_HANDLE_DEAD D I N -DB_REP_HOLDELECTION D I N -DB_REP_IGNORE D I J -DB_REP_ISPERM D I J -DB_REP_JOIN_FAILURE D I N -DB_REP_LOCKOUT D I N -DB_REP_LOGREADY * I * -DB_REP_MASTER D I J -DB_REP_NEWMASTER D I J -DB_REP_NEWSITE D I J -DB_REP_NOBUFFER D I J -DB_REP_NOTPERM D I J -DB_REP_PAGEDONE * I * -DB_REP_PERMANENT D I J -DB_REP_REREQUEST D I J -DB_REP_STARTUPDONE D I J -DB_REP_UNAVAIL D I N -DB_REVSPLITOFF D I J -DB_RMW D I J -DB_RPCCLIENT D I J -DB_RUNRECOVERY D I N -DB_SALVAGE D I J -DB_SECONDARY_BAD D I * -DB_SEQUENCE_OLDVER * I * -DB_SEQUENCE_VERSION * I * -DB_SEQ_DEC D I J -DB_SEQ_INC D I J -DB_SEQ_RANGE_SET * I * -DB_SEQ_WRAP D I J -DB_SEQ_WRAPPED * I * -DB_SET D I J -DB_SET_LOCK_TIMEOUT D I J -DB_SET_RANGE D I J -DB_SET_RECNO D I J -DB_SET_TXN_LSNP * I * -DB_SET_TXN_NOW * I * -DB_SET_TXN_TIMEOUT D I J -DB_SNAPSHOT D I J -DB_STAT_ALL D I * -DB_STAT_CLEAR D I J -DB_STAT_LOCK_CONF D I * -DB_STAT_LOCK_LOCKERS D I * -DB_STAT_LOCK_OBJECTS D I * -DB_STAT_LOCK_PARAMS D I * -DB_STAT_MEMP_HASH D I * -DB_STAT_SUBSYSTEM D I * -DB_SURPRISE_KID * I * -DB_SWAPBYTES * I * -DB_SYSTEM_MEM D I J -DB_TEST_ELECTINIT * I * -DB_TEST_ELECTVOTE1 * I * -DB_TEST_POSTDESTROY * I * -DB_TEST_POSTLOG * I * -DB_TEST_POSTLOGMETA * I * -DB_TEST_POSTOPEN * I * -DB_TEST_POSTSYNC * I * -DB_TEST_PREDESTROY * I * -DB_TEST_PREOPEN * I * -DB_TEST_SUBDB_LOCKS * I * -DB_THREAD D I J -DB_THREADID_STRLEN D I * -DB_TIMEOUT * I * -DB_TIME_NOTGRANTED D I J -DB_TRUNCATE D I J -DB_TXNVERSION * I * -DB_TXN_ABORT D I J -DB_TXN_APPLY D I J -DB_TXN_BACKWARD_ALLOC * I * -DB_TXN_BACKWARD_ROLL D I J -DB_TXN_CKP * I * -DB_TXN_FORWARD_ROLL D I J -DB_TXN_NOSYNC D I J -DB_TXN_NOT_DURABLE D I J -DB_TXN_NOWAIT D I J -DB_TXN_OPENFILES * I * -DB_TXN_POPENFILES * I * -DB_TXN_PRINT D I J -DB_TXN_SYNC D I J -DB_TXN_WRITE_NOSYNC D I J -DB_UNDO * I * -DB_UNKNOWN D I J -DB_UNREF * I * -DB_UPDATE_SECONDARY * I * -DB_UPGRADE D I J -DB_USE_ENVIRON D I J -DB_USE_ENVIRON_ROOT D I J -DB_VERB_DEADLOCK D I J -DB_VERB_RECOVERY D I J -DB_VERB_REGISTER D I J -DB_VERB_REPLICATION D I J -DB_VERB_WAITSFOR D I J -DB_VERIFY D I J -DB_VERIFY_BAD D I N -DB_VERIFY_FATAL * I * -DB_VERSION_MAJOR * I J -DB_VERSION_MINOR * I J -DB_VERSION_MISMATCH D I N -DB_VERSION_PATCH * I J -DB_VERSION_STRING * I N -DB_WRITECURSOR D I J -DB_WRITELOCK * I * -DB_WRITEOPEN * I * -DB_XA_CREATE D I J -DB_XIDDATASIZE D I J -DB_YIELDCPU D I J diff --git a/storage/bdb/dist/s_all b/storage/bdb/dist/s_all deleted file mode 100644 index cabd38e8dc6..00000000000 --- a/storage/bdb/dist/s_all +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -# $Id: s_all,v 12.0 2004/11/17 03:43:35 bostic Exp $ - -sh s_perm # permissions. -#sh s_symlink # symbolic links. -sh s_readme # distribution README file. - -# -# The following order is important, s_include must run last. 
-# -sh s_config # autoconf. -sh s_recover # logging/recovery files. -#sh s_rpc # RPC files. -sh s_include # standard include files. - -sh s_win32 # Win32 include files. -sh s_win32_dsp # Win32 build environment. -#sh s_vxworks # VxWorks include files. -#sh s_java # Java support. -#sh s_test # Test suite support. -#sh s_tags # Tags files. diff --git a/storage/bdb/dist/s_config b/storage/bdb/dist/s_config deleted file mode 100644 index 194df83a59e..00000000000 --- a/storage/bdb/dist/s_config +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh - -# $Id: s_config,v 12.1 2005/08/16 05:19:18 mjc Exp $ -# -# Build the autoconfiguration files. - -trap 'rm -f aclocal.m4 ; exit 0' 0 1 2 3 13 15 - -. ./RELEASE - -echo "autoconf: building aclocal.m4..." -cat aclocal/*.ac aclocal_java/*.ac > aclocal.m4 - -echo "autoconf: running autoheader to build config.hin..." -rm -f config.hin -autoheader -chmod 444 config.hin - -echo "autoconf: running autoconf to build configure" -rm -f configure -autoconf - -# Edit version information we couldn't pre-compute. -sed -e "s/__EDIT_DB_VERSION_MAJOR__/$DB_VERSION_MAJOR/g" \ - -e "s/__EDIT_DB_VERSION_MINOR__/$DB_VERSION_MINOR/g" \ - -e "s/__EDIT_DB_VERSION_PATCH__/$DB_VERSION_PATCH/g" \ - -e "s/__EDIT_DB_VERSION_STRING__/$DB_VERSION_STRING/g" \ - -e "s/__EDIT_DB_VERSION_UNIQUE_NAME__/$DB_VERSION_UNIQUE_NAME/g" \ - -e "s/__EDIT_DB_VERSION__/$DB_VERSION/g" configure > configure.version -mv configure.version configure - -rm -rf autom4te.cache -chmod 555 configure - -chmod 555 config.guess config.sub install-sh diff --git a/storage/bdb/dist/s_crypto b/storage/bdb/dist/s_crypto deleted file mode 100644 index cc54a347c07..00000000000 --- a/storage/bdb/dist/s_crypto +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/sh - -# $Id: s_crypto,v 12.0 2004/11/17 03:43:35 bostic Exp $ - -# Remove crypto from the DB source tree. - -d=.. - -t=/tmp/__db_a -trap 'rm -f $t ; exit 0' 0 -trap 'rm -f $t ; exit 1' 1 2 3 13 15 - -if ! test -d $d/crypto; then - echo "s_crypto: no crypto sources found in the source tree." - exit 1 -fi - -# Remove the crypto. -rm -rf $d/crypto - -# Update the release splash page. -f=$d/docs/index.html -chmod 664 $f -(echo '/DOES/' && - echo 's/DOES/DOES NOT/' && - echo 'w' && - echo 'q') | ed $f - -# Win/32. -f=win_config.in -chmod 664 $f -(echo '/#define.HAVE_CRYPTO/' && - echo 'c' && - echo '/* #undef HAVE_CRYPTO */' - echo '.' && - echo 'w' && - echo 'q') | ed $f - -f=srcfiles.in -chmod 664 $f -(echo 'g/^crypto\//d' && - echo '/crypto_stub\.c/' && - echo 's/small/dynamic small static vx/' && - echo 'w' && - echo 'q') | ed $f - - sh ./s_win32 - sh ./s_win32_dsp - -# VxWorks -f=vx_config.in -chmod 664 $f -(echo '/#define.HAVE_CRYPTO/' && - echo 'c' && - echo '/* #undef HAVE_CRYPTO */' - echo '.' && - echo 'w' && - echo 'q') | ed $f - - sh ./s_vxworks diff --git a/storage/bdb/dist/s_dir b/storage/bdb/dist/s_dir deleted file mode 100644 index 58513a8321d..00000000000 --- a/storage/bdb/dist/s_dir +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh - - -make_dir() -{ - if test ! -d $1; then - echo "mkdir $1" - mkdir $1 - status=$? - if test $status -ne 0 && test ! -d $1; then - echo "error: $status" - fi - fi -} - -echo "Creating directories..." 
- -make_dir ../test_server -make_dir ../dbinc_auto -make_dir ../build_vxworks/BerkeleyDB -make_dir ../build_vxworks/db_archive -make_dir ../build_vxworks/db_archive/db_archive -make_dir ../build_vxworks/db_checkpoint -make_dir ../build_vxworks/db_checkpoint/db_checkpoint -make_dir ../build_vxworks/db_deadlock -make_dir ../build_vxworks/db_deadlock/db_deadlock -make_dir ../build_vxworks/db_dump -make_dir ../build_vxworks/db_dump/db_dump -make_dir ../build_vxworks/db_load -make_dir ../build_vxworks/db_load/db_load -make_dir ../build_vxworks/db_printlog -make_dir ../build_vxworks/db_printlog/db_printlog -make_dir ../build_vxworks/db_recover -make_dir ../build_vxworks/db_recover/db_recover -make_dir ../build_vxworks/db_stat -make_dir ../build_vxworks/db_stat/db_stat -make_dir ../build_vxworks/db_upgrade -make_dir ../build_vxworks/db_upgrade/db_upgrade -make_dir ../build_vxworks/db_verify -make_dir ../build_vxworks/db_verify/db_verify -make_dir ../build_vxworks/dbdemo/dbdemo -make_dir ../dbinc_auto - diff --git a/storage/bdb/dist/s_include b/storage/bdb/dist/s_include deleted file mode 100644 index e5531e1bcbf..00000000000 --- a/storage/bdb/dist/s_include +++ /dev/null @@ -1,155 +0,0 @@ -#!/bin/sh - -# $Id: s_include,v 12.0 2004/11/17 03:43:35 bostic Exp $ -# -# Build the automatically generated function prototype files. - -msgc="/* DO NOT EDIT: automatically built by dist/s_include. */" - -. ./RELEASE - -head() -{ - defonly=0 - while : - do case "$1" in - space) - echo ""; shift;; - defonly) - defonly=1; shift;; - *) - name="$1"; break;; - esac - done - - echo "$msgc" - echo "#ifndef $name" - echo "#define $name" - echo "" - if [ $defonly -eq 0 ]; then - echo "#if defined(__cplusplus)" - echo "extern \"C\" {" - echo "#endif" - echo "" - fi -} - -tail() -{ - defonly=0 - while : - do case "$1" in - defonly) - defonly=1; shift;; - *) - name="$1"; break;; - esac - done - - echo "" - if [ $defonly -eq 0 ]; then - echo "#if defined(__cplusplus)" - echo "}" - echo "#endif" - fi - echo "#endif /* !$name */" -} - -# We are building several files: -# 1 external #define file -# 1 external prototype file -# 1 internal #define file -# N internal prototype files -e_dfile=/tmp/__db_c.$$ -e_pfile=/tmp/__db_a.$$ -i_dfile=/tmp/__db_d.$$ -i_pfile=/tmp/__db_b.$$ -trap 'rm -f $e_dfile $e_pfile $i_dfile $i_pfile; exit 0' 0 1 2 3 13 15 - -head defonly space _DB_EXT_DEF_IN_ > $e_dfile -head space _DB_EXT_PROT_IN_ > $e_pfile -head defonly _DB_INT_DEF_IN_ > $i_dfile - -# Process the standard directories, creating per-directory prototype -# files and adding to the external prototype and #define files. -for i in db btree clib common crypto dbreg env fileops hash hmac \ - lock log mp mutex os qam rep sequence txn xa; do - head "_${i}_ext_h_" > $i_pfile - - if [ $i = os ] ; then - f=`ls ../$i/*.c ../os_win32/*.c` - elif [ $i = rpc_server ] ; then - f=`ls ../$i/c/*.c` - elif [ $i = crypto ] ; then - f=`ls ../$i/*.c ../$i/*/*.c` - else - f=`ls ../$i/*.c` - fi - awk -f gen_inc.awk \ - -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \ - -v e_dfile=$e_dfile \ - -v e_pfile=$e_pfile \ - -v i_dfile=$i_dfile \ - -v i_pfile=$i_pfile $f - - tail "_${i}_ext_h_" >> $i_pfile - - f=../dbinc_auto/${i}_ext.h - cmp $i_pfile $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $i_pfile $f && chmod 444 $f) -done - -# Process directories which only add to the external prototype and #define -# files. 
-for i in dbm hsearch; do - f=`ls ../$i/*.c` - awk -f gen_inc.awk \ - -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \ - -v e_dfile=$e_dfile \ - -v e_pfile=$e_pfile \ - -v i_dfile="" \ - -v i_pfile="" $f -done - -# There are a few globals in DB -- add them to the external/internal -# #define files. -(echo "#define __db_global_values __db_global_values@DB_VERSION_UNIQUE_NAME@"; - echo "#define __db_jump __db_jump@DB_VERSION_UNIQUE_NAME@") >> $i_dfile -(echo "#define db_xa_switch db_xa_switch@DB_VERSION_UNIQUE_NAME@") >> $e_dfile - -# Wrap up the external #defines/prototypes, and internal #defines. -tail defonly _DB_EXT_DEF_IN_ >> $e_dfile -f=../dbinc_auto/ext_def.in -cmp $e_dfile $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $e_dfile $f && chmod 444 $f) - -tail _DB_EXT_PROT_IN_ >> $e_pfile -f=../dbinc_auto/ext_prot.in -cmp $e_pfile $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $e_pfile $f && chmod 444 $f) - -tail defonly _DB_INT_DEF_IN_ >> $i_dfile -f=../dbinc_auto/int_def.in -cmp $i_dfile $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $i_dfile $f && chmod 444 $f) - -# DB185 compatibility support. -head space defonly _DB_EXT_185_DEF_IN_ > $e_dfile -head space _DB_EXT_185_PROT_IN_ > $e_pfile - -f=`ls ../db185/*.c` -awk -f gen_inc.awk \ - -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \ - -v e_dfile=$e_dfile \ - -v e_pfile=$e_pfile \ - -v i_dfile="" \ - -v i_pfile="" $f - -tail defonly _DB_EXT_185_DEF_IN_ >> $e_dfile -f=../dbinc_auto/ext_185_def.in -cmp $e_dfile $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $e_dfile $f && chmod 444 $f) - -tail _DB_EXT_185_PROT_IN_ >> $e_pfile -f=../dbinc_auto/ext_185_prot.in -cmp $e_pfile $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $e_pfile $f && chmod 444 $f) diff --git a/storage/bdb/dist/s_java b/storage/bdb/dist/s_java deleted file mode 100644 index 57b88e8e560..00000000000 --- a/storage/bdb/dist/s_java +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -# $Id: s_java,v 12.0 2004/11/17 03:43:35 bostic Exp $ -# -# Build the Java files. - -sh s_java_stat # Create Java stat methods -sh s_java_swig # Create core Java API with SWIG -sh s_java_const # Create Java constants diff --git a/storage/bdb/dist/s_java_const b/storage/bdb/dist/s_java_const deleted file mode 100644 index 8374b1f61a8..00000000000 --- a/storage/bdb/dist/s_java_const +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/sh - -# $Id: s_java_const,v 12.0 2004/11/17 03:43:35 bostic Exp $ -# -# Build the Java files. - -msgjava="/* DO NOT EDIT: automatically built by dist/s_java_const. */" - -. 
RELEASE - -t=/tmp/__java -trap 'rm -f $t; exit 0' 0 1 2 3 13 15 - -(echo "$msgjava" && - echo && - echo 'package com.sleepycat.db.internal;' && - echo && - echo 'public interface DbConstants' && - echo '{' && - for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \ - egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \ - done | - sed -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/" \ - -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/" \ - -e "s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/" \ - -e 's/^#define[ ][ ]*//' \ - -e 's/[()=,]/ /g' \ - -e 's/\/\*/ /' | \ - awk '{ print " int " $1 " = " $2 ";" }' && - echo '}' && - echo && - echo '// end of DbConstants.java') > $t - -f=../java/src/com/sleepycat/db/internal/DbConstants.java -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) diff --git a/storage/bdb/dist/s_java_stat b/storage/bdb/dist/s_java_stat deleted file mode 100644 index 0d00be59646..00000000000 --- a/storage/bdb/dist/s_java_stat +++ /dev/null @@ -1,364 +0,0 @@ -#!/bin/sh - -# $Id: s_java_stat,v 12.9 2005/11/04 00:09:21 mjc Exp $ -# -# Build the Java files. - -msgjava="/*- - * DO NOT EDIT: automatically built by dist/s_java_stat. - * - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2002-2005 - * Sleepycat Software. All rights reserved. - */" - - -s=/tmp/__java.sed -t=/tmp/__java -c=/tmp/__javajnic -u1=/tmp/__javautil1 -u2=/tmp/__javautil2 -trap 'rm -f $t $c $u1 $u2; exit 0' 0 1 2 3 13 15 - -# Script to convert DB C structure declarations into Java declarations. -jclass() -{ - cat > $s < $s <> $c - echo " jobject jobj, struct __db_$1 *statp) {" >> $c - sed -n -f $s < ../dbinc/db.in >> $c - echo ' return (0);' >> $c - echo '}' >> $c -} - -jni_fieldid_decls() -{ - cat > $s <> $u1 -} - -jni_fieldids() -{ - cat > $s <> $u2 -} - -# Script to convert DB C structure declarations into a toString method body -jclass_toString() -{ - cat > $s < $t - jclass_jni $1 $2 - f=../java/src/com/sleepycat/db/$j_class.java - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) -} - -echo "$msgjava" > $c -> $u1 -> $u2 - -stat_class bt_stat BtreeStats " extends DatabaseStats" - -# Build CompactStats.java - not purely a statistics class, but close enough to -# share this code. 
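The stat_class helper above, like s_include, s_java_const and the other dist scripts in this listing, installs its output through the same rebuild-only-if-changed idiom: generate into a temporary file, cmp it against the checked-in copy, and replace the file (read-only, mode 444) only when the content actually differs. A minimal standalone sketch of that pattern follows; the generate function and the example.h path are placeholders for illustration, not names from the BDB tree.

#!/bin/sh
# Sketch of the rebuild-only-if-changed idiom used throughout these scripts.
# "generate" and "./example.h" are hypothetical; real scripts emit headers,
# .java files, project files, etc.
t=/tmp/__gen.$$
trap 'rm -f $t; exit 0' 0 1 2 3 13 15

generate()
{
	echo "/* generated file */"
}

f=./example.h
generate > $t
cmp $t $f > /dev/null 2>&1 ||
    (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)

Because unchanged outputs are never touched, re-running the dist scripts leaves timestamps alone and keeps rebuilds of the rest of the tree to a minimum.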
-(echo "$msgjava" - echo - echo 'package com.sleepycat.db;' - echo - echo 'import com.sleepycat.db.internal.DbUtil;' - echo - echo "public class CompactStats" - echo '{' - echo " // no public constructor" - echo " protected CompactStats() {}" - echo - echo " /* package */" - echo " CompactStats(int fillpercent, int timeout, int pages) {" - echo " this.compact_fillpercent = fillpercent;" - echo " this.compact_timeout = timeout;" - echo " this.compact_pages = pages;" - echo " }" - jclass compact - jclass_toString compact CompactStats - echo '}' - echo '// end of TransactionStats.java') > $t -jclass_jni compact __dbj_fill_compact -f=../java/src/com/sleepycat/db/CompactStats.java -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -stat_class h_stat HashStats " extends DatabaseStats" -stat_class lock_stat LockStats -stat_class log_stat LogStats -stat_class mpool_fstat CacheFileStats -stat_class mpool_stat CacheStats -stat_class mutex_stat MutexStats -stat_class qam_stat QueueStats " extends DatabaseStats" -stat_class rep_stat ReplicationStats -stat_class seq_stat SequenceStats - -# Build TransactionStats.java - special because of embedded Active class -(echo "$msgjava" - echo - echo 'package com.sleepycat.db;' - echo - echo 'import com.sleepycat.db.internal.DbUtil;' - echo - echo "public class TransactionStats" - echo '{' - echo " // no public constructor" - echo " protected TransactionStats() {}" - echo - echo -n " public static class Active {" - echo " // no public constructor" - echo " protected Active() {}" - jclass txn_active " " - jclass_toString txn_active Active " " - echo ' };' - jclass txn_stat - jclass_toString txn_stat TransactionStats - echo '}' - echo '// end of TransactionStats.java') > $t -jclass_jni txn_stat __dbj_fill_txn_stat -jclass_jni txn_active __dbj_fill_txn_active -f=../java/src/com/sleepycat/db/TransactionStats.java -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -mv $c $t -f=../libdb_java/java_stat_auto.c -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -f=../libdb_java/java_util.i -sed '/BEGIN-STAT-FIELD-DECLS/q' < $f > $t -cat $u1 >> $t -sed -n '/END-STAT-FIELD-DECLS/,/BEGIN-STAT-FIELDS/p' < $f >> $t -cat $u2 >> $t -sed -n '/END-STAT-FIELDS/,$p' < $f >> $t -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 644 $f) diff --git a/storage/bdb/dist/s_java_swig b/storage/bdb/dist/s_java_swig deleted file mode 100644 index 8be53b058bc..00000000000 --- a/storage/bdb/dist/s_java_swig +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/sh - -# $Id: s_java_swig,v 12.2 2005/10/17 19:20:12 bostic Exp $ -# -# Run SWIG to generate the Java APIs - -t=/tmp/__db_a -trap 'rm -f $t ; exit 0' 0 -trap 'rm -f $t ; exit 1' 1 2 3 13 15 - -SWIG=swig -SWIG_DIR=../libdb_java -SWIG_FILE=$SWIG_DIR/db.i -PACKAGE="com.sleepycat.db.internal" - -die() { - echo "$@" >&2 - exit 1 -} - -[ -f $SWIG_FILE ] || die "Must be run from the dist directory" - -for api in java ; do - echo "Building $api API" - - swig_args="" - case $api in - java) - swig_args="-nodefault -package $PACKAGE $args" - ;; - esac - - $SWIG -Wall -$api $swig_args -I$SWIG_DIR \ - -o ../libdb_$api/db_${api}_wrap.c $SWIG_FILE || exit $? 
-done - -# Skip Java sources if run with "-n" -if [ "x$1" = "x-n" ] ; then - rm -f $SWIG_DIR/*.java - exit 0 -fi - -# Fixups for Java -JAVA_SRCTOP=../java/src -JAVA_PKGDIR=com/sleepycat/db/internal -JAVA_SRCDIR=$JAVA_SRCTOP/$JAVA_PKGDIR - -# SWIG 1.3.18 puts the Java files in the same directory as the native code. -cd $SWIG_DIR -[ -f Db.java ] || exit 1 - -for f in *.java ; do - case $f in - SWIGTYPE*) - die "Interface contains unresolved types: $f" - esac - rm -f $JAVA_SRCDIR/$f - perl -p $SWIG_DIR/java-post.pl < $f > $JAVA_SRCDIR/$f || exit $? - rm -f $f -done - -# db_config.h must be the first #include, move it to the top of the file. -( - echo '#include "db_config.h"' - sed '/#include "db_config.h"/d' < db_java_wrap.c -) > $t && cp $t db_java_wrap.c diff --git a/storage/bdb/dist/s_javah b/storage/bdb/dist/s_javah deleted file mode 100644 index 67c41d09c4d..00000000000 --- a/storage/bdb/dist/s_javah +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/sh - -# $Id: s_javah,v 1.1 2002/08/14 17:14:24 dda Exp $ -# -# Use javah to build the libdb_java/com_*.h header files. -# -# To run this, you will need a javac and javah in your PATH. -# If possible, install tools with a recent vintage, JDK 1.3 or higher is good. -# Using Sun's JDK rather than some other installation ensures -# that the header files will not be constantly changed. - -. ./RELEASE - -JAVAC=javac -JAVAH=javah -export CLASSPATH -CLASSPATH= - -# CLASSES are only those classes for which we have native methods. -D=com.sleepycat.db -CLASSES="$D.Dbc $D.DbEnv $D.Db $D.DbLock $D.DbLogc $D.DbLsn $D.Dbt $D.DbTxn $D.xa.DbXAResource" - -d=/tmp/__javah -c=$d/classes -trap 'rm -rf $d; exit 0' 0 1 2 3 13 15 - -rm -rf $d -mkdir $d || exit 1 -mkdir $c || exit 1 - -# Make skeleton versions of XA classes and interfaces -# We only need to compile them, not run them. -pkg="package javax.transaction.xa" -echo "$pkg; public interface XAResource {}" > $d/XAResource.java -echo "$pkg; public interface Xid {}" > $d/Xid.java -echo "$pkg; public class XAException extends Exception {}" \ - > $d/XAException.java - - -# Create the .class files and use them with javah to create the .h files -${JAVAC} -d $c $d/*.java \ - ../java/src/com/sleepycat/db/*.java \ - ../java/src/com/sleepycat/db/xa/*.java || exit 1 -${JAVAH} -classpath $c -d $d ${CLASSES} || exit 1 - -for cl in ${CLASSES}; do - h=`echo $cl | sed -e 's/\./_/g'`.h - t=$d/$h - f=../libdb_java/$h - if [ ! -f $t ]; then - echo "ERROR: $t does not exist" - exit 1 - fi - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) -done diff --git a/storage/bdb/dist/s_je2db b/storage/bdb/dist/s_je2db deleted file mode 100644 index a5c64197e57..00000000000 --- a/storage/bdb/dist/s_je2db +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/sh - - -# The examples must be hand-edited after they are copied: -# - add setInitializeCache(true), setInitializeLocking(true), setType(BTREE) -# - add null databaseName param to openDatabase() and openSecondaryDatabase() -# - remove foreign key configuration and imports - -COPY_EXAMPLES=0 - -JEDIR=$1 -if [ $# -eq 1 ] ; then - DBDIR=.. -else - DBDIR=$2 -fi - -if [ ! -d "$DBDIR/dbinc" -o ! 
-f "$JEDIR/build.xml" ] ; then - echo >&2 "Usage $0 /path/to/je [ /path/to/db ]" - exit 1 -fi - -JEDIR=$(cd "$JEDIR" ; /bin/pwd) -DBDIR=$(cd "$DBDIR" ; /bin/pwd) - -JESRC="$JEDIR/src" -JETEST="$JEDIR/test" -JEEXAMPLES="$JEDIR/examples" -DBSRC="$DBDIR/java/src" -DBTEST="$DBDIR/test/scr024/src" -DBEXAMPLES="$DBDIR/examples_java/src" -DIRMATCH="com/sleepycat\(/examples\)*/\(\(bind\)\|\(collections\)\|\(util\)\)" - -cd "$JESRC" -for d in `find . -type d | grep -v CVS | grep $DIRMATCH` ; do - #echo "$DBSRC/$d" - mkdir -p "$DBSRC/$d" -done -cd "$JETEST" -for d in `find . -type d | grep -v CVS | grep $DIRMATCH` ; do - #echo "$DBTEST/$d" - mkdir -p "$DBTEST/$d" -done -if [ $COPY_EXAMPLES -eq 1 ] ; then - cd "$JEEXAMPLES" - for d in `find . -type d | grep -v CVS | grep $DIRMATCH` ; do - #echo "$DBEXAMPLES/$d" - mkdir -p "$DBEXAMPLES/$d" - done -fi - -E1='s/com\.sleepycat\.je/com.sleepycat.db/g' -E2='/import com\.sleepycat\.db\.ForeignKeyNullifier/d' -E3='/implements/s/, ForeignKeyNullifier//' -E4='//,//d' -EXCLUDETESTS="\(\(ForeignKeyTest\)\|\(TupleSerialFactoryTest\)\\|\(XACollectionTest\)\)" - -cd "$JESRC" -for f in `find . -name '*.java' | grep $DIRMATCH` ; do - #echo $DBSRC/$f - sed -e "$E1" -e "$E2" -e "$E3" -e "$E4" < $f > $DBSRC/$f.sed.out - diff -q -I "\$\Id:" $DBSRC/$f $DBSRC/$f.sed.out || \ - mv -f $DBSRC/$f.sed.out $DBSRC/$f - rm -f $DBSRC/$f.sed.out -done - -cd "$JETEST" -for f in `find . -name '*.java' | grep $DIRMATCH | grep -v $EXCLUDETESTS` ; do - #echo $DBTEST/$f - sed -e "$E1" < $f > $DBTEST/$f.sed.out - diff -q -I "\$\Id:" $DBTEST/$f $DBTEST/$f.sed.out || \ - mv -f $DBTEST/$f.sed.out $DBTEST/$f - rm -f $DBTEST/$f.sed.out -done -cp -f "com/sleepycat/collections/test/serial/TestSerial.java.original" \ - "$DBTEST/com/sleepycat/collections/test/serial" - -if [ $COPY_EXAMPLES -eq 1 ] ; then - cd "$JEEXAMPLES" - for f in `find . -name '*.java' | grep $DIRMATCH` ; do - #echo $DBEXAMPLES/$f - sed -e "$E1" < $f > $DBEXAMPLES/$f.sed.out - diff -q -I "\$\Id:" $DBEXAMPLES/$f $DBEXAMPLES/$f.sed.out || \ - mv -f $DBEXAMPLES/$f.sed.out $DBEXAMPLES/$f - rm -f $DBEXAMPLES/$f.sed.out - done -fi - -exit 0 diff --git a/storage/bdb/dist/s_perm b/storage/bdb/dist/s_perm deleted file mode 100755 index 219a76835e6..00000000000 --- a/storage/bdb/dist/s_perm +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/sh - -# $Id: s_perm,v 12.0 2004/11/17 03:43:35 bostic Exp $ - -d=.. -echo 'Updating Berkeley DB source tree permissions...' 
- -run() -{ - #echo " $1 ($2)" - if [ -f "$d/$1" ]; then - chmod "$2" "$d/$1" - else - echo "$d/$1: no such file or directory" - exit 1 - fi -} - -run build_win32/include.tcl 664 -run dist/config.guess 555 -run dist/config.sub 555 -run dist/configure 555 -run dist/install-sh 555 -run dist/s_all 555 -run dist/s_config 555 -run dist/s_crypto 555 -run dist/s_include 555 -run dist/s_java 555 -run dist/s_java_const 555 -run dist/s_java_stat 555 -run dist/s_java_swig 555 -run dist/s_perm 555 -run dist/s_readme 555 -run dist/s_recover 555 -run dist/s_rpc 555 -run dist/s_symlink 555 -run dist/s_tags 555 -run dist/s_test 555 -run dist/s_vxworks 555 -run dist/s_win32 555 -run dist/s_win32_dsp 555 -run dist/vx_buildcd 555 -#run mod_db4/configure 555 - -#run perl/BerkeleyDB/dbinfo 555 -#run perl/BerkeleyDB/mkpod 555 - -#for i in `cd $d && find build_vxworks \ -# -name '*.wsp' -o -name '*.cdf' -o -name '*.wpj'`; do - #echo " $i (775)" -# chmod 775 $d/$i -#done diff --git a/storage/bdb/dist/s_readme b/storage/bdb/dist/s_readme deleted file mode 100644 index 1a56da1bff3..00000000000 --- a/storage/bdb/dist/s_readme +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh - -# $Id: s_readme,v 12.0 2004/11/17 03:43:35 bostic Exp $ -# -# Build the README. - -echo 'Updating Berkeley DB README file...' - -d=.. - -t=/tmp/__t -trap 'rm -f $t; exit 0' 0 1 2 3 13 15 - -. RELEASE - -cat << END_OF_README>$t -$DB_VERSION_STRING - -This is version $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH of Berkeley DB from Sleepycat Software. To view -the release and installation documentation, load the distribution file -docs/index.html into your web browser. -END_OF_README - -f=../README -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) diff --git a/storage/bdb/dist/s_recover b/storage/bdb/dist/s_recover deleted file mode 100755 index 9bea4a60493..00000000000 --- a/storage/bdb/dist/s_recover +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/sh - -# $Id: s_recover,v 12.0 2004/11/17 03:43:35 bostic Exp $ -# -# Build the automatically generated logging/recovery files. - -header=/tmp/__db_a -loglist=/tmp/__db_b -print=/tmp/__db_c -source=/tmp/__db_d -template=/tmp/__db_e -tmp=/tmp/__db_f - -trap 'rm -f /tmp/__db_[abcdef]; exit 1' 1 2 3 13 15 -trap 'rm -f /tmp/__db_[abcdef]; exit 0' 0 - -DIR="db dbreg btree fileops hash qam rep txn" - -# Check to make sure we haven't duplicated a log record entry, and build -# the list of log record types that the test suite uses. -for i in $DIR; do - for f in ../$i/*.src; do - # Grab the PREFIX; there should only be one per file, and - # so it's okay to just take the first. - grep '^PREFIX' $f | sed q - egrep '^BEGIN[ ]|^IGNORED[ ]|^DEPRECATED[ ]' $f | - awk '{print $1 "\t" $2 "\t" $3}' - done -done > $loglist -grep -v '^PREFIX' $loglist | - awk '{print $2 "\t" $3}' | sort -n -k 2 | uniq -d -f 1 > $tmp -[ -s $tmp ] && { - echo "DUPLICATE LOG VALUES:" - cat $tmp - rm -f $tmp - exit 1 -} -f=../test/logtrack.list -cmp $loglist $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $loglist $f && chmod 444 $f) - -# Build DB's recovery routines. 
-for i in $DIR; do - for f in ../$i/*.src; do - subsystem=`basename $f .src` - awk -f gen_rec.awk \ - -v header_file=$header \ - -v print_file=$print\ - -v source_file=$source \ - -v template_file=$template < $f - - f=../dbinc_auto/${subsystem}_auto.h - cmp $header $f > /dev/null 2>&1 || - (echo "Building $f" && - rm -f $f && cp $header $f && chmod 444 $f) - f=../$i/${subsystem}_auto.c - cmp $source $f > /dev/null 2>&1 || - (echo "Building $f" && - rm -f $f && cp $source $f && chmod 444 $f) - f=../$i/${subsystem}_autop.c - cmp $print $f > /dev/null 2>&1 || - (echo "Building $f" && - rm -f $f && cp $print $f && chmod 444 $f) - f=template/rec_${subsystem} - cmp $template $f > /dev/null 2>&1 || - (echo "Building $f" && - rm -f $f && cp $template $f && chmod 444 $f) - done -done - -# Build the example application's recovery routines. -#(cd ../examples_c/ex_apprec && sh auto_rebuild) diff --git a/storage/bdb/dist/s_rpc b/storage/bdb/dist/s_rpc deleted file mode 100644 index 7da75819e06..00000000000 --- a/storage/bdb/dist/s_rpc +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh - -# $Id: s_rpc,v 12.0 2004/11/17 03:43:35 bostic Exp $ -# -# Build the automatically generated RPC files - -echo "Building RPC client/server files..." - -. ./RELEASE - -t=/tmp/__db_a -trap 'rm -f $t ; exit 0' 0 -trap 'rm -f $t ; exit 1' 1 2 3 13 15 - -client_file=../rpc_client/gen_client.c -ctmpl_file=./template/gen_client_ret -server_file=../rpc_server/c/gen_db_server.c -stmpl_file=./template/db_server_proc -xdr_file=../rpc_server/db_server.x - -rm -f $client_file $ctmpl_file $server_file $stmpl_file $xdr_file - -# -# Generate client/server/XDR code -# -xidsize=\ -`awk '/^#define/ { if ($2 == "DB_XIDDATASIZE") { print $3 }}' ../dbinc/db.in` - -awk -f gen_rpc.awk \ - -v client_file=$client_file \ - -v ctmpl_file=$ctmpl_file \ - -v major=$DB_VERSION_MAJOR \ - -v minor=$DB_VERSION_MINOR \ - -v server_file=$server_file \ - -v stmpl_file=$stmpl_file \ - -v xdr_file=$xdr_file \ - -v xidsize=$xidsize < ../rpc_server/rpc.src - -chmod 444 $client_file $server_file diff --git a/storage/bdb/dist/s_symlink b/storage/bdb/dist/s_symlink deleted file mode 100755 index 576fbf3b54b..00000000000 --- a/storage/bdb/dist/s_symlink +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh - -# $Id: s_symlink,v 12.1 2004/12/30 21:27:57 bostic Exp $ - -echo 'Creating Berkeley DB source tree symbolic links...' 
- -build() -{ - #echo " $1 -> $2" - (cd ../`dirname $1` && rm -f `basename $1` && ln -s $2 `basename $1`) -} - -build btree/tags ../dist/tags -build build_unix/tags ../dist/tags -build clib/tags ../dist/tags -build common/tags ../dist/tags -build crypto/tags ../dist/tags -build cxx/tags ../dist/tags -build db/tags ../dist/tags -build db185/tags ../dist/tags -build db_archive/tags ../dist/tags -build db_checkpoint/tags ../dist/tags -build db_deadlock/tags ../dist/tags -build db_dump/tags ../dist/tags -build db_dump185/tags ../dist/tags -build db_hotbackup/tags ../dist/tags -build db_load/tags ../dist/tags -build db_printlog/tags ../dist/tags -build db_recover/tags ../dist/tags -build db_stat/tags ../dist/tags -build db_upgrade/tags ../dist/tags -build db_verify/tags ../dist/tags -build dbinc/tags ../dist/tags -build dbinc_auto/tags ../dist/tags -build dbm/tags ../dist/tags -build dbreg/tags ../dist/tags -build env/tags ../dist/tags -#build examples_c/tags ../dist/tags -#build examples_cxx/tags ../dist/tags -build fileops/tags ../dist/tags -build hash/tags ../dist/tags -build hmac/tags ../dist/tags -build hsearch/tags ../dist/tags -#build libdb_java/tags ../dist/tags -build lock/tags ../dist/tags -build log/tags ../dist/tags -build mp/tags ../dist/tags -build mutex/tags ../dist/tags -build os/tags ../dist/tags -#build os_vxworks/tags ../dist/tags -build os_win32/tags ../dist/tags -build qam/tags ../dist/tags -build rep/tags ../dist/tags -#build rpc_client/tags ../dist/tags -#build rpc_server/tags ../dist/tags -build sequence/tags ../dist/tags -#build tcl/tags ../dist/tags -build txn/tags ../dist/tags -build xa/tags ../dist/tags diff --git a/storage/bdb/dist/s_tags b/storage/bdb/dist/s_tags deleted file mode 100755 index 22613775d94..00000000000 --- a/storage/bdb/dist/s_tags +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/sh - -# $Id: s_tags,v 12.1 2005/10/25 14:21:21 bostic Exp $ -# -# Build tags files. - -files=`echo ../dbinc/*.h \ - ../dbinc/*.in \ - ../btree/*.[ch] \ - ../clib/*.[ch] \ - ../common/*.[ch] \ - ../crypto/*.[ch] \ - ../crypto/mersenne/*.[ch] \ - ../crypto/rijndael/*.[ch] \ - ../db/*.[ch] \ - ../db185/*.[ch] \ - ../dbm/*.[ch] \ - ../dbreg/*.[ch] \ - ../env/*.[ch] \ - ../fileops/*.[ch] \ - ../hash/*.[ch] \ - ../hmac/*.[ch] \ - ../hsearch/*.[ch] \ - ../lock/*.[ch] \ - ../log/*.[ch] \ - ../mp/*.[ch] \ - ../mutex/*.[ch] \ - ../os/*.[ch] \ - ../qam/*.[ch] \ - ../rep/*.[ch] \ - ../rpc_client/*.[ch] \ - ../rpc_server/c/*.[ch] \ - ../sequence/*.[ch] \ - ../tcl/*.[ch] \ - ../txn/*.[ch] \ - ../xa/*.[ch] \ - ../cxx/*.cpp \ - ../libdb_java/*.[ch] | sed 's/[^ ]*stub.c//g'` - -f=tags -echo "Building $f" -rm -f $f - -# Figure out what flags this ctags accepts. -flags="" -if ctags -d ../db/db.c 2>/dev/null; then - flags="-d $flags" -fi -if ctags -t ../db/db.c 2>/dev/null; then - flags="-t $flags" -fi -if ctags -w ../db/db.c 2>/dev/null; then - flags="-w $flags" -fi - -ctags $flags $files 2>/dev/null -chmod 444 $f - -for i in test_perf test_rep test_server; do - f=../$i/tags - echo "Building $f" - (cd ../$i && ctags $flags *.[ch] 2>/dev/null) - chmod 444 $f -done diff --git a/storage/bdb/dist/s_test b/storage/bdb/dist/s_test deleted file mode 100644 index 83b3c567587..00000000000 --- a/storage/bdb/dist/s_test +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/sh - -# $Id: s_test,v 12.2 2005/06/23 15:26:39 carol Exp $ -# -# Build the Tcl test files. - -msg1="# Automatically built by dist/s_test; may require local editing." -msg2="# Automatically built by dist/s_test; may require local editing." 
- -t=/tmp/__t -trap 'rm -f $t; exit 0' 0 1 2 3 13 15 - -. RELEASE - -(echo "$msg1" && \ - echo "" && \ - echo "set tclsh_path @TCL_TCLSH@" && \ - echo "set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@@LIBTSO_MODSUFFIX@" && \ - echo "" && \ - echo "set rpc_server localhost" && \ - echo "set rpc_path ." && \ - echo "set rpc_testdir \$rpc_path/TESTDIR" && \ - echo "" && \ - echo "set src_root @srcdir@/.." && \ - echo "set test_path @srcdir@/../test" && \ - echo "set je_root @srcdir@/../../je" && \ - echo "" && \ - echo "global testdir" && \ - echo "set testdir ./TESTDIR" && \ - echo "" && \ - echo "global dict" && \ - echo "global util_path" && \ - echo "" && \ - echo "global is_freebsd_test" && \ - echo "global is_hp_test" && \ - echo "global is_linux_test" && \ - echo "global is_qnx_test" && \ - echo "global is_sunos_test" && \ - echo "global is_windows_test" && \ - echo "global is_windows9x_test" && \ - echo "" && \ - echo "set KILL \"@db_cv_path_kill@\"") > $t - -f=../test/include.tcl -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -(echo "$msg1" && \ - echo "" && \ - echo "set tclsh_path SET_YOUR_TCLSH_PATH" && \ - echo "set tcllib ./Debug/libdb_tcl${DB_VERSION_MAJOR}${DB_VERSION_MINOR}d.dll" && \ - echo "" && \ - echo "set src_root .." && \ - echo "set test_path ../test" && \ - echo "set je_root ../../je" && \ - echo "" && \ - echo "global testdir" && \ - echo "set testdir ./TESTDIR" && \ - echo "" && \ - echo "global dict" && \ - echo "global util_path" && \ - echo "" && \ - echo "global is_freebsd_test" && \ - echo "global is_hp_test" && \ - echo "global is_linux_test" && \ - echo "global is_qnx_test" && \ - echo "global is_sunos_test" && \ - echo "global is_windows_test" && \ - echo "global is_windows9x_test" && \ - echo "" && \ - echo "set KILL ./dbkill.exe") > $t - -f=../build_win32/include.tcl -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -# Build the test directory TESTS file. -(echo $msg2; -cat `egrep -l '^#[ ][ ]*TEST' ../test/*.tcl` | -sed -e '/^#[ ][ ]*TEST/!{' \ - -e 's/.*//' \ - -e '}' | -cat -s | -sed -e '/TEST/{' \ - -e 's/^#[ ][ ]*TEST[ ]*//' \ - -e 's/^ //' \ - -e 'H' \ - -e 'd' \ - -e '}' \ - -e 's/.*//' \ - -e x \ - -e 's/\n/__LINEBREAK__/g' | -sort | -sed -e 's/__LINEBREAK__/\ -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\ -/' \ - -e 's/__LINEBREAK__/\ - /g' | -sed -e 's/^[ ][ ]*$//') > $t - -f=../test/TESTS -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) diff --git a/storage/bdb/dist/s_vxworks b/storage/bdb/dist/s_vxworks deleted file mode 100644 index de2e41b55b6..00000000000 --- a/storage/bdb/dist/s_vxworks +++ /dev/null @@ -1,302 +0,0 @@ -#!/bin/sh - -# $Id: s_vxworks,v 12.6 2005/11/03 17:46:13 bostic Exp $ -# -# Build the VxWorks files. - -msgc="/* DO NOT EDIT: automatically built by dist/s_vxworks. */" - -. RELEASE - -s=/tmp/__db_a -t=/tmp/__db_b -u=/tmp/__db_c -vxfilelist=/tmp/__db_d -vxsmallfiles=/tmp/__db_e - -trap 'rm -f $s $t $u $vxfilelist $vxsmallfiles ; exit 0' 0 -trap 'rm -f $s $t $u $vxfilelist $vxsmallfiles ; exit 1' 1 2 3 13 15 - -# Build the VxWorks automatically generated files. -cat < $s -/extern "C" {/{ -n -n -i\\ -\\ -/* Tornado 2 does not provide a standard C pre-processor #define. 
*/\\ -#ifndef __vxworks\\ -#define __vxworks\\ -#endif -} -/@inttypes_h_decl@/d -/@stddef_h_decl@/d -/@stdint_h_decl@/d -/@unistd_h_decl@/d -/@thread_h_decl@/d -s/@u_int8_decl@/typedef unsigned char u_int8_t;/ -/@int16_decl@/d -s/@u_int16_decl@/typedef unsigned short u_int16_t;/ -/@int32_decl@/d -s/@u_int32_decl@/typedef unsigned int u_int32_t;/ -s/@int64_decl@// -s/@u_int64_decl@/typedef unsigned long long u_int64_t;/ -/@u_char_decl@/d -/@u_short_decl@/d -/@u_int_decl@/d -/@u_long_decl@/d -/@ssize_t_decl@/d -s/@uintmax_t_decl@/typedef unsigned long uintmax_t;/ -s/@uintptr_t_decl@/typedef unsigned long uintptr_t;/ -s/@db_seq_decl@/typedef int db_seq_t;/ -/@pid_t_decl@/d -s/@db_threadid_t_decl@/typedef uintmax_t db_threadid_t;/ -s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/ -s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/ -s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/ -s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/ -s/@DB_VERSION_UNIQUE_NAME@// -s/@DB_CONST@// -s/@DB_PROTO1@/#undef __P/ -s/@DB_PROTO2@/#define __P(protos) protos/ -ENDOFSEDTEXT -(echo "$msgc" && - sed -f $s ../dbinc/db.in && - cat ../dbinc_auto/ext_prot.in) > $t -test `egrep '@.*@' $t` && { - egrep '@.*@' $t - echo 'Unexpanded autoconf variables found in VxWorks db.h.' - exit 1 -} -f=../build_vxworks/db.h -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -cat < $s -s/@INT64_FMT@/#define INT64_FMT "%lld"/ -s/@UINT64_FMT@/#define UINT64_FMT "%llu"/ -s/@PATH_SEPARATOR@/\/\\\\\\\\/ -s/@db_int_def@// -ENDOFSEDTEXT -(echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t -test `egrep '@.*@' $t` && { - egrep '@.*@' $t - echo 'Unexpanded autoconf variables found in VxWorks db_int.h.' - exit 1 -} -f=../build_vxworks/db_int.h -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -f=../build_vxworks/db_config.h -(echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" vx_config.in) > $t -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -f=../build_vxworks/db_config_small.h -(echo "$msgc" && - sed -e "s/__EDIT_DB_VERSION__/$DB_VERSION/" \ - -e "s;^#define.*HAVE_CRYPTO.*1;/* #undef HAVE_CRYPTO */;" \ - -e "s;^#define.*HAVE_HASH.*1;/* #undef HAVE_HASH */;" \ - -e "s;^#define.*HAVE_QUEUE.*1;/* #undef HAVE_QUEUE */;" \ - -e "s;^#define.*HAVE_REPLICATION.*1;/* #undef HAVE_REPLICATION */;" \ - -e "s;^#define.*HAVE_STATISTICS.*1;/* #undef HAVE_STATISTICS */;" \ - -e "s;^#define.*HAVE_VERIFY.*1;/* #undef HAVE_VERIFY */;" \ - vx_config.in) > $t -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -# Build a sed script that will change a "standard" DB utility into -# VxWorks-compatible code. -transform() -{ - # Build a sed script that will add argument parsing support and - # rename all of the functions to be private to this file. -cat <\\ -#define ERROR_RETURN ERROR\\ -\\ -int\\ -$1_main(argc, argv) -d -} -/^ while ((ch = getopt/i\\ -\\ __db_getopt_reset = 1; -/^[ ]*extern int optind;/s/;/, __db_getopt_reset;/ -ENDOFSEDTEXT - - # Replace all function names with VxWorks safe names. - # Function names are: - # Tokens starting at the beginning of the line, immediately - # followed by an opening parenthesis. - # Replace: - # Matches preceded by a non-C-token character and immediately - # followed by an opening parenthesis. - # Matches preceded by a non-C-token character and immediately - # followed by " __P". 
- # Matches starting at the beginning of the line, immediately - # followed by an opening parenthesis. - for k in `sed -e 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)(.*$/\1/p' -e d $2`; do - echo "s/\([^a-zA-Z0-9_]\)\($k(\)/\1$1_\2/g" - echo "s/\([^a-zA-Z0-9_]\)\($k[ ]__P\)/\1$1_\2/g" - echo "s/^\($k(\)/$1_\1/g" - done - - # There is a special case the rules above don't catch: - # a txn_compare function used as an argument to qsort(3). - # a print_app_record function used as argument to - # dbenv->set_app_dispatch). - echo "s/, txn_compare);/, db_stat_txn_compare);/" - echo "s/, print_app_record)) /, db_printlog_print_app_record)) /" - - # We convert the ex_access sample into dbdemo for VxWorks. - echo 's/progname = "ex_access";/progname = "dbdemo";/' - - # The example programs have to load db_int.h, not db.h -- else - # they won't have the right Berkeley DB prototypes for getopt - # and friends. - echo '/#include.*db.h/c\' - echo '#include \' - echo '#include ' -} - -PROGRAM_LIST="db_archive db_checkpoint db_deadlock db_dump db_hotbackup \ - db_load db_printlog db_recover db_stat db_upgrade db_verify ex_access" - -# Build VxWorks versions of the utilities. -for i in $PROGRAM_LIST; do - if [ $i = "ex_access" ]; then - target=dbdemo - dir=../examples_c - else - target=$i - dir=../$i - fi - - transform $target $dir/$i.c > $s - sed -f $s < $dir/$i.c > $t - - f=../build_vxworks/$target/$target.c - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) -done - -# Build VxWorks Tornado 2.0 project files for the utilities. -for i in $PROGRAM_LIST; do - if [ $i = "ex_access" ]; then - target=dbdemo - dir=../examples_c - else - target=$i - dir=../$i - fi - - sed "s/__DB_APPLICATION_NAME__/$target/g" < vx_2.0/wpj.in > $t - f=../build_vxworks/$target/${target}20.wpj - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - sed "s/__DB_APPLICATION_NAME__/$target/g" < vx_2.2/wpj.in > $t - f=../build_vxworks/$target/${target}22.wpj - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) -done - -# Build the list of files VxWorks knows about. -sed -e '/^$/d' -e '/^[ #]/d' srcfiles.in | - egrep -w vx | - sed 's/[ ].*//' > $vxfilelist - -# Build the list of files VxWorks knows about. -sed -e '/^$/d' -e '/^[ #]/d' srcfiles.in | - egrep -w vxsmall | - sed 's/[ ].*//' > $vxsmallfiles - -# Build VxWorks Tornado 2.0 project files for the library itself. 
-for v in 0 2 ; do - # - # Build regular project files - # - (cat vx_2.${v}/BerkeleyDB.wpj - for i in `cat $vxfilelist`; do - o=" FILE_\$(PRJ_DIR)/../$i" - echo "${o}_dependDone" - echo "TRUE" - echo "" - echo - echo "${o}_dependencies" - echo "\$(PRJ_DIR)/db_config.h \\" - echo " \$(PRJ_DIR)/db_int.h \\" - echo " \$(PRJ_DIR)/db.h" - echo "" - echo - echo "${o}_objects" - echo "`basename $i .c`.o" - echo "" - echo - echo "${o}_tool" - echo "C/C++ compiler" - echo "" - echo - done - echo " PROJECT_FILES" - sed -e '$!s/$/ \\/' \ - -e 's/^/$(PRJ_DIR)\/..\//' \ - -e '1!s/^/ /' < $vxfilelist - echo "" - echo - echo " userComments" - echo "BerkeleyDB" - echo "") > $t - # - # Build small lib project files - # - (cat vx_2.${v}/BerkeleyDBsmall.wpj - for i in `cat $vxsmallfiles`; do - o=" FILE_\$(PRJ_DIR)/../$i" - echo "${o}_dependDone" - echo "TRUE" - echo "" - echo - echo "${o}_dependencies" - echo "\$(PRJ_DIR)/db_config.h \\" - echo " \$(PRJ_DIR)/db_int.h \\" - echo " \$(PRJ_DIR)/db.h" - echo "" - echo - echo "${o}_objects" - echo "`basename $i .c`.o" - echo "" - echo - echo "${o}_tool" - echo "C/C++ compiler" - echo "" - echo - done - echo " PROJECT_FILES" - sed -e '$!s/$/ \\/' \ - -e 's/^/$(PRJ_DIR)\/..\//' \ - -e '1!s/^/ /' < $vxsmallfiles - echo "" - echo - echo " userComments" - echo "BerkeleyDB" - echo "") > $u - f=../build_vxworks/BerkeleyDB2${v}.wpj - cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - f=../build_vxworks/BerkeleyDB2${v}small.wpj - cmp $u $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $u $f && chmod 444 $f) -done - diff --git a/storage/bdb/dist/s_win32 b/storage/bdb/dist/s_win32 deleted file mode 100644 index 490bcc888c0..00000000000 --- a/storage/bdb/dist/s_win32 +++ /dev/null @@ -1,147 +0,0 @@ -#!/bin/sh - -# $Id: s_win32,v 12.10 2005/11/03 17:46:13 bostic Exp $ -# -# Build Windows/32 include files. - -msgc="/* DO NOT EDIT: automatically built by dist/s_win32. */" -msgw="; DO NOT EDIT: automatically built by dist/s_win32." - -. RELEASE - -s=/tmp/__db_a$$ -t=/tmp/__db_b$$ -rm -f $s $t - -trap 'rm -f $s $t ; exit 1' 1 2 3 13 15 - -# Build the Win32 automatically generated files. 
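The db.h, db_int.h and db_cxx.h builds that follow all work the same way: a sed program (written with a here-document terminated by ENDOFSEDTEXT) expands the @...@ placeholders in the ../dbinc/*.in templates into platform-specific headers. A minimal sketch of that substitution step is below; it uses only two illustrative placeholders and a hard-coded version number, whereas the real scripts take the values from ./RELEASE and write the result under build_win32 or build_vxworks.

#!/bin/sh
# Sketch only: expand @placeholder@ tokens in a template into a header.
# The substitutions and the ./db.h output path are illustrative, not the
# full set used by s_win32/s_vxworks.
s=/tmp/__sed.$$
trap 'rm -f $s; exit 0' 0 1 2 3 13 15

cat > $s <<'ENDOFSEDTEXT'
s/@u_int8_decl@/typedef unsigned char u_int8_t;/
s/@DB_VERSION_MAJOR@/4/
ENDOFSEDTEXT

sed -f $s ../dbinc/db.in > ./db.h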
-cat < $s -/@inttypes_h_decl@/d -/@stdint_h_decl@/d -s/@stddef_h_decl@/#include / -/@unistd_h_decl@/d -/@thread_h_decl@/d -s/@u_int8_decl@/typedef unsigned char u_int8_t;/ -s/@int16_decl@/typedef short int16_t;/ -s/@u_int16_decl@/typedef unsigned short u_int16_t;/ -s/@int32_decl@/typedef int int32_t;/ -s/@u_int32_decl@/typedef unsigned int u_int32_t;/ -s/@int64_decl@/typedef __int64 int64_t;/ -s/@u_int64_decl@/typedef unsigned __int64 u_int64_t;/ -s/@db_seq_decl@/typedef int64_t db_seq_t;/ -s/@pid_t_decl@/typedef int pid_t;/ -s/@db_threadid_t_decl@/typedef u_int32_t db_threadid_t;/ -/@u_char_decl@/{ - i\\ -#ifndef _WINSOCKAPI_ - s/@u_char_decl@/typedef unsigned char u_char;/ -} -s/@u_short_decl@/typedef unsigned short u_short;/ -s/@u_int_decl@/typedef unsigned int u_int;/ -/@u_long_decl@/{ - s/@u_long_decl@/typedef unsigned long u_long;/ - a\\ -#endif -} -/@ssize_t_decl@/{ - i\\ -#ifdef _WIN64\\ -typedef int64_t ssize_t;\\ -#else\\ -typedef int32_t ssize_t;\\ -#endif - d -} -s/@uintmax_t_decl@/typedef u_int64_t uintmax_t;/ -/@uintptr_t_decl@/{ - i\\ -#ifdef _WIN64\\ -typedef u_int64_t uintptr_t;\\ -#else\\ -typedef u_int32_t uintptr_t;\\ -#endif - d -} -s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/ -s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/ -s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/ -s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/ -s/@DB_VERSION_UNIQUE_NAME@// -s/@DB_CONST@// -s/@DB_PROTO1@/#undef __P/ -s/@DB_PROTO2@/#define __P(protos) protos/ -ENDOFSEDTEXT -(echo "$msgc" && - sed -f $s ../dbinc/db.in && - cat ../dbinc_auto/ext_prot.in) > $t -test `egrep '@.*@' $t` && { - egrep '@.*@' $t - echo 'Unexpanded autoconf variables found in Windows db.h.' - exit 1 -} -f=../build_win32/db.h -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -cat < $s -s/@cxx_have_stdheaders@/#define HAVE_CXX_STDHEADERS 1/ -ENDOFSEDTEXT -(echo "$msgc" && sed -f $s ../dbinc/db_cxx.in) > $t -test `egrep '@.*@' $t` && { - egrep '@.*@' $t - echo 'Unexpanded autoconf variables found in Windows db_cxx.h.' - exit 1 -} -f=../build_win32/db_cxx.h -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -cat < $s -s/@INT64_FMT@// -s/@UINT64_FMT@// -s/@PATH_SEPARATOR@/\\\\\\\\\/:/ -s/@db_int_def@// -ENDOFSEDTEXT -(echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t -test `egrep '@.*@' $t` && { - egrep '@.*@' $t - echo 'Unexpanded autoconf variables found in Windows db_int.h.' 
- exit 1 -} -f=../build_win32/db_int.h -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -f=../build_win32/db_config.h -(echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" win_config.in) > $t -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -f=../build_win32/libdb.rc -cat < $s -s/%MAJOR%/$DB_VERSION_MAJOR/ -s/%MINOR%/$DB_VERSION_MINOR/ -s/%PATCH%/$DB_VERSION_PATCH/ -ENDOFSEDTEXT -sed -f $s ../build_win32/libdbrc.src > $t -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -f=../build_win32/libdb.def -(echo $msgw && - echo && - echo EXPORTS; -a=1 -for i in `sed -e '/^$/d' -e '/^#/d' win_exports.in`; do - echo " $i @$a" - a=`expr $a + 1` -done) > $t -cmp $t $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f) - -f=../build_win32/win_db.h -i=win_db.in -cmp $i $f > /dev/null 2>&1 || - (echo "Building $f" && rm -f $f && cp $i $f && chmod 444 $f) - -rm -f $s $t diff --git a/storage/bdb/dist/s_win32_dsp b/storage/bdb/dist/s_win32_dsp deleted file mode 100644 index 59d3664a5f0..00000000000 --- a/storage/bdb/dist/s_win32_dsp +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/sh - -# $Id: s_win32_dsp,v 12.3 2005/10/20 01:45:53 mjc Exp $ -# -# Build Windows/32 .dsp files. - -. RELEASE - -SRCFILES=srcfiles.in - -create_dsp() -{ - projname="$1" # name of the .dsp file - match="$2" # the string used to egrep the $sources file - sources="$3" # a modified version of $SRCFILES to facilitate matches - dsptemplate="$4" # overall template file for the .dsp - extra_cppflags="$5" # extra flags to send to compiler - release_libs="$6" # libraries to link against in Release builds - debug_libs="$7" # libraries to link against in Debug builds - lib_suffix="$8" # the library name is libdb@lib_suffix@@VERSION@ - - srctemplate="$BUILDDIR/srcfile_dsp.src" # template file for the src file fragments - dspoutput=$BUILDDIR/$projname.dsp - - - postbuild=$dspoutput.postbuild - if [ ! -f $postbuild ] ; then - postbuild=/dev/null - fi - - rm -f $dspoutput.insert - for srcpath in `egrep "$match" $sources | sed -e 's/[ ].*//'` - do - # take the path name and break it up, converting / to \\. - # so many backslashes needed because of shell quoting and - # sed quoting -- we'll end up with two backslashes for every - # forward slash, but we need that when feeding that to the - # later sed command. - set - `echo $srcpath | sed -e 's;\(.*\)/;../\\1 ;' \ - -e "s;$BUILDDIR;.;" \ - -e 's;/;\\\\\\\\;g'` - srcdir="$1" - srcfile="$2" - sed -e "s/@srcdir@/$srcdir/g" \ - -e "s/@srcfile@/$srcfile/g" \ - < $srctemplate >> $dspoutput.insert - done - sed -e "/@SOURCE_FILES@/r$dspoutput.insert" \ - -e "/@SOURCE_FILES@/d" \ - -e "/@POST_BUILD@/r$postbuild" \ - -e "/@POST_BUILD@/d" \ - -e "s/@project_name@/$projname/g" \ - -e "s/@bin_rel_dest@/Release/g" \ - -e "s/@lib_rel_dest@/Release/g" \ - -e "s/@bin_debug_dest@/Debug/g" \ - -e "s/@lib_debug_dest@/Debug/g" \ - -e "s,@extra_cppflags@,$extra_cppflags,g" \ - -e "s,@release_libs@,$release_libs,g" \ - -e "s,@debug_libs@,$debug_libs,g" \ - -e "s,@lib_suffix@,$lib_suffix,g" \ - -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g" \ - -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g" \ - < $dsptemplate > $dspoutput.new - - # Set the file mode to 644 because the VC++ IDE needs a writeable file - # in our development environment. 
- cmp $dspoutput.new $dspoutput > /dev/null 2>&1 || - (echo "Building $dspoutput" && rm -f $dspoutput && - cp $dspoutput.new $dspoutput && chmod 664 $dspoutput) - rm -f $dspoutput.insert $dspoutput.new -} - -TMPA=/tmp/swin32dsp$$a -trap "rm -f $TMPA; exit 1" 1 2 3 15 - -# create a copy of the srcfiles with comments and empty lines removed. -# add a space at the end of each list of modules so that each module -# can be unambiguously matched e.g. ' dynamic ' -sed -e "s/#.*$//" \ - -e "/^[ ]*$/d" \ - -e "s/[ ][ ]*/ /" \ - -e "s/[ ]*$//" \ - -e "/[ ]/!d" \ - -e "s/$/ /" < $SRCFILES > $TMPA - -# get a list of all modules mentioned -# -MODULES="`sed -e 's/^[^ ]* //' < $TMPA \ - | tr ' ' '\012' | sort | uniq`" - -for BUILDDIR in ../build_win32 -do - for module in $MODULES - do - case "$module" in - dynamic ) - create_dsp db_dll " $module " $TMPA $BUILDDIR/dynamic_dsp.src - ;; - small ) - create_dsp db_small " $module " $TMPA $BUILDDIR/static_dsp.src \ - '/D "HAVE_SMALLBUILD"' '' '' _small - ;; - static ) - create_dsp db_static " $module " $TMPA $BUILDDIR/static_dsp.src - ;; - java ) - create_dsp db_java " $module " $TMPA $BUILDDIR/dynamic_dsp.src '' \ - 'libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib' \ - 'libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib' _java - ;; - tcl ) - create_dsp db_tcl " $module " $TMPA $BUILDDIR/dynamic_dsp.src \ - '/D "DB_TCL_SUPPORT"' \ - 'libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib tcl84.lib' \ - 'libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib tcl84g.lib' _tcl - ;; - testutil ) - create_dsp db_test " $module " $TMPA $BUILDDIR/app_dsp.src \ - '' '/out:"$(OUTDIR)/dbkill.exe"' '/out:"$(OUTDIR)/dbkill.exe"' - ;; - app=ex_repquote ) - create_dsp ex_repquote " $module " $TMPA \ - $BUILDDIR/app_dsp.src '' 'ws2_32.lib' 'ws2_32.lib' - ;; - app=* ) - appname=`echo $module | sed -e 's/^app=//'` - create_dsp $appname " $module " $TMPA \ - $BUILDDIR/app_dsp.src - ;; - vx|vxsmall ) - ;; - * ) - echo "s_win32_dsp: module name $module in $SRCFILES is unknown type" - ;; - esac - done -done - -rm -f $TMPA diff --git a/storage/bdb/dist/s_winmsi b/storage/bdb/dist/s_winmsi deleted file mode 100644 index 23b9afe4ef9..00000000000 --- a/storage/bdb/dist/s_winmsi +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash - -# $Id: s_winmsi,v 1.7 2005/10/26 13:29:32 dda Exp $ -# -# Note: The s_winmsi script in Berkeley DB core closely parallels the -# s_winmsi script in Berkeley DB/XML. If you change one, -# consider whether your changes apply to the other. -# As of this writing, the two s_winmsi scripts 'diff' very closely, and -# identical portions have been factored into functions in s_winmsi.fcn. -# -# Usage: s_winmsi [ options ] -# -# See the Usage() function in s_winmsi.fcn for a full list of options. -# By default, this script expects a db-X.Y.Z.NC.zip file -# to be in this directory, and uses it to build all binaries -# needed for an Windows install, and finally builds the an -# output db-X.Y.Z.NC.msi file that can be installed on -# Windows XP and 2000. -# -# The major other inputs to this script are these files: -# -# features.in list of choosable features (like Java,PHP,...) -# files.in what files are in each feature and where they belong -# links.in a list of URLs that end up as part of the Start Menu -# environment.in a list of environment vars that must be set -# -# This script does a number of operations, using the directory -# './winmsi/stage' as a staging area: -# -# extracts the contents of the input ZIP file and uses those -# files (minus docs/...) 
to build a Sources directory for -# the Sources features. -# -# builds Berkeley DB using Visual Studio tools using a .BAT -# script derived from winbuild.in . -# -# builds Perl and other APIs . -# -# uses {features,files,links,environment}.in to build some include -# files in WiX XML format. These files are named -# *.wixinc (e.g. directory.wixinc) -# -# run m4 on dbcorewix.in to create dbcore.wxs . dbcorewix.in -# uses m4 macros to allow reasonable refactoring of repeated -# UI code. Also, m4 is used to include the files created in -# the previous step. -# -# Use the WiX compiler/linker on the .wxs files to create the .msi file. -# -################################################################ - -# Define all needed shell functions -. ./winmsi/s_winmsi.fcn - -ERRORLOG="$0".log -SetupErrorLog - -# Do this before parsing options, we need the version number -. ./RELEASE -dbver=db-$DB_VERSION.NC - -# Set variables used by functions to customize this installer -PRODUCT_NAME="Berkeley DB" -PRODUCT_VERSION="$DB_VERSION" -PRODUCT_STAGE=`pwd`/winmsi/stage -PRODUCT_LICENSEDIR="${PRODUCT_STAGE}/$dbver" -PRODUCT_SUB_BLDDIR="${PRODUCT_STAGE}/$dbver" -PRODUCT_BLDDIR="${PRODUCT_STAGE}/$dbver" -PRODUCT_SRCDIR="${PRODUCT_STAGE}/$dbver" -PRODUCT_DBBUILDDIR="${PRODUCT_STAGE}/$dbver/build_unix" -PRODUCT_SHARED_WINMSIDIR=`pwd`/winmsi -PRODUCT_IMAGEDIR=$PRODUCT_SHARED_WINMSIDIR/images -PRODUCT_ZIP_FILEFMT="db-X.Y.Z.NC.zip" -PRODUCT_MSI_FILEFMT="db-X.Y.Z.NC.msi" - -PRODUCT_MAJOR=`echo "$PRODUCT_VERSION" | \ - sed -e 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)/\1/'` -PRODUCT_MINOR=`echo "$PRODUCT_VERSION" | \ - sed -e 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)/\2/'` -PRODUCT_PATCH=`echo "$PRODUCT_VERSION" | \ - sed -e 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)/\3/'` -PRODUCT_MAJMIN="${PRODUCT_MAJOR}${PRODUCT_MINOR}" - -# Gather command line options, and use reasonable defaults -SetupOptions \ - -input "$dbver.zip" \ - -output "$dbver.msi" \ - "$@" - -if [ "$OPT_USEBUILD" != '' ]; then - PRODUCT_BLDDIR="${OPT_USEBUILD}" - PRODUCT_SUB_BLDDIR="${OPT_USEBUILD}" -fi - -Progress "s_winmsi starting, errors to $ERRORLOG" - -# Fail fast for certain missing files - -RequireCygwin -RequireJava -RequireTcl -RequireWix -RequirePerl - -CreateStage -cd ${PRODUCT_STAGE} - - -CreateSources ${PRODUCT_STAGE}/Sources - -# The docs are put into a separate feature set -mv ${PRODUCT_STAGE}/Sources/docs ${PRODUCT_STAGE}/ - -# Build everything unless we were told to use a preexisting build -if [ "$OPT_USEBUILD" = '' ]; then - CreateWindowsBuild - CreateWindowsSystem - CreateInclude \ - ${PRODUCT_SUB_BLDDIR}/installed_include \ - ${PRODUCT_SUB_BLDDIR}/dbinc/* \ - ${PRODUCT_SUB_BLDDIR}/dbinc_auto/* \ - ${PRODUCT_SUB_BLDDIR}/build_win32/*.h - CreateDbPerl -fi - -if ! "$OPT_SKIPGEN" ; then - CreateLicenseRtf ../../../LICENSE license.rtf - CreateWixIncludeFiles -fi - -CreateMsi ../dbcorewix.in dbcore.wxs "$OPT_OUTFILE" - -Progress "s_winmsi finished, $OPT_OUTFILE created." -exit 0 - diff --git a/storage/bdb/dist/srcfiles.in b/storage/bdb/dist/srcfiles.in deleted file mode 100644 index 530c216934b..00000000000 --- a/storage/bdb/dist/srcfiles.in +++ /dev/null @@ -1,405 +0,0 @@ -# $Id: srcfiles.in,v 12.24 2005/10/27 14:45:25 bostic Exp $ -# -# This is an input file for the s_win32_dsp and s_vxworks scripts. It lists -# the source files in the Berkeley DB tree and notes which are used to build -# the Win/32 and VxWorks libraries. -# -# Please keep this list sorted alphabetically! 
-# -# Each non-blank, non-comment line is of the form -# filename module [ module ...] -# -# The possible modules, including the name of the project (.dsp) file: -# -# app=NAME Linked into application NAME.exe (db_NAME.dsp) -# dynamic File is in the Windows DLL (db_dll.dsp) -# small File is in the small Windows library (db_small.dsp) -# static File is in the Windows static library (db_static.dsp) -# java File is in the Windows Java DLL (db_java.dsp) -# tcl File is in the Windows tcl DLL (db_tcl.dsp) -# testutil File is used for Windows testing (db_test.dsp) -# vx File is in the VxWorks library. -# vxsmall File is in the small VxWorks library. - -btree/bt_compact.c dynamic small static vx vxsmall -btree/bt_compare.c dynamic small static vx vxsmall -btree/bt_conv.c dynamic small static vx vxsmall -btree/bt_curadj.c dynamic small static vx vxsmall -btree/bt_cursor.c dynamic small static vx vxsmall -btree/bt_delete.c dynamic small static vx vxsmall -btree/bt_method.c dynamic small static vx vxsmall -btree/bt_open.c dynamic small static vx vxsmall -btree/bt_put.c dynamic small static vx vxsmall -btree/bt_rec.c dynamic small static vx vxsmall -btree/bt_reclaim.c dynamic small static vx vxsmall -btree/bt_recno.c dynamic small static vx vxsmall -btree/bt_rsearch.c dynamic small static vx vxsmall -btree/bt_search.c dynamic small static vx vxsmall -btree/bt_split.c dynamic small static vx vxsmall -btree/bt_stat.c dynamic small static vx vxsmall -btree/bt_upgrade.c dynamic small static vx vxsmall -btree/bt_verify.c dynamic static vx -btree/btree_auto.c dynamic small static vx vxsmall -btree/btree_autop.c app=db_printlog -build_vxworks/db_archive/db_archive.c -build_vxworks/db_checkpoint/db_checkpoint.c -build_vxworks/db_deadlock/db_deadlock.c -build_vxworks/db_dump/db_dump.c -build_vxworks/db_hotbackup/db_hotbackup.c -build_vxworks/db_load/db_load.c -build_vxworks/db_printlog/db_printlog.c -build_vxworks/db_recover/db_recover.c -build_vxworks/db_stat/db_stat.c -build_vxworks/db_upgrade/db_upgrade.c -build_vxworks/db_verify/db_verify.c -build_vxworks/dbdemo/dbdemo.c -build_win32/dbkill.cpp testutil -build_win32/libdb.def dynamic -build_win32/libdb.rc dynamic -build_win32/libdb_tcl.def tcl -clib/getcwd.c -clib/getopt.c vx vxsmall -clib/memcmp.c -clib/memmove.c -clib/raise.c -clib/snprintf.c vx vxsmall -clib/strcasecmp.c dynamic small static vx vxsmall -clib/strdup.c vx vxsmall -clib/strerror.c -clib/strtol.c -clib/strtoul.c -common/crypto_stub.c small vxsmall -common/db_byteorder.c dynamic small static vx vxsmall -common/db_clock.c dynamic static vx -common/db_err.c dynamic small static vx vxsmall -common/db_getlong.c dynamic small static vx vxsmall -common/db_idspace.c dynamic small static vx vxsmall -common/db_log2.c dynamic small static vx vxsmall -common/util_arg.c vx vxsmall -common/util_cache.c dynamic small static vx vxsmall -common/util_log.c dynamic small static vx vxsmall -common/util_sig.c dynamic small static vx vxsmall -crypto/aes_method.c dynamic static vx -crypto/crypto.c dynamic static vx -crypto/mersenne/mt19937db.c dynamic static vx -crypto/rijndael/rijndael-alg-fst.c dynamic static vx -crypto/rijndael/rijndael-api-fst.c dynamic static vx -cxx/cxx_db.cpp dynamic small static -cxx/cxx_dbc.cpp dynamic small static -cxx/cxx_dbt.cpp dynamic small static -cxx/cxx_env.cpp dynamic small static -cxx/cxx_except.cpp dynamic small static -cxx/cxx_lock.cpp dynamic small static -cxx/cxx_logc.cpp dynamic small static -cxx/cxx_mpool.cpp dynamic small static -cxx/cxx_multi.cpp dynamic small static 
-cxx/cxx_seq.cpp dynamic small static -cxx/cxx_txn.cpp dynamic small static -db/crdel_auto.c dynamic small static vx vxsmall -db/crdel_autop.c app=db_printlog -db/crdel_rec.c dynamic small static vx vxsmall -db/db.c dynamic small static vx vxsmall -db/db_am.c dynamic small static vx vxsmall -db/db_auto.c dynamic small static vx vxsmall -db/db_autop.c app=db_printlog -db/db_cam.c dynamic small static vx vxsmall -db/db_conv.c dynamic small static vx vxsmall -db/db_dispatch.c dynamic small static vx vxsmall -db/db_dup.c dynamic small static vx vxsmall -db/db_iface.c dynamic small static vx vxsmall -db/db_join.c dynamic small static vx vxsmall -db/db_meta.c dynamic small static vx vxsmall -db/db_method.c dynamic small static vx vxsmall -db/db_open.c dynamic small static vx vxsmall -db/db_overflow.c dynamic small static vx vxsmall -db/db_ovfl_vrfy.c dynamic static vx -db/db_pr.c dynamic small static vx vxsmall -db/db_rec.c dynamic small static vx vxsmall -db/db_reclaim.c dynamic small static vx vxsmall -db/db_remove.c dynamic small static vx vxsmall -db/db_rename.c dynamic small static vx vxsmall -db/db_ret.c dynamic small static vx vxsmall -db/db_setid.c dynamic small static vx vxsmall -db/db_setlsn.c dynamic small static vx vxsmall -db/db_stati.c dynamic small static vx vxsmall -db/db_truncate.c dynamic small static vx vxsmall -db/db_upg.c dynamic small static vx vxsmall -db/db_upg_opd.c dynamic small static vx vxsmall -db/db_vrfy.c dynamic static vx -db/db_vrfy_stub.c small vxsmall -db/db_vrfyutil.c dynamic static vx -db185/db185.c -db_archive/db_archive.c app=db_archive -db_checkpoint/db_checkpoint.c app=db_checkpoint -db_deadlock/db_deadlock.c app=db_deadlock -db_dump/db_dump.c app=db_dump -db_dump185/db_dump185.c -db_hotbackup/db_hotbackup.c app=db_hotbackup -db_load/db_load.c app=db_load -db_printlog/db_printlog.c app=db_printlog -db_recover/db_recover.c app=db_recover -db_server_clnt.c -db_server_svc.c -db_server_xdr.c -db_stat/db_stat.c app=db_stat -db_upgrade/db_upgrade.c app=db_upgrade -db_verify/db_verify.c app=db_verify -dbm/dbm.c dynamic static -dbreg/dbreg.c dynamic small static vx vxsmall -dbreg/dbreg_auto.c dynamic small static vx vxsmall -dbreg/dbreg_autop.c app=db_printlog -dbreg/dbreg_rec.c dynamic small static vx vxsmall -dbreg/dbreg_stat.c dynamic small static vx vxsmall -dbreg/dbreg_util.c dynamic small static vx vxsmall -env/db_salloc.c dynamic small static vx vxsmall -env/db_shash.c dynamic small static vx vxsmall -env/env_failchk.c dynamic small static vx vxsmall -env/env_file.c dynamic small static vx vxsmall -env/env_method.c dynamic small static vx vxsmall -env/env_open.c dynamic small static vx vxsmall -env/env_recover.c dynamic small static vx vxsmall -env/env_region.c dynamic small static vx vxsmall -env/env_register.c dynamic small static vx vxsmall -env/env_stat.c dynamic small static vx vxsmall -examples_c/bench_001.c -examples_c/csv/DbRecord.c app=ex_csvload app=ex_csvquery -examples_c/csv/code.c app=ex_csvcode -examples_c/csv/csv_local.c app=ex_csvload app=ex_csvquery -examples_c/csv/db.c app=ex_csvload app=ex_csvquery -examples_c/csv/load.c app=ex_csvload -examples_c/csv/load_main.c app=ex_csvload -examples_c/csv/query.c app=ex_csvquery -examples_c/csv/query_main.c app=ex_csvquery -examples_c/csv/util.c app=ex_csvload app=ex_csvquery -examples_c/ex_access.c app=ex_access -examples_c/ex_apprec/ex_apprec.c -examples_c/ex_apprec/ex_apprec_auto.c -examples_c/ex_apprec/ex_apprec_autop.c -examples_c/ex_apprec/ex_apprec_rec.c -examples_c/ex_btrec.c app=ex_btrec 
-examples_c/ex_dbclient.c -examples_c/ex_env.c app=ex_env -examples_c/ex_lock.c app=ex_lock -examples_c/ex_mpool.c app=ex_mpool -examples_c/ex_repquote/ex_rq_client.c app=ex_repquote -examples_c/ex_repquote/ex_rq_main.c app=ex_repquote -examples_c/ex_repquote/ex_rq_master.c app=ex_repquote -examples_c/ex_repquote/ex_rq_net.c app=ex_repquote -examples_c/ex_repquote/ex_rq_util.c app=ex_repquote -examples_c/ex_sequence.c app=ex_sequence -examples_c/ex_thread.c -examples_c/ex_tpcb.c app=ex_tpcb -examples_c/getting_started/example_database_load.c app=example_database_load -examples_c/getting_started/example_database_read.c app=example_database_read -examples_c/getting_started/gettingstarted_common.c app=example_database_load app=example_database_read -examples_c/txn_guide/txn_guide.c app=ex_txnguide -examples_c/txn_guide/txn_guide_inmemory.c app=ex_txnguide_inmem -examples_cxx/AccessExample.cpp app=excxx_access -examples_cxx/BtRecExample.cpp app=excxx_btrec -examples_cxx/EnvExample.cpp app=excxx_env -examples_cxx/LockExample.cpp app=excxx_lock -examples_cxx/MpoolExample.cpp app=excxx_mpool -examples_cxx/SequenceExample.cpp app=excxx_sequence -examples_cxx/TpcbExample.cpp app=excxx_tpcb -examples_cxx/getting_started/MyDb.cpp app=excxx_example_database_load app=excxx_example_database_read -examples_cxx/getting_started/excxx_example_database_load.cpp app=excxx_example_database_load -examples_cxx/getting_started/excxx_example_database_read.cpp app=excxx_example_database_read -examples_cxx/txn_guide/TxnGuide.cpp app=excxx_txnguide -examples_cxx/txn_guide/TxnGuideInMemory.cpp app=excxx_txnguide_inmem -fileops/fileops_auto.c dynamic small static vx vxsmall -fileops/fileops_autop.c app=db_printlog -fileops/fop_basic.c dynamic small static vx vxsmall -fileops/fop_rec.c dynamic small static vx vxsmall -fileops/fop_util.c dynamic small static vx vxsmall -gen_db_server.c -hash/hash.c dynamic static vx -hash/hash_auto.c dynamic static vx -hash/hash_autop.c app=db_printlog -hash/hash_conv.c dynamic static vx -hash/hash_dup.c dynamic static vx -hash/hash_func.c dynamic small static vx vxsmall -hash/hash_meta.c dynamic static vx -hash/hash_method.c dynamic static vx -hash/hash_open.c dynamic static vx -hash/hash_page.c dynamic static vx -hash/hash_rec.c dynamic static vx -hash/hash_reclaim.c dynamic static vx -hash/hash_stat.c dynamic static vx -hash/hash_stub.c small vxsmall -hash/hash_upgrade.c dynamic static vx -hash/hash_verify.c dynamic static vx -hmac/hmac.c dynamic small static vx vxsmall -hmac/sha1.c dynamic small static vx vxsmall -hsearch/hsearch.c dynamic static vx -libdb_java/db_java_wrap.c java -lock/lock.c dynamic small static vx vxsmall -lock/lock_deadlock.c dynamic small static vx vxsmall -lock/lock_failchk.c dynamic small static vx vxsmall -lock/lock_id.c dynamic small static vx vxsmall -lock/lock_list.c dynamic small static vx vxsmall -lock/lock_method.c dynamic small static vx vxsmall -lock/lock_region.c dynamic small static vx vxsmall -lock/lock_stat.c dynamic small static vx vxsmall -lock/lock_timer.c dynamic small static vx vxsmall -lock/lock_util.c dynamic small static vx vxsmall -log/log.c dynamic small static vx vxsmall -log/log_archive.c dynamic small static vx vxsmall -log/log_compare.c dynamic small static vx vxsmall -log/log_debug.c dynamic small static vx vxsmall -log/log_get.c dynamic small static vx vxsmall -log/log_method.c dynamic small static vx vxsmall -log/log_put.c dynamic small static vx vxsmall -log/log_stat.c dynamic small static vx vxsmall -mp/mp_alloc.c dynamic small 
static vx vxsmall -mp/mp_bh.c dynamic small static vx vxsmall -mp/mp_fget.c dynamic small static vx vxsmall -mp/mp_fmethod.c dynamic small static vx vxsmall -mp/mp_fopen.c dynamic small static vx vxsmall -mp/mp_fput.c dynamic small static vx vxsmall -mp/mp_fset.c dynamic small static vx vxsmall -mp/mp_method.c dynamic small static vx vxsmall -mp/mp_region.c dynamic small static vx vxsmall -mp/mp_register.c dynamic small static vx vxsmall -mp/mp_stat.c dynamic small static vx vxsmall -mp/mp_sync.c dynamic small static vx vxsmall -mp/mp_trickle.c dynamic small static vx vxsmall -mutex/mut_alloc.c dynamic small static vx vxsmall -mutex/mut_fcntl.c -mutex/mut_method.c dynamic small static vx vxsmall -mutex/mut_pthread.c -mutex/mut_region.c dynamic small static vx vxsmall -mutex/mut_stat.c dynamic small static vx vxsmall -mutex/mut_tas.c vx vxsmall -mutex/mut_win32.c dynamic small static -mutex/tm.c app=tm -os/os_abs.c -os/os_alloc.c dynamic small static vx vxsmall -os/os_clock.c vx vxsmall -os/os_config.c -os/os_dir.c vx vxsmall -os/os_errno.c vx vxsmall -os/os_fid.c vx vxsmall -os/os_flock.c vx vxsmall -os/os_fsync.c vx vxsmall -os/os_handle.c vx vxsmall -os/os_id.c dynamic small static vx vxsmall -os/os_map.c -os/os_method.c dynamic small static vx vxsmall -os/os_mkdir.c dynamic small static vx vxsmall -os/os_oflags.c dynamic small static vx vxsmall -os/os_open.c vx vxsmall -os/os_region.c dynamic small static vx vxsmall -os/os_rename.c vx vxsmall -os/os_root.c dynamic small static vx vxsmall -os/os_rpath.c dynamic small static vx vxsmall -os/os_rw.c vx vxsmall -os/os_seek.c vx vxsmall -os/os_sleep.c vx vxsmall -os/os_spin.c vx vxsmall -os/os_stat.c vx vxsmall -os/os_tmpdir.c dynamic small static vx vxsmall -os/os_truncate.c vx vxsmall -os/os_unlink.c vx vxsmall -os_vxworks/os_vx_abs.c vx vxsmall -os_vxworks/os_vx_config.c vx vxsmall -os_vxworks/os_vx_map.c vx vxsmall -os_win32/os_abs.c dynamic small static -os_win32/os_clock.c dynamic small static -os_win32/os_config.c dynamic small static -os_win32/os_dir.c dynamic small static -os_win32/os_errno.c dynamic small static -os_win32/os_fid.c dynamic small static -os_win32/os_flock.c dynamic small static -os_win32/os_fsync.c dynamic small static -os_win32/os_handle.c dynamic small static -os_win32/os_map.c dynamic small static -os_win32/os_open.c dynamic small static -os_win32/os_rename.c dynamic small static -os_win32/os_rw.c dynamic small static -os_win32/os_seek.c dynamic small static -os_win32/os_sleep.c dynamic small static -os_win32/os_spin.c dynamic small static -os_win32/os_stat.c dynamic small static -os_win32/os_truncate.c dynamic small static -os_win32/os_unlink.c dynamic small static -qam/qam.c dynamic static vx -qam/qam_auto.c dynamic static vx -qam/qam_autop.c app=db_printlog -qam/qam_conv.c dynamic static vx -qam/qam_files.c dynamic static vx -qam/qam_method.c dynamic static vx -qam/qam_open.c dynamic static vx -qam/qam_rec.c dynamic static vx -qam/qam_stat.c dynamic static vx -qam/qam_stub.c small vxsmall -qam/qam_upgrade.c dynamic static vx -qam/qam_verify.c dynamic static vx -rep/rep_auto.c dynamic static vx -rep/rep_autop.c app=db_printlog -rep/rep_backup.c dynamic static vx -rep/rep_elect.c dynamic static vx -rep/rep_log.c dynamic static vx -rep/rep_method.c dynamic static vx -rep/rep_record.c dynamic static vx -rep/rep_region.c dynamic static vx -rep/rep_stat.c dynamic static vx -rep/rep_stub.c small vxsmall -rep/rep_util.c dynamic static vx -rep/rep_verify.c dynamic static vx -rpc_client/client.c -rpc_client/gen_client.c 
-rpc_client/gen_client_ret.c -rpc_server/c/db_server_proc.c -rpc_server/c/db_server_util.c -rpc_server/cxx/db_server_cxxproc.cpp -rpc_server/cxx/db_server_cxxutil.cpp -sequence/seq_stat.c dynamic small static -sequence/sequence.c dynamic small static -tcl/tcl_compat.c tcl -tcl/tcl_db.c tcl -tcl/tcl_db_pkg.c tcl -tcl/tcl_dbcursor.c tcl -tcl/tcl_env.c tcl -tcl/tcl_internal.c tcl -tcl/tcl_lock.c tcl -tcl/tcl_log.c tcl -tcl/tcl_mp.c tcl -tcl/tcl_rep.c tcl -tcl/tcl_seq.c tcl -tcl/tcl_txn.c tcl -tcl/tcl_util.c tcl -test_perf/db_perf.c app=db_perf -test_perf/perf_checkpoint.c app=db_perf -test_perf/perf_config.c app=db_perf -test_perf/perf_dbs.c app=db_perf -test_perf/perf_dead.c app=db_perf -test_perf/perf_debug.c app=db_perf -test_perf/perf_file.c app=db_perf -test_perf/perf_key.c app=db_perf -test_perf/perf_log.c app=db_perf -test_perf/perf_misc.c app=db_perf -test_perf/perf_op.c app=db_perf -test_perf/perf_parse.c app=db_perf -test_perf/perf_rand.c app=db_perf -test_perf/perf_spawn.c app=db_perf -test_perf/perf_stat.c app=db_perf -test_perf/perf_sync.c app=db_perf -test_perf/perf_thread.c app=db_perf -test_perf/perf_trickle.c app=db_perf -test_perf/perf_txn.c app=db_perf -test_perf/perf_util.c app=db_perf -test_perf/perf_vx.c -txn/txn.c dynamic small static vx vxsmall -txn/txn_auto.c dynamic small static vx vxsmall -txn/txn_autop.c app=db_printlog -txn/txn_chkpt.c dynamic small static vx vxsmall -txn/txn_failchk.c dynamic small static vx vxsmall -txn/txn_method.c dynamic small static vx vxsmall -txn/txn_rec.c dynamic small static vx vxsmall -txn/txn_recover.c dynamic small static vx vxsmall -txn/txn_region.c dynamic small static vx vxsmall -txn/txn_stat.c dynamic small static vx vxsmall -txn/txn_util.c dynamic small static vx vxsmall -xa/xa.c dynamic small static vx vxsmall -xa/xa_db.c dynamic small static vx vxsmall -xa/xa_map.c dynamic small static vx vxsmall diff --git a/storage/bdb/dist/template/rec_ctemp b/storage/bdb/dist/template/rec_ctemp deleted file mode 100644 index 2951189c5bd..00000000000 --- a/storage/bdb/dist/template/rec_ctemp +++ /dev/null @@ -1,62 +0,0 @@ -/* - * PREF_FUNC_recover -- - * Recovery function for FUNC. - * - * PUBLIC: int PREF_FUNC_recover - * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); - */ -int -PREF_FUNC_recover(dbenv, dbtp, lsnp, op, info) - DB_ENV *dbenv; - DBT *dbtp; - DB_LSN *lsnp; - db_recops op; - void *info; -{ - PREF_FUNC_args *argp; - DB *file_dbp; - DBC *dbc; - DB_MPOOLFILE *mpf; - PAGE *pagep; - int cmp_n, cmp_p, modified, ret; - - REC_PRINT(PREF_FUNC_print); - REC_INTRO(PREF_FUNC_read, 1); - - if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) - if (DB_REDO(op)) { - if ((ret = mpf->get(mpf, - &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) - goto out; - } else { - *lsnp = argp->prev_lsn; - ret = 0; - goto out; - } - - modified = 0; - cmp_n = log_compare(lsnp, &LSN(pagep)); - - /* - * Use this when there is something like "pagelsn" in the argp - * structure. Sometimes, you might need to compare meta-data - * lsn's instead. - * - * cmp_p = log_compare(&LSN(pagep), argp->pagelsn); - */ - if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ - modified = 1; - } else if (cmp_n == 0 && !DB_REDO(op)) { - /* Need to undo update described. */ - modified = 1; - } - if (ret = mpf->put(mpf, pagep, modified ? 
DB_MPOOL_DIRTY : 0)) - goto out; - - *lsnp = argp->prev_lsn; - ret = 0; - -out: REC_CLOSE; -} - diff --git a/storage/bdb/dist/template/rec_rep b/storage/bdb/dist/template/rec_rep deleted file mode 100644 index 872812cd069..00000000000 --- a/storage/bdb/dist/template/rec_rep +++ /dev/null @@ -1,13 +0,0 @@ -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/__rep.h" -#include "dbinc/log.h" - diff --git a/storage/bdb/dist/vx_2.0/BerkeleyDB.wpj b/storage/bdb/dist/vx_2.0/BerkeleyDB.wpj deleted file mode 100644 index 692d1b40bb6..00000000000 --- a/storage/bdb/dist/vx_2.0/BerkeleyDB.wpj +++ /dev/null @@ -1,251 +0,0 @@ -Document file - DO NOT EDIT - - BUILD_PENTIUM_debug_BUILDRULE -BerkeleyDB20.out - - - BUILD_PENTIUM_debug_MACRO_AR -ar386 - - - BUILD_PENTIUM_debug_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB20_sim.a - - - BUILD_PENTIUM_debug_MACRO_AS -cc386 - - - BUILD_PENTIUM_debug_MACRO_CC -cc386 - - - BUILD_PENTIUM_debug_MACRO_CFLAGS --g \ - -mpentium \ - -ansi \ - -nostdinc \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM \ - -O0 \ - -I$(PRJ_DIR) \ - -I$(PRJ_DIR)/.. \ - -DDIAGNOSTIC \ - -DDEBUG - - - BUILD_PENTIUM_debug_MACRO_CFLAGS_AS --g \ - -mpentium \ - -ansi \ - -nostdinc \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM - - - BUILD_PENTIUM_debug_MACRO_CPP -cc386 -E -P -xc - - - BUILD_PENTIUM_debug_MACRO_LD -ld386 - - - BUILD_PENTIUM_debug_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM_debug_MACRO_NM -nm386 -g - - - BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM_debug_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM_debug_MACRO_SIZE -size386 - - - BUILD_PENTIUM_debug_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM_debug_TC -::tc_PENTIUMgnu - - - BUILD_PENTIUM_release_BUILDRULE -BerkeleyDB20.out - - - BUILD_PENTIUM_release_MACRO_AR -ar386 - - - BUILD_PENTIUM_release_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB20_sim.a - - - BUILD_PENTIUM_release_MACRO_AS -cc386 - - - BUILD_PENTIUM_release_MACRO_CC -cc386 - - - BUILD_PENTIUM_release_MACRO_CFLAGS --mpentium \ - -ansi \ - -nostdinc \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM \ - -O2 \ - -I$(PRJ_DIR) \ - -I$(PRJ_DIR)/.. - - - BUILD_PENTIUM_release_MACRO_CFLAGS_AS --g \ - -mpentium \ - -ansi \ - -nostdinc \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM - - - BUILD_PENTIUM_release_MACRO_CPP -cc386 -E -P -xc - - - BUILD_PENTIUM_release_MACRO_LD -ld386 - - - BUILD_PENTIUM_release_MACRO_LDDEPS - - - - BUILD_PENTIUM_release_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM_release_MACRO_NM -nm386 -g - - - BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM_release_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM_release_MACRO_SIZE -size386 - - - BUILD_PENTIUM_release_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM_release_TC -::tc_PENTIUMgnu - - - BUILD_RULE_BerkeleyDB20.out - - - - BUILD_RULE_BerkeleyDB20_sim.out - - - - BUILD_RULE_archive - - - - BUILD_RULE_objects - - - - BUILD__CURRENT -PENTIUM_debug - - - BUILD__LIST -PENTIUM_release PENTIUM_debug - - - CORE_INFO_TYPE -::prj_vxApp - - - CORE_INFO_VERSION -2.0 - - diff --git a/storage/bdb/dist/vx_2.0/BerkeleyDBsmall.wpj b/storage/bdb/dist/vx_2.0/BerkeleyDBsmall.wpj deleted file mode 100644 index 3c9fd350fa1..00000000000 --- a/storage/bdb/dist/vx_2.0/BerkeleyDBsmall.wpj +++ /dev/null @@ -1,251 +0,0 @@ -Document file - DO NOT EDIT - - BUILD_PENTIUM_debug_BUILDRULE -BerkeleyDB20small.out - - - BUILD_PENTIUM_debug_MACRO_AR -ar386 - - - BUILD_PENTIUM_debug_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB20small_sim.a - - - BUILD_PENTIUM_debug_MACRO_AS -cc386 - - - BUILD_PENTIUM_debug_MACRO_CC -cc386 - - - BUILD_PENTIUM_debug_MACRO_CFLAGS --g \ - -mpentium \ - -ansi \ - -nostdinc \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM \ - -O0 \ - -I$(PRJ_DIR) \ - -I$(PRJ_DIR)/.. \ - -DDIAGNOSTIC \ - -DDEBUG - - - BUILD_PENTIUM_debug_MACRO_CFLAGS_AS --g \ - -mpentium \ - -ansi \ - -nostdinc \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM - - - BUILD_PENTIUM_debug_MACRO_CPP -cc386 -E -P -xc - - - BUILD_PENTIUM_debug_MACRO_LD -ld386 - - - BUILD_PENTIUM_debug_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM_debug_MACRO_NM -nm386 -g - - - BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM_debug_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM_debug_MACRO_SIZE -size386 - - - BUILD_PENTIUM_debug_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM_debug_TC -::tc_PENTIUMgnu - - - BUILD_PENTIUM_release_BUILDRULE -BerkeleyDB20small.out - - - BUILD_PENTIUM_release_MACRO_AR -ar386 - - - BUILD_PENTIUM_release_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB20small_sim.a - - - BUILD_PENTIUM_release_MACRO_AS -cc386 - - - BUILD_PENTIUM_release_MACRO_CC -cc386 - - - BUILD_PENTIUM_release_MACRO_CFLAGS --mpentium \ - -ansi \ - -nostdinc \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM \ - -O2 \ - -I$(PRJ_DIR) \ - -I$(PRJ_DIR)/.. 
- - - BUILD_PENTIUM_release_MACRO_CFLAGS_AS --g \ - -mpentium \ - -ansi \ - -nostdinc \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM - - - BUILD_PENTIUM_release_MACRO_CPP -cc386 -E -P -xc - - - BUILD_PENTIUM_release_MACRO_LD -ld386 - - - BUILD_PENTIUM_release_MACRO_LDDEPS - - - - BUILD_PENTIUM_release_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM_release_MACRO_NM -nm386 -g - - - BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM_release_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM_release_MACRO_SIZE -size386 - - - BUILD_PENTIUM_release_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM_release_TC -::tc_PENTIUMgnu - - - BUILD_RULE_BerkeleyDB20small.out - - - - BUILD_RULE_BerkeleyDB20small_sim.out - - - - BUILD_RULE_archive - - - - BUILD_RULE_objects - - - - BUILD__CURRENT -PENTIUM_debug - - - BUILD__LIST -PENTIUM_release PENTIUM_debug - - - CORE_INFO_TYPE -::prj_vxApp - - - CORE_INFO_VERSION -2.0 - - diff --git a/storage/bdb/dist/vx_2.0/wpj.in b/storage/bdb/dist/vx_2.0/wpj.in deleted file mode 100644 index a38cf7251a6..00000000000 --- a/storage/bdb/dist/vx_2.0/wpj.in +++ /dev/null @@ -1,160 +0,0 @@ -Document file - DO NOT EDIT - - BUILD_PENTIUMgnu_BUILDRULE -__DB_APPLICATION_NAME__20.out - - - BUILD_PENTIUMgnu_MACRO_AR -ar386 - - - BUILD_PENTIUMgnu_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUMgnu/__DB_APPLICATION_NAME__20.a - - - BUILD_PENTIUMgnu_MACRO_AS -cc386 - - - BUILD_PENTIUMgnu_MACRO_CC -cc386 - - - BUILD_PENTIUMgnu_MACRO_CFLAGS --g \ - -mpentium \ - -ansi \ - -nostdinc \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I$(PRJ_DIR)/.. \ - -I$(PRJ_DIR)/../.. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM - - - BUILD_PENTIUMgnu_MACRO_CFLAGS_AS --g \ - -mpentium \ - -ansi \ - -nostdinc \ - -fvolatile \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM - - - BUILD_PENTIUMgnu_MACRO_CPP -cc386 -E -P -xc - - - BUILD_PENTIUMgnu_MACRO_LD -ld386 - - - BUILD_PENTIUMgnu_MACRO_LDDEPS - - - - BUILD_PENTIUMgnu_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUMgnu_MACRO_NM -nm386 -g - - - BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUMgnu_MACRO_PRJ_LIBS - - - - BUILD_PENTIUMgnu_MACRO_SIZE -size386 - - - BUILD_PENTIUMgnu_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUMgnu_TC -::tc_PENTIUMgnu - - - BUILD_RULE_archive - - - - BUILD_RULE___DB_APPLICATION_NAME__20.out - - - - BUILD_RULE_objects - - - - BUILD__CURRENT -PENTIUMgnu - - - BUILD__LIST -PENTIUMgnu - - - CORE_INFO_TYPE -::prj_vxApp - - - CORE_INFO_VERSION -2.0 - - - FILE___DB_APPLICATION_NAME__.c_dependDone -FALSE - - - FILE___DB_APPLICATION_NAME__.c_dependencies - - - - FILE___DB_APPLICATION_NAME__.c_objects -__DB_APPLICATION_NAME__.o - - - FILE___DB_APPLICATION_NAME__.c_tool -C/C++ compiler - - - PROJECT_FILES -$(PRJ_DIR)/__DB_APPLICATION_NAME__.c - - - userComments -__DB_APPLICATION_NAME__ - diff --git a/storage/bdb/dist/vx_2.2/BerkeleyDB.wpj b/storage/bdb/dist/vx_2.2/BerkeleyDB.wpj deleted file mode 100644 index e27a231f76f..00000000000 --- a/storage/bdb/dist/vx_2.2/BerkeleyDB.wpj +++ /dev/null @@ -1,310 +0,0 @@ -Document file - DO NOT EDIT - - BUILD_PENTIUM_debug_BUILDRULE -BerkeleyDB22.out - - - BUILD_PENTIUM_debug_MACRO_AR -arpentium - - - BUILD_PENTIUM_debug_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUM_debug/BerkeleyDB22.a - - - BUILD_PENTIUM_debug_MACRO_AS -ccpentium - - - BUILD_PENTIUM_debug_MACRO_CC -ccpentium - - - BUILD_PENTIUM_debug_MACRO_CC_ARCH_SPEC --mcpu=pentiumpro -march=pentiumpro - - - BUILD_PENTIUM_debug_MACRO_CFLAGS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu \ - -O0 \ - -I$(PRJ_DIR) \ - -I$(PRJ_DIR)/.. \ - -DDIAGNOSTIC \ - -DDEBUG - - - BUILD_PENTIUM_debug_MACRO_CFLAGS_AS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -xassembler-with-cpp \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu - - - BUILD_PENTIUM_debug_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM_debug_MACRO_HEX_FLAGS - - - - BUILD_PENTIUM_debug_MACRO_LD -ldpentium - - - BUILD_PENTIUM_debug_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM_debug_MACRO_LD_PARTIAL -ccpentium -r -nostdlib -Wl,-X - - - BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM_debug_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM_debug_MACRO_OPTION_DEPEND --M -w - - - BUILD_PENTIUM_debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM_debug_MACRO_OPTION_LANG_C --xc - - - BUILD_PENTIUM_debug_MACRO_OPTION_UNDEFINE_MACRO --U - - - BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM_debug_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM_debug_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM_debug_MACRO_TOOL_FAMILY -gnu - - - BUILD_PENTIUM_debug_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM_debug_TC -::tc_PENTIUM2gnu - - - BUILD_PENTIUM_release_BUILDRULE -BerkeleyDB22.out - - - BUILD_PENTIUM_release_MACRO_AR -arpentium - - - BUILD_PENTIUM_release_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUM_release/BerkeleyDB22.a - - - BUILD_PENTIUM_release_MACRO_AS -ccpentium - - - BUILD_PENTIUM_release_MACRO_CC -ccpentium - - - BUILD_PENTIUM_release_MACRO_CC_ARCH_SPEC --mcpu=pentiumpro -march=pentiumpro - - - BUILD_PENTIUM_release_MACRO_CFLAGS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu \ - -O2 \ - -I$(PRJ_DIR) \ - -I$(PRJ_DIR)/.. - - - BUILD_PENTIUM_release_MACRO_CFLAGS_AS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -xassembler-with-cpp \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu - - - BUILD_PENTIUM_release_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM_release_MACRO_HEX_FLAGS - - - - BUILD_PENTIUM_release_MACRO_LD -ldpentium - - - BUILD_PENTIUM_release_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM_release_MACRO_LD_PARTIAL -ccpentium -r -nostdlib -Wl,-X - - - BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM_release_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM_release_MACRO_OPTION_DEPEND --M -w - - - BUILD_PENTIUM_release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM_release_MACRO_OPTION_LANG_C --xc - - - BUILD_PENTIUM_release_MACRO_OPTION_UNDEFINE_MACRO --U - - - BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM_release_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM_release_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM_release_MACRO_TOOL_FAMILY -gnu - - - BUILD_PENTIUM_release_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM_release_TC -::tc_PENTIUM2gnu - - - BUILD_RULE_BerkeleyDB22.out - - - - BUILD_RULE_BerkeleyDB22.pl - - - - BUILD_RULE_archive - - - - BUILD_RULE_objects - - - - BUILD__CURRENT -PENTIUM_debug - - - BUILD__LIST -PENTIUM_release PENTIUM_debug - - - CORE_INFO_TYPE -::prj_vxApp - - - CORE_INFO_VERSION -2.2 - - diff --git a/storage/bdb/dist/vx_2.2/BerkeleyDBsmall.wpj b/storage/bdb/dist/vx_2.2/BerkeleyDBsmall.wpj deleted file mode 100644 index bfbdadc46a5..00000000000 --- a/storage/bdb/dist/vx_2.2/BerkeleyDBsmall.wpj +++ /dev/null @@ -1,310 +0,0 @@ -Document file - DO NOT EDIT - - BUILD_PENTIUM_debug_BUILDRULE -BerkeleyDB22small.out - - - BUILD_PENTIUM_debug_MACRO_AR -arpentium - - - BUILD_PENTIUM_debug_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUM_debug/BerkeleyDB22small.a - - - BUILD_PENTIUM_debug_MACRO_AS -ccpentium - - - BUILD_PENTIUM_debug_MACRO_CC -ccpentium - - - BUILD_PENTIUM_debug_MACRO_CC_ARCH_SPEC --mcpu=pentiumpro -march=pentiumpro - - - BUILD_PENTIUM_debug_MACRO_CFLAGS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu \ - -O0 \ - -I$(PRJ_DIR) \ - -I$(PRJ_DIR)/.. \ - -DDIAGNOSTIC \ - -DDEBUG - - - BUILD_PENTIUM_debug_MACRO_CFLAGS_AS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -xassembler-with-cpp \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu - - - BUILD_PENTIUM_debug_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM_debug_MACRO_HEX_FLAGS - - - - BUILD_PENTIUM_debug_MACRO_LD -ldpentium - - - BUILD_PENTIUM_debug_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM_debug_MACRO_LD_PARTIAL -ccpentium -r -nostdlib -Wl,-X - - - BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM_debug_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM_debug_MACRO_OPTION_DEPEND --M -w - - - BUILD_PENTIUM_debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM_debug_MACRO_OPTION_LANG_C --xc - - - BUILD_PENTIUM_debug_MACRO_OPTION_UNDEFINE_MACRO --U - - - BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM_debug_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM_debug_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM_debug_MACRO_TOOL_FAMILY -gnu - - - BUILD_PENTIUM_debug_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM_debug_TC -::tc_PENTIUM2gnu - - - BUILD_PENTIUM_release_BUILDRULE -BerkeleyDB22small.out - - - BUILD_PENTIUM_release_MACRO_AR -arpentium - - - BUILD_PENTIUM_release_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUM_release/BerkeleyDB22small.a - - - BUILD_PENTIUM_release_MACRO_AS -ccpentium - - - BUILD_PENTIUM_release_MACRO_CC -ccpentium - - - BUILD_PENTIUM_release_MACRO_CC_ARCH_SPEC --mcpu=pentiumpro -march=pentiumpro - - - BUILD_PENTIUM_release_MACRO_CFLAGS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu \ - -O2 \ - -I$(PRJ_DIR) \ - -I$(PRJ_DIR)/.. - - - BUILD_PENTIUM_release_MACRO_CFLAGS_AS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -xassembler-with-cpp \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu - - - BUILD_PENTIUM_release_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM_release_MACRO_HEX_FLAGS - - - - BUILD_PENTIUM_release_MACRO_LD -ldpentium - - - BUILD_PENTIUM_release_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM_release_MACRO_LD_PARTIAL -ccpentium -r -nostdlib -Wl,-X - - - BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM_release_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM_release_MACRO_OPTION_DEPEND --M -w - - - BUILD_PENTIUM_release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM_release_MACRO_OPTION_LANG_C --xc - - - BUILD_PENTIUM_release_MACRO_OPTION_UNDEFINE_MACRO --U - - - BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM_release_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM_release_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM_release_MACRO_TOOL_FAMILY -gnu - - - BUILD_PENTIUM_release_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM_release_TC -::tc_PENTIUM2gnu - - - BUILD_RULE_BerkeleyDB22small.out - - - - BUILD_RULE_BerkeleyDB22small.pl - - - - BUILD_RULE_archive - - - - BUILD_RULE_objects - - - - BUILD__CURRENT -PENTIUM_debug - - - BUILD__LIST -PENTIUM_release PENTIUM_debug - - - CORE_INFO_TYPE -::prj_vxApp - - - CORE_INFO_VERSION -2.2 - - diff --git a/storage/bdb/dist/vx_2.2/wpj.in b/storage/bdb/dist/vx_2.2/wpj.in deleted file mode 100644 index d883ef2b193..00000000000 --- a/storage/bdb/dist/vx_2.2/wpj.in +++ /dev/null @@ -1,194 +0,0 @@ -Document file - DO NOT EDIT - - BUILD_PENTIUM2gnu_BUILDRULE -__DB_APPLICATION_NAME__22.out - - - BUILD_PENTIUM2gnu_MACRO_AR -arpentium - - - BUILD_PENTIUM2gnu_MACRO_ARCHIVE -$(PRJ_DIR)/PENTIUM2gnu/__DB_APPLICATION_NAME__22.a - - - BUILD_PENTIUM2gnu_MACRO_AS -ccpentium - - - BUILD_PENTIUM2gnu_MACRO_CC -ccpentium - - - BUILD_PENTIUM2gnu_MACRO_CC_ARCH_SPEC --mcpu=pentiumpro -march=pentiumpro - - - BUILD_PENTIUM2gnu_MACRO_CFLAGS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -fvolatile \ - -fno-builtin \ - -fno-defer-pop \ - -I$(PRJ_DIR)/.. \ - -I$(PRJ_DIR)/../.. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu - - - BUILD_PENTIUM2gnu_MACRO_CFLAGS_AS --g \ - -mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -xassembler-with-cpp \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -DTOOL_FAMILY=gnu \ - -DTOOL=gnu - - - BUILD_PENTIUM2gnu_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM2gnu_MACRO_HEX_FLAGS - - - - BUILD_PENTIUM2gnu_MACRO_LD -ldpentium - - - BUILD_PENTIUM2gnu_MACRO_LDFLAGS --X -N - - - BUILD_PENTIUM2gnu_MACRO_LD_PARTIAL -ccpentium -r -nostdlib -Wl,-X - - - BUILD_PENTIUM2gnu_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM2gnu_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM2gnu_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM2gnu_MACRO_OPTION_DEPEND --M -w - - - BUILD_PENTIUM2gnu_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM2gnu_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM2gnu_MACRO_OPTION_LANG_C --xc - - - BUILD_PENTIUM2gnu_MACRO_OPTION_UNDEFINE_MACRO --U - - - BUILD_PENTIUM2gnu_MACRO_POST_BUILD_RULE - - - - BUILD_PENTIUM2gnu_MACRO_PRJ_LIBS - - - - BUILD_PENTIUM2gnu_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM2gnu_MACRO_TOOL_FAMILY -gnu - - - BUILD_PENTIUM2gnu_RO_DEPEND_PATH -{$(WIND_BASE)/target/h/} \ - {$(WIND_BASE)/target/src/} \ - {$(WIND_BASE)/target/config/} - - - BUILD_PENTIUM2gnu_TC -::tc_PENTIUM2gnu - - - BUILD_RULE_archive - - - - BUILD_RULE___DB_APPLICATION_NAME__22.out - - - - BUILD_RULE___DB_APPLICATION_NAME__22.pl - - - - BUILD_RULE_objects - - - - BUILD__CURRENT -PENTIUM2gnu - - - BUILD__LIST -PENTIUM2gnu - - - CORE_INFO_TYPE -::prj_vxApp - - - CORE_INFO_VERSION -2.2 - - - FILE___DB_APPLICATION_NAME__.c_dependDone -FALSE - - - FILE___DB_APPLICATION_NAME__.c_dependencies - - - - FILE___DB_APPLICATION_NAME__.c_objects -__DB_APPLICATION_NAME__.o - - - FILE___DB_APPLICATION_NAME__.c_tool -C/C++ compiler - - - PROJECT_FILES -$(PRJ_DIR)/__DB_APPLICATION_NAME__.c - - - userComments -__DB_APPLICATION_NAME__ - diff --git a/storage/bdb/dist/vx_3.1/Makefile.custom b/storage/bdb/dist/vx_3.1/Makefile.custom deleted file mode 100644 index ca781f7b251..00000000000 --- a/storage/bdb/dist/vx_3.1/Makefile.custom +++ /dev/null @@ -1,51 +0,0 @@ -# -# Custom Makefile shell -# -# This file may be edited freely, since it will not be regenerated -# by the project manager. -# -# Use this makefile to define rules to make external binaries -# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory. -# -# If you have specified external modules during your component -# creation, you will find make rules already in place below. -# You will likely have to edit these to suit your individual -# build setup. -# -# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in -# your Makefile to support builds for different architectures. Use -# the FORCE_EXTERNAL_MAKE phony target to ensure that your external -# make always runs. -# -# The example below assumes that your custom makefile is in the -# mySourceTree directory, and that the binary file it produces -# is placed into the $(BUILD_SPEC) sub-directory. 
-# -# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree -# EXTERNAL_MODULE = myLibrary.o -# EXTERNAL_MAKE = make -# -# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE -# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \ -# -f $(EXTERNAL_SOURCE_BASE)/Makefile \ -# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F) -# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@) -# -# If you are not adding your external modules from the component wizard, -# you will have to include them in your component yourself: -# -# From the GUI, you can do this with the Component's 'Add external module' -# dialog. -# -# If you are using the command line, add the module(s) by editing the -# MODULES line in component.cdf file, e.g. -# -# Component INCLUDE_MYCOMPONENT { -# -# MODULES foo.o goo.o \ -# myLibrary.o -# - - -# rules to build custom libraries - diff --git a/storage/bdb/dist/vx_3.1/cdf.1 b/storage/bdb/dist/vx_3.1/cdf.1 deleted file mode 100644 index 17db06f7e61..00000000000 --- a/storage/bdb/dist/vx_3.1/cdf.1 +++ /dev/null @@ -1,12 +0,0 @@ -/* component.cdf - dynamically updated configuration */ - -/* - * NOTE: you may edit this file to alter the configuration - * But all non-configuration information, including comments, - * will be lost upon rebuilding this project. - */ - -/* Component information */ - -Component INCLUDE_BERKELEYDB { - ENTRY_POINTS ALL_GLOBAL_SYMBOLS diff --git a/storage/bdb/dist/vx_3.1/cdf.2 b/storage/bdb/dist/vx_3.1/cdf.2 deleted file mode 100644 index 76f123af9fb..00000000000 --- a/storage/bdb/dist/vx_3.1/cdf.2 +++ /dev/null @@ -1,9 +0,0 @@ - NAME BerkeleyDB - PREF_DOMAIN ANY - _INIT_ORDER usrComponentsInit -} - -/* EntryPoint information */ - -/* Module information */ - diff --git a/storage/bdb/dist/vx_3.1/cdf.3 b/storage/bdb/dist/vx_3.1/cdf.3 deleted file mode 100644 index a3146ced95a..00000000000 --- a/storage/bdb/dist/vx_3.1/cdf.3 +++ /dev/null @@ -1,2 +0,0 @@ -/* Parameter information */ - diff --git a/storage/bdb/dist/vx_3.1/component.cdf b/storage/bdb/dist/vx_3.1/component.cdf deleted file mode 100644 index 91edaa87853..00000000000 --- a/storage/bdb/dist/vx_3.1/component.cdf +++ /dev/null @@ -1,30 +0,0 @@ -/* component.cdf - dynamically updated configuration */ - -/* - * NOTE: you may edit this file to alter the configuration - * But all non-configuration information, including comments, - * will be lost upon rebuilding this project. 
- */ - -/* Component information */ - -Component INCLUDE___DB_CAPAPPL_NAME__ { - ENTRY_POINTS ALL_GLOBAL_SYMBOLS - MODULES __DB_APPLICATION_NAME__.o - NAME __DB_APPLICATION_NAME__ - PREF_DOMAIN ANY - _INIT_ORDER usrComponentsInit -} - -/* EntryPoint information */ - -/* Module information */ - -Module __DB_APPLICATION_NAME__.o { - - NAME __DB_APPLICATION_NAME__.o - SRC_PATH_NAME $PRJ_DIR/../__DB_APPLICATION_NAME__.c -} - -/* Parameter information */ - diff --git a/storage/bdb/dist/vx_3.1/component.wpj b/storage/bdb/dist/vx_3.1/component.wpj deleted file mode 100644 index 01c51c1b97f..00000000000 --- a/storage/bdb/dist/vx_3.1/component.wpj +++ /dev/null @@ -1,475 +0,0 @@ -Document file - DO NOT EDIT - - CORE_INFO_TYPE -::prj_component - - - CORE_INFO_VERSION -AE1.1 - - - BUILD__CURRENT -PENTIUM2gnu.debug - - - BUILD_PENTIUM2gnu.debug_CURRENT_TARGET -default - - - BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU -1 - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags -toolMacro objects - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects -__DB_APPLICATION_NAME__.o - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro -CC - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects -__DB_APPLICATION_NAME__.o - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro -CC - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags -toolMacro objects - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects -compConfig.o - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro -CC - - - BUILD_PENTIUM2gnu.debug_MACRO_AR -arpentium - - - BUILD_PENTIUM2gnu.debug_MACRO_AS -ccpentium - - - BUILD_PENTIUM2gnu.debug_MACRO_CC -ccpentium - - - BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS --mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -g \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -MD \ - -Wall \ - -I. \ - -I$(WIND_BASE)/target/h \ - -I$(PRJ_DIR)/../.. \ - -I$(PRJ_DIR)/../../.. \ - -DCPU=PENTIUM2 - - - BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS --mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -g \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -Wall \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 - - - BUILD_PENTIUM2gnu.debug_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT -c++filtpentium --strip-underscores - - - BUILD_PENTIUM2gnu.debug_MACRO_LD -ldpentium - - - BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS --X - - - BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL -ccpentium \ - -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \ - -nostdlib \ - -r \ - -Wl,-X - - - BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM2gnu.debug_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM2gnu.debug_MACRO_RELEASE -0 - - - BUILD_PENTIUM2gnu.debug_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM2gnu.debug_RELEASE -0 - - - BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH -$(WIND_BASE)/target/h/ - - - BUILD_PENTIUM2gnu.debug_TC -::tc_PENTIUM2gnu.debug - - - BUILD_PENTIUM2gnu.release_DEFAULTFORCPU -0 - - - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags -toolMacro objects - - - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects -__DB_APPLICATION_NAME__.o - - - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro -CC - - - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags -toolMacro objects - - - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects -compConfig.o - - - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro -CC - - - BUILD_PENTIUM2gnu.release_MACRO_AR -arpentium - - - BUILD_PENTIUM2gnu.release_MACRO_AS -ccpentium - - - BUILD_PENTIUM2gnu.release_MACRO_CC -ccpentium - - - BUILD_PENTIUM2gnu.release_MACRO_CFLAGS --mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -O2 \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -MD \ - -Wall \ - -I. \ - -I$(WIND_BASE)/target/h \ - -I$(PRJ_DIR)/../.. \ - -I$(PRJ_DIR)/../../.. \ - -DCPU=PENTIUM2 - - - BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS --mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -O2 \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -Wall \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 - - - BUILD_PENTIUM2gnu.release_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM2gnu.release_MACRO_CPPFILT -c++filtpentium --strip-underscores - - - BUILD_PENTIUM2gnu.release_MACRO_LD -ldpentium - - - BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS --X - - - BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL -ccpentium \ - -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \ - -nostdlib \ - -r \ - -Wl,-X - - - BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM2gnu.release_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM2gnu.release_MACRO_RELEASE -1 - - - BUILD_PENTIUM2gnu.release_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM2gnu.release_RELEASE -1 - - - BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH -$(WIND_BASE)/target/h/ - - - BUILD_PENTIUM2gnu.release_TC -::tc_PENTIUM2gnu.release - - - BUILD_PENTIUMgnu.debug_DEFAULTFORCPU -1 - - - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags -toolMacro objects - - - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects -__DB_APPLICATION_NAME__.o - - - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro -CC - - - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags -toolMacro objects - - - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects -compConfig.o - - - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro -CC - - - BUILD_PENTIUMgnu.debug_MACRO_AR -arpentium - - - BUILD_PENTIUMgnu.debug_MACRO_AS -ccpentium - - - BUILD_PENTIUMgnu.debug_MACRO_CC -ccpentium - - - BUILD_PENTIUMgnu.debug_MACRO_CFLAGS --mcpu=pentium \ - -march=pentium \ - -ansi \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -g \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -MD \ - -Wall \ - -I. \ - -I$(WIND_BASE)/target/h \ - -I$(PRJ_DIR)/../.. \ - -I$(PRJ_DIR)/../../.. \ - -DCPU=PENTIUM - - - BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS --mcpu=pentium \ - -march=pentium \ - -ansi \ - -g \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -Wall \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM - - - BUILD_PENTIUMgnu.debug_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUMgnu.debug_MACRO_CPPFILT -c++filtpentium --strip-underscores - - - BUILD_PENTIUMgnu.debug_MACRO_LD -ldpentium - - - BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS --X - - - BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL -ccpentium \ - -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \ - -nostdlib \ - -r \ - -Wl,-X - - - BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUMgnu.debug_MACRO_NM -nmpentium -g - - - BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUMgnu.debug_MACRO_RELEASE -0 - - - BUILD_PENTIUMgnu.debug_MACRO_SIZE -sizepentium - - - BUILD_PENTIUMgnu.debug_RELEASE -0 - - - BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH -$(WIND_BASE)/target/h/ - - - BUILD_PENTIUMgnu.debug_TC -::tc_PENTIUMgnu.debug - - - BUILD__LIST -PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug - - - PROJECT_FILES -$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c \ - $(PRJ_DIR)/compConfig.c - - - WCC__CDF_PATH -$(PRJ_DIR) - - - WCC__CURRENT -PENTIUM2gnu.debug - - - WCC__LIST -PENTIUM2gnu.debug - - - WCC__MXR_LIBS -lib$(CPU)$(TOOL)vx.a - - - WCC__OBJS_PATH -$(WIND_BASE)/target/lib/obj$CPU$TOOLvx - - diff --git a/storage/bdb/dist/vx_3.1/wpj.1 b/storage/bdb/dist/vx_3.1/wpj.1 deleted file mode 100644 index 414b4e8fa35..00000000000 --- a/storage/bdb/dist/vx_3.1/wpj.1 +++ /dev/null @@ -1,22 +0,0 @@ -Document file - DO NOT EDIT - - CORE_INFO_TYPE -::prj_component - - - CORE_INFO_VERSION -AE1.0 - - - BUILD__CURRENT -PENTIUM2gnu.debug - - - BUILD_PENTIUM2gnu.debug_CURRENT_TARGET -default - - - BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU -0 - - diff --git a/storage/bdb/dist/vx_3.1/wpj.2 b/storage/bdb/dist/vx_3.1/wpj.2 deleted file mode 100644 index 0294f763ef7..00000000000 --- a/storage/bdb/dist/vx_3.1/wpj.2 +++ /dev/null @@ -1,130 +0,0 @@ - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags -toolMacro objects - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects -compConfig.o - - - BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro -CC - - - BUILD_PENTIUM2gnu.debug_MACRO_AR -arpentium - - - BUILD_PENTIUM2gnu.debug_MACRO_AS -ccpentium - - - BUILD_PENTIUM2gnu.debug_MACRO_CC -ccpentium - - - BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS --mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -g \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -MD \ - -Wall \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -I$(PRJ_DIR)/.. \ - -I$(PRJ_DIR)/../.. \ - -DDEBUG \ - -DDIAGNOSTIC - - - BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS --mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -g \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -Wall \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 - - - BUILD_PENTIUM2gnu.debug_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT -c++filtpentium --strip-underscores - - - BUILD_PENTIUM2gnu.debug_MACRO_LD -ldpentium - - - BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS --X - - - BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL -ccpentium \ - -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \ - -nostdlib \ - -r \ - -Wl,-X - - - BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM2gnu.debug_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM2gnu.debug_MACRO_RELEASE -0 - - - BUILD_PENTIUM2gnu.debug_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM2gnu.debug_RELEASE -0 - - - BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH -$(WIND_BASE)/target/h/ - - - BUILD_PENTIUM2gnu.debug_TC -::tc_PENTIUM2gnu.debug - - - BUILD_PENTIUM2gnu.release_DEFAULTFORCPU -0 - - diff --git a/storage/bdb/dist/vx_3.1/wpj.3 b/storage/bdb/dist/vx_3.1/wpj.3 deleted file mode 100644 index f06e6253923..00000000000 --- a/storage/bdb/dist/vx_3.1/wpj.3 +++ /dev/null @@ -1,128 +0,0 @@ - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags -toolMacro objects - - - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects -compConfig.o - - - BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro -CC - - - BUILD_PENTIUM2gnu.release_MACRO_AR -arpentium - - - BUILD_PENTIUM2gnu.release_MACRO_AS -ccpentium - - - BUILD_PENTIUM2gnu.release_MACRO_CC -ccpentium - - - BUILD_PENTIUM2gnu.release_MACRO_CFLAGS --mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -O2 \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -MD \ - -Wall \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 \ - -I$(PRJ_DIR)/.. \ - -I$(PRJ_DIR)/../.. - - - BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS --mcpu=pentiumpro \ - -march=pentiumpro \ - -ansi \ - -O2 \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -Wall \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM2 - - - BUILD_PENTIUM2gnu.release_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUM2gnu.release_MACRO_CPPFILT -c++filtpentium --strip-underscores - - - BUILD_PENTIUM2gnu.release_MACRO_LD -ldpentium - - - BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS --X - - - BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL -ccpentium \ - -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \ - -nostdlib \ - -r \ - -Wl,-X - - - BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUM2gnu.release_MACRO_NM -nmpentium -g - - - BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUM2gnu.release_MACRO_RELEASE -1 - - - BUILD_PENTIUM2gnu.release_MACRO_SIZE -sizepentium - - - BUILD_PENTIUM2gnu.release_RELEASE -1 - - - BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH -$(WIND_BASE)/target/h/ - - - BUILD_PENTIUM2gnu.release_TC -::tc_PENTIUM2gnu.release - - - BUILD_PENTIUMgnu.debug_DEFAULTFORCPU -1 - - diff --git a/storage/bdb/dist/vx_3.1/wpj.4 b/storage/bdb/dist/vx_3.1/wpj.4 deleted file mode 100644 index 84de6ebf359..00000000000 --- a/storage/bdb/dist/vx_3.1/wpj.4 +++ /dev/null @@ -1,135 +0,0 @@ - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags -toolMacro objects - - - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects -compConfig.o - - - BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro -CC - - - BUILD_PENTIUMgnu.debug_MACRO_AR -arpentium - - - BUILD_PENTIUMgnu.debug_MACRO_AS -ccpentium - - - BUILD_PENTIUMgnu.debug_MACRO_CC -ccpentium - - - BUILD_PENTIUMgnu.debug_MACRO_CFLAGS --mcpu=pentium \ - -march=pentium \ - -ansi \ - -DRW_MULTI_THREAD \ - -D_REENTRANT \ - -g \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -MD \ - -Wall \ - -I. \ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM \ - -I$(PRJ_DIR)/.. \ - -I$(PRJ_DIR)/../.. \ - -DDEBUG \ - -DDIAGNOSTIC - - - BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS --mcpu=pentium \ - -march=pentium \ - -ansi \ - -g \ - -nostdlib \ - -fno-builtin \ - -fno-defer-pop \ - -P \ - -x \ - assembler-with-cpp \ - -Wall \ - -I. 
\ - -I$(WIND_BASE)/target/h \ - -DCPU=PENTIUM - - - BUILD_PENTIUMgnu.debug_MACRO_CPP -ccpentium -E -P - - - BUILD_PENTIUMgnu.debug_MACRO_CPPFILT -c++filtpentium --strip-underscores - - - BUILD_PENTIUMgnu.debug_MACRO_LD -ldpentium - - - BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS --X - - - BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL -ccpentium \ - -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \ - -nostdlib \ - -r \ - -Wl,-X - - - BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS --X -r - - - BUILD_PENTIUMgnu.debug_MACRO_NM -nmpentium -g - - - BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO --D - - - BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE --MD - - - BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR --I - - - BUILD_PENTIUMgnu.debug_MACRO_RELEASE -0 - - - BUILD_PENTIUMgnu.debug_MACRO_SIZE -sizepentium - - - BUILD_PENTIUMgnu.debug_RELEASE -0 - - - BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH -$(WIND_BASE)/target/h/ - - - BUILD_PENTIUMgnu.debug_TC -::tc_PENTIUMgnu.debug - - - BUILD__LIST -PENTIUMgnu.debug PENTIUM2gnu.debug PENTIUM2gnu.release - - - COMPONENT_COM_TYPE - - - - PROJECT_FILES diff --git a/storage/bdb/dist/vx_3.1/wpj.5 b/storage/bdb/dist/vx_3.1/wpj.5 deleted file mode 100644 index f4056e7e22a..00000000000 --- a/storage/bdb/dist/vx_3.1/wpj.5 +++ /dev/null @@ -1,22 +0,0 @@ - - - WCC__CDF_PATH -$(PRJ_DIR) - - - WCC__CURRENT -PENTIUMgnu.debug - - - WCC__LIST -PENTIUMgnu.debug - - - WCC__MXR_LIBS -lib$(CPU)$(TOOL)vx.a - - - WCC__OBJS_PATH -$(WIND_BASE)/target/lib/obj$CPU$TOOLvx - - diff --git a/storage/bdb/dist/vx_buildcd b/storage/bdb/dist/vx_buildcd deleted file mode 100755 index 72bd10b8d52..00000000000 --- a/storage/bdb/dist/vx_buildcd +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/sh -# $Id: vx_buildcd,v 12.0 2004/11/17 03:43:37 bostic Exp $ -# -# Build the Setup SDK CD image on the VxWorks host machine. - -. ./RELEASE - -B=`pwd` -B=$B/.. -D=$B/dist/vx_setup -C=$D/db.CD -Q=/export/home/sue/SetupSDK -S=$Q/resource/mfg/setup -W=sun4-solaris2 - -symdoc=$D/docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH -symdb=$D/windlink/sleepycat/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH -rm -rf $D/docs $D/windlink -mkdir $D/docs $D/windlink $D/windlink/sleepycat -ln -s $B/docs $symdoc -ln -s $B $symdb - -s=/tmp/__db_a -t=/tmp/__db_b - -# -# Remove the old CD directory if it is there. -if test -d $C; then - echo "$C cannot exist." - echo "As root, execute 'rm -rf $C'" - echo "and then rerun the script" - exit 1 -fi - -# -# Check for absolute pathnames in the project files. -# That is bad, but Tornado insists on putting them in -# whenever you add new files. -# -rm -f $t -f=`find $B/build_vxworks -name \*.wpj -print` -for i in $f; do - grep -l -- "$B" $i >> $t -done -if test -s $t; then - echo "The following files contain absolute pathnames." - echo "They must be fixed before building the CD image:" - cat $t - exit 1 -fi - -# -# NOTE: We reuse the same sed script over several files. 
-# -cat < $s -s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g -s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g -s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/g -s#@DB_SETUP_DIR@#$D#g -ENDOFSEDTEXT - -f=$D/setup.pool -(sed -f $s $D/vx_setup.in) > $t - (echo "Building $f" && rm -f $f && cp $t $f) - -f=$D/README.TXT -(sed -f $s $D/README.in) > $t - (echo "Building $f" && rm -f $f && cp $t $f) - -f=$D/CONFIG.TCL -(sed -f $s $D/CONFIG.in) > $t - (echo "Building $f" && rm -f $f && cp $t $f) - -f=$D/filelist.demo -(sed -f $s $D/vx_demofile.in) > $t - (echo "Building $f" && rm -f $f && cp $t $f) - -# Copy the Sleepycat specific files into the SetupSDK area. -(cd $D && cp README.TXT $S) -(cd $D && cp LICENSE.TXT $S) -(cd $D && cp CONFIG.TCL $S/RESOURCE/TCL) -(cd $D && cp SETUP.BMP $S/RESOURCE/BITMAPS) - -# -# NOTE: The contents of LIB must be on one, long, single line. -# Even preserving it with a \ doesn't work for htmlBook. -# -f=../docs/LIB -(echo "Building $f" && rm -f $f) -cat <> $f -{BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH} {Sleepycat Software Berkeley DB} {BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH} {BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH} {Sleepycat BerkeleyDB} {} {} {} -ENDOFLIBTEXT - -# -# Start generating the file list. -f=$D/filelist.all - -# -# Just put everything into the image. But we only want to find regular -# files; we cannot have all the directories listed too. -# -# NOTE: This find is overly aggressive in getting files, particularly -# for the 'windlink/sleepycat' files. We actually end up with 3 sets of the -# documentation, the "real" ones in 'docs/BerkeleyDB*', the set found -# via 'windlink/sleepycat/Berk*/docs' and the one found via our symlink in -# 'windlink/sleepycat/Berk*/dist/vx_setup/docs/Berk*'. -# -# However, we waste a little disk space so that the expression below -# is trivial and we don't have to maintain it as new files/directories -# are added to DB. -# -(cd $D && find docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name \* -type f -print) > $t -(cd $D && find windlink/sleepycat/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name docs -prune -o -type f -print) >> $t -(echo "Building $f" && rm -f $f && cp $t $f) -# -# Finally build the CD image! -# -env PATH=$Q/$W/bin:$PATH QMS_BASE=$Q WIND_HOST_TYPE=$W \ -pool mfg -d $C -v -nokey BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR < $D/setup.pool diff --git a/storage/bdb/dist/vx_config.in b/storage/bdb/dist/vx_config.in deleted file mode 100644 index 539600d62ab..00000000000 --- a/storage/bdb/dist/vx_config.in +++ /dev/null @@ -1,451 +0,0 @@ -/* !!! - * The CONFIG_TEST option may be added using the Tornado project build. - * DO NOT modify it here. - */ -/* Define to 1 if you want to build a version for running the test suite. */ -/* #undef CONFIG_TEST */ - -/* We use DB_WIN32 much as one would use _WIN32 -- to specify that we're using - an operating system environment that supports Win32 calls and semantics. We - don't use _WIN32 because Cygwin/GCC also defines _WIN32, even though - Cygwin/GCC closely emulates the Unix environment. */ -/* #undef DB_WIN32 */ - -/* !!! - * The DEBUG option may be added using the Tornado project build. - * DO NOT modify it here. - */ -/* Define to 1 if you want a debugging version. */ -/* #undef DEBUG */ - -/* Define to 1 if you want a version that logs read operations. */ -/* #undef DEBUG_ROP */ - -/* Define to 1 if you want a version that logs write operations. 
*/ -/* #undef DEBUG_WOP */ - -/* !!! - * The DIAGNOSTIC option may be added using the Tornado project build. - * DO NOT modify it here. - */ -/* Define to 1 if you want a version with run-time diagnostic checking. */ -/* #undef DIAGNOSTIC */ - -/* Define to 1 if 64-bit types are available. */ -#define HAVE_64BIT_TYPES 1 - -/* Define to 1 if you have the `clock_gettime' function. */ -#define HAVE_CLOCK_GETTIME 1 - -/* Define to 1 if Berkeley DB release includes strong cryptography. */ -#define HAVE_CRYPTO 1 - -/* Define to 1 if you have the `directio' function. */ -/* #undef HAVE_DIRECTIO */ - -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -#define HAVE_DIRENT_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_DLFCN_H */ - -/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */ -#define HAVE_EXIT_SUCCESS 1 - -/* Define to 1 if you have the `fchmod' function. */ -/* #undef HAVE_FCHMOD */ - -/* Define to 1 if you have the `fcntl' function. */ -/* #undef HAVE_FCNTL */ - -/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */ -/* #undef HAVE_FCNTL_F_SETFD */ - -/* Define to 1 if you have the `fdatasync' function. */ -/* #undef HAVE_FDATASYNC */ - -/* Define to 1 if allocated filesystem blocks are not zeroed. */ -#define HAVE_FILESYSTEM_NOTZERO 1 - -/* Define to 1 if you have the `ftruncate' function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the `getcwd' function. */ -#define HAVE_GETCWD 1 - -/* Define to 1 if you have the `getopt' function. */ -/* #undef HAVE_GETOPT */ - -/* Define to 1 if you have the `getrusage' function. */ -/* #undef HAVE_GETRUSAGE */ - -/* Define to 1 if you have the `gettimeofday' function. */ -/* #undef HAVE_GETTIMEOFDAY */ - -/* Define to 1 if you have the `getuid' function. */ -/* #undef HAVE_GETUID */ - -/* Define to 1 if building Hash access method. */ -#define HAVE_HASH 1 - -/* Define to 1 if thread identifier type db_threadid_t is integral. */ -#define HAVE_INTEGRAL_THREAD_TYPE 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_INTTYPES_H */ - -/* Define to 1 if you have the `nsl' library (-lnsl). */ -/* #undef HAVE_LIBNSL */ - -/* Define to 1 if you have the `memcmp' function. */ -#define HAVE_MEMCMP 1 - -/* Define to 1 if you have the `memcpy' function. */ -#define HAVE_MEMCPY 1 - -/* Define to 1 if you have the `memmove' function. */ -#define HAVE_MEMMOVE 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `mlock' function. */ -/* #undef HAVE_MLOCK */ - -/* Define to 1 if you have the `mmap' function. */ -/* #undef HAVE_MMAP */ - -/* Define to 1 if you have the `munlock' function. */ -/* #undef HAVE_MUNLOCK */ - -/* Define to 1 if you have the `munmap' function. */ -/* #undef HAVE_MUNMAP */ - -/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */ -/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */ - -/* Define to 1 to use the AIX _check_lock mutexes. */ -/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */ - -/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */ -/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */ - -/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */ -/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */ - -/* Define to 1 to use the Apple/Darwin _spin_lock_try mutexes. */ -/* #undef HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY */ - -/* Define to 1 to use the UNIX fcntl system call mutexes. 
*/ -/* #undef HAVE_MUTEX_FCNTL */ - -/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes. - */ -/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */ - -/* Define to 1 to use the msem_XXX mutexes on HP-UX. */ -/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */ - -/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */ -/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */ - -/* Define to 1 to use the GCC compiler and MIPS assembly language mutexes. */ -/* #undef HAVE_MUTEX_MIPS_GCC_ASSEMBLY */ - -/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */ -/* #undef HAVE_MUTEX_MSEM_INIT */ - -/* Define to 1 to use the GCC compiler and PowerPC assembly language mutexes. - */ -/* #undef HAVE_MUTEX_PPC_GCC_ASSEMBLY */ - -/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */ -/* #undef HAVE_MUTEX_PTHREADS */ - -/* Define to 1 to use Reliant UNIX initspin mutexes. */ -/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */ - -/* Define to 1 to use the IBM C compiler and S/390 assembly language mutexes. - */ -/* #undef HAVE_MUTEX_S390_CC_ASSEMBLY */ - -/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */ -/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */ - -/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */ -/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */ - -/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */ -/* #undef HAVE_MUTEX_SEMA_INIT */ - -/* Define to 1 to use the SGI XXX_lock mutexes. */ -/* #undef HAVE_MUTEX_SGI_INIT_LOCK */ - -/* Define to 1 to use the Solaris _lock_XXX mutexes. */ -/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */ - -/* Define to 1 to use the Solaris lwp threads mutexes. */ -/* #undef HAVE_MUTEX_SOLARIS_LWP */ - -/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */ -/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */ - -/* Define to 1 if mutexes hold system resources. */ -#define HAVE_MUTEX_SYSTEM_RESOURCES 1 - -/* Define to 1 to configure mutexes intra-process only. */ -/* #undef HAVE_MUTEX_THREAD_ONLY */ - -/* Define to 1 to use the CC compiler and Tru64 assembly language mutexes. */ -/* #undef HAVE_MUTEX_TRU64_CC_ASSEMBLY */ - -/* Define to 1 to use the UNIX International mutexes. */ -/* #undef HAVE_MUTEX_UI_THREADS */ - -/* Define to 1 to use the UTS compiler and assembly language mutexes. */ -/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */ - -/* Define to 1 to use VMS mutexes. */ -/* #undef HAVE_MUTEX_VMS */ - -/* Define to 1 to use VxWorks mutexes. */ -#define HAVE_MUTEX_VXWORKS 1 - -/* Define to 1 to use the MSVC compiler and Windows mutexes. */ -/* #undef HAVE_MUTEX_WIN32 */ - -/* Define to 1 to use the GCC compiler and Windows mutexes. */ -/* #undef HAVE_MUTEX_WIN32_GCC */ - -/* Define to 1 to use the GCC compiler and amd64 assembly language mutexes. */ -/* #undef HAVE_MUTEX_X86_64_GCC_ASSEMBLY */ - -/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */ -/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */ - -/* Define to 1 if you have the header file, and it defines `DIR'. */ -/* #undef HAVE_NDIR_H */ - -/* Define to 1 if you have the O_DIRECT flag. */ -/* #undef HAVE_O_DIRECT */ - -/* Define to 1 if you have the `pread' function. */ -/* #undef HAVE_PREAD */ - -/* Define to 1 if you have the `pstat_getdynamic' function. */ -/* #undef HAVE_PSTAT_GETDYNAMIC */ - -/* Define to 1 if you have the `pthread_self' function. */ -/* #undef HAVE_PTHREAD_SELF */ - -/* Define to 1 if you have the `pwrite' function. */ -/* #undef HAVE_PWRITE */ - -/* Define to 1 if building on QNX. 
*/ -/* #undef HAVE_QNX */ - -/* Define to 1 if building Queue access method. */ -#define HAVE_QUEUE 1 - -/* Define to 1 if you have the `raise' function. */ -#define HAVE_RAISE 1 - -/* Define to 1 if you have the `rand' function. */ -#define HAVE_RAND 1 - -/* Define to 1 if building replication support. */ -#define HAVE_REPLICATION 1 - -/* Define to 1 if building RPC client/server. */ -/* #undef HAVE_RPC */ - -/* Define to 1 if you have the `sched_yield' function. */ -#define HAVE_SCHED_YIELD 1 - -/* Define to 1 if you have the `select' function. */ -#define HAVE_SELECT 1 - -/* Define to 1 if building sequence support. */ -/* #undef HAVE_SEQUENCE */ - -/* Define to 1 if you have the `shmget' function. */ -/* #undef HAVE_SHMGET */ - -/* Define to 1 if you have the `snprintf' function. */ -/* #undef HAVE_SNPRINTF */ - -/* Define to 1 if you have the `srand' function. */ -#define HAVE_SRAND 1 - -/* Define to 1 if building statistics support. */ -#define HAVE_STATISTICS 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_STDINT_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the `strcasecmp' function. */ -/* #undef HAVE_STRCASECMP */ - -/* Define to 1 if you have the `strdup' function. */ -/* #undef HAVE_STRDUP */ - -/* Define to 1 if you have the `strerror' function. */ -#define HAVE_STRERROR 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strtol' function. */ -#define HAVE_STRTOL 1 - -/* Define to 1 if you have the `strtoul' function. */ -#define HAVE_STRTOUL 1 - -/* Define to 1 if `st_blksize' is member of `struct stat'. */ -#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 - -/* Define to 1 if you have the `sysconf' function. */ -/* #undef HAVE_SYSCONF */ - -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -/* #undef HAVE_SYS_DIR_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_FCNTL_H */ - -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -/* #undef HAVE_SYS_NDIR_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_SELECT_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_STAT_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_TIME_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_TYPES_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if unlink of file with open file descriptors will fail. */ -#define HAVE_UNLINK_WITH_OPEN_FAILURE 1 - -/* Define to 1 if building access method verification support. */ -#define HAVE_VERIFY 1 - -/* Define to 1 if you have the `vsnprintf' function. */ -/* #undef HAVE_VSNPRINTF */ - -/* Define to 1 if building VxWorks. */ -#define HAVE_VXWORKS 1 - -/* Define to 1 if you have the `yield' function. */ -/* #undef HAVE_YIELD */ - -/* Define to 1 if you have the `_fstati64' function. */ -/* #undef HAVE__FSTATI64 */ - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "support@sleepycat.com" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "Berkeley DB" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "Berkeley DB __EDIT_DB_VERSION__" - -/* Define to the one symbol short name of this package. 
*/ -#define PACKAGE_TARNAME "db-__EDIT_DB_VERSION__" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "__EDIT_DB_VERSION__" - -/* Define to 1 if the `S_IS*' macros in do not work properly. */ -/* #undef STAT_MACROS_BROKEN */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Define to 1 if you can safely include both and . */ -/* #undef TIME_WITH_SYS_TIME */ - -/* Define to 1 to mask harmless uninitialized memory read/writes. */ -/* #undef UMRW */ - -/* Number of bits in a file offset, on hosts where this is settable. */ -/* #undef _FILE_OFFSET_BITS */ - -/* Define for large files, on AIX-style hosts. */ -/* #undef _LARGE_FILES */ - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* - * Exit success/failure macros. - */ -#ifndef HAVE_EXIT_SUCCESS -#define EXIT_FAILURE 1 -#define EXIT_SUCCESS 0 -#endif - -/* - * Don't step on the namespace. Other libraries may have their own - * implementations of these functions, we don't want to use their - * implementations or force them to use ours based on the load order. - */ -#ifndef HAVE_GETCWD -#define getcwd __db_Cgetcwd -#endif -#ifndef HAVE_GETOPT -#define getopt __db_Cgetopt -#define optarg __db_Coptarg -#define opterr __db_Copterr -#define optind __db_Coptind -#define optopt __db_Coptopt -#endif -#ifndef HAVE_MEMCMP -#define memcmp __db_Cmemcmp -#endif -#ifndef HAVE_MEMCPY -#define memcpy __db_Cmemcpy -#endif -#ifndef HAVE_MEMMOVE -#define memmove __db_Cmemmove -#endif -#ifndef HAVE_RAISE -#define raise __db_Craise -#endif -#ifndef HAVE_SNPRINTF -#define snprintf __db_Csnprintf -#endif -#ifndef HAVE_STRCASECMP -#define strcasecmp __db_Cstrcasecmp -#define strncasecmp __db_Cstrncasecmp -#endif -#ifndef HAVE_STRERROR -#define strerror __db_Cstrerror -#endif -#ifndef HAVE_VSNPRINTF -#define vsnprintf __db_Cvsnprintf -#endif - -/* - * !!! - * The following is not part of the automatic configuration setup, but - * provides the information necessary to build Berkeley DB on VxWorks. - */ -#include "vxWorks.h" diff --git a/storage/bdb/dist/vx_setup/CONFIG.in b/storage/bdb/dist/vx_setup/CONFIG.in deleted file mode 100644 index 1fccd1d2ed6..00000000000 --- a/storage/bdb/dist/vx_setup/CONFIG.in +++ /dev/null @@ -1,10 +0,0 @@ -# -# Install configuration file. -# -# Note: This file may be modified during the pool manufacturing process to -# add additional configuration statements. This file is sourced by -# INSTW32.TCL. -# - -cdromDescSet "Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@" - diff --git a/storage/bdb/dist/vx_setup/LICENSE.TXT b/storage/bdb/dist/vx_setup/LICENSE.TXT deleted file mode 100644 index 812dce00d49..00000000000 --- a/storage/bdb/dist/vx_setup/LICENSE.TXT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright (c) 1996-2005 - Sleepycat Software. All rights reserved. -See the file LICENSE for redistribution information. diff --git a/storage/bdb/dist/vx_setup/MESSAGES.TCL b/storage/bdb/dist/vx_setup/MESSAGES.TCL deleted file mode 100644 index 718a67fbc50..00000000000 --- a/storage/bdb/dist/vx_setup/MESSAGES.TCL +++ /dev/null @@ -1,651 +0,0 @@ -# MESSAGES.TCL - All setup strings. - -# modification history -# -------------------- -# 03q,20apr99,bjl added release notes message for backward compatibility -# page. -# 03p,12apr99,wmd Add word about simulator in message about the drivers -# object product. 
-# 03o,03mar99,tcy Adjust setup directory size based on platform (fix for -# SPR 25228) -# 03n,24feb99,tcy modified DLL update messages -# 03m,22feb99,tcy modified to align messages -# 03l,17feb99,tcy modified message in the finish page for program group -# installation -# 03k,11feb99,tcy added messages for backward compatibility page -# 03j,25jan99,tcy added messages from INSTW32.TCL -# 03i,25jan99,wmd Reword the message for 5010_DRIVERS_INFO. -# 03h,09dec98,bjl added messages about manufacturers updating patches. -# 03g,01dec98,wmd Fix typos. -# 03f,23nov98,tcy warn user to disable virus protection on Welcome screen -# 03e,19nov98,wmd fixed minor nits in wording. -# 03d,19nov98,bjl added web site locations for patchinfo. -# 03c,18nov98,bjl added formatted patch messages for patchinfo file. -# 03b,12nov98,tcy added message for not saving installation key -# 03a,10nov98,tcy added warning message for space in destination directory -# removed message for checking temporary disk space -# 02z,27oct98,bjl added recommended patch messages, modified required msg. -# 02y,26oct98,tcy added message for checking temporary disk space -# 02x,22oct98,wmd fix messages for clarity. -# 02w,21oct98,wmd fix message for drv/obj. -# 02v,20oct98,tcy added message for updating system and changed dcom message -# 02u,20oct98,bjl added tornado registry name entry message. -# 02t,19oct98,bjl added tornado registry description message. -# 02s,16oct98,wmd add new message for driver product warning. -# 02r,16oct98,wmd fixed README.TXT description. -# 02q,12oct98,tcy removed extraneous "the" from messages -# 02p,06oct98,tcy added CD description to Welcome page -# 02o,29sep98,bjl added required patches message 5000_PATCHES_TEXT. -# 02n,29sep98,wmd add text for readme page -# 02m,29sep98,tcy refined DLL registration page text -# 02l,29sep98,tcy changed message for DCOM -# 02k,26sep98,tcy added messages for DLL and DCOM pages -# 02j,24sep98,tcy removed "following" from 1080_WARN_4 message. -# 02i,17sep98,tcy added comment on size of SETUP files to 1140_COMP_SELECT. -# 02h,17sep98,wmd reword message 1080_WARN_4. -# 02g,14sep98,tcy changed 1210_FINISH and 1550_USAGE messages -# 02f,08sep98,tcy warn user library update may take several minutes -# 02e,01sep98,wmd reword message for installing over tree. -# added new messages for license agreement pages. -# 02d,20aug98,wmd added message for license agreeement. -# 02c,18aug98,tcy added message for zip-file dialog box -# 02d,04aug98,wmd added newer/older duplicate file warnings. -# 02c,24jul98,tcy added system check messages -# 02b,16jul98,wmd add new messages for T-2. -# 02a,22jul98,tcy moved license messages to LICW32.TCL; -# removed portMapper messages -# 01n,09feb98,pdn updated string 1080_WARN_4 -# 01m,08apr97,pdn added new string for remote icon installing -# fixed spr#8334 -# 01l,08mar97,tcy fixed language in string id 3340 -# 01k,07mar97,tcy added string id 3340 -# 01j,10feb97,pdn added more license messages. -# 01i,09feb97,pdn implemented variable argument list for strTableGet(), -# clean up. -# 01h,17jan97,jmo fixed language in strings -# 01g,12dec96,tcy merged in TEXT-only strings -# 01f,12dec96,pdn added 1080_WARN_4 string warning that CD-ROM -# revision is older than expected. -# 01e,27nov96,sj added string for warning against installing in -# the root of windows drive. 
-# 01d,18nov96,tcy added strings for text-based installation script -# 01c,14nov96,pdn substituted function for some global variables -# 01b,14nov96,sj added strings from Windows installation script -# 01a,11nov96,pdn written - -proc strTableGet {strId args} { - global strTable - global setupVals - global current_file - - if [regexp {^format.*$} $strTable($strId) junk] { - return [eval $strTable($strId)] - } { - return $strTable($strId) - } -} - -set strTable(1000_WELCOME_CD) \ - "format %s \"[cdNameGet description]\"" - -set strTable(1000_WELCOME1) \ - "format %s \"Welcome to the SETUP program. This program will\ - install \[cdromDescGet\] on your computer.\"" - -set strTable(1010_WELCOME2) \ - "It is strongly recommended that you exit all programs and disable virus\ - protection before running this SETUP program." - -set strTable(1020_WELCOME3) \ - "At any time, you can quit the SETUP program by clicking the \ - button. You also can go back to previous dialog boxes by clicking the\ - button. To accept the current settings for a dialog box and go on\ - with the installation process, click the button." - -set strTable(3020_WELCOME3) \ - "format %s \"At any prompt, you can cancel installation \[cdromDescGet\]\ - by typing \'exit\'. You can also go to the previous question\ - by typing \'-\'. To accept current settings and go on with\ - the installation process, press .\"" - -set strTable(1030_WELCOME4) \ - "WARNING: This program is protected by copyright law and international\ - treaties." - -set strTable(1040_WELCOME5) \ - "Unauthorized reproduction or distribution of this program, or any portion\ - of it, may result in severe civil and criminal penalties, and will be\ - prosecuted to the maximum extent possible under law." - -set strTable(1050_ROOT_WARN) \ - "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\] is not\ - recommended. We suggest that you logoff and logon as a normal\ - user before running this program.\ - \n\nClick Next to continue with SETUP anyway.\"" - -set strTable(3050_ROOT_WARN) \ - "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\]\ - is not recommended. We suggest that you logoff and \ - logon as a normal user before running this program.\ - \n\nPress to continue with SETUP anyway.\"" - -set strTable(1051_ROOT_WARN) \ - "format %s \"Installing \[cdromDescGet\] without System Administrator\ - privileges is not recommended. Under your present privileges,\ - SETUP will not offer certain installation options, such as \ - the installation of some services, etc. Also, the software\ - will be installed as a personal copy and will not be visible\ - to other users on this machine.\ - \n\nTo install \[cdromDescGet\] with access to all its\ - installation features and options, we suggest that you exit\ - the installation now and rerun it later with System\ - Administrator\'s privileges.\n\nClick to continue with\ - SETUP anyway.\"" - -set strTable(1060_REGISTRATION) \ - "Below, type your name, the name of your company." - -set strTable(1070_WARN_1) \ - "The installation key you entered is invalid. Please enter a valid\ - installation key." - -set strTable(1071_WARN_1) \ - "Please enter the requested information." - -set strTable(1080_WARN_2) \ - "You entered a key that was not created for this CD-ROM. Please verify\ - that you are using the appropriate key. If this problem persists, contact\ - Wind River Systems Sales department for help." 
- -set strTable(1080_WARN_3) \ - "The installation key you entered is meant for other vendor's CD-ROM.\ - Please contact the vendor who issued the CD-ROM for a proper key." - -set strTable(1085_WARN_4) \ - "This CD-ROM does not require an installation key. Click the \"Next\"\ - button to continue the installation." - -set strTable(1090_WARN_3) \ - "format %s \"Can\'t initiate SETUP: \[lindex \$args 0\]. Please correct\ - the problem then run SETUP again.\"" - -set strTable(1095_WARN_NO_TCPIP) \ - "SETUP has detected that your system does not have TCP-IP installed.\ - To correct the problem, please contact your administrator and then\ - run SETUP again.\nAborting setup." - -set strTable(1097_WARN_NO_LONGFILENAME_SUP) \ - "SETUP has detected that your system does not have long filename\ - support. To correct the problem, please contact your administrator\ - and then run SETUP again.\nAborting setup." - -set strTable(1105_FULL_INSTALL) \ - "Installs the Tornado products, tools, compilers, and other optional\ - components that you may have purchased." - -set strTable(1107_PROGRAM_GROUP) \ -"Installs only the Tornado program group and tools icons for access to\ - Tornado tools installed on a remote server." - -set strTable(1100_DEST_DIR) \ - "format %s \"Please type the name of the directory where you want SETUP to\ - install \[cdromDescGet\].\ - \n\nClick the button to choose the directory\ - interactively.\"" - -set strTable(1100_REMOTE_DIR) \ - "format %s \"Please type the name of the directory where Tornado has\ - already been installed.\ - \n\nClick the button to choose the directory\ - interactively.\"" - -set strTable(3100_DEST_DIR) \ - "format %s \"Please type the name of the directory where you want SETUP\ - to install \[cdromDescGet\].\"" - -set strTable(1110_DEST_DIR_WARN) \ - "The installation directory you entered does not exist.\ - \nDo you want to create it now?" - -set strTable(3110_DEST_DIR_WARN) \ - "The installation directory you entered does not exist." - -set strTable(3115_DEST_DIR_QUESTION) \ - "Do you want to create it now? \[y\]" - -set strTable(1111_DEST_DIR_WARN) \ - "format %s \"Installing \[cdromDescGet\] in the root directory is not\ - recommended.\nClick to select another directory.\"" - -set strTable(1120_DEST_DIR_WARN2) \ - "format %s \"Creating \[destDirGet\] failed: file exists.\"" - -set strTable(1121_DEST_DIR_WARN2) \ - "format %s \"Installing in \[destDirGet\] is not recommended.\ - \nDo you want to change the installation directory?\"" - -set strTable(1122_DEST_DIR_WARN2) \ - "format %s \"Unable to create \[destDirGet\].\"" - -set strTable(1130_DEST_DIR_WARN3) \ - "You do not have permission to write files into the installation directory\ - you entered.\ - \n\nPlease choose a writable directory." - -set strTable(1135_DEST_DIR_WARN4) \ - "format %s \"The installation directory you entered contains white\ - space(s). Please select another directory.\"" - -set strTable(1137_DUP_PRODUCT_WARN) \ - "format %s \"Reinstalling products may potentially destroy any\ - modifications you may have made to previously installed files.\ - Do you wish to continue with the installation or go back to the\ - '\[strTableGet 1450_TITLE_OPTION\]' page to reconsider your choices?\"" - -set strTable(3155_COMP_SELECT_QUESTION) \ - "Do you want to go back and specify a directory on a bigger partition?\ - \[y\]" - -set strTable(1140_COMP_SELECT) \ - "format %s \"In the option list below, please check all items you wish\ - to install. 
SETUP files will be copied to your selected directory and\ - take up \[setupSizeGet\] MB of disk space.\n\"" - -set strTable(3140_COMP_SELECT) \ - "In the option list below, select the item(s) you want to install." - -set strTable(3145_COMP_SELECT_CHANGE) \ - "Press to accept the setting. To change the setting, enter a\ - list of item numbers separated by spaces." - -set strTable(3145_COMP_SELECT_CHANGE_INVALID) \ - "The item number(s) you entered is not valid." - -set strTable(1150_COMP_SELECT_WARN) \ - "There is not enough disk space to install the selected component(s).\ - \n\nDo you want to go back and specify a directory on a bigger disk or\ - partition?" - -set strTable(3150_COMP_SELECT_WARN) \ - "There is not enough space to install the selected component(s)." - -set strTable(1151_COMP_SELECT_WARN) \ - "At least one component must be selected to continue installation." - -set strTable(1160_PERMISSION) \ - "SETUP is about to install the component(s) you have requested.\ - \n\nThe selected button(s) below indicate the file permissions which\ - will be set during the installation process.\ - \n\nPlease adjust these to suit your site requirements." - -set strTable(3160_PERMISSION) \ - "SETUP is about to install the component(s) you have requested." - -set strTable(3162_PERMISSION) \ - "The list below indicates the file permissions which will be set during\ - the installation process. Please adjust these to suit your site\ - requirements." - -set strTable(3165_PERMISSION_QUESTION) \ - "Press to accept the setting. To change the setting, enter a\ - list of item numbers separated by spaces." - -set strTable(1161_FOLDER_SELECT) \ - "SETUP will add program icons to the Program Folder listed below. You may\ - type a new folder name, or select one from the existing Folders list." - -set strTable(1162_FOLDER_SELECT) \ - "Please enter a valid folder name." - -set strTable(1170_FILE_COPY) \ - "format %s \"SETUP is copying the selected component(s) to the directory\ - \[destDirGet\].\"" - -set strTable(1171_FILE_COPY) \ - "format %s \"SETUP cannot read \[setupFileNameGet 0\] from the CD-ROM.\ - Please ensure that the CD-ROM is properly mounted.\"" - -set strTable(1180_LIB_UPDATE) \ - "SETUP is updating the VxWorks libraries. We recommend that you let\ - SETUP finish this step, or the libraries will be in an inconsistent\ - state. Please be patient as the process may take several minutes. \ - If you want to quit the SETUP program, click and run\ - the SETUP program again at a later time." - -set strTable(3180_LIB_UPDATE) \ - "SETUP is updating the VxWorks libraries." - -set strTable(1190_REGISTRY_HOST) \ - "The Tornado Registry is a daemon that keeps track of all available\ - targets by name. Only one registry is required on your network, \ - and it can run on any networked host.\ - \n\nPlease enter the name of the host where the Tornado Registry will\ - be running." - -set strTable(1191_REGISTRY_DESC) \ - "The Tornado Registry is a daemon that keeps track of all available\ - targets by name. Only one registry is required on your network, \ - and it can run on any networked host." - -set strTable(1192_REGISTRY_NAME) \ - "Please enter the name of the host where the Tornado Registry will\ - be running." - -set strTable(1200_FINISH_WARN) \ - "format %s \"However, there were \[errorCountGet\] error(s) which occured\ - during the process. 
Please review the log file\ - \[destDirDispGet\]/setup.log for more information.\"" - -set strTable(1210_FINISH) \ - "format %s \"SETUP has completed installing the selected product(s).\"" - -set strTable(1212_FINISH) \ - "SETUP has completed installing the program folders and icons." - -set strTable(1213_FINISH) \ - "Terminating SETUP program." - -set strTable(1360_QUIT_CALLBACK) \ - "format %s \"SETUP is not complete. If you quit the SETUP program now,\ - \[cdromDescGet\] will not be installed.\n\nYou may run\ - the SETUP program at a later time to complete the\ - installation.\ - \n\nTo continue installing the program, click . \ - To quit the SETUP program, click .\"" - -set strTable(3360_QUIT_CALLBACK) \ - "format %s \"SETUP is not complete. If you quit the SETUP program now,\ - \[cdromDescGet\] will not be installed.\n\nYou may run the\ - SETUP program at a later time to complete the installation.\ - \n\nTo continue installing the program, Press . \ - To quit the SETUP program, type \'exit\'.\"" - -set strTable(1370_FILE_ACCESS_ERROR) \ - "format %s \"SETUP cannot create/update file \[lindex \$args 0\]:\ - \[lindex \$args 1\]\"" - -set strTable(1380_DEFLATE_ERROR) \ - "format %s \"SETUP isn\'t able to deflate \[setupFileNameGet 0\]\ - \n\nPlease select one of the following options\ - to continue with the SETUP process.\"" - -set strTable(1390_MEMORY_LOW) \ - "The system is running out of memory. To continue, close applications\ - or increase the system swap space." - -set strTable(1400_DISK_FULL) \ - "No disk space left. To continue, free up some disk space." - -set strTable(1550_USAGE) \ - "Usage: SETUP /I\[con\]\]\t\n\ - /I : Add standard Tornado icons \n\ - from a remote installation" - -set strTable(1410_TITLE_WELCOME) "Welcome" -set strTable(1420_TITLE_WARNING) "Warning" -set strTable(1430_TITLE_REGISTRATION) "User Registration" -set strTable(1440_TITLE_DESTDIR) "Select Directory" -set strTable(1450_TITLE_OPTION) "Select Products" -set strTable(1460_TITLE_PERMISSION) "Permission" -set strTable(1470_TITLE_FILECOPY) "Copying Files" -set strTable(1480_TITLE_LIBUPDATE) "Update Libraries" -set strTable(1490_TITLE_REGISTRY_HOST) "Tornado Registry" -set strTable(1495_TITLE_BACKWARD_COMPATIBILITY) "Backward Compatibility" -set strTable(1500_TITLE_FINISH) "Finish" -set strTable(1560_TITLE_FOLDER) "Select Folder" -set strTable(1563_TITLE_DLL_REG) "Software Registration" -set strTable(1567_TITLE_DCOM) "DCOM Installation" - -set strTable(1570_OPTION_SELECT) \ - "Choose one of the options listed below, then click the\ - button to continue the installation." - -set strTable(1576_OPTION_MANUAL) \ - "Install Tornado Registry manually" - -set strTable(1577_OPTION_STARTUP) \ - "Install Tornado Registry locally in the Startup Group" - -set strTable(1578_OPTION_SERVICE) \ - "Install Tornado Registry locally as a Service" - -set strTable(1579_OPTION_REMOTE) \ - "Configure to use a remote Tornado Registry" - -set strTable(1580_OPTION_DESC) \ - "If you plan on running Tornado in a non-networked environment, we\ - recommend that you install the registry in your Startup Group or as an\ - NT Service. For more information, consult your Tornado User\'s Guide." - -set strTable(1581_OPTION_DESC) \ - "If you plan on running Tornado in a non-networked environment, we\ - recommend that you install the registry in your Startup Group. For more\ - information, consult your Tornado User\'s Guide." 
- -set strTable(3000_RETURN_QUESTION) \ - "Press to continue" - -set strTable(3055_EXIT_QUESTION) \ - "Type \'exit\' to quit the program or press to continue" - -set strTable(3370_BACK_CALLBACK) \ - "Cannot go back further." - -set strTable(1080_WARN_4) \ - "The installation key you entered attempted to unlock one or more \ - products that may have been removed from our product line. \ - Please compare the unlocked product list on the\ - \"[strTableGet 1450_TITLE_OPTION]\" screen with your purchased order\ - list, and contact us if you discover any differences." - -set strTable(4000_BASE_INSTALL_WARN) \ - "format %s \"Warning! Re-installing Tornado over an existing \ - tree will overwrite any installed patches. \ - If you proceed with the installation, please \ - re-install patches if any.\"" - -set strTable(4000_BASE_INSTALL_WARN_1) \ - "Select to overwrite existing Tornado installation,\ - or choose