Merge bk-internal.mysql.com:/data0/bk/mysql-5.1
into bk-internal.mysql.com:/data0/bk/mysql-5.1-kt

commit d03b17b9ae
Author: unknown
Date:   2006-08-16 15:58:25 +02:00

554 changed files with 5432 additions and 194755 deletions

View file

@@ -38,7 +38,6 @@ EXTRA_DIST = FINISH.sh \
compile-pentium-debug-max \
compile-pentium-debug-max-no-embedded \
compile-pentium-debug-max-no-ndb \
compile-pentium-debug-no-bdb \
compile-pentium-debug-openssl \
compile-pentium-debug-yassl \
compile-pentium-gcov \

View file

@@ -52,7 +52,6 @@ fi
--with-csv-storage-engine \
--with-example-storage-engine \
--with-federated-storage-engine \
--with-berkeley-db \
--with-innodb \
--with-ssl \
--enable-thread-safe-client \

View file

@@ -1,9 +0,0 @@
#! /bin/sh
path=`dirname $0`
. "$path/SETUP.sh"
extra_flags="$pentium_cflags $debug_cflags"
extra_configs="$pentium_configs $debug_configs --without-berkeley-db $static_link"
. "$path/FINISH.sh"

View file

@@ -37,7 +37,7 @@ gmake -k clean || true
path=`dirname $0`
. "$path/autorun.sh"
CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-berkeley-db --with-embedded-server --with-innodb $EXTRA_CONFIG_FLAGS
CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-embedded-server --with-innodb $EXTRA_CONFIG_FLAGS
gmake -j 4

View file

@@ -131,9 +131,6 @@ ADD_SUBDIRECTORY(client)
IF(WITH_ARCHIVE_STORAGE_ENGINE)
ADD_SUBDIRECTORY(storage/archive)
ENDIF(WITH_ARCHIVE_STORAGE_ENGINE)
IF(WITH_BERKELEY_STORAGE_ENGINE)
ADD_SUBDIRECTORY(storage/bdb)
ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
IF(WITH_BLACKHOLE_STORAGE_ENGINE)
ADD_SUBDIRECTORY(storage/blackhole)
ENDIF(WITH_BLACKHOLE_STORAGE_ENGINE)

View file

@@ -32,7 +32,6 @@ sinclude(config/ac-macros/check_cpu.m4)
sinclude(config/ac-macros/character_sets.m4)
sinclude(config/ac-macros/compiler_flag.m4)
sinclude(config/ac-macros/plugins.m4)
sinclude(config/ac-macros/ha_berkeley.m4)
sinclude(config/ac-macros/ha_ndbcluster.m4)
sinclude(config/ac-macros/large_file.m4)
sinclude(config/ac-macros/misc.m4)
@@ -2142,12 +2141,6 @@ MYSQL_CHECK_SSL
# functions tested above
#--------------------------------------------------------------------
MYSQL_STORAGE_ENGINE(berkeley, berkeley-db, [BerkeleyDB Storage Engine],
[Transactional Tables using BerkeleyDB], [max,max-no-ndb])
MYSQL_PLUGIN_DIRECTORY(berkeley,[storage/bdb])
MYSQL_PLUGIN_STATIC(berkeley, [[\$(bdb_libs_with_path)]])
MYSQL_PLUGIN_ACTIONS(berkeley, [MYSQL_SETUP_BERKELEY_DB])
MYSQL_STORAGE_ENGINE(blackhole,,[Blackhole Storage Engine],
[Basic Write-only Read-never tables], [max,max-no-ndb])
MYSQL_PLUGIN_DIRECTORY(blackhole, [storage/blackhole])

View file

@@ -16,7 +16,6 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include
${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
${CMAKE_SOURCE_DIR}/zlib
)
@@ -84,9 +83,6 @@ ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
IF(WITH_INNOBASE_STORAGE_ENGINE)
ADD_DEPENDENCIES(mysqlserver innobase)
ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
IF(WITH_BERKELEY_STORAGE_ENGINE)
ADD_DEPENDENCIES(mysqlserver bdb)
ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
ADD_LIBRARY(libmysqld MODULE cmake_dummy.c libmysqld.def)
TARGET_LINK_LIBRARIES(libmysqld wsock32)

View file

@@ -45,7 +45,7 @@ noinst_HEADERS = embedded_priv.h emb_qcache.h
sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
ha_innodb.cc ha_berkeley.cc ha_federated.cc ha_ndbcluster.cc \
ha_innodb.cc ha_federated.cc ha_ndbcluster.cc \
ha_ndbcluster_binlog.cc ha_partition.cc \
handler.cc sql_handler.cc \
hostname.cc init.cc password.c \
@@ -96,10 +96,6 @@ yassl_inc_libs= $(top_srcdir)/extra/yassl/src/.libs/libyassl.a \
endif
# Storage engine specific compilation options
ha_berkeley.o: ha_berkeley.cc
$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
ha_ndbcluster.o:ha_ndbcluster.cc
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<

View file

@@ -3,7 +3,6 @@
#
-- source include/not_embedded.inc
-- source include/have_bdb.inc
-- source include/have_innodb.inc
-- source include/have_debug.inc
@@ -12,7 +11,7 @@ drop table if exists t1, t2;
--enable_warnings
reset master;
create table t1 (a int) engine=bdb;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
begin;
insert t1 values (5);

View file

@@ -1,4 +0,0 @@
-- require r/have_bdb.require
disable_query_log;
show variables like "have_bdb";
enable_query_log;

View file

@@ -80,7 +80,7 @@ basedir=.
EXTRA_ARG="--language=../sql/share/english/ --character-sets-dir=../sql/share/charsets/"
fi
mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --skip-bdb --tmpdir=. $EXTRA_ARG"
mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --tmpdir=. $EXTRA_ARG"
echo "running $mysqld_boot"
if $scriptdir/mysql_create_system_tables test $mdata $hostname | $mysqld_boot

View file

@@ -2122,7 +2122,6 @@ sub install_db ($$) {
mtr_add_arg($args, "--datadir=%s", $data_dir);
mtr_add_arg($args, "--skip-innodb");
mtr_add_arg($args, "--skip-ndbcluster");
mtr_add_arg($args, "--skip-bdb");
mtr_add_arg($args, "--tmpdir=.");
if ( ! $opt_netware )
@@ -2215,7 +2214,6 @@ basedir = $path_my_basedir
server_id = $server_id
skip-stack-trace
skip-innodb
skip-bdb
skip-ndbcluster
EOF
;
@@ -2629,7 +2627,6 @@ sub mysqld_arguments ($$$$$) {
if ( $opt_valgrind_mysqld )
{
mtr_add_arg($args, "%s--skip-safemalloc", $prefix);
mtr_add_arg($args, "%s--skip-bdb", $prefix);
}
my $pidfile;

View file

@@ -536,8 +536,8 @@ while test $# -gt 0; do
--valgrind | --valgrind-all)
find_valgrind;
VALGRIND=$FIND_VALGRIND
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc --skip-bdb"
EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc --skip-bdb"
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc"
EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc"
SLEEP_TIME_AFTER_RESTART=10
SLEEP_TIME_FOR_DELETE=60
USE_RUNNING_SERVER=0

View file

@@ -6,26 +6,26 @@ Table Op Msg_type Msg_text
test.t4 backup error Failed copying .frm file (errno: X)
test.t4 backup status Operation failed
Warnings:
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
Error 1 Can't create/write to file 'MYSQLTEST_VARDIR/bogus/t4.frm' (Errcode: X)
backup table t4 to '../tmp';
Table Op Msg_type Msg_text
test.t4 backup status OK
Warnings:
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
backup table t4 to '../tmp';
Table Op Msg_type Msg_text
test.t4 backup error Failed copying .frm file (errno: X)
test.t4 backup status Operation failed
Warnings:
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
Error 1 Can't create/write to file 'MYSQLTEST_VARDIR/tmp/t4.frm' (Errcode: X)
drop table t4;
restore table t4 from '../tmp';
Table Op Msg_type Msg_text
test.t4 restore status OK
Warnings:
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
select count(*) from t4;
count(*)
0
@@ -35,19 +35,19 @@ backup table t1 to '../tmp';
Table Op Msg_type Msg_text
test.t1 backup status OK
Warnings:
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
drop table t1;
restore table t1 from '../bogus';
Table Op Msg_type Msg_text
t1 restore error Failed copying .frm file
Warnings:
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
Error 29 File 'MYSQLTEST_VARDIR/bogus/t1.frm' not found (Errcode: X)
restore table t1 from '../tmp';
Table Op Msg_type Msg_text
test.t1 restore status OK
Warnings:
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
select n from t1;
n
23
@@ -62,7 +62,7 @@ Table Op Msg_type Msg_text
test.t2 backup status OK
test.t3 backup status OK
Warnings:
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
drop table t1,t2,t3;
restore table t1,t2,t3 from '../tmp';
Table Op Msg_type Msg_text
@@ -70,7 +70,7 @@ test.t1 restore status OK
test.t2 restore status OK
test.t3 restore status OK
Warnings:
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
select n from t1;
n
23
@@ -91,7 +91,7 @@ restore table t1 from '../tmp';
Table Op Msg_type Msg_text
test.t1 restore status OK
Warnings:
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
rename table t1 to t5;
lock tables t5 write;
backup table t5 to '../tmp';
@@ -99,7 +99,7 @@ unlock tables;
Table Op Msg_type Msg_text
test.t5 backup status OK
Warnings:
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
drop table t5;
DROP TABLE IF EXISTS `t+1`;
CREATE TABLE `t+1` (c1 INT);
@@ -108,13 +108,13 @@ BACKUP TABLE `t+1` TO '../tmp';
Table Op Msg_type Msg_text
test.t+1 backup status OK
Warnings:
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
DROP TABLE `t+1`;
RESTORE TABLE `t+1` FROM '../tmp';
Table Op Msg_type Msg_text
test.t+1 restore status OK
Warnings:
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
SELECT * FROM `t+1`;
c1
1

View file

@@ -1,11 +0,0 @@
drop table if exists t1;
create table t1(objid BIGINT not null, tablename varchar(64), oid BIGINT not null, test BIGINT, PRIMARY KEY (objid), UNIQUE(tablename)) engine=BDB;
insert into t1 values(1, 't1',4,9);
insert into t1 values(2, 'metatable',1,9);
insert into t1 values(3, 'metaindex',1,9 );
select * from t1;
objid tablename oid test
1 t1 4 9
2 metatable 1 9
3 metaindex 1 9
alter table t1 drop column test;

View file

@@ -1,6 +0,0 @@
select * from t1;
objid tablename oid
1 t1 4
2 metatable 1
3 metaindex 1
drop table t1;

View file

@@ -1,39 +0,0 @@
drop table if exists t1;
CREATE TABLE t1 (
ChargeID int(10) unsigned NOT NULL auto_increment,
ServiceID int(10) unsigned DEFAULT '0' NOT NULL,
ChargeDate date DEFAULT '0000-00-00' NOT NULL,
ChargeAmount decimal(20,2) DEFAULT '0.00' NOT NULL,
FedTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
ProvTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
ChargeStatus enum('New','Auth','Unauth','Sale','Denied','Refund')
DEFAULT 'New' NOT NULL,
ChargeAuthorizationMessage text,
ChargeComment text,
ChargeTimeStamp varchar(20),
PRIMARY KEY (ChargeID),
KEY ServiceID (ServiceID),
KEY ChargeDate (ChargeDate)
) engine=BDB;
BEGIN;
INSERT INTO t1
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
COMMIT;
BEGIN;
UPDATE t1 SET ChargeAuthorizationMessage = 'blablabla' WHERE
ChargeID = 1;
COMMIT;
INSERT INTO t1
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
select * from t1;
ChargeID ServiceID ChargeDate ChargeAmount FedTaxes ProvTaxes ChargeStatus ChargeAuthorizationMessage ChargeComment ChargeTimeStamp
1 1 2001-03-01 1.00 1.00 1.00 New blablabla NULL now
2 1 2001-03-01 1.00 1.00 1.00 New NULL NULL now
drop table t1;
create table t1 (a int) engine=bdb;
set autocommit=0;
insert into t1 values(1);
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
drop table t1;

View file

@@ -1,31 +0,0 @@
drop table if exists t1,t2;
create table t1 (id integer, x integer) engine=BDB;
create table t2 (id integer, x integer) engine=BDB;
insert into t1 values(0, 0);
insert into t2 values(0, 0);
set autocommit=0;
update t1 set x = 1 where id = 0;
set autocommit=0;
update t2 set x = 1 where id = 0;
select x from t1 where id = 0;
select x from t2 where id = 0;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
commit;
x
1
commit;
select * from t1;
id x
0 1
select * from t2;
id x
0 1
commit;
select * from t1;
id x
0 1
select * from t2;
id x
0 1
commit;
drop table t1,t2;

File diff suppressed because it is too large

View file

@@ -1,99 +0,0 @@
drop table if exists t1, t2, t3;
flush status;
set autocommit=0;
create table t1 (a int not null) engine=bdb;
insert into t1 values (1),(2),(3);
select * from t1;
a
1
2
3
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
drop table t1;
set autocommit=1;
create table t1 (a int not null) engine=bdb;
begin;
insert into t1 values (1),(2),(3);
select * from t1;
a
1
2
3
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
drop table t1;
create table t1 (a int not null) engine=bdb;
create table t2 (a int not null) engine=bdb;
create table t3 (a int not null) engine=bdb;
insert into t1 values (1),(2);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
select * from t1;
a
1
2
select * from t2;
a
1
2
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
begin;
select * from t1;
a
1
2
select * from t2;
a
1
2
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
insert into t1 values (3);
insert into t2 values (3);
insert into t1 values (4);
select * from t1;
a
1
2
3
4
select * from t2;
a
1
2
3
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
commit;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
drop table if exists t1, t2, t3;

View file

@@ -1,462 +0,0 @@
SET storage_engine=bdb;
DROP TABLE IF EXISTS t1, gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
CREATE TABLE gis_point (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT);
CREATE TABLE gis_line (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g LINESTRING);
CREATE TABLE gis_polygon (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POLYGON);
CREATE TABLE gis_multi_point (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTIPOINT);
CREATE TABLE gis_multi_line (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTILINESTRING);
CREATE TABLE gis_multi_polygon (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTIPOLYGON);
CREATE TABLE gis_geometrycollection (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g GEOMETRYCOLLECTION);
CREATE TABLE gis_geometry (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g GEOMETRY);
SHOW CREATE TABLE gis_point;
Table Create Table
gis_point CREATE TABLE `gis_point` (
`fid` int(11) NOT NULL AUTO_INCREMENT,
`g` point DEFAULT NULL,
PRIMARY KEY (`fid`)
) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1
SHOW FIELDS FROM gis_point;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g point YES NULL
SHOW FIELDS FROM gis_line;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g linestring YES NULL
SHOW FIELDS FROM gis_polygon;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g polygon YES NULL
SHOW FIELDS FROM gis_multi_point;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g multipoint YES NULL
SHOW FIELDS FROM gis_multi_line;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g multilinestring YES NULL
SHOW FIELDS FROM gis_multi_polygon;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g multipolygon YES NULL
SHOW FIELDS FROM gis_geometrycollection;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g geometrycollection YES NULL
SHOW FIELDS FROM gis_geometry;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g geometry YES NULL
INSERT INTO gis_point VALUES
(101, PointFromText('POINT(10 10)')),
(102, PointFromText('POINT(20 10)')),
(103, PointFromText('POINT(20 20)')),
(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
INSERT INTO gis_line VALUES
(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
(107, LineStringFromWKB(LineString(Point(10, 10), Point(40, 10))));
INSERT INTO gis_polygon VALUES
(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
(110, PolyFromWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0)))));
INSERT INTO gis_multi_point VALUES
(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
(113, MPointFromWKB(MultiPoint(Point(3, 6), Point(4, 10))));
INSERT INTO gis_multi_line VALUES
(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
(116, MLineFromWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7)))));
INSERT INTO gis_multi_polygon VALUES
(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
(119, MPolyFromWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3))))));
INSERT INTO gis_geometrycollection VALUES
(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
(121, GeometryFromWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))));
INSERT into gis_geometry SELECT * FROM gis_point;
INSERT into gis_geometry SELECT * FROM gis_line;
INSERT into gis_geometry SELECT * FROM gis_polygon;
INSERT into gis_geometry SELECT * FROM gis_multi_point;
INSERT into gis_geometry SELECT * FROM gis_multi_line;
INSERT into gis_geometry SELECT * FROM gis_multi_polygon;
INSERT into gis_geometry SELECT * FROM gis_geometrycollection;
SELECT fid, AsText(g) FROM gis_point ORDER by fid;
fid AsText(g)
101 POINT(10 10)
102 POINT(20 10)
103 POINT(20 20)
104 POINT(10 20)
SELECT fid, AsText(g) FROM gis_line ORDER by fid;
fid AsText(g)
105 LINESTRING(0 0,0 10,10 0)
106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
107 LINESTRING(10 10,40 10)
SELECT fid, AsText(g) FROM gis_polygon ORDER by fid;
fid AsText(g)
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
110 POLYGON((0 0,30 0,30 30,0 0))
SELECT fid, AsText(g) FROM gis_multi_point ORDER by fid;
fid AsText(g)
111 MULTIPOINT(0 0,10 10,10 20,20 20)
112 MULTIPOINT(1 1,11 11,11 21,21 21)
113 MULTIPOINT(3 6,4 10)
SELECT fid, AsText(g) FROM gis_multi_line ORDER by fid;
fid AsText(g)
114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
115 MULTILINESTRING((10 48,10 21,10 0))
116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
SELECT fid, AsText(g) FROM gis_multi_polygon ORDER by fid;
fid AsText(g)
117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
SELECT fid, AsText(g) FROM gis_geometrycollection ORDER by fid;
fid AsText(g)
120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
SELECT fid, AsText(g) FROM gis_geometry ORDER by fid;
fid AsText(g)
101 POINT(10 10)
102 POINT(20 10)
103 POINT(20 20)
104 POINT(10 20)
105 LINESTRING(0 0,0 10,10 0)
106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
107 LINESTRING(10 10,40 10)
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
110 POLYGON((0 0,30 0,30 30,0 0))
111 MULTIPOINT(0 0,10 10,10 20,20 20)
112 MULTIPOINT(1 1,11 11,11 21,21 21)
113 MULTIPOINT(3 6,4 10)
114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
115 MULTILINESTRING((10 48,10 21,10 0))
116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
SELECT fid, Dimension(g) FROM gis_geometry ORDER by fid;
fid Dimension(g)
101 0
102 0
103 0
104 0
105 1
106 1
107 1
108 2
109 2
110 2
111 0
112 0
113 0
114 1
115 1
116 1
117 2
118 2
119 2
120 1
121 1
SELECT fid, GeometryType(g) FROM gis_geometry ORDER by fid;
fid GeometryType(g)
101 POINT
102 POINT
103 POINT
104 POINT
105 LINESTRING
106 LINESTRING
107 LINESTRING
108 POLYGON
109 POLYGON
110 POLYGON
111 MULTIPOINT
112 MULTIPOINT
113 MULTIPOINT
114 MULTILINESTRING
115 MULTILINESTRING
116 MULTILINESTRING
117 MULTIPOLYGON
118 MULTIPOLYGON
119 MULTIPOLYGON
120 GEOMETRYCOLLECTION
121 GEOMETRYCOLLECTION
SELECT fid, IsEmpty(g) FROM gis_geometry ORDER by fid;
fid IsEmpty(g)
101 0
102 0
103 0
104 0
105 0
106 0
107 0
108 0
109 0
110 0
111 0
112 0
113 0
114 0
115 0
116 0
117 0
118 0
119 0
120 0
121 0
SELECT fid, AsText(Envelope(g)) FROM gis_geometry ORDER by fid;
fid AsText(Envelope(g))
101 POLYGON((10 10,10 10,10 10,10 10,10 10))
102 POLYGON((20 10,20 10,20 10,20 10,20 10))
103 POLYGON((20 20,20 20,20 20,20 20,20 20))
104 POLYGON((10 20,10 20,10 20,10 20,10 20))
105 POLYGON((0 0,10 0,10 10,0 10,0 0))
106 POLYGON((10 10,20 10,20 20,10 20,10 10))
107 POLYGON((10 10,40 10,40 10,10 10,10 10))
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
109 POLYGON((0 0,50 0,50 50,0 50,0 0))
110 POLYGON((0 0,30 0,30 30,0 30,0 0))
111 POLYGON((0 0,20 0,20 20,0 20,0 0))
112 POLYGON((1 1,21 1,21 21,1 21,1 1))
113 POLYGON((3 6,4 6,4 10,3 10,3 6))
114 POLYGON((10 0,16 0,16 48,10 48,10 0))
115 POLYGON((10 0,10 0,10 48,10 48,10 0))
116 POLYGON((1 2,21 2,21 8,1 8,1 2))
117 POLYGON((28 0,84 0,84 42,28 42,28 0))
118 POLYGON((28 0,84 0,84 42,28 42,28 0))
119 POLYGON((0 0,3 0,3 3,0 3,0 0))
120 POLYGON((0 0,10 0,10 10,0 10,0 0))
121 POLYGON((3 6,44 6,44 9,3 9,3 6))
explain extended select Dimension(g), GeometryType(g), IsEmpty(g), AsText(Envelope(g)) from gis_geometry;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_geometry ALL NULL NULL NULL NULL 21 100.00
Warnings:
Note 1003 select dimension(`test`.`gis_geometry`.`g`) AS `Dimension(g)`,geometrytype(`test`.`gis_geometry`.`g`) AS `GeometryType(g)`,isempty(`test`.`gis_geometry`.`g`) AS `IsEmpty(g)`,astext(envelope(`test`.`gis_geometry`.`g`)) AS `AsText(Envelope(g))` from `test`.`gis_geometry`
SELECT fid, X(g) FROM gis_point ORDER by fid;
fid X(g)
101 10
102 20
103 20
104 10
SELECT fid, Y(g) FROM gis_point ORDER by fid;
fid Y(g)
101 10
102 10
103 20
104 20
explain extended select X(g),Y(g) FROM gis_point;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_point ALL NULL NULL NULL NULL 4 100.00
Warnings:
Note 1003 select x(`test`.`gis_point`.`g`) AS `X(g)`,y(`test`.`gis_point`.`g`) AS `Y(g)` from `test`.`gis_point`
SELECT fid, AsText(StartPoint(g)) FROM gis_line ORDER by fid;
fid AsText(StartPoint(g))
105 POINT(0 0)
106 POINT(10 10)
107 POINT(10 10)
SELECT fid, AsText(EndPoint(g)) FROM gis_line ORDER by fid;
fid AsText(EndPoint(g))
105 POINT(10 0)
106 POINT(10 10)
107 POINT(40 10)
SELECT fid, GLength(g) FROM gis_line ORDER by fid;
fid GLength(g)
105 24.142135623731
106 40
107 30
SELECT fid, NumPoints(g) FROM gis_line ORDER by fid;
fid NumPoints(g)
105 3
106 5
107 2
SELECT fid, AsText(PointN(g, 2)) FROM gis_line ORDER by fid;
fid AsText(PointN(g, 2))
105 POINT(0 10)
106 POINT(20 10)
107 POINT(40 10)
SELECT fid, IsClosed(g) FROM gis_line ORDER by fid;
fid IsClosed(g)
105 0
106 1
107 0
explain extended select AsText(StartPoint(g)),AsText(EndPoint(g)),GLength(g),NumPoints(g),AsText(PointN(g, 2)),IsClosed(g) FROM gis_line;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_line ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select astext(startpoint(`test`.`gis_line`.`g`)) AS `AsText(StartPoint(g))`,astext(endpoint(`test`.`gis_line`.`g`)) AS `AsText(EndPoint(g))`,glength(`test`.`gis_line`.`g`) AS `GLength(g)`,numpoints(`test`.`gis_line`.`g`) AS `NumPoints(g)`,astext(pointn(`test`.`gis_line`.`g`,2)) AS `AsText(PointN(g, 2))`,isclosed(`test`.`gis_line`.`g`) AS `IsClosed(g)` from `test`.`gis_line`
SELECT fid, AsText(Centroid(g)) FROM gis_polygon ORDER by fid;
fid AsText(Centroid(g))
108 POINT(15 15)
109 POINT(25.416666666667 25.416666666667)
110 POINT(20 10)
SELECT fid, Area(g) FROM gis_polygon ORDER by fid;
fid Area(g)
108 100
109 2400
110 450
SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon ORDER by fid;
fid AsText(ExteriorRing(g))
108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
110 LINESTRING(0 0,30 0,30 30,0 0)
SELECT fid, NumInteriorRings(g) FROM gis_polygon ORDER by fid;
fid NumInteriorRings(g)
108 0
109 1
110 0
SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon ORDER by fid;
fid AsText(InteriorRingN(g, 1))
108 NULL
109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
110 NULL
explain extended select AsText(Centroid(g)),Area(g),AsText(ExteriorRing(g)),NumInteriorRings(g),AsText(InteriorRingN(g, 1)) FROM gis_polygon;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_polygon ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select astext(centroid(`test`.`gis_polygon`.`g`)) AS `AsText(Centroid(g))`,area(`test`.`gis_polygon`.`g`) AS `Area(g)`,astext(exteriorring(`test`.`gis_polygon`.`g`)) AS `AsText(ExteriorRing(g))`,numinteriorrings(`test`.`gis_polygon`.`g`) AS `NumInteriorRings(g)`,astext(interiorringn(`test`.`gis_polygon`.`g`,1)) AS `AsText(InteriorRingN(g, 1))` from `test`.`gis_polygon`
SELECT fid, IsClosed(g) FROM gis_multi_line ORDER by fid;
fid IsClosed(g)
114 0
115 0
116 0
SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon ORDER by fid;
fid AsText(Centroid(g))
117 POINT(55.588527753042 17.426536064114)
118 POINT(55.588527753042 17.426536064114)
119 POINT(2 2)
SELECT fid, Area(g) FROM gis_multi_polygon ORDER by fid;
fid Area(g)
117 1684.5
118 1684.5
119 4.5
SELECT fid, NumGeometries(g) from gis_multi_point ORDER by fid;
fid NumGeometries(g)
111 4
112 4
113 2
SELECT fid, NumGeometries(g) from gis_multi_line ORDER by fid;
fid NumGeometries(g)
114 2
115 1
116 2
SELECT fid, NumGeometries(g) from gis_multi_polygon ORDER by fid;
fid NumGeometries(g)
117 2
118 2
119 1
SELECT fid, NumGeometries(g) from gis_geometrycollection ORDER by fid;
fid NumGeometries(g)
120 2
121 2
explain extended SELECT fid, NumGeometries(g) from gis_multi_point;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_multi_point ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select `test`.`gis_multi_point`.`fid` AS `fid`,numgeometries(`test`.`gis_multi_point`.`g`) AS `NumGeometries(g)` from `test`.`gis_multi_point`
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point ORDER by fid;
fid AsText(GeometryN(g, 2))
111 POINT(10 10)
112 POINT(11 11)
113 POINT(4 10)
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line ORDER by fid;
fid AsText(GeometryN(g, 2))
114 LINESTRING(16 0,16 23,16 48)
115 NULL
116 LINESTRING(2 5,5 8,21 7)
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon ORDER by fid;
fid AsText(GeometryN(g, 2))
117 POLYGON((59 18,67 18,67 13,59 13,59 18))
118 POLYGON((59 18,67 18,67 13,59 13,59 18))
119 NULL
SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection ORDER by fid;
fid AsText(GeometryN(g, 2))
120 LINESTRING(0 0,10 10)
121 LINESTRING(3 6,7 9)
SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection ORDER by fid;
fid AsText(GeometryN(g, 1))
120 POINT(0 0)
121 POINT(44 6)
explain extended SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_multi_point ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select `test`.`gis_multi_point`.`fid` AS `fid`,astext(geometryn(`test`.`gis_multi_point`.`g`,2)) AS `AsText(GeometryN(g, 2))` from `test`.`gis_multi_point`
SELECT g1.fid as first, g2.fid as second,
Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
first second w c o e d t i r
120 120 1 1 0 1 0 0 1 0
120 121 0 0 0 0 0 0 1 0
121 120 0 0 1 0 0 0 1 0
121 121 1 1 0 1 0 0 1 0
explain extended SELECT g1.fid as first, g2.fid as second,
Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE g1 ALL NULL NULL NULL NULL 2 100.00 Using temporary; Using filesort
1 SIMPLE g2 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1003 select `test`.`g1`.`fid` AS `first`,`test`.`g2`.`fid` AS `second`,within(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `w`,contains(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `c`,overlaps(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `o`,equals(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `e`,disjoint(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `d`,touches(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `t`,intersects(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `i`,crosses(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `r` from `test`.`gis_geometrycollection` `g1` join `test`.`gis_geometrycollection` `g2` order by `test`.`g1`.`fid`,`test`.`g2`.`fid`
DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
CREATE TABLE t1 (
a INTEGER PRIMARY KEY AUTO_INCREMENT,
gp point,
ln linestring,
pg polygon,
mp multipoint,
mln multilinestring,
mpg multipolygon,
gc geometrycollection,
gm geometry
);
SHOW FIELDS FROM t1;
Field Type Null Key Default Extra
a int(11) NO PRI NULL auto_increment
gp point YES NULL
ln linestring YES NULL
pg polygon YES NULL
mp multipoint YES NULL
mln multilinestring YES NULL
mpg multipolygon YES NULL
gc geometrycollection YES NULL
gm geometry YES NULL
ALTER TABLE t1 ADD fid INT;
SHOW FIELDS FROM t1;
Field Type Null Key Default Extra
a int(11) NO PRI NULL auto_increment
gp point YES NULL
ln linestring YES NULL
pg polygon YES NULL
mp multipoint YES NULL
mln multilinestring YES NULL
mpg multipolygon YES NULL
gc geometrycollection YES NULL
gm geometry YES NULL
fid int(11) YES NULL
DROP TABLE t1;
create table t1 (pk integer primary key auto_increment, a geometry not null);
insert into t1 (a) values (GeomFromText('Point(1 2)'));
insert into t1 (a) values ('Garbage');
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
insert IGNORE into t1 (a) values ('Garbage');
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
drop table t1;
create table t1 (pk integer primary key auto_increment, fl geometry);
insert into t1 (fl) values (1);
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
insert into t1 (fl) values (1.11);
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
insert into t1 (fl) values ("qwerty");
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
insert into t1 (fl) values (pointfromtext('point(1,1)'));
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
drop table t1;

View file

@@ -1,6 +1,6 @@
drop table if exists t1, t2;
reset master;
create table t1 (a int) engine=bdb;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
begin;
insert t1 values (5);
@@ -10,12 +10,12 @@ insert t2 values (5);
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=bdb
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb
master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Table_map 1 # table_id: # (test.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Xid 1 # COMMIT /* xid= */
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Table_map 1 # table_id: # (test.t2)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F

View file

@@ -12,7 +12,7 @@ master-bin.000001 367 Xid 1 394 COMMIT /* XID */
drop table t1;
drop table if exists t1, t2;
reset master;
create table t1 (a int) engine=bdb;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
begin;
insert t1 values (5);
@@ -22,11 +22,11 @@ insert t2 values (5);
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=bdb
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb
master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Query 1 # use `test`; insert t1 values (5)
master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Xid 1 # COMMIT /* xid= */
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Query 1 # use `test`; insert t2 values (5)
master-bin.000001 # Xid 1 # COMMIT /* xid= */

View file

@@ -479,7 +479,7 @@ drop table t1;
create table t1 (
c char(10) character set utf8,
unique key a (c(1))
) engine=bdb;
) engine=innodb;
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
insert into t1 values ('aa');
ERROR 23000: Duplicate entry 'aa' for key 'a'
@@ -637,7 +637,7 @@ drop table t1;
create table t1 (
c char(10) character set utf8 collate utf8_bin,
unique key a (c(1))
) engine=bdb;
) engine=innodb;
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
insert into t1 values ('aa');
ERROR 23000: Duplicate entry 'aa' for key 'a'
@@ -707,7 +707,7 @@ drop table t1;
create table t1 (
str varchar(255) character set utf8 not null,
key str (str(2))
) engine=bdb;
) engine=innodb;
INSERT INTO t1 VALUES ('str');
INSERT INTO t1 VALUES ('str2');
select * from t1 where str='str';
@@ -796,7 +796,7 @@ insert into t1 values(1,'foo'),(2,'foobar');
select * from t1 where b like 'foob%';
a b
2 foobar
alter table t1 engine=bdb;
alter table t1 engine=innodb;
select * from t1 where b like 'foob%';
a b
2 foobar

View file

@@ -1,2 +0,0 @@
Variable_name Value
have_bdb YES

View file

@@ -43,7 +43,6 @@ character-sets-dir option_value
basedir option_value
skip-stack-trace option_value
skip-innodb option_value
skip-bdb option_value
skip-ndbcluster option_value
nonguarded option_value
log-output option_value
@@ -64,7 +63,6 @@ character-sets-dir option_value
basedir option_value
skip-stack-trace option_value
skip-innodb option_value
skip-bdb option_value
skip-ndbcluster option_value
nonguarded option_value
log-output option_value

View file

@@ -22,7 +22,6 @@ basedir VALUE
server_id VALUE
skip-stack-trace VALUE
skip-innodb VALUE
skip-bdb VALUE
skip-ndbcluster VALUE
log-output VALUE
SHOW INSTANCE OPTIONS mysqld2;
@@ -41,7 +40,6 @@ basedir VALUE
server_id VALUE
skip-stack-trace VALUE
skip-innodb VALUE
skip-bdb VALUE
skip-ndbcluster VALUE
nonguarded VALUE
log-output VALUE

View file

@@ -1,136 +0,0 @@
drop table if exists t1;
create table t1 (
pk int primary key,
key1 int,
key2 int,
filler char(200),
filler2 char(200),
index(key1),
index(key2)
) engine=bdb;
select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 );
pk key1 key2 filler filler2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
9 9 9 filler-data filler-data-2
10 10 10 filler-data filler-data-2
4 4 4 filler-data filler-data-2
5 5 5 filler-data filler-data-2
6 6 6 filler-data filler-data-2
7 7 7 filler-data filler-data-2
8 8 8 filler-data filler-data-2
set @maxv=1000;
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or key1=18 or key1=60;
pk key1 key2 filler filler2
18 18 18 filler-data filler-data-2
60 60 60 filler-data filler-data-2
1 1 1 filler-data filler-data-2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
4 4 4 filler-data filler-data-2
11 11 11 filler-data filler-data-2
12 12 12 filler-data filler-data-2
13 13 13 filler-data filler-data-2
14 14 14 filler-data filler-data-2
50 50 50 filler-data filler-data-2
51 51 51 filler-data filler-data-2
52 52 52 filler-data filler-data-2
53 53 53 filler-data filler-data-2
54 54 54 filler-data filler-data-2
991 991 991 filler-data filler-data-2
992 992 992 filler-data filler-data-2
993 993 993 filler-data filler-data-2
994 994 994 filler-data filler-data-2
995 995 995 filler-data filler-data-2
996 996 996 filler-data filler-data-2
997 997 997 filler-data filler-data-2
998 998 998 filler-data filler-data-2
999 999 999 filler-data filler-data-2
1000 1000 1000 filler-data filler-data-2
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or key1 < 3 or key1 > @maxv-11;
pk key1 key2 filler filler2
990 990 990 filler-data filler-data-2
1 1 1 filler-data filler-data-2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
4 4 4 filler-data filler-data-2
11 11 11 filler-data filler-data-2
12 12 12 filler-data filler-data-2
13 13 13 filler-data filler-data-2
14 14 14 filler-data filler-data-2
50 50 50 filler-data filler-data-2
51 51 51 filler-data filler-data-2
52 52 52 filler-data filler-data-2
53 53 53 filler-data filler-data-2
54 54 54 filler-data filler-data-2
991 991 991 filler-data filler-data-2
992 992 992 filler-data filler-data-2
993 993 993 filler-data filler-data-2
994 994 994 filler-data filler-data-2
995 995 995 filler-data filler-data-2
996 996 996 filler-data filler-data-2
997 997 997 filler-data filler-data-2
998 998 998 filler-data filler-data-2
999 999 999 filler-data filler-data-2
1000 1000 1000 filler-data filler-data-2
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or
(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10);
pk key1 key2 filler filler2
1 1 1 filler-data filler-data-2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
4 4 4 filler-data filler-data-2
11 11 11 filler-data filler-data-2
12 12 12 filler-data filler-data-2
13 13 13 filler-data filler-data-2
14 14 14 filler-data filler-data-2
50 50 50 filler-data filler-data-2
51 51 51 filler-data filler-data-2
52 52 52 filler-data filler-data-2
53 53 53 filler-data filler-data-2
54 54 54 filler-data filler-data-2
991 991 991 filler-data filler-data-2
992 992 992 filler-data filler-data-2
993 993 993 filler-data filler-data-2
994 994 994 filler-data filler-data-2
995 995 995 filler-data filler-data-2
996 996 996 filler-data filler-data-2
997 997 997 filler-data filler-data-2
998 998 998 filler-data filler-data-2
999 999 999 filler-data filler-data-2
1000 1000 1000 filler-data filler-data-2
select * from t1 where
(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 )
or
(key1 < 5) or (key1 > @maxv-10);
pk key1 key2 filler filler2
1 1 1 filler-data filler-data-2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
4 4 4 filler-data filler-data-2
991 991 991 filler-data filler-data-2
992 992 992 filler-data filler-data-2
993 993 993 filler-data filler-data-2
994 994 994 filler-data filler-data-2
995 995 995 filler-data filler-data-2
996 996 996 filler-data filler-data-2
997 997 997 filler-data filler-data-2
998 998 998 filler-data filler-data-2
999 999 999 filler-data filler-data-2
1000 1000 1000 filler-data filler-data-2
11 11 11 filler-data filler-data-2
12 12 12 filler-data filler-data-2
13 13 13 filler-data filler-data-2
14 14 14 filler-data filler-data-2
50 50 50 filler-data filler-data-2
51 51 51 filler-data filler-data-2
52 52 52 filler-data filler-data-2
53 53 53 filler-data filler-data-2
54 54 54 filler-data filler-data-2
drop table t1;

View file

@@ -29,13 +29,13 @@ on (mysql.general_log.command_type = join_test.command_type)
drop table join_test;
flush logs;
lock tables mysql.general_log WRITE;
ERROR HY000: You can't write-lock a log table. Only read access is possible.
ERROR HY000: You can't write-lock a log table. Only read access is possible
lock tables mysql.slow_log WRITE;
ERROR HY000: You can't write-lock a log table. Only read access is possible.
ERROR HY000: You can't write-lock a log table. Only read access is possible
lock tables mysql.general_log READ;
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
lock tables mysql.slow_log READ;
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL;
unlock tables;
lock tables mysql.general_log READ LOCAL;
@@ -161,13 +161,13 @@ TIMESTAMP USER_HOST THREAD_ID 1 Query set global slow_query_log='ON'
TIMESTAMP USER_HOST THREAD_ID 1 Query select * from mysql.general_log
flush logs;
lock tables mysql.general_log WRITE;
ERROR HY000: You can't write-lock a log table. Only read access is possible.
ERROR HY000: You can't write-lock a log table. Only read access is possible
lock tables mysql.slow_log WRITE;
ERROR HY000: You can't write-lock a log table. Only read access is possible.
ERROR HY000: You can't write-lock a log table. Only read access is possible
lock tables mysql.general_log READ;
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
lock tables mysql.slow_log READ;
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL;
unlock tables;
set global general_log='OFF';

View file

@@ -492,7 +492,7 @@ create table t2 like t1;
insert into t2 select * from t1;
delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
drop table t1,t2;
create table t1 ( c char(8) not null ) engine=bdb;
create table t1 ( c char(8) not null ) engine=innodb;
insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9');
insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F');
alter table t1 add b char(8) not null;

View file

@@ -64,17 +64,26 @@ pk u o
insert into t1 values (1,1,1);
drop table t1;
create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb;
insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3);
insert into t1 values (1,'one',1);
begin;
select * from t1 where x = 1 for update;
x y z
1 one 1
begin;
select * from t1 where x = 1 for update;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
rollback;
rollback;
insert into t1 values (2,'two',2),(3,"three",3);
begin;
select * from t1 where x = 1 for update;
x y z
1 one 1
select * from t1 where x = 1 for update;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
select * from t1 where x = 2 for update;
x y z
2 two 2
select * from t1 where x = 1 for update;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
rollback;
commit;
begin;

File diff suppressed because it is too large

View file

@@ -1,186 +0,0 @@
drop table if exists t1, t2, t3,t4;
create table t1 (
pk1 int not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values (-5, 1, 1),
(-100, 1, 1),
(3, 1, 1),
(0, 1, 1),
(10, 1, 1);
explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 5 Using sort_union(key1,key2); Using where
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 key1 key2
-100 1 1
-5 1 1
0 1 1
3 1 1
10 1 1
drop table t1;
create table t1 (
pk1 int unsigned not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values (0, 1, 1),
(0xFFFFFFFF, 1, 1),
(0xFFFFFFFE, 1, 1),
(1, 1, 1),
(2, 1, 1);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 key1 key2
0 1 1
1 1 1
2 1 1
4294967294 1 1
4294967295 1 1
drop table t1;
create table t1 (
pk1 char(4) not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb collate latin2_general_ci;
insert into t1 values ('a1', 1, 1),
('b2', 1, 1),
('A3', 1, 1),
('B4', 1, 1);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 key1 key2
a1 1 1
A3 1 1
b2 1 1
B4 1 1
drop table t1;
create table t1 (
pk1 int not NULL,
pk2 char(4) not NULL collate latin1_german1_ci,
pk3 char(4) not NULL collate latin1_bin,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1,pk2,pk3),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values
(1, 'u', 'u', 1, 1),
(1, 'u', char(0xEC), 1, 1),
(1, 'u', 'x', 1, 1);
insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1;
insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u';
insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1;
select * from t1;
pk1 pk2 pk3 key1 key2
1 ì u 1 1
1 ì x 1 1
1 ì ì 1 1
1 u u 1 1
1 u x 1 1
1 u ì 1 1
1 x u 1 1
1 x x 1 1
1 x ì 1 1
2 ì u 1 1
2 ì x 1 1
2 ì ì 1 1
2 u u 1 1
2 u x 1 1
2 u ì 1 1
2 x u 1 1
2 x x 1 1
2 x ì 1 1
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 pk2 pk3 key1 key2
1 ì u 1 1
1 ì x 1 1
1 ì ì 1 1
1 u u 1 1
1 u x 1 1
1 u ì 1 1
1 x u 1 1
1 x x 1 1
1 x ì 1 1
2 ì u 1 1
2 ì x 1 1
2 ì ì 1 1
2 u u 1 1
2 u x 1 1
2 u ì 1 1
2 x u 1 1
2 x x 1 1
2 x ì 1 1
alter table t1 drop primary key;
select * from t1;
pk1 pk2 pk3 key1 key2
1 ì u 1 1
1 ì x 1 1
1 ì ì 1 1
1 u u 1 1
1 u x 1 1
1 u ì 1 1
1 x u 1 1
1 x x 1 1
1 x ì 1 1
2 ì u 1 1
2 ì x 1 1
2 ì ì 1 1
2 u u 1 1
2 u x 1 1
2 u ì 1 1
2 x u 1 1
2 x x 1 1
2 x ì 1 1
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 pk2 pk3 key1 key2
1 ì u 1 1
1 ì x 1 1
1 ì ì 1 1
1 u u 1 1
1 u x 1 1
1 u ì 1 1
1 x u 1 1
1 x x 1 1
1 x ì 1 1
2 ì u 1 1
2 ì x 1 1
2 ì ì 1 1
2 u u 1 1
2 u x 1 1
2 u ì 1 1
2 x u 1 1
2 x x 1 1
2 x ì 1 1
drop table t1;
create table t1 (
pk1 varchar(8) NOT NULL default '',
pk2 varchar(4) NOT NULL default '',
key1 int(11),
key2 int(11),
primary key(pk1, pk2),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values ('','empt',2,2),
('a','a--a',2,2),
('bb','b--b',2,2),
('ccc','c--c',2,2),
('dddd','d--d',2,2);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 pk2 key1 key2
empt 2 2
a a--a 2 2
bb b--b 2 2
ccc c--c 2 2
dddd d--d 2 2
drop table t1;

View file

@@ -692,7 +692,7 @@ drop database mysqltest;
show full plugin;
show warnings;
Level Code Message
Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead.
Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead
show plugin;
show plugins;
End of 5.1 tests

View file

@@ -535,7 +535,7 @@ use db_bug7787|
CREATE PROCEDURE p1()
SHOW INNODB STATUS; |
Warnings:
Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead.
Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead
GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost|
DROP DATABASE db_bug7787|
drop user user_bug7787@localhost|

View file

@@ -101,13 +101,13 @@ create table t1 (t2 timestamp(2), t4 timestamp(4), t6 timestamp(6),
t8 timestamp(8), t10 timestamp(10), t12 timestamp(12),
t14 timestamp(14));
Warnings:
Warning 1541 The syntax 'TIMESTAMP(2)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
Warning 1541 The syntax 'TIMESTAMP(4)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
Warning 1541 The syntax 'TIMESTAMP(6)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
Warning 1541 The syntax 'TIMESTAMP(8)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
Warning 1541 The syntax 'TIMESTAMP(10)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
Warning 1541 The syntax 'TIMESTAMP(12)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
Warning 1541 The syntax 'TIMESTAMP(14)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
Warning 1541 The syntax 'TIMESTAMP(2)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(4)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(6)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(8)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(10)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(12)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(14)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
insert t1 values (0,0,0,0,0,0,0),
("1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59",
"1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59",

View file

@@ -175,7 +175,7 @@ Warning 1266 Using storage engine MyISAM for table 't1'
drop table t1;
set table_type=MYISAM;
Warnings:
Warning 1541 The syntax 'table_type' is deprecated and will be removed in MySQL 5.2. Please use 'storage_engine' instead.
Warning 1541 The syntax 'table_type' is deprecated and will be removed in MySQL 5.2. Please use 'storage_engine' instead
create table t1 (a int);
insert into t1 (a) values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
update t1 set a='abc';

View file

@@ -1,18 +0,0 @@
#
# Test of problem when shutting down mysqld at once after ALTER TABLE
#
-- source include/have_bdb.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1(objid BIGINT not null, tablename varchar(64), oid BIGINT not null, test BIGINT, PRIMARY KEY (objid), UNIQUE(tablename)) engine=BDB;
insert into t1 values(1, 't1',4,9);
insert into t1 values(2, 'metatable',1,9);
insert into t1 values(3, 'metaindex',1,9 );
select * from t1;
alter table t1 drop column test;
# Now we do a reboot and continue with the next test
# End of 4.1 tests

View file

@@ -1,2 +0,0 @@
--skip-external-locking

View file

@@ -1,10 +0,0 @@
#
# Note that this test uses tables from the previous test
# This is to test that the table t1 survives a reboot of MySQL
# The options in the -master.opt file are just there to force the reboot
#
-- source include/have_bdb.inc
select * from t1;
drop table t1;
# End of 4.1 tests

View file

@@ -1,51 +0,0 @@
-- source include/have_bdb.inc
# test for bug reported by Mark Steele
--disable_warnings
drop table if exists t1;
--enable_warnings
CREATE TABLE t1 (
ChargeID int(10) unsigned NOT NULL auto_increment,
ServiceID int(10) unsigned DEFAULT '0' NOT NULL,
ChargeDate date DEFAULT '0000-00-00' NOT NULL,
ChargeAmount decimal(20,2) DEFAULT '0.00' NOT NULL,
FedTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
ProvTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
ChargeStatus enum('New','Auth','Unauth','Sale','Denied','Refund')
DEFAULT 'New' NOT NULL,
ChargeAuthorizationMessage text,
ChargeComment text,
ChargeTimeStamp varchar(20),
PRIMARY KEY (ChargeID),
KEY ServiceID (ServiceID),
KEY ChargeDate (ChargeDate)
) engine=BDB;
BEGIN;
INSERT INTO t1
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
COMMIT;
BEGIN;
UPDATE t1 SET ChargeAuthorizationMessage = 'blablabla' WHERE
ChargeID = 1;
COMMIT;
INSERT INTO t1
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
select * from t1;
drop table t1;
#
# Test for bug #2342 "Running ANALYZE TABLE on bdb table
# inside a transaction hangs server thread"
create table t1 (a int) engine=bdb;
set autocommit=0;
insert into t1 values(1);
analyze table t1;
drop table t1;
# End of 4.1 tests

View file

@@ -1,59 +0,0 @@
# This test doesn't work with the embedded version as this code
# assumes that one query is running while we are doing queries on
# a second connection.
# This would work if mysqltest were threaded and handled each
# connection in a separate thread.
#
-- source include/not_embedded.inc
-- source include/have_bdb.inc
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
connection con1;
create table t1 (id integer, x integer) engine=BDB;
create table t2 (id integer, x integer) engine=BDB;
insert into t1 values(0, 0);
insert into t2 values(0, 0);
set autocommit=0;
update t1 set x = 1 where id = 0;
connection con2;
set autocommit=0;
update t2 set x = 1 where id = 0;
# The following query should hang because con1 is locking the page
--send
select x from t1 where id = 0;
connection con1;
# This should generate a deadlock as we are trying to access a locked row
--send
select x from t2 where id = 0;
connection con2;
--error 1213
reap;
commit;
connection con1;
reap;
commit;
connection con2;
select * from t1;
select * from t2;
commit;
connection con1;
select * from t1;
select * from t2;
commit;
drop table t1,t2;
# End of 4.1 tests

View file

@ -1,59 +0,0 @@
# This test doesn't work with the embedded version as this code
# assumes that one query is running while we are doing queries on
# a second connection.
# This would work if mysqltest were threaded and handled each
# connection in a separate thread.
#
#-- source include/not_embedded.inc
-- source include/have_bdb.inc
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
connection con1;
create table t1 (id integer, x integer) engine=BDB;
create table t2 (id integer, x integer) engine=BDB;
insert into t1 values(0, 0);
insert into t2 values(0, 0);
set autocommit=0;
update t1 set x = 1 where id = 0;
connection con2;
set autocommit=0;
update t2 set x = 1 where id = 0;
# The following query should hang because con1 is locking the page
--send
select x from t1 where id = 0;
connection con1;
# This should generate a deadlock as we are trying to access a locked row
--send
select x from t2 where id = 0;
connection con2;
--error 1213
reap;
commit;
connection con1;
reap;
commit;
connection con2;
select * from t1;
select * from t2;
commit;
connection con1;
select * from t1;
select * from t2;
commit;
drop table t1,t2;
# End of 4.1 tests

File diff suppressed because it is too large

View file

@ -1 +0,0 @@
--set-variable=query_cache_size=1M

View file

@ -1,53 +0,0 @@
-- source include/have_bdb.inc
-- source include/have_query_cache.inc
#
# Without auto_commit.
#
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
flush status;
set autocommit=0;
create table t1 (a int not null) engine=bdb;
insert into t1 values (1),(2),(3);
select * from t1;
show status like "Qcache_queries_in_cache";
drop table t1;
set autocommit=1;
create table t1 (a int not null) engine=bdb;
begin;
insert into t1 values (1),(2),(3);
select * from t1;
show status like "Qcache_queries_in_cache";
drop table t1;
create table t1 (a int not null) engine=bdb;
create table t2 (a int not null) engine=bdb;
create table t3 (a int not null) engine=bdb;
insert into t1 values (1),(2);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
select * from t1;
select * from t2;
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
begin;
select * from t1;
select * from t2;
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
insert into t1 values (3);
insert into t2 values (3);
insert into t1 values (4);
select * from t1;
select * from t2;
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
commit;
show status like "Qcache_queries_in_cache";
drop table if exists t1, t2, t3;
# End of 4.1 tests

View file

@ -1,3 +0,0 @@
-- source include/have_bdb.inc
SET storage_engine=bdb;
--source include/gis_generic.inc

View file

@ -360,7 +360,7 @@ drop table t1;
create table t1 (
c char(10) character set utf8,
unique key a (c(1))
) engine=bdb;
) engine=innodb;
--enable_warnings
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
--error 1062
@ -483,7 +483,7 @@ drop table t1;
create table t1 (
c char(10) character set utf8 collate utf8_bin,
unique key a (c(1))
) engine=bdb;
) engine=innodb;
--enable_warnings
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
--error 1062
@ -558,7 +558,7 @@ drop table t1;
create table t1 (
str varchar(255) character set utf8 not null,
key str (str(2))
) engine=bdb;
) engine=innodb;
--enable_warnings
INSERT INTO t1 VALUES ('str');
INSERT INTO t1 VALUES ('str2');
@ -644,7 +644,7 @@ create table t1 (
insert into t1 values(1,'foo'),(2,'foobar');
select * from t1 where b like 'foob%';
--disable_warnings
alter table t1 engine=bdb;
alter table t1 engine=innodb;
--enable_warnings
select * from t1 where b like 'foob%';
drop table t1;

View file

@ -44,3 +44,4 @@ rpl_row_basic_7ndb : BUG#21298 2006-07-27 msvensson
rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson
crash_commit_before : 2006-08-02 msvensson
rpl_ndb_dd_advance : BUG#18679 2006-07-28 jimw (Test fails randomly)
federated_transactions : Needs to be re-enabled once Patrick's merge is complete

View file

@ -10,7 +10,7 @@ CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`name` varchar(32) NOT NULL default ''
)
DEFAULT CHARSET=latin1 ENGINE=InnoDB;
DEFAULT CHARSET=latin1 ENGINE=innodb;
connection master;
DROP TABLE IF EXISTS federated.t1;

View file

@ -1,52 +0,0 @@
#
# 2-sweeps read Index_merge test
#
-- source include/have_bdb.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (
pk int primary key,
key1 int,
key2 int,
filler char(200),
filler2 char(200),
index(key1),
index(key2)
) engine=bdb;
--disable_query_log
let $1=1000;
while ($1)
{
eval insert into t1 values($1, $1, $1, 'filler-data','filler-data-2');
dec $1;
}
--enable_query_log
select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 );
set @maxv=1000;
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or key1=18 or key1=60;
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or key1 < 3 or key1 > @maxv-11;
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or
(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10);
select * from t1 where
(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 )
or
(key1 < 5) or (key1 > @maxv-10);
drop table t1;

View file

@ -485,7 +485,7 @@ delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
drop table t1,t2;
--disable_warnings
create table t1 ( c char(8) not null ) engine=bdb;
create table t1 ( c char(8) not null ) engine=innodb;
--enable_warnings
insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9');

View file

@ -73,7 +73,7 @@ drop table t1;
create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb;
insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3);
insert into t1 values (1,'one',1);
# PK access
connection con1;
@ -82,11 +82,22 @@ select * from t1 where x = 1 for update;
connection con2;
begin;
select * from t1 where x = 2 for update;
--error 1205
select * from t1 where x = 1 for update;
rollback;
connection con1;
rollback;
insert into t1 values (2,'two',2),(3,"three",3);
begin;
select * from t1 where x = 1 for update;
connection con2;
--error 1205
select * from t1 where x = 1 for update;
select * from t1 where x = 2 for update;
rollback;
connection con1;
commit;

View file

@ -1,25 +0,0 @@
###############################################
# #
# Prepared Statements test on BDB tables #
# #
###############################################
#
# NOTE: PLEASE SEE ps_1general.test (bottom)
# BEFORE ADDING NEW TEST CASES HERE !!!
use test;
-- source include/have_bdb.inc
let $type= 'BDB' ;
-- source include/ps_create.inc
-- source include/ps_renew.inc
-- source include/ps_query.inc
-- source include/ps_modify.inc
-- source include/ps_modify1.inc
-- source include/ps_conv.inc
drop table t1, t9;
# End of 4.1 tests

View file

@ -1,108 +0,0 @@
#
# Test for rowid ordering (and comparison) functions.
# do index_merge select for tables with PK of various types.
#
--disable_warnings
drop table if exists t1, t2, t3,t4;
--enable_warnings
-- source include/have_bdb.inc
# Signed number as rowid
create table t1 (
pk1 int not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values (-5, 1, 1),
(-100, 1, 1),
(3, 1, 1),
(0, 1, 1),
(10, 1, 1);
explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;
# Unsigned numbers as rowids
create table t1 (
pk1 int unsigned not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values (0, 1, 1),
(0xFFFFFFFF, 1, 1),
(0xFFFFFFFE, 1, 1),
(1, 1, 1),
(2, 1, 1);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;
# Case-insensitive char(N)
create table t1 (
pk1 char(4) not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb collate latin2_general_ci;
insert into t1 values ('a1', 1, 1),
('b2', 1, 1),
('A3', 1, 1),
('B4', 1, 1);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;
# Multi-part PK
create table t1 (
pk1 int not NULL,
pk2 char(4) not NULL collate latin1_german1_ci,
pk3 char(4) not NULL collate latin1_bin,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1,pk2,pk3),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values
(1, 'u', 'u', 1, 1),
(1, 'u', char(0xEC), 1, 1),
(1, 'u', 'x', 1, 1);
insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1;
insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u';
insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1;
select * from t1;
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
# Hidden PK
alter table t1 drop primary key;
select * from t1;
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;
# Variable-length PK
# This is also a test for Bug#2688
create table t1 (
pk1 varchar(8) NOT NULL default '',
pk2 varchar(4) NOT NULL default '',
key1 int(11),
key2 int(11),
primary key(pk1, pk2),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values ('','empt',2,2),
('a','a--a',2,2),
('bb','b--b',2,2),
('ccc','c--c',2,2),
('dddd','d--d',2,2);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;

View file

@ -42,7 +42,7 @@ base64_needed_encoded_length(int length_of_data)
int
base64_needed_decoded_length(int length_of_encoded_data)
{
return ceil(length_of_encoded_data * 3 / 4);
return (int)ceil(length_of_encoded_data * 3 / 4);
}

View file

@ -46,7 +46,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset,
before seeking to the given offset
*/
error= (old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
error= (old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
lseek(Filedes, offset, MY_SEEK_SET) == -1L;
if (!error) /* Seek was successful */
@ -121,7 +121,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset,
As we cannot change the file pointer, we save the old position,
before seeking to the given offset
*/
error= ((old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
error= ((old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
lseek(Filedes, offset, MY_SEEK_SET) == -1L);
if (!error) /* Seek was successful */
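
The two hunks above change nothing functional; they only add an explicit (off_t) cast on the lseek() return value used to remember the current file position. The surrounding comment describes the usual emulation of positioned I/O on platforms without a native pread()/pwrite(): save the current offset, seek to the requested one, do the I/O, and (later in the function, outside these hunks) presumably seek back. A minimal sketch of that pattern using plain POSIX calls, independent of the my_pread()/my_pwrite() wrappers and their flags:

#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper, not part of this patch: emulate pread() by saving
   and restoring the file offset, the same idea my_pread() relies on when
   a native pread() is not available. */
ssize_t emulated_pread(int fd, void *buf, size_t count, off_t offset)
{
  /* Remember where the file pointer currently is. */
  off_t old_offset= lseek(fd, 0, SEEK_CUR);
  if (old_offset == (off_t) -1)
    return -1;
  /* Seek to the requested position and read. */
  if (lseek(fd, offset, SEEK_SET) == (off_t) -1)
    return -1;
  ssize_t nread= read(fd, buf, count);
  /* Restore the original position so the caller sees an unchanged offset. */
  if (lseek(fd, old_offset, SEEK_SET) == (off_t) -1)
    return -1;
  return nread;
}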

View file

@ -361,7 +361,6 @@ int mysql_install_db(int argc, char *argv[])
add_arg(&al, "--bootstrap");
add_arg(&al, "--skip-grant-tables");
add_arg(&al, "--skip-innodb");
add_arg(&al, "--skip-bdb");
// spawn mysqld
err = spawn(mysqld, &al, TRUE, sql_file, out_log, err_log);

View file

@ -210,7 +210,6 @@ void install_db(char *datadir)
add_arg(&al, "--basedir=%s", base_dir);
add_arg(&al, "--datadir=%s", datadir);
add_arg(&al, "--skip-innodb");
add_arg(&al, "--skip-bdb");
// spawn
if ((err = spawn(mysqld_file, &al, TRUE, input, output, error)) != 0)

View file

@ -212,7 +212,7 @@ then
fi
mysqld_install_cmd_line="$mysqld $defaults $mysqld_opt --bootstrap \
--skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb \
--skip-bdb --skip-ndbcluster $args --max_allowed_packet=8M --net_buffer_length=16K"
--skip-ndbcluster $args --max_allowed_packet=8M --net_buffer_length=16K"
if $scriptdir/mysql_create_system_tables $create_option $mdata $hostname $windows \
| eval "$mysqld_install_cmd_line"
then

View file

@ -429,7 +429,7 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
goto err;
}
if (check_date(l_time, not_zero_date, flags, was_cut))
if ((my_bool)check_date(l_time, not_zero_date, flags, was_cut))
goto err;
l_time->time_type= (number_of_fields <= 3 ?

View file

@ -8,8 +8,7 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/zlib
${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
${CMAKE_SOURCE_DIR}/storage/bdb/dbinc)
)
SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc
${CMAKE_SOURCE_DIR}/sql/message.h
@ -29,7 +28,7 @@ ADD_DEFINITIONS(-DHAVE_ROW_BASED_REPLICATION -DMYSQL_SERVER
ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc
discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
filesort.cc gstream.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc
ha_innodb.cc ha_partition.cc ha_federated.cc ha_berkeley.cc
ha_innodb.cc ha_partition.cc ha_federated.cc
handler.cc hash_filo.cc hash_filo.h
hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc
item_create.cc item_func.cc item_geofunc.cc item_row.cc
@ -79,9 +78,6 @@ ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
IF(WITH_INNOBASE_STORAGE_ENGINE)
TARGET_LINK_LIBRARIES(mysqld innobase)
ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
IF(WITH_BERKELEY_STORAGE_ENGINE)
TARGET_LINK_LIBRARIES(mysqld bdb)
ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
ADD_DEPENDENCIES(mysqld GenError)

View file

@ -47,10 +47,10 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
item_create.h item_subselect.h item_row.h \
mysql_priv.h item_geofunc.h sql_bitmap.h \
procedure.h sql_class.h sql_lex.h sql_list.h \
sql_manager.h sql_map.h sql_string.h unireg.h \
sql_map.h sql_string.h unireg.h \
sql_error.h field.h handler.h mysqld_suffix.h \
ha_heap.h ha_myisam.h ha_myisammrg.h ha_partition.h \
ha_innodb.h ha_berkeley.h ha_federated.h \
ha_innodb.h ha_federated.h \
ha_ndbcluster.h ha_ndbcluster_binlog.h \
ha_ndbcluster_tables.h \
opt_range.h protocol.h rpl_tblmap.h \
@ -88,7 +88,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
discover.cc time.cc opt_range.cc opt_sum.cc \
records.cc filesort.cc handler.cc \
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
ha_partition.cc ha_innodb.cc ha_berkeley.cc \
ha_partition.cc ha_innodb.cc \
ha_federated.cc \
ha_ndbcluster.cc ha_ndbcluster_binlog.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
@ -161,9 +161,6 @@ lex_hash.h: gen_lex_hash$(EXEEXT)
./gen_lex_hash$(EXEEXT) > $@
# the following three should eventually be moved out of this directory
ha_berkeley.o: ha_berkeley.cc ha_berkeley.h
$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<

View file

@ -6571,7 +6571,7 @@ void Field_varstring::sql_type(String &res) const
}
uint Field_varstring::data_length(const char *from)
uint32 Field_varstring::data_length(const char *from)
{
return length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
}

View file

@ -118,6 +118,11 @@ public:
*/
virtual String *val_str(String*,String *)=0;
String *val_int_as_str(String *val_buffer, my_bool unsigned_flag);
/*
str_needs_quotes() returns TRUE if the value returned by val_str() needs
to be quoted when used in constructing an SQL query.
*/
virtual bool str_needs_quotes() { return FALSE; }
virtual Item_result result_type () const=0;
virtual Item_result cmp_type () const { return result_type(); }
virtual Item_result cast_to_int_type () const { return result_type(); }
@ -417,6 +422,7 @@ public:
uint32 max_length() { return field_length; }
friend class create_field;
my_decimal *val_decimal(my_decimal *);
virtual bool str_needs_quotes() { return TRUE; }
uint is_equal(create_field *new_field);
};
@ -1120,7 +1126,7 @@ public:
int key_cmp(const byte *str, uint length);
uint packed_col_length(const char *to, uint length);
uint max_packed_col_length(uint max_length);
uint data_length(const char *from);
uint32 data_length(const char *from);
uint size_of() const { return sizeof(*this); }
enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; }
bool has_charset(void) const
@ -1385,6 +1391,7 @@ public:
double val_real(void);
longlong val_int(void);
String *val_str(String*, String *);
virtual bool str_needs_quotes() { return TRUE; }
my_decimal *val_decimal(my_decimal *);
int cmp(const char *a, const char *b)
{ return cmp_binary(a, b); }
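
The str_needs_quotes() hook added to Field above (with overrides returning TRUE in the string-like field classes) lets code that rebuilds SQL text from field values decide whether to wrap each value in quotes; the ha_federated hunks later in this changeset are its consumer. A hedged sketch of that calling pattern, mirroring ha_federated::write_row() — the helper name is illustrative only, and the snippet assumes it is compiled inside the server tree:

#include "mysql_priv.h"   // assumed: built as part of sql/, like ha_federated.cc

/* Illustrative helper, not part of this patch: append one field value to an
   SQL statement being built, quoting it only when the field says so. */
static void append_field_value(Field *field, String &out, String &scratch)
{
  if (field->is_null())
  {
    out.append(STRING_WITH_LEN(" NULL "));
    return;
  }
  bool needs_quote= field->str_needs_quotes();  // TRUE for string/blob fields
  field->val_str(&scratch);                     // render the value as text
  if (needs_quote)
    out.append('\'');
  scratch.print(&out);                          // copy into the query, as ha_federated does
  if (needs_quote)
    out.append('\'');
  scratch.length(0);                            // reset the scratch buffer
}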

File diff suppressed because it is too large

View file

@ -1,180 +0,0 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef USE_PRAGMA_INTERFACE
#pragma interface /* gcc class implementation */
#endif
/* class for the BerkeleyDB handler */
#include <db.h>
#define BDB_HIDDEN_PRIMARY_KEY_LENGTH 5
typedef struct st_berkeley_share {
ulonglong auto_ident;
ha_rows rows, org_rows;
ulong *rec_per_key;
THR_LOCK lock;
pthread_mutex_t mutex;
char *table_name;
DB *status_block, *file, **key_file;
u_int32_t *key_type;
uint table_name_length,use_count;
uint status,version;
uint ref_length;
bool fixed_length_primary_key, fixed_length_row;
} BDB_SHARE;
class ha_berkeley: public handler
{
THR_LOCK_DATA lock;
DBT last_key,current_row;
gptr alloc_ptr;
byte *rec_buff;
char *key_buff, *key_buff2, *primary_key_buff;
DB *file, **key_file;
DB_TXN *transaction;
u_int32_t *key_type;
DBC *cursor;
BDB_SHARE *share;
ulong int_table_flags;
ulong alloced_rec_buff_length;
ulong changed_rows;
uint primary_key,last_dup_key, hidden_primary_key, version;
bool key_read, using_ignore;
bool fix_rec_buff_for_blob(ulong length);
byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH];
ulong max_row_length(const byte *buf);
int pack_row(DBT *row,const byte *record, bool new_row);
void unpack_row(char *record, DBT *row);
void unpack_key(char *record, DBT *key, uint index);
DBT *create_key(DBT *key, uint keynr, char *buff, const byte *record,
int key_length = MAX_KEY_LENGTH);
DBT *pack_key(DBT *key, uint keynr, char *buff, const byte *key_ptr,
uint key_length);
int remove_key(DB_TXN *trans, uint keynr, const byte *record, DBT *prim_key);
int remove_keys(DB_TXN *trans,const byte *record, DBT *new_record,
DBT *prim_key, key_map *keys);
int restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key,
const byte *old_row, DBT *old_key,
const byte *new_row, DBT *new_key);
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
int update_primary_key(DB_TXN *trans, bool primary_key_changed,
const byte * old_row, DBT *old_key,
const byte * new_row, DBT *prim_key,
bool local_using_ignore);
int read_row(int error, char *buf, uint keynr, DBT *row, DBT *key, bool);
DBT *get_pos(DBT *to, byte *pos);
public:
ha_berkeley(TABLE_SHARE *table_arg);
~ha_berkeley() {}
const char *table_type() const { return "BerkeleyDB"; }
ulong index_flags(uint idx, uint part, bool all_parts) const;
const char *index_type(uint key_number) { return "BTREE"; }
const char **bas_ext() const;
ulonglong table_flags(void) const { return int_table_flags; }
uint max_supported_keys() const { return MAX_KEY-1; }
uint extra_rec_buf_length() const { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
ha_rows estimate_rows_upper_bound();
uint max_supported_key_length() const { return UINT_MAX32; }
uint max_supported_key_part_length() const { return UINT_MAX32; }
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
int open(const char *name, int mode, uint test_if_locked);
int close(void);
double scan_time();
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
int index_init(uint index, bool sorted);
int index_end();
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_last(byte * buf, const byte * key, uint key_len);
int index_next(byte * buf);
int index_next_same(byte * buf, const byte *key, uint keylen);
int index_prev(byte * buf);
int index_first(byte * buf);
int index_last(byte * buf);
int rnd_init(bool scan);
int rnd_end();
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
void info(uint);
int extra(enum ha_extra_function operation);
int reset(void);
int external_lock(THD *thd, int lock_type);
int start_stmt(THD *thd, thr_lock_type lock_type);
void position(byte *record);
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
int check(THD* thd, HA_CHECK_OPT* check_opt);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
int create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
int rename_table(const char* from, const char* to);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
void get_status();
inline void get_auto_primary_key(byte *to)
{
pthread_mutex_lock(&share->mutex);
share->auto_ident++;
int5store(to,share->auto_ident);
pthread_mutex_unlock(&share->mutex);
}
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
ulonglong *nb_reserved_values);
void print_error(int error, myf errflag);
uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; }
bool primary_key_is_clustered() { return true; }
int cmp_ref(const byte *ref1, const byte *ref2);
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
};
extern const u_int32_t bdb_DB_TXN_NOSYNC;
extern const u_int32_t bdb_DB_RECOVER;
extern const u_int32_t bdb_DB_PRIVATE;
extern const u_int32_t bdb_DB_DIRECT_DB;
extern const u_int32_t bdb_DB_DIRECT_LOG;
extern bool berkeley_shared_data;
extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
berkeley_lock_types[];
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
extern ulonglong berkeley_cache_size;
extern ulong berkeley_region_size, berkeley_cache_parts;
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
extern long berkeley_lock_scan_time;
extern TYPELIB berkeley_lock_typelib;
int berkeley_init(void);
int berkeley_end(ha_panic_function type);
bool berkeley_flush_logs(void);
bool berkeley_show_status(THD *thd, stat_print_fn *print, enum ha_stat_type);

View file

@ -1142,7 +1142,7 @@ bool ha_federated::create_where_from_key(String *to,
Field *field= key_part->field;
uint store_length= key_part->store_length;
uint part_length= min(store_length, length);
needs_quotes= 1;
needs_quotes= field->str_needs_quotes();
DBUG_DUMP("key, start of loop", (char *) ptr, length);
if (key_part->null_bit)
@ -1663,23 +1663,22 @@ int ha_federated::write_row(byte *buf)
{
commas_added= TRUE;
if ((*field)->is_null())
insert_field_value_string.append(STRING_WITH_LEN(" NULL "));
values_string.append(STRING_WITH_LEN(" NULL "));
else
{
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&insert_field_value_string);
values_string.append('\'');
if (needs_quote)
values_string.append('\'');
insert_field_value_string.print(&values_string);
values_string.append('\'');
if (needs_quote)
values_string.append('\'');
insert_field_value_string.length(0);
}
/* append the field name */
insert_string.append((*field)->field_name);
/* append the value */
values_string.append(insert_field_value_string);
insert_field_value_string.length(0);
/* append commas between both fields and fieldnames */
/*
unfortunately, we can't use the logic if *(fields + 1) to
@ -1884,12 +1883,15 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
update_string.append(STRING_WITH_LEN(" NULL "));
else
{
my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
/* otherwise = */
my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&field_value);
update_string.append('\'');
if (needs_quote)
update_string.append('\'');
field_value.print(&update_string);
update_string.append('\'');
if (needs_quote)
update_string.append('\'');
field_value.length(0);
tmp_restore_column_map(table->read_set, old_map);
}
@ -1903,12 +1905,15 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
where_string.append(STRING_WITH_LEN(" IS NULL "));
else
{
bool needs_quote= (*field)->str_needs_quotes();
where_string.append(STRING_WITH_LEN(" = "));
(*field)->val_str(&field_value,
(char*) (old_data + (*field)->offset()));
where_string.append('\'');
if (needs_quote)
where_string.append('\'');
field_value.print(&where_string);
where_string.append('\'');
if (needs_quote)
where_string.append('\'');
field_value.length(0);
}
where_string.append(STRING_WITH_LEN(" AND "));
@ -1983,11 +1988,14 @@ int ha_federated::delete_row(const byte *buf)
}
else
{
delete_string.append(STRING_WITH_LEN(" = "));
cur_field->val_str(&data_string);
delete_string.append('\'');
data_string.print(&delete_string);
delete_string.append('\'');
bool needs_quote= cur_field->str_needs_quotes();
delete_string.append(STRING_WITH_LEN(" = "));
cur_field->val_str(&data_string);
if (needs_quote)
delete_string.append('\'');
data_string.print(&delete_string);
if (needs_quote)
delete_string.append('\'');
}
delete_string.append(STRING_WITH_LEN(" AND "));
}

View file

@ -256,13 +256,15 @@ int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans)
}
inline
int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans)
int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
bool force_release)
{
#ifdef NOT_USED
int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
h->release_completed_operations(trans, force_release);
return h->m_ignore_no_key ?
execute_no_commit_ignore_no_key(h,trans) :
trans->execute(NdbTransaction::NoCommit,
@ -297,13 +299,15 @@ int execute_commit(THD *thd, NdbTransaction *trans)
}
inline
int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans)
int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans,
bool force_release)
{
#ifdef NOT_USED
int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
h->release_completed_operations(trans, force_release);
return trans->execute(NdbTransaction::NoCommit,
NdbTransaction::AO_IgnoreError,
h->m_force_send);
@ -328,6 +332,7 @@ Thd_ndb::Thd_ndb()
all= NULL;
stmt= NULL;
error= 0;
query_state&= NDB_QUERY_NORMAL;
options= 0;
(void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
(hash_get_key)thd_ndb_share_get_key, 0, 0);
@ -1696,7 +1701,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
ERR_RETURN(trans->getNdbError());
}
if (execute_no_commit_ie(this,trans) != 0)
if (execute_no_commit_ie(this,trans,false) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
@ -1761,7 +1766,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
}
}
if (execute_no_commit(this,trans) != 0)
if (execute_no_commit(this,trans,false) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
@ -1914,7 +1919,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record)
}
last= trans->getLastDefinedOperation();
if (first)
res= execute_no_commit_ie(this,trans);
res= execute_no_commit_ie(this,trans,false);
else
{
// Table has no keys
@ -1963,7 +1968,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
if ((res= define_read_attrs(buf, op)))
DBUG_RETURN(res);
if (execute_no_commit_ie(this,trans) != 0)
if (execute_no_commit_ie(this,trans,false) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
@ -2011,7 +2016,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
*/
if (m_ops_pending && m_blobs_pending)
{
if (execute_no_commit(this,trans) != 0)
if (execute_no_commit(this,trans,false) != 0)
DBUG_RETURN(ndb_err(trans));
m_ops_pending= 0;
m_blobs_pending= FALSE;
@ -2043,7 +2048,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
{
if (m_transaction_on)
{
if (execute_no_commit(this,trans) != 0)
if (execute_no_commit(this,trans,false) != 0)
DBUG_RETURN(-1);
}
else
@ -2370,7 +2375,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
ERR_RETURN(trans->getNdbError());
}
if (execute_no_commit(this,trans) != 0)
if (execute_no_commit(this,trans,false) != 0)
DBUG_RETURN(ndb_err(trans));
DBUG_RETURN(next_result(buf));
@ -2440,7 +2445,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
if ((res= define_read_attrs(buf, op)))
DBUG_RETURN(res);
if (execute_no_commit(this,trans) != 0)
if (execute_no_commit(this,trans,false) != 0)
DBUG_RETURN(ndb_err(trans));
DBUG_PRINT("exit", ("Scan started successfully"));
DBUG_RETURN(next_result(buf));
@ -2603,7 +2608,7 @@ int ha_ndbcluster::write_row(byte *record)
m_bulk_insert_not_flushed= FALSE;
if (m_transaction_on)
{
if (execute_no_commit(this,trans) != 0)
if (execute_no_commit(this,trans,false) != 0)
{
m_skip_auto_increment= TRUE;
no_uncommitted_rows_execute_failure();
@ -2840,7 +2845,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
op->setValue(no_fields, part_func_value);
}
// Execute update operation
if (!cursor && execute_no_commit(this,trans) != 0) {
if (!cursor && execute_no_commit(this,trans,false) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@ -2926,7 +2931,7 @@ int ha_ndbcluster::delete_row(const byte *record)
}
// Execute delete operation
if (execute_no_commit(this,trans) != 0) {
if (execute_no_commit(this,trans,false) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@ -3392,6 +3397,26 @@ int ha_ndbcluster::close_scan()
NdbScanOperation *cursor= m_active_cursor ? m_active_cursor : m_multi_cursor;
if (m_lock_tuple)
{
/*
Lock level m_lock.type is either TL_WRITE_ALLOW_WRITE
(SELECT FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS
(SELECT ... LOCK IN SHARE MODE) and the row was not
explicitly unlocked with an unlock_row() call
*/
NdbOperation *op;
// Lock row
DBUG_PRINT("info", ("Keeping lock on scanned row"));
if (!(op= cursor->lockCurrentTuple()))
{
m_lock_tuple= false;
ERR_RETURN(trans->getNdbError());
}
m_ops_pending++;
}
m_lock_tuple= false;
if (m_ops_pending)
{
/*
@ -3399,7 +3424,7 @@ int ha_ndbcluster::close_scan()
deleting/updating transaction before closing the scan
*/
DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending));
if (execute_no_commit(this,trans) != 0) {
if (execute_no_commit(this,trans,false) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@ -3793,7 +3818,7 @@ int ha_ndbcluster::end_bulk_insert()
m_bulk_insert_not_flushed= FALSE;
if (m_transaction_on)
{
if (execute_no_commit(this, trans) != 0)
if (execute_no_commit(this, trans,false) != 0)
{
no_uncommitted_rows_execute_failure();
my_errno= error= ndb_err(trans);
@ -3968,6 +3993,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
ERR_RETURN(ndb->getNdbError());
thd_ndb->init_open_tables();
thd_ndb->stmt= trans;
thd_ndb->query_state&= NDB_QUERY_NORMAL;
trans_register_ha(thd, FALSE, &ndbcluster_hton);
}
else
@ -3983,6 +4009,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
ERR_RETURN(ndb->getNdbError());
thd_ndb->init_open_tables();
thd_ndb->all= trans;
thd_ndb->query_state&= NDB_QUERY_NORMAL;
trans_register_ha(thd, TRUE, &ndbcluster_hton);
/*
@ -4139,6 +4166,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type)
thd_ndb->stmt= trans;
trans_register_ha(thd, FALSE, &ndbcluster_hton);
}
thd_ndb->query_state&= NDB_QUERY_NORMAL;
m_active_trans= trans;
// Start of statement
@ -7557,6 +7585,30 @@ int ha_ndbcluster::write_ndb_file(const char *name)
DBUG_RETURN(error);
}
void
ha_ndbcluster::release_completed_operations(NdbTransaction *trans,
bool force_release)
{
if (trans->hasBlobOperation())
{
/* We are reading/writing BLOB fields;
releasing operation records is unsafe
*/
return;
}
if (!force_release)
{
if (get_thd_ndb(current_thd)->query_state & NDB_QUERY_MULTI_READ_RANGE)
{
/* We are batching reads and have not consumed all fetched
rows yet; releasing operation records is unsafe
*/
return;
}
}
trans->releaseCompletedOperations();
}
int
ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
KEY_MULTI_RANGE *ranges,
@ -7572,6 +7624,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
NDB_INDEX_TYPE index_type= get_index_type(active_index);
ulong reclength= table_share->reclength;
NdbOperation* op;
Thd_ndb *thd_ndb= get_thd_ndb(current_thd);
if (uses_blob_value())
{
@ -7585,7 +7638,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
sorted,
buffer));
}
thd_ndb->query_state|= NDB_QUERY_MULTI_READ_RANGE;
m_disable_multi_read= FALSE;
/**
@ -7757,7 +7810,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
*/
m_current_multi_operation=
lastOp ? lastOp->next() : m_active_trans->getFirstDefinedOperation();
if (!(res= execute_no_commit_ie(this, m_active_trans)))
if (!(res= execute_no_commit_ie(this, m_active_trans, true)))
{
m_multi_range_defined= multi_range_curr;
multi_range_curr= ranges;

View file

@ -534,6 +534,11 @@ class Ndb_cond_traverse_context
Ndb_rewrite_context *rewrite_stack;
};
typedef enum ndb_query_state_bits {
NDB_QUERY_NORMAL = 0,
NDB_QUERY_MULTI_READ_RANGE = 1
} NDB_QUERY_STATE_BITS;
/*
Place holder for ha_ndbcluster thread specific data
*/
@ -571,6 +576,7 @@ class Thd_ndb
int error;
uint32 options;
List<NDB_SHARE> changed_tables;
uint query_state;
HASH open_tables;
};
@ -849,8 +855,8 @@ private:
friend int execute_commit(ha_ndbcluster*, NdbTransaction*);
friend int execute_no_commit_ignore_no_key(ha_ndbcluster*, NdbTransaction*);
friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*);
friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*);
friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool);
friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*, bool);
NdbTransaction *m_active_trans;
NdbScanOperation *m_active_cursor;
@ -898,6 +904,8 @@ private:
bool m_force_send;
ha_rows m_autoincrement_prefetch;
bool m_transaction_on;
void release_completed_operations(NdbTransaction*, bool);
Ndb_cond_stack *m_cond_stack;
bool m_disable_multi_read;
byte *m_multi_range_result_ptr;

View file

@ -74,7 +74,6 @@ static const LEX_STRING sys_table_aliases[]=
{
{(char*)STRING_WITH_LEN("INNOBASE")}, {(char*)STRING_WITH_LEN("INNODB")},
{(char*)STRING_WITH_LEN("NDB")}, {(char*)STRING_WITH_LEN("NDBCLUSTER")},
{(char*)STRING_WITH_LEN("BDB")}, {(char*)STRING_WITH_LEN("BERKELEYDB")},
{(char*)STRING_WITH_LEN("HEAP")}, {(char*)STRING_WITH_LEN("MEMORY")},
{(char*)STRING_WITH_LEN("MERGE")}, {(char*)STRING_WITH_LEN("MRG_MYISAM")},
{NullS, 0}
@ -1508,7 +1507,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
This is never called for InnoDB tables, as these table types
have the HA_STATS_RECORDS_IS_EXACT set.
*/

View file

@ -82,10 +82,8 @@ static SYMBOL symbols[] = {
{ "AVG", SYM(AVG_SYM)},
{ "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)},
{ "BACKUP", SYM(BACKUP_SYM)},
{ "BDB", SYM(BERKELEY_DB_SYM)},
{ "BEFORE", SYM(BEFORE_SYM)},
{ "BEGIN", SYM(BEGIN_SYM)},
{ "BERKELEYDB", SYM(BERKELEY_DB_SYM)},
{ "BETWEEN", SYM(BETWEEN_SYM)},
{ "BIGINT", SYM(BIGINT)},
{ "BINARY", SYM(BINARY)},

View file

@ -1620,12 +1620,6 @@ extern handlerton innobase_hton;
#else
extern SHOW_COMP_OPTION have_innodb;
#endif
#ifdef WITH_BERKELEY_STORAGE_ENGINE
extern handlerton berkeley_hton;
#define have_berkeley_db berkeley_hton.state
#else
extern SHOW_COMP_OPTION have_berkeley_db;
#endif
#ifdef WITH_EXAMPLE_STORAGE_ENGINE
extern handlerton example_hton;
#define have_example_db example_hton.state

View file

@ -310,7 +310,7 @@ static bool lower_case_table_names_used= 0;
static bool volatile select_thread_in_use, signal_thread_in_use;
static bool volatile ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_bdb, opt_isam, opt_ndbcluster, opt_merge;
static my_bool opt_isam, opt_ndbcluster, opt_merge;
static my_bool opt_short_log_format= 0;
static uint kill_cached_threads, wake_thread;
static ulong killed_threads, thread_created;
@ -332,10 +332,6 @@ static I_List<THD> thread_cache;
static pthread_cond_t COND_thread_cache, COND_flush_thread_cache;
#ifdef WITH_BERKELEY_STORAGE_ENGINE
static my_bool opt_sync_bdb_logs;
#endif
/* Global variables */
bool opt_update_log, opt_bin_log;
@ -405,22 +401,6 @@ extern ulong srv_commit_concurrency;
extern ulong srv_flush_log_at_trx_commit;
}
#endif
#ifdef WITH_BERKELEY_STORAGE_ENGINE
#ifndef HAVE_U_INT32_T
typedef unsigned int u_int32_t;
#endif
extern const u_int32_t bdb_DB_TXN_NOSYNC, bdb_DB_RECOVER, bdb_DB_PRIVATE,
bdb_DB_DIRECT_DB, bdb_DB_DIRECT_LOG;
extern bool berkeley_shared_data;
extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
berkeley_lock_types[];
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
extern ulonglong berkeley_cache_size;
extern ulong berkeley_region_size, berkeley_cache_parts;
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
extern long berkeley_lock_scan_time;
extern TYPELIB berkeley_lock_typelib;
#endif
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
const char *opt_ndbcluster_connectstring= 0;
@ -3355,11 +3335,7 @@ server.");
static void create_maintenance_thread()
{
if (
#ifdef WITH_BERKELEY_STORAGE_ENGINE
(have_berkeley_db == SHOW_OPTION_YES) ||
#endif
(flush_time && flush_time != ~(ulong) 0L))
if (flush_time && flush_time != ~(ulong) 0L)
{
pthread_t hThread;
if (pthread_create(&hThread,&connection_attrib,handle_manager,0))
@ -4901,38 +4877,6 @@ struct my_option my_long_options[] =
"Path to installation directory. All paths are usually resolved relative to this.",
(gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \
Disable with --skip-bdb (will save memory).",
(gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0,
0, 0, 0},
#ifdef WITH_BERKELEY_STORAGE_ENGINE
{"bdb-data-direct", OPT_BDB_DATA_DIRECT,
"Turn off system buffering of BDB database files to avoid double caching.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home,
(gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-lock-detect", OPT_BDB_LOCK,
"Berkeley lock detect (DEFAULT, OLDEST, RANDOM or YOUNGEST, # sec).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-log-direct", OPT_BDB_LOG_DIRECT,
"Turn off system buffering of BDB log files to avoid double caching.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-logdir", OPT_BDB_LOG, "Berkeley DB log file directory.",
(gptr*) &berkeley_logdir, (gptr*) &berkeley_logdir, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-no-recover", OPT_BDB_NO_RECOVER,
"Don't try to recover Berkeley DB tables on start.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-no-sync", OPT_BDB_NOSYNC,
"This option is deprecated, use --skip-sync-bdb-logs instead",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-shared-data", OPT_BDB_SHARED,
"Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0,
0, 0, 0, 0, 0},
{"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name.",
(gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
{"big-tables", OPT_BIG_TABLES,
"Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
@ -5747,31 +5691,6 @@ log and this option does nothing anymore.",
"The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.",
(gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG,
REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 },
#ifdef WITH_BERKELEY_STORAGE_ENGINE
{ "bdb_cache_parts", OPT_BDB_CACHE_PARTS,
"Number of parts to use for BDB cache.",
(gptr*) &berkeley_cache_parts, (gptr*) &berkeley_cache_parts, 0, GET_ULONG,
REQUIRED_ARG, 1, 1, 1024, 0, 1, 0},
{ "bdb_cache_size", OPT_BDB_CACHE_SIZE,
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULL,
REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (ulonglong) ~0, 0, IO_SIZE, 0},
{"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
{"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE,
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0,
GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ~0L, 0, 1024, 0},
{"bdb_max_lock", OPT_BDB_MAX_LOCK,
"The maximum number of locks you can have active on a BDB table.",
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
{"bdb_region_size", OPT_BDB_REGION_SIZE,
"The size of the underlying logging area of the Berkeley DB environment.",
(gptr*) &berkeley_region_size, (gptr*) &berkeley_region_size, 0, GET_ULONG,
OPT_ARG, 60*1024L, 60*1024L, (long) ~0, 0, 1, 0},
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
{"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
"The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.",
(gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG,
@ -6263,12 +6182,6 @@ The minimum value for this variable is 4096.",
(gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG,
MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD,
1, 0},
#ifdef WITH_BERKELEY_STORAGE_ENGINE
{"sync-bdb-logs", OPT_BDB_SYNC,
"Synchronously flush Berkeley DB logs. Enabled by default",
(gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL,
NO_ARG, 1, 0, 0, 0, 0, 0},
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
{"sync-binlog", OPT_SYNC_BINLOG,
"Synchronously flush binary log to disk after every #th event. "
"Use 0 (default) to disable synchronous flushing.",
@ -7583,59 +7496,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
have_merge_db= SHOW_OPTION_YES;
else
have_merge_db= SHOW_OPTION_DISABLED;
#ifdef WITH_BERKELEY_STORAGE_ENGINE
case OPT_BDB_NOSYNC:
/* Deprecated option */
opt_sync_bdb_logs= 0;
/* Fall through */
case OPT_BDB_SYNC:
if (!opt_sync_bdb_logs)
berkeley_env_flags|= bdb_DB_TXN_NOSYNC;
else
berkeley_env_flags&= ~bdb_DB_TXN_NOSYNC;
break;
case OPT_BDB_LOG_DIRECT:
berkeley_env_flags|= bdb_DB_DIRECT_DB;
break;
case OPT_BDB_DATA_DIRECT:
berkeley_env_flags|= bdb_DB_DIRECT_LOG;
break;
case OPT_BDB_NO_RECOVER:
berkeley_init_flags&= ~(bdb_DB_RECOVER);
break;
case OPT_BDB_LOCK:
{
int type;
if ((type=find_type(argument, &berkeley_lock_typelib, 2)) > 0)
berkeley_lock_type=berkeley_lock_types[type-1];
else
{
int err;
char *end;
uint length= strlen(argument);
long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err);
if (end == argument+length)
berkeley_lock_scan_time= value;
else
{
fprintf(stderr,"Unknown lock type: %s\n",argument);
exit(1);
}
}
break;
}
case OPT_BDB_SHARED:
berkeley_init_flags&= ~(bdb_DB_PRIVATE);
berkeley_shared_data= 1;
break;
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
case OPT_BDB:
#ifdef WITH_BERKELEY_STORAGE_ENGINE
if (opt_bdb)
have_berkeley_db= SHOW_OPTION_YES;
else
have_berkeley_db= SHOW_OPTION_DISABLED;
#endif
break;
case OPT_NDBCLUSTER:
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
@ -7868,10 +7729,6 @@ static void get_options(int argc,char **argv)
#ifndef WITH_ISAM_STORAGE_ENGINE
if (opt_isam)
sql_print_warning("this binary does not contain ISAM storage engine");
#endif
#ifndef WITH_BERKELEY_STORAGE_ENGINE
if (opt_bdb)
sql_print_warning("this binary does not contain BDB storage engine");
#endif
if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes) &&
!opt_slow_log)
@ -8215,7 +8072,6 @@ void refresh_status(THD *thd)
/*****************************************************************************
Instantiate have_xyx for missing storage engines
*****************************************************************************/
#undef have_berkeley_db
#undef have_innodb
#undef have_ndbcluster
#undef have_example_db
@ -8225,7 +8081,6 @@ void refresh_status(THD *thd)
#undef have_partition_db
#undef have_blackhole_db
SHOW_COMP_OPTION have_berkeley_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO;
@ -8235,14 +8090,6 @@ SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO;
#ifndef WITH_BERKELEY_STORAGE_ENGINE
bool berkeley_shared_data;
ulong berkeley_max_lock, berkeley_log_buffer_size;
ulonglong berkeley_cache_size;
ulong berkeley_region_size, berkeley_cache_parts;
char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
#endif
#ifndef WITH_INNOBASE_STORAGE_ENGINE
uint innobase_flush_log_at_trx_commit;
ulong innobase_fast_shutdown;

View file

@ -59,13 +59,6 @@
#include "event_scheduler.h"
/* WITH_BERKELEY_STORAGE_ENGINE */
extern bool berkeley_shared_data;
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
extern ulonglong berkeley_cache_size;
extern ulong berkeley_region_size, berkeley_cache_parts;
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
/* WITH_INNOBASE_STORAGE_ENGINE */
extern uint innobase_flush_log_at_trx_commit;
extern ulong innobase_fast_shutdown;
@ -669,7 +662,6 @@ sys_var_thd_time_zone sys_time_zone("time_zone");
/* Read only variables */
sys_var_have_variable sys_have_archive_db("have_archive", &have_archive_db);
sys_var_have_variable sys_have_berkeley_db("have_bdb", &have_berkeley_db);
sys_var_have_variable sys_have_blackhole_db("have_blackhole_engine",
&have_blackhole_db);
sys_var_have_variable sys_have_compress("have_compress", &have_compress);
@ -760,15 +752,6 @@ SHOW_VAR init_vars[]= {
{sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS},
{"back_log", (char*) &back_log, SHOW_LONG},
{sys_basedir.name, (char*) &sys_basedir, SHOW_SYS},
{"bdb_cache_parts", (char*) &berkeley_cache_parts, SHOW_LONG},
{"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONGLONG},
{"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR},
{"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG},
{"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR},
{"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG},
{"bdb_region_size", (char*) &berkeley_region_size, SHOW_LONG},
{"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL},
{"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR},
{sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS},
{sys_binlog_format.name, (char*) &sys_binlog_format, SHOW_SYS},
{sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS},
@ -813,7 +796,6 @@ SHOW_VAR init_vars[]= {
{sys_var_general_log_path.name, (char*) &sys_var_general_log_path, SHOW_SYS},
{sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS},
{sys_have_archive_db.name, (char*) &have_archive_db, SHOW_HAVE},
{sys_have_berkeley_db.name, (char*) &have_berkeley_db, SHOW_HAVE},
{sys_have_blackhole_db.name,(char*) &have_blackhole_db, SHOW_HAVE},
{sys_have_compress.name, (char*) &have_compress, SHOW_HAVE},
{sys_have_crypt.name, (char*) &have_crypt, SHOW_HAVE},

File diff suppressed because it is too large

View file

@ -23,7 +23,6 @@
*/
#include "mysql_priv.h"
#include "sql_manager.h"
ulong volatile manager_status;
bool volatile manager_thread_in_use;

View file

@ -1,19 +0,0 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef WITH_BERKELEY_STORAGE_ENGINE
void berkeley_cleanup_log_files(void);
#endif /* WITH_BERKELEY_STORAGE_ENGINE */

View file

@ -146,7 +146,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token BEFORE_SYM
%token BEGIN_SYM
%token BENCHMARK_SYM
%token BERKELEY_DB_SYM
%token BIGINT
%token BINARY
%token BINLOG_SYM
@ -8354,30 +8353,6 @@ show_param:
if (prepare_schema_table(YYTHD, lex, 0, SCH_COLLATIONS))
YYABORT;
}
| BERKELEY_DB_SYM LOGS_SYM
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS;
if (!(lex->create_info.db_type=
ha_resolve_by_legacy_type(YYTHD, DB_TYPE_BERKELEY_DB)))
{
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "BerkeleyDB");
YYABORT;
}
WARN_DEPRECATED(yythd, "5.2", "SHOW BDB LOGS", "'SHOW ENGINE BDB LOGS'");
}
| LOGS_SYM
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS;
if (!(lex->create_info.db_type=
ha_resolve_by_legacy_type(YYTHD, DB_TYPE_BERKELEY_DB)))
{
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "BerkeleyDB");
YYABORT;
}
WARN_DEPRECATED(yythd, "5.2", "SHOW LOGS", "'SHOW ENGINE BDB LOGS'");
}
| GRANTS
{
LEX *lex=Lex;
@ -9408,7 +9383,6 @@ keyword_sp:
| AUTOEXTEND_SIZE_SYM {}
| AVG_ROW_LENGTH {}
| AVG_SYM {}
| BERKELEY_DB_SYM {}
| BINLOG_SYM {}
| BIT_SYM {}
| BOOL_SYM {}

View file

@ -1,67 +0,0 @@
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
${CMAKE_SOURCE_DIR}/storage/bdb/dbinc
${CMAKE_SOURCE_DIR}/storage/bdb)
# BDB needs a number of source files that are auto-generated by the Unix
# configure script. So to build BDB, these files must either be copied over
# to the Windows BitKeeper tree, or taken from a source .tar.gz package that
# already includes them.
ADD_LIBRARY(bdb crypto/aes_method.c btree/bt_compact.c btree/bt_compare.c
btree/bt_conv.c btree/bt_curadj.c btree/bt_cursor.c
btree/bt_delete.c btree/bt_method.c btree/bt_open.c btree/bt_put.c
btree/bt_rec.c btree/bt_reclaim.c btree/bt_recno.c
btree/bt_rsearch.c btree/bt_search.c btree/bt_split.c
btree/bt_stat.c btree/bt_upgrade.c btree/bt_verify.c
btree/btree_auto.c db/crdel_auto.c db/crdel_rec.c crypto/crypto.c
db/db.c db/db_am.c db/db_auto.c common/db_byteorder.c db/db_cam.c
common/db_clock.c db/db_conv.c db/db_dispatch.c db/db_dup.c
common/db_err.c common/db_getlong.c common/db_idspace.c
db/db_iface.c db/db_join.c common/db_log2.c db/db_meta.c
db/db_method.c db/db_open.c db/db_overflow.c db/db_ovfl_vrfy.c
db/db_pr.c db/db_rec.c db/db_reclaim.c db/db_remove.c
db/db_rename.c db/db_ret.c env/db_salloc.c db/db_setid.c
db/db_setlsn.c env/db_shash.c db/db_stati.c db/db_truncate.c
db/db_upg.c db/db_upg_opd.c db/db_vrfy.c db/db_vrfyutil.c
dbm/dbm.c dbreg/dbreg.c dbreg/dbreg_auto.c dbreg/dbreg_rec.c
dbreg/dbreg_stat.c dbreg/dbreg_util.c env/env_failchk.c
env/env_file.c env/env_method.c env/env_open.c env/env_recover.c
env/env_region.c env/env_register.c env/env_stat.c
fileops/fileops_auto.c fileops/fop_basic.c fileops/fop_rec.c
fileops/fop_util.c hash/hash.c hash/hash_auto.c hash/hash_conv.c
hash/hash_dup.c hash/hash_func.c hash/hash_meta.c
hash/hash_method.c hash/hash_open.c hash/hash_page.c
hash/hash_rec.c hash/hash_reclaim.c hash/hash_stat.c
hash/hash_upgrade.c hash/hash_verify.c hmac/hmac.c
hsearch/hsearch.c lock/lock.c lock/lock_deadlock.c
lock/lock_failchk.c lock/lock_id.c lock/lock_list.c
lock/lock_method.c lock/lock_region.c lock/lock_stat.c
lock/lock_timer.c lock/lock_util.c log/log.c log/log_archive.c
log/log_compare.c log/log_debug.c log/log_get.c log/log_method.c
log/log_put.c log/log_stat.c mp/mp_alloc.c mp/mp_bh.c mp/mp_fget.c
mp/mp_fmethod.c mp/mp_fopen.c mp/mp_fput.c mp/mp_fset.c
mp/mp_method.c mp/mp_region.c mp/mp_register.c mp/mp_stat.c
mp/mp_sync.c mp/mp_trickle.c crypto/mersenne/mt19937db.c
mutex/mut_alloc.c mutex/mut_method.c mutex/mut_region.c
mutex/mut_stat.c mutex/mut_tas.c mutex/mut_win32.c
os_win32/os_abs.c os/os_alloc.c os_win32/os_clock.c
os_win32/os_config.c os_win32/os_dir.c os_win32/os_errno.c
os_win32/os_fid.c os_win32/os_flock.c os_win32/os_fsync.c
os_win32/os_handle.c os/os_id.c os_win32/os_map.c os/os_method.c
os/os_oflags.c os_win32/os_open.c os/os_region.c
os_win32/os_rename.c os/os_root.c os/os_rpath.c os_win32/os_rw.c
os_win32/os_seek.c os_win32/os_sleep.c os_win32/os_spin.c
os_win32/os_stat.c os/os_tmpdir.c os_win32/os_truncate.c
os/os_unlink.c qam/qam.c qam/qam_auto.c qam/qam_conv.c
qam/qam_files.c qam/qam_method.c qam/qam_open.c qam/qam_rec.c
qam/qam_stat.c qam/qam_upgrade.c qam/qam_verify.c rep/rep_auto.c
rep/rep_backup.c rep/rep_elect.c rep/rep_log.c rep/rep_method.c
rep/rep_record.c rep/rep_region.c rep/rep_stat.c rep/rep_stub.c
rep/rep_util.c rep/rep_verify.c crypto/rijndael/rijndael-alg-fst.c
crypto/rijndael/rijndael-api-fst.c hmac/sha1.c clib/strcasecmp.c
txn/txn.c txn/txn_auto.c txn/txn_chkpt.c txn/txn_failchk.c
txn/txn_method.c txn/txn_rec.c txn/txn_recover.c txn/txn_region.c
txn/txn_stat.c txn/txn_util.c common/util_log.c common/util_sig.c
xa/xa.c xa/xa_db.c xa/xa_map.c)

View file

@ -1,102 +0,0 @@
/*-
* $Id: LICENSE,v 12.1 2005/06/16 20:20:10 bostic Exp $
*/
The following is the license that applies to this copy of the Berkeley DB
software. For a license to use the Berkeley DB software under conditions
other than those described here, or to purchase support for this software,
please contact Sleepycat Software by email at info@sleepycat.com, or on
the Web at http://www.sleepycat.com.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
/*
* Copyright (c) 1990-2005
* Sleepycat Software. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Redistributions in any form must be accompanied by information on
* how to obtain complete source code for the DB software and any
* accompanying software that uses the DB software. The source code
* must either be included in the distribution or be available for no
* more than the cost of distribution plus a nominal fee, and must be
* freely redistributable under reasonable conditions. For an
* executable file, complete source code means the source code for all
* modules it contains. It does not include source code for modules or
* files that typically accompany the major components of the operating
* system on which the executable file runs.
*
* THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
* NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 1995, 1996
* The President and Fellows of Harvard University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/

View file

@ -1,56 +0,0 @@
# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Adaptor makefile to translate between what automake expects and what
# BDB provides (or vice versa).
srcdir = @srcdir@
top_srcdir = @top_srcdir@
# distdir and top_distdir are set by the calling Makefile
bdb_build = build_unix
files = LICENSE Makefile Makefile.in README CMakeLists.txt
subdirs = btree build_win32 clib common cxx db dbinc \
dbinc_auto db185 db_archive db_checkpoint db_deadlock db_dump \
db_dump185 db_hotbackup db_load db_printlog db_recover db_stat db_upgrade \
db_verify dbm dbreg dist env fileops hash \
hsearch hmac include lock log mp mutex os \
os_win32 qam rep txn xa sequence crypto
@SET_MAKE@
all:
cd $(bdb_build) && $(MAKE) all
clean:
cd $(bdb_build) && $(MAKE) clean
distclean:
cd $(bdb_build) && $(MAKE) distclean
# May want to fix this, and MYSQL/configure, to install things
install dvi check installcheck:
distdir:
for s in $(subdirs); do \
cp -pr $(srcdir)/$$s $(distdir)/$$s; \
done
for f in $(files); do \
test -f $(distdir)/$$f || cp -p $(srcdir)/$$f $(distdir)/$$f; \
done
mkdir $(distdir)/$(bdb_build)
cp -p $(srcdir)/$(bdb_build)/.IGNORE_ME $(distdir)/$(bdb_build)

File diff suppressed because it is too large

View file

@ -1,213 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995, 1996
* Keith Bostic. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Mike Olson.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: bt_compare.c,v 12.1 2005/06/16 20:20:13 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"
/*
* __bam_cmp --
* Compare a key to a given record.
*
* PUBLIC: int __bam_cmp __P((DB *, const DBT *, PAGE *,
* PUBLIC: u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
*/
int
__bam_cmp(dbp, dbt, h, indx, func, cmpp)
DB *dbp;
const DBT *dbt;
PAGE *h;
u_int32_t indx;
int (*func)__P((DB *, const DBT *, const DBT *));
int *cmpp;
{
BINTERNAL *bi;
BKEYDATA *bk;
BOVERFLOW *bo;
DBT pg_dbt;
/*
* Returns:
* < 0 if dbt is < page record
* = 0 if dbt is = page record
* > 0 if dbt is > page record
*
* !!!
* We do not clear the pg_dbt DBT even though it's likely to contain
* random bits. That should be okay, because the app's comparison
* routine had better not be looking at fields other than data/size.
* We don't clear it because we go through this path a lot and it's
* expensive.
*/
switch (TYPE(h)) {
case P_LBTREE:
case P_LDUP:
case P_LRECNO:
bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_OVERFLOW)
bo = (BOVERFLOW *)bk;
else {
pg_dbt.data = bk->data;
pg_dbt.size = bk->len;
*cmpp = func(dbp, dbt, &pg_dbt);
return (0);
}
break;
case P_IBTREE:
/*
* The following code guarantees that the left-most key on an
* internal page at any place in the tree sorts less than any
* user-specified key. The reason is that if we have reached
* this internal page, we know the user key must sort greater
* than the key we're storing for this page in any internal
* pages at levels above us in the tree. It then follows that
* any user-specified key cannot sort less than the first page
* which we reference, and so there's no reason to call the
* comparison routine. While this may save us a comparison
* routine call or two, the real reason for this is because
* we don't maintain a copy of the smallest key in the tree,
* so that we don't have to update all the levels of the tree
* should the application store a new smallest key. And, so,
* we may not have a key to compare, which makes doing the
* comparison difficult and error prone.
*/
if (indx == 0) {
*cmpp = 1;
return (0);
}
bi = GET_BINTERNAL(dbp, h, indx);
if (B_TYPE(bi->type) == B_OVERFLOW)
bo = (BOVERFLOW *)(bi->data);
else {
pg_dbt.data = bi->data;
pg_dbt.size = bi->len;
*cmpp = func(dbp, dbt, &pg_dbt);
return (0);
}
break;
default:
return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
/*
* Overflow.
*/
return (__db_moff(dbp, dbt,
bo->pgno, bo->tlen, func == __bam_defcmp ? NULL : func, cmpp));
}
/*
* __bam_defcmp --
* Default comparison routine.
*
* PUBLIC: int __bam_defcmp __P((DB *, const DBT *, const DBT *));
*/
int
__bam_defcmp(dbp, a, b)
DB *dbp;
const DBT *a, *b;
{
size_t len;
u_int8_t *p1, *p2;
COMPQUIET(dbp, NULL);
/*
* Returns:
* < 0 if a is < b
* = 0 if a is = b
* > 0 if a is > b
*
* XXX
* If a size_t doesn't fit into a long, or if the difference between
* any two characters doesn't fit into an int, this routine can lose.
* What we need is a signed integral type that's guaranteed to be at
* least as large as a size_t, and there is no such thing.
*/
len = a->size > b->size ? b->size : a->size;
for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
if (*p1 != *p2)
return ((long)*p1 - (long)*p2);
return ((long)a->size - (long)b->size);
}
/*
* __bam_defpfx --
* Default prefix routine.
*
* PUBLIC: size_t __bam_defpfx __P((DB *, const DBT *, const DBT *));
*/
size_t
__bam_defpfx(dbp, a, b)
DB *dbp;
const DBT *a, *b;
{
size_t cnt, len;
u_int8_t *p1, *p2;
COMPQUIET(dbp, NULL);
cnt = 1;
len = a->size > b->size ? b->size : a->size;
for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2, ++cnt)
if (*p1 != *p2)
return (cnt);
/*
* They match up to the smaller of the two sizes.
* Collate the longer after the shorter.
*/
if (a->size < b->size)
return (a->size + 1);
if (b->size < a->size)
return (b->size + 1);
return (b->size);
}
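
The two routines deleted just above, __bam_defcmp and __bam_defpfx, boil down to a byte-wise key comparison (ties on a common prefix broken by length, shorter key first) and a shared-prefix length calculation. The following is a minimal standalone sketch of the same two ideas; it is illustrative only, is not part of the Berkeley DB sources being removed, and every name in it is hypothetical.

#include <stdio.h>
#include <stddef.h>

/* Byte-wise comparison: a tie on a common prefix is broken by length,
 * shorter key first, just as the deleted __bam_defcmp does with DBTs. */
static long
demo_defcmp(const unsigned char *a, size_t alen,
    const unsigned char *b, size_t blen)
{
	size_t len = alen < blen ? alen : blen;

	for (; len--; ++a, ++b)
		if (*a != *b)
			return ((long)*a - (long)*b);
	return ((long)alen - (long)blen);
}

/* Number of leading bytes needed to distinguish the larger key from the
 * smaller one, the quantity the deleted __bam_defpfx computes. */
static size_t
demo_defpfx(const unsigned char *a, size_t alen,
    const unsigned char *b, size_t blen)
{
	size_t cnt = 1, len = alen < blen ? alen : blen;

	for (; len--; ++a, ++b, ++cnt)
		if (*a != *b)
			return (cnt);
	if (alen < blen)
		return (alen + 1);
	if (blen < alen)
		return (blen + 1);
	return (blen);
}

int
main(void)
{
	printf("%ld %zu\n",
	    demo_defcmp((const unsigned char *)"apple", 5,
		(const unsigned char *)"apricot", 7),
	    demo_defpfx((const unsigned char *)"apple", 5,
		(const unsigned char *)"apricot", 7));
	return (0);
}

The example prints -2 3: the keys diverge at their third byte, so three bytes of prefix are enough to separate them.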

View file

@ -1,100 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*
* $Id: bt_conv.c,v 12.2 2005/06/16 20:20:13 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/db_swap.h"
#include "dbinc/btree.h"
/*
* __bam_pgin --
* Convert host-specific page layout from the host-independent format
* stored on disk.
*
* PUBLIC: int __bam_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
*/
int
__bam_pgin(dbenv, dummydbp, pg, pp, cookie)
DB_ENV *dbenv;
DB *dummydbp;
db_pgno_t pg;
void *pp;
DBT *cookie;
{
DB_PGINFO *pginfo;
PAGE *h;
pginfo = (DB_PGINFO *)cookie->data;
if (!F_ISSET(pginfo, DB_AM_SWAP))
return (0);
h = pp;
return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
__db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 1));
}
/*
* __bam_pgout --
* Convert host-specific page layout to the host-independent format
* stored on disk.
*
* PUBLIC: int __bam_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
*/
int
__bam_pgout(dbenv, dummydbp, pg, pp, cookie)
DB_ENV *dbenv;
DB *dummydbp;
db_pgno_t pg;
void *pp;
DBT *cookie;
{
DB_PGINFO *pginfo;
PAGE *h;
pginfo = (DB_PGINFO *)cookie->data;
if (!F_ISSET(pginfo, DB_AM_SWAP))
return (0);
h = pp;
return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
__db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 0));
}
/*
* __bam_mswap --
* Swap the bytes on the btree metadata page.
*
* PUBLIC: int __bam_mswap __P((PAGE *));
*/
int
__bam_mswap(pg)
PAGE *pg;
{
u_int8_t *p;
__db_metaswap(pg);
p = (u_int8_t *)pg + sizeof(DBMETA);
p += sizeof(u_int32_t); /* unused */
SWAP32(p); /* minkey */
SWAP32(p); /* re_len */
SWAP32(p); /* re_pad */
SWAP32(p); /* root */
p += 92 * sizeof(u_int32_t); /* unused */
SWAP32(p); /* crypto_magic */
return (0);
}
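
The page-in/page-out hooks deleted above only do work when DB_AM_SWAP is set, i.e. when the on-disk byte order differs from the host's, and the metadata swap walks the page field by field with SWAP32(). A sketch of the 32-bit swap primitive that pattern relies on follows; it is illustrative only and is not the Berkeley DB macro.

#include <stdint.h>
#include <stdio.h>

/* Reverse the four bytes of a 32-bit field in place, the operation the
 * SWAP32() calls in the deleted __bam_mswap() perform for each metadata
 * field when converting between host and on-disk byte order. */
static void
demo_swap32(uint8_t *p)
{
	uint8_t tmp;

	tmp = p[0]; p[0] = p[3]; p[3] = tmp;
	tmp = p[1]; p[1] = p[2]; p[2] = tmp;
}

int
main(void)
{
	uint32_t v = 0x11223344;

	demo_swap32((uint8_t *)&v);
	printf("0x%08lx\n", (unsigned long)v);	/* prints 0x44332211 */
	return (0);
}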

View file

@ -1,590 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*
* $Id: bt_curadj.c,v 12.3 2005/07/20 16:50:45 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"
static int __bam_opd_cursor __P((DB *, DBC *, db_pgno_t, u_int32_t, u_int32_t));
/*
* Cursor adjustments are logged if they are for subtransactions. This is
* because it's possible for a subtransaction to adjust cursors which will
* still be active after the subtransaction aborts, and so which must be
* restored to their previous locations. Cursors that can be both affected
* by our cursor adjustments and active after our transaction aborts can
* only be found in our parent transaction -- cursors in other transactions,
* including other child transactions of our parent, must have conflicting
* locker IDs, and so cannot be affected by adjustments in this transaction.
*/
/*
* __bam_ca_delete --
* Update the cursors when items are deleted and when already deleted
* items are overwritten. Return the number of relevant cursors found.
*
* PUBLIC: int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int, int *));
*/
int
__bam_ca_delete(dbp, pgno, indx, delete, countp)
DB *dbp;
db_pgno_t pgno;
u_int32_t indx;
int delete, *countp;
{
BTREE_CURSOR *cp;
DB *ldbp;
DB_ENV *dbenv;
DBC *dbc;
int count; /* !!!: Has to contain max number of cursors. */
dbenv = dbp->dbenv;
/*
* Adjust the cursors. We have the page write locked, so the
* only other cursors that can be pointing at a page are
* those in the same thread of control. Unfortunately, we don't
* know that they're using the same DB handle, so traverse
* all matching DB handles in the same DB_ENV, then all cursors
* on each matching DB handle.
*
* Each cursor is single-threaded, so we only need to lock the
* list of DBs and then the list of cursors in each DB.
*/
MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
for (count = 0, ldbp = __dblist_get(dbenv, dbp->adj_fileid);
ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
ldbp = LIST_NEXT(ldbp, dblistlinks)) {
MUTEX_LOCK(dbenv, dbp->mutex);
for (dbc = TAILQ_FIRST(&ldbp->active_queue);
dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
cp = (BTREE_CURSOR *)dbc->internal;
if (cp->pgno == pgno && cp->indx == indx) {
/*
* [#8032] This assert is checking
* for possible race conditions where we
* hold a cursor position without a lock.
* Unfortunately, there are paths in the
* Btree code that do not satisfy these
* conditions. None of them are known to
* be a problem, but this assert should
* be re-activated when the Btree stack
* code is re-written.
DB_ASSERT(!STD_LOCKING(dbc) ||
cp->lock_mode != DB_LOCK_NG);
*/
if (delete)
F_SET(cp, C_DELETED);
else
F_CLR(cp, C_DELETED);
++count;
}
}
MUTEX_UNLOCK(dbenv, dbp->mutex);
}
MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
if (countp != NULL)
*countp = count;
return (0);
}
/*
* __ram_ca_delete --
* Return if any relevant cursors found.
*
* PUBLIC: int __ram_ca_delete __P((DB *, db_pgno_t, int *));
*/
int
__ram_ca_delete(dbp, root_pgno, foundp)
DB *dbp;
db_pgno_t root_pgno;
int *foundp;
{
DB *ldbp;
DBC *dbc;
DB_ENV *dbenv;
int found;
found = 0;
dbenv = dbp->dbenv;
/*
* Review the cursors. See the comment in __bam_ca_delete().
*/
MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
found == 0 && ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
ldbp = LIST_NEXT(ldbp, dblistlinks)) {
MUTEX_LOCK(dbenv, dbp->mutex);
for (dbc = TAILQ_FIRST(&ldbp->active_queue);
found == 0 && dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
if (dbc->internal->root == root_pgno)
found = 1;
MUTEX_UNLOCK(dbenv, dbp->mutex);
}
MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
*foundp = found;
return (0);
}
/*
* __bam_ca_di --
* Adjust the cursors during a delete or insert.
*
* PUBLIC: int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int));
*/
int
__bam_ca_di(my_dbc, pgno, indx, adjust)
DBC *my_dbc;
db_pgno_t pgno;
u_int32_t indx;
int adjust;
{
DB *dbp, *ldbp;
DB_ENV *dbenv;
DB_LSN lsn;
DB_TXN *my_txn;
DBC *dbc;
DBC_INTERNAL *cp;
int found, ret;
dbp = my_dbc->dbp;
dbenv = dbp->dbenv;
my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
/*
* Adjust the cursors. See the comment in __bam_ca_delete().
*/
found = 0;
MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
ldbp = LIST_NEXT(ldbp, dblistlinks)) {
MUTEX_LOCK(dbenv, dbp->mutex);
for (dbc = TAILQ_FIRST(&ldbp->active_queue);
dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
if (dbc->dbtype == DB_RECNO)
continue;
cp = dbc->internal;
if (cp->pgno == pgno && cp->indx >= indx) {
/* Cursor indices should never be negative. */
DB_ASSERT(cp->indx != 0 || adjust > 0);
/* [#8032]
DB_ASSERT(!STD_LOCKING(dbc) ||
cp->lock_mode != DB_LOCK_NG);
*/
cp->indx += adjust;
if (my_txn != NULL && dbc->txn != my_txn)
found = 1;
}
}
MUTEX_UNLOCK(dbenv, dbp->mutex);
}
MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
if (found != 0 && DBC_LOGGING(my_dbc)) {
if ((ret = __bam_curadj_log(dbp, my_dbc->txn, &lsn, 0,
DB_CA_DI, pgno, 0, 0, (u_int32_t)adjust, indx, 0)) != 0)
return (ret);
}
return (0);
}
/*
* __bam_opd_cursor -- create a new opd cursor.
*/
static int
__bam_opd_cursor(dbp, dbc, first, tpgno, ti)
DB *dbp;
DBC *dbc;
db_pgno_t tpgno;
u_int32_t first, ti;
{
BTREE_CURSOR *cp, *orig_cp;
DBC *dbc_nopd;
int ret;
orig_cp = (BTREE_CURSOR *)dbc->internal;
dbc_nopd = NULL;
/*
* Allocate a new cursor and create the stack. If duplicates
* are sorted, we've just created an off-page duplicate Btree.
* If duplicates aren't sorted, we've just created a Recno tree.
*
* Note that in order to get here at all, there shouldn't be
* an old off-page dup cursor--to augment the checking db_c_newopd
* will do, assert this.
*/
DB_ASSERT(orig_cp->opd == NULL);
if ((ret = __db_c_newopd(dbc, tpgno, orig_cp->opd, &dbc_nopd)) != 0)
return (ret);
cp = (BTREE_CURSOR *)dbc_nopd->internal;
cp->pgno = tpgno;
cp->indx = ti;
if (dbp->dup_compare == NULL) {
/*
* Converting to off-page Recno trees is tricky. The
* record number for the cursor is the index + 1 (to
* convert to 1-based record numbers).
*/
cp->recno = ti + 1;
}
/*
* Transfer the deleted flag from the top-level cursor to the
* created one.
*/
if (F_ISSET(orig_cp, C_DELETED)) {
F_SET(cp, C_DELETED);
F_CLR(orig_cp, C_DELETED);
}
/* Stack the cursors and reset the initial cursor's index. */
orig_cp->opd = dbc_nopd;
orig_cp->indx = first;
return (0);
}
/*
* __bam_ca_dup --
* Adjust the cursors when moving items from a leaf page to a duplicates
* page.
*
* PUBLIC: int __bam_ca_dup __P((DBC *,
* PUBLIC: u_int32_t, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
*/
int
__bam_ca_dup(my_dbc, first, fpgno, fi, tpgno, ti)
DBC *my_dbc;
db_pgno_t fpgno, tpgno;
u_int32_t first, fi, ti;
{
BTREE_CURSOR *orig_cp;
DB *dbp, *ldbp;
DBC *dbc;
DB_ENV *dbenv;
DB_LSN lsn;
DB_TXN *my_txn;
int found, ret;
dbp = my_dbc->dbp;
dbenv = dbp->dbenv;
my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
/*
* Adjust the cursors. See the comment in __bam_ca_delete().
*/
found = 0;
MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
ldbp = LIST_NEXT(ldbp, dblistlinks)) {
loop: MUTEX_LOCK(dbenv, dbp->mutex);
for (dbc = TAILQ_FIRST(&ldbp->active_queue);
dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
/* Find cursors pointing to this record. */
orig_cp = (BTREE_CURSOR *)dbc->internal;
if (orig_cp->pgno != fpgno || orig_cp->indx != fi)
continue;
/*
* Since we rescan the list, see if this is already
* converted.
*/
if (orig_cp->opd != NULL)
continue;
MUTEX_UNLOCK(dbenv, dbp->mutex);
/* [#8032]
DB_ASSERT(!STD_LOCKING(dbc) ||
orig_cp->lock_mode != DB_LOCK_NG);
*/
if ((ret = __bam_opd_cursor(dbp,
dbc, first, tpgno, ti)) !=0)
return (ret);
if (my_txn != NULL && dbc->txn != my_txn)
found = 1;
/* We released the mutex to get a cursor, start over. */
goto loop;
}
MUTEX_UNLOCK(dbenv, dbp->mutex);
}
MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
if (found != 0 && DBC_LOGGING(my_dbc)) {
if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
&lsn, 0, DB_CA_DUP, fpgno, tpgno, 0, first, fi, ti)) != 0)
return (ret);
}
return (0);
}
/*
* __bam_ca_undodup --
* Adjust the cursors when returning items to a leaf page
* from a duplicate page.
* Called only during undo processing.
*
* PUBLIC: int __bam_ca_undodup __P((DB *,
* PUBLIC: u_int32_t, db_pgno_t, u_int32_t, u_int32_t));
*/
int
__bam_ca_undodup(dbp, first, fpgno, fi, ti)
DB *dbp;
db_pgno_t fpgno;
u_int32_t first, fi, ti;
{
BTREE_CURSOR *orig_cp;
DB *ldbp;
DBC *dbc;
DB_ENV *dbenv;
int ret;
dbenv = dbp->dbenv;
/*
* Adjust the cursors. See the comment in __bam_ca_delete().
*/
MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
ldbp = LIST_NEXT(ldbp, dblistlinks)) {
loop: MUTEX_LOCK(dbenv, dbp->mutex);
for (dbc = TAILQ_FIRST(&ldbp->active_queue);
dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
orig_cp = (BTREE_CURSOR *)dbc->internal;
/*
* A note on the orig_cp->opd != NULL requirement here:
* it's possible that there's a cursor that refers to
* the same duplicate set, but which has no opd cursor,
* because it refers to a different item and we took
* care of it while processing a previous record.
*/
if (orig_cp->pgno != fpgno ||
orig_cp->indx != first ||
orig_cp->opd == NULL || ((BTREE_CURSOR *)
orig_cp->opd->internal)->indx != ti)
continue;
MUTEX_UNLOCK(dbenv, dbp->mutex);
if ((ret = __db_c_close(orig_cp->opd)) != 0)
return (ret);
orig_cp->opd = NULL;
orig_cp->indx = fi;
/*
* We released the mutex to free a cursor,
* start over.
*/
goto loop;
}
MUTEX_UNLOCK(dbenv, dbp->mutex);
}
MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
return (0);
}
/*
* __bam_ca_rsplit --
* Adjust the cursors when doing reverse splits.
*
* PUBLIC: int __bam_ca_rsplit __P((DBC *, db_pgno_t, db_pgno_t));
*/
int
__bam_ca_rsplit(my_dbc, fpgno, tpgno)
DBC* my_dbc;
db_pgno_t fpgno, tpgno;
{
DB *dbp, *ldbp;
DBC *dbc;
DB_ENV *dbenv;
DB_LSN lsn;
DB_TXN *my_txn;
int found, ret;
dbp = my_dbc->dbp;
dbenv = dbp->dbenv;
my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
/*
* Adjust the cursors. See the comment in __bam_ca_delete().
*/
found = 0;
MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
ldbp = LIST_NEXT(ldbp, dblistlinks)) {
MUTEX_LOCK(dbenv, dbp->mutex);
for (dbc = TAILQ_FIRST(&ldbp->active_queue);
dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
if (dbc->dbtype == DB_RECNO)
continue;
if (dbc->internal->pgno == fpgno) {
dbc->internal->pgno = tpgno;
/* [#8032]
DB_ASSERT(!STD_LOCKING(dbc) ||
dbc->internal->lock_mode != DB_LOCK_NG);
*/
if (my_txn != NULL && dbc->txn != my_txn)
found = 1;
}
}
MUTEX_UNLOCK(dbenv, dbp->mutex);
}
MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
if (found != 0 && DBC_LOGGING(my_dbc)) {
if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
&lsn, 0, DB_CA_RSPLIT, fpgno, tpgno, 0, 0, 0, 0)) != 0)
return (ret);
}
return (0);
}
/*
* __bam_ca_split --
* Adjust the cursors when splitting a page.
*
* PUBLIC: int __bam_ca_split __P((DBC *,
* PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, int));
*/
int
__bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft)
DBC *my_dbc;
db_pgno_t ppgno, lpgno, rpgno;
u_int32_t split_indx;
int cleft;
{
DB *dbp, *ldbp;
DBC *dbc;
DBC_INTERNAL *cp;
DB_ENV *dbenv;
DB_LSN lsn;
DB_TXN *my_txn;
int found, ret;
dbp = my_dbc->dbp;
dbenv = dbp->dbenv;
my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
/*
* Adjust the cursors. See the comment in __bam_ca_delete().
*
* If splitting the page that a cursor was on, the cursor has to be
* adjusted to point to the same record as before the split. Most
* of the time we don't adjust pointers to the left page, because
* we're going to copy its contents back over the original page. If
* the cursor is on the right page, it is decremented by the number of
* records split to the left page.
*/
found = 0;
MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
ldbp = LIST_NEXT(ldbp, dblistlinks)) {
MUTEX_LOCK(dbenv, dbp->mutex);
for (dbc = TAILQ_FIRST(&ldbp->active_queue);
dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
if (dbc->dbtype == DB_RECNO)
continue;
cp = dbc->internal;
if (cp->pgno == ppgno) {
/* [#8032]
DB_ASSERT(!STD_LOCKING(dbc) ||
cp->lock_mode != DB_LOCK_NG);
*/
if (my_txn != NULL && dbc->txn != my_txn)
found = 1;
if (cp->indx < split_indx) {
if (cleft)
cp->pgno = lpgno;
} else {
cp->pgno = rpgno;
cp->indx -= split_indx;
}
}
}
MUTEX_UNLOCK(dbenv, dbp->mutex);
}
MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
if (found != 0 && DBC_LOGGING(my_dbc)) {
if ((ret = __bam_curadj_log(dbp,
my_dbc->txn, &lsn, 0, DB_CA_SPLIT, ppgno, rpgno,
cleft ? lpgno : PGNO_INVALID, 0, split_indx, 0)) != 0)
return (ret);
}
return (0);
}
/*
* __bam_ca_undosplit --
* Adjust the cursors when undoing a split of a page.
* If we grew a level we will execute this for both the
* left and the right pages.
* Called only during undo processing.
*
* PUBLIC: int __bam_ca_undosplit __P((DB *,
* PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t));
*/
int
__bam_ca_undosplit(dbp, frompgno, topgno, lpgno, split_indx)
DB *dbp;
db_pgno_t frompgno, topgno, lpgno;
u_int32_t split_indx;
{
DB *ldbp;
DBC *dbc;
DB_ENV *dbenv;
DBC_INTERNAL *cp;
dbenv = dbp->dbenv;
/*
* Adjust the cursors. See the comment in __bam_ca_delete().
*
* When backing out a split, we move the cursor back
* to the original offset and bump it by the split_indx.
*/
MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
ldbp = LIST_NEXT(ldbp, dblistlinks)) {
MUTEX_LOCK(dbenv, dbp->mutex);
for (dbc = TAILQ_FIRST(&ldbp->active_queue);
dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
if (dbc->dbtype == DB_RECNO)
continue;
cp = dbc->internal;
if (cp->pgno == topgno) {
cp->pgno = frompgno;
cp->indx += split_indx;
} else if (cp->pgno == lpgno)
cp->pgno = frompgno;
}
MUTEX_UNLOCK(dbenv, dbp->mutex);
}
MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
return (0);
}
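
Every routine in the file deleted above follows the same traversal: lock the environment's list of DB handles, visit each handle open on the same underlying file (matching adj_fileid), lock that handle's active-cursor queue, and patch any cursor positioned at the affected page and index. Below is a stripped-down sketch of that walk, with hypothetical stand-in types and without the mutexes or logging; it is illustrative only and shows the shape of the deleted __bam_ca_di().

/* Walk all handles on one file and all cursors on each handle, bumping
 * the index of cursors at or past the insertion/deletion point. */
struct demo_cursor {
	unsigned int pgno, indx;
	struct demo_cursor *next;
};
struct demo_handle {
	int fileid;
	struct demo_cursor *cursors;
	struct demo_handle *next;
};

static void
demo_adjust_cursors(struct demo_handle *handles,
    int fileid, unsigned int pgno, unsigned int indx, int adjust)
{
	struct demo_handle *h;
	struct demo_cursor *c;

	for (h = handles; h != NULL; h = h->next) {
		if (h->fileid != fileid)
			continue;
		for (c = h->cursors; c != NULL; c = c->next)
			if (c->pgno == pgno && c->indx >= indx)
				c->indx += adjust;
	}
}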

File diff suppressed because it is too large

View file

@ -1,643 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995, 1996
* Keith Bostic. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Mike Olson.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: bt_delete.c,v 12.13 2005/10/20 18:14:59 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <string.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/db_shash.h"
#include "dbinc/btree.h"
#include "dbinc/lock.h"
#include "dbinc/mp.h"
/*
* __bam_ditem --
* Delete one or more entries from a page.
*
* PUBLIC: int __bam_ditem __P((DBC *, PAGE *, u_int32_t));
*/
int
__bam_ditem(dbc, h, indx)
DBC *dbc;
PAGE *h;
u_int32_t indx;
{
BINTERNAL *bi;
BKEYDATA *bk;
DB *dbp;
DB_MPOOLFILE *mpf;
u_int32_t nbytes;
int ret;
db_indx_t *inp;
dbp = dbc->dbp;
mpf = dbp->mpf;
inp = P_INP(dbp, h);
switch (TYPE(h)) {
case P_IBTREE:
bi = GET_BINTERNAL(dbp, h, indx);
switch (B_TYPE(bi->type)) {
case B_DUPLICATE:
case B_KEYDATA:
nbytes = BINTERNAL_SIZE(bi->len);
break;
case B_OVERFLOW:
nbytes = BINTERNAL_SIZE(bi->len);
if ((ret =
__db_doff(dbc, ((BOVERFLOW *)bi->data)->pgno)) != 0)
return (ret);
break;
default:
return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
break;
case P_IRECNO:
nbytes = RINTERNAL_SIZE;
break;
case P_LBTREE:
/*
* If it's a duplicate key, discard the index and don't touch
* the actual page item.
*
* !!!
* This works because no data item can have an index matching
* any other index so even if the data item is in a key "slot",
* it won't match any other index.
*/
if ((indx % 2) == 0) {
/*
* Check for a duplicate after us on the page. NOTE:
* we have to delete the key item before deleting the
* data item, otherwise the "indx + P_INDX" calculation
* won't work!
*/
if (indx + P_INDX < (u_int32_t)NUM_ENT(h) &&
inp[indx] == inp[indx + P_INDX])
return (__bam_adjindx(dbc,
h, indx, indx + O_INDX, 0));
/*
* Check for a duplicate before us on the page. It
* doesn't matter if we delete the key item before or
* after the data item for the purposes of this one.
*/
if (indx > 0 && inp[indx] == inp[indx - P_INDX])
return (__bam_adjindx(dbc,
h, indx, indx - P_INDX, 0));
}
/* FALLTHROUGH */
case P_LDUP:
case P_LRECNO:
bk = GET_BKEYDATA(dbp, h, indx);
switch (B_TYPE(bk->type)) {
case B_DUPLICATE:
nbytes = BOVERFLOW_SIZE;
break;
case B_OVERFLOW:
nbytes = BOVERFLOW_SIZE;
if ((ret = __db_doff(
dbc, (GET_BOVERFLOW(dbp, h, indx))->pgno)) != 0)
return (ret);
break;
case B_KEYDATA:
nbytes = BKEYDATA_SIZE(bk->len);
break;
default:
return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
break;
default:
return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
/* Delete the item and mark the page dirty. */
if ((ret = __db_ditem(dbc, h, indx, nbytes)) != 0)
return (ret);
if ((ret = __memp_fset(mpf, h, DB_MPOOL_DIRTY)) != 0)
return (ret);
return (0);
}
/*
* __bam_adjindx --
* Adjust an index on the page.
*
* PUBLIC: int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int));
*/
int
__bam_adjindx(dbc, h, indx, indx_copy, is_insert)
DBC *dbc;
PAGE *h;
u_int32_t indx, indx_copy;
int is_insert;
{
DB *dbp;
DB_MPOOLFILE *mpf;
db_indx_t copy, *inp;
int ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
inp = P_INP(dbp, h);
/* Log the change. */
if (DBC_LOGGING(dbc)) {
if ((ret = __bam_adj_log(dbp, dbc->txn, &LSN(h), 0,
PGNO(h), &LSN(h), indx, indx_copy, (u_int32_t)is_insert)) != 0)
return (ret);
} else
LSN_NOT_LOGGED(LSN(h));
/* Shuffle the indices and mark the page dirty. */
if (is_insert) {
copy = inp[indx_copy];
if (indx != NUM_ENT(h))
memmove(&inp[indx + O_INDX], &inp[indx],
sizeof(db_indx_t) * (NUM_ENT(h) - indx));
inp[indx] = copy;
++NUM_ENT(h);
} else {
--NUM_ENT(h);
if (indx != NUM_ENT(h))
memmove(&inp[indx], &inp[indx + O_INDX],
sizeof(db_indx_t) * (NUM_ENT(h) - indx));
}
if ((ret = __memp_fset(mpf, h, DB_MPOOL_DIRTY)) != 0)
return (ret);
return (0);
}
/*
* __bam_dpages --
* Delete a set of locked pages.
*
* PUBLIC: int __bam_dpages __P((DBC *, int, int));
*/
int
__bam_dpages(dbc, use_top, update)
DBC *dbc;
int use_top;
int update;
{
BTREE_CURSOR *cp;
BINTERNAL *bi;
DB *dbp;
DBT a, b;
DB_LOCK c_lock, p_lock;
DB_MPOOLFILE *mpf;
EPG *epg, *save_sp, *stack_epg;
PAGE *child, *parent;
db_indx_t nitems;
db_pgno_t pgno, root_pgno;
db_recno_t rcnt;
int done, ret, t_ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
nitems = 0;
pgno = PGNO_INVALID;
/*
* We have the entire stack of deletable pages locked.
*
* Btree calls us with a stack whose first page is to have a
* single item deleted and the rest of the pages are to be removed.
*
* Recno always has a stack to the root and __bam_merge operations
* may have unneeded items in the sack. We find the lowest page
* in the stack that has more than one record in it and start there.
*/
ret = 0;
if (use_top)
stack_epg = cp->sp;
else
for (stack_epg = cp->csp; stack_epg > cp->sp; --stack_epg)
if (NUM_ENT(stack_epg->page) > 1)
break;
epg = stack_epg;
/*
* !!!
* There is an interesting deadlock situation here. We have to relink
* the leaf page chain around the leaf page being deleted. Consider
* a cursor walking through the leaf pages, that has the previous page
* read-locked and is waiting on a lock for the page we're deleting.
* It will deadlock here. Before we unlink the subtree, we relink the
* leaf page chain.
*/
if (LEVEL(cp->csp->page) == 1 &&
(ret = __bam_relink(dbc, cp->csp->page, PGNO_INVALID)) != 0)
goto discard;
/*
* Delete the last item that references the underlying pages that are
* to be deleted, and adjust cursors that reference that page. Then,
* save that page's page number and item count and release it. If
* the application isn't retaining locks because it's running without
* transactions, this lets the rest of the tree get back to business
* immediately.
*/
if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
goto discard;
if ((ret = __bam_ca_di(dbc, PGNO(epg->page), epg->indx, -1)) != 0)
goto discard;
if (update && epg->indx == 0) {
save_sp = cp->csp;
cp->csp = epg;
ret = __bam_pupdate(dbc, epg->page);
cp->csp = save_sp;
if (ret != 0)
goto discard;
}
pgno = PGNO(epg->page);
nitems = NUM_ENT(epg->page);
ret = __memp_fput(mpf, epg->page, 0);
if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
goto err_inc;
/* Then, discard any pages that we don't care about. */
discard: for (epg = cp->sp; epg < stack_epg; ++epg) {
if ((t_ret = __memp_fput(mpf, epg->page, 0)) != 0 && ret == 0)
ret = t_ret;
epg->page = NULL;
if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0)
ret = t_ret;
}
if (ret != 0)
goto err;
/* Free the rest of the pages in the stack. */
while (++epg <= cp->csp) {
/*
* Delete page entries so they will be restored as part of
* recovery. We don't need to do cursor adjustment here as
* the pages are being emptied by definition and so cannot
* be referenced by a cursor.
*/
if (NUM_ENT(epg->page) != 0) {
DB_ASSERT(LEVEL(epg->page) != 1);
if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
goto err;
/*
* Sheer paranoia: if we find any pages that aren't
* emptied by the delete, someone else added an item
* while we were walking the tree, and we discontinue
* the delete. Shouldn't be possible, but we check
* regardless.
*/
if (NUM_ENT(epg->page) != 0)
goto err;
}
ret = __db_free(dbc, epg->page);
if (cp->page == epg->page)
cp->page = NULL;
epg->page = NULL;
if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
goto err_inc;
}
if (0) {
err_inc: ++epg;
err: for (; epg <= cp->csp; ++epg) {
if (epg->page != NULL)
(void)__memp_fput(mpf, epg->page, 0);
(void)__TLPUT(dbc, epg->lock);
}
BT_STK_CLR(cp);
return (ret);
}
BT_STK_CLR(cp);
/*
* If we just deleted the next-to-last item from the root page, the
* tree can collapse one or more levels. While there remains only a
* single item on the root page, write lock the last page referenced
* by the root page and copy it over the root page.
*/
root_pgno = cp->root;
if (pgno != root_pgno || nitems != 1)
return (0);
for (done = 0; !done;) {
/* Initialize. */
parent = child = NULL;
LOCK_INIT(p_lock);
LOCK_INIT(c_lock);
/* Lock the root. */
pgno = root_pgno;
if ((ret =
__db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &p_lock)) != 0)
goto stop;
if ((ret = __memp_fget(mpf, &pgno, 0, &parent)) != 0)
goto stop;
if (NUM_ENT(parent) != 1)
goto stop;
switch (TYPE(parent)) {
case P_IBTREE:
/*
* If this is overflow, then try to delete it.
* The child may or may not still point at it.
*/
bi = GET_BINTERNAL(dbp, parent, 0);
if (B_TYPE(bi->type) == B_OVERFLOW)
if ((ret = __db_doff(dbc,
((BOVERFLOW *)bi->data)->pgno)) != 0)
goto stop;
pgno = bi->pgno;
break;
case P_IRECNO:
pgno = GET_RINTERNAL(dbp, parent, 0)->pgno;
break;
default:
goto stop;
}
/* Lock the child page. */
if ((ret =
__db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &c_lock)) != 0)
goto stop;
if ((ret = __memp_fget(mpf, &pgno, 0, &child)) != 0)
goto stop;
/* Log the change. */
if (DBC_LOGGING(dbc)) {
memset(&a, 0, sizeof(a));
a.data = child;
a.size = dbp->pgsize;
memset(&b, 0, sizeof(b));
b.data = P_ENTRY(dbp, parent, 0);
b.size = TYPE(parent) == P_IRECNO ? RINTERNAL_SIZE :
BINTERNAL_SIZE(((BINTERNAL *)b.data)->len);
if ((ret = __bam_rsplit_log(dbp, dbc->txn,
&child->lsn, 0, PGNO(child), &a, PGNO(parent),
RE_NREC(parent), &b, &parent->lsn)) != 0)
goto stop;
} else
LSN_NOT_LOGGED(child->lsn);
/*
* Make the switch.
*
* One fixup -- internal pages below the top level do not store
* a record count, so we have to preserve it if we're not
* converting to a leaf page. Note also that we are about to
* overwrite the parent page, including its LSN. This is OK
* because the log message we wrote describing this update
* stores its LSN on the child page. When the child is copied
* onto the parent, the correct LSN is copied into place.
*/
COMPQUIET(rcnt, 0);
if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL)
rcnt = RE_NREC(parent);
memcpy(parent, child, dbp->pgsize);
PGNO(parent) = root_pgno;
if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL)
RE_NREC_SET(parent, rcnt);
/* Mark the pages dirty. */
if ((ret = __memp_fset(mpf, parent, DB_MPOOL_DIRTY)) != 0)
goto stop;
if ((ret = __memp_fset(mpf, child, DB_MPOOL_DIRTY)) != 0)
goto stop;
/* Adjust the cursors. */
if ((ret = __bam_ca_rsplit(dbc, PGNO(child), root_pgno)) != 0)
goto stop;
/*
* Free the page copied onto the root page and discard its
* lock. (The call to __db_free() discards our reference
* to the page.)
*/
if ((ret = __db_free(dbc, child)) != 0) {
child = NULL;
goto stop;
}
child = NULL;
if (0) {
stop: done = 1;
}
if ((t_ret = __TLPUT(dbc, p_lock)) != 0 && ret == 0)
ret = t_ret;
if (parent != NULL &&
(t_ret = __memp_fput(mpf, parent, 0)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret = __TLPUT(dbc, c_lock)) != 0 && ret == 0)
ret = t_ret;
if (child != NULL &&
(t_ret = __memp_fput(mpf, child, 0)) != 0 && ret == 0)
ret = t_ret;
}
return (ret);
}
/*
* __bam_relink --
* Relink around a deleted page.
*
* PUBLIC: int __bam_relink __P((DBC *, PAGE *, db_pgno_t));
*/
int
__bam_relink(dbc, pagep, new_pgno)
DBC *dbc;
PAGE *pagep;
db_pgno_t new_pgno;
{
DB *dbp;
PAGE *np, *pp;
DB_LOCK npl, ppl;
DB_LSN *nlsnp, *plsnp, ret_lsn;
DB_MPOOLFILE *mpf;
int ret, t_ret;
dbp = dbc->dbp;
np = pp = NULL;
LOCK_INIT(npl);
LOCK_INIT(ppl);
nlsnp = plsnp = NULL;
mpf = dbp->mpf;
ret = 0;
/*
* Retrieve and lock the one/two pages. For a remove, we may need
* two pages (the before and after). For an add, we only need one
* because the split took care of the prev.
*/
if (pagep->next_pgno != PGNO_INVALID) {
if ((ret = __db_lget(dbc,
0, pagep->next_pgno, DB_LOCK_WRITE, 0, &npl)) != 0)
goto err;
if ((ret = __memp_fget(mpf, &pagep->next_pgno, 0, &np)) != 0) {
ret = __db_pgerr(dbp, pagep->next_pgno, ret);
goto err;
}
nlsnp = &np->lsn;
}
if (pagep->prev_pgno != PGNO_INVALID) {
if ((ret = __db_lget(dbc,
0, pagep->prev_pgno, DB_LOCK_WRITE, 0, &ppl)) != 0)
goto err;
if ((ret = __memp_fget(mpf, &pagep->prev_pgno, 0, &pp)) != 0) {
ret = __db_pgerr(dbp, pagep->prev_pgno, ret);
goto err;
}
plsnp = &pp->lsn;
}
/* Log the change. */
if (DBC_LOGGING(dbc)) {
if ((ret = __bam_relink_log(dbp, dbc->txn, &ret_lsn, 0,
pagep->pgno, new_pgno, pagep->prev_pgno, plsnp,
pagep->next_pgno, nlsnp)) != 0)
goto err;
} else
LSN_NOT_LOGGED(ret_lsn);
if (np != NULL)
np->lsn = ret_lsn;
if (pp != NULL)
pp->lsn = ret_lsn;
/*
* Modify and release the two pages.
*/
if (np != NULL) {
if (new_pgno == PGNO_INVALID)
np->prev_pgno = pagep->prev_pgno;
else
np->prev_pgno = new_pgno;
ret = __memp_fput(mpf, np, DB_MPOOL_DIRTY);
if ((t_ret = __TLPUT(dbc, npl)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
goto err;
}
if (pp != NULL) {
if (new_pgno == PGNO_INVALID)
pp->next_pgno = pagep->next_pgno;
else
pp->next_pgno = new_pgno;
ret = __memp_fput(mpf, pp, DB_MPOOL_DIRTY);
if ((t_ret = __TLPUT(dbc, ppl)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
goto err;
}
return (0);
err: if (np != NULL)
(void)__memp_fput(mpf, np, 0);
(void)__TLPUT(dbc, npl);
if (pp != NULL)
(void)__memp_fput(mpf, pp, 0);
(void)__TLPUT(dbc, ppl);
return (ret);
}
/*
* __bam_pupdate --
* Update parent key pointers up the tree.
*
* PUBLIC: int __bam_pupdate __P((DBC *, PAGE *));
*/
int
__bam_pupdate(dbc, lpg)
DBC *dbc;
PAGE *lpg;
{
BTREE_CURSOR *cp;
DB_ENV *dbenv;
EPG *epg;
int ret;
dbenv = dbc->dbp->dbenv;
cp = (BTREE_CURSOR *)dbc->internal;
ret = 0;
/*
* Update the parents up the tree. __bam_pinsert only looks at the
* left child if it is a leaf page, so we don't need to change it. We
* just do a delete and insert; a replace is possible but reusing
* pinsert is better.
*/
for (epg = &cp->csp[-1]; epg >= cp->sp; epg--) {
if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
return (ret);
epg->indx--;
if ((ret = __bam_pinsert(dbc, epg,
lpg, epg[1].page, BPI_NORECNUM)) != 0) {
if (ret == DB_NEEDSPLIT) {
/* This should not happen. */
__db_err(dbenv,
"Not enough room in parent: %s: page %lu",
dbc->dbp->fname, (u_long)PGNO(epg->page));
ret = __db_panic(dbenv, EINVAL);
}
return (ret);
}
}
return (ret);
}
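
Before the deleted __bam_dpages frees a leaf page it relinks the page out of the leaf chain (__bam_relink above), precisely so that a cursor walking the leaf pages never has to wait on the dying page. Stripped of pages, locks, and logging, that relink is an ordinary doubly linked list unlink; the sketch below uses a hypothetical node type and is illustrative only.

#include <stddef.h>

/* Unlink node p from a doubly linked chain, the operation the deleted
 * __bam_relink() performs on prev_pgno/next_pgno before a leaf page is
 * freed. */
struct demo_page {
	struct demo_page *prev, *next;
};

static void
demo_relink(struct demo_page *p)
{
	if (p->next != NULL)
		p->next->prev = p->prev;
	if (p->prev != NULL)
		p->prev->next = p->next;
	p->prev = p->next = NULL;
}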

View file

@ -1,514 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1999-2005
* Sleepycat Software. All rights reserved.
*
* $Id: bt_method.c,v 12.2 2005/06/16 20:20:16 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"
#include "dbinc/qam.h"
static int __bam_set_bt_minkey __P((DB *, u_int32_t));
static int __bam_set_bt_prefix
__P((DB *, size_t(*)(DB *, const DBT *, const DBT *)));
static int __ram_get_re_delim __P((DB *, int *));
static int __ram_set_re_delim __P((DB *, int));
static int __ram_set_re_len __P((DB *, u_int32_t));
static int __ram_set_re_pad __P((DB *, int));
static int __ram_get_re_source __P((DB *, const char **));
static int __ram_set_re_source __P((DB *, const char *));
/*
* __bam_db_create --
* Btree specific initialization of the DB structure.
*
* PUBLIC: int __bam_db_create __P((DB *));
*/
int
__bam_db_create(dbp)
DB *dbp;
{
BTREE *t;
int ret;
/* Allocate and initialize the private btree structure. */
if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(BTREE), &t)) != 0)
return (ret);
dbp->bt_internal = t;
t->bt_minkey = DEFMINKEYPAGE; /* Btree */
t->bt_compare = __bam_defcmp;
t->bt_prefix = __bam_defpfx;
dbp->set_bt_compare = __bam_set_bt_compare;
dbp->get_bt_minkey = __bam_get_bt_minkey;
dbp->set_bt_minkey = __bam_set_bt_minkey;
dbp->set_bt_prefix = __bam_set_bt_prefix;
t->re_pad = ' '; /* Recno */
t->re_delim = '\n';
t->re_eof = 1;
dbp->get_re_delim = __ram_get_re_delim;
dbp->set_re_delim = __ram_set_re_delim;
dbp->get_re_len = __ram_get_re_len;
dbp->set_re_len = __ram_set_re_len;
dbp->get_re_pad = __ram_get_re_pad;
dbp->set_re_pad = __ram_set_re_pad;
dbp->get_re_source = __ram_get_re_source;
dbp->set_re_source = __ram_set_re_source;
return (0);
}
/*
* __bam_db_close --
* Btree specific discard of the DB structure.
*
* PUBLIC: int __bam_db_close __P((DB *));
*/
int
__bam_db_close(dbp)
DB *dbp;
{
BTREE *t;
if ((t = dbp->bt_internal) == NULL)
return (0);
/* Recno */
/* Close any backing source file descriptor. */
if (t->re_fp != NULL)
(void)fclose(t->re_fp);
/* Free any backing source file name. */
if (t->re_source != NULL)
__os_free(dbp->dbenv, t->re_source);
__os_free(dbp->dbenv, t);
dbp->bt_internal = NULL;
return (0);
}
/*
* __bam_map_flags --
* Map Btree specific flags from public to the internal values.
*
* PUBLIC: void __bam_map_flags __P((DB *, u_int32_t *, u_int32_t *));
*/
void
__bam_map_flags(dbp, inflagsp, outflagsp)
DB *dbp;
u_int32_t *inflagsp, *outflagsp;
{
COMPQUIET(dbp, NULL);
if (FLD_ISSET(*inflagsp, DB_DUP)) {
FLD_SET(*outflagsp, DB_AM_DUP);
FLD_CLR(*inflagsp, DB_DUP);
}
if (FLD_ISSET(*inflagsp, DB_DUPSORT)) {
FLD_SET(*outflagsp, DB_AM_DUP | DB_AM_DUPSORT);
FLD_CLR(*inflagsp, DB_DUPSORT);
}
if (FLD_ISSET(*inflagsp, DB_RECNUM)) {
FLD_SET(*outflagsp, DB_AM_RECNUM);
FLD_CLR(*inflagsp, DB_RECNUM);
}
if (FLD_ISSET(*inflagsp, DB_REVSPLITOFF)) {
FLD_SET(*outflagsp, DB_AM_REVSPLITOFF);
FLD_CLR(*inflagsp, DB_REVSPLITOFF);
}
}
/*
* __bam_set_flags --
* Set Btree specific flags.
*
* PUBLIC: int __bam_set_flags __P((DB *, u_int32_t *flagsp));
*/
int
__bam_set_flags(dbp, flagsp)
DB *dbp;
u_int32_t *flagsp;
{
u_int32_t flags;
flags = *flagsp;
if (LF_ISSET(DB_DUP | DB_DUPSORT | DB_RECNUM | DB_REVSPLITOFF))
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags");
/*
* The DB_DUP and DB_DUPSORT flags are shared by the Hash
* and Btree access methods.
*/
if (LF_ISSET(DB_DUP | DB_DUPSORT))
DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH);
if (LF_ISSET(DB_RECNUM | DB_REVSPLITOFF))
DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
/* DB_DUP/DB_DUPSORT is incompatible with DB_RECNUM. */
if (LF_ISSET(DB_DUP | DB_DUPSORT) && F_ISSET(dbp, DB_AM_RECNUM))
goto incompat;
/* DB_RECNUM is incompatible with DB_DUP/DB_DUPSORT. */
if (LF_ISSET(DB_RECNUM) && F_ISSET(dbp, DB_AM_DUP))
goto incompat;
if (LF_ISSET(DB_DUPSORT) && dbp->dup_compare == NULL)
dbp->dup_compare = __bam_defcmp;
__bam_map_flags(dbp, flagsp, &dbp->flags);
return (0);
incompat:
return (__db_ferr(dbp->dbenv, "DB->set_flags", 1));
}
/*
* __bam_set_bt_compare --
* Set the comparison function.
*
* PUBLIC: int __bam_set_bt_compare
* PUBLIC: __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
*/
int
__bam_set_bt_compare(dbp, func)
DB *dbp;
int (*func) __P((DB *, const DBT *, const DBT *));
{
BTREE *t;
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_bt_compare");
DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
t = dbp->bt_internal;
/*
* Can't default the prefix routine if the user supplies a comparison
* routine; shortening the keys can break their comparison algorithm.
*/
t->bt_compare = func;
if (t->bt_prefix == __bam_defpfx)
t->bt_prefix = NULL;
return (0);
}
/*
* __bam_get_bt_minkey --
* Get the minimum keys per page.
*
* PUBLIC: int __bam_get_bt_minkey __P((DB *, u_int32_t *));
*/
int
__bam_get_bt_minkey(dbp, bt_minkeyp)
DB *dbp;
u_int32_t *bt_minkeyp;
{
BTREE *t;
DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
t = dbp->bt_internal;
*bt_minkeyp = t->bt_minkey;
return (0);
}
/*
* __bam_set_bt_minkey --
* Set the minimum keys per page.
*/
static int
__bam_set_bt_minkey(dbp, bt_minkey)
DB *dbp;
u_int32_t bt_minkey;
{
BTREE *t;
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_bt_minkey");
DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
t = dbp->bt_internal;
if (bt_minkey < 2) {
__db_err(dbp->dbenv, "minimum bt_minkey value is 2");
return (EINVAL);
}
t->bt_minkey = bt_minkey;
return (0);
}
/*
* __bam_set_bt_prefix --
* Set the prefix function.
*/
static int
__bam_set_bt_prefix(dbp, func)
DB *dbp;
size_t (*func) __P((DB *, const DBT *, const DBT *));
{
BTREE *t;
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_bt_prefix");
DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
t = dbp->bt_internal;
t->bt_prefix = func;
return (0);
}
/*
* __ram_map_flags --
* Map Recno specific flags from public to the internal values.
*
* PUBLIC: void __ram_map_flags __P((DB *, u_int32_t *, u_int32_t *));
*/
void
__ram_map_flags(dbp, inflagsp, outflagsp)
DB *dbp;
u_int32_t *inflagsp, *outflagsp;
{
COMPQUIET(dbp, NULL);
if (FLD_ISSET(*inflagsp, DB_RENUMBER)) {
FLD_SET(*outflagsp, DB_AM_RENUMBER);
FLD_CLR(*inflagsp, DB_RENUMBER);
}
if (FLD_ISSET(*inflagsp, DB_SNAPSHOT)) {
FLD_SET(*outflagsp, DB_AM_SNAPSHOT);
FLD_CLR(*inflagsp, DB_SNAPSHOT);
}
}
/*
* __ram_set_flags --
* Set Recno specific flags.
*
* PUBLIC: int __ram_set_flags __P((DB *, u_int32_t *flagsp));
*/
int
__ram_set_flags(dbp, flagsp)
DB *dbp;
u_int32_t *flagsp;
{
u_int32_t flags;
flags = *flagsp;
if (LF_ISSET(DB_RENUMBER | DB_SNAPSHOT)) {
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags");
DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
}
__ram_map_flags(dbp, flagsp, &dbp->flags);
return (0);
}
/*
* __ram_get_re_delim --
* Get the variable-length input record delimiter.
*/
static int
__ram_get_re_delim(dbp, re_delimp)
DB *dbp;
int *re_delimp;
{
BTREE *t;
DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
t = dbp->bt_internal;
*re_delimp = t->re_delim;
return (0);
}
/*
* __ram_set_re_delim --
* Set the variable-length input record delimiter.
*/
static int
__ram_set_re_delim(dbp, re_delim)
DB *dbp;
int re_delim;
{
BTREE *t;
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_re_delim");
DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
t = dbp->bt_internal;
t->re_delim = re_delim;
F_SET(dbp, DB_AM_DELIMITER);
return (0);
}
/*
* __ram_get_re_len --
* Get the variable-length input record length.
*
* PUBLIC: int __ram_get_re_len __P((DB *, u_int32_t *));
*/
int
__ram_get_re_len(dbp, re_lenp)
DB *dbp;
u_int32_t *re_lenp;
{
BTREE *t;
QUEUE *q;
DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
/*
* This has to work for all access methods, before or after opening the
* database. When the record length is set with __ram_set_re_len, the
* value in both the BTREE and QUEUE structs will be correct.
* Otherwise, this only makes sense after the database is opened, in
* which case we know the type.
*/
if (dbp->type == DB_QUEUE) {
q = dbp->q_internal;
*re_lenp = q->re_len;
} else {
t = dbp->bt_internal;
*re_lenp = t->re_len;
}
return (0);
}
/*
* __ram_set_re_len --
* Set the variable-length input record length.
*/
static int
__ram_set_re_len(dbp, re_len)
DB *dbp;
u_int32_t re_len;
{
BTREE *t;
QUEUE *q;
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_re_len");
DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
t = dbp->bt_internal;
t->re_len = re_len;
q = dbp->q_internal;
q->re_len = re_len;
F_SET(dbp, DB_AM_FIXEDLEN);
return (0);
}
/*
* __ram_get_re_pad --
* Get the fixed-length record pad character.
*
* PUBLIC: int __ram_get_re_pad __P((DB *, int *));
*/
int
__ram_get_re_pad(dbp, re_padp)
DB *dbp;
int *re_padp;
{
BTREE *t;
QUEUE *q;
DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
/*
* This has to work for all access methods, before or after opening the
* database. When the record length is set with __ram_set_re_pad, the
* value in both the BTREE and QUEUE structs will be correct.
* Otherwise, this only makes sense after the database is opened, in
* which case we know the type.
*/
if (dbp->type == DB_QUEUE) {
q = dbp->q_internal;
*re_padp = q->re_pad;
} else {
t = dbp->bt_internal;
*re_padp = t->re_pad;
}
return (0);
}
/*
* __ram_set_re_pad --
* Set the fixed-length record pad character.
*/
static int
__ram_set_re_pad(dbp, re_pad)
DB *dbp;
int re_pad;
{
BTREE *t;
QUEUE *q;
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_re_pad");
DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
t = dbp->bt_internal;
t->re_pad = re_pad;
q = dbp->q_internal;
q->re_pad = re_pad;
F_SET(dbp, DB_AM_PAD);
return (0);
}
/*
* __ram_get_re_source --
* Get the backing source file name.
*/
static int
__ram_get_re_source(dbp, re_sourcep)
DB *dbp;
const char **re_sourcep;
{
BTREE *t;
DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
t = dbp->bt_internal;
*re_sourcep = t->re_source;
return (0);
}
/*
* __ram_set_re_source --
* Set the backing source file name.
*/
static int
__ram_set_re_source(dbp, re_source)
DB *dbp;
const char *re_source;
{
BTREE *t;
DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_re_source");
DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
t = dbp->bt_internal;
return (__os_strdup(dbp->dbenv, re_source, &t->re_source));
}
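
The __bam_map_flags/__ram_map_flags pair deleted above translate public open flags into internal DB_AM_* bits and clear each public bit as it is consumed, so the caller can tell whether anything unrecognized was passed in. Here is a minimal sketch of that translate-and-clear pattern; the flag values are hypothetical and are not the real DB_* constants.

#include <stdint.h>

/* Hypothetical flag values, for illustration only. */
#define DEMO_DUP	0x0001u		/* public flag */
#define DEMO_AM_DUP	0x0100u		/* internal flag */

/* Set the internal bit and clear the public one, mirroring the
 * FLD_ISSET/FLD_SET/FLD_CLR sequence in the deleted __bam_map_flags().
 * Any bit still set in *inflagsp afterwards was not recognized. */
static void
demo_map_flags(uint32_t *inflagsp, uint32_t *outflagsp)
{
	if (*inflagsp & DEMO_DUP) {
		*outflagsp |= DEMO_AM_DUP;
		*inflagsp &= ~DEMO_DUP;
	}
}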

View file

@ -1,607 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995, 1996
* Keith Bostic. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Mike Olson.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: bt_open.c,v 12.5 2005/09/28 17:44:17 margo Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <string.h>
#endif
#include "db_int.h"
#include "dbinc/crypto.h"
#include "dbinc/db_page.h"
#include "dbinc/db_swap.h"
#include "dbinc/btree.h"
#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"
#include "dbinc/mp.h"
#include "dbinc/fop.h"
static void __bam_init_meta __P((DB *, BTMETA *, db_pgno_t, DB_LSN *));
/*
* __bam_open --
* Open a btree.
*
* PUBLIC: int __bam_open __P((DB *,
* PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t));
*/
int
__bam_open(dbp, txn, name, base_pgno, flags)
DB *dbp;
DB_TXN *txn;
const char *name;
db_pgno_t base_pgno;
u_int32_t flags;
{
BTREE *t;
COMPQUIET(name, NULL);
t = dbp->bt_internal;
/*
* We don't permit the user to specify a prefix routine if they didn't
* also specify a comparison routine; they can't know enough about our
* comparison routine to get it right.
*/
if (t->bt_compare == __bam_defcmp && t->bt_prefix != __bam_defpfx) {
__db_err(dbp->dbenv,
"prefix comparison may not be specified for default comparison routine");
return (EINVAL);
}
/*
* Verify that the bt_minkey value specified won't cause the
* calculation of ovflsize to underflow [#2406] for this pagesize.
*/
if (B_MINKEY_TO_OVFLSIZE(dbp, t->bt_minkey, dbp->pgsize) >
B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) {
__db_err(dbp->dbenv,
"bt_minkey value of %lu too high for page size of %lu",
(u_long)t->bt_minkey, (u_long)dbp->pgsize);
return (EINVAL);
}
/* Start up the tree. */
return (__bam_read_root(dbp, txn, base_pgno, flags));
}
/*
* __bam_metachk --
*
* PUBLIC: int __bam_metachk __P((DB *, const char *, BTMETA *));
*/
int
__bam_metachk(dbp, name, btm)
DB *dbp;
const char *name;
BTMETA *btm;
{
DB_ENV *dbenv;
u_int32_t vers;
int ret;
dbenv = dbp->dbenv;
/*
* At this point, all we know is that the magic number is for a Btree.
* Check the version, the database may be out of date.
*/
vers = btm->dbmeta.version;
if (F_ISSET(dbp, DB_AM_SWAP))
M_32_SWAP(vers);
switch (vers) {
case 6:
case 7:
__db_err(dbenv,
"%s: btree version %lu requires a version upgrade",
name, (u_long)vers);
return (DB_OLD_VERSION);
case 8:
case 9:
break;
default:
__db_err(dbenv,
"%s: unsupported btree version: %lu", name, (u_long)vers);
return (EINVAL);
}
/* Swap the page if we need to. */
if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __bam_mswap((PAGE *)btm)) != 0)
return (ret);
/*
* Check application info against metadata info, and set info, flags,
* and type based on metadata info.
*/
if ((ret =
__db_fchk(dbenv, "DB->open", btm->dbmeta.flags, BTM_MASK)) != 0)
return (ret);
if (F_ISSET(&btm->dbmeta, BTM_RECNO)) {
if (dbp->type == DB_BTREE)
goto wrong_type;
dbp->type = DB_RECNO;
DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
} else {
if (dbp->type == DB_RECNO)
goto wrong_type;
dbp->type = DB_BTREE;
DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
}
if (F_ISSET(&btm->dbmeta, BTM_DUP))
F_SET(dbp, DB_AM_DUP);
else
if (F_ISSET(dbp, DB_AM_DUP)) {
__db_err(dbenv,
"%s: DB_DUP specified to open method but not set in database",
name);
return (EINVAL);
}
if (F_ISSET(&btm->dbmeta, BTM_RECNUM)) {
if (dbp->type != DB_BTREE)
goto wrong_type;
F_SET(dbp, DB_AM_RECNUM);
if ((ret = __db_fcchk(dbenv,
"DB->open", dbp->flags, DB_AM_DUP, DB_AM_RECNUM)) != 0)
return (ret);
} else
if (F_ISSET(dbp, DB_AM_RECNUM)) {
__db_err(dbenv,
"%s: DB_RECNUM specified to open method but not set in database",
name);
return (EINVAL);
}
if (F_ISSET(&btm->dbmeta, BTM_FIXEDLEN)) {
if (dbp->type != DB_RECNO)
goto wrong_type;
F_SET(dbp, DB_AM_FIXEDLEN);
} else
if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
__db_err(dbenv,
"%s: DB_FIXEDLEN specified to open method but not set in database",
name);
return (EINVAL);
}
if (F_ISSET(&btm->dbmeta, BTM_RENUMBER)) {
if (dbp->type != DB_RECNO)
goto wrong_type;
F_SET(dbp, DB_AM_RENUMBER);
} else
if (F_ISSET(dbp, DB_AM_RENUMBER)) {
__db_err(dbenv,
"%s: DB_RENUMBER specified to open method but not set in database",
name);
return (EINVAL);
}
if (F_ISSET(&btm->dbmeta, BTM_SUBDB))
F_SET(dbp, DB_AM_SUBDB);
else
if (F_ISSET(dbp, DB_AM_SUBDB)) {
__db_err(dbenv,
"%s: multiple databases specified but not supported by file",
name);
return (EINVAL);
}
if (F_ISSET(&btm->dbmeta, BTM_DUPSORT)) {
if (dbp->dup_compare == NULL)
dbp->dup_compare = __bam_defcmp;
F_SET(dbp, DB_AM_DUPSORT);
} else
if (dbp->dup_compare != NULL) {
__db_err(dbenv,
"%s: duplicate sort specified but not supported in database",
name);
return (EINVAL);
}
/* Set the page size. */
dbp->pgsize = btm->dbmeta.pagesize;
/* Copy the file's ID. */
memcpy(dbp->fileid, btm->dbmeta.uid, DB_FILE_ID_LEN);
return (0);
wrong_type:
if (dbp->type == DB_BTREE)
__db_err(dbenv,
"open method type is Btree, database type is Recno");
else
__db_err(dbenv,
"open method type is Recno, database type is Btree");
return (EINVAL);
}
/*
* __bam_read_root --
* Read the root page and check a tree.
*
* PUBLIC: int __bam_read_root __P((DB *, DB_TXN *, db_pgno_t, u_int32_t));
*/
int
__bam_read_root(dbp, txn, base_pgno, flags)
DB *dbp;
DB_TXN *txn;
db_pgno_t base_pgno;
u_int32_t flags;
{
BTMETA *meta;
BTREE *t;
DBC *dbc;
DB_LOCK metalock;
DB_MPOOLFILE *mpf;
int ret, t_ret;
COMPQUIET(flags, 0);
meta = NULL;
t = dbp->bt_internal;
LOCK_INIT(metalock);
mpf = dbp->mpf;
ret = 0;
/* Get a cursor. */
if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0)
return (ret);
/* Get the metadata page. */
if ((ret =
__db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
goto err;
if ((ret = __memp_fget(mpf, &base_pgno, 0, &meta)) != 0)
goto err;
/*
* If the magic number is set, the tree has been created. Correct
* any fields that may not be right. Note, all of the local flags
* were set by DB->open.
*
* Otherwise, we'd better be in recovery or abort, in which case the
* metadata page will be created/initialized elsewhere.
*/
if (meta->dbmeta.magic == DB_BTREEMAGIC) {
t->bt_minkey = meta->minkey;
t->re_pad = (int)meta->re_pad;
t->re_len = meta->re_len;
t->bt_meta = base_pgno;
t->bt_root = meta->root;
} else {
DB_ASSERT(IS_RECOVERING(dbp->dbenv) ||
F_ISSET(dbp, DB_AM_RECOVER));
}
/*
* !!!
* If creating a subdatabase, we've already done an insert when
* we put the subdatabase's entry into the master database, so
* our last-page-inserted value is wrongly initialized for the
* master database, not the subdatabase we're creating. I'm not
	 * sure where the *right* place to clear this value is; it's not
* intuitively obvious that it belongs here.
*/
t->bt_lpgno = PGNO_INVALID;
err: /* Put the metadata page back. */
if (meta != NULL &&
(t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
/*
* __bam_init_meta --
*
* Initialize a btree meta-data page. The following fields may need
* to be updated later: last_pgno, root.
*/
static void
__bam_init_meta(dbp, meta, pgno, lsnp)
DB *dbp;
BTMETA *meta;
db_pgno_t pgno;
DB_LSN *lsnp;
{
BTREE *t;
memset(meta, 0, sizeof(BTMETA));
meta->dbmeta.lsn = *lsnp;
meta->dbmeta.pgno = pgno;
meta->dbmeta.magic = DB_BTREEMAGIC;
meta->dbmeta.version = DB_BTREEVERSION;
meta->dbmeta.pagesize = dbp->pgsize;
if (F_ISSET(dbp, DB_AM_CHKSUM))
FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
meta->dbmeta.encrypt_alg =
((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
meta->crypto_magic = meta->dbmeta.magic;
}
meta->dbmeta.type = P_BTREEMETA;
meta->dbmeta.free = PGNO_INVALID;
meta->dbmeta.last_pgno = pgno;
if (F_ISSET(dbp, DB_AM_DUP))
F_SET(&meta->dbmeta, BTM_DUP);
if (F_ISSET(dbp, DB_AM_FIXEDLEN))
F_SET(&meta->dbmeta, BTM_FIXEDLEN);
if (F_ISSET(dbp, DB_AM_RECNUM))
F_SET(&meta->dbmeta, BTM_RECNUM);
if (F_ISSET(dbp, DB_AM_RENUMBER))
F_SET(&meta->dbmeta, BTM_RENUMBER);
if (F_ISSET(dbp, DB_AM_SUBDB))
F_SET(&meta->dbmeta, BTM_SUBDB);
if (dbp->dup_compare != NULL)
F_SET(&meta->dbmeta, BTM_DUPSORT);
if (dbp->type == DB_RECNO)
F_SET(&meta->dbmeta, BTM_RECNO);
memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
t = dbp->bt_internal;
meta->minkey = t->bt_minkey;
meta->re_len = t->re_len;
meta->re_pad = (u_int32_t)t->re_pad;
}
/*
* __bam_new_file --
* Create the necessary pages to begin a new database file.
*
* This code appears more complex than it is because of the two cases (named
* and unnamed). The way to read the code is that for each page being created,
* there are three parts: 1) a "get page" chunk (which either uses malloc'd
* memory or calls __memp_fget), 2) the initialization, and 3) the "put page"
* chunk which either does a fop write or an __memp_fput.
*
* PUBLIC: int __bam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
*/
int
__bam_new_file(dbp, txn, fhp, name)
DB *dbp;
DB_TXN *txn;
DB_FH *fhp;
const char *name;
{
BTMETA *meta;
DB_ENV *dbenv;
DB_LSN lsn;
DB_MPOOLFILE *mpf;
DB_PGINFO pginfo;
DBT pdbt;
PAGE *root;
db_pgno_t pgno;
int ret, t_ret;
void *buf;
dbenv = dbp->dbenv;
mpf = dbp->mpf;
root = NULL;
meta = NULL;
buf = NULL;
if (F_ISSET(dbp, DB_AM_INMEM)) {
/* Build the meta-data page. */
pgno = PGNO_BASE_MD;
if ((ret =
__memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &meta)) != 0)
return (ret);
LSN_NOT_LOGGED(lsn);
__bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
meta->root = 1;
meta->dbmeta.last_pgno = 1;
if ((ret =
__db_log_page(dbp, txn, &lsn, pgno, (PAGE *)meta)) != 0)
goto err;
ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY);
meta = NULL;
if (ret != 0)
goto err;
/* Build the root page. */
pgno = 1;
if ((ret =
__memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &root)) != 0)
goto err;
P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID,
LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE);
LSN_NOT_LOGGED(root->lsn);
if ((ret =
__db_log_page(dbp, txn, &root->lsn, pgno, root)) != 0)
goto err;
ret = __memp_fput(mpf, root, DB_MPOOL_DIRTY);
root = NULL;
if (ret != 0)
goto err;
} else {
memset(&pdbt, 0, sizeof(pdbt));
/* Build the meta-data page. */
pginfo.db_pagesize = dbp->pgsize;
pginfo.flags =
F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
pginfo.type = dbp->type;
pdbt.data = &pginfo;
pdbt.size = sizeof(pginfo);
if ((ret = __os_calloc(dbenv, 1, dbp->pgsize, &buf)) != 0)
return (ret);
meta = (BTMETA *)buf;
LSN_NOT_LOGGED(lsn);
__bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
meta->root = 1;
meta->dbmeta.last_pgno = 1;
if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
goto err;
if ((ret = __fop_write(dbenv, txn, name, DB_APP_DATA, fhp,
dbp->pgsize, 0, 0, buf, dbp->pgsize, 1, F_ISSET(
dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0)) != 0)
goto err;
meta = NULL;
/* Build the root page. */
#ifdef DIAGNOSTIC
memset(buf, CLEAR_BYTE, dbp->pgsize);
#endif
root = (PAGE *)buf;
P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID,
LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE);
LSN_NOT_LOGGED(root->lsn);
if ((ret = __db_pgout(dbenv, root->pgno, root, &pdbt)) != 0)
goto err;
if ((ret = __fop_write(dbenv, txn, name, DB_APP_DATA, fhp,
dbp->pgsize, 1, 0, buf, dbp->pgsize, 1, F_ISSET(
dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0)) != 0)
goto err;
root = NULL;
}
err: if (buf != NULL)
__os_free(dbenv, buf);
else {
if (meta != NULL &&
(t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0)
ret = t_ret;
if (root != NULL &&
(t_ret = __memp_fput(mpf, root, 0)) != 0 && ret == 0)
ret = t_ret;
}
return (ret);
}
/*
* __bam_new_subdb --
* Create a metadata page and a root page for a new btree.
*
* PUBLIC: int __bam_new_subdb __P((DB *, DB *, DB_TXN *));
*/
int
__bam_new_subdb(mdbp, dbp, txn)
DB *mdbp, *dbp;
DB_TXN *txn;
{
BTMETA *meta;
DBC *dbc;
DB_ENV *dbenv;
DB_LOCK metalock;
DB_LSN lsn;
DB_MPOOLFILE *mpf;
PAGE *root;
int ret, t_ret;
dbenv = mdbp->dbenv;
mpf = mdbp->mpf;
dbc = NULL;
meta = NULL;
root = NULL;
if ((ret = __db_cursor(mdbp, txn,
&dbc, CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0)) != 0)
return (ret);
/* Get, and optionally create the metadata page. */
if ((ret = __db_lget(dbc,
0, dbp->meta_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
goto err;
if ((ret =
__memp_fget(mpf, &dbp->meta_pgno, DB_MPOOL_CREATE, &meta)) != 0)
goto err;
/* Build meta-data page. */
lsn = meta->dbmeta.lsn;
__bam_init_meta(dbp, meta, dbp->meta_pgno, &lsn);
if ((ret = __db_log_page(mdbp,
txn, &meta->dbmeta.lsn, dbp->meta_pgno, (PAGE *)meta)) != 0)
goto err;
/* Create and initialize a root page. */
if ((ret = __db_new(dbc,
dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE, &root)) != 0)
goto err;
root->level = LEAFLEVEL;
if (DBENV_LOGGING(dbenv) &&
(ret = __bam_root_log(mdbp, txn, &meta->dbmeta.lsn, 0,
meta->dbmeta.pgno, root->pgno, &meta->dbmeta.lsn)) != 0)
goto err;
meta->root = root->pgno;
if ((ret =
__db_log_page(mdbp, txn, &root->lsn, root->pgno, root)) != 0)
goto err;
/* Release the metadata and root pages. */
if ((ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY)) != 0)
goto err;
meta = NULL;
if ((ret = __memp_fput(mpf, root, DB_MPOOL_DIRTY)) != 0)
goto err;
root = NULL;
err:
if (meta != NULL)
if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0)
ret = t_ret;
if (root != NULL)
if ((t_ret = __memp_fput(mpf, root, 0)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
ret = t_ret;
if (dbc != NULL)
if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
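
The version gate in __bam_metachk() above is easy to lift out on its own. The following standalone sketch is not part of the removed file (check_btree_version and swap32 are hypothetical names); it shows the same decision: versions 6 and 7 are readable only after an upgrade, 8 and 9 are current, and anything else is rejected, with the byte swap mirroring the M_32_SWAP() done when DB_AM_SWAP is set.

#include <stdint.h>

enum btree_version_status { BTV_OK, BTV_OLD, BTV_UNSUPPORTED };

/* Mirror of the M_32_SWAP() applied when DB_AM_SWAP is set. */
static uint32_t
swap32(uint32_t v)
{
	return ((v & 0x000000ffU) << 24) | ((v & 0x0000ff00U) << 8) |
	    ((v & 0x00ff0000U) >> 8) | ((v & 0xff000000U) >> 24);
}

/*
 * Versions 6 and 7 need an upgrade (DB_OLD_VERSION in the original),
 * 8 and 9 are accepted, anything else is rejected (EINVAL).
 */
static enum btree_version_status
check_btree_version(uint32_t vers, int opposite_byte_order)
{
	if (opposite_byte_order)
		vers = swap32(vers);
	switch (vers) {
	case 6:
	case 7:
		return (BTV_OLD);
	case 8:
	case 9:
		return (BTV_OK);
	default:
		return (BTV_UNSUPPORTED);
	}
}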

View file

@ -1,912 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995, 1996
* Keith Bostic. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Mike Olson.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: bt_put.c,v 12.10 2005/10/20 18:57:00 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <string.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/db_shash.h"
#include "dbinc/btree.h"
#include "dbinc/mp.h"
static int __bam_build
__P((DBC *, u_int32_t, DBT *, PAGE *, u_int32_t, u_int32_t));
static int __bam_dup_check __P((DBC *, u_int32_t,
PAGE *, u_int32_t, u_int32_t, db_indx_t *));
static int __bam_dup_convert __P((DBC *, PAGE *, u_int32_t, u_int32_t));
static int __bam_ovput
__P((DBC *, u_int32_t, db_pgno_t, PAGE *, u_int32_t, DBT *));
static u_int32_t
__bam_partsize __P((DB *, u_int32_t, DBT *, PAGE *, u_int32_t));
/*
* __bam_iitem --
* Insert an item into the tree.
*
* PUBLIC: int __bam_iitem __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
*/
int
__bam_iitem(dbc, key, data, op, flags)
DBC *dbc;
DBT *key, *data;
u_int32_t op, flags;
{
DB_ENV *dbenv;
BKEYDATA *bk, bk_tmp;
BTREE *t;
BTREE_CURSOR *cp;
DB *dbp;
DBT bk_hdr, tdbt;
DB_MPOOLFILE *mpf;
PAGE *h;
db_indx_t cnt, indx;
u_int32_t data_size, have_bytes, need_bytes, needed, pages, pagespace;
int cmp, bigkey, bigdata, dupadjust, padrec, replace, ret, was_deleted;
COMPQUIET(bk, NULL);
COMPQUIET(cnt, 0);
dbp = dbc->dbp;
dbenv = dbp->dbenv;
mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
t = dbp->bt_internal;
h = cp->page;
indx = cp->indx;
dupadjust = replace = was_deleted = 0;
/*
* Fixed-length records with partial puts: it's an error to specify
	 * anything other than a simple overwrite.
*/
if (F_ISSET(dbp, DB_AM_FIXEDLEN) &&
F_ISSET(data, DB_DBT_PARTIAL) && data->size != data->dlen)
return (__db_rec_repl(dbenv, data->size, data->dlen));
/*
* Figure out how much space the data will take, including if it's a
* partial record.
*
* Fixed-length records: it's an error to specify a record that's
* longer than the fixed-length, and we never require less than
* the fixed-length record size.
*/
data_size = F_ISSET(data, DB_DBT_PARTIAL) ?
__bam_partsize(dbp, op, data, h, indx) : data->size;
padrec = 0;
if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
if (data_size > t->re_len)
return (__db_rec_toobig(dbenv, data_size, t->re_len));
/* Records that are deleted anyway needn't be padded out. */
if (!LF_ISSET(BI_DELETED) && data_size < t->re_len) {
padrec = 1;
data_size = t->re_len;
}
}
/*
* Handle partial puts or short fixed-length records: build the
* real record.
*/
if (padrec || F_ISSET(data, DB_DBT_PARTIAL)) {
tdbt = *data;
if ((ret =
__bam_build(dbc, op, &tdbt, h, indx, data_size)) != 0)
return (ret);
data = &tdbt;
}
/*
* If the user has specified a duplicate comparison function, return
* an error if DB_CURRENT was specified and the replacement data
* doesn't compare equal to the current data. This stops apps from
* screwing up the duplicate sort order. We have to do this after
* we build the real record so that we're comparing the real items.
*/
if (op == DB_CURRENT && dbp->dup_compare != NULL) {
if ((ret = __bam_cmp(dbp, data, h,
indx + (TYPE(h) == P_LBTREE ? O_INDX : 0),
dbp->dup_compare, &cmp)) != 0)
return (ret);
if (cmp != 0) {
__db_err(dbenv,
"Existing data sorts differently from put data");
return (EINVAL);
}
}
/*
* If the key or data item won't fit on a page, we'll have to store
* them on overflow pages.
*/
needed = 0;
bigdata = data_size > cp->ovflsize;
switch (op) {
case DB_KEYFIRST:
/* We're adding a new key and data pair. */
bigkey = key->size > cp->ovflsize;
if (bigkey)
needed += BOVERFLOW_PSIZE;
else
needed += BKEYDATA_PSIZE(key->size);
if (bigdata)
needed += BOVERFLOW_PSIZE;
else
needed += BKEYDATA_PSIZE(data_size);
break;
case DB_AFTER:
case DB_BEFORE:
case DB_CURRENT:
/*
* We're either overwriting the data item of a key/data pair
* or we're creating a new on-page duplicate and only adding
* a data item.
*
* !!!
* We're not currently correcting for space reclaimed from
* already deleted items, but I don't think it's worth the
* complexity.
*/
bigkey = 0;
if (op == DB_CURRENT) {
bk = GET_BKEYDATA(dbp, h,
indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
if (B_TYPE(bk->type) == B_KEYDATA)
have_bytes = BKEYDATA_PSIZE(bk->len);
else
have_bytes = BOVERFLOW_PSIZE;
need_bytes = 0;
} else {
have_bytes = 0;
need_bytes = sizeof(db_indx_t);
}
if (bigdata)
need_bytes += BOVERFLOW_PSIZE;
else
need_bytes += BKEYDATA_PSIZE(data_size);
if (have_bytes < need_bytes)
needed += need_bytes - have_bytes;
break;
default:
return (__db_unknown_flag(dbenv, "DB->put", op));
}
/* Split the page if there's not enough room. */
if (P_FREESPACE(dbp, h) < needed)
return (DB_NEEDSPLIT);
/*
* Check to see if we will convert to off page duplicates -- if
* so, we'll need a page.
*/
if (F_ISSET(dbp, DB_AM_DUP) &&
TYPE(h) == P_LBTREE && op != DB_KEYFIRST &&
P_FREESPACE(dbp, h) - needed <= dbp->pgsize / 2 &&
__bam_dup_check(dbc, op, h, indx, needed, &cnt)) {
pages = 1;
dupadjust = 1;
} else
pages = 0;
/*
* If we are not using transactions and there is a page limit
* set on the file, then figure out if things will fit before
* taking action.
*/
if (dbc->txn == NULL && dbp->mpf->mfp->maxpgno != 0) {
pagespace = P_MAXSPACE(dbp, dbp->pgsize);
if (bigdata)
pages += ((data_size - 1) / pagespace) + 1;
if (bigkey)
pages += ((key->size - 1) / pagespace) + 1;
if (pages > (dbp->mpf->mfp->maxpgno - dbp->mpf->mfp->last_pgno))
return (__db_space_err(dbp));
}
/*
* The code breaks it up into five cases:
*
* 1. Insert a new key/data pair.
* 2. Append a new data item (a new duplicate).
* 3. Insert a new data item (a new duplicate).
* 4. Delete and re-add the data item (overflow item).
* 5. Overwrite the data item.
*/
switch (op) {
case DB_KEYFIRST: /* 1. Insert a new key/data pair. */
if (bigkey) {
if ((ret = __bam_ovput(dbc,
B_OVERFLOW, PGNO_INVALID, h, indx, key)) != 0)
return (ret);
} else
if ((ret = __db_pitem(dbc, h, indx,
BKEYDATA_SIZE(key->size), NULL, key)) != 0)
return (ret);
if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
return (ret);
++indx;
break;
case DB_AFTER: /* 2. Append a new data item. */
if (TYPE(h) == P_LBTREE) {
/* Copy the key for the duplicate and adjust cursors. */
if ((ret =
__bam_adjindx(dbc, h, indx + P_INDX, indx, 1)) != 0)
return (ret);
if ((ret =
__bam_ca_di(dbc, PGNO(h), indx + P_INDX, 1)) != 0)
return (ret);
indx += 3;
cp->indx += 2;
} else {
++indx;
cp->indx += 1;
}
break;
case DB_BEFORE: /* 3. Insert a new data item. */
if (TYPE(h) == P_LBTREE) {
/* Copy the key for the duplicate and adjust cursors. */
if ((ret = __bam_adjindx(dbc, h, indx, indx, 1)) != 0)
return (ret);
if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
return (ret);
++indx;
}
break;
case DB_CURRENT:
/*
* Clear the cursor's deleted flag. The problem is that if
* we deadlock or fail while deleting the overflow item or
* replacing the non-overflow item, a subsequent cursor close
* will try and remove the item because the cursor's delete
* flag is set.
*/
if ((ret = __bam_ca_delete(dbp, PGNO(h), indx, 0, NULL)) != 0)
return (ret);
if (TYPE(h) == P_LBTREE) {
++indx;
}
/*
* In a Btree deleted records aren't counted (deleted records
* are counted in a Recno because all accesses are based on
* record number). If it's a Btree and it's a DB_CURRENT
* operation overwriting a previously deleted record, increment
* the record count.
*/
if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP)
was_deleted = B_DISSET(bk->type);
/*
* 4. Delete and re-add the data item.
*
* If we're changing the type of the on-page structure, or we
* are referencing offpage items, we have to delete and then
* re-add the item. We do not do any cursor adjustments here
* because we're going to immediately re-add the item into the
* same slot.
*/
if (bigdata || B_TYPE(bk->type) != B_KEYDATA) {
if ((ret = __bam_ditem(dbc, h, indx)) != 0)
return (ret);
break;
}
/* 5. Overwrite the data item. */
replace = 1;
break;
default:
return (__db_unknown_flag(dbenv, "DB->put", op));
}
/* Add the data. */
if (bigdata) {
/*
* We do not have to handle deleted (BI_DELETED) records
* in this case; the actual records should never be created.
*/
DB_ASSERT(!LF_ISSET(BI_DELETED));
if ((ret = __bam_ovput(dbc,
B_OVERFLOW, PGNO_INVALID, h, indx, data)) != 0)
return (ret);
} else {
if (LF_ISSET(BI_DELETED)) {
B_TSET(bk_tmp.type, B_KEYDATA, 1);
bk_tmp.len = data->size;
bk_hdr.data = &bk_tmp;
bk_hdr.size = SSZA(BKEYDATA, data);
ret = __db_pitem(dbc, h, indx,
BKEYDATA_SIZE(data->size), &bk_hdr, data);
} else if (replace)
ret = __bam_ritem(dbc, h, indx, data);
else
ret = __db_pitem(dbc, h, indx,
BKEYDATA_SIZE(data->size), NULL, data);
if (ret != 0)
return (ret);
}
if ((ret = __memp_fset(mpf, h, DB_MPOOL_DIRTY)) != 0)
return (ret);
/*
* Re-position the cursors if necessary and reset the current cursor
* to point to the new item.
*/
if (op != DB_CURRENT) {
if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
return (ret);
cp->indx = TYPE(h) == P_LBTREE ? indx - O_INDX : indx;
}
/*
	 * If we've changed the record count, update the tree. The count only
	 * needs adjusting if the operation was not performed on the current
	 * record, or if the current record was previously deleted.
*/
if (F_ISSET(cp, C_RECNUM) && (op != DB_CURRENT || was_deleted))
if ((ret = __bam_adjust(dbc, 1)) != 0)
return (ret);
/*
* If a Btree leaf page is at least 50% full and we may have added or
* modified a duplicate data item, see if the set of duplicates takes
* up at least 25% of the space on the page. If it does, move it onto
* its own page.
*/
if (dupadjust &&
(ret = __bam_dup_convert(dbc, h, indx - O_INDX, cnt)) != 0)
return (ret);
/* If we've modified a recno file, set the flag. */
if (dbc->dbtype == DB_RECNO)
t->re_modified = 1;
return (ret);
}
/*
* __bam_partsize --
* Figure out how much space a partial data item is in total.
*/
static u_int32_t
__bam_partsize(dbp, op, data, h, indx)
DB *dbp;
u_int32_t op, indx;
DBT *data;
PAGE *h;
{
BKEYDATA *bk;
u_int32_t nbytes;
/*
* If the record doesn't already exist, it's simply the data we're
* provided.
*/
if (op != DB_CURRENT)
return (data->doff + data->size);
/*
* Otherwise, it's the data provided plus any already existing data
* that we're not replacing.
*/
bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
nbytes =
B_TYPE(bk->type) == B_OVERFLOW ? ((BOVERFLOW *)bk)->tlen : bk->len;
return (__db_partsize(nbytes, data));
}
/*
* __bam_build --
* Build the real record for a partial put, or short fixed-length record.
*/
static int
__bam_build(dbc, op, dbt, h, indx, nbytes)
DBC *dbc;
u_int32_t op, indx, nbytes;
DBT *dbt;
PAGE *h;
{
BKEYDATA *bk, tbk;
BOVERFLOW *bo;
BTREE *t;
DB *dbp;
DBT copy, *rdata;
u_int32_t len, tlen;
u_int8_t *p;
int ret;
COMPQUIET(bo, NULL);
dbp = dbc->dbp;
t = dbp->bt_internal;
/* We use the record data return memory, it's only a short-term use. */
rdata = &dbc->my_rdata;
if (rdata->ulen < nbytes) {
if ((ret = __os_realloc(dbp->dbenv,
nbytes, &rdata->data)) != 0) {
rdata->ulen = 0;
rdata->data = NULL;
return (ret);
}
rdata->ulen = nbytes;
}
/*
* We use nul or pad bytes for any part of the record that isn't
* specified; get it over with.
*/
memset(rdata->data,
F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_pad : 0, nbytes);
/*
* In the next clauses, we need to do three things: a) set p to point
* to the place at which to copy the user's data, b) set tlen to the
* total length of the record, not including the bytes contributed by
* the user, and c) copy any valid data from an existing record. If
* it's not a partial put (this code is called for both partial puts
* and fixed-length record padding) or it's a new key, we can cut to
* the chase.
*/
if (!F_ISSET(dbt, DB_DBT_PARTIAL) || op != DB_CURRENT) {
p = (u_int8_t *)rdata->data + dbt->doff;
tlen = dbt->doff;
goto user_copy;
}
/* Find the current record. */
if (indx < NUM_ENT(h)) {
bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ?
O_INDX : 0));
bo = (BOVERFLOW *)bk;
} else {
bk = &tbk;
B_TSET(bk->type, B_KEYDATA, 0);
bk->len = 0;
}
if (B_TYPE(bk->type) == B_OVERFLOW) {
/*
* In the case of an overflow record, we shift things around
* in the current record rather than allocate a separate copy.
*/
memset(&copy, 0, sizeof(copy));
if ((ret = __db_goff(dbp, &copy, bo->tlen,
bo->pgno, &rdata->data, &rdata->ulen)) != 0)
return (ret);
/* Skip any leading data from the original record. */
tlen = dbt->doff;
p = (u_int8_t *)rdata->data + dbt->doff;
/*
* Copy in any trailing data from the original record.
*
* If the original record was larger than the original offset
* plus the bytes being deleted, there is trailing data in the
* original record we need to preserve. If we aren't deleting
* the same number of bytes as we're inserting, copy it up or
* down, into place.
*
* Use memmove(), the regions may overlap.
*/
if (bo->tlen > dbt->doff + dbt->dlen) {
len = bo->tlen - (dbt->doff + dbt->dlen);
if (dbt->dlen != dbt->size)
memmove(p + dbt->size, p + dbt->dlen, len);
tlen += len;
}
} else {
/* Copy in any leading data from the original record. */
memcpy(rdata->data,
bk->data, dbt->doff > bk->len ? bk->len : dbt->doff);
tlen = dbt->doff;
p = (u_int8_t *)rdata->data + dbt->doff;
/* Copy in any trailing data from the original record. */
len = dbt->doff + dbt->dlen;
if (bk->len > len) {
memcpy(p + dbt->size, bk->data + len, bk->len - len);
tlen += bk->len - len;
}
}
user_copy:
/*
* Copy in the application provided data -- p and tlen must have been
* initialized above.
*/
memcpy(p, dbt->data, dbt->size);
tlen += dbt->size;
/* Set the DBT to reference our new record. */
rdata->size = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : tlen;
rdata->dlen = 0;
rdata->doff = 0;
rdata->flags = 0;
*dbt = *rdata;
return (0);
}
/*
* __bam_ritem --
* Replace an item on a page.
*
* PUBLIC: int __bam_ritem __P((DBC *, PAGE *, u_int32_t, DBT *));
*/
int
__bam_ritem(dbc, h, indx, data)
DBC *dbc;
PAGE *h;
u_int32_t indx;
DBT *data;
{
BKEYDATA *bk;
DB *dbp;
DBT orig, repl;
db_indx_t cnt, lo, ln, min, off, prefix, suffix;
int32_t nbytes;
int ret;
db_indx_t *inp;
u_int8_t *p, *t;
dbp = dbc->dbp;
/*
* Replace a single item onto a page. The logic figuring out where
* to insert and whether it fits is handled in the caller. All we do
* here is manage the page shuffling.
*/
bk = GET_BKEYDATA(dbp, h, indx);
/* Log the change. */
if (DBC_LOGGING(dbc)) {
/*
* We might as well check to see if the two data items share
* a common prefix and suffix -- it can save us a lot of log
		 * message space if they're large.
*/
min = data->size < bk->len ? data->size : bk->len;
for (prefix = 0,
p = bk->data, t = data->data;
prefix < min && *p == *t; ++prefix, ++p, ++t)
;
min -= prefix;
for (suffix = 0,
p = (u_int8_t *)bk->data + bk->len - 1,
t = (u_int8_t *)data->data + data->size - 1;
suffix < min && *p == *t; ++suffix, --p, --t)
;
/* We only log the parts of the keys that have changed. */
orig.data = (u_int8_t *)bk->data + prefix;
orig.size = bk->len - (prefix + suffix);
repl.data = (u_int8_t *)data->data + prefix;
repl.size = data->size - (prefix + suffix);
if ((ret = __bam_repl_log(dbp, dbc->txn, &LSN(h), 0, PGNO(h),
&LSN(h), (u_int32_t)indx, (u_int32_t)B_DISSET(bk->type),
&orig, &repl, (u_int32_t)prefix, (u_int32_t)suffix)) != 0)
return (ret);
} else
LSN_NOT_LOGGED(LSN(h));
/*
* Set references to the first in-use byte on the page and the
* first byte of the item being replaced.
*/
inp = P_INP(dbp, h);
p = (u_int8_t *)h + HOFFSET(h);
t = (u_int8_t *)bk;
/*
* If the entry is growing in size, shift the beginning of the data
* part of the page down. If the entry is shrinking in size, shift
* the beginning of the data part of the page up. Use memmove(3),
* the regions overlap.
*/
lo = BKEYDATA_SIZE(bk->len);
ln = (db_indx_t)BKEYDATA_SIZE(data->size);
if (lo != ln) {
nbytes = lo - ln; /* Signed difference. */
if (p == t) /* First index is fast. */
inp[indx] += nbytes;
else { /* Else, shift the page. */
memmove(p + nbytes, p, (size_t)(t - p));
/* Adjust the indices' offsets. */
off = inp[indx];
for (cnt = 0; cnt < NUM_ENT(h); ++cnt)
if (inp[cnt] <= off)
inp[cnt] += nbytes;
}
/* Clean up the page and adjust the item's reference. */
HOFFSET(h) += nbytes;
t += nbytes;
}
/* Copy the new item onto the page. */
bk = (BKEYDATA *)t;
B_TSET(bk->type, B_KEYDATA, 0);
bk->len = data->size;
memcpy(bk->data, data->data, data->size);
return (0);
}
/*
* __bam_dup_check --
* Check to see if the duplicate set at indx should have its own page.
*/
static int
__bam_dup_check(dbc, op, h, indx, sz, cntp)
DBC *dbc;
u_int32_t op;
PAGE *h;
u_int32_t indx, sz;
db_indx_t *cntp;
{
BKEYDATA *bk;
DB *dbp;
db_indx_t cnt, first, *inp;
dbp = dbc->dbp;
inp = P_INP(dbp, h);
/*
* Count the duplicate records and calculate how much room they're
* using on the page.
*/
while (indx > 0 && inp[indx] == inp[indx - P_INDX])
indx -= P_INDX;
/* Count the key once. */
bk = GET_BKEYDATA(dbp, h, indx);
sz += B_TYPE(bk->type) == B_KEYDATA ?
BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
/* Sum up all the data items. */
first = indx;
/*
* Account for the record being inserted. If we are replacing it,
* don't count it twice.
*
* We execute the loop with first == indx to get the size of the
* first record.
*/
cnt = op == DB_CURRENT ? 0 : 1;
for (first = indx;
indx < NUM_ENT(h) && inp[first] == inp[indx];
++cnt, indx += P_INDX) {
bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
sz += B_TYPE(bk->type) == B_KEYDATA ?
BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
}
/*
* We have to do these checks when the user is replacing the cursor's
* data item -- if the application replaces a duplicate item with a
* larger data item, it can increase the amount of space used by the
* duplicates, requiring this check. But that means we may have done
* this check when it wasn't a duplicate item after all.
*/
if (cnt == 1)
return (0);
/*
* If this set of duplicates is using more than 25% of the page, move
* them off. The choice of 25% is a WAG, but the value must be small
* enough that we can always split a page without putting duplicates
* on two different pages.
*/
if (sz < dbp->pgsize / 4)
return (0);
*cntp = cnt;
return (1);
}
/*
* __bam_dup_convert --
* Move a set of duplicates off-page and into their own tree.
*/
static int
__bam_dup_convert(dbc, h, indx, cnt)
DBC *dbc;
PAGE *h;
u_int32_t indx, cnt;
{
BKEYDATA *bk;
DB *dbp;
DBT hdr;
DB_MPOOLFILE *mpf;
PAGE *dp;
db_indx_t cpindx, dindx, first, *inp;
int ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
inp = P_INP(dbp, h);
/* Move to the beginning of the dup set. */
while (indx > 0 && inp[indx] == inp[indx - P_INDX])
indx -= P_INDX;
/* Get a new page. */
if ((ret = __db_new(dbc,
dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0)
return (ret);
P_INIT(dp, dbp->pgsize, dp->pgno,
PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, TYPE(dp));
/*
* Move this set of duplicates off the page. First points to the first
* key of the first duplicate key/data pair, cnt is the number of pairs
* we're dealing with.
*/
memset(&hdr, 0, sizeof(hdr));
first = indx;
dindx = indx;
cpindx = 0;
do {
/* Move cursors referencing the old entry to the new entry. */
if ((ret = __bam_ca_dup(dbc, first,
PGNO(h), indx, PGNO(dp), cpindx)) != 0)
goto err;
/*
		 * Copy the entry to the new page. If the off-duplicate page
		 * is a Btree page (i.e. dup_compare will be non-NULL; we use
		 * Btree pages for sorted dups, and Recno pages for unsorted
		 * dups), move all entries
* normally, even deleted ones. If it's a Recno page,
* deleted entries are discarded (if the deleted entry is
* overflow, then free up those pages).
*/
bk = GET_BKEYDATA(dbp, h, dindx + 1);
hdr.data = bk;
hdr.size = B_TYPE(bk->type) == B_KEYDATA ?
BKEYDATA_SIZE(bk->len) : BOVERFLOW_SIZE;
if (dbp->dup_compare == NULL && B_DISSET(bk->type)) {
/*
* Unsorted dups, i.e. recno page, and we have
* a deleted entry, don't move it, but if it was
* an overflow entry, we need to free those pages.
*/
if (B_TYPE(bk->type) == B_OVERFLOW &&
(ret = __db_doff(dbc,
(GET_BOVERFLOW(dbp, h, dindx + 1))->pgno)) != 0)
goto err;
} else {
if ((ret = __db_pitem(
dbc, dp, cpindx, hdr.size, &hdr, NULL)) != 0)
goto err;
++cpindx;
}
/* Delete all but the last reference to the key. */
if (cnt != 1) {
if ((ret = __bam_adjindx(dbc,
h, dindx, first + 1, 0)) != 0)
goto err;
} else
dindx++;
/* Delete the data item. */
if ((ret = __db_ditem(dbc, h, dindx, hdr.size)) != 0)
goto err;
indx += P_INDX;
} while (--cnt);
/* Put in a new data item that points to the duplicates page. */
if ((ret = __bam_ovput(dbc,
B_DUPLICATE, dp->pgno, h, first + 1, NULL)) != 0)
goto err;
/* Adjust cursors for all the above movements. */
if ((ret = __bam_ca_di(dbc,
PGNO(h), first + P_INDX, (int)(first + P_INDX - indx))) != 0)
goto err;
return (__memp_fput(mpf, dp, DB_MPOOL_DIRTY));
err: (void)__memp_fput(mpf, dp, 0);
return (ret);
}
/*
* __bam_ovput --
* Build an item for an off-page duplicates page or overflow page and
* insert it on the page.
*/
static int
__bam_ovput(dbc, type, pgno, h, indx, item)
DBC *dbc;
u_int32_t type, indx;
db_pgno_t pgno;
PAGE *h;
DBT *item;
{
BOVERFLOW bo;
DBT hdr;
int ret;
UMRW_SET(bo.unused1);
B_TSET(bo.type, type, 0);
UMRW_SET(bo.unused2);
/*
* If we're creating an overflow item, do so and acquire the page
* number for it. If we're creating an off-page duplicates tree,
* we are giving the page number as an argument.
*/
if (type == B_OVERFLOW) {
if ((ret = __db_poff(dbc, item, &bo.pgno)) != 0)
return (ret);
bo.tlen = item->size;
} else {
bo.pgno = pgno;
bo.tlen = 0;
}
/* Store the new record on the page. */
memset(&hdr, 0, sizeof(hdr));
hdr.data = &bo;
hdr.size = BOVERFLOW_SIZE;
return (__db_pitem(dbc, h, indx, BOVERFLOW_SIZE, &hdr, NULL));
}
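
The prefix/suffix trimming that __bam_ritem() performs before logging a replace is worth seeing in isolation. Below is a minimal standalone sketch, not taken from the removed file (trim_common and struct repl_span are illustrative names), of the same idea: find the shared leading and trailing bytes of the old and new items so the log record only has to carry the bytes that actually differ.

#include <stddef.h>
#include <stdint.h>

struct repl_span {
	size_t prefix;		/* shared leading bytes */
	size_t suffix;		/* shared trailing bytes */
	size_t orig_len;	/* differing bytes in the old item */
	size_t repl_len;	/* differing bytes in the new item */
};

static struct repl_span
trim_common(const uint8_t *oldp, size_t old_len,
    const uint8_t *newp, size_t new_len)
{
	struct repl_span s;
	size_t min;

	min = old_len < new_len ? old_len : new_len;
	for (s.prefix = 0;
	    s.prefix < min && oldp[s.prefix] == newp[s.prefix]; ++s.prefix)
		;
	min -= s.prefix;
	for (s.suffix = 0;
	    s.suffix < min &&
	    oldp[old_len - 1 - s.suffix] == newp[new_len - 1 - s.suffix];
	    ++s.suffix)
		;
	/* Only the middle of each item has to go into the log record. */
	s.orig_len = old_len - (s.prefix + s.suffix);
	s.repl_len = new_len - (s.prefix + s.suffix);
	return (s);
}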

File diff suppressed because it is too large

View file

@ -1,76 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1998-2005
* Sleepycat Software. All rights reserved.
*
* $Id: bt_reclaim.c,v 12.2 2005/06/16 20:20:19 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <string.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"
/*
* __bam_reclaim --
* Free a database.
*
* PUBLIC: int __bam_reclaim __P((DB *, DB_TXN *));
*/
int
__bam_reclaim(dbp, txn)
DB *dbp;
DB_TXN *txn;
{
DBC *dbc;
int ret, t_ret;
/* Acquire a cursor. */
if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0)
return (ret);
/* Walk the tree, freeing pages. */
ret = __bam_traverse(dbc,
DB_LOCK_WRITE, dbc->internal->root, __db_reclaim_callback, dbc);
/* Discard the cursor. */
if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
/*
* __bam_truncate --
* Truncate a database.
*
* PUBLIC: int __bam_truncate __P((DBC *, u_int32_t *));
*/
int
__bam_truncate(dbc, countp)
DBC *dbc;
u_int32_t *countp;
{
db_trunc_param trunc;
int ret;
trunc.count = 0;
trunc.dbc = dbc;
/* Walk the tree, freeing pages. */
ret = __bam_traverse(dbc,
DB_LOCK_WRITE, dbc->internal->root, __db_truncate_callback, &trunc);
if (countp != NULL)
*countp = trunc.count;
return (ret);
}
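
Both __bam_reclaim() and __bam_truncate() above reduce to the same shape: walk every node of the tree and hand each page to a callback that either frees it or counts its records. A toy sketch of that pattern follows; it is illustrative only, and toy_node, toy_traverse and count_records are hypothetical names, not Berkeley DB API.

#include <stddef.h>

struct toy_node {
	struct toy_node *left, *right;
	size_t nrecs;
};

typedef int (*visit_cb)(struct toy_node *, void *);

/* Post-order walk: children first, so a page-freeing callback is safe. */
static int
toy_traverse(struct toy_node *node, visit_cb cb, void *cookie)
{
	int ret;

	if (node == NULL)
		return (0);
	if ((ret = toy_traverse(node->left, cb, cookie)) != 0 ||
	    (ret = toy_traverse(node->right, cb, cookie)) != 0)
		return (ret);
	return (cb(node, cookie));
}

/* A counting callback in the spirit of __db_truncate_callback. */
static int
count_records(struct toy_node *node, void *cookie)
{
	*(size_t *)cookie += node->nrecs;
	return (0);
}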

File diff suppressed because it is too large

View file

@ -1,431 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995, 1996
* Keith Bostic. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: bt_rsearch.c,v 12.5 2005/08/08 03:37:05 ubell Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"
#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
#include "dbinc/mp.h"
/*
* __bam_rsearch --
* Search a btree for a record number.
*
* PUBLIC: int __bam_rsearch __P((DBC *, db_recno_t *, u_int32_t, int, int *));
*/
int
__bam_rsearch(dbc, recnop, flags, stop, exactp)
DBC *dbc;
db_recno_t *recnop;
u_int32_t flags;
int stop, *exactp;
{
BINTERNAL *bi;
BTREE_CURSOR *cp;
DB *dbp;
DB_LOCK lock;
DB_MPOOLFILE *mpf;
PAGE *h;
RINTERNAL *ri;
db_indx_t adjust, deloffset, indx, top;
db_lockmode_t lock_mode;
db_pgno_t pg;
db_recno_t recno, t_recno, total;
int ret, stack, t_ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
h = NULL;
BT_STK_CLR(cp);
/*
* There are several ways we search a btree tree. The flags argument
* specifies if we're acquiring read or write locks and if we are
* locking pairs of pages. In addition, if we're adding or deleting
* an item, we have to lock the entire tree, regardless. See btree.h
* for more details.
*
* If write-locking pages, we need to know whether or not to acquire a
* write lock on a page before getting it. This depends on how deep it
* is in tree, which we don't know until we acquire the root page. So,
* if we need to lock the root page we may have to upgrade it later,
* because we won't get the correct lock initially.
*
* Retrieve the root page.
*/
if ((ret = __bam_get_root(dbc, cp->root, stop, flags, &stack)) != 0)
return (ret);
lock_mode = cp->csp->lock_mode;
lock = cp->csp->lock;
h = cp->csp->page;
BT_STK_CLR(cp);
/*
* If appending to the tree, set the record number now -- we have the
* root page locked.
*
* Delete only deletes exact matches, read only returns exact matches.
* Note, this is different from __bam_search(), which returns non-exact
* matches for read.
*
* The record may not exist. We can only return the correct location
* for the record immediately after the last record in the tree, so do
* a fast check now.
*/
total = RE_NREC(h);
if (LF_ISSET(S_APPEND)) {
*exactp = 0;
*recnop = recno = total + 1;
} else {
recno = *recnop;
if (recno <= total)
*exactp = 1;
else {
*exactp = 0;
if (!LF_ISSET(S_PAST_EOF) || recno > total + 1) {
/*
* Keep the page locked for serializability.
*
* XXX
* This leaves the root page locked, which will
* eliminate any concurrency. A possible fix
* would be to lock the last leaf page instead.
*/
ret = __memp_fput(mpf, h, 0);
if ((t_ret =
__TLPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
return (ret == 0 ? DB_NOTFOUND : ret);
}
}
}
/*
* !!!
* Record numbers in the tree are 0-based, but the recno is
* 1-based. All of the calculations below have to take this
* into account.
*/
for (total = 0;;) {
switch (TYPE(h)) {
case P_LBTREE:
case P_LDUP:
recno -= total;
/*
* There may be logically deleted records on the page.
* If there are enough, the record may not exist.
*/
if (TYPE(h) == P_LBTREE) {
adjust = P_INDX;
deloffset = O_INDX;
} else {
adjust = O_INDX;
deloffset = 0;
}
for (t_recno = 0, indx = 0;; indx += adjust) {
if (indx >= NUM_ENT(h)) {
*exactp = 0;
if (!LF_ISSET(S_PAST_EOF) ||
recno > t_recno + 1) {
ret = __memp_fput(mpf, h, 0);
h = NULL;
if ((t_ret = __TLPUT(dbc,
lock)) != 0 && ret == 0)
ret = t_ret;
if (ret == 0)
ret = DB_NOTFOUND;
goto err;
}
}
if (!B_DISSET(GET_BKEYDATA(dbp, h,
indx + deloffset)->type) &&
++t_recno == recno)
break;
}
/* Correct from 1-based to 0-based for a page offset. */
BT_STK_ENTER(dbp->dbenv,
cp, h, indx, lock, lock_mode, ret);
if (ret != 0)
goto err;
return (0);
case P_IBTREE:
for (indx = 0, top = NUM_ENT(h);;) {
bi = GET_BINTERNAL(dbp, h, indx);
if (++indx == top || total + bi->nrecs >= recno)
break;
total += bi->nrecs;
}
pg = bi->pgno;
break;
case P_LRECNO:
recno -= total;
/* Correct from 1-based to 0-based for a page offset. */
--recno;
BT_STK_ENTER(dbp->dbenv,
cp, h, recno, lock, lock_mode, ret);
if (ret != 0)
goto err;
return (0);
case P_IRECNO:
for (indx = 0, top = NUM_ENT(h);;) {
ri = GET_RINTERNAL(dbp, h, indx);
if (++indx == top || total + ri->nrecs >= recno)
break;
total += ri->nrecs;
}
pg = ri->pgno;
break;
default:
return (__db_pgfmt(dbp->dbenv, h->pgno));
}
--indx;
/* Return if this is the lowest page wanted. */
if (stop == LEVEL(h)) {
BT_STK_ENTER(dbp->dbenv,
cp, h, indx, lock, lock_mode, ret);
if (ret != 0)
goto err;
return (0);
}
if (stack) {
BT_STK_PUSH(dbp->dbenv,
cp, h, indx, lock, lock_mode, ret);
if (ret != 0)
goto err;
h = NULL;
lock_mode = DB_LOCK_WRITE;
if ((ret =
__db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
goto err;
} else {
/*
* Decide if we want to return a pointer to the next
* page in the stack. If we do, write lock it and
* never unlock it.
*/
if ((LF_ISSET(S_PARENT) &&
(u_int8_t)(stop + 1) >= (u_int8_t)(LEVEL(h) - 1)) ||
(LEVEL(h) - 1) == LEAFLEVEL)
stack = 1;
if ((ret = __memp_fput(mpf, h, 0)) != 0)
goto err;
h = NULL;
lock_mode = stack &&
LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
if ((ret = __db_lget(dbc,
LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
/*
* If we fail, discard the lock we held. This
* is OK because this only happens when we are
* descending the tree holding read-locks.
*/
(void)__LPUT(dbc, lock);
goto err;
}
}
if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0)
goto err;
}
/* NOTREACHED */
err: if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
BT_STK_POP(cp);
__bam_stkrel(dbc, 0);
return (ret);
}
/*
* __bam_adjust --
* Adjust the tree after adding or deleting a record.
*
* PUBLIC: int __bam_adjust __P((DBC *, int32_t));
*/
int
__bam_adjust(dbc, adjust)
DBC *dbc;
int32_t adjust;
{
BTREE_CURSOR *cp;
DB *dbp;
DB_MPOOLFILE *mpf;
EPG *epg;
PAGE *h;
db_pgno_t root_pgno;
int ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
root_pgno = cp->root;
/* Update the record counts for the tree. */
for (epg = cp->sp; epg <= cp->csp; ++epg) {
h = epg->page;
if (TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO) {
if (DBC_LOGGING(dbc)) {
if ((ret = __bam_cadjust_log(dbp, dbc->txn,
&LSN(h), 0, PGNO(h), &LSN(h),
(u_int32_t)epg->indx, adjust,
PGNO(h) == root_pgno ?
CAD_UPDATEROOT : 0)) != 0)
return (ret);
} else
LSN_NOT_LOGGED(LSN(h));
if (TYPE(h) == P_IBTREE)
GET_BINTERNAL(dbp, h, epg->indx)->nrecs +=
adjust;
else
GET_RINTERNAL(dbp, h, epg->indx)->nrecs +=
adjust;
if (PGNO(h) == root_pgno)
RE_NREC_ADJ(h, adjust);
if ((ret = __memp_fset(mpf, h, DB_MPOOL_DIRTY)) != 0)
return (ret);
}
}
return (0);
}
/*
* __bam_nrecs --
* Return the number of records in the tree.
*
* PUBLIC: int __bam_nrecs __P((DBC *, db_recno_t *));
*/
int
__bam_nrecs(dbc, rep)
DBC *dbc;
db_recno_t *rep;
{
DB *dbp;
DB_LOCK lock;
DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t pgno;
int ret, t_ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
pgno = dbc->internal->root;
if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
return (ret);
if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0)
return (ret);
*rep = RE_NREC(h);
ret = __memp_fput(mpf, h, 0);
if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
/*
* __bam_total --
* Return the number of records below a page.
*
* PUBLIC: db_recno_t __bam_total __P((DB *, PAGE *));
*/
db_recno_t
__bam_total(dbp, h)
DB *dbp;
PAGE *h;
{
db_recno_t nrecs;
db_indx_t indx, top;
nrecs = 0;
top = NUM_ENT(h);
switch (TYPE(h)) {
case P_LBTREE:
/* Check for logically deleted records. */
for (indx = 0; indx < top; indx += P_INDX)
if (!B_DISSET(
GET_BKEYDATA(dbp, h, indx + O_INDX)->type))
++nrecs;
break;
case P_LDUP:
/* Check for logically deleted records. */
for (indx = 0; indx < top; indx += O_INDX)
if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type))
++nrecs;
break;
case P_IBTREE:
for (indx = 0; indx < top; indx += O_INDX)
nrecs += GET_BINTERNAL(dbp, h, indx)->nrecs;
break;
case P_LRECNO:
nrecs = NUM_ENT(h);
break;
case P_IRECNO:
for (indx = 0; indx < top; indx += O_INDX)
nrecs += GET_RINTERNAL(dbp, h, indx)->nrecs;
break;
}
return (nrecs);
}
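
The descent that __bam_rsearch() makes through internal pages is driven purely by the per-child record counts. Here is a small standalone sketch of that single step (pick_child is an illustrative name, not part of the removed code): given the counts, choose the child subtree holding the 1-based record number and report how many records precede it, so the search continues in that child with recno minus that total.

#include <stddef.h>
#include <stdint.h>

/*
 * Return the index of the child whose subtree contains the 1-based
 * record number; *skipped is the number of records before that child.
 * Falls through to the last child when recno is past every subtree,
 * which matches how the original loop behaves.
 */
static size_t
pick_child(const uint32_t *child_nrecs, size_t nchildren,
    uint32_t recno, uint32_t *skipped)
{
	size_t indx;
	uint32_t total;

	total = 0;
	for (indx = 0; indx + 1 < nchildren; ++indx) {
		if (total + child_nrecs[indx] >= recno)
			break;
		total += child_nrecs[indx];
	}
	*skipped = total;
	return (indx);
}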

View file

@ -1,706 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995, 1996
* Keith Bostic. All rights reserved.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Mike Olson.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: bt_search.c,v 12.17 2005/11/10 21:17:13 ubell Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <string.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/db_shash.h"
#include "dbinc/btree.h"
#include "dbinc/lock.h"
#include "dbinc/mp.h"
/*
* __bam_get_root --
* Fetch the root of a tree and see if we want to keep
* it in the stack.
*
* PUBLIC: int __bam_get_root __P((DBC *, db_pgno_t, int, u_int32_t, int *));
*/
int
__bam_get_root(dbc, pg, slevel, flags, stack)
DBC *dbc;
db_pgno_t pg;
int slevel;
u_int32_t flags;
int *stack;
{
BTREE_CURSOR *cp;
DB *dbp;
DB_LOCK lock;
DB_MPOOLFILE *mpf;
PAGE *h;
db_lockmode_t lock_mode;
int ret, t_ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
/*
* If write-locking pages, we need to know whether or not to acquire a
* write lock on a page before getting it. This depends on how deep it
* is in tree, which we don't know until we acquire the root page. So,
* if we need to lock the root page we may have to upgrade it later,
* because we won't get the correct lock initially.
*
* Retrieve the root page.
*/
try_again:
*stack = LF_ISSET(S_STACK) &&
(dbc->dbtype == DB_RECNO || F_ISSET(cp, C_RECNUM));
lock_mode = DB_LOCK_READ;
if (*stack ||
LF_ISSET(S_DEL) || (LF_ISSET(S_NEXT) && LF_ISSET(S_WRITE)))
lock_mode = DB_LOCK_WRITE;
if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
return (ret);
if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) {
/* Did not read it, so we can release the lock */
(void)__LPUT(dbc, lock);
return (ret);
}
/*
* Decide if we need to save this page; if we do, write lock it.
* We deliberately don't lock-couple on this call. If the tree
* is tiny, i.e., one page, and two threads are busily updating
* the root page, we're almost guaranteed deadlocks galore, as
* each one gets a read lock and then blocks the other's attempt
* for a write lock.
*/
if (!*stack &&
((LF_ISSET(S_PARENT) && (u_int8_t)(slevel + 1) >= LEVEL(h)) ||
(LF_ISSET(S_WRITE) && LEVEL(h) == LEAFLEVEL) ||
(LF_ISSET(S_START) && slevel == LEVEL(h)))) {
if (!STD_LOCKING(dbc))
goto no_relock;
ret = __memp_fput(mpf, h, 0);
if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
return (ret);
lock_mode = DB_LOCK_WRITE;
if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
return (ret);
if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) {
/* Did not read it, so we can release the lock */
(void)__LPUT(dbc, lock);
return (ret);
}
if (!((LF_ISSET(S_PARENT) &&
(u_int8_t)(slevel + 1) >= LEVEL(h)) ||
(LF_ISSET(S_WRITE) && LEVEL(h) == LEAFLEVEL) ||
(LF_ISSET(S_START) && slevel == LEVEL(h)))) {
/* Someone else split the root, start over. */
ret = __memp_fput(mpf, h, 0);
if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
return (ret);
goto try_again;
}
no_relock: *stack = 1;
}
BT_STK_ENTER(dbp->dbenv, cp, h, 0, lock, lock_mode, ret);
return (ret);
}
/*
* __bam_search --
* Search a btree for a key.
*
* PUBLIC: int __bam_search __P((DBC *, db_pgno_t,
* PUBLIC: const DBT *, u_int32_t, int, db_recno_t *, int *));
*/
int
__bam_search(dbc, root_pgno, key, flags, slevel, recnop, exactp)
DBC *dbc;
db_pgno_t root_pgno;
const DBT *key;
u_int32_t flags;
int slevel, *exactp;
db_recno_t *recnop;
{
BTREE *t;
BTREE_CURSOR *cp;
DB *dbp;
DB_LOCK lock;
DB_MPOOLFILE *mpf;
PAGE *h;
db_indx_t base, i, indx, *inp, lim;
db_lockmode_t lock_mode;
db_pgno_t pg;
db_recno_t recno;
int adjust, cmp, deloffset, ret, stack, t_ret;
int (*func) __P((DB *, const DBT *, const DBT *));
dbp = dbc->dbp;
mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
h = NULL;
t = dbp->bt_internal;
recno = 0;
BT_STK_CLR(cp);
/*
* There are several ways we search a btree tree. The flags argument
* specifies if we're acquiring read or write locks, if we position
* to the first or last item in a set of duplicates, if we return
* deleted items, and if we are locking pairs of pages. In addition,
* if we're modifying record numbers, we have to lock the entire tree
* regardless. See btree.h for more details.
*/
if (root_pgno == PGNO_INVALID)
root_pgno = cp->root;
if ((ret = __bam_get_root(dbc, root_pgno, slevel, flags, &stack)) != 0)
return (ret);
lock_mode = cp->csp->lock_mode;
lock = cp->csp->lock;
h = cp->csp->page;
BT_STK_CLR(cp);
/* Choose a comparison function. */
func = F_ISSET(dbc, DBC_OPD) ?
(dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare) :
t->bt_compare;
for (;;) {
inp = P_INP(dbp, h);
adjust = TYPE(h) == P_LBTREE ? P_INDX : O_INDX;
if (LF_ISSET(S_MIN | S_MAX)) {
if (LF_ISSET(S_MIN) || NUM_ENT(h) == 0)
indx = 0;
else if (TYPE(h) == P_LBTREE)
indx = NUM_ENT(h) - 2;
else
indx = NUM_ENT(h) - 1;
if (LEVEL(h) == LEAFLEVEL ||
(!LF_ISSET(S_START) && LEVEL(h) == slevel)) {
if (LF_ISSET(S_NEXT))
goto get_next;
goto found;
}
goto next;
}
/*
* Do a binary search on the current page. If we're searching
* a Btree leaf page, we have to walk the indices in groups of
		 * two. If we're searching an internal page or an off-page dup
* page, they're an index per page item. If we find an exact
* match on a leaf page, we're done.
*/
for (base = 0,
lim = NUM_ENT(h) / (db_indx_t)adjust; lim != 0; lim >>= 1) {
indx = base + ((lim >> 1) * adjust);
if ((ret =
__bam_cmp(dbp, key, h, indx, func, &cmp)) != 0)
goto err;
if (cmp == 0) {
if (LEVEL(h) == LEAFLEVEL ||
(!LF_ISSET(S_START) &&
LEVEL(h) == slevel)) {
if (LF_ISSET(S_NEXT))
goto get_next;
goto found;
}
goto next;
}
if (cmp > 0) {
base = indx + adjust;
--lim;
}
}
/*
* No match found. Base is the smallest index greater than
* key and may be zero or a last + O_INDX index.
*
* If it's a leaf page or the stopping point,
* return base as the "found" value.
* Delete only deletes exact matches.
*/
if (LEVEL(h) == LEAFLEVEL ||
(!LF_ISSET(S_START) && LEVEL(h) == slevel)) {
*exactp = 0;
if (LF_ISSET(S_EXACT)) {
ret = DB_NOTFOUND;
goto err;
}
if (LF_ISSET(S_STK_ONLY)) {
BT_STK_NUM(dbp->dbenv, cp, h, base, ret);
if ((t_ret =
__LPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret =
__memp_fput(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
if (LF_ISSET(S_NEXT)) {
get_next: /*
* The caller could have asked for a NEXT
* at the root if the tree recently collapsed.
*/
if (PGNO(h) == root_pgno) {
ret = DB_NOTFOUND;
goto err;
}
/*
* Save the root of the subtree
* and drop the rest of the subtree
* and search down again starting at
* the next child.
*/
if ((ret = __LPUT(dbc, lock)) != 0)
goto err;
if ((ret = __memp_fput(mpf, h, 0)) != 0)
goto err;
h = NULL;
LF_SET(S_MIN);
LF_CLR(S_NEXT);
indx = cp->sp->indx + 1;
if (indx == NUM_ENT(cp->sp->page)) {
ret = DB_NOTFOUND;
cp->csp++;
goto err;
}
h = cp->sp->page;
cp->sp->page = NULL;
lock = cp->sp->lock;
LOCK_INIT(cp->sp->lock);
if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0)
goto err;
stack = 1;
goto next;
}
/*
* !!!
* Possibly returning a deleted record -- DB_SET_RANGE,
* DB_KEYFIRST and DB_KEYLAST don't require an exact
* match, and we don't want to walk multiple pages here
* to find an undeleted record. This is handled by the
* calling routine.
*/
if (LF_ISSET(S_DEL) && cp->csp == cp->sp)
cp->csp++;
BT_STK_ENTER(dbp->dbenv,
cp, h, base, lock, lock_mode, ret);
if (ret != 0)
goto err;
return (0);
}
/*
* If it's not a leaf page, record the internal page (which is
* a parent page for the key). Decrement the base by 1 if it's
* non-zero so that if a split later occurs, the inserted page
* will be to the right of the saved page.
*/
indx = base > 0 ? base - O_INDX : base;
/*
* If we're trying to calculate the record number, sum up
* all the record numbers on this page up to the indx point.
*/
next: if (recnop != NULL)
for (i = 0; i < indx; ++i)
recno += GET_BINTERNAL(dbp, h, i)->nrecs;
pg = GET_BINTERNAL(dbp, h, indx)->pgno;
/* See if we are at the level to start stacking. */
if (LF_ISSET(S_START) && slevel == LEVEL(h))
stack = 1;
if (LF_ISSET(S_STK_ONLY)) {
if (slevel == LEVEL(h)) {
BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
if ((t_ret =
__LPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret =
__memp_fput(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
BT_STK_NUMPUSH(dbp->dbenv, cp, h, indx, ret);
(void)__memp_fput(mpf, h, 0);
h = NULL;
if ((ret = __db_lget(dbc,
LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
/*
* Discard our lock and return on failure. This
* is OK because it only happens when descending
* the tree holding read-locks.
*/
(void)__LPUT(dbc, lock);
return (ret);
}
} else if (stack) {
/* Return if this is the lowest page wanted. */
if (LF_ISSET(S_PARENT) && slevel == LEVEL(h)) {
BT_STK_ENTER(dbp->dbenv,
cp, h, indx, lock, lock_mode, ret);
if (ret != 0)
goto err;
return (0);
}
if (LF_ISSET(S_DEL) && NUM_ENT(h) > 1) {
/*
* There was a page with a singleton pointer
* to a non-empty subtree.
*/
cp->csp--;
if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0)
goto err;
stack = 0;
goto do_del;
}
BT_STK_PUSH(dbp->dbenv,
cp, h, indx, lock, lock_mode, ret);
if (ret != 0)
goto err;
h = NULL;
lock_mode = DB_LOCK_WRITE;
if ((ret =
__db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
goto err;
} else {
/*
* Decide if we want to return a reference to the next
* page in the return stack. If so, lock it and never
* unlock it.
*/
if ((LF_ISSET(S_PARENT) &&
(u_int8_t)(slevel + 1) >= (LEVEL(h) - 1)) ||
(LEVEL(h) - 1) == LEAFLEVEL)
stack = 1;
/*
* Returning a subtree. See if we have hit the start
* point; if so, save the parent and set stack.
* Otherwise free the parent and temporarily
* save this one.
* For S_DEL we need to find a page with 1 entry.
* For S_NEXT we want to find the minimal subtree
* that contains the key and the next page.
* We save pages as long as we are at the right
* edge of the subtree. When we leave the right
* edge, we drop the subtree.
*/
if (!LF_ISSET(S_DEL | S_NEXT)) {
if ((ret = __memp_fput(mpf, h, 0)) != 0)
goto err;
goto lock_next;
}
if ((LF_ISSET(S_DEL) && NUM_ENT(h) == 1)) {
stack = 1;
LF_SET(S_WRITE);
/* Push the parent. */
cp->csp++;
/* Push this node. */
BT_STK_PUSH(dbp->dbenv, cp, h,
indx, lock, lock_mode, ret);
if (ret != 0)
goto err;
LOCK_INIT(lock);
} else {
/*
* See if we want to save the tree so far.
* If we are looking for the next key,
* then we must save this node if we are
* at the end of the page. If not, then
* discard anything we have saved so far.
* For delete, keep only one node until
* we find a singleton.
*/
do_del: if (cp->csp->page != NULL) {
if (LF_ISSET(S_NEXT) &&
indx == NUM_ENT(h) - 1)
cp->csp++;
else if ((ret =
__bam_stkrel(dbc, STK_NOLOCK)) != 0)
goto err;
}
/* Save this node. */
BT_STK_ENTER(dbp->dbenv, cp,
h, indx, lock, lock_mode, ret);
if (ret != 0)
goto err;
LOCK_INIT(lock);
}
lock_next: h = NULL;
if (stack && LF_ISSET(S_WRITE))
lock_mode = DB_LOCK_WRITE;
if ((ret = __db_lget(dbc,
LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
/*
* If we fail, discard the lock we held. This
* is OK because this only happens when we are
* descending the tree holding read-locks.
*/
(void)__LPUT(dbc, lock);
if (LF_ISSET(S_DEL | S_NEXT))
cp->csp++;
goto err;
}
}
if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0)
goto err;
}
/* NOTREACHED */
found: *exactp = 1;
/*
* If we got here, we know that we have a Btree leaf or off-page
* duplicates page. If it's a Btree leaf page, we have to handle
* on-page duplicates.
*
* If there are duplicates, go to the first/last one. This is
* safe because we know that we're not going to leave the page,
* all duplicate sets that are not on overflow pages exist on a
* single leaf page.
*/
if (TYPE(h) == P_LBTREE && NUM_ENT(h) > P_INDX) {
if (LF_ISSET(S_DUPLAST))
while (indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
inp[indx] == inp[indx + P_INDX])
indx += P_INDX;
else if (LF_ISSET(S_DUPFIRST))
while (indx > 0 &&
inp[indx] == inp[indx - P_INDX])
indx -= P_INDX;
}
/*
* Now check if we are allowed to return deleted items; if not, then
* find the next (or previous) non-deleted duplicate entry. (We do
* not move from the original found key on the basis of the S_DELNO
* flag.)
*/
DB_ASSERT(recnop == NULL || LF_ISSET(S_DELNO));
if (LF_ISSET(S_DELNO)) {
deloffset = TYPE(h) == P_LBTREE ? O_INDX : 0;
if (LF_ISSET(S_DUPLAST))
while (B_DISSET(GET_BKEYDATA(dbp,
h, indx + deloffset)->type) && indx > 0 &&
inp[indx] == inp[indx - adjust])
indx -= adjust;
else
while (B_DISSET(GET_BKEYDATA(dbp,
h, indx + deloffset)->type) &&
indx < (db_indx_t)(NUM_ENT(h) - adjust) &&
inp[indx] == inp[indx + adjust])
indx += adjust;
/*
* If we weren't able to find a non-deleted duplicate, return
* DB_NOTFOUND.
*/
if (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type)) {
ret = DB_NOTFOUND;
goto err;
}
/*
* Increment the record counter to point to the found element.
* Ignore any deleted key/data pairs. There doesn't need to
* be any correction for duplicates, as Btree doesn't support
* duplicates and record numbers in the same tree.
*/
if (recnop != NULL) {
DB_ASSERT(TYPE(h) == P_LBTREE);
for (i = 0; i < indx; i += P_INDX)
if (!B_DISSET(
GET_BKEYDATA(dbp, h, i + O_INDX)->type))
++recno;
/* Correct the number for a 0-base. */
*recnop = recno + 1;
}
}
if (LF_ISSET(S_STK_ONLY)) {
BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
} else {
if (LF_ISSET(S_DEL) && cp->csp == cp->sp)
cp->csp++;
BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret);
}
if (ret != 0)
goto err;
return (0);
err: if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
/* Keep any not-found page locked for serializability. */
if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
BT_STK_POP(cp);
__bam_stkrel(dbc, 0);
return (ret);
}
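/*
 * Standalone illustration (compile separately) of the lower-bound binary
 * search used earlier in this function: the loop steps through the page's
 * index array in units of "adjust" (P_INDX on btree leaf pages, O_INDX
 * elsewhere) and leaves "base" as the smallest slot whose key compares
 * greater than the search key.  The same idiom over a plain int array;
 * search_stride is a hypothetical name, not a BDB symbol.
 */
#include <stdio.h>
#include <stddef.h>

static size_t
search_stride(const int *entries, size_t nent, size_t stride, int key, int *exactp)
{
	size_t base, indx, lim;

	*exactp = 0;
	for (base = 0, lim = nent / stride; lim != 0; lim >>= 1) {
		indx = base + ((lim >> 1) * stride);
		if (entries[indx] == key) {	/* exact match */
			*exactp = 1;
			return (indx);
		}
		if (key > entries[indx]) {	/* move right */
			base = indx + stride;
			--lim;
		}
	}
	return (base);	/* smallest slot holding a value greater than key */
}

int
main(void)
{
	int v[] = { 10, 20, 30, 40 }, exact;
	size_t indx;

	indx = search_stride(v, 4, 1, 25, &exact);
	printf("indx=%zu exact=%d\n", indx, exact);	/* indx=2 exact=0 */
	return (0);
}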
/*
* __bam_stkrel --
* Release all pages currently held in the stack.
*
* PUBLIC: int __bam_stkrel __P((DBC *, u_int32_t));
*/
int
__bam_stkrel(dbc, flags)
DBC *dbc;
u_int32_t flags;
{
BTREE_CURSOR *cp;
DB *dbp;
DB_MPOOLFILE *mpf;
EPG *epg;
int ret, t_ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
/*
* Release inner pages first.
*
* The caller must be sure that setting STK_NOLOCK will not affect
* either serializability or recoverability.
*/
for (ret = 0, epg = cp->sp; epg <= cp->csp; ++epg) {
if (epg->page != NULL) {
if (LF_ISSET(STK_CLRDBC) && cp->page == epg->page) {
cp->page = NULL;
LOCK_INIT(cp->lock);
}
if ((t_ret =
__memp_fput(mpf, epg->page, 0)) != 0 && ret == 0)
ret = t_ret;
/*
* XXX
* Temporary fix for #3243 -- under certain deadlock
* conditions we call here again and re-free the page.
* The correct fix is to never release a stack that
* doesn't hold items.
*/
epg->page = NULL;
}
/*
* We set this if we need to release our pins,
* but are not logically ready to have the pages
* visible.
*/
if (LF_ISSET(STK_PGONLY))
continue;
if (LF_ISSET(STK_NOLOCK)) {
if ((t_ret = __LPUT(dbc, epg->lock)) != 0 && ret == 0)
ret = t_ret;
} else
if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0)
ret = t_ret;
}
/* Clear the stack, all pages have been released. */
if (!LF_ISSET(STK_PGONLY))
BT_STK_CLR(cp);
return (ret);
}
/*
* __bam_stkgrow --
* Grow the stack.
*
* PUBLIC: int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *));
*/
int
__bam_stkgrow(dbenv, cp)
DB_ENV *dbenv;
BTREE_CURSOR *cp;
{
EPG *p;
size_t entries;
int ret;
entries = cp->esp - cp->sp;
if ((ret = __os_calloc(dbenv, entries * 2, sizeof(EPG), &p)) != 0)
return (ret);
memcpy(p, cp->sp, entries * sizeof(EPG));
if (cp->sp != cp->stack)
__os_free(dbenv, cp->sp);
cp->sp = p;
cp->csp = p + entries;
cp->esp = p + entries * 2;
return (0);
}
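/*
 * Standalone sketch (compile separately) of the doubling pattern
 * __bam_stkgrow uses: allocate twice the current capacity, copy the live
 * entries, free the old block, and repoint the stack cursors.  The entry
 * type and names here are hypothetical; the real function skips the free
 * while sp still points at the cursor's built-in array, and since it is
 * only called with a full stack it can set csp to the old capacity
 * directly, where this sketch preserves the current depth explicitly.
 */
#include <stdlib.h>
#include <string.h>

struct entry { void *page; int indx; };

static int
grow_stack(struct entry **spp, struct entry **cspp, struct entry **espp)
{
	struct entry *p;
	size_t entries, depth;

	entries = (size_t)(*espp - *spp);	/* current capacity */
	depth = (size_t)(*cspp - *spp);		/* current depth */
	if ((p = calloc(entries * 2, sizeof(struct entry))) == NULL)
		return (-1);
	memcpy(p, *spp, entries * sizeof(struct entry));
	free(*spp);

	*spp = p;
	*cspp = p + depth;
	*espp = p + entries * 2;
	return (0);
}

int
main(void)
{
	struct entry *sp, *csp, *esp;

	if ((sp = calloc(5, sizeof(*sp))) == NULL)
		return (1);
	csp = sp + 5;	/* stack is full */
	esp = sp + 5;
	if (grow_stack(&sp, &csp, &esp) != 0)
		return (1);
	/* Capacity is now 10; csp still points at depth 5. */
	free(sp);
	return (0);
}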

File diff suppressed because it is too large

View file

@ -1,638 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*
* $Id: bt_stat.c,v 12.3 2005/06/16 20:20:23 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <ctype.h>
#include <string.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/db_shash.h"
#include "dbinc/btree.h"
#include "dbinc/lock.h"
#include "dbinc/mp.h"
#ifdef HAVE_STATISTICS
/*
* __bam_stat --
* Gather/print the btree statistics
*
* PUBLIC: int __bam_stat __P((DBC *, void *, u_int32_t));
*/
int
__bam_stat(dbc, spp, flags)
DBC *dbc;
void *spp;
u_int32_t flags;
{
BTMETA *meta;
BTREE *t;
BTREE_CURSOR *cp;
DB *dbp;
DB_BTREE_STAT *sp;
DB_ENV *dbenv;
DB_LOCK lock, metalock;
DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t pgno;
int ret, t_ret, write_meta;
dbp = dbc->dbp;
dbenv = dbp->dbenv;
meta = NULL;
t = dbp->bt_internal;
sp = NULL;
LOCK_INIT(metalock);
LOCK_INIT(lock);
mpf = dbp->mpf;
h = NULL;
ret = write_meta = 0;
cp = (BTREE_CURSOR *)dbc->internal;
/* Allocate and clear the structure. */
if ((ret = __os_umalloc(dbenv, sizeof(*sp), &sp)) != 0)
goto err;
memset(sp, 0, sizeof(*sp));
/* Get the metadata page for the entire database. */
pgno = PGNO_BASE_MD;
if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &metalock)) != 0)
goto err;
if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0)
goto err;
if (flags == DB_RECORDCOUNT || flags == DB_CACHED_COUNTS)
flags = DB_FAST_STAT;
if (flags == DB_FAST_STAT)
goto meta_only;
/* Walk the metadata free list, counting pages. */
for (sp->bt_free = 0, pgno = meta->dbmeta.free; pgno != PGNO_INVALID;) {
++sp->bt_free;
if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0)
goto err;
pgno = h->next_pgno;
if ((ret = __memp_fput(mpf, h, 0)) != 0)
goto err;
h = NULL;
}
/* Get the root page. */
pgno = cp->root;
if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
goto err;
if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0)
goto err;
/* Get the levels from the root page. */
sp->bt_levels = h->level;
/* Discard the root page. */
ret = __memp_fput(mpf, h, 0);
h = NULL;
if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
goto err;
/* Walk the tree. */
if ((ret = __bam_traverse(dbc,
DB_LOCK_READ, cp->root, __bam_stat_callback, sp)) != 0)
goto err;
/*
* Get the subdatabase metadata page if it's not the same as the
* one we already have.
*/
write_meta = !F_ISSET(dbp, DB_AM_RDONLY);
meta_only:
if (t->bt_meta != PGNO_BASE_MD || write_meta != 0) {
ret = __memp_fput(mpf, meta, 0);
meta = NULL;
if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
goto err;
if ((ret = __db_lget(dbc,
0, t->bt_meta, write_meta == 0 ?
DB_LOCK_READ : DB_LOCK_WRITE, 0, &metalock)) != 0)
goto err;
if ((ret = __memp_fget(mpf, &t->bt_meta, 0, &meta)) != 0)
goto err;
}
if (flags == DB_FAST_STAT) {
if (dbp->type == DB_RECNO ||
(dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))) {
if ((ret = __db_lget(dbc, 0,
cp->root, DB_LOCK_READ, 0, &lock)) != 0)
goto err;
if ((ret = __memp_fget(mpf, &cp->root, 0, &h)) != 0)
goto err;
sp->bt_nkeys = RE_NREC(h);
} else
sp->bt_nkeys = meta->dbmeta.key_count;
sp->bt_ndata = dbp->type == DB_RECNO ?
sp->bt_nkeys : meta->dbmeta.record_count;
}
/* Get metadata page statistics. */
sp->bt_metaflags = meta->dbmeta.flags;
sp->bt_minkey = meta->minkey;
sp->bt_re_len = meta->re_len;
sp->bt_re_pad = meta->re_pad;
sp->bt_pagesize = meta->dbmeta.pagesize;
sp->bt_magic = meta->dbmeta.magic;
sp->bt_version = meta->dbmeta.version;
if (write_meta != 0) {
meta->dbmeta.key_count = sp->bt_nkeys;
meta->dbmeta.record_count = sp->bt_ndata;
}
*(DB_BTREE_STAT **)spp = sp;
err: /* Discard the second page. */
if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
/* Discard the metadata page. */
if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
ret = t_ret;
if (meta != NULL && (t_ret = __memp_fput(
mpf, meta, write_meta == 0 ? 0 : DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0 && sp != NULL) {
__os_ufree(dbenv, sp);
*(DB_BTREE_STAT **)spp = NULL;
}
return (ret);
}
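/*
 * Applications reach __bam_stat through the public DB->stat method; the
 * DB_BTREE_STAT block is allocated by the library and freed by the caller
 * with free().  A usage sketch, compiled separately against db.h: the
 * database file name and open flags are placeholders, and the stat call
 * shown is the DB_TXN-taking form of this 4.x-era API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <db.h>

int
main(void)
{
	DB *dbp;
	DB_BTREE_STAT *sp;
	int ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (EXIT_FAILURE);
	/* "access.db" is a placeholder database file. */
	if ((ret = dbp->open(dbp,
	    NULL, "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
		goto err;

	/* DB_FAST_STAT reads only the cached metadata counts. */
	if ((ret = dbp->stat(dbp, NULL, &sp, DB_FAST_STAT)) != 0)
		goto err;
	printf("keys=%lu data items=%lu page size=%lu\n",
	    (unsigned long)sp->bt_nkeys, (unsigned long)sp->bt_ndata,
	    (unsigned long)sp->bt_pagesize);
	free(sp);	/* the stat block is malloc'd for the caller */

err:	(void)dbp->close(dbp, 0);
	return (ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}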
/*
* __bam_stat_print --
* Display btree/recno statistics.
*
* PUBLIC: int __bam_stat_print __P((DBC *, u_int32_t));
*/
int
__bam_stat_print(dbc, flags)
DBC *dbc;
u_int32_t flags;
{
static const FN fn[] = {
{ BTM_DUP, "duplicates" },
{ BTM_RECNO, "recno" },
{ BTM_RECNUM, "record-numbers" },
{ BTM_FIXEDLEN, "fixed-length" },
{ BTM_RENUMBER, "renumber" },
{ BTM_SUBDB, "multiple-databases" },
{ BTM_DUPSORT, "sorted duplicates" },
{ 0, NULL }
};
DB *dbp;
DB_BTREE_STAT *sp;
DB_ENV *dbenv;
int lorder, ret;
const char *s;
dbp = dbc->dbp;
dbenv = dbp->dbenv;
if ((ret = __bam_stat(dbc, &sp, 0)) != 0)
return (ret);
if (LF_ISSET(DB_STAT_ALL)) {
__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
__db_msg(dbenv, "Default Btree/Recno database information:");
}
__db_msg(dbenv, "%lx\tBtree magic number", (u_long)sp->bt_magic);
__db_msg(dbenv, "%lu\tBtree version number", (u_long)sp->bt_version);
(void)__db_get_lorder(dbp, &lorder);
switch (lorder) {
case 1234:
s = "Little-endian";
break;
case 4321:
s = "Big-endian";
break;
default:
s = "Unrecognized byte order";
break;
}
__db_msg(dbenv, "%s\tByte order", s);
__db_prflags(dbenv, NULL, sp->bt_metaflags, fn, NULL, "\tFlags");
if (dbp->type == DB_BTREE)
__db_dl(dbenv, "Minimum keys per-page", (u_long)sp->bt_minkey);
if (dbp->type == DB_RECNO) {
__db_dl(dbenv,
"Fixed-length record size", (u_long)sp->bt_re_len);
__db_msg(dbenv,
"%#x\tFixed-length record pad", (u_int)sp->bt_re_pad);
}
__db_dl(dbenv,
"Underlying database page size", (u_long)sp->bt_pagesize);
__db_dl(dbenv, "Number of levels in the tree", (u_long)sp->bt_levels);
__db_dl(dbenv, dbp->type == DB_BTREE ?
"Number of unique keys in the tree" :
"Number of records in the tree", (u_long)sp->bt_nkeys);
__db_dl(dbenv,
"Number of data items in the tree", (u_long)sp->bt_ndata);
__db_dl(dbenv,
"Number of tree internal pages", (u_long)sp->bt_int_pg);
__db_dl_pct(dbenv,
"Number of bytes free in tree internal pages",
(u_long)sp->bt_int_pgfree,
DB_PCT_PG(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize), "ff");
__db_dl(dbenv,
"Number of tree leaf pages", (u_long)sp->bt_leaf_pg);
__db_dl_pct(dbenv, "Number of bytes free in tree leaf pages",
(u_long)sp->bt_leaf_pgfree, DB_PCT_PG(
sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize), "ff");
__db_dl(dbenv,
"Number of tree duplicate pages", (u_long)sp->bt_dup_pg);
__db_dl_pct(dbenv,
"Number of bytes free in tree duplicate pages",
(u_long)sp->bt_dup_pgfree,
DB_PCT_PG(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize), "ff");
__db_dl(dbenv,
"Number of tree overflow pages", (u_long)sp->bt_over_pg);
__db_dl_pct(dbenv, "Number of bytes free in tree overflow pages",
(u_long)sp->bt_over_pgfree, DB_PCT_PG(
sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize), "ff");
__db_dl(dbenv, "Number of empty pages", (u_long)sp->bt_empty_pg);
__db_dl(dbenv, "Number of pages on the free list", (u_long)sp->bt_free);
__os_ufree(dbenv, sp);
return (0);
}
/*
* __bam_stat_callback --
* Statistics callback.
*
* PUBLIC: int __bam_stat_callback __P((DB *, PAGE *, void *, int *));
*/
int
__bam_stat_callback(dbp, h, cookie, putp)
DB *dbp;
PAGE *h;
void *cookie;
int *putp;
{
DB_BTREE_STAT *sp;
db_indx_t indx, *inp, top;
u_int8_t type;
sp = cookie;
*putp = 0;
top = NUM_ENT(h);
inp = P_INP(dbp, h);
switch (TYPE(h)) {
case P_IBTREE:
case P_IRECNO:
++sp->bt_int_pg;
sp->bt_int_pgfree += P_FREESPACE(dbp, h);
break;
case P_LBTREE:
if (top == 0)
++sp->bt_empty_pg;
/* Correct for on-page duplicates and deleted items. */
for (indx = 0; indx < top; indx += P_INDX) {
type = GET_BKEYDATA(dbp, h, indx + O_INDX)->type;
/* Ignore deleted items. */
if (B_DISSET(type))
continue;
/* Ignore duplicate keys. */
if (indx + P_INDX >= top ||
inp[indx] != inp[indx + P_INDX])
++sp->bt_nkeys;
/* Ignore off-page duplicates. */
if (B_TYPE(type) != B_DUPLICATE)
++sp->bt_ndata;
}
++sp->bt_leaf_pg;
sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
break;
case P_LRECNO:
if (top == 0)
++sp->bt_empty_pg;
/*
* If walking a recno tree, then each of these items is a key.
* Otherwise, we're walking an off-page duplicate set.
*/
if (dbp->type == DB_RECNO) {
/*
* Correct for deleted items in non-renumbering Recno
* databases.
*/
if (F_ISSET(dbp, DB_AM_RENUMBER)) {
sp->bt_nkeys += top;
sp->bt_ndata += top;
} else
for (indx = 0; indx < top; indx += O_INDX) {
type = GET_BKEYDATA(dbp, h, indx)->type;
if (!B_DISSET(type)) {
++sp->bt_ndata;
++sp->bt_nkeys;
}
}
++sp->bt_leaf_pg;
sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
} else {
sp->bt_ndata += top;
++sp->bt_dup_pg;
sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
}
break;
case P_LDUP:
if (top == 0)
++sp->bt_empty_pg;
/* Correct for deleted items. */
for (indx = 0; indx < top; indx += O_INDX)
if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type))
++sp->bt_ndata;
++sp->bt_dup_pg;
sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
break;
case P_OVERFLOW:
++sp->bt_over_pg;
sp->bt_over_pgfree += P_OVFLSPACE(dbp, dbp->pgsize, h);
break;
default:
return (__db_pgfmt(dbp->dbenv, h->pgno));
}
return (0);
}
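/*
 * In the P_LBTREE case above, on-page duplicate keys share a single key
 * item, so their index-array slots hold the same offset; a key is counted
 * only when the next slot differs or does not exist.  A standalone toy
 * version of that test, with the key/data pairing collapsed to a flat
 * array of key offsets (the values are made up):
 */
#include <stdio.h>
#include <stddef.h>

int
main(void)
{
	/* Offsets for keys "a", "a", "b", "c", "c". */
	unsigned short inp[] = { 100, 100, 140, 180, 180 };
	size_t i, n = sizeof(inp) / sizeof(inp[0]), nkeys = 0;

	for (i = 0; i < n; ++i)
		if (i + 1 >= n || inp[i] != inp[i + 1])
			++nkeys;
	printf("unique keys: %zu\n", nkeys);	/* prints 3 */
	return (0);
}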
/*
* __bam_print_cursor --
* Display the current internal cursor.
*
* PUBLIC: void __bam_print_cursor __P((DBC *));
*/
void
__bam_print_cursor(dbc)
DBC *dbc;
{
static const FN fn[] = {
{ C_DELETED, "C_DELETED" },
{ C_RECNUM, "C_RECNUM" },
{ C_RENUMBER, "C_RENUMBER" },
{ 0, NULL }
};
DB_ENV *dbenv;
BTREE_CURSOR *cp;
dbenv = dbc->dbp->dbenv;
cp = (BTREE_CURSOR *)dbc->internal;
STAT_ULONG("Overflow size", cp->ovflsize);
if (dbc->dbtype == DB_RECNO)
STAT_ULONG("Recno", cp->recno);
STAT_ULONG("Order", cp->order);
__db_prflags(dbenv, NULL, cp->flags, fn, NULL, "\tInternal Flags");
}
#else /* !HAVE_STATISTICS */
int
__bam_stat(dbc, spp, flags)
DBC *dbc;
void *spp;
u_int32_t flags;
{
COMPQUIET(spp, NULL);
COMPQUIET(flags, 0);
return (__db_stat_not_built(dbc->dbp->dbenv));
}
int
__bam_stat_print(dbc, flags)
DBC *dbc;
u_int32_t flags;
{
COMPQUIET(flags, 0);
return (__db_stat_not_built(dbc->dbp->dbenv));
}
#endif
/*
* __bam_key_range --
* Return proportion of keys relative to given key. The numbers are
* slightly skewed due to on-page duplicates.
*
* PUBLIC: int __bam_key_range __P((DBC *, DBT *, DB_KEY_RANGE *, u_int32_t));
*/
int
__bam_key_range(dbc, dbt, kp, flags)
DBC *dbc;
DBT *dbt;
DB_KEY_RANGE *kp;
u_int32_t flags;
{
BTREE_CURSOR *cp;
EPG *sp;
double factor;
int exact, ret;
COMPQUIET(flags, 0);
if ((ret = __bam_search(dbc, PGNO_INVALID,
dbt, S_STK_ONLY, 1, NULL, &exact)) != 0)
return (ret);
cp = (BTREE_CURSOR *)dbc->internal;
kp->less = kp->greater = 0.0;
factor = 1.0;
/* Correct the leaf page. */
cp->csp->entries /= 2;
cp->csp->indx /= 2;
for (sp = cp->sp; sp <= cp->csp; ++sp) {
/*
* At each level we know that pages greater than indx contain
* keys greater than what we are looking for and those less
* than indx are less than. The one pointed to by indx may
* have some less, some greater or even equal. If indx is
* equal to the number of entries, then the key is out of range
* and everything is less.
*/
if (sp->indx == 0)
kp->greater += factor * (sp->entries - 1)/sp->entries;
else if (sp->indx == sp->entries)
kp->less += factor;
else {
kp->less += factor * sp->indx / sp->entries;
kp->greater += factor *
((sp->entries - sp->indx) - 1) / sp->entries;
}
factor *= 1.0/sp->entries;
}
/*
* If there was an exact match then assign 1 n'th to the key itself.
* Otherwise that factor belongs to those greater than the key, unless
* the key was out of range.
*/
if (exact)
kp->equal = factor;
else {
if (kp->less != 1)
kp->greater += factor;
kp->equal = 0;
}
BT_STK_CLR(cp);
return (0);
}
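/*
 * A standalone worked example of the accumulation above: a two-level tree
 * whose root holds 4 entries with the search descending slot 1, and whose
 * (already halved) leaf holds 10 entries with an exact match at slot 3.
 * The tree shape is made up, and only the exact-match branch of the final
 * adjustment is shown.  The three numbers sum to 1.0 and are estimates,
 * not exact key counts.
 */
#include <stdio.h>

int
main(void)
{
	struct { int indx, entries; } stack[] = { { 1, 4 }, { 3, 10 } };
	double factor = 1.0, less = 0.0, greater = 0.0, equal;
	size_t i;

	for (i = 0; i < sizeof(stack) / sizeof(stack[0]); ++i) {
		int indx = stack[i].indx, entries = stack[i].entries;

		if (indx == 0)
			greater += factor * (entries - 1) / entries;
		else if (indx == entries)
			less += factor;
		else {
			less += factor * indx / entries;
			greater += factor * ((entries - indx) - 1) / entries;
		}
		factor *= 1.0 / entries;
	}
	equal = factor;	/* exact match found */
	printf("less=%.3f equal=%.3f greater=%.3f\n", less, equal, greater);
	/* prints less=0.325 equal=0.025 greater=0.650 */
	return (0);
}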
/*
* __bam_traverse --
* Walk a Btree database.
*
* PUBLIC: int __bam_traverse __P((DBC *, db_lockmode_t,
* PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
*/
int
__bam_traverse(dbc, mode, root_pgno, callback, cookie)
DBC *dbc;
db_lockmode_t mode;
db_pgno_t root_pgno;
int (*callback)__P((DB *, PAGE *, void *, int *));
void *cookie;
{
BINTERNAL *bi;
BKEYDATA *bk;
DB *dbp;
DB_LOCK lock;
DB_MPOOLFILE *mpf;
PAGE *h;
RINTERNAL *ri;
db_indx_t indx, *inp;
int already_put, ret, t_ret;
dbp = dbc->dbp;
mpf = dbp->mpf;
already_put = 0;
if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0)
return (ret);
if ((ret = __memp_fget(mpf, &root_pgno, 0, &h)) != 0) {
(void)__TLPUT(dbc, lock);
return (ret);
}
switch (TYPE(h)) {
case P_IBTREE:
for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
bi = GET_BINTERNAL(dbp, h, indx);
if (B_TYPE(bi->type) == B_OVERFLOW &&
(ret = __db_traverse_big(dbp,
((BOVERFLOW *)bi->data)->pgno,
callback, cookie)) != 0)
goto err;
if ((ret = __bam_traverse(
dbc, mode, bi->pgno, callback, cookie)) != 0)
goto err;
}
break;
case P_IRECNO:
for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
ri = GET_RINTERNAL(dbp, h, indx);
if ((ret = __bam_traverse(
dbc, mode, ri->pgno, callback, cookie)) != 0)
goto err;
}
break;
case P_LBTREE:
inp = P_INP(dbp, h);
for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) {
bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_OVERFLOW &&
(indx + P_INDX >= NUM_ENT(h) ||
inp[indx] != inp[indx + P_INDX])) {
if ((ret = __db_traverse_big(dbp,
GET_BOVERFLOW(dbp, h, indx)->pgno,
callback, cookie)) != 0)
goto err;
}
bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
if (B_TYPE(bk->type) == B_DUPLICATE &&
(ret = __bam_traverse(dbc, mode,
GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
callback, cookie)) != 0)
goto err;
if (B_TYPE(bk->type) == B_OVERFLOW &&
(ret = __db_traverse_big(dbp,
GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
callback, cookie)) != 0)
goto err;
}
break;
case P_LDUP:
case P_LRECNO:
for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_OVERFLOW &&
(ret = __db_traverse_big(dbp,
GET_BOVERFLOW(dbp, h, indx)->pgno,
callback, cookie)) != 0)
goto err;
}
break;
default:
return (__db_pgfmt(dbp->dbenv, h->pgno));
}
ret = callback(dbp, h, cookie, &already_put);
err: if (!already_put && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
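/*
 * Callbacks passed to __bam_traverse have the shape used by
 * __bam_stat_callback above: they receive each page once and set *putp
 * only if they release the page themselves.  A minimal counting callback
 * in that shape; it is a sketch relying on the library-internal headers
 * this file already includes, and count_callback/page_counts are
 * hypothetical names, not BDB symbols.
 */
struct page_counts {
	unsigned long internal_pages, leaf_pages, entries;
};

static int
count_callback(dbp, h, cookie, putp)
	DB *dbp;
	PAGE *h;
	void *cookie;
	int *putp;
{
	struct page_counts *pc;

	COMPQUIET(dbp, NULL);
	pc = cookie;
	*putp = 0;		/* the caller puts the page back */
	switch (TYPE(h)) {
	case P_IBTREE:
	case P_IRECNO:
		++pc->internal_pages;
		pc->entries += NUM_ENT(h);
		break;
	case P_LBTREE:
	case P_LDUP:
	case P_LRECNO:
		++pc->leaf_pages;
		pc->entries += NUM_ENT(h);
		break;
	default:		/* overflow pages, etc. */
		break;
	}
	return (0);
}
/*
 * It would be invoked from cursor code as
 *	__bam_traverse(dbc, DB_LOCK_READ, cp->root, count_callback, &pc);
 * mirroring the call in __bam_stat above.
 */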

View file

@ -1,159 +0,0 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2005
* Sleepycat Software. All rights reserved.
*
* $Id: bt_upgrade.c,v 12.1 2005/06/16 20:20:23 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <string.h>
#endif
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/db_upgrade.h"
#include "dbinc/btree.h"
/*
* __bam_30_btreemeta --
* Upgrade the metadata pages from version 6 to version 7.
*
* PUBLIC: int __bam_30_btreemeta __P((DB *, char *, u_int8_t *));
*/
int
__bam_30_btreemeta(dbp, real_name, buf)
DB *dbp;
char *real_name;
u_int8_t *buf;
{
BTMETA30 *newmeta;
BTMETA2X *oldmeta;
DB_ENV *dbenv;
int ret;
dbenv = dbp->dbenv;
newmeta = (BTMETA30 *)buf;
oldmeta = (BTMETA2X *)buf;
/*
* Move fields from the end of the structure up, so we do not
* overwrite anything we still need. Since we are going to create
* a new uid, we can move the fields at the end of the structure
* first, overwriting the old uid.
*/
newmeta->re_pad = oldmeta->re_pad;
newmeta->re_len = oldmeta->re_len;
newmeta->minkey = oldmeta->minkey;
newmeta->maxkey = oldmeta->maxkey;
newmeta->dbmeta.free = oldmeta->free;
newmeta->dbmeta.flags = oldmeta->flags;
newmeta->dbmeta.type = P_BTREEMETA;
newmeta->dbmeta.version = 7;
/* Replace the unique ID. */
if ((ret = __os_fileid(dbenv, real_name, 1, buf + 36)) != 0)
return (ret);
newmeta->root = 1;
return (0);
}
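/*
 * The copy order matters because the old and new metadata layouts overlay
 * the same page buffer.  A standalone toy version of the same constraint:
 * the layouts below are made up (they are not real BDB metadata formats),
 * but as in the upgrade functions here, overlapping moves must either go
 * through memmove or copy fields in an order that never clobbers an
 * unread source field.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct old_rec { uint32_t unused; uint32_t id[4]; uint32_t re_len; };
struct new_rec { uint32_t id[4]; uint32_t re_len; uint32_t re_pad; };

int
main(void)
{
	union { struct old_rec o; struct new_rec n; } u;

	memset(&u, 0, sizeof(u));
	u.o.id[0] = 0x11111111;
	u.o.id[3] = 0x44444444;
	u.o.re_len = 128;

	/* id moves down first (overlapping copy), then re_len, then the new field. */
	memmove(u.n.id, u.o.id, sizeof(u.n.id));
	u.n.re_len = u.o.re_len;
	u.n.re_pad = 0x20;

	printf("id[3]=%#x re_len=%u re_pad=%#x\n",
	    (unsigned)u.n.id[3], (unsigned)u.n.re_len, (unsigned)u.n.re_pad);
	/* prints id[3]=0x44444444 re_len=128 re_pad=0x20 */
	return (0);
}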
/*
* __bam_31_btreemeta --
* Upgrade the database from version 7 to version 8.
*
* PUBLIC: int __bam_31_btreemeta
* PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
*/
int
__bam_31_btreemeta(dbp, real_name, flags, fhp, h, dirtyp)
DB *dbp;
char *real_name;
u_int32_t flags;
DB_FH *fhp;
PAGE *h;
int *dirtyp;
{
BTMETA31 *newmeta;
BTMETA30 *oldmeta;
COMPQUIET(dbp, NULL);
COMPQUIET(real_name, NULL);
COMPQUIET(fhp, NULL);
newmeta = (BTMETA31 *)h;
oldmeta = (BTMETA30 *)h;
/*
* Copy the affected fields down the page.
* The fields may overlap each other so we
* start at the bottom and use memmove.
*/
newmeta->root = oldmeta->root;
newmeta->re_pad = oldmeta->re_pad;
newmeta->re_len = oldmeta->re_len;
newmeta->minkey = oldmeta->minkey;
newmeta->maxkey = oldmeta->maxkey;
memmove(newmeta->dbmeta.uid,
oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
newmeta->dbmeta.record_count = 0;
newmeta->dbmeta.key_count = 0;
ZERO_LSN(newmeta->dbmeta.unused3);
/* Set the version number. */
newmeta->dbmeta.version = 8;
/* Upgrade the flags. */
if (LF_ISSET(DB_DUPSORT))
F_SET(&newmeta->dbmeta, BTM_DUPSORT);
*dirtyp = 1;
return (0);
}
/*
* __bam_31_lbtree --
* Upgrade the database btree leaf pages.
*
* PUBLIC: int __bam_31_lbtree
* PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
*/
int
__bam_31_lbtree(dbp, real_name, flags, fhp, h, dirtyp)
DB *dbp;
char *real_name;
u_int32_t flags;
DB_FH *fhp;
PAGE *h;
int *dirtyp;
{
BKEYDATA *bk;
db_pgno_t pgno;
db_indx_t indx;
int ret;
ret = 0;
for (indx = O_INDX; indx < NUM_ENT(h); indx += P_INDX) {
bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_DUPLICATE) {
pgno = GET_BOVERFLOW(dbp, h, indx)->pgno;
if ((ret = __db_31_offdup(dbp, real_name, fhp,
LF_ISSET(DB_DUPSORT) ? 1 : 0, &pgno)) != 0)
break;
if (pgno != GET_BOVERFLOW(dbp, h, indx)->pgno) {
*dirtyp = 1;
GET_BOVERFLOW(dbp, h, indx)->pgno = pgno;
}
}
}
return (ret);
}

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff