Mirror of https://github.com/MariaDB/server.git (synced 2025-01-19 13:32:33 +01:00)
Merge rkalimullin@work.mysql.com:/home/bk/mysql-4.1
into mysql.r18.ru:/usr/home/ram/work/mysql-4.1.bdb
commit c90917ade4
7 changed files with 79 additions and 32 deletions
@@ -536,11 +536,9 @@ swap_retry:
 	 * and even a checksum error isn't a reason to panic the environment.
 	 */
 	if ((ret = __db_chk_meta(dbenv, dbp, meta, do_metachk)) != 0) {
-		if (ret == -1) {
+		if (ret == -1)
 			__db_err(dbenv,
 			    "%s: metadata page checksum error", name);
-			ret = EINVAL;
-		}
 		goto bad_format;
 	}
 
@@ -577,7 +575,7 @@ swap_retry:
 
 bad_format:
 	__db_err(dbenv, "%s: unexpected file type or format", name);
-	return (ret);
+	return (ret == 0 ? EINVAL : ret);
 }
 
 /*
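The two hunks above move the choice of a generic error code: instead of forcing ret to EINVAL inside the checksum branch, the bad_format label now maps a zero ret to EINVAL on the way out, so no jump to the label can report success. A minimal standalone sketch of that error-path pattern (hypothetical function and names, not the library code):

#include <errno.h>

/* Illustrative only: the error label maps a zero status to a generic error
 * exactly once, so every path that jumps to it returns a failure. */
static int
open_thing(const char *name, int *out)
{
	int ret;

	ret = 0;
	if (name == NULL)		/* validation failed without setting ret */
		goto bad_format;
	*out = 42;			/* real work would happen here */
	return (0);

bad_format:
	/* ret may still be 0 here; never return success from an error path. */
	return (ret == 0 ? EINVAL : ret);
}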
@@ -268,6 +268,8 @@ __log_txn_lsn(dbenv, lsnp, mbytesp, bytesp)
 	if (mbytesp != NULL) {
 		*mbytesp = lp->stat.st_wc_mbytes;
 		*bytesp = (u_int32_t)(lp->stat.st_wc_bytes + lp->b_off);
+
+		lp->stat.st_wc_mbytes = lp->stat.st_wc_bytes = 0;
 	}
 
 	R_UNLOCK(dbenv, &dblp->reginfo);
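The added line zeroes the "written since checkpoint" counters at the moment they are reported, so each caller of __log_txn_lsn sees only the log volume written since the previous query. A minimal read-and-reset sketch of the same idea, using a plain pthread mutex in place of the log region lock (illustrative names only):

#include <pthread.h>

static pthread_mutex_t wc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int wc_mbytes, wc_bytes;	/* written since last report */

static void
log_written_since_ckp(unsigned int *mbytesp, unsigned int *bytesp)
{
	pthread_mutex_lock(&wc_lock);
	if (mbytesp != NULL) {
		*mbytesp = wc_mbytes;
		*bytesp = wc_bytes;
		/* Reset in the same critical section, so the next caller
		 * measures from this point on. */
		wc_mbytes = wc_bytes = 0;
	}
	pthread_mutex_unlock(&wc_lock);
}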
@@ -344,6 +344,23 @@ __memp_fopen_int(dbmfp, mfp, path, flags, mode, pagesize)
 		goto err;
 	}
+
+	/*
+	 * Figure out the file's size.
+	 *
+	 * !!!
+	 * We can't use off_t's here, or in any code in the mainline library
+	 * for that matter.  (We have to use them in the os stubs, of course,
+	 * as there are system calls that take them as arguments.)  The reason
+	 * is some customers build in environments where an off_t is 32-bits,
+	 * but still run where offsets are 64-bits, and they pay us a lot of
+	 * money.
+	 */
+	if ((ret = __os_ioinfo(
+	    dbenv, rpath, dbmfp->fhp, &mbytes, &bytes, NULL)) != 0) {
+		__db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
+		goto err;
+	}
 
 	/*
 	 * Get the file id if we weren't given one.  Generated file id's
 	 * don't use timestamps, otherwise there'd be no chance of any
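The comment explains why __os_ioinfo reports the size as separate megabyte and byte counts rather than as one off_t. A standalone sketch of the 32-bit arithmetic this allows, assuming a page size that divides one megabyte evenly (hypothetical helper, not library code):

#include <stdio.h>

#define	MEGABYTE	(1024 * 1024)

/* Derive a page count from split megabyte/byte counts, never forming a
 * 64-bit (or off_t) byte offset. */
static unsigned int
pages_in_file(unsigned int mbytes, unsigned int bytes, unsigned int pagesize)
{
	return (mbytes * (MEGABYTE / pagesize) + bytes / pagesize);
}

int
main(void)
{
	/* A 6 GB file with 4 KB pages: 6144 * 256 = 1572864 pages. */
	printf("%u\n", pages_in_file(6 * 1024, 0, 4096));
	return (0);
}

Even for files far larger than 4 GB, the page count still fits in 32 bits, which is the point of splitting the size.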
@@ -470,6 +487,7 @@ alloc:	/* Allocate and initialize a new MPOOLFILE. */
 		F_SET(mfp, MP_DIRECT);
 	if (LF_ISSET(DB_EXTENT))
 		F_SET(mfp, MP_EXTENT);
+	F_SET(mfp, MP_CAN_MMAP);
 
 	if (path == NULL)
 		F_SET(mfp, MP_TEMP);
@@ -479,21 +497,6 @@ alloc:	/* Allocate and initialize a new MPOOLFILE. */
 		 * and find the number of the last page in the file, all the
 		 * time being careful not to overflow 32 bits.
 		 *
-		 * !!!
-		 * We can't use off_t's here, or in any code in the mainline
-		 * library for that matter.  (We have to use them in the os
-		 * stubs, of course, as there are system calls that take them
-		 * as arguments.)  The reason is that some customers build in
-		 * environments where an off_t is 32-bits, but still run where
-		 * offsets are 64-bits, and they pay us a lot of money.
-		 */
-		if ((ret = __os_ioinfo(
-		    dbenv, rpath, dbmfp->fhp, &mbytes, &bytes, NULL)) != 0) {
-			__db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
-			goto err;
-		}
-
-		/*
 		 * During verify or recovery, we might have to cope with a
 		 * truncated file; if the file size is not a multiple of the
 		 * page size, round down to a page, we'll take care of the
@@ -582,7 +585,7 @@ check_map:
 	 * compiler will perpetrate, doing the comparison in a portable way is
 	 * flatly impossible.  Hope that mmap fails if the file is too large.
 	 */
-#define	DB_MAXMMAPSIZE	(10 * 1024 * 1024)	/* 10 Mb. */
+#define	DB_MAXMMAPSIZE	(10 * 1024 * 1024)	/* 10 MB. */
 	if (F_ISSET(mfp, MP_CAN_MMAP)) {
 		if (path == NULL)
 			F_CLR(mfp, MP_CAN_MMAP);
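Because, per the comment, a portable comparison against the platform's real mmap limit is not possible, the code simply caps memory-mapped files at DB_MAXMMAPSIZE. A hedged sketch of how such a cap can be tested against the split (mbytes, bytes) pair without ever building one wide byte offset (illustrative names, not the library's logic):

#define	MEGABYTE	(1024 * 1024)
#define	MAXMMAPSIZE	(10 * 1024 * 1024)	/* 10 MB cap */

/* Compare whole megabytes first; only consult the remainder on a tie. */
static int
exceeds_mmap_cap(unsigned int mbytes, unsigned int bytes)
{
	if (mbytes > MAXMMAPSIZE / MEGABYTE)
		return (1);
	return (mbytes == MAXMMAPSIZE / MEGABYTE && bytes > 0);
}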
@@ -1198,6 +1198,9 @@ gap_check:	lp->wait_recs = 0;
 		 * replica get flushed now and again.
 		 */
 		ret = dbenv->log_flush(dbenv, &ckp_lsn);
+		/* Update the last_ckp in the txn region. */
+		if (ret == 0)
+			__txn_updateckp(dbenv, &rp->lsn);
 		break;
 	case DB___txn_regop:
 		if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
@@ -1209,18 +1209,7 @@ do_ckp:	/* Look through the active transactions for the lowest begin LSN. */
 			return (ret);
 		}
 
-		/*
-		 * We want to make sure last_ckp only moves forward; since
-		 * we drop locks above and in log_put, it's possible
-		 * for two calls to __txn_ckp_log to finish in a different
-		 * order from how they were called.
-		 */
-		R_LOCK(dbenv, &mgr->reginfo);
-		if (log_compare(&region->last_ckp, &ckp_lsn) < 0) {
-			region->last_ckp = ckp_lsn;
-			(void)time(&region->time_ckp);
-		}
-		R_UNLOCK(dbenv, &mgr->reginfo);
+		__txn_updateckp(dbenv, &ckp_lsn);
 	}
 	return (0);
 }
@@ -1404,3 +1393,36 @@ __txn_reset(dbenv)
 	return (__txn_recycle_log(dbenv,
 	    NULL, &scrap, 0, TXN_MINIMUM, TXN_MAXIMUM));
 }
+
+/*
+ * __txn_updateckp --
+ *	Update the last_ckp field in the transaction region.  This happens
+ * at the end of a normal checkpoint and also when a replication client
+ * receives a checkpoint record.
+ *
+ * PUBLIC: void __txn_updateckp __P((DB_ENV *, DB_LSN *));
+ */
+void
+__txn_updateckp(dbenv, lsnp)
+	DB_ENV *dbenv;
+	DB_LSN *lsnp;
+{
+	DB_TXNMGR *mgr;
+	DB_TXNREGION *region;
+
+	mgr = dbenv->tx_handle;
+	region = mgr->reginfo.primary;
+
+	/*
+	 * We want to make sure last_ckp only moves forward; since
+	 * we drop locks above and in log_put, it's possible
+	 * for two calls to __txn_ckp_log to finish in a different
+	 * order from how they were called.
+	 */
+	R_LOCK(dbenv, &mgr->reginfo);
+	if (log_compare(&region->last_ckp, lsnp) < 0) {
+		region->last_ckp = *lsnp;
+		(void)time(&region->time_ckp);
+	}
+	R_UNLOCK(dbenv, &mgr->reginfo);
+}
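The last two hunks pull the "last_ckp only moves forward" logic out of __txn_checkpoint into the new __txn_updateckp helper, which the replication client path (the gap_check hunk above) can now reuse after flushing the log to the checkpoint LSN. A self-contained sketch, with simplified LSN and lock handling rather than the library's types, of why the log_compare guard matters when two checkpoint writers can finish out of order:

#include <stdio.h>
#include <time.h>

/* Simplified stand-ins for DB_LSN / log_compare. */
struct lsn { unsigned int file, offset; };

static int
lsn_compare(const struct lsn *a, const struct lsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}

static struct lsn last_ckp;	/* held under a region lock in the real code */
static time_t time_ckp;

static void
update_ckp(const struct lsn *lsnp)
{
	/* Only move forward: a smaller LSN arriving late must not win. */
	if (lsn_compare(&last_ckp, lsnp) < 0) {
		last_ckp = *lsnp;
		(void)time(&time_ckp);
	}
}

int
main(void)
{
	struct lsn newer = { 3, 4096 }, older = { 3, 1024 };

	update_ckp(&newer);
	update_ckp(&older);	/* completed out of order; must not win */
	printf("last_ckp = [%u][%u]\n", last_ckp.file, last_ckp.offset);
	return (0);
}

Running it prints last_ckp = [3][4096]; the stale, older LSN is ignored.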
@@ -1940,6 +1940,24 @@ int ha_berkeley::delete_table(const char *name)
   DBUG_RETURN(error);
 }
 
+
+int ha_berkeley::rename_table(const char * from, const char * to)
+{
+  int error;
+  char from_buff[FN_REFLEN];
+  char to_buff[FN_REFLEN];
+
+  if ((error= db_create(&file, db_env, 0)))
+    my_errno= error;
+  else
+    error= file->rename(file,
+                        fn_format(from_buff, from, "", ha_berkeley_ext, 2 | 4),
+                        NULL, fn_format(to_buff, to, "", ha_berkeley_ext,
+                                        2 | 4), 0);
+  return error;
+}
+
+
 /*
   How many seeks it will take to read through the table
   This is to be comparable to the number returned by records_in_range so
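rename_table creates a throwaway DB handle and hands it to Berkeley DB's rename method; a handle passed to DB->rename may not be used again whether or not the call succeeds, which is why a fresh one is created for each rename. A minimal sketch of that calling pattern (placeholder function name; assumes the libdb headers are available):

#include <db.h>

/* Mirror of the handler's pattern: one fresh handle per rename, because
 * DB->rename consumes the handle. */
static int
rename_bdb_file(DB_ENV *env, const char *from, const char *to)
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, env, 0)) != 0)
		return (ret);
	/* NULL database name: rename the whole physical file. */
	return (dbp->rename(dbp, from, NULL, to, 0));
}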
@@ -152,6 +152,7 @@ class ha_berkeley: public handler
   int create(const char *name, register TABLE *form,
 	     HA_CREATE_INFO *create_info);
   int delete_table(const char *name);
+  int rename_table(const char* from, const char* to);
   THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
 			     enum thr_lock_type lock_type);
 