Merge bk-internal.mysql.com:/home/bk/mysql-5.1
into chilla.local:/home/mydev/mysql-5.1-axmrg

commit d0ad6cd45b
14 changed files with 1141 additions and 360 deletions

@@ -131,10 +131,10 @@ ADD_SUBDIRECTORY(dbug)
ADD_SUBDIRECTORY(strings)
ADD_SUBDIRECTORY(regex)
ADD_SUBDIRECTORY(mysys)
ADD_SUBDIRECTORY(zlib)
ADD_SUBDIRECTORY(extra/yassl)
ADD_SUBDIRECTORY(extra/yassl/taocrypt)
ADD_SUBDIRECTORY(extra)
ADD_SUBDIRECTORY(zlib)
ADD_SUBDIRECTORY(storage/heap)
ADD_SUBDIRECTORY(storage/myisam)
ADD_SUBDIRECTORY(storage/myisammrg)

@@ -95,6 +95,7 @@ ADD_EXECUTABLE(mysqladmin mysqladmin.cc)
TARGET_LINK_LIBRARIES(mysqladmin mysqlclient mysys dbug yassl taocrypt zlib wsock32)

ADD_EXECUTABLE(mysqlslap mysqlslap.c)
SET_SOURCE_FILES_PROPERTIES(mysqlslap.c PROPERTIES COMPILE_FLAGS "-DTHREADS")
TARGET_LINK_LIBRARIES(mysqlslap mysqlclient mysys yassl taocrypt zlib wsock32 dbug)

ADD_EXECUTABLE(echo echo.c)

@@ -76,6 +76,7 @@ mysqlimport_LDADD = $(CXXLDFLAGS) $(CLIENT_THREAD_LIBS) \
mysqlshow_SOURCES= mysqlshow.c

mysqlslap_SOURCES= mysqlslap.c
mysqlslap_CFLAGS= -DTHREAD -UUNDEF_THREADS_HACK
mysqlslap_LDADD = $(CXXLDFLAGS) $(CLIENT_THREAD_LIBS) \
@CLIENT_EXTRA_LDFLAGS@ \
$(LIBMYSQLCLIENT_LA) \

@@ -49,7 +49,6 @@ enum options_client
  OPT_TRIGGERS,
  OPT_MYSQL_ONLY_PRINT,
  OPT_MYSQL_LOCK_DIRECTORY,
  OPT_MYSQL_SLAP_SLAVE,
  OPT_USE_THREADS,
  OPT_IMPORT_USE_THREADS,
  OPT_MYSQL_NUMBER_OF_QUERY,
@@ -58,6 +57,14 @@ enum options_client
  OPT_TZ_UTC, OPT_AUTO_CLOSE, OPT_CREATE_SLAP_SCHEMA,
  OPT_SLAP_CSV, OPT_SLAP_CREATE_STRING,
  OPT_SLAP_AUTO_GENERATE_SQL_LOAD_TYPE, OPT_SLAP_AUTO_GENERATE_WRITE_NUM,
  OPT_SLAP_AUTO_GENERATE_ADD_AUTO,
  OPT_SLAP_AUTO_GENERATE_GUID_PRIMARY,
  OPT_SLAP_AUTO_GENERATE_EXECUTE_QUERIES,
  OPT_SLAP_AUTO_GENERATE_SECONDARY_INDEXES,
  OPT_SLAP_AUTO_GENERATE_UNIQUE_WRITE_NUM,
  OPT_SLAP_AUTO_GENERATE_UNIQUE_QUERY_NUM,
  OPT_SLAP_PRE_QUERY,
  OPT_SLAP_POST_QUERY,
  OPT_MYSQL_REPLACE_INTO, OPT_BASE64_OUTPUT, OPT_SERVER_ID,
  OPT_FIX_TABLE_NAMES, OPT_FIX_DB_NAMES, OPT_SSL_VERIFY_SERVER_CERT,
  OPT_DEBUG_INFO, OPT_COLUMN_TYPES

client/mysqlslap.c (1228 changed lines)
File diff suppressed because it is too large

@@ -1885,8 +1885,7 @@ sub environment_setup () {
mtr_native_path($exe_mysqlslap) .
" -uroot " .
"--port=$master->[0]->{'port'} " .
"--socket=$master->[0]->{'path_sock'} --password= " .
"--lock-directory=$opt_tmpdir";
"--socket=$master->[0]->{'path_sock'} --password= ";

if ( $opt_debug )
{

@@ -143,3 +143,27 @@ select * from t1;
select * from t2;
select * from t1;
DROP SCHEMA IF EXISTS `mysqlslap`;
DROP SCHEMA IF EXISTS `mysqlslap`;
CREATE SCHEMA `mysqlslap`;
use mysqlslap;
set storage_engine=`heap`;
CREATE TABLE t1 (id int, name varchar(64));
create table t2(foo1 varchar(32), foo2 varchar(32));
INSERT INTO t1 VALUES (1, 'This is a test');
insert into t2 values ('test', 'test2');
SHOW TABLES;
select * from t1;
SHOW TABLES;
DROP SCHEMA IF EXISTS `mysqlslap`;
DROP SCHEMA IF EXISTS `mysqlslap`;
CREATE SCHEMA `mysqlslap`;
use mysqlslap;
set storage_engine=`myisam`;
CREATE TABLE t1 (id int, name varchar(64));
create table t2(foo1 varchar(32), foo2 varchar(32));
INSERT INTO t1 VALUES (1, 'This is a test');
insert into t2 values ('test', 'test2');
SHOW TABLES;
select * from t1;
SHOW TABLES;
DROP SCHEMA IF EXISTS `mysqlslap`;

@@ -4,7 +4,7 @@

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --use-threads
--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql

--exec $MYSQL_SLAP --only-print --iterations=20 --query="select * from t1" --create="CREATE TABLE t1 (id int, name varchar(64)); INSERT INTO t1 VALUES (1, 'This is a test')" --delimiter=";"
--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --query="select * from t1" --create="CREATE TABLE t1 (id int, name varchar(64)); INSERT INTO t1 VALUES (1, 'This is a test')" --delimiter=";"
@@ -14,3 +14,25 @@
--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --delimiter=";" --query="select * from t1;select * from t2" --create="CREATE TABLE t1 (id int, name varchar(64)); create table t2(foo1 varchar(32), foo2 varchar(32)); INSERT INTO t1 VALUES (1, 'This is a test'); insert into t2 values ('test', 'test2')"

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --create-schema=test_env

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --create-schema=test_env --auto-generate-sql-add-autoincrement

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement --auto-generate-sql-load-type=update

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement --auto-generate-sql-load-type=read

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement --auto-generate-sql-load-type=write

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement --auto-generate-sql-load-type=mixed

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --auto-generate-sql-load-type=update

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --auto-generate-sql-load-type=update --auto-generate-sql-execute-number=5

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --auto-generate-sql-load-type=key --auto-generate-sql-execute-number=5

--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --auto-generate-sql-load-type=key --auto-generate-sql-execute-number=5 --auto-generate-sql-secondary-indexes=3

--exec $MYSQL_SLAP --only-print --delimiter=";" --query="select * from t1;select * from t2" --create="CREATE TABLE t1 (id int, name varchar(64)); create table t2(foo1 varchar(32), foo2 varchar(32)); INSERT INTO t1 VALUES (1, 'This is a test'); insert into t2 values ('test', 'test2')" --engine="heap,myisam" --post-query="SHOW TABLES" --pre-query="SHOW TABLES";

@@ -66,6 +66,7 @@ struct show_table_authors_st show_table_authors[]= {
    "Parser, port to OS/2, storage engines and some random stuff" },
  { "Yuri Dario", "", "OS/2 port" },
  { "Andrei Elkin", "Espoo, Finland", "Replication" },
  { "Patrick Galbraith", "Sharon, NH", "Federated Engine, mysqlslap" },
  { "Sergei Golubchik", "Kerpen, Germany",
    "Full-text search, precision math" },
  { "Lenz Grimmer", "Hamburg, Germany",

@@ -217,14 +217,13 @@ int main(int argc, char *argv[])

azclose(&writer_handle);
azclose(&reader_handle);
exit(0);
unlink(TEST_FILENAME);

/* Start size tests */
printf("About to run 2/4/8 gig tests now, you may want to hit CTRL-C\n");
size_test(TWOGIG, 2097152L);
size_test(FOURGIG, 4194304L);
size_test(EIGHTGIG, 8388608L);
size_test(TWOGIG, 2088992L);
size_test(FOURGIG, 4177984L);
size_test(EIGHTGIG, 8355968L);

return 0;
}
@@ -234,6 +233,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for)
azio_stream writer_handle, reader_handle;
unsigned long long write_length;
unsigned long long read_length= 0;
unsigned long long count;
unsigned int ret;
char buffer[BUFFER_LEN];
int error;
@@ -244,8 +244,10 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for)
return 0;
}

for (write_length= 0; write_length < length ; write_length+= ret)
for (count= 0, write_length= 0; write_length < length ;
write_length+= ret)
{
count++;
ret= azwrite(&writer_handle, test_string, BUFFER_LEN);
if (ret != BUFFER_LEN)
{
@@ -257,7 +259,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for)
azflush(&writer_handle, Z_SYNC_FLUSH);
}
}
assert(write_length == length);
assert(write_length != count * BUFFER_LEN); /* Number of rows time BUFFER_LEN */
azflush(&writer_handle, Z_SYNC_FLUSH);

printf("Reading back data\n");

@@ -279,7 +281,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for)
}
}

assert(read_length == length);
assert(read_length == write_length);
assert(writer_handle.rows == rows_to_test_for);
azclose(&writer_handle);
azclose(&reader_handle);

@@ -55,8 +55,8 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
s->stream.zalloc = (alloc_func)0;
s->stream.zfree = (free_func)0;
s->stream.opaque = (voidpf)0;
memset(s->inbuf, 0, AZ_BUFSIZE);
memset(s->outbuf, 0, AZ_BUFSIZE);
memset(s->inbuf, 0, AZ_BUFSIZE_READ);
memset(s->outbuf, 0, AZ_BUFSIZE_WRITE);
s->stream.next_in = s->inbuf;
s->stream.next_out = s->outbuf;
s->stream.avail_in = s->stream.avail_out = 0;
@@ -109,7 +109,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
return Z_NULL;
}
}
s->stream.avail_out = AZ_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE_WRITE;

errno = 0;
s->file = fd < 0 ? my_open(path, Flags, MYF(0)) : fd;
@@ -159,7 +159,7 @@ void write_header(azio_stream *s)
char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
char *ptr= buffer;

s->block_size= AZ_BUFSIZE;
s->block_size= AZ_BUFSIZE_WRITE;
s->version = (unsigned char)az_magic[1];
s->minor_version = (unsigned char)az_magic[2];

@@ -224,7 +224,7 @@ int get_byte(s)
if (s->stream.avail_in == 0)
{
errno = 0;
s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0));
s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
@@ -260,7 +260,7 @@ void check_header(azio_stream *s)
if (len < 2) {
if (len) s->inbuf[0] = s->stream.next_in[0];
errno = 0;
len = (uInt)my_read(s->file, (byte *)s->inbuf + len, AZ_BUFSIZE >> len, MYF(0));
len = (uInt)my_read(s->file, (byte *)s->inbuf + len, AZ_BUFSIZE_READ >> len, MYF(0));
if (len == 0) s->z_err = Z_ERRNO;
s->stream.avail_in += len;
s->stream.next_in = s->inbuf;
@@ -455,7 +455,7 @@ unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int *
if (s->stream.avail_in == 0 && !s->z_eof) {

errno = 0;
s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0));
s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
@@ -522,12 +522,13 @@ unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len)
{

s->stream.next_out = s->outbuf;
if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE, MYF(0)) != AZ_BUFSIZE)
if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE_WRITE,
MYF(0)) != AZ_BUFSIZE_WRITE)
{
s->z_err = Z_ERRNO;
break;
}
s->stream.avail_out = AZ_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE_WRITE;
}
s->in += s->stream.avail_in;
s->out += s->stream.avail_out;
@@ -563,7 +564,7 @@ int do_flush (azio_stream *s, int flush)

for (;;)
{
len = AZ_BUFSIZE - s->stream.avail_out;
len = AZ_BUFSIZE_WRITE - s->stream.avail_out;

if (len != 0)
{
@@ -574,7 +575,7 @@ int do_flush (azio_stream *s, int flush)
return Z_ERRNO;
}
s->stream.next_out = s->outbuf;
s->stream.avail_out = AZ_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE_WRITE;
}
if (done) break;
s->out += s->stream.avail_out;
@@ -675,8 +676,8 @@ my_off_t azseek (s, offset, whence)
/* There was a zmemzero here if inbuf was null -Brian */
while (offset > 0)
{
uInt size = AZ_BUFSIZE;
if (offset < AZ_BUFSIZE) size = (uInt)offset;
uInt size = AZ_BUFSIZE_WRITE;
if (offset < AZ_BUFSIZE_WRITE) size = (uInt)offset;

size = azwrite(s, s->inbuf, size);
if (size == 0) return -1L;
@@ -719,8 +720,8 @@ my_off_t azseek (s, offset, whence)
}
while (offset > 0) {
int error;
unsigned int size = AZ_BUFSIZE;
if (offset < AZ_BUFSIZE) size = (int)offset;
unsigned int size = AZ_BUFSIZE_READ;
if (offset < AZ_BUFSIZE_READ) size = (int)offset;

size = azread(s, s->outbuf, size, &error);
if (error <= 0) return -1L;

@@ -196,7 +196,8 @@ extern "C" {
/* The deflate compression method (the only one supported in this version) */

#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
#define AZ_BUFSIZE 16384
#define AZ_BUFSIZE_READ 32768
#define AZ_BUFSIZE_WRITE 16384


typedef struct azio_stream {
@@ -204,8 +205,8 @@ typedef struct azio_stream {
int z_err; /* error code for last stream operation */
int z_eof; /* set if end of input file */
File file; /* .gz file */
Byte inbuf[AZ_BUFSIZE]; /* input buffer */
Byte outbuf[AZ_BUFSIZE]; /* output buffer */
Byte inbuf[AZ_BUFSIZE_READ]; /* input buffer */
Byte outbuf[AZ_BUFSIZE_WRITE]; /* output buffer */
uLong crc; /* crc32 of uncompressed data */
char *msg; /* error message */
int transparent; /* 1 if input file is not a .gz file */

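The two azlib.h hunks above replace the single 16 KB AZ_BUFSIZE with a 32 KB read buffer and a 16 KB write buffer. As a rough, illustrative sketch of how the azio calls visible in this diff (azopen, azwrite, azflush, azread, azclose) fit together; this is not code from the commit. The file name, the 1 KB buffer, the write-side open flags and the bare-bones error handling are placeholders; the three-argument azopen wrapper is the form used in the ha_archive.cc hunks below.

/* Illustrative azio round trip (not part of the patch).  Assumes the archive
   engine headers (azlib.h and the MySQL portability headers it relies on,
   which supply O_BINARY and Z_SYNC_FLUSH) are on the include path. */
#include <string.h>
#include <fcntl.h>
#include "azlib.h"

static int azio_roundtrip(void)
{
  azio_stream writer, reader;
  char buffer[1024];
  int error;

  if (!azopen(&writer, "example.az", O_CREAT | O_RDWR | O_BINARY))
    return 1;                              /* could not open for writing */
  memset(buffer, 'x', sizeof(buffer));
  if (azwrite(&writer, buffer, sizeof(buffer)) != sizeof(buffer))
    return 1;                              /* short write */
  azflush(&writer, Z_SYNC_FLUSH);          /* make the data visible to readers */

  if (!azopen(&reader, "example.az", O_RDONLY | O_BINARY))
    return 1;                              /* could not open for reading */
  while (azread(&reader, buffer, sizeof(buffer), &error) > 0)
    ;                                      /* drain the compressed stream */

  azclose(&reader);
  azclose(&writer);
  return 0;
}

The larger AZ_BUFSIZE_READ only changes how much my_read() pulls in per call on the read path; the calling convention sketched above is unaffected.
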
@@ -81,6 +81,7 @@

TODO:
Allow users to set compression level.
Allow adjustable block size.
Implement versioning, should be easy.
Allow for errors, find a way to mark bad rows.
Add optional feature so that rows can be flushed at interval (which will cause less
@@ -211,6 +212,7 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)

/* The size of the offset value we will use for position() */
ref_length= sizeof(my_off_t);
archive_reader_open= FALSE;
}

int archive_discover(handlerton *hton, THD* thd, const char *db,
@@ -434,6 +436,29 @@ int ha_archive::init_archive_writer()
}


int ha_archive::init_archive_reader()
{
DBUG_ENTER("ha_archive::init_archive_reader");
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
that is shared amoung all open tables.
*/
if (!archive_reader_open)
{
if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
{
DBUG_PRINT("ha_archive", ("Could not open archive read file"));
share->crashed= TRUE;
DBUG_RETURN(1);
}
archive_reader_open= TRUE;
}

DBUG_RETURN(0);
}


/*
We just implement one additional file extension.
*/

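Not from the patch itself: the contract the new init_archive_reader() gives the read path is simply "call it before touching the read stream; the first caller opens the reader and every later call is a no-op". The callers added in the hunks that follow (rnd_init(), optimize(), info(), check()) use it exactly that way. A generic sketch of the flag-guarded lazy open, with illustrative names only:

#include <stdbool.h>

/* Illustrative sketch of flag-guarded lazy initialisation; not patch code.
   open_reader() stands in for the azopen(..., O_RDONLY|O_BINARY) call and
   reader_open for the new archive_reader_open member. */
static int open_reader(void);            /* placeholder, defined elsewhere */
static bool reader_open= false;

static int ensure_reader(void)
{
  if (!reader_open)
  {
    if (!open_reader())                  /* first read-path caller pays for the open */
      return 1;                          /* callers treat this as a crashed table    */
    reader_open= true;                   /* every later caller skips straight through */
  }
  return 0;
}
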
@@ -477,7 +502,6 @@ int ha_archive::open(const char *name, int mode, uint open_options)

DBUG_ASSERT(share);


record_buffer= create_record_buffer(table->s->reclength +
ARCHIVE_ROW_HEADER_SIZE);

@@ -489,14 +513,6 @@ int ha_archive::open(const char *name, int mode, uint open_options)

thr_lock_data_init(&share->lock, &lock, NULL);

DBUG_PRINT("ha_archive", ("archive data_file_name %s", share->data_file_name));
if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
{
if (errno == EROFS || errno == EACCES)
DBUG_RETURN(my_errno= errno);
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}

DBUG_PRINT("ha_archive", ("archive table was crashed %s",
rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
@@ -533,8 +549,11 @@ int ha_archive::close(void)
destroy_record_buffer(record_buffer);

/* First close stream */
if (archive_reader_open)
{
if (azclose(&archive))
rc= 1;
}
/* then also close share */
rc|= free_share();

@@ -904,7 +923,7 @@ int ha_archive::index_read(byte *buf, const byte *key,
int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
uint key_len, enum ha_rkey_function find_flag)
{
int rc= 0;
int rc;
bool found= 0;
KEY *mkey= &table->s->key_info[index];
current_k_offset= mkey->key_part->offset;
@@ -914,22 +933,10 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,

DBUG_ENTER("ha_archive::index_read_idx");

/*
All of the buffer must be written out or we won't see all of the
data
*/
pthread_mutex_lock(&share->mutex);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
pthread_mutex_unlock(&share->mutex);
rc= rnd_init(TRUE);

/*
Set the position of the local read thread to the beginning postion.
*/
if (read_data_header(&archive))
{
rc= HA_ERR_CRASHED_ON_USAGE;
if (rc)
goto error;
}

while (!(get_row(&archive, buf)))
{
@@ -979,10 +986,11 @@ int ha_archive::rnd_init(bool scan)
if (share->crashed)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

init_archive_reader();

/* We rewind the file so that we can read from the beginning if scan */
if (scan)
{
scan_rows= share->rows_recorded;
DBUG_PRINT("info", ("archive will retrieve %llu rows",
(unsigned long long) scan_rows));
stats.records= 0;
@@ -991,17 +999,18 @@ int ha_archive::rnd_init(bool scan)
If dirty, we lock, and then reset/flush the data.
I found that just calling azflush() doesn't always work.
*/
pthread_mutex_lock(&share->mutex);
scan_rows= share->rows_recorded;
if (share->dirty == TRUE)
{
pthread_mutex_lock(&share->mutex);
if (share->dirty == TRUE)
{
DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->dirty= FALSE;
}
pthread_mutex_unlock(&share->mutex);
}
pthread_mutex_unlock(&share->mutex);

if (read_data_header(&archive))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

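The rnd_init() hunk above also narrows the critical section: the dirty flag is now tested once without the mutex and re-tested after taking it, so scans that find the share already clean never lock at all. A generic sketch of that double-checked flush, using illustrative names only (flush_rows() stands in for the azflush() call on the shared write stream, shared_state for ARCHIVE_SHARE):

#include <pthread.h>
#include <stdbool.h>

/* Illustrative double-checked flush; not code from the patch. */
struct shared_state
{
  pthread_mutex_t mutex;
  bool dirty;
};

static void flush_rows(struct shared_state *s);   /* placeholder for azflush() */

static void flush_if_dirty(struct shared_state *s)
{
  if (s->dirty)                        /* cheap unlocked pre-check           */
  {
    pthread_mutex_lock(&s->mutex);
    if (s->dirty)                      /* re-check while holding the lock    */
    {
      flush_rows(s);                   /* push buffered rows out to the file */
      s->dirty= false;
    }
    pthread_mutex_unlock(&s->mutex);
  }
}
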
@@ -1283,6 +1292,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
azio_stream writer;
char writer_filename[FN_REFLEN];

init_archive_reader();

// now we close both our writer and our reader for the rename
if (share->archive_write_open)
{
@@ -1475,6 +1486,7 @@ int ha_archive::info(uint flag)

if (flag & HA_STATUS_AUTO)
{
init_archive_reader();
azflush(&archive, Z_SYNC_FLUSH);
stats.auto_increment_value= archive.auto_increment;
}
@@ -1557,6 +1569,8 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
Now we will rewind the archive file so that we are positioned at the
start of the file.
*/
init_archive_reader();

if (!rc)
read_data_header(&archive);

@@ -71,6 +71,7 @@ class ha_archive: public handler
uint current_key_len;
uint current_k_offset;
archive_record_buffer *record_buffer;
bool archive_reader_open;

archive_record_buffer *create_record_buffer(unsigned int length);
void destroy_record_buffer(archive_record_buffer *r);

@@ -119,6 +120,7 @@ public:
ARCHIVE_SHARE *get_share(const char *table_name, int *rc);
int free_share();
int init_archive_writer();
int init_archive_reader();
bool auto_repair() const { return 1; } // For the moment we just do this
int read_data_header(azio_stream *file_to_read);
void position(const byte *record);