From dc80fc2d841b3d872e07ef71a4f6114db44f3197 Mon Sep 17 00:00:00 2001 From: unknown <brian@avenger.(none)> Date: Mon, 29 Nov 2004 01:40:25 -0800 Subject: [PATCH] Adding support for archive to handle delayed inserts. sql/examples/ha_archive.cc: Added support for delayed inserts. What delayed inserts mean for archive is that a sync call is not forced with the next select. Instant performance gain, plus if you aren't concerned about having a consistent read you won't be forced to have less than optimized compression. sql/examples/ha_archive.h: Added flags for delayed inserts and added table flag so that the server knows that archive can support them. --- sql/examples/ha_archive.cc | 22 ++++++++++++++++++---- sql/examples/ha_archive.h | 14 ++++++++------ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc index ea8b3694109..b03e655fef7 100644 --- a/sql/examples/ha_archive.cc +++ b/sql/examples/ha_archive.cc @@ -305,6 +305,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table) share->use_count= 0; share->table_name_length= length; share->table_name= tmp_name; + share->delayed= FALSE; fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME); fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME); strmov(share->table_name,table_name); @@ -536,7 +537,11 @@ int ha_archive::write_row(byte * buf) pthread_mutex_lock(&share->mutex); written= gzwrite(share->archive_write, buf, table->reclength); DBUG_PRINT("ha_archive::get_row", ("Wrote %d bytes expected %d", written, table->reclength)); - share->dirty= TRUE; + if (!delayed_insert) + share->dirty= TRUE; + else + share->delayed= TRUE; + if (written != table->reclength) goto error; /* @@ -594,6 +599,7 @@ int ha_archive::rnd_init(bool scan) { gzflush(share->archive_write, Z_SYNC_FLUSH); share->dirty= FALSE; + share->delayed= FALSE; } pthread_mutex_unlock(&share->mutex); } @@ -628,9 +634,12 
@@ int ha_archive::get_row(gzFile file_to_read, byte *buf) if (read == 0) DBUG_RETURN(HA_ERR_END_OF_FILE); - /* If the record is the wrong size, the file is probably damaged */ + /* If the record is the wrong size, the file is probably damaged, unless + we are dealing with a delayed insert. In that case we can assume the file is ok, + but our row count doesn't match our data since the file has not been flushed. + */ if ((ulong) read != table->reclength) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(share->delayed ? HA_ERR_END_OF_FILE : HA_ERR_CRASHED_ON_USAGE); /* Calculate blob length, we use this for our buffer */ for (field=table->blob_field; *field ; field++) @@ -648,7 +657,7 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf) { read= gzread(file_to_read, last, size); if ((size_t) read != size) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(share->delayed ? HA_ERR_END_OF_FILE : HA_ERR_CRASHED_ON_USAGE); (*field)->set_ptr(size, last); last += size; } @@ -839,6 +848,11 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { + if (lock_type == TL_WRITE_DELAYED) + delayed_insert= TRUE; + else + delayed_insert= FALSE; + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { /* diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h index eb78b28034c..a3f2327b4a7 100644 --- a/sql/examples/ha_archive.h +++ b/sql/examples/ha_archive.h @@ -32,10 +32,11 @@ typedef struct st_archive_share { uint table_name_length,use_count; pthread_mutex_t mutex; THR_LOCK lock; - File meta_file; /* Meta file we use */ - gzFile archive_write; /* Archive file we are working with */ - bool dirty; /* Flag for if a flush should occur */ - ulonglong rows_recorded; /* Number of rows in tables */ + File meta_file; /* Meta file we use */ + gzFile archive_write; /* Archive file we are working with */ + bool dirty; /* Flag for if a flush should occur */ + ulonglong rows_recorded; /* Number of rows in tables */ + bool 
delayed; /* If a delayed insert has happened since open */ } ARCHIVE_SHARE; /* @@ -53,9 +54,10 @@ class ha_archive: public handler byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */ String buffer; /* Buffer used for blob storage */ ulonglong scan_rows; /* Number of rows left in scan */ + bool delayed_insert; /* If the insert is delayed */ public: - ha_archive(TABLE *table): handler(table) + ha_archive(TABLE *table): handler(table), delayed_insert(0) { /* Set our original buffer from pre-allocated memory */ buffer.set(byte_buffer, IO_SIZE, system_charset_info); @@ -72,7 +74,7 @@ public: ulong table_flags() const { return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT | - HA_FILE_BASED); + HA_FILE_BASED | HA_CAN_INSERT_DELAYED); } ulong index_flags(uint idx, uint part, bool all_parts) const {