From 2f657ee9d4aa0a1702149ee11546018a4e5a23b7 Mon Sep 17 00:00:00 2001
From: "brian@zim.tangent.org" <>
Date: Sat, 21 Jan 2006 04:49:26 -0800
Subject: [PATCH] Turns out that the bulk insert interface was not well
 documented. I fixed the issue in the archive engine; next I will update
 the comments in handler. I should also look through the other engines to
 see whether this is an issue elsewhere as well.
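
For reference, a minimal standalone sketch of the gate this patch adds.
The should_use_bulk_insert helper and the ha_rows stand-in typedef are
illustrative only and not part of the patch; a row estimate of 0 is
treated as "no estimate available", which the patched code takes to mean
bulk mode should be enabled:

    #include <cstdio>

    /* Same threshold the patch defines in ha_archive.cc. */
    #define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2

    typedef unsigned long long ha_rows; /* stand-in for the server typedef */

    /* Illustrative helper: mirrors the condition added to
       ha_archive::start_bulk_insert(). rows == 0 means the caller had no
       estimate, so we assume a large insert and enable bulk mode. */
    static bool should_use_bulk_insert(ha_rows rows)
    {
      return !rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT;
    }

    int main(void)
    {
      std::printf("%d %d %d\n",
                  should_use_bulk_insert(0)  ? 1 : 0,  /* unknown   -> bulk    */
                  should_use_bulk_insert(1)  ? 1 : 0,  /* one row   -> no bulk */
                  should_use_bulk_insert(10) ? 1 : 0); /* many rows -> bulk    */
      return 0;  /* prints: 1 0 1 */
    }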

---
 sql/ha_archive.cc | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
index c60d40c2685..68219151186 100644
--- a/sql/ha_archive.cc
+++ b/sql/ha_archive.cc
@@ -135,6 +135,11 @@ static HASH archive_open_tables;
 #define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
 #define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
 
+/*
+  Minimum estimated row count for which bulk insert will be used.
+*/
+#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2
+
 /* dummy handlerton - only to have something to return from archive_db_init */
 handlerton archive_hton = {
   "ARCHIVE",
@@ -1026,7 +1031,8 @@ void ha_archive::info(uint flag)
 void ha_archive::start_bulk_insert(ha_rows rows)
 {
   DBUG_ENTER("ha_archive::start_bulk_insert");
-  bulk_insert= TRUE;
+  if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
+    bulk_insert= TRUE;
   DBUG_VOID_RETURN;
 }