Automerged

Sergey Petrunia 2008-07-17 20:22:39 +04:00
commit 4540ffa835
15 changed files with 105 additions and 40 deletions


@@ -452,7 +452,7 @@ Event_db_repository::table_scan_all_for_i_s(THD *thd, TABLE *schema_table,
READ_RECORD read_record_info;
DBUG_ENTER("Event_db_repository::table_scan_all_for_i_s");
init_read_record(&read_record_info, thd, event_table, NULL, 1, 0);
init_read_record(&read_record_info, thd, event_table, NULL, 1, 0, FALSE);
/*
rr_sequential, in read_record(), returns 137==HA_ERR_END_OF_FILE,
@@ -925,7 +925,7 @@ Event_db_repository::drop_events_by_field(THD *thd,
DBUG_VOID_RETURN;
/* only enabled events are in memory, so we go now and delete the rest */
init_read_record(&read_record_info, thd, table, NULL, 1, 0);
init_read_record(&read_record_info, thd, table, NULL, 1, 0, FALSE);
while (!ret && !(read_record_info.read_record(&read_record_info)) )
{
char *et_field= get_field(thd->mem_root, table->field[field]);


@@ -1149,7 +1149,7 @@ Events::load_events_from_db(THD *thd)
DBUG_RETURN(TRUE);
}
init_read_record(&read_record_info, thd, table, NULL, 0, 1);
init_read_record(&read_record_info, thd, table, NULL, 0, 1, FALSE);
while (!(read_record_info.read_record(&read_record_info)))
{
Event_queue_element *et;


@@ -410,6 +410,56 @@ static uchar *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count,
DBUG_RETURN(tmp);
}
#ifndef DBUG_OFF
/*
Print a text, SQL-like record representation into dbug trace.
Note: this function is a work in progress: at the moment
- column read bitmap is ignored (can print garbage for unused columns)
- there is no quoting
*/
static void dbug_print_record(TABLE *table, bool print_rowid)
{
char buff[1024];
Field **pfield;
String tmp(buff,sizeof(buff),&my_charset_bin);
DBUG_LOCK_FILE;
fprintf(DBUG_FILE, "record (");
for (pfield= table->field; *pfield ; pfield++)
fprintf(DBUG_FILE, "%s%s", (*pfield)->field_name, (pfield[1])? ", ":"");
fprintf(DBUG_FILE, ") = ");
fprintf(DBUG_FILE, "(");
for (pfield= table->field; *pfield ; pfield++)
{
Field *field= *pfield;
if (field->is_null())
fwrite("NULL", sizeof(char), 4, DBUG_FILE);
if (field->type() == MYSQL_TYPE_BIT)
(void) field->val_int_as_str(&tmp, 1);
else
field->val_str(&tmp);
fwrite(tmp.ptr(),sizeof(char),tmp.length(),DBUG_FILE);
if (pfield[1])
fwrite(", ", sizeof(char), 2, DBUG_FILE);
}
fprintf(DBUG_FILE, ")");
if (print_rowid)
{
fprintf(DBUG_FILE, " rowid ");
for (uint i=0; i < table->file->ref_length; i++)
{
fprintf(DBUG_FILE, "%x", (uchar)table->file->ref[i]);
}
}
fprintf(DBUG_FILE, "\n");
DBUG_UNLOCK_FILE;
}
#endif
/**
Search after sort_keys and write them into tempfile.
@@ -488,13 +538,10 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
current_thd->variables.read_buff_size);
}
READ_RECORD read_record_info;
if (quick_select)
{
if (select->quick->reset())
DBUG_RETURN(HA_POS_ERROR);
init_read_record(&read_record_info, current_thd, select->quick->head,
select, 1, 1);
}
/* Remember original bitmaps */
@@ -514,12 +561,13 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
{
if (quick_select)
{
if ((error= read_record_info.read_record(&read_record_info)))
if ((error= select->quick->get_next()))
{
error= HA_ERR_END_OF_FILE;
break;
}
file->position(sort_form->record[0]);
DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE););
}
else /* Not quick-select */
{
@@ -576,15 +624,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
if (thd->is_error())
break;
}
if (quick_select)
{
/*
index_merge quick select uses table->sort when retrieving rows, so free
resources it has allocated.
*/
end_read_record(&read_record_info);
}
else
if (!quick_select)
{
(void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */
if (!next_pos)


@@ -2175,8 +2175,8 @@ ulonglong get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
int test_if_number(char *str,int *res,bool allow_wildcards);
void change_byte(uchar *,uint,char,char);
void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
SQL_SELECT *select,
int use_record_cache, bool print_errors);
SQL_SELECT *select, int use_record_cache,
bool print_errors, bool disable_rr_cache);
void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bool print_error, uint idx);
void end_read_record(READ_RECORD *info);


@@ -7936,6 +7936,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
handler *file= head->file;
DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
file->extra(HA_EXTRA_KEYREAD);
head->prepare_for_position();
@@ -7994,15 +7995,17 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
}
DBUG_PRINT("info", ("ok"));
/* ok, all row ids are in Unique */
/*
Ok all rowids are in the Unique now. The next call will initialize
head->sort structure so it can be used to iterate through the rowids
sequence.
*/
result= unique->get(head);
delete unique;
doing_pk_scan= FALSE;
/* index_merge currently doesn't support "using index" at all */
file->extra(HA_EXTRA_NO_KEYREAD);
/* start table scan */
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1);
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE);
DBUG_RETURN(result);
}
@@ -8028,6 +8031,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
free_io_cache(head);
/* All rows from Unique have been retrieved, do a clustered PK scan */
if (pk_quick_select)
{
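
The comments added to read_keys_and_merge() above describe the sort-union scheme: rowids coming from the merged index scans are collected in a Unique object, deduplicated and sorted, and the rows are then fetched in ascending rowid order through head->sort, which is why init_read_record() is called with disable_rr_cache=TRUE here. As a rough standalone illustration of that idea (using std::set as a stand-in for the server's Unique class; not MySQL code):

  #include <cstdio>
  #include <set>
  #include <vector>

  int main()
  {
    /* Rowids produced by two hypothetical index range scans; note the overlap. */
    std::vector<unsigned long> scan1= {7, 3, 11, 42};
    std::vector<unsigned long> scan2= {3, 19, 42, 5};

    /* std::set deduplicates and keeps the rowids sorted, like the Unique object. */
    std::set<unsigned long> unique_rowids;
    for (unsigned long r : scan1) unique_rowids.insert(r);
    for (unsigned long r : scan2) unique_rowids.insert(r);

    /* Rows are then fetched in ascending rowid order, so the rr_from_cache
       re-ordering buffer has nothing to add, hence disable_rr_cache. */
    for (unsigned long r : unique_rowids)
      std::printf("fetch row at rowid %lu\n", r);
    return 0;
  }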


@@ -86,6 +86,23 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
The temporary file is normally used when the references don't fit into
a properly sized memory buffer. For most small queries the references
are stored in the memory buffer.
SYNOPSIS
init_read_record()
info OUT read structure
thd Thread handle
table Table the data [originally] comes from.
select SQL_SELECT structure. We may use select->quick or
select->file as the data source
use_record_cache Call file->extra_opt(HA_EXTRA_CACHE,...)
if we're going to do sequential read and some
additional conditions are satisfied.
print_error Copy this to info->print_error
disable_rr_cache Don't use rr_from_cache (used by sort-union
index-merge which produces rowid sequences that
are already ordered)
DESCRIPTION
This function sets up reading data via one of the methods:
The temporary file is also used when performing an update where a key is
modified.
@@ -140,7 +157,8 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
*/
void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
SQL_SELECT *select,
int use_record_cache, bool print_error)
int use_record_cache, bool print_error,
bool disable_rr_cache)
{
IO_CACHE *tempfile;
DBUG_ENTER("init_read_record");
@@ -191,7 +209,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
it doesn't make sense to use cache - we don't read from the table
and table->sort.io_cache is read sequentially
*/
if (!table->sort.addon_field &&
if (!disable_rr_cache &&
!table->sort.addon_field &&
! (specialflag & SPECIAL_SAFE_MODE) &&
thd->variables.read_rnd_buff_size &&
!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
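
The new SYNOPSIS block above documents the extended interface, and all call sites touched by this commit follow the same pattern. A minimal caller sketch with the new trailing disable_rr_cache argument (not standalone code: thd, table and select come from the surrounding server context; passing FALSE keeps the rowid-reorder cache available, i.e. the old behaviour):

  READ_RECORD info;

  /* The last argument is the new disable_rr_cache flag. */
  init_read_record(&info, thd, table, select, 1, 1, FALSE);
  while (!info.read_record(&info))
  {
    /* process the current row in table->record[0] */
  }
  end_read_record(&info);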


@@ -324,7 +324,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
acl_cache->clear(1); // Clear locked hostname cache
init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0);
init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0,
FALSE);
table->use_all_columns();
VOID(my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50));
while (!(read_record_info.read_record(&read_record_info)))
@@ -373,7 +374,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
end_read_record(&read_record_info);
freeze_size(&acl_hosts);
init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0);
init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0,FALSE);
table->use_all_columns();
VOID(my_init_dynamic_array(&acl_users,sizeof(ACL_USER),50,100));
password_length= table->field[2]->field_length /
@@ -561,7 +562,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
end_read_record(&read_record_info);
freeze_size(&acl_users);
init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0);
init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0,FALSE);
table->use_all_columns();
VOID(my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB),50,100));
while (!(read_record_info.read_record(&read_record_info)))


@@ -245,7 +245,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
DBUG_RETURN(TRUE);
}
if (usable_index==MAX_KEY)
init_read_record(&info,thd,table,select,1,1);
init_read_record(&info, thd, table, select, 1, 1, FALSE);
else
init_read_record_idx(&info, thd, table, 1, usable_index);
@@ -834,7 +834,7 @@ int multi_delete::do_deletes()
}
READ_RECORD info;
init_read_record(&info,thd,table,NULL,0,1);
init_read_record(&info, thd, table, NULL, 0, 1, FALSE);
/*
Ignore any rows not found in reference tables as they may already have
been deleted by foreign key handling


@@ -186,7 +186,7 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
int count= 0;
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, topics, select,1,0);
init_read_record(&read_record_info, thd, topics, select, 1, 0, FALSE);
while (!read_record_info.read_record(&read_record_info))
{
if (!select->cond->val_int()) // Doesn't match like
@@ -226,7 +226,7 @@ int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
int count= 0;
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, keywords, select,1,0);
init_read_record(&read_record_info, thd, keywords, select, 1, 0, FALSE);
while (!read_record_info.read_record(&read_record_info) && count<2)
{
if (!select->cond->val_int()) // Doesn't match like
@@ -350,7 +350,7 @@ int search_categories(THD *thd, TABLE *categories,
DBUG_ENTER("search_categories");
init_read_record(&read_record_info, thd, categories, select,1,0);
init_read_record(&read_record_info, thd, categories, select,1,0,FALSE);
while (!read_record_info.read_record(&read_record_info))
{
if (select && !select->cond->val_int())
@@ -384,7 +384,7 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
DBUG_ENTER("get_all_items_for_category");
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, items, select,1,0);
init_read_record(&read_record_info, thd, items, select,1,0,FALSE);
while (!read_record_info.read_record(&read_record_info))
{
if (!select->cond->val_int())


@@ -1361,7 +1361,7 @@ static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
goto end;
}
table= tables.table;
init_read_record(&read_record_info, new_thd, table, NULL, 1, 0);
init_read_record(&read_record_info, new_thd, table, NULL, 1, 0, FALSE);
table->use_all_columns();
/*
there're no other threads running yet, so we don't need a mutex.


@@ -11713,7 +11713,7 @@ join_init_read_record(JOIN_TAB *tab)
if (tab->select && tab->select->quick && tab->select->quick->reset())
return 1;
init_read_record(&tab->read_record, tab->join->thd, tab->table,
tab->select,1,1);
tab->select,1,1, FALSE);
return (*tab->read_record.read_record)(&tab->read_record);
}


@@ -182,7 +182,8 @@ static bool servers_load(THD *thd, TABLE_LIST *tables)
free_root(&mem, MYF(0));
init_alloc_root(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
init_read_record(&read_record_info,thd,table=tables[0].table,NULL,1,0);
init_read_record(&read_record_info,thd,table=tables[0].table,NULL,1,0,
FALSE);
while (!(read_record_info.read_record(&read_record_info)))
{
/* return_val is already TRUE, so no need to set */


@@ -7113,7 +7113,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE);
if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->row_count= 0;


@@ -152,7 +152,7 @@ void udf_init()
}
table= tables.table;
init_read_record(&read_record_info, new_thd, table, NULL,1,0);
init_read_record(&read_record_info, new_thd, table, NULL,1,0,FALSE);
table->use_all_columns();
while (!(error= read_record_info.read_record(&read_record_info)))
{


@@ -457,7 +457,7 @@ int mysql_update(THD *thd,
*/
if (used_index == MAX_KEY || (select && select->quick))
init_read_record(&info,thd,table,select,0,1);
init_read_record(&info, thd, table, select, 0, 1, FALSE);
else
init_read_record_idx(&info, thd, table, 1, used_index);
@@ -523,7 +523,7 @@ int mysql_update(THD *thd,
if (select && select->quick && select->quick->reset())
goto err;
table->file->try_semi_consistent_read(1);
init_read_record(&info,thd,table,select,0,1);
init_read_record(&info, thd, table, select, 0, 1, FALSE);
updated= found= 0;
/* Generate an error when trying to set a NOT NULL field to NULL. */