Merge gkodinov@bk-internal.mysql.com:/home/bk/mysql-5.0-opt
into magare.gmz:/home/kgeorge/mysql/autopush/B30825-new-5.0-opt
commit bc80b86379
10 changed files with 19 additions and 19 deletions
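Every hunk in this merge makes the same kind of change: a value held in one of the wider MySQL types (ha_rows, my_off_t, ulonglong) is assigned to a narrower variable or argument, and an explicit cast ((ulong), (uint), (size_t), (double)) is added at that point, presumably to address implicit-truncation warnings on 64-bit builds; the ha_federated and cost_group_min_max hunks also correct the declared type itself (time_t for timestamps, ha_rows for the record count). Below is a minimal sketch of the pattern, based on the cache-size logic in the first hunk; the typedef and the wrapper function are illustrative only, not code from the tree.

/* Sketch only: mirrors the explicit narrowing-cast pattern in the hunks below. */
typedef unsigned long long my_rows_t;               /* stand-in for ha_rows */

static unsigned long bulk_cache_size(my_rows_t rows,
                                     unsigned long total_keylength,
                                     unsigned long cache_size)
{
  if (rows && rows * total_keylength < cache_size)
    cache_size= (unsigned long) rows;               /* explicit narrowing cast */
  else
    cache_size/= total_keylength * 16;
  return cache_size;
}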
@@ -975,7 +975,7 @@ int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows)
     DBUG_RETURN(0);

   if (rows && rows*total_keylength < cache_size)
-    cache_size=rows;
+    cache_size= (ulong)rows;
   else
     cache_size/=total_keylength*16;

@@ -141,7 +141,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
     if ((records < UINT_MAX32) &&
        ((my_off_t) (records + 1) *
         (sort_length + sizeof(char*)) <= (my_off_t) memavl))
-      keys= records+1;
+      keys= (uint)records+1;
     else
       do
       {
@@ -349,7 +349,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
   sort_keys= (uchar **) NULL;

   memavl= max(sort_param->sortbuff_size, MIN_SORT_MEMORY);
-  idx= sort_param->sort_info->max_records;
+  idx= (uint)sort_param->sort_info->max_records;
   sort_length= sort_param->key_length;
   maxbuffer= 1;

@@ -2562,9 +2562,9 @@ int ha_federated::info(uint flag)
       data_file_length= records * mean_rec_length;

       if (row[12] != NULL)
-        update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error);
+        update_time= (time_t) my_strtoll10(row[12], (char**) 0, &error);
       if (row[13] != NULL)
-        check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error);
+        check_time= (time_t) my_strtoll10(row[13], (char**) 0, &error);
     }

     /*
@@ -175,7 +175,7 @@ void ha_heap::update_key_stats()
     else
     {
       ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
-      uint no_records= hash_buckets ? file->s->records/hash_buckets : 2;
+      uint no_records= hash_buckets ? (uint) file->s->records/hash_buckets : 2;
       if (no_records < 2)
         no_records= 2;
       key->rec_per_key[key->key_parts-1]= no_records;
@@ -5474,7 +5474,7 @@ ha_innobase::info(

 			table->key_info[i].rec_per_key[j]=
 			  rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 :
-			  rec_per_key;
+			  (ulong) rec_per_key;
 		}

 		index = dict_table_get_next_index_noninline(index);
@@ -1412,7 +1412,7 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
   DBUG_ENTER("ha_myisam::start_bulk_insert");
   THD *thd= current_thd;
   ulong size= min(thd->variables.read_buff_size,
-                  table->s->avg_row_length*rows);
+                  (ulong) (table->s->avg_row_length*rows));
   DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
                      (ulong) rows, size));

@@ -2206,7 +2206,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
   if (param->table->file->primary_key_is_clustered())
   {
     result= param->table->file->read_time(param->table->s->primary_key,
-                                          records, records);
+                                          (uint)records, records);
   }
   else
   {
@@ -2414,7 +2414,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,

   /* Add Unique operations cost */
   unique_calc_buff_size=
-    Unique::get_cost_calc_buff_size(non_cpk_scan_records,
+    Unique::get_cost_calc_buff_size((ulong)non_cpk_scan_records,
                                     param->table->file->ref_length,
                                     param->thd->variables.sortbuff_size);
   if (param->imerge_cost_buff_size < unique_calc_buff_size)
@@ -2426,7 +2426,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
   }

   imerge_cost +=
-    Unique::get_use_cost(param->imerge_cost_buff, non_cpk_scan_records,
+    Unique::get_use_cost(param->imerge_cost_buff, (uint)non_cpk_scan_records,
                          param->table->file->ref_length,
                          param->thd->variables.sortbuff_size);
   DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)",
@@ -2765,7 +2765,7 @@ ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
   info->is_covering= FALSE;
   info->index_scan_costs= 0.0;
   info->index_records= 0;
-  info->out_rows= param->table->file->records;
+  info->out_rows= (double) param->table->file->records;
   bitmap_clear_all(&info->covered_fields);
   return info;
 }
@@ -6757,7 +6757,7 @@ int QUICK_RANGE_SELECT::reset()
   if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER)
   {
     mrange_bufsiz= min(multi_range_bufsiz,
-                       (QUICK_SELECT_I::records + 1)* head->s->reclength);
+                       ((uint)QUICK_SELECT_I::records + 1)* head->s->reclength);

     while (mrange_bufsiz &&
            ! my_multi_malloc(MYF(MY_WME),
@@ -8359,7 +8359,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
                         bool have_min, bool have_max,
                         double *read_cost, ha_rows *records)
 {
-  uint table_records;
+  ha_rows table_records;
   uint num_groups;
   uint num_blocks;
   uint keys_per_block;
@@ -41,7 +41,7 @@ mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_lengt
     struct stat stat_buf;
     if (!fstat(file,&stat_buf))
     {
-      if (!(map=(byte*) my_mmap(0,(size=(ulong) stat_buf.st_size),PROT_READ,
+      if (!(map=(byte*) my_mmap(0,(size_t)(size=(ulong) stat_buf.st_size),PROT_READ,
                                 MAP_SHARED | MAP_NORESERVE,file,
                                 0L)))
       {
@@ -52,7 +52,7 @@ mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_lengt
     if (map && memcmp(map,magic,magic_length))
     {
       my_error(ER_WRONG_MAGIC, MYF(0), name);
-      VOID(my_munmap(map,size));
+      VOID(my_munmap(map,(size_t)size));
       map=0;
     }
     if (!map)
@@ -70,7 +70,7 @@ mapped_files::~mapped_files()
 #ifdef HAVE_MMAP
   if (file >= 0)
   {
-    VOID(my_munmap(map,size));
+    VOID(my_munmap(map,(size_t)size));
     VOID(my_close(file,MYF(0)));
     file= -1; map=0;
   }
|
@ -5915,7 +5915,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
|
|||
|
||||
/* Fix for EXPLAIN */
|
||||
if (sel->quick)
|
||||
join->best_positions[i].records_read= sel->quick->records;
|
||||
join->best_positions[i].records_read= (double)sel->quick->records;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -362,7 +362,7 @@ int mysql_update(THD *thd,
|
|||
init_read_record_idx(&info, thd, table, 1, used_index);
|
||||
|
||||
thd->proc_info="Searching rows for update";
|
||||
uint tmp_limit= limit;
|
||||
ha_rows tmp_limit= limit;
|
||||
|
||||
while (!(error=info.read_record(&info)) && !thd->killed)
|
||||
{
|
||||
|
|