Split lines before binary operators, not after them.

marko 2006-08-29 08:27:56 +00:00
parent 917941e44c
commit 27dffa4f7d
44 changed files with 340 additions and 344 deletions
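
The convention enforced by this commit: when an expression in the C sources has to be wrapped, the break is placed before the binary operator (==, !=, >=, &&, ||, +, *, /, ...), so the operator opens the continuation line and the shape of the expression stays visible at the left margin, rather than dangling at the end of the previous line. Below is a minimal stand-alone sketch of the two styles, the same transformation the hunks that follow apply throughout the tree; the helper page_size_of() and the variables are hypothetical, invented only for illustration, and the layout imitates the surrounding InnoDB code rather than quoting it.

#include <stdio.h>

/* Hypothetical helper, invented for this illustration only; it merely
gives the conditions below something long enough to need wrapping. */
static int
page_size_of(int page_no)
{
        return(16384 + page_no % 3);
}

int
main(void)
{
        int     left_page_no = 7;
        int     right_page_no = 10;

        /* Old style, removed by this commit: the binary operator is
        stranded at the end of the first line. */
        if (page_size_of(left_page_no) ==
            page_size_of(right_page_no)) {
                puts("sizes match (operator after the break)");
        }

        /* New style, applied by this commit: the continuation line
        starts with the binary operator, keeping the structure of the
        condition visible at the left margin. */
        if (page_size_of(left_page_no)
            == page_size_of(right_page_no)) {
                puts("sizes match (operator before the break)");
        }

        return(0);
}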

View file

@ -143,8 +143,8 @@ btr_root_get(
root_page_no = dict_tree_get_page(tree);
root = btr_page_get(space, root_page_no, RW_X_LATCH, mtr);
ut_a((ibool)!!page_is_comp(root) ==
dict_table_is_comp(tree->tree_index->table));
ut_a((ibool)!!page_is_comp(root)
== dict_table_is_comp(tree->tree_index->table));
return(root);
}
@ -597,8 +597,8 @@ btr_page_get_father_for_rec(
offsets = rec_get_offsets(node_ptr, index, offsets,
ULINT_UNDEFINED, &heap);
if (btr_node_ptr_get_child_page_no(node_ptr, offsets) !=
buf_frame_get_page_no(page)) {
if (UNIV_UNLIKELY(btr_node_ptr_get_child_page_no(node_ptr, offsets)
!= buf_frame_get_page_no(page))) {
rec_t* print_rec;
fputs("InnoDB: Dump of the child page:\n", stderr);
buf_page_print(buf_frame_align(page));
@ -632,8 +632,8 @@ btr_page_get_father_for_rec(
"Then dump + drop + reimport.\n", stderr);
}
ut_a(btr_node_ptr_get_child_page_no(node_ptr, offsets) ==
buf_frame_get_page_no(page));
ut_a(btr_node_ptr_get_child_page_no(node_ptr, offsets)
== buf_frame_get_page_no(page));
mem_heap_free(heap);
return(node_ptr);
@ -2599,11 +2599,10 @@ btr_index_rec_validate(
if ((dict_index_get_nth_field(index, i)->prefix_len == 0
&& len != UNIV_SQL_NULL && fixed_size
&& len != fixed_size)
||
(dict_index_get_nth_field(index, i)->prefix_len > 0
&& len != UNIV_SQL_NULL
&& len >
dict_index_get_nth_field(index, i)->prefix_len)) {
|| (dict_index_get_nth_field(index, i)->prefix_len > 0
&& len != UNIV_SQL_NULL
&& len
> dict_index_get_nth_field(index, i)->prefix_len)) {
btr_index_rec_validate_report(page, rec, index);
fprintf(stderr,
@ -2793,8 +2792,9 @@ loop:
left_page_no = btr_page_get_prev(page, &mtr);
ut_a((page_get_n_recs(page) > 0)
|| ((level == 0) &&
(buf_frame_get_page_no(page) == dict_tree_get_page(tree))));
|| ((level == 0)
&& (buf_frame_get_page_no(page)
== dict_tree_get_page(tree))));
if (right_page_no != FIL_NULL) {
rec_t* right_rec;
@ -2949,11 +2949,11 @@ loop:
} else {
right_node_ptr = btr_page_get_father_node_ptr
(tree, right_page, &mtr);
if (page_rec_get_next(node_ptr) !=
page_get_supremum_rec(father_page)) {
if (page_rec_get_next(node_ptr)
!= page_get_supremum_rec(father_page)) {
if (right_node_ptr !=
page_rec_get_next(node_ptr)) {
if (right_node_ptr
!= page_rec_get_next(node_ptr)) {
ret = FALSE;
fputs("InnoDB: node pointer to"
" the right page is wrong\n",

View file

@ -162,8 +162,8 @@ btr_cur_latch_leaves(
== buf_frame_get_page_no(page));
#endif /* UNIV_BTR_DEBUG */
ut_a(page_is_comp(get_page) == page_is_comp(page));
buf_block_align(get_page)->check_index_page_at_flush =
TRUE;
buf_block_align(get_page)->check_index_page_at_flush
= TRUE;
}
get_page = btr_page_get(space, page_no, RW_X_LATCH, mtr);
@ -179,8 +179,8 @@ btr_cur_latch_leaves(
ut_a(btr_page_get_prev(get_page, mtr)
== buf_frame_get_page_no(page));
#endif /* UNIV_BTR_DEBUG */
buf_block_align(get_page)->check_index_page_at_flush =
TRUE;
buf_block_align(get_page)->check_index_page_at_flush
= TRUE;
}
} else if (latch_mode == BTR_SEARCH_PREV) {
@ -195,8 +195,8 @@ btr_cur_latch_leaves(
ut_a(btr_page_get_next(cursor->left_page, mtr)
== buf_frame_get_page_no(page));
#endif /* UNIV_BTR_DEBUG */
ut_a(page_is_comp(cursor->left_page) ==
page_is_comp(page));
ut_a(page_is_comp(cursor->left_page)
== page_is_comp(page));
buf_block_align(cursor->left_page)
->check_index_page_at_flush = TRUE;
}
@ -452,9 +452,9 @@ retry_page_get:
ut_ad(insert_planned);
ut_ad(cursor->thr);
if (ibuf_should_try(index, ignore_sec_unique) &&
ibuf_insert(tuple, index, space, page_no,
cursor->thr)) {
if (ibuf_should_try(index, ignore_sec_unique)
&& ibuf_insert(tuple, index, space, page_no,
cursor->thr)) {
/* Insertion to the insert buffer succeeded */
cursor->flag = BTR_CUR_INSERT_TO_IBUF;
if (UNIV_LIKELY_NULL(heap)) {
@ -993,9 +993,9 @@ calculate_sizes_again:
/* Calculate the record size when entry is converted to a record */
rec_size = rec_get_converted_size(index, entry);
if (rec_size >=
ut_min(page_get_free_space_of_empty(page_is_comp(page)) / 2,
REC_MAX_DATA_SIZE)) {
if (rec_size
>= ut_min(page_get_free_space_of_empty(page_is_comp(page)) / 2,
REC_MAX_DATA_SIZE)) {
/* The record is so big that we have to store some fields
externally on separate database pages */
@ -1199,9 +1199,9 @@ btr_cur_pessimistic_insert(
}
}
if (rec_get_converted_size(index, entry) >=
ut_min(page_get_free_space_of_empty(page_is_comp(page)) / 2,
REC_MAX_DATA_SIZE)) {
if (rec_get_converted_size(index, entry)
>= ut_min(page_get_free_space_of_empty(page_is_comp(page)) / 2,
REC_MAX_DATA_SIZE)) {
/* The record is so big that we have to store some fields
externally on separate database pages */
@ -1911,10 +1911,10 @@ btr_cur_pessimistic_update(
ULINT_UNDEFINED, &heap);
n_ext_vect = btr_push_update_extern_fields(ext_vect, offsets, update);
if (UNIV_UNLIKELY(rec_get_converted_size(index, new_entry) >=
ut_min(page_get_free_space_of_empty
(page_is_comp(page)) / 2,
REC_MAX_DATA_SIZE))) {
if (UNIV_UNLIKELY(rec_get_converted_size(index, new_entry)
>= ut_min(page_get_free_space_of_empty
(page_is_comp(page)) / 2,
REC_MAX_DATA_SIZE))) {
big_rec_vec = dtuple_convert_big_rec(index, new_entry,
ext_vect, n_ext_vect);
@ -2880,8 +2880,8 @@ btr_estimate_number_of_different_key_vals(
ulint* offsets_rec = offsets_rec_;
ulint* offsets_next_rec= offsets_next_rec_;
*offsets_rec_ = (sizeof offsets_rec_) / sizeof *offsets_rec_;
*offsets_next_rec_ =
(sizeof offsets_next_rec_) / sizeof *offsets_next_rec_;
*offsets_next_rec_
= (sizeof offsets_next_rec_) / sizeof *offsets_next_rec_;
n_cols = dict_index_get_n_unique(index);
@ -2974,9 +2974,8 @@ btr_estimate_number_of_different_key_vals(
offsets_rec = rec_get_offsets(rec, index, offsets_rec,
ULINT_UNDEFINED, &heap);
total_external_size +=
btr_rec_get_externally_stored_len(rec,
offsets_rec);
total_external_size += btr_rec_get_externally_stored_len
(rec, offsets_rec);
mtr_commit(&mtr);
}
@ -2989,16 +2988,16 @@ btr_estimate_number_of_different_key_vals(
included in index->stat_n_leaf_pages) */
for (j = 0; j <= n_cols; j++) {
index->stat_n_diff_key_vals[j] =
(n_diff[j]
* (ib_longlong)index->stat_n_leaf_pages
+ BTR_KEY_VAL_ESTIMATE_N_PAGES - 1
+ total_external_size
+ not_empty_flag)
/ (BTR_KEY_VAL_ESTIMATE_N_PAGES
+ total_external_size);
index->stat_n_diff_key_vals[j]
= ((n_diff[j]
* (ib_longlong)index->stat_n_leaf_pages
+ BTR_KEY_VAL_ESTIMATE_N_PAGES - 1
+ total_external_size
+ not_empty_flag)
/ (BTR_KEY_VAL_ESTIMATE_N_PAGES
+ total_external_size));
/* If the tree is small, smaller than <
/* If the tree is small, smaller than
10 * BTR_KEY_VAL_ESTIMATE_N_PAGES + total_external_size, then
the above estimate is ok. For bigger trees it is common that we
do not see any borders between key values in the few pages
@ -3006,9 +3005,9 @@ btr_estimate_number_of_different_key_vals(
different key values, or even more. Let us try to approximate
that: */
add_on = index->stat_n_leaf_pages /
(10 * (BTR_KEY_VAL_ESTIMATE_N_PAGES
+ total_external_size));
add_on = index->stat_n_leaf_pages
/ (10 * (BTR_KEY_VAL_ESTIMATE_N_PAGES
+ total_external_size));
if (add_on > BTR_KEY_VAL_ESTIMATE_N_PAGES) {
add_on = BTR_KEY_VAL_ESTIMATE_N_PAGES;
@ -3292,8 +3291,8 @@ btr_push_update_extern_fields(
if (upd_get_nth_field(update, i)->extern_storage) {
ext_vect[n_pushed] =
upd_get_nth_field(update, i)->field_no;
ext_vect[n_pushed] = upd_get_nth_field
(update, i)->field_no;
n_pushed++;
}

View file

@ -227,8 +227,8 @@ btr_pcur_restore_position(
btr_pcur_get_btr_cur(cursor)->index, latch_mode,
btr_pcur_get_btr_cur(cursor), mtr);
cursor->block_when_stored =
buf_block_align(btr_pcur_get_page(cursor));
cursor->block_when_stored
= buf_block_align(btr_pcur_get_page(cursor));
return(FALSE);
}
@ -320,10 +320,10 @@ btr_pcur_restore_position(
the cursor can now be on a different page! But we can retain
the value of old_rec */
cursor->block_when_stored =
buf_block_align(btr_pcur_get_page(cursor));
cursor->modify_clock =
buf_block_get_modify_clock(cursor->block_when_stored);
cursor->block_when_stored = buf_block_align
(btr_pcur_get_page(cursor));
cursor->modify_clock = buf_block_get_modify_clock
(cursor->block_when_stored);
cursor->old_stored = BTR_PCUR_OLD_STORED;
mem_heap_free(heap);

View file

@ -160,8 +160,8 @@ and the io-operation for loading the page is queued. The io-handler thread
releases the X-lock on the frame and resets the io_fix field
when the io operation completes.
A thread may request the above operation using the buf_page_get-
function. It may then continue to request a lock on the frame.
A thread may request the above operation using the function
buf_page_get(). It may then continue to request a lock on the frame.
The lock is granted when the io-handler releases the x-lock.
Read-ahead
@ -371,8 +371,8 @@ buf_page_is_corrupted(
}
checksum = buf_calc_page_new_checksum(read_buf);
checksum_field = mach_read_from_4(read_buf +
FIL_PAGE_SPACE_OR_CHKSUM);
checksum_field = mach_read_from_4(read_buf
+ FIL_PAGE_SPACE_OR_CHKSUM);
/* InnoDB versions < 4.0.14 and < 4.1.1 stored the space id
(always equal to 0), to FIL_PAGE_SPACE_SPACE_OR_CHKSUM */
@ -659,9 +659,9 @@ buf_pool_init(
the window */
os_awe_map_physical_mem_to_window(buf_pool->frame_zero,
n_frames *
(UNIV_PAGE_SIZE
/ OS_AWE_X86_PAGE_SIZE),
n_frames
* (UNIV_PAGE_SIZE
/ OS_AWE_X86_PAGE_SIZE),
buf_pool->awe_info);
/*----------------------------------------*/
}
@ -2112,11 +2112,11 @@ buf_validate(void)
n_lru_flush++;
ut_a(rw_lock_is_locked
(&block->lock, RW_LOCK_SHARED));
} else if (block->flush_type ==
BUF_FLUSH_LIST) {
} else if (block->flush_type
== BUF_FLUSH_LIST) {
n_list_flush++;
} else if (block->flush_type ==
BUF_FLUSH_SINGLE_PAGE) {
} else if (block->flush_type
== BUF_FLUSH_SINGLE_PAGE) {
n_single_flush++;
} else {
ut_error;

View file

@ -890,9 +890,8 @@ buf_flush_batch(
old_page_count = page_count;
/* Try to flush also all the neighbors */
page_count +=
buf_flush_try_neighbors(space, offset,
flush_type);
page_count += buf_flush_try_neighbors
(space, offset, flush_type);
/* fprintf(stderr,
"Flush type %lu, page no %lu, neighb %lu\n",
flush_type, offset,

View file

@ -213,8 +213,8 @@ buf_read_ahead_random(
mutex_enter(&(buf_pool->mutex));
if (buf_pool->n_pend_reads >
buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&(buf_pool->mutex));
return(0);
@ -426,8 +426,8 @@ buf_read_ahead_linear(
return(0);
}
if (buf_pool->n_pend_reads >
buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&(buf_pool->mutex));
return(0);
@ -463,8 +463,8 @@ buf_read_ahead_linear(
}
}
if (fail_count > BUF_READ_AHEAD_LINEAR_AREA -
BUF_READ_AHEAD_LINEAR_THRESHOLD) {
if (fail_count > BUF_READ_AHEAD_LINEAR_AREA
- BUF_READ_AHEAD_LINEAR_THRESHOLD) {
/* Too many failures: return */
mutex_exit(&(buf_pool->mutex));
@ -615,8 +615,8 @@ buf_read_ibuf_merge_pages(
#ifdef UNIV_IBUF_DEBUG
ut_a(n_stored < UNIV_PAGE_SIZE);
#endif
while (buf_pool->n_pend_reads >
buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
while (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
os_thread_sleep(500000);
}

View file

@ -565,8 +565,8 @@ dtuple_convert_big_rec(
dfield = dtuple_get_nth_field(entry, i);
if (dfield->len != UNIV_SQL_NULL &&
dfield->len > longest) {
if (dfield->len != UNIV_SQL_NULL
&& dfield->len > longest) {
longest = dfield->len;

View file

@ -768,15 +768,15 @@ dict_init(void)
mutex_create(&dict_sys->mutex, SYNC_DICT);
dict_sys->table_hash = hash_create(buf_pool_get_max_size() /
(DICT_POOL_PER_TABLE_HASH *
UNIV_WORD_SIZE));
dict_sys->table_id_hash = hash_create(buf_pool_get_max_size() /
(DICT_POOL_PER_TABLE_HASH *
UNIV_WORD_SIZE));
dict_sys->col_hash = hash_create(buf_pool_get_max_size() /
(DICT_POOL_PER_COL_HASH *
UNIV_WORD_SIZE));
dict_sys->table_hash = hash_create(buf_pool_get_max_size()
/ (DICT_POOL_PER_TABLE_HASH
* UNIV_WORD_SIZE));
dict_sys->table_id_hash = hash_create(buf_pool_get_max_size()
/ (DICT_POOL_PER_TABLE_HASH
* UNIV_WORD_SIZE));
dict_sys->col_hash = hash_create(buf_pool_get_max_size()
/ (DICT_POOL_PER_COL_HASH
* UNIV_WORD_SIZE));
dict_sys->size = 0;
UT_LIST_INIT(dict_sys->table_LRU);
@ -1137,8 +1137,8 @@ dict_table_rename_in_cache(
foreign = UT_LIST_GET_FIRST(table->foreign_list);
while (foreign != NULL) {
if (ut_strlen(foreign->foreign_table_name) <
ut_strlen(table->name)) {
if (ut_strlen(foreign->foreign_table_name)
< ut_strlen(table->name)) {
/* Allocate a longer name buffer;
TODO: store buf len to save memory */
@ -1209,8 +1209,8 @@ dict_table_rename_in_cache(
foreign = UT_LIST_GET_FIRST(table->referenced_list);
while (foreign != NULL) {
if (ut_strlen(foreign->referenced_table_name) <
ut_strlen(table->name)) {
if (ut_strlen(foreign->referenced_table_name)
< ut_strlen(table->name)) {
/* Allocate a longer name buffer;
TODO: store buf len to save memory */
@ -1544,10 +1544,10 @@ dict_index_add_to_cache(
if (!UNIV_UNLIKELY(new_index->type & DICT_UNIVERSAL)) {
new_index->stat_n_diff_key_vals =
mem_heap_alloc(new_index->heap,
(1 + dict_index_get_n_unique(new_index))
* sizeof(ib_longlong));
new_index->stat_n_diff_key_vals = mem_heap_alloc
(new_index->heap,
(1 + dict_index_get_n_unique(new_index))
* sizeof(ib_longlong));
/* Give some sensible values to stat_n_... in case we do
not calculate statistics quickly enough */
@ -2142,9 +2142,9 @@ dict_foreign_find_index(
ulint n_cols, /* in: number of columns */
dict_index_t* types_idx, /* in: NULL or an index to whose types the
column types must match */
ibool check_charsets) /* in: whether to check charsets.
only has an effect if types_idx !=
NULL. */
ibool check_charsets)
/* in: whether to check charsets.
only has an effect if types_idx != NULL */
{
dict_index_t* index;
const char* col_name;
@ -2777,8 +2777,8 @@ scan_more:
/* Starting quote: remember the quote character. */
quote = *sptr;
} else if (*sptr == '#'
|| (sptr[0] == '-' && sptr[1] == '-' &&
sptr[2] == ' ')) {
|| (sptr[0] == '-' && sptr[1] == '-'
&& sptr[2] == ' ')) {
for (;;) {
/* In Unix a newline is 0x0A while in Windows
it is 0x0D followed by 0x0A */
@ -3193,8 +3193,8 @@ col_loop1:
foreign->foreign_col_names = mem_heap_alloc(foreign->heap,
i * sizeof(void*));
for (i = 0; i < foreign->n_fields; i++) {
foreign->foreign_col_names[i] =
mem_heap_strdup(foreign->heap, columns[i]->name);
foreign->foreign_col_names[i] = mem_heap_strdup
(foreign->heap, columns[i]->name);
}
ptr = dict_scan_table_name(cs, ptr, &referenced_table, name,
@ -3842,8 +3842,8 @@ dict_tree_build_node_ptr(
dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4, 0);
rec_copy_prefix_to_dtuple(tuple, rec, ind, n_unique, heap);
dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple) |
REC_STATUS_NODE_PTR);
dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple)
| REC_STATUS_NODE_PTR);
ut_ad(dtuple_check_typed(tuple));

View file

@ -1013,8 +1013,8 @@ static
void
dict_load_foreign_cols(
/*===================*/
const char* id, /* in: foreign constraint id as a null-
terminated string */
const char* id, /* in: foreign constraint id as a
null-terminated string */
dict_foreign_t* foreign)/* in: foreign constraint object */
{
dict_table_t* sys_foreign_cols;
@ -1067,12 +1067,12 @@ dict_load_foreign_cols(
ut_a(i == mach_read_from_4(field));
field = rec_get_nth_field_old(rec, 4, &len);
foreign->foreign_col_names[i] =
mem_heap_strdupl(foreign->heap, (char*) field, len);
foreign->foreign_col_names[i] = mem_heap_strdupl
(foreign->heap, (char*) field, len);
field = rec_get_nth_field_old(rec, 5, &len);
foreign->referenced_col_names[i] =
mem_heap_strdupl(foreign->heap, (char*) field, len);
foreign->referenced_col_names[i] = mem_heap_strdupl
(foreign->heap, (char*) field, len);
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
}
@ -1165,8 +1165,8 @@ dict_load_foreign(
foreign = dict_mem_foreign_create();
foreign->n_fields =
mach_read_from_4(rec_get_nth_field_old(rec, 5, &len));
foreign->n_fields = mach_read_from_4
(rec_get_nth_field_old(rec, 5, &len));
ut_a(len == 4);
@ -1178,12 +1178,12 @@ dict_load_foreign(
foreign->id = mem_heap_strdup(foreign->heap, id);
field = rec_get_nth_field_old(rec, 3, &len);
foreign->foreign_table_name =
mem_heap_strdupl(foreign->heap, (char*) field, len);
foreign->foreign_table_name = mem_heap_strdupl
(foreign->heap, (char*) field, len);
field = rec_get_nth_field_old(rec, 4, &len);
foreign->referenced_table_name =
mem_heap_strdupl(foreign->heap, (char*) field, len);
foreign->referenced_table_name = mem_heap_strdupl
(foreign->heap, (char*) field, len);
btr_pcur_close(&pcur);
mtr_commit(&mtr);

View file

@ -333,10 +333,10 @@ eval_predefined_2(
ut_ad(len2 >= len1);
if (len2 > len1) {
int_val = (lint)(len1 +
(eval_rnd % (len2 - len1 + 1)));
int_val = (lint) (len1
+ (eval_rnd % (len2 - len1 + 1)));
} else {
int_val = (lint)len1;
int_val = (lint) len1;
}
eval_rnd = ut_rnd_gen_next_ulint(eval_rnd);

View file

@ -3237,8 +3237,8 @@ fil_load_single_table_tablespaces(void)
/* We found a symlink or a file */
if (strlen(fileinfo.name) > 4
&& 0 == strcmp(fileinfo.name +
strlen(fileinfo.name) - 4,
&& 0 == strcmp(fileinfo.name
+ strlen(fileinfo.name) - 4,
".ibd")) {
/* The name ends in .ibd; try opening
the file */
@ -3684,8 +3684,8 @@ fil_extend_space_to_desired_size(
/* Keep the last data file size info up to date, rounded to
full megabytes */
srv_data_file_sizes[srv_n_data_files - 1] =
(node->size / pages_per_mb) * pages_per_mb;
srv_data_file_sizes[srv_n_data_files - 1]
= (node->size / pages_per_mb) * pages_per_mb;
}
#endif /* !UNIV_HOTBACKUP */
@ -4129,8 +4129,8 @@ fil_io(
offset_low = ((block_offset << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFUL)
+ byte_offset;
ut_a(node->size - block_offset >=
(byte_offset + len + (UNIV_PAGE_SIZE - 1)) / UNIV_PAGE_SIZE);
ut_a(node->size - block_offset
>= (byte_offset + len + (UNIV_PAGE_SIZE - 1)) / UNIV_PAGE_SIZE);
/* Do aio */

View file

@ -630,8 +630,8 @@ xdes_calc_descriptor_index(
/* out: descriptor index */
ulint offset) /* in: page offset */
{
return(ut_2pow_remainder(offset, XDES_DESCRIBED_PER_PAGE) /
FSP_EXTENT_SIZE);
return(ut_2pow_remainder(offset, XDES_DESCRIBED_PER_PAGE)
/ FSP_EXTENT_SIZE);
}
/************************************************************************
@ -2259,8 +2259,8 @@ fseg_fill_free_list(
for (i = 0; i < FSEG_FREE_LIST_MAX_LEN; i++) {
descr = xdes_get_descriptor(space, hint, mtr);
if ((descr == NULL) ||
(XDES_FREE != xdes_get_state(descr, mtr))) {
if ((descr == NULL)
|| (XDES_FREE != xdes_get_state(descr, mtr))) {
/* We cannot allocate the desired extent: stop */
@ -2364,8 +2364,8 @@ fseg_alloc_free_page_low(
ut_ad(mtr);
ut_ad((direction >= FSP_UP) && (direction <= FSP_NO_DIR));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) ==
FSEG_MAGIC_N_VALUE);
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
seg_id = mtr_read_dulint(seg_inode + FSEG_ID, mtr);
ut_ad(ut_dulint_cmp(seg_id, ut_dulint_zero) > 0);
@ -2421,8 +2421,8 @@ fseg_alloc_free_page_low(
} else if ((direction != FSP_NO_DIR)
&& ((reserved - used) < reserved / FSEG_FILLFACTOR)
&& (used >= FSEG_FRAG_LIMIT)
&& (!!(ret_descr =
fseg_alloc_free_extent(seg_inode, space, mtr)))) {
&& (!!(ret_descr
= fseg_alloc_free_extent(seg_inode, space, mtr)))) {
/* 3. We take any free extent (which was already assigned above
===============================================================
@ -2926,8 +2926,8 @@ fseg_mark_page_used(
descr = xdes_get_descriptor(space, page, mtr);
ut_ad(mtr_read_ulint(seg_inode + FSEG_ID, MLOG_4BYTES, mtr) ==
mtr_read_ulint(descr + XDES_ID, MLOG_4BYTES, mtr));
ut_ad(mtr_read_ulint(seg_inode + FSEG_ID, MLOG_4BYTES, mtr)
== mtr_read_ulint(descr + XDES_ID, MLOG_4BYTES, mtr));
if (xdes_is_free(descr, mtr)) {
/* We move the extent from the free list to the
@ -2981,8 +2981,8 @@ fseg_free_page_low(
ulint i;
ut_ad(seg_inode && mtr);
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) ==
FSEG_MAGIC_N_VALUE);
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
/* Drop search system page hash index if the page is found in
the pool and is hashed */
@ -3656,8 +3656,8 @@ fsp_validate(
frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED,
MLOG_4BYTES, &mtr);
n_full_frag_pages = FSP_EXTENT_SIZE *
flst_get_len(header + FSP_FULL_FRAG, &mtr);
n_full_frag_pages = FSP_EXTENT_SIZE
* flst_get_len(header + FSP_FULL_FRAG, &mtr);
ut_a(free_limit <= size || (space != 0 && size < FSP_EXTENT_SIZE));

View file

@ -2001,10 +2001,10 @@ ibuf_get_merge_page_nos(
if ((prev_page_no == first_page_no
&& prev_space_id == first_space_id)
|| contract
|| (volume_for_page >
((IBUF_MERGE_THRESHOLD - 1)
* 4 * UNIV_PAGE_SIZE
/ IBUF_PAGE_SIZE_PER_FREE_SPACE)
|| (volume_for_page
> ((IBUF_MERGE_THRESHOLD - 1)
* 4 * UNIV_PAGE_SIZE
/ IBUF_PAGE_SIZE_PER_FREE_SPACE)
/ IBUF_MERGE_THRESHOLD)) {
space_ids[*n_stored] = prev_space_id;

View file

@ -280,8 +280,8 @@ mem_heap_free_heap_top(
/* If free == start, we may free the block if it is not the first
one */
if ((heap != block) && (mem_block_get_free(block) ==
mem_block_get_start(block))) {
if ((heap != block) && (mem_block_get_free(block)
== mem_block_get_start(block))) {
mem_heap_block_free(heap, block);
}
}
@ -366,8 +366,8 @@ mem_heap_free_top(
/* If free == start, we may free the block if it is not the first
one */
if ((heap != block) && (mem_block_get_free(block) ==
mem_block_get_start(block))) {
if ((heap != block) && (mem_block_get_free(block)
== mem_block_get_start(block))) {
mem_heap_block_free(heap, block);
}
}

View file

@ -1441,11 +1441,11 @@ rec_get_converted_size(
ut_ad(dtuple_check_typed(dtuple));
ut_ad(index->type & DICT_UNIVERSAL
|| dtuple_get_n_fields(dtuple) ==
(((dtuple_get_info_bits(dtuple) & REC_NEW_STATUS_MASK)
== REC_STATUS_NODE_PTR)
? dict_index_get_n_unique_in_tree(index) + 1
: dict_index_get_n_fields(index)));
|| dtuple_get_n_fields(dtuple)
== (((dtuple_get_info_bits(dtuple) & REC_NEW_STATUS_MASK)
== REC_STATUS_NODE_PTR)
? dict_index_get_n_unique_in_tree(index) + 1
: dict_index_get_n_fields(index)));
if (dict_table_is_comp(index->table)) {
return(rec_get_converted_size_new(index, dtuple));

View file

@ -72,8 +72,8 @@ trx_rsegf_get_nth_undo(
ut_error;
}
return(mtr_read_ulint(rsegf + TRX_RSEG_UNDO_SLOTS +
n * TRX_RSEG_SLOT_SIZE, MLOG_4BYTES, mtr));
return(mtr_read_ulint(rsegf + TRX_RSEG_UNDO_SLOTS
+ n * TRX_RSEG_SLOT_SIZE, MLOG_4BYTES, mtr));
}
/*******************************************************************

View file

@ -327,8 +327,8 @@ trx_is_active(
}
trx = trx_get_on_id(trx_id);
if (trx && (trx->conc_state == TRX_ACTIVE ||
trx->conc_state == TRX_PREPARED)) {
if (trx && (trx->conc_state == TRX_ACTIVE
|| trx->conc_state == TRX_PREPARED)) {
return(TRUE);
}

View file

@ -71,8 +71,7 @@ ut_rnd_gen_ulint(void)
n_bits = 8 * sizeof(ulint);
ut_rnd_ulint_counter =
UT_RND1 * ut_rnd_ulint_counter + UT_RND2;
ut_rnd_ulint_counter = UT_RND1 * ut_rnd_ulint_counter + UT_RND2;
rnd = ut_rnd_gen_next_ulint(ut_rnd_ulint_counter);

View file

@ -676,8 +676,8 @@ lock_get_src_table(
} else if (!src) {
/* This presumably is the source table. */
src = tab_lock->table;
if (UT_LIST_GET_LEN(src->locks) != 1 ||
UT_LIST_GET_FIRST(src->locks) != lock) {
if (UT_LIST_GET_LEN(src->locks) != 1
|| UT_LIST_GET_FIRST(src->locks) != lock) {
/* We only support the case when
there is only one lock on this table. */
return(NULL);
@ -1563,8 +1563,9 @@ lock_rec_other_has_expl_req(
while (lock) {
if (lock->trx != trx
&& (gap ||
!(lock_rec_get_gap(lock) || page_rec_is_supremum(rec)))
&& (gap
|| !(lock_rec_get_gap(lock)
|| page_rec_is_supremum(rec)))
&& (wait || !lock_get_wait(lock))
&& lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) {
@ -2491,8 +2492,8 @@ lock_rec_inherit_to_gap(
while (lock != NULL) {
if (!lock_rec_get_insert_intention(lock)
&& !((srv_locks_unsafe_for_binlog
|| lock->trx->isolation_level ==
TRX_ISO_READ_COMMITTED)
|| lock->trx->isolation_level
== TRX_ISO_READ_COMMITTED)
&& lock_get_mode(lock) == LOCK_X)) {
lock_rec_add_to_queue(LOCK_REC | lock_get_mode(lock)
@ -3954,8 +3955,8 @@ lock_release_off_kernel(
table = lock->un_member.tab_lock.table;
table->query_cache_inv_trx_id =
trx_sys->max_trx_id;
table->query_cache_inv_trx_id
= trx_sys->max_trx_id;
}
lock_table_dequeue(lock);

View file

@ -710,8 +710,7 @@ log_calc_max_ages(void)
log_sys->max_archived_lsn_age = smallest_archive_margin;
log_sys->max_archived_lsn_age_async = smallest_archive_margin
- smallest_archive_margin /
LOG_ARCHIVE_RATIO_ASYNC;
- smallest_archive_margin / LOG_ARCHIVE_RATIO_ASYNC;
#endif /* UNIV_LOG_ARCHIVE */
failure:
mutex_exit(&(log_sys->mutex));
@ -3298,8 +3297,8 @@ log_check_log_recs(
ut_memcpy(scan_buf, start, end - start);
recv_scan_log_recs(TRUE,
(buf_pool->n_frames -
recv_n_pool_free_frames) * UNIV_PAGE_SIZE,
(buf_pool->n_frames
- recv_n_pool_free_frames) * UNIV_PAGE_SIZE,
FALSE, scan_buf, end - start,
ut_dulint_align_down(buf_start_lsn,
OS_FILE_LOG_BLOCK_SIZE),

View file

@ -2271,10 +2271,10 @@ recv_scan_log_recs(
/* We found a point from which to start the parsing
of log records */
recv_sys->parse_start_lsn =
ut_dulint_add(scanned_lsn,
log_block_get_first_rec_group
(log_block));
recv_sys->parse_start_lsn
= ut_dulint_add(scanned_lsn,
log_block_get_first_rec_group
(log_block));
recv_sys->scanned_lsn = recv_sys->parse_start_lsn;
recv_sys->recovered_lsn = recv_sys->parse_start_lsn;
}
@ -2302,8 +2302,8 @@ recv_scan_log_recs(
}
recv_sys->scanned_lsn = scanned_lsn;
recv_sys->scanned_checkpoint_no =
log_block_get_checkpoint_no(log_block);
recv_sys->scanned_checkpoint_no
= log_block_get_checkpoint_no(log_block);
}
if (data_len < OS_FILE_LOG_BLOCK_SIZE) {

View file

@ -103,8 +103,8 @@ mem_field_trailer_set_check(byte* field, ulint check)
ulint
mem_field_trailer_get_check(byte* field)
{
return(mach_read_from_4(field +
mem_field_header_get_len(field)));
return(mach_read_from_4(field
+ mem_field_header_get_len(field)));
}
/**********************************************************************
@ -486,8 +486,8 @@ mem_heap_validate_or_print(
total_len += len;
check_field = mem_field_header_get_check(user_field);
if (check_field !=
mem_field_trailer_get_check(user_field)) {
if (check_field
!= mem_field_trailer_get_check(user_field)) {
/* error */
fprintf(stderr,

View file

@ -285,9 +285,9 @@ mtr_read_ulint(
{
ut_ad(mtr->state == MTR_ACTIVE);
ut_ad(mtr_memo_contains(mtr, buf_block_align(ptr),
MTR_MEMO_PAGE_S_FIX) ||
mtr_memo_contains(mtr, buf_block_align(ptr),
MTR_MEMO_PAGE_X_FIX));
MTR_MEMO_PAGE_S_FIX)
|| mtr_memo_contains(mtr, buf_block_align(ptr),
MTR_MEMO_PAGE_X_FIX));
if (type == MLOG_1BYTE) {
return(mach_read_from_1(ptr));
} else if (type == MLOG_2BYTES) {
@ -312,9 +312,9 @@ mtr_read_dulint(
ut_ad(mtr->state == MTR_ACTIVE);
ut_ad(ptr && mtr);
ut_ad(mtr_memo_contains(mtr, buf_block_align(ptr),
MTR_MEMO_PAGE_S_FIX) ||
mtr_memo_contains(mtr, buf_block_align(ptr),
MTR_MEMO_PAGE_X_FIX));
MTR_MEMO_PAGE_S_FIX)
|| mtr_memo_contains(mtr, buf_block_align(ptr),
MTR_MEMO_PAGE_X_FIX));
return(mach_read_from_8(ptr));
}

View file

@ -843,8 +843,9 @@ os_file_create_directory(
BOOL rcode;
rcode = CreateDirectory((LPCTSTR) pathname, NULL);
if (!(rcode != 0 ||
(GetLastError() == ERROR_ALREADY_EXISTS && !fail_if_exists))) {
if (!(rcode != 0
|| (GetLastError() == ERROR_ALREADY_EXISTS
&& !fail_if_exists))) {
/* failure */
os_file_handle_error(pathname, "CreateDirectory");
@ -1190,8 +1191,8 @@ try_again:
/* Do not use unbuffered i/o to log files because
value 2 denotes that we do not flush the log at every
commit, but only once per second */
} else if (srv_win_file_flush_method ==
SRV_WIN_IO_UNBUFFERED) {
} else if (srv_win_file_flush_method
== SRV_WIN_IO_UNBUFFERED) {
attributes = attributes | FILE_FLAG_NO_BUFFERING;
}
#endif
@ -1202,8 +1203,8 @@ try_again:
/* Do not use unbuffered i/o to log files because
value 2 denotes that we do not flush the log at every
commit, but only once per second */
} else if (srv_win_file_flush_method ==
SRV_WIN_IO_UNBUFFERED) {
} else if (srv_win_file_flush_method
== SRV_WIN_IO_UNBUFFERED) {
attributes = attributes | FILE_FLAG_NO_BUFFERING;
}
#endif
@ -3004,14 +3005,14 @@ os_aio_get_segment_no_from_slot(
segment = 1;
} else if (array == os_aio_read_array) {
seg_len = os_aio_read_array->n_slots /
os_aio_read_array->n_segments;
seg_len = os_aio_read_array->n_slots
/ os_aio_read_array->n_segments;
segment = 2 + slot->pos / seg_len;
} else {
ut_a(array == os_aio_write_array);
seg_len = os_aio_write_array->n_slots /
os_aio_write_array->n_segments;
seg_len = os_aio_write_array->n_slots
/ os_aio_write_array->n_segments;
segment = os_aio_read_array->n_segments + 2
+ slot->pos / seg_len;
@ -3219,8 +3220,8 @@ loop:
control->aio_offset = offset;
control->aio_reqprio = 0;
control->aio_sigevent.sigev_notify = SIGEV_SIGNAL;
control->aio_sigevent.sigev_signo =
SIGRTMIN + 1 + os_aio_get_array_no(array);
control->aio_sigevent.sigev_signo
= SIGRTMIN + 1 + os_aio_get_array_no(array);
/* TODO: How to choose the signal numbers? */
/*
fprintf(stderr, "AIO signal number %lu\n",
@ -4285,8 +4286,8 @@ loop:
if (os_n_file_reads == os_n_file_reads_old) {
avg_bytes_read = 0.0;
} else {
avg_bytes_read = (double) os_bytes_read_since_printout /
(os_n_file_reads - os_n_file_reads_old);
avg_bytes_read = (double) os_bytes_read_since_printout
/ (os_n_file_reads - os_n_file_reads_old);
}
fprintf(file,

View file

@ -411,8 +411,8 @@ os_awe_map_physical_mem_to_window(
ut_a(ptr >= os_awe_simulate_window);
ut_a(ptr < os_awe_simulate_window + os_awe_simulate_window_size);
ut_a(page_info >= os_awe_simulate_page_info);
ut_a(page_info < os_awe_simulate_page_info +
(os_awe_simulate_mem_size / 4096));
ut_a(page_info < os_awe_simulate_page_info
+ (os_awe_simulate_mem_size / 4096));
/* First look if some other 'physical pages' are mapped at ptr,
and copy them back to where they were if yes */

View file

@ -629,8 +629,8 @@ page_cur_insert_rec_write_log(
log_end = &log_ptr[5 + 1 + 5 + 5 + MLOG_BUF_MARGIN];
}
if ((rec_get_info_and_status_bits(insert_rec, comp) !=
rec_get_info_and_status_bits(cursor_rec, comp))
if ((rec_get_info_and_status_bits(insert_rec, comp)
!= rec_get_info_and_status_bits(cursor_rec, comp))
|| (extra_size != cur_extra_size)
|| (rec_size != cur_rec_size)) {

View file

@ -389,8 +389,8 @@ page_create(
infimum_rec = rec_convert_dtuple_to_rec(heap_top, index, tuple);
ut_a(infimum_rec ==
page + (comp ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM));
ut_a(infimum_rec == page
+ (comp ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM));
rec_set_n_owned(infimum_rec, comp, 1);
rec_set_heap_no(infimum_rec, comp, 0);
@ -411,8 +411,8 @@ page_create(
supremum_rec = rec_convert_dtuple_to_rec(heap_top, index, tuple);
ut_a(supremum_rec ==
page + (comp ? PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM));
ut_a(supremum_rec == page
+ (comp ? PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM));
rec_set_n_owned(supremum_rec, comp, 1);
rec_set_heap_no(supremum_rec, comp, 1);
@ -421,8 +421,8 @@ page_create(
ULINT_UNDEFINED, &heap);
heap_top = rec_get_end(supremum_rec, offsets);
ut_ad(heap_top ==
page + (comp ? PAGE_NEW_SUPREMUM_END : PAGE_OLD_SUPREMUM_END));
ut_ad(heap_top == page
+ (comp ? PAGE_NEW_SUPREMUM_END : PAGE_OLD_SUPREMUM_END));
mem_heap_free(heap);
@ -1791,8 +1791,8 @@ page_validate(
n_slots = page_dir_get_n_slots(page);
if (!(page_header_get_ptr(page, PAGE_HEAP_TOP) <=
page_dir_get_nth_slot(page, n_slots - 1))) {
if (!(page_header_get_ptr(page, PAGE_HEAP_TOP)
<= page_dir_get_nth_slot(page, n_slots - 1))) {
fputs("InnoDB: Record heap and dir overlap on a page ",
stderr);

View file

@ -1805,14 +1805,14 @@ pars_get_lex_chars(
len = 5;
}
fwrite(pars_sym_tab_global->sql_string +
pars_sym_tab_global->next_char_pos,
fwrite(pars_sym_tab_global->sql_string
+ pars_sym_tab_global->next_char_pos,
1, len, stderr);
}
#endif /* UNIV_SQL_DEBUG */
ut_memcpy(buf, pars_sym_tab_global->sql_string +
pars_sym_tab_global->next_char_pos, len);
ut_memcpy(buf, pars_sym_tab_global->sql_string
+ pars_sym_tab_global->next_char_pos, len);
*result = len;
pars_sym_tab_global->next_char_pos += len;

View file

@ -325,8 +325,8 @@ cmp_data_data_slow(
if (cur_type->mtype >= DATA_FLOAT
|| (cur_type->mtype == DATA_BLOB
&& 0 == (cur_type->prtype & DATA_BINARY_TYPE)
&& dtype_get_charset_coll(cur_type->prtype) !=
DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) {
&& dtype_get_charset_coll(cur_type->prtype)
!= DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) {
return(cmp_whole_field(cur_type,
data1, (unsigned) len1,
@ -527,8 +527,8 @@ cmp_dtuple_rec_with_match(
if (cur_type->mtype >= DATA_FLOAT
|| (cur_type->mtype == DATA_BLOB
&& 0 == (cur_type->prtype & DATA_BINARY_TYPE)
&& dtype_get_charset_coll(cur_type->prtype) !=
DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) {
&& dtype_get_charset_coll(cur_type->prtype)
!= DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) {
ret = cmp_whole_field(cur_type,
dfield_get_data(dtuple_field),
@ -591,8 +591,7 @@ cmp_dtuple_rec_with_match(
if (cur_type->mtype <= DATA_CHAR
|| (cur_type->mtype == DATA_BLOB
&& 0 ==
(cur_type->prtype & DATA_BINARY_TYPE))) {
&& !(cur_type->prtype & DATA_BINARY_TYPE))) {
rec_byte = cmp_collate(rec_byte);
dtuple_byte = cmp_collate(dtuple_byte);
@ -838,8 +837,8 @@ cmp_rec_rec_with_match(
if (cur_type->mtype >= DATA_FLOAT
|| (cur_type->mtype == DATA_BLOB
&& 0 == (cur_type->prtype & DATA_BINARY_TYPE)
&& dtype_get_charset_coll(cur_type->prtype) !=
DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) {
&& dtype_get_charset_coll(cur_type->prtype)
!= DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) {
ret = cmp_whole_field(cur_type,
rec1_b_ptr,
@ -901,8 +900,7 @@ cmp_rec_rec_with_match(
if (cur_type->mtype <= DATA_CHAR
|| (cur_type->mtype == DATA_BLOB
&& 0 ==
(cur_type->prtype & DATA_BINARY_TYPE))) {
&& !(cur_type->prtype & DATA_BINARY_TYPE))) {
rec1_byte = cmp_collate(rec1_byte);
rec2_byte = cmp_collate(rec2_byte);

View file

@ -176,13 +176,13 @@ rec_init_offsets(
case REC_STATUS_INFIMUM:
case REC_STATUS_SUPREMUM:
/* the field is 8 bytes long */
rec_offs_base(offsets)[0] =
REC_N_NEW_EXTRA_BYTES | REC_OFFS_COMPACT;
rec_offs_base(offsets)[0]
= REC_N_NEW_EXTRA_BYTES | REC_OFFS_COMPACT;
rec_offs_base(offsets)[1] = 8;
return;
case REC_STATUS_NODE_PTR:
n_node_ptr_field =
dict_index_get_n_unique_in_tree(index);
n_node_ptr_field
= dict_index_get_n_unique_in_tree(index);
break;
case REC_STATUS_ORDINARY:
break;
@ -258,8 +258,8 @@ resolved:
rec_offs_base(offsets)[i + 1] = len;
} while (++i < rec_offs_n_fields(offsets));
*rec_offs_base(offsets) =
(rec - (lens + 1)) | REC_OFFS_COMPACT;
*rec_offs_base(offsets)
= (rec - (lens + 1)) | REC_OFFS_COMPACT;
} else {
/* Old-style record: determine extra size and end offsets */
offs = REC_N_OLD_EXTRA_BYTES;
@ -349,8 +349,8 @@ rec_get_offsets_func(
size = n + (1 + REC_OFFS_HEADER_SIZE);
if (UNIV_UNLIKELY(!offsets) ||
UNIV_UNLIKELY(rec_offs_get_n_alloc(offsets) < size)) {
if (UNIV_UNLIKELY(!offsets)
|| UNIV_UNLIKELY(rec_offs_get_n_alloc(offsets) < size)) {
if (!*heap) {
*heap = mem_heap_create_func(size * sizeof(ulint),
NULL, MEM_HEAP_DYNAMIC,
@ -474,8 +474,8 @@ rec_get_converted_size_new(
ulint len = dtuple_get_nth_field(dtuple, i)->len;
field = dict_index_get_nth_field(index, i);
type = dict_col_get_type(dict_field_get_col(field));
ut_ad(len != UNIV_SQL_NULL ||
!(dtype_get_prtype(type) & DATA_NOT_NULL));
ut_ad(len != UNIV_SQL_NULL
|| !(dtype_get_prtype(type) & DATA_NOT_NULL));
if (len == UNIV_SQL_NULL) {
/* No length is stored for NULL fields. */

View file

@ -531,14 +531,13 @@ row_ins_cascade_calc_update_vec(
char* pad_start;
const char* pad_end;
ufield->new_val.data =
mem_heap_alloc(heap,
min_size);
pad_start =
((char*) ufield->new_val.data)
ufield->new_val.data = mem_heap_alloc
(heap, min_size);
pad_start = ((char*) ufield
->new_val.data)
+ ufield->new_val.len;
pad_end =
((char*) ufield->new_val.data)
pad_end = ((char*) ufield
->new_val.data)
+ min_size;
ufield->new_val.len = min_size;
ut_memcpy(ufield->new_val.data,
@ -805,9 +804,9 @@ row_ins_foreign_check_on_constraint(
node = thr->run_node;
if (node->is_delete && 0 == (foreign->type &
(DICT_FOREIGN_ON_DELETE_CASCADE
| DICT_FOREIGN_ON_DELETE_SET_NULL))) {
if (node->is_delete && 0 == (foreign->type
& (DICT_FOREIGN_ON_DELETE_CASCADE
| DICT_FOREIGN_ON_DELETE_SET_NULL))) {
row_ins_foreign_report_err("Trying to delete",
thr, foreign,
@ -816,9 +815,9 @@ row_ins_foreign_check_on_constraint(
return(DB_ROW_IS_REFERENCED);
}
if (!node->is_delete && 0 == (foreign->type &
(DICT_FOREIGN_ON_UPDATE_CASCADE
| DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
if (!node->is_delete && 0 == (foreign->type
& (DICT_FOREIGN_ON_UPDATE_CASCADE
| DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
/* This is an UPDATE */

View file

@ -406,8 +406,8 @@ row_mysql_convert_row_to_innobase(
if (templ->mysql_null_bit_mask != 0) {
/* Column may be SQL NULL */
if (mysql_rec[templ->mysql_null_byte_offset] &
(byte) (templ->mysql_null_bit_mask)) {
if (mysql_rec[templ->mysql_null_byte_offset]
& (byte) (templ->mysql_null_bit_mask)) {
/* It is SQL NULL */
@ -707,11 +707,11 @@ row_prebuilt_free(
for (i = 0; i < MYSQL_FETCH_CACHE_SIZE; i++) {
if (prebuilt->fetch_cache[i] != NULL) {
if ((ROW_PREBUILT_FETCH_MAGIC_N !=
mach_read_from_4((prebuilt->fetch_cache[i]) - 4))
|| (ROW_PREBUILT_FETCH_MAGIC_N !=
mach_read_from_4((prebuilt->fetch_cache[i])
+ prebuilt->mysql_row_len))) {
if ((ROW_PREBUILT_FETCH_MAGIC_N != mach_read_from_4
((prebuilt->fetch_cache[i]) - 4))
|| (ROW_PREBUILT_FETCH_MAGIC_N != mach_read_from_4
((prebuilt->fetch_cache[i])
+ prebuilt->mysql_row_len))) {
fputs("InnoDB: Error: trying to free"
" a corrupt fetch buffer.\n", stderr);
@ -3037,9 +3037,9 @@ check_next_foreign:
foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
if (foreign && trx->check_foreigns &&
!(drop_db && dict_tables_have_same_db
(name, foreign->foreign_table_name))) {
if (foreign && trx->check_foreigns
&& !(drop_db && dict_tables_have_same_db
(name, foreign->foreign_table_name))) {
FILE* ef = dict_foreign_err_file;
/* We only allow dropping a referenced table if
@ -3232,8 +3232,8 @@ check_next_foreign:
space_id = table->space;
if (table->dir_path_of_temp_table != NULL) {
dir_path_of_temp_table =
mem_strdup(table->dir_path_of_temp_table);
dir_path_of_temp_table = mem_strdup
(table->dir_path_of_temp_table);
is_path = TRUE;
name_or_path = dir_path_of_temp_table;
} else {
@ -3927,8 +3927,9 @@ not_ok:
is_ok = FALSE;
} else if ((index->type & DICT_UNIQUE)
&& !contains_null
&& matched_fields >=
dict_index_get_n_ordering_defined_by_user(index)) {
&& matched_fields
>= dict_index_get_n_ordering_defined_by_user
(index)) {
fputs("InnoDB: duplicate key in ", stderr);
goto not_ok;

View file

@ -424,8 +424,8 @@ row_build_row_ref(
column, or the full column, and we must adjust the length
accordingly. */
clust_col_prefix_len =
dict_index_get_nth_field(clust_index, i)->prefix_len;
clust_col_prefix_len = dict_index_get_nth_field
(clust_index, i)->prefix_len;
if (clust_col_prefix_len > 0) {
if (len != UNIV_SQL_NULL) {
@ -521,8 +521,8 @@ notfound:
column, or the full column, and we must adjust the length
accordingly. */
clust_col_prefix_len =
dict_index_get_nth_field(clust_index, i)->prefix_len;
clust_col_prefix_len = dict_index_get_nth_field
(clust_index, i)->prefix_len;
if (clust_col_prefix_len > 0) {
if (len != UNIV_SQL_NULL) {

View file

@ -1951,8 +1951,8 @@ row_sel_step(
fetches (currently, we copy them also for non-explicit
cursors) */
if (node->explicit_cursor &&
UT_LIST_GET_FIRST(node->copy_variables)) {
if (node->explicit_cursor
&& UT_LIST_GET_FIRST(node->copy_variables)) {
row_sel_copy_input_variable_vals(node);
}
@ -2014,8 +2014,8 @@ fetch_step(
(sel_node, node->func->arg);
if (!ret) {
sel_node->state =
SEL_NODE_NO_MORE_ROWS;
sel_node->state
= SEL_NODE_NO_MORE_ROWS;
}
}
}
@ -2620,8 +2620,8 @@ row_sel_store_mysql_rec(
heap = prebuilt->blob_heap;
} else {
extern_field_heap =
mem_heap_create(UNIV_PAGE_SIZE);
extern_field_heap
= mem_heap_create(UNIV_PAGE_SIZE);
heap = extern_field_heap;
}
@ -2656,8 +2656,8 @@ row_sel_store_mysql_rec(
if (templ->mysql_null_bit_mask) {
/* It is a nullable column with a non-NULL
value */
mysql_rec[templ->mysql_null_byte_offset] &=
~(byte) (templ->mysql_null_bit_mask);
mysql_rec[templ->mysql_null_byte_offset]
&= ~(byte) templ->mysql_null_bit_mask;
}
} else {
/* MySQL seems to assume the field for an SQL NULL
@ -2667,8 +2667,8 @@ row_sel_store_mysql_rec(
and DISTINCT could treat NULL values inequal. */
int pad_char;
mysql_rec[templ->mysql_null_byte_offset] |=
(byte) (templ->mysql_null_bit_mask);
mysql_rec[templ->mysql_null_byte_offset]
|= (byte) templ->mysql_null_bit_mask;
switch (templ->type) {
case DATA_VARCHAR:
case DATA_BINARY:
@ -2908,8 +2908,8 @@ row_sel_get_clust_rec_for_mysql(
clust_rec = NULL;
} else {
#ifdef UNIV_SEARCH_DEBUG
ut_a(clust_rec == NULL ||
row_sel_sec_rec_is_for_clust_rec
ut_a(clust_rec == NULL
|| row_sel_sec_rec_is_for_clust_rec
(rec, sec_index, clust_rec, clust_index));
#endif
}
@ -3018,8 +3018,8 @@ row_sel_pop_cached_row_for_mysql(
if (UNIV_UNLIKELY(prebuilt->keep_other_fields_on_keyread)) {
/* Copy cache record field by field, don't touch fields that
are not covered by current key */
cached_rec =
prebuilt->fetch_cache[prebuilt->fetch_cache_first];
cached_rec = prebuilt->fetch_cache
[prebuilt->fetch_cache_first];
for (i = 0; i < prebuilt->n_template; i++) {
templ = prebuilt->mysql_template + i;
@ -3397,13 +3397,13 @@ row_search_for_mysql(
}
/* In a search where at most one record in the index may match, we
can use a LOCK_REC_NOT_GAP type record lock when locking a non-delete-
marked matching record.
can use a LOCK_REC_NOT_GAP type record lock when locking a
non-delete-marked matching record.
Note that in a unique secondary index there may be different delete-
marked versions of a record where only the primary key values differ:
thus in a secondary index we must use next-key locks when locking
delete-marked records. */
Note that in a unique secondary index there may be different
delete-marked versions of a record where only the primary key
values differ: thus in a secondary index we must use next-key
locks when locking delete-marked records. */
if (match_mode == ROW_SEL_EXACT
&& index->type & DICT_UNIQUE
@ -3425,8 +3425,8 @@ row_search_for_mysql(
1 column. Return immediately if this is not a HANDLER
command. */
if (UNIV_UNLIKELY(direction != 0 &&
!prebuilt->used_in_HANDLER)) {
if (UNIV_UNLIKELY(direction != 0
&& !prebuilt->used_in_HANDLER)) {
err = DB_RECORD_NOT_FOUND;
goto func_exit;

View file

@ -833,8 +833,8 @@ row_upd_build_difference_binary(
extern_bit = upd_ext_vec_contains(ext_vec, n_ext_vec, i);
if (UNIV_UNLIKELY(extern_bit ==
(ibool)!rec_offs_nth_extern(offsets, i))
if (UNIV_UNLIKELY(extern_bit
== (ibool)!rec_offs_nth_extern(offsets, i))
|| !dfield_data_is_binary_equal(dfield, len, data)) {
upd_field = upd_get_nth_field(update, n_diff);

View file

@ -1506,8 +1506,8 @@ srv_suspend_mysql_thread(
mutex_exit(&kernel_mutex);
if (srv_lock_wait_timeout < 100000000 &&
wait_time > (double)srv_lock_wait_timeout) {
if (srv_lock_wait_timeout < 100000000
&& wait_time > (double)srv_lock_wait_timeout) {
trx->error_state = DB_LOCK_WAIT_TIMEOUT;
}
@ -1972,9 +1972,9 @@ loop:
wait_time = ut_difftime(ut_time(), slot->suspend_time);
if (srv_lock_wait_timeout < 100000000 &&
(wait_time > (double) srv_lock_wait_timeout
|| wait_time < 0)) {
if (srv_lock_wait_timeout < 100000000
&& (wait_time > (double) srv_lock_wait_timeout
|| wait_time < 0)) {
/* Timeout exceeded or a wrap-around in system
time counter: cancel the lock request queued
@ -2285,8 +2285,8 @@ loop:
log_buffer_flush_to_disk();
}
if (buf_get_modified_ratio_pct() >
srv_max_buf_pool_modified_pct) {
if (UNIV_UNLIKELY(buf_get_modified_ratio_pct()
> srv_max_buf_pool_modified_pct)) {
/* Try to keep the number of modified pages in the
buffer pool under the limit wished by the user */
@ -2493,8 +2493,8 @@ flush_loop:
srv_main_thread_op_info = "flushing buffer pool pages";
if (srv_fast_shutdown < 2) {
n_pages_flushed =
buf_flush_batch(BUF_FLUSH_LIST, 100, ut_dulint_max);
n_pages_flushed = buf_flush_batch
(BUF_FLUSH_LIST, 100, ut_dulint_max);
} else {
/* In the fastest shutdown we do not flush the buffer pool
to data files: we set n_pages_flushed to 0 artificially. */
@ -2560,8 +2560,8 @@ flush_loop:
goto background_loop;
}
} else if (n_tables_to_drop +
n_pages_purged + n_bytes_merged + n_pages_flushed
} else if (n_tables_to_drop
+ n_pages_purged + n_bytes_merged + n_pages_flushed
+ n_bytes_archived != 0) {
/* In a 'slow' shutdown we run purge and the insert buffer
merge to completion */

View file

@ -560,8 +560,8 @@ open_or_create_log_file(
srv_log_group_home_dirs[k] = srv_add_path_separator_if_needed
(srv_log_group_home_dirs[k]);
ut_a(strlen(srv_log_group_home_dirs[k]) <
(sizeof name) - 10 - sizeof "ib_logfile");
ut_a(strlen(srv_log_group_home_dirs[k])
< (sizeof name) - 10 - sizeof "ib_logfile");
sprintf(name, "%s%s%lu", srv_log_group_home_dirs[k],
"ib_logfile", (ulong) i);
@ -833,8 +833,7 @@ open_or_create_data_files(
if (i == srv_n_data_files - 1
&& srv_auto_extend_last_data_file) {
if (srv_data_file_sizes[i] >
rounded_size_pages
if (srv_data_file_sizes[i] > rounded_size_pages
|| (srv_last_file_size_max > 0
&& srv_last_file_size_max
< rounded_size_pages)) {
@ -1629,8 +1628,8 @@ innobase_start_or_create_for_mysql(void)
/* Create the master thread which does purge and other utility
operations */
os_thread_create(&srv_master_thread, NULL, thread_ids + 1 +
SRV_MAX_N_IO_THREADS);
os_thread_create(&srv_master_thread, NULL, thread_ids
+ (1 + SRV_MAX_N_IO_THREADS));
#ifdef UNIV_DEBUG
/* buf_debug_prints = TRUE; */
#endif /* UNIV_DEBUG */

View file

@ -902,11 +902,11 @@ sync_array_signal_object(
sync_cell_t** old_cell_ptr = cell_ptr;
size_t old_size, new_size;
old_size = cell_max_count *
sizeof(sync_cell_t*);
old_size = cell_max_count
* sizeof(sync_cell_t*);
cell_max_count *= 2;
new_size = cell_max_count *
sizeof(sync_cell_t*);
new_size = cell_max_count
* sizeof(sync_cell_t*);
cell_ptr = malloc(new_size);
ut_a(cell_ptr);

View file

@ -284,8 +284,8 @@ trx_purge_add_update_undo_to_history(
hist_size = mtr_read_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
MLOG_4BYTES, mtr);
ut_ad(undo->size ==
flst_get_len(seg_header + TRX_UNDO_PAGE_LIST, mtr));
ut_ad(undo->size == flst_get_len
(seg_header + TRX_UNDO_PAGE_LIST, mtr));
mlog_write_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
hist_size + undo->size, MLOG_4BYTES, mtr);

View file

@ -1286,9 +1286,9 @@ trx_undo_prev_version_build(
ut_ad(rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(mtr_memo_contains(index_mtr, buf_block_align(index_rec),
MTR_MEMO_PAGE_S_FIX) ||
mtr_memo_contains(index_mtr, buf_block_align(index_rec),
MTR_MEMO_PAGE_X_FIX));
MTR_MEMO_PAGE_S_FIX)
|| mtr_memo_contains(index_mtr, buf_block_align(index_rec),
MTR_MEMO_PAGE_X_FIX));
ut_ad(rec_offs_validate(rec, index, offsets));
if (!(index->type & DICT_CLUSTERED)) {

View file

@ -201,10 +201,10 @@ start_again:
"InnoDB: Doublewrite buffer not found:"
" creating new\n");
if (buf_pool_get_curr_size() <
(2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
+ FSP_EXTENT_SIZE / 2 + 100)
* UNIV_PAGE_SIZE) {
if (buf_pool_get_curr_size()
< ((2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
+ FSP_EXTENT_SIZE / 2 + 100)
* UNIV_PAGE_SIZE)) {
fprintf(stderr,
"InnoDB: Cannot create doublewrite buffer:"
" you must\n"

View file

@ -563,8 +563,8 @@ trx_lists_init_at_db_start(void)
if (srv_force_recovery == 0) {
trx->conc_state =
TRX_PREPARED;
trx->conc_state
= TRX_PREPARED;
} else {
fprintf(stderr,
"InnoDB: Since"
@ -577,8 +577,8 @@ trx_lists_init_at_db_start(void)
= TRX_ACTIVE;
}
} else {
trx->conc_state =
TRX_COMMITTED_IN_MEMORY;
trx->conc_state
= TRX_COMMITTED_IN_MEMORY;
}
/* We give a dummy value for the trx
@ -599,8 +599,8 @@ trx_lists_init_at_db_start(void)
trx_list_insert_ordered(trx);
if (undo->dict_operation) {
trx->dict_operation =
undo->dict_operation;
trx->dict_operation
= undo->dict_operation;
trx->table_id = undo->table_id;
}
}
@ -1757,8 +1757,8 @@ trx_print(
fprintf(f, "que state %lu ", (ulong) trx->que_state);
}
if (0 < UT_LIST_GET_LEN(trx->trx_locks) ||
mem_heap_get_size(trx->lock_heap) > 400) {
if (0 < UT_LIST_GET_LEN(trx->trx_locks)
|| mem_heap_get_size(trx->lock_heap) > 400) {
newline = TRUE;
fprintf(f, "%lu lock struct(s), heap size %lu",

View file

@ -585,10 +585,10 @@ trx_undo_read_xid(
{
xid->formatID = (long)mach_read_from_4(log_hdr + TRX_UNDO_XA_FORMAT);
xid->gtrid_length =
(long)mach_read_from_4(log_hdr + TRX_UNDO_XA_TRID_LEN);
xid->bqual_length =
(long)mach_read_from_4(log_hdr + TRX_UNDO_XA_BQUAL_LEN);
xid->gtrid_length
= (long) mach_read_from_4(log_hdr + TRX_UNDO_XA_TRID_LEN);
xid->bqual_length
= (long) mach_read_from_4(log_hdr + TRX_UNDO_XA_BQUAL_LEN);
memcpy(xid->data, log_hdr + TRX_UNDO_XA_XID, XIDDATASIZE);
}
@ -1639,8 +1639,8 @@ trx_undo_mark_as_dict_operation(
hdr_page = trx_undo_page_get(undo->space, undo->hdr_page_no, mtr);
mlog_write_ulint(hdr_page + undo->hdr_offset +
TRX_UNDO_DICT_TRANS,
mlog_write_ulint(hdr_page + undo->hdr_offset
+ TRX_UNDO_DICT_TRANS,
trx->dict_operation, MLOG_1BYTE, mtr);
mlog_write_dulint(hdr_page + undo->hdr_offset + TRX_UNDO_TABLE_ID,

View file

@ -454,8 +454,9 @@ ut_copy_file(
rewind(src);
do {
size_t maxs =
len < (long) sizeof buf ? (size_t) len : sizeof buf;
size_t maxs = len < (long) sizeof buf
? (size_t) len
: sizeof buf;
size_t size = fread(buf, 1, maxs, src);
fwrite(buf, 1, size, dest);
len -= (long) size;