in sql/sql_update.cc [215:1040]
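/*
  Process a single-table UPDATE statement.

  @param thd               thread handler
  @param table_list        table to be updated
  @param fields            fields (columns) to update
  @param values            values to assign to the fields
  @param conds             WHERE clause condition
  @param order_num         number of elements in the ORDER BY list
  @param order             ORDER BY clause list
  @param limit             row limit, or HA_POS_ERROR for "no limit"
  @param handle_duplicates how duplicates are to be handled
  @param ignore            true if IGNORE was specified
  @param[out] found_return   number of rows matching the WHERE clause
  @param[out] updated_return number of rows actually updated

  @return 0 on success, 1 on error, and 2 when the statement must be
          re-executed as a multi-table update (view over several tables).
*/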
int mysql_update(THD *thd,
                 TABLE_LIST *table_list,
                 List<Item> &fields,
                 List<Item> &values,
                 Item *conds,
                 uint order_num, ORDER *order,
                 ha_rows limit,
                 enum enum_duplicates handle_duplicates, bool ignore,
                 ha_rows *found_return, ha_rows *updated_return)
{
  bool using_limit= limit != HA_POS_ERROR;
  bool safe_update= MY_TEST(thd->variables.option_bits & OPTION_SAFE_UPDATES);
  bool used_key_is_modified= FALSE, transactional_table, will_batch;
  int res;
  int error= 1;
  int loc_error;
  uint used_index, dup_key_found;
  bool need_sort= TRUE;
  bool reverse= FALSE;
  bool using_filesort;
  bool read_removal= false;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  uint want_privilege;
#endif
  ha_rows updated, found;
  key_map old_covering_keys;
  TABLE *table;
  SQL_SELECT *select= NULL;
  READ_RECORD info;
  SELECT_LEX *select_lex= &thd->lex->select_lex;
  ulonglong id;
  List<Item> all_fields;
  THD::killed_state killed_status= THD::NOT_KILLED;
  COPY_INFO update(COPY_INFO::UPDATE_OPERATION, &fields, &values);
  IO_CACHE tempfile;
  DBUG_ENTER("mysql_update");

  if (open_normal_and_derived_tables(thd, table_list, 0))
    DBUG_RETURN(1);

  if (table_list->multitable_view)
  {
    DBUG_ASSERT(table_list->view != 0);
    DBUG_PRINT("info", ("Switch to multi-update"));
    /* convert to multiupdate */
    DBUG_RETURN(2);
  }
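  /*
    The "return 2" above tells the caller to re-dispatch the statement
    through the multi-table UPDATE code path, which is the only one able
    to handle a view that merges several underlying tables.
  */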
  THD_STAGE_INFO(thd, stage_init);
  table= table_list->table;

  if (!table_list->updatable)
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
    DBUG_RETURN(1);
  }

  /* Calculate "table->covering_keys" based on the WHERE */
  table->covering_keys= table->s->keys_in_use;
  table->quick_keys.clear_all();
  table->possible_quick_keys.clear_all();

#ifndef NO_EMBEDDED_ACCESS_CHECKS
  /* Force privilege re-checking for views after they have been opened. */
  want_privilege= (table_list->view ? UPDATE_ACL :
                   table_list->grant.want_privilege);
#endif

  if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
    DBUG_RETURN(1);
  old_covering_keys= table->covering_keys;      // Keys used in WHERE

  /* Check the fields we are going to modify */
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  table_list->grant.want_privilege= table->grant.want_privilege=
    want_privilege;
  table_list->register_want_access(want_privilege);
#endif
  if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
                                fields, MARK_COLUMNS_WRITE, 0, 0))
    DBUG_RETURN(1);                     /* purecov: inspected */
  if (table_list->view && check_fields(thd, fields))
  {
    DBUG_RETURN(1);
  }
  if (!table_list->updatable || check_key_in_view(thd, table_list))
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
    DBUG_RETURN(1);
  }

  if (update.add_function_default_columns(table, table->write_set))
    DBUG_RETURN(1);

#ifndef NO_EMBEDDED_ACCESS_CHECKS
  /* Check values */
  table_list->grant.want_privilege= table->grant.want_privilege=
    (SELECT_ACL & ~table->grant.privilege);
#endif
  if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_READ, 0, 0))
  {
    free_underlaid_joins(thd, select_lex);
    DBUG_RETURN(1);                     /* purecov: inspected */
  }

  if (select_lex->inner_refs_list.elements &&
      fix_inner_refs(thd, all_fields, select_lex,
                     select_lex->ref_pointer_array))
    DBUG_RETURN(1);

  // Parse column usage statistics and store them in the THD.
  parse_column_usage_info(thd);
  if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0 &&
      update.function_defaults_apply(table))
    /*
      A column is to be set to its ON UPDATE function default only if other
      columns of the row are changing. To know this, we must be able to
      compare the "before" and "after" value of those columns
      (i.e. records_are_comparable() must be true below). Thus, we must read
      those columns:
    */
    bitmap_union(table->read_set, table->write_set);
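  /*
    Illustration of the case above: given
      CREATE TABLE t (a INT,
                      ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                                   ON UPDATE CURRENT_TIMESTAMP);
      UPDATE t SET a= 1;
    'ts' must be bumped only for rows where 'a' actually changes, which
    requires reading the old value of 'a' to compare old and new rows.
  */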
  // Don't count on usage of 'only index' when calculating which key to use
  table->covering_keys.clear_all();

  /*
    This must be done before partition pruning, since prune_partitions()
    uses table->write_set to determine whether it may prune locks too.
  */
  if (table->triggers)
    table->triggers->mark_fields_used(TRG_EVENT_UPDATE);
#ifdef WITH_PARTITION_STORAGE_ENGINE
  if (table->part_info)
  {
    if (prune_partitions(thd, table, conds))
      DBUG_RETURN(1);
    if (table->all_partitions_pruned_away)
    {
      /* No matching records */
      if (thd->lex->describe)
      {
        error= explain_no_table(thd,
                                "No matching rows after partition pruning");
        goto exit_without_my_ok;
      }
      my_ok(thd);                               // No matching records
      DBUG_RETURN(0);
    }
  }
#endif
  if (lock_tables(thd, table_list, thd->lex->table_count, 0))
    DBUG_RETURN(1);
  // Must be done after lock_tables()
  if (conds)
  {
    COND_EQUAL *cond_equal= NULL;
    Item::cond_result result;
    if (table_list->check_option)
    {
      /*
        If this UPDATE is on a view with CHECK OPTION, Item_fields
        must not be replaced by constants. The reason is that when
        'conds' is optimized, 'check_option' is also optimized (it is
        part of 'conds'). Const replacement is fine for 'conds'
        because it is evaluated on a read row, but 'check_option' is
        evaluated on a row with updated fields and needs those updated
        values to be correct.

        Example:
        CREATE VIEW v1 ... WHERE fld < 2 WITH CHECK OPTION
        UPDATE v1 SET fld=4 WHERE fld=1

        check_option is "(fld < 2)"
        conds is "(fld < 2) and (fld = 1)"

        optimize_cond() would propagate fld=1 to the first argument of
        the AND to create "(1 < 2) AND (fld = 1)". After this,
        check_option would be "(1 < 2)". But for check_option to work
        it must be evaluated with the *updated* value of fld: 4.
        Otherwise it will evaluate to true even when it should be
        false, which is the case for the UPDATE statement above.

        Thus, if there is a check_option, we do only the "safe" parts
        of optimize_cond(): Item_row -> Item_func_eq conversion (to
        enable range access) and removal of always true/always false
        predicates.

        An alternative to restricting this optimization of 'conds' in
        the presence of check_option: the Item tree of 'check_option'
        could be cloned before optimizing 'conds' and thereby avoid
        const replacement. However, at the moment there is no such
        thing as Item::clone().
      */
      conds= build_equal_items(thd, conds, NULL, false,
                               select_lex->join_list, &cond_equal);
      conds= remove_eq_conds(thd, conds, &result);
    }
    else
      conds= optimize_cond(thd, conds, &cond_equal, select_lex->join_list,
                           true, &result);

    if (result == Item::COND_FALSE)
    {
      limit= 0;                                 // Impossible WHERE
      if (thd->lex->describe)
      {
        error= explain_no_table(thd, "Impossible WHERE");
        goto exit_without_my_ok;
      }
    }
    if (conds)
    {
      conds= substitute_for_best_equal_field(conds, cond_equal, 0);
      conds->update_used_tables();
    }
  }
#ifdef WITH_PARTITION_STORAGE_ENGINE
  /*
    Also try a second time after locking, to prune when subqueries and
    stored programs can be evaluated.
  */
  if (table->part_info)
  {
    if (prune_partitions(thd, table, conds))
      DBUG_RETURN(1);
    if (table->all_partitions_pruned_away)
    {
      /* No matching records */
      if (thd->lex->describe)
      {
        error= explain_no_table(thd,
                                "No matching rows after partition pruning");
        goto exit_without_my_ok;
      }
      my_ok(thd);                               // No matching records
      DBUG_RETURN(0);
    }
  }
#endif
  /* Update the table->file->stats.records number */
  table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);

  table->mark_columns_needed_for_update(false/*mark_binlog_columns=false*/);

  select= make_select(table, 0, 0, conds, 0, &error);
  { // Enter scope for optimizer trace wrapper
    Opt_trace_object wrapper(&thd->opt_trace);
    wrapper.add_utf8_table(table);

    if (error || !limit ||
        (select && select->check_quick(thd, safe_update, limit)))
    {
      if (thd->lex->describe && !error && !thd->is_error())
      {
        error= explain_no_table(thd, "Impossible WHERE");
        goto exit_without_my_ok;
      }
      delete select;
      free_underlaid_joins(thd, select_lex);
      /*
        There was an error, or the error was already sent by
        the quick select evaluation.
        TODO: Add an error code output parameter to Item::val_xxx() methods.
        Currently they rely on the user checking the DA for
        errors when unwinding the stack after calling Item::val_xxx().
      */
      if (error || thd->is_error())
      {
        DBUG_RETURN(1);                         // Error in where
      }
      char buff[MYSQL_ERRMSG_SIZE];
      my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), 0, 0,
                  (ulong) thd->get_stmt_da()->current_statement_warn_count());
      my_ok(thd, 0, 0, buff);

      DBUG_PRINT("info", ("0 records updated"));
      DBUG_RETURN(0);
    }
  } // Ends scope for optimizer trace wrapper
  /* If running in safe sql mode, don't allow updates without keys */
  if (table->quick_keys.is_clear_all())
  {
    thd->server_status|= SERVER_QUERY_NO_INDEX_USED;
    if (safe_update && !using_limit)
    {
      my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
                 ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
      goto exit_without_my_ok;
    }
  }
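  /*
    Example of the safe-update check above (with SET sql_safe_updates=1
    and no usable index on 'b'):
      UPDATE t SET a= 1 WHERE b= 5;            -- rejected
      UPDATE t SET a= 1 WHERE b= 5 LIMIT 10;   -- accepted, LIMIT given
  */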
  if (select_lex->has_ft_funcs() && init_ftfuncs(thd, select_lex, 1))
    goto exit_without_my_ok;

  table->update_const_key_parts(conds);
  order= simple_remove_const(order, conds);

  used_index= get_index_for_order(order, table, select, limit,
                                  &need_sort, &reverse);
  if (need_sort)
  { // Assign table scan index to check below for modified key fields:
    used_index= table->file->key_used_on_scan;
  }
  if (used_index != MAX_KEY)
  { // Check if we are modifying a key that we use to search with:
    used_key_is_modified= is_key_used(table, used_index, table->write_set);
  }
  else if (select && select->quick)
  {
    /*
      select->quick != NULL and used_index == MAX_KEY happens for index
      merge and should be handled in a different way.
    */
    used_key_is_modified= (!select->quick->unique_key_range() &&
                           select->quick->is_keys_used(table->write_set));
  }

#ifdef WITH_PARTITION_STORAGE_ENGINE
  used_key_is_modified|= partition_key_modified(table, table->write_set);
#endif
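  /*
    If the key we scan with is itself being modified, updating rows in
    place could move them within the index so that the scan skips rows or
    sees them twice (the classic "Halloween problem"). When
    used_key_is_modified is set, the code below therefore finds all
    matching rows first and updates them in a separate pass.
  */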
  table->mark_columns_per_binlog_row_image();

  using_filesort= order && (need_sort || used_key_is_modified);
  if (thd->lex->describe)
  {
    const bool using_tmp_table= !using_filesort &&
                                (used_key_is_modified || order);
    error= explain_single_table_modification(thd, table, select, used_index,
                                             limit, using_tmp_table,
                                             using_filesort,
                                             true,
                                             used_key_is_modified);
    goto exit_without_my_ok;
  }
  if (used_key_is_modified || order)
  {
    /*
      We can't update the table directly; we must first find all matching
      rows before updating the table!
    */
    if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
      table->set_keyread(true);

    /* Note: we can skip the filesort if we scan in order on the used index */
    if (using_filesort)
    {
      /*
        Doing an ORDER BY; let filesort find and sort the rows we are going
        to update.
        NOTE: filesort will call table->prepare_for_position()
      */
      ha_rows examined_rows;
      ha_rows found_rows;
      Filesort fsort(order, limit, select);

      table->sort.io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
                                                   MYF(MY_FAE | MY_ZEROFILL));

      if ((table->sort.found_records= filesort(thd, table, &fsort, true,
                                               &examined_rows, &found_rows))
          == HA_POS_ERROR)
      {
        goto exit_without_my_ok;
      }
      thd->inc_examined_row_count(examined_rows);
      /*
        Filesort has already found and selected the rows we want to update,
        so we don't need the where clause
      */
      delete select;
      select= 0;
    }
    else
    {
      /*
        We are doing a search on a key that is updated. In this case
        we go through the matching rows, save a pointer to each of them
        and update the rows in a separate loop based on these pointers.
      */
      table->prepare_for_position();

      if (open_cached_file(&tempfile, mysql_tmpdir, TEMP_PREFIX,
                           DISK_BUFFER_SIZE, MYF(MY_WME)))
        goto exit_without_my_ok;

      /* If quick select is used, initialize it before retrieving rows. */
      if (select && select->quick && (error= select->quick->reset()))
      {
        close_cached_file(&tempfile);
        table->file->print_error(error, MYF(0));
        goto exit_without_my_ok;
      }
      table->file->try_semi_consistent_read(1);

      /*
        When we get here, we have one of the following options:
        A. used_index == MAX_KEY
           This means we should use a full table scan, and start it with
           an init_read_record call.
        B. used_index != MAX_KEY
           B.1 quick select is used, start the scan with init_read_record
           B.2 quick select is not used, this is a full index scan (with
               LIMIT). A full index scan must be started with
               init_read_record_idx.
      */
      if (used_index == MAX_KEY || (select && select->quick))
        error= init_read_record(&info, thd, table, select, 0, 1, FALSE);
      else
        error= init_read_record_idx(&info, thd, table, 1, used_index,
                                    reverse);

      if (error)
      {
        close_cached_file(&tempfile);
        goto exit_without_my_ok;
      }

      THD_STAGE_INFO(thd, stage_searching_rows_for_update);
      ha_rows tmp_limit= limit;

      while (!(error= info.read_record(&info)) && !thd->killed)
      {
        thd->inc_examined_row_count(1);
        bool skip_record= FALSE;
        if (select && select->skip_record(thd, &skip_record))
        {
          error= 1;
          /*
            Don't try unlocking the row if skip_record reported an error
            since in this case the transaction might have been rolled back
            already.
          */
          break;
        }
        if (!skip_record)
        {
          if (table->file->was_semi_consistent_read())
            continue;  /* repeat the read of the same row if it still exists */

          table->file->position(table->record[0]);
          if (my_b_write(&tempfile, table->file->ref,
                         table->file->ref_length))
          {
            error= 1;                           /* purecov: inspected */
            break;                              /* purecov: inspected */
          }
          if (!--limit && using_limit)
          {
            error= -1;
            break;
          }
        }
        else
          table->file->unlock_row();
      }
      if (thd->killed && !error)
        error= 1;                               // Aborted
      limit= tmp_limit;
      table->file->try_semi_consistent_read(0);
      end_read_record(&info);

      /* Change select to use tempfile */
      if (select)
      {
        select->set_quick(NULL);
        if (select->free_cond)
          delete select->cond;
        select->cond= NULL;
      }
      else
      {
        select= new SQL_SELECT;
        select->head= table;
      }
      if (reinit_io_cache(&tempfile, READ_CACHE, 0L, 0, 0))
        error= 1;                               /* purecov: inspected */
      select->file= tempfile;                   // Read row ptrs from this file
      if (error >= 0)
        goto exit_without_my_ok;
    }
    if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
      table->set_keyread(false);
  }
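  /*
    Phase two of the two-pass plan: when the row-id collection above was
    used, select->file now points at the tempfile, so init_read_record()
    below fetches each saved row by its handler position; with filesort,
    rows are read back from the sort result instead. Either way the update
    pass no longer depends on the index that was used to locate the rows.
  */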
  if (ignore)
    table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);

  if (select && select->quick && (error= select->quick->reset()))
  {
    table->file->print_error(error, MYF(0));
    goto exit_without_my_ok;
  }
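  /*
    Enable semi-consistent reads (an InnoDB optimization at low isolation
    levels): the handler may return the last committed version of a row
    that is locked by another transaction instead of waiting. If such a
    row matches the WHERE clause, was_semi_consistent_read() returns true
    and the read is repeated under a real lock; non-matching rows can be
    skipped without ever locking them.
  */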
  table->file->try_semi_consistent_read(1);

  if ((error= init_read_record(&info, thd, table, select, 0, 1, FALSE)))
    goto exit_without_my_ok;

  updated= found= 0;
  /*
    Generate an error (in TRADITIONAL mode) or warning
    when trying to set a NOT NULL field to NULL.
  */
  thd->count_cuted_fields= CHECK_FIELD_WARN;
  thd->cuted_fields= 0L;
  THD_STAGE_INFO(thd, stage_updating);

  transactional_table= table->file->has_transactions();
  thd->abort_on_warning= (!ignore && thd->is_strict_mode());

  if (table->prepare_triggers_for_update_stmt_or_event())
  {
    will_batch= FALSE;
  }
  else
    will_batch= !table->file->start_bulk_update();

  if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL) &&
      !ignore && !using_limit &&
      !(table->triggers && table->triggers->has_update_triggers()) &&
      select && select->quick && select->quick->index != MAX_KEY &&
      check_constant_expressions(values))
    read_removal= table->check_read_removal(select->quick->index);
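  /*
    Read-before-write removal (offered by e.g. NDB via
    HA_READ_BEFORE_WRITE_REMOVAL): when all assigned values are constants
    and rows are addressed through a single index, the handler may apply
    the update by key without reading the old row first. The conditions
    above exclude everything that needs the old row image: IGNORE, LIMIT,
    update triggers and non-constant right-hand sides.
  */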
  while (!(error= info.read_record(&info)) && !thd->killed)
  {
    thd->inc_examined_row_count(1);
    bool skip_record;
    if (!select || (!select->skip_record(thd, &skip_record) && !skip_record))
    {
      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      store_record(table, record[1]);
      if (fill_record_n_invoke_before_triggers(thd, fields, values, 0,
                                               table->triggers,
                                               TRG_EVENT_UPDATE))
        break;                                  /* purecov: inspected */

      found++;

      if (!records_are_comparable(table) || compare_records(table))
      {
        if ((res= table_list->view_check_option(thd, ignore)) !=
            VIEW_CHECK_OK)
        {
          found--;
          if (res == VIEW_CHECK_SKIP)
            continue;
          else if (res == VIEW_CHECK_ERROR)
          {
            error= 1;
            break;
          }
        }
        /*
          In order to keep MySQL legacy behavior, we do this update *after*
          the CHECK OPTION test. Proper behavior is probably to throw an
          error, though.
        */
        update.set_function_defaults(table);

        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) When specifically told to do so
            2) When it is not a good idea to batch anymore
            3) When it is necessary to send the batch for other reasons
               (one such reason is when READs must be performed)

            1) is covered by the exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row can execute the updates including or excluding
            the row defined in the call; this is up to the handler
            implementation and can vary from call to call.

            dup_key_found reports the number of duplicate keys found in
            those updates that were actually executed, and is only valid
            if the extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued.
            If it hasn't, the handler returns an error code instead, and
            the count must be ignored. Thus any handler that implements
            batching for UPDATE IGNORE must also handle this extra call
            properly.

            If a duplicate key is found on the record included in this
            call then it should be included in the count of dup_key_found
            and error should be set to 0 (only if these errors are
            ignored).
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated-= dup_key_found;
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
        if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
        {
          if (error != HA_ERR_RECORD_IS_THE_SAME)
            updated++;
          else
            error= 0;
        }
        else if (!ignore ||
                 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY |
                                                    HA_CHECK_FK_ERROR))
        {
          /*
            If (ignore && the error is ignorable) we don't have to
            do anything; otherwise...
          */
          myf flags= 0;

          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY |
                                                 HA_CHECK_FK_ERROR))
            flags|= ME_FATALERROR;      /* Other handler errors are fatal */

          table->file->print_error(error, MYF(flags));
          error= 1;
          break;
        }
        else if (ignore && !table->file->is_fatal_error(error,
                                                        HA_CHECK_FK_ERROR))
          warn_fk_constraint_violation(thd, table, error);
      }
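      /*
        To summarize the error handling above: HA_ERR_RECORD_IS_THE_SAME
        counts as success without incrementing 'updated'; under IGNORE,
        ignorable errors are suppressed (FK violations additionally raise
        a warning); any other error is printed and aborts the loop.
      */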
      if (table->triggers &&
          table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                            TRG_ACTION_AFTER, TRUE))
      {
        error= 1;
        break;
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in the most common situations: no
          batching has occurred; batching was supposed to occur but no
          updates were made; or the batch execution completed without
          error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to
          check them, and if there was no error but duplicate keys were
          found we need to continue, since those are not counted against
          the limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report errors for duplicate keys if
              they are ignored. This is a requirement on batching handlers.
            */
            table->file->print_error(error, MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors, or
            duplicate keys were found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found;  // limit is 0 here; restore the dup-key count
          updated-= dup_key_found;
        }
        else
        {
          error= -1;                            // Simulate end of file
          break;
        }
      }
    }
    /*
      Don't try unlocking the row if skip_record reported an error since in
      this case the transaction might have been rolled back already.
    */
    else if (!thd->is_error())
      table->file->unlock_row();
    else
    {
      error= 1;
      break;
    }
    thd->get_stmt_da()->inc_current_row_for_warning();
    if (thd->is_error())
    {
      error= 1;
      break;
    }
  }
  table->auto_increment_field_not_null= FALSE;
  dup_key_found= 0;
  /*
    Caching the killed status to pass as the arg to the query event
    constructor; the cached value cannot change, whereas the killed status
    can change (externally) after this point, and a change of the latter
    won't affect binlogging.
    It's assumed that if an error was set in combination with an effective
    killed status then the error is due to killing.
  */
  killed_status= thd->killed;           // get the status of the volatile
  // Simulated killing after the loop must be ineffective for binlogging
  DBUG_EXECUTE_IF("simulate_kill_bug27571",
                  {
                    thd->killed= THD::KILL_QUERY;
                  };);
  error= (killed_status == THD::NOT_KILLED) ? error : 1;

  if (error &&
      will_batch &&
      (loc_error= table->file->exec_bulk_update(&dup_key_found)))
    /*
      An error has occurred when a batched update was performed and
      returned an error indication. It cannot be an allowed duplicate key
      error, since we require the batching handler to treat those as
      normal behavior. Otherwise we simply subtract the number of
      duplicate key records found in the batched update.
    */
  {
    /* purecov: begin inspected */
    table->file->print_error(loc_error, MYF(ME_FATALERROR));
    error= 1;
    /* purecov: end */
  }
  else
    updated-= dup_key_found;

  if (will_batch)
    table->file->end_bulk_update();
  table->file->try_semi_consistent_read(0);

  if (read_removal)
  {
    /* Only the handler knows how many records were really written */
    updated= table->file->end_read_removal();
    if (!records_are_comparable(table))
      found= updated;
  }

  if (!transactional_table && updated > 0)
    thd->transaction.stmt.mark_modified_non_trans_table();
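  /*
    Marking the statement as having modified a non-transactional table
    means it can no longer be rolled back safely; cannot_safely_rollback()
    below then forces the statement into the binary log even when an error
    occurred, so that master and slaves stay consistent.
  */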
  end_read_record(&info);
  delete select;
  THD_STAGE_INFO(thd, stage_end);
  (void) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);

  /*
    Invalidate the table in the query cache if something changed.
    This must be before binlog writing and ha_autocommit_...
  */
  if (updated)
  {
    query_cache_invalidate3(thd, table_list, 1);
  }
  /*
    error < 0 means really no error at all: we processed all rows until the
    last one without error. error > 0 means an error (e.g. unique key
    violation and no IGNORE or REPLACE). error == 0 is also an error (if
    preparing the record or invoking before triggers fails). See
    ha_autocommit_or_rollback(error >= 0) and DBUG_RETURN(error >= 0) below.
    Sometimes we want to binlog even if we updated no rows, in case the
    user used it to make sure master and slave are in the same state.
  */
  if ((error < 0) || thd->transaction.stmt.cannot_safely_rollback())
  {
    if (mysql_bin_log.is_open())
    {
      int errcode= 0;
      if (error < 0)
        thd->clear_error();
      else
        errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);

      if (thd->binlog_query(THD::ROW_QUERY_TYPE,
                            thd->query(), thd->query_length(),
                            transactional_table, FALSE, FALSE, errcode))
      {
        error= 1;                               // Rollback update
      }
    }
  }
  DBUG_ASSERT(transactional_table || !updated ||
              thd->transaction.stmt.cannot_safely_rollback());
  free_underlaid_joins(thd, select_lex);

  /* If LAST_INSERT_ID(X) was used, report X */
  id= thd->arg_of_last_insert_id_function ?
    thd->first_successful_insert_id_in_prev_stmt : 0;

  if (error < 0)
  {
    char buff[MYSQL_ERRMSG_SIZE];
    my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found,
                (ulong) updated,
                (ulong) thd->get_stmt_da()->current_statement_warn_count());
    my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ?
          found : updated, id, buff);
    DBUG_PRINT("info", ("%ld records updated", (long) updated));
  }
  thd->count_cuted_fields= CHECK_FIELD_IGNORE;  /* Restore default handling
                                                   of truncated fields */
  thd->abort_on_warning= 0;
  *found_return= found;
  *updated_return= updated;
  DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
exit_without_my_ok:
  delete select;
  free_underlaid_joins(thd, select_lex);
  table->set_keyread(FALSE);
  thd->abort_on_warning= 0;
  DBUG_RETURN(error);
}