in sql/sql_update.cc [367:1182]
bool Sql_cmd_update::update_single_table(THD *thd) {
DBUG_TRACE;
myf error_flags = MYF(0); /**< Flag for fatal errors */
/*
Most recent error; holds a handler error code, or:
= 1: Some non-handler error
= 0: Success
= -1: No more rows to process, or reached limit
*/
int error = 0;
Query_block *const query_block = lex->query_block;
Query_expression *const unit = lex->unit;
Table_ref *const table_list = query_block->get_table_list();
Table_ref *const update_table_ref = table_list->updatable_base_table();
TABLE *const table = update_table_ref->table;
assert(table->pos_in_table_list == update_table_ref);
const bool transactional_table = table->file->has_transactions();
const bool has_update_triggers =
table->triggers && table->triggers->has_update_triggers();
const bool has_after_triggers =
has_update_triggers &&
table->triggers->has_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER);
Opt_trace_context *const trace = &thd->opt_trace;
if (unit->set_limit(thd, unit->global_parameters()))
return true; /* purecov: inspected */
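// A row count of HA_POS_ERROR means that no LIMIT clause was given.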
ha_rows limit = unit->select_limit_cnt;
const bool using_limit = limit != HA_POS_ERROR;
if (limit == 0 && thd->lex->is_explain()) {
Modification_plan plan(thd, MT_UPDATE, table, "LIMIT is zero", true, 0);
bool err = explain_single_table_modification(thd, thd, &plan, query_block);
return err;
}
// Used to track whether there are no rows that need to be read
bool no_rows = limit == 0;
THD::killed_state killed_status = THD::NOT_KILLED;
assert(CountHiddenFields(query_block->fields) == 0);
COPY_INFO update(COPY_INFO::UPDATE_OPERATION, &query_block->fields,
update_value_list);
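// Make sure columns with function defaults (e.g. ON UPDATE
// CURRENT_TIMESTAMP) are included in the write set, even when they are not
// explicitly assigned in the SET clause.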
if (update.add_function_default_columns(table, table->write_set)) return true;
const bool safe_update = thd->variables.option_bits & OPTION_SAFE_UPDATES;
assert(!(table->all_partitions_pruned_away || m_empty_query));
Item *conds = nullptr;
ORDER *order = query_block->order_list.first;
if (!no_rows && query_block->get_optimizable_conditions(thd, &conds, nullptr))
return true; /* purecov: inspected */
/*
See if we can substitute expressions with equivalent generated
columns in the WHERE and ORDER BY clauses of the UPDATE statement.
It is unclear if this is best to do before or after the other
substitutions performed by substitute_for_best_equal_field(). Do
it here for now, to keep it consistent with how multi-table
updates are optimized in JOIN::optimize().
*/
if (conds || order)
static_cast<void>(substitute_gc(thd, query_block, conds, nullptr, order));
if (conds != nullptr) {
if (table_list->check_option) {
// See the explanation in multi-table UPDATE code path
// (Query_result_update::prepare).
table_list->check_option->walk(&Item::disable_constant_propagation,
enum_walk::POSTFIX, nullptr);
}
COND_EQUAL *cond_equal = nullptr;
Item::cond_result result;
if (optimize_cond(thd, &conds, &cond_equal,
query_block->m_current_table_nest, &result))
return true;
if (result == Item::COND_FALSE) {
no_rows = true; // Impossible WHERE
if (thd->lex->is_explain()) {
Modification_plan plan(thd, MT_UPDATE, table, "Impossible WHERE", true,
0);
bool err =
explain_single_table_modification(thd, thd, &plan, query_block);
return err;
}
}
if (conds != nullptr) {
conds = substitute_for_best_equal_field(thd, conds, cond_equal, nullptr);
if (conds == nullptr) return true;
conds->update_used_tables();
}
}
/*
Also try a second time after locking, to prune when subqueries and
stored programs can be evaluated.
*/
if (table->part_info && !no_rows) {
if (prune_partitions(thd, table, query_block, conds))
return true; /* purecov: inspected */
if (table->all_partitions_pruned_away) {
no_rows = true;
if (thd->lex->is_explain()) {
Modification_plan plan(thd, MT_UPDATE, table,
"No matching rows after partition pruning", true,
0);
bool err =
explain_single_table_modification(thd, thd, &plan, query_block);
return err;
}
my_ok(thd);
return false;
}
}
// Initialize the cost model that will be used for this table
table->init_cost_model(thd->cost_model());
/* Update the table->file->stats.records number */
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
table->mark_columns_needed_for_update(thd, /*mark_binlog_columns=*/false);
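// Access path for the range scan chosen below (if any), and the join type
// derived from it; both feed the Modification_plan and EXPLAIN output.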
AccessPath *range_scan = nullptr;
join_type type = JT_UNKNOWN;
auto cleanup = create_scope_guard([&range_scan, table] {
if (range_scan != nullptr) ::destroy_at(range_scan);
table->set_keyread(false);
table->file->ha_index_or_rnd_end();
free_io_cache(table);
filesort_free_buffers(table, true);
});
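// Offer the WHERE condition to the storage engine (engine condition
// pushdown); the engine may evaluate all, part, or none of it during the
// scan.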
if (conds &&
thd->optimizer_switch_flag(OPTIMIZER_SWITCH_ENGINE_CONDITION_PUSHDOWN)) {
table->file->cond_push(conds);
}
{ // Enter scope for optimizer trace wrapper
Opt_trace_object wrapper(&thd->opt_trace);
wrapper.add_utf8_table(update_table_ref);
if (!no_rows && conds != nullptr) {
Key_map keys_to_use(Key_map::ALL_BITS), needed_reg_dummy;
MEM_ROOT temp_mem_root(key_memory_test_quick_select_exec,
thd->variables.range_alloc_block_size);
no_rows = test_quick_select(
thd, thd->mem_root, &temp_mem_root, keys_to_use, 0, 0,
limit, safe_update, ORDER_NOT_RELEVANT, table,
/*skip_records_in_range=*/false, conds, &needed_reg_dummy,
table->force_index, query_block, &range_scan) < 0;
if (thd->is_error()) return true;
}
if (no_rows) {
if (thd->lex->is_explain()) {
Modification_plan plan(thd, MT_UPDATE, table, "Impossible WHERE", true,
0);
bool err =
explain_single_table_modification(thd, thd, &plan, query_block);
return err;
}
char buff[MYSQL_ERRMSG_SIZE];
snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO), 0L, 0L,
(long)thd->get_stmt_da()->current_statement_cond_count());
my_ok(thd, 0, 0, buff);
DBUG_PRINT("info", ("0 records updated"));
return false;
}
} // Ends scope for optimizer trace wrapper
/* If running in safe sql mode, don't allow updates without keys */
if (table->quick_keys.is_clear_all()) {
thd->server_status |= SERVER_QUERY_NO_INDEX_USED;
/*
No safe update error will be returned if:
1) Statement is an EXPLAIN, OR
2) LIMIT is present.
Append the first warning (if any) to the error message. This lets the
user understand why index access couldn't be chosen.
*/
if (!lex->is_explain() && safe_update && !using_limit) {
my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, MYF(0),
thd->get_stmt_da()->get_first_condition_message());
return true;
}
}
if (query_block->has_ft_funcs() && init_ftfuncs(thd, query_block))
return true; /* purecov: inspected */
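// Mark key parts that the WHERE clause binds to constants; ORDER BY
// elements on such constant parts can then be removed below.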
if (conds != nullptr) table->update_const_key_parts(conds);
order = simple_remove_const(order, conds);
bool need_sort;
bool reverse = false;
bool used_key_is_modified = false;
uint used_index;
{
ORDER_with_src order_src(order, ESC_ORDER_BY, /*const_optimized=*/true);
used_index = get_index_for_order(&order_src, table, limit, range_scan,
&need_sort, &reverse);
if (range_scan != nullptr) {
// May have been changed by get_index_for_order().
type = calc_join_type(range_scan);
}
}
if (need_sort) { // Assign table scan index to check below for modified key
// fields:
used_index = table->file->key_used_on_scan;
}
if (used_index != MAX_KEY) { // Check if we are modifying a key that is
// used to search with:
used_key_is_modified = is_key_used(table, used_index, table->write_set);
} else if (range_scan) {
/*
range_scan != nullptr and used_index == MAX_KEY happens for index
merge and should be handled in a different way.
*/
used_key_is_modified = (!unique_key_range(range_scan) &&
uses_index_on_fields(range_scan, table->write_set));
}
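// An update of the partitioning columns may move rows between partitions,
// so when more than one partition may be used, treat it like modifying a
// used key.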
if (table->part_info)
used_key_is_modified |= table->part_info->num_partitions_used() > 1 &&
partition_key_modified(table, table->write_set);
const bool using_filesort = order && need_sort;
table->mark_columns_per_binlog_row_image(thd);
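// Attempt to set up partial update support (e.g. for JSON columns), so that
// small changes can be stored and logged as logical diffs rather than as
// full values.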
if (prepare_partial_update(trace, query_block->fields, *update_value_list))
return true; /* purecov: inspected */
if (table->setup_partial_update()) return true; /* purecov: inspected */
ha_rows updated_rows = 0;
ha_rows found_rows = 0;
unique_ptr_destroy_only<Filesort> fsort;
unique_ptr_destroy_only<RowIterator> iterator;
{ // Start of scope for Modification_plan
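// Estimate how many rows will be examined, for the plan and EXPLAIN:
// prefer the range scan's estimate, then LIMIT (when there is no condition
// and no sorting), then the table's row count statistic.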
ha_rows rows;
if (range_scan)
rows = range_scan->num_output_rows();
else if (!conds && !need_sort && limit != HA_POS_ERROR)
rows = limit;
else {
update_table_ref->fetch_number_of_rows();
rows = table->file->stats.records;
}
DEBUG_SYNC(thd, "before_single_update");
Modification_plan plan(thd, MT_UPDATE, table, type, range_scan, conds,
used_index, limit,
(!using_filesort && (used_key_is_modified || order)),
using_filesort, used_key_is_modified, rows);
DEBUG_SYNC(thd, "planned_single_update");
if (thd->lex->is_explain()) {
bool err =
explain_single_table_modification(thd, thd, &plan, query_block);
return err;
}
if (thd->lex->is_ignore()) table->file->ha_extra(HA_EXTRA_IGNORE_DUP_KEY);
if (used_key_is_modified || order) {
/*
We can't update the table directly; we must first find all
matching rows before updating the table!
*/
/* Note: sorting is avoided if the rows are already ordered by the used
index */
if (using_filesort) {
/*
Doing an ORDER BY; let filesort find and sort the rows we are going
to update.
NOTE: filesort will call table->prepare_for_position().
*/
JOIN join(thd, query_block); // Only for holding examined_rows.
AccessPath *path = create_table_access_path(
thd, table, range_scan, /*table_ref=*/nullptr,
/*position=*/nullptr, /*count_examined_rows=*/true);
if (conds != nullptr) {
path = NewFilterAccessPath(thd, path, conds);
}
// Force filesort to sort by position.
fsort.reset(new (thd->mem_root) Filesort(
thd, {table}, /*keep_buffers=*/false, order, limit,
/*remove_duplicates=*/false,
/*force_sort_rowids=*/true, /*unwrap_rollup=*/false));
path = NewSortAccessPath(thd, path, fsort.get(), order,
/*count_examined_rows=*/false);
iterator = CreateIteratorFromAccessPath(
thd, path, &join, /*eligible_for_batch_mode=*/true);
// Prevent cleanup in JOIN::destroy() and in the cleanup condition
// guard, to avoid double-destroy of the SortingIterator.
table->sorting_iterator = nullptr;
if (iterator == nullptr || iterator->Init()) return true;
thd->inc_examined_row_count(join.examined_rows);
/*
Filesort has already found and selected the rows we want to update,
so we don't need the where clause
*/
if (range_scan != nullptr) {
::destroy_at(range_scan);
range_scan = nullptr;
}
conds = nullptr;
} else {
/*
We are doing a search on a key that is updated. In this case
we go through the matching rows, save their row IDs, and
update the rows in a separate loop based on those IDs. In the end,
we get a result file that looks exactly like what filesort uses
internally, which allows us to read from it
using SortFileIndirectIterator.
TODO: Find something less ugly.
*/
Key_map covering_keys_for_cond; // @todo - move this
if (used_index < MAX_KEY && covering_keys_for_cond.is_set(used_index))
table->set_keyread(true);
table->prepare_for_position();
table->file->try_semi_consistent_read(true);
auto end_semi_consistent_read = create_scope_guard(
[table] { table->file->try_semi_consistent_read(false); });
/*
When we get here, we have one of the following options:
A. used_index == MAX_KEY
This means we should use a full table scan, set up with
create_table_access_path()
B. used_index != MAX_KEY
B.1 a range scan is used; also set up with create_table_access_path()
B.2 no range scan is used; this is a full index scan (with LIMIT),
which must be set up with NewIndexScanAccessPath()
*/
AccessPath *path;
if (used_index == MAX_KEY || range_scan) {
path = create_table_access_path(thd, table, range_scan,
/*table_ref=*/nullptr,
/*position=*/nullptr,
/*count_examined_rows=*/false);
} else {
empty_record(table);
path = NewIndexScanAccessPath(thd, table, used_index,
/*use_order=*/true, reverse,
/*count_examined_rows=*/false);
}
iterator = CreateIteratorFromAccessPath(
thd, path, /*join=*/nullptr, /*eligible_for_batch_mode=*/true);
// Prevent cleanup in JOIN::destroy() and in the cleanup condition
// guard, to avoid double-destroy of the SortingIterator.
table->sorting_iterator = nullptr;
if (iterator == nullptr || iterator->Init()) {
return true;
}
THD_STAGE_INFO(thd, stage_searching_rows_for_update);
ha_rows tmp_limit = limit;
IO_CACHE *tempfile =
(IO_CACHE *)my_malloc(key_memory_TABLE_sort_io_cache,
sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL));
if (open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX,
DISK_BUFFER_SIZE, MYF(MY_WME))) {
my_free(tempfile);
return true;
}
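// Phase one: scan for matching rows and buffer their row IDs in the
// tempfile.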
while (!(error = iterator->Read()) && !thd->killed) {
assert(!thd->is_error());
thd->inc_examined_row_count(1);
if (conds != nullptr) {
const bool skip_record = conds->val_int() == 0;
if (thd->is_error()) {
error = 1;
/*
Don't try unlocking the row if evaluating the condition
reported an error, since in this case the transaction might
have been rolled back already.
*/
break;
}
if (skip_record) {
table->file->unlock_row();
continue;
}
}
if (table->file->was_semi_consistent_read())
continue; /* repeat the read of the same row if it still exists */
table->file->position(table->record[0]);
if (my_b_write(tempfile, table->file->ref, table->file->ref_length)) {
error = 1; /* purecov: inspected */
break; /* purecov: inspected */
}
if (!--limit && using_limit) {
error = -1;
break;
}
}
if (thd->killed && !error) // Aborted
error = 1; /* purecov: inspected */
limit = tmp_limit;
end_semi_consistent_read.reset();
if (used_index < MAX_KEY && covering_keys_for_cond.is_set(used_index))
table->set_keyread(false);
table->file->ha_index_or_rnd_end();
iterator.reset();
// Change reader to use tempfile
if (reinit_io_cache(tempfile, READ_CACHE, 0L, false, false))
error = 1; /* purecov: inspected */
if (error >= 0) {
close_cached_file(tempfile);
my_free(tempfile);
return error > 0;
}
iterator = NewIterator<SortFileIndirectIterator>(
thd, thd->mem_root, Mem_root_array<TABLE *>{table}, tempfile,
/*ignore_not_found_rows=*/false, /*has_null_flags=*/false,
/*examined_rows=*/nullptr);
if (iterator->Init()) return true;
if (range_scan != nullptr) {
::destroy_at(range_scan);
range_scan = nullptr;
}
conds = nullptr;
}
} else {
// No ORDER BY, and no used key is updated, so we can use a regular read.
iterator =
init_table_iterator(thd, table, range_scan,
/*table_ref=*/nullptr, /*position=*/nullptr,
/*ignore_not_found_rows=*/false,
/*count_examined_rows=*/false);
if (iterator == nullptr) return true; /* purecov: inspected */
}
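// Enable semi-consistent read: at lower isolation levels, InnoDB may
// return the last committed version of a row instead of waiting for a row
// lock, so non-matching rows can be skipped without blocking.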
table->file->try_semi_consistent_read(true);
auto end_semi_consistent_read = create_scope_guard(
[table] { table->file->try_semi_consistent_read(false); });
/*
Generate an error (in TRADITIONAL mode) or warning
when trying to set a NOT NULL field to NULL.
*/
thd->check_for_truncated_fields = CHECK_FIELD_WARN;
thd->num_truncated_fields = 0L;
THD_STAGE_INFO(thd, stage_updating);
bool will_batch;
/// read_removal is only used by NDB storage engine
bool read_removal = false;
if (has_after_triggers) {
/*
The table has AFTER UPDATE triggers that might access the subject
table, and therefore the update might need to be done immediately.
So we turn off batching.
*/
(void)table->file->ha_extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
will_batch = false;
} else {
// No after update triggers, attempt to start bulk update
will_batch = !table->file->start_bulk_update();
}
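// Read-before-write removal (used only by NDB, per the note above): when
// there is no IGNORE, LIMIT, or update trigger, the scan uses an index, and
// the new values are constants, the engine may apply the update without
// reading the existing rows first.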
if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL) &&
!thd->lex->is_ignore() && !using_limit && !has_update_triggers &&
range_scan && ::used_index(range_scan) != MAX_KEY &&
check_constant_expressions(*update_value_list))
read_removal = table->check_read_removal(::used_index(range_scan));
// If the update is batched, we cannot do partial update, so turn it off.
if (will_batch) table->cleanup_partial_update(); /* purecov: inspected */
uint dup_key_found;
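// Phase two / main loop: read qualifying rows (directly, via filesort
// output, or via the row-ID tempfile) and apply the update to each.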
while (true) {
error = iterator->Read();
if (error || thd->killed) break;
thd->inc_examined_row_count(1);
if (conds != nullptr) {
const bool skip_record = conds->val_int() == 0;
if (thd->is_error()) {
error = 1;
break;
}
if (skip_record) {
table->file
->unlock_row(); // Row failed condition check, release lock
thd->get_stmt_da()->inc_current_row_for_condition();
continue;
}
}
assert(!thd->is_error());
if (table->file->was_semi_consistent_read())
/*
Reviewer: iterator is reading from the to-be-updated table or
from a tmp file.
In the latter case, if the condition of this if() is true,
it is wrong to "continue"; indeed this will pick up the _next_ row of
tempfile; it will not re-read-with-lock the current row of tempfile,
as tempfile is not an InnoDB table and not doing semi consistent read.
If that happens, we're potentially skipping a row which was found
matching! OTOH, as the rowid was written to the tempfile, it means it
matched and thus we have already re-read it in the tempfile-write loop
above and thus locked it. So we shouldn't come here. How about adding
an assertion that if reading from tmp file we shouldn't come here?
*/
continue; /* repeat the read of the same row if it still exists */
table->clear_partial_update_diffs();
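// Save the current row into record[1]: the handler needs the before-image
// for ha_update_row(), and it is also used to detect whether the row
// actually changed.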
store_record(table, record[1]);
bool is_row_changed = false;
if (fill_record_n_invoke_before_triggers(
thd, &update, query_block->fields, *update_value_list, table,
TRG_EVENT_UPDATE, 0, false, &is_row_changed)) {
error = 1;
break;
}
found_rows++;
if (is_row_changed) {
/*
Default function and default expression values are filled in
before evaluating the view check option; otherwise, the check
option on a view using table(s) with default functions or default
expressions would break.
It is safe to not invoke CHECK OPTION for a VIEW if the record is
unchanged: in this case the row comes from the view and thus
should already satisfy the CHECK OPTION.
*/
int check_result = table_list->view_check_option(thd);
if (check_result != VIEW_CHECK_OK) {
if (check_result == VIEW_CHECK_SKIP)
continue;
else if (check_result == VIEW_CHECK_ERROR) {
error = 1;
break;
}
}
/*
Existing rows in a table should normally satisfy CHECK constraints, so
it should be safe to check constraints only for rows that have really
changed (i.e. after compare_records()).
In the future, once addition/enabling of CHECK constraints without
validation is supported, we might encounter old rows which do not
satisfy the currently enabled CHECK constraints. However, rejecting
no-op updates to such invalid pre-existing rows won't make them valid
and would probably be confusing for users. So it makes sense to stick
to the current behavior.
*/
if (invoke_table_check_constraints(thd, table)) {
if (thd->is_error()) {
error = 1;
break;
}
// continue when IGNORE clause is used.
continue;
}
if (will_batch) {
/*
Typically a batched handler can execute the batched jobs when:
1) Specifically told to do so
2) It is no longer a good idea to batch
3) It is necessary to send the batch for other reasons
(one such reason is when READs must be performed)
1) is covered by exec_bulk_update calls.
2) and 3) are handled by the bulk_update_row method.
bulk_update_row can execute the pending updates either including or
excluding the row passed in the call; this is up to the handler
implementation and can vary from call to call.
dup_key_found reports the number of duplicate keys found in those
updates actually executed, but only if the extra call with
HA_EXTRA_IGNORE_DUP_KEY has been issued. If it hasn't been issued,
an error code is returned instead and this number can be ignored.
Thus any handler that implements batching for UPDATE IGNORE must
also handle this extra call properly.
If a duplicate key is found on the record included in this call,
it should be included in the count of dup_key_found, and error
should be set to 0 (only if these errors are ignored).
*/
error = table->file->ha_bulk_update_row(
table->record[1], table->record[0], &dup_key_found);
limit += dup_key_found;
updated_rows -= dup_key_found;
} else {
/* Non-batched update */
error =
table->file->ha_update_row(table->record[1], table->record[0]);
}
if (error == 0)
updated_rows++;
else if (error == HA_ERR_RECORD_IS_THE_SAME)
error = 0;
else {
if (table->file->is_fatal_error(error)) error_flags |= ME_FATALERROR;
table->file->print_error(error, error_flags);
// The error can have been downgraded to warning by IGNORE.
if (thd->is_error()) break;
}
}
if (!error && has_after_triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, true)) {
error = 1;
break;
}
if (!--limit && using_limit) {
/*
We have reached end-of-file in the most common situations: no
batching occurred; batching was supposed to occur but no updates
were made; or the batch execution completed without error and
without finding any duplicate keys.
If the batched updates were performed with errors we need to check
them, and if there was no error but duplicate keys were found we
need to continue, since those are not counted towards the limit.
*/
if (will_batch &&
((error = table->file->exec_bulk_update(&dup_key_found)) ||
dup_key_found)) {
if (error) {
/*
ndbcluster is the only handler that returns an error at this
juncture
*/
assert(table->file->ht->db_type == DB_TYPE_NDBCLUSTER);
if (table->file->is_fatal_error(error))
error_flags |= ME_FATALERROR;
table->file->print_error(error, error_flags);
error = 1;
break;
}
/* purecov: begin inspected */
/*
Either an error was found and we are ignoring errors or there were
duplicate keys found with HA_IGNORE_DUP_KEY enabled. In both cases
we need to correct the counters and continue the loop.
*/
/*
Note that NDB disables batching when duplicate keys are to be
ignored. Any duplicate key found will result in an error returned
above.
*/
assert(false);
limit = dup_key_found; // limit is 0 when we get here, so add back the
// duplicates that were not counted towards it
updated_rows -= dup_key_found;
/* purecov: end */
} else {
error = -1; // Simulate end of file
break;
}
}
thd->get_stmt_da()->inc_current_row_for_condition();
assert(!thd->is_error());
if (thd->is_error()) {
error = 1;
break;
}
}
end_semi_consistent_read.reset();
dup_key_found = 0;
/*
Cache the killed status to pass as the argument to the query event
constructor; the cached value cannot change, whereas the killed status
can change (externally) after this point, and a change of the latter
must not affect binlogging.
It's assumed that if an error was set in combination with an effective
killed status, then the error is due to killing.
*/
killed_status = thd->killed; // get the status of the atomic
// simulated killing after the loop must be ineffective for binlogging
DBUG_EXECUTE_IF("simulate_kill_bug27571",
{ thd->killed = THD::KILL_QUERY; };);
if (killed_status != THD::NOT_KILLED) error = 1;
int loc_error;
if (error && will_batch &&
(loc_error = table->file->exec_bulk_update(&dup_key_found)))
/*
An error has occurred while a batched update was performed, and it
returned an error indication. It cannot be an allowed duplicate key
error, since we require the batching handler to treat that as normal
behavior. Otherwise we simply subtract the number of duplicate key
records found in the batched update.
*/
{
/* purecov: begin inspected */
error_flags = MYF(0);
if (table->file->is_fatal_error(loc_error)) error_flags |= ME_FATALERROR;
table->file->print_error(loc_error, error_flags);
error = 1;
/* purecov: end */
} else
updated_rows -= dup_key_found;
if (will_batch) table->file->end_bulk_update();
if (read_removal) {
/* Only handler knows how many records really was written */
updated_rows = table->file->end_read_removal();
if (!records_are_comparable(table)) found_rows = updated_rows;
}
} // End of scope for Modification_plan
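// Updates to a non-transactional table cannot be rolled back; record this
// so that binlogging and error handling below can take it into account.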
if (!transactional_table && updated_rows > 0)
thd->get_transaction()->mark_modified_non_trans_table(
Transaction_ctx::STMT);
iterator.reset();
/*
error < 0 means really no error at all: we processed all rows until the
last one without error. error > 0 means an error (e.g. unique key
violation and no IGNORE or REPLACE). error == 0 is also an error (if
preparing the record or invoking before triggers fails). See
ha_autocommit_or_rollback(error >= 0) and return error >= 0 below.
Sometimes we want to binlog even if we updated no rows, in case the user
used it to be sure master and slave are in the same state.
*/
if ((error < 0) ||
thd->get_transaction()->cannot_safely_rollback(Transaction_ctx::STMT)) {
if (mysql_bin_log.is_open()) {
int errcode = 0;
if (error < 0)
thd->clear_error();
else
errcode = query_error_code(thd, killed_status == THD::NOT_KILLED);
if (thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query().str,
thd->query().length, transactional_table, false,
false, errcode)) {
error = 1; // Rollback update
}
}
}
assert(transactional_table || updated_rows == 0 ||
thd->get_transaction()->cannot_safely_rollback(Transaction_ctx::STMT));
// If LAST_INSERT_ID(X) was used, report X
const ulonglong id = thd->arg_of_last_insert_id_function
? thd->first_successful_insert_id_in_prev_stmt
: 0;
if (error < 0) {
char buff[MYSQL_ERRMSG_SIZE];
snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO), (long)found_rows,
(long)updated_rows,
(long)thd->get_stmt_da()->current_statement_cond_count());
my_ok(thd,
thd->get_protocol()->has_client_capability(CLIENT_FOUND_ROWS)
? found_rows
: updated_rows,
id, buff);
DBUG_PRINT("info", ("%ld records updated", (long)updated_rows));
}
thd->check_for_truncated_fields = CHECK_FIELD_IGNORE;
thd->current_found_rows = found_rows;
assert(CountHiddenFields(*update_value_list) == 0);
// Following test is disabled, as we get RQG errors that are hard to debug
// assert((error >= 0) == thd->is_error());
return error >= 0 || thd->is_error();
}