bool Sql_cmd_update::update_single_table()

in sql/sql_update.cc [282:1100]


bool Sql_cmd_update::update_single_table(THD *thd)
{
  DBUG_ENTER("Sql_cmd_update::update_single_table");

  myf           error_flags= MYF(0);            /**< Flag for fatal errors */
  /*
    Most recent handler error
    =  1: Some non-handler error
    =  0: Success
    = -1: No more rows to process, or reached limit
  */
  int           error= 0;

  SELECT_LEX   *const select_lex= lex->select_lex;
  SELECT_LEX_UNIT *const unit= lex->unit;
  TABLE_LIST   *const table_list= select_lex->get_table_list();
  TABLE_LIST   *const update_table_ref= table_list->updatable_base_table();
  TABLE        *const table= update_table_ref->table;

  DBUG_ASSERT(table->pos_in_table_list == update_table_ref);

  const bool transactional_table= table->file->has_transactions();

  const bool has_update_triggers=
    table->triggers && table->triggers->has_update_triggers();

  const bool has_after_triggers=
    has_update_triggers && table->triggers->has_triggers(TRG_EVENT_UPDATE,
                                                         TRG_ACTION_AFTER);

  List<Item> *update_field_list= &select_lex->item_list;

  if (unit->set_limit(thd, unit->global_parameters()))
    DBUG_RETURN(true);                      /* purecov: inspected */

  ha_rows limit= unit->select_limit_cnt;
  const bool using_limit= limit != HA_POS_ERROR;
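  // HA_POS_ERROR is the "no LIMIT" sentinel, so using_limit is true only
  // when the statement has an explicit LIMIT clause.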

  // Used to track whether there are no rows that need to be read
  bool no_rows= limit == 0;

  THD::killed_state killed_status= THD::NOT_KILLED;
  COPY_INFO update(COPY_INFO::UPDATE_OPERATION,
                   update_field_list, update_value_list);
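  /*
    Add columns with ON UPDATE function defaults (e.g. TIMESTAMP/DATETIME
    ... ON UPDATE CURRENT_TIMESTAMP) to the write set, so that they are
    refreshed even when not assigned explicitly.
  */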
  if (update.add_function_default_columns(table, table->write_set))
    DBUG_RETURN(true);

  const bool safe_update= thd->variables.option_bits & OPTION_SAFE_UPDATES;

  QEP_TAB_standalone qep_tab_st;
  QEP_TAB &qep_tab= qep_tab_st.as_QEP_TAB();

  Item *conds;
  ORDER *order= select_lex->order_list.first;
  if (select_lex->get_optimizable_conditions(thd, &conds, NULL))
    DBUG_RETURN(true);                      /* purecov: inspected */

  /*
    See if we can substitute expressions with equivalent generated
    columns in the WHERE and ORDER BY clauses of the UPDATE statement.
    It is unclear if this is best to do before or after the other
    substitutions performed by substitute_for_best_equal_field(). Do
    it here for now, to keep it consistent with how multi-table
    updates are optimized in JOIN::optimize().
  */
  if (conds || order)
    static_cast<void>(substitute_gc(thd, select_lex, conds, NULL, order));
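  /*
    A sketch of the substitution, assuming an indexed generated column:

      CREATE TABLE t1 (f1 INT, gc INT AS (f1 + 1) STORED, KEY (gc));
      UPDATE t1 SET f1 = 0 WHERE f1 + 1 > 9;

    substitute_gc() may rewrite "f1 + 1 > 9" into "gc > 9", making range
    access over the gc index possible.
  */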

  if (conds)
  {
    COND_EQUAL *cond_equal= NULL;
    Item::cond_result result;
    if (table_list->check_option)
    {
      /*
        If this UPDATE is on a view with CHECK OPTION, field references in
        'conds' must not be replaced by constants. The reason is that when
        'conds' is optimized, 'check_option' is also optimized (it is
        part of 'conds'). Const replacement is fine for 'conds'
        because it is evaluated on a read row, but 'check_option' is
        evaluated on a row with updated fields and needs those updated
        values to be correct.

        Example:
        CREATE VIEW v1 ... WHERE fld < 2 WITH CHECK OPTION
        UPDATE v1 SET fld=4 WHERE fld=1

        check_option is  "(fld < 2)"
        conds is         "(fld < 2) and (fld = 1)"

        optimize_cond() would propagate fld=1 to the first argument of
        the AND to create "(1 < 2) AND (fld = 1)". After this,
        check_option would be "(1 < 2)". But for check_option to work
        it must be evaluated with the *updated* value of fld: 4.
        Otherwise it will evaluate to true even when it should be
        false, which is the case for the UPDATE statement above.

        Thus, if there is a check_option, we do only the "safe" parts
        of optimize_cond(): Item_row -> Item_func_eq conversion (to
        enable range access) and removal of always true/always false
        predicates.

        An alternative to restricting this optimization of 'conds' in
        the presence of check_option: the Item-tree of 'check_option'
        could be cloned before optimizing 'conds' and thereby avoid
        const replacement. However, at the moment there is no such
        thing as Item::clone().
      */
      if (build_equal_items(thd, conds, &conds, NULL, false,
                            select_lex->join_list, &cond_equal))
        DBUG_RETURN(true);
      if (remove_eq_conds(thd, conds, &conds, &result))
        DBUG_RETURN(true);                      /* purecov: inspected */
    }
    else
    {
      if (optimize_cond(thd, &conds, &cond_equal, select_lex->join_list,
                        &result))
        DBUG_RETURN(true);
    }

    if (result == Item::COND_FALSE)
    {
      no_rows= true;                               // Impossible WHERE
      if (thd->lex->describe)
      {
        Modification_plan plan(thd, MT_UPDATE, table,
                               "Impossible WHERE", true, 0);
        bool err= explain_single_table_modification(thd, &plan, select_lex);
        DBUG_RETURN(err);
      }
    }
    if (conds)
    {
      conds= substitute_for_best_equal_field(conds, cond_equal, 0);
      if (conds == NULL)
        DBUG_RETURN(true);

      conds->update_used_tables();
    }
  }

  /*
    Also try a second time after locking, to prune when subqueries and
    stored programs can be evaluated.
  */
  if (table->part_info)
  {
    if (prune_partitions(thd, table, conds))
      DBUG_RETURN(true);                      /* purecov: inspected */
    if (table->all_partitions_pruned_away)
    {
      no_rows= true;

      if (thd->lex->describe)
      {
        Modification_plan plan(thd, MT_UPDATE, table,
                               "No matching rows after partition pruning",
                               true, 0);
        bool err= explain_single_table_modification(thd, &plan, select_lex);
        DBUG_RETURN(err);
      }
      my_ok(thd);
      DBUG_RETURN(false);
    }
  }
  // Initialize the cost model that will be used for this table
  table->init_cost_model(thd->cost_model());

  /* Update the table->file->stats.records number */
  table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);

  table->mark_columns_needed_for_update(thd, false/*mark_binlog_columns=false*/);
  if (table->vfield &&
      validate_gc_assignment(update_field_list, update_value_list, table))
    DBUG_RETURN(true);

  qep_tab.set_table(table);
  qep_tab.set_condition(conds);

  { // Enter scope for optimizer trace wrapper
    Opt_trace_object wrapper(&thd->opt_trace);
    wrapper.add_utf8_table(update_table_ref);

    if (!no_rows && conds != NULL)
    {
      Key_map keys_to_use(Key_map::ALL_BITS), needed_reg_dummy;
      QUICK_SELECT_I *qck;
      no_rows= test_quick_select(thd, keys_to_use, 0, limit, safe_update,
                                 ORDER_NOT_RELEVANT, &qep_tab,
                                 conds, &needed_reg_dummy, &qck) < 0;
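      // A negative return value from test_quick_select() means the range
      // optimizer proved that no rows can match the condition.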
      qep_tab.set_quick(qck);
      if (thd->is_error())
        DBUG_RETURN(true);
    }
    if (no_rows)
    {
      if (thd->lex->describe)
      {
        Modification_plan plan(thd, MT_UPDATE, table,
                               "Impossible WHERE", true, 0);
        bool err= explain_single_table_modification(thd, &plan, select_lex);
        DBUG_RETURN(err);
      }

      char buff[MYSQL_ERRMSG_SIZE];
      my_snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO), 0, 0,
                  (long) thd->get_stmt_da()->current_statement_cond_count());
      my_ok(thd, 0, 0, buff);
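      // The message uses the ER_UPDATE_INFO format, e.g.
      // "Rows matched: 0  Changed: 0  Warnings: 0".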

      DBUG_PRINT("info",("0 records updated"));
      DBUG_RETURN(false);
    }
  } // Ends scope for optimizer trace wrapper

  /* If running in safe sql mode, don't allow updates without keys */
  if (table->quick_keys.is_clear_all())
  {
    thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
    if (safe_update && !using_limit)
    {
      my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, MYF(0));
      DBUG_RETURN(true);
    }
  }
  if (select_lex->has_ft_funcs() && init_ftfuncs(thd, select_lex))
    DBUG_RETURN(true);                      /* purecov: inspected */

  if (table->update_const_key_parts(conds))
    DBUG_RETURN(true);

  order= simple_remove_const(order, conds);
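  /*
    simple_remove_const() drops ORDER BY elements that the WHERE clause
    fixes to a constant, e.g. in "WHERE key_col = 5 ORDER BY key_col" the
    ordering is redundant.
  */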
  bool need_sort;
  bool reverse= false;
  bool used_key_is_modified= false;
  uint used_index;
  {
    ORDER_with_src order_src(order, ESC_ORDER_BY);
    used_index= get_index_for_order(&order_src, &qep_tab, limit,
                                    &need_sort, &reverse);
  }
  if (need_sort)
  { // Assign table scan index to check below for modified key fields:
    used_index= table->file->key_used_on_scan;
  }
  if (used_index != MAX_KEY)
  { // Check if we are modifying a key that we use to search with:
    used_key_is_modified= is_key_used(table, used_index, table->write_set);
  }
  else if (qep_tab.quick())
  {
    /*
      qep_tab.quick() != NULL and used_index == MAX_KEY happens for index
      merge and should be handled in a different way.
    */
    used_key_is_modified= (!qep_tab.quick()->unique_key_range() &&
                           qep_tab.quick()->is_keys_used(table->write_set));
  }

  if (table->part_info)
    used_key_is_modified|= partition_key_modified(table, table->write_set);
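  /*
    Changing a column of the partitioning key may move the row to another
    partition, so such an update must take the same safe two-pass path as
    an update that modifies the key used for scanning.
  */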

  const bool using_filesort= order && need_sort;

  table->mark_columns_per_binlog_row_image(thd);

  /*
    WL#2955 will change this to only request JSON diffs when needed.
    For now, always request JSON diffs so that the code can be tested.
  */
  if (table->setup_partial_update(true /* will be changed by WL#2955 */))
    DBUG_RETURN(true);                          /* purecov: inspected */

  ha_rows updated_rows= 0;
  ha_rows found_rows= 0;

  READ_RECORD info;

  { // Start of scope for Modification_plan
    ha_rows rows;
    if (qep_tab.quick())
      rows= qep_tab.quick()->records;
    else if (!conds && !need_sort && limit != HA_POS_ERROR)
      rows= limit;
    else
    {
      update_table_ref->fetch_number_of_rows();
      rows= table->file->stats.records;
    }
    qep_tab.set_quick_optim();
    qep_tab.set_condition_optim();
    DEBUG_SYNC(thd, "before_single_update");
    Modification_plan plan(thd, MT_UPDATE, &qep_tab,
                           used_index, limit,
                           (!using_filesort && (used_key_is_modified || order)),
                           using_filesort, used_key_is_modified, rows);
    DEBUG_SYNC(thd, "planned_single_update");
    if (thd->lex->describe)
    {
      bool err= explain_single_table_modification(thd, &plan, select_lex);
      DBUG_RETURN(err);
    }

    if (used_key_is_modified || order)
    {
      /*
        We can't update the table directly; we must first find all
        matching rows before updating the table!
      */

      Key_map covering_keys_for_cond; // @todo - move this
      if (used_index < MAX_KEY && covering_keys_for_cond.is_set(used_index))
        table->set_keyread(true);

      /* Note: sorting is avoided when the used index already returns
         rows in the requested order */
      if (using_filesort)
      {
        /*
          Doing an ORDER BY;  Let filesort find and sort the rows we are going
          to update
          NOTE: filesort will call table->prepare_for_position()
        */
        ha_rows sort_examined_rows, sort_found_rows, sort_returned_rows;
        Filesort fsort(&qep_tab, order, limit);

        DBUG_ASSERT(table->sort.io_cache == NULL);
        table->sort.io_cache=
          (IO_CACHE*) my_malloc(key_memory_TABLE_sort_io_cache,
                                sizeof(IO_CACHE),
                                MYF(MY_FAE | MY_ZEROFILL));

        if (filesort(thd, &fsort, true, &sort_examined_rows,
                     &sort_found_rows, &sort_returned_rows))
          DBUG_RETURN(true);

        table->sort.found_records= sort_returned_rows;
        thd->inc_examined_row_count(sort_examined_rows);
        /*
          Filesort has already found and selected the rows we want to update,
          so we don't need the where clause
        */
        qep_tab.set_quick(NULL);
        qep_tab.set_condition(NULL);
      }
      else
      {
        /*
          We are doing a search on a key that is updated. In this case
          we go through the matching rows, save pointers to them, and
          update the rows in a separate loop based on those pointers.
        */
        table->prepare_for_position();

        /* If quick select is used, initialize it before retrieving rows. */
        if (qep_tab.quick() && (error= qep_tab.quick()->reset()))
        {
          if (table->file->is_fatal_error(error))
            error_flags|= ME_FATALERROR;

          table->file->print_error(error, error_flags);
          DBUG_RETURN(true);
        }
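        /*
          With semi-consistent reads (supported by InnoDB at lower
          isolation levels), the engine may return the last committed
          version of a row that another transaction holds locked instead
          of waiting for the lock; rows that then fail the WHERE clause
          can be skipped without locking.
        */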
        table->file->try_semi_consistent_read(1);

        /*
          When we get here, we have one of the following options:
          A. used_index == MAX_KEY
          This means we should use full table scan, and start it with
          init_read_record call
          B. used_index != MAX_KEY
          B.1 quick select is used, start the scan with init_read_record
          B.2 quick select is not used, this is full index scan (with LIMIT)
          Full index scan must be started with init_read_record_idx
        */

        if ((used_index == MAX_KEY || qep_tab.quick()) ?
             init_read_record(&info, thd, NULL, &qep_tab, 0, true, false) :
             init_read_record_idx(&info, thd, table, true, used_index, reverse))
          DBUG_RETURN(true);                      /* purecov: inspected */

        THD_STAGE_INFO(thd, stage_searching_rows_for_update);
        ha_rows tmp_limit= limit;

        IO_CACHE *tempfile= (IO_CACHE *)
          my_malloc(key_memory_TABLE_sort_io_cache,
                    sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL));

        if (open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX,
                             DISK_BUFFER_SIZE, MYF(MY_WME)))
        {
          my_free(tempfile);
          DBUG_RETURN(true);
        }

        while (!(error=info.read_record(&info)) && !thd->killed)
        {
          DBUG_ASSERT(!thd->is_error());
          thd->inc_examined_row_count(1);

          bool skip_record;
          if (qep_tab.skip_record(thd, &skip_record))
          {
            error= 1;
            /*
             Don't try unlocking the row if skip_record reported an error since
             in this case the transaction might have been rolled back already.
            */
            break;
          }
          if (skip_record)
          {
            table->file->unlock_row();
            continue;
          }
          if (table->file->was_semi_consistent_read())
            continue;  /* repeat the read of the same row if it still exists */

          table->file->position(table->record[0]);
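          // table->file->ref now holds the row's position ("rowid");
          // buffer it in the tempfile for the separate update pass below.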
          if (my_b_write(tempfile, table->file->ref,
                         table->file->ref_length))
          {
            error= 1; /* purecov: inspected */
            break; /* purecov: inspected */
          }
          if (!--limit && using_limit)
          {
            error= -1;
            break;
          }
        }

        if (thd->killed && !error)                   // Aborted
          error= 1; /* purecov: inspected */
        limit= tmp_limit;
        table->file->try_semi_consistent_read(0);
        end_read_record(&info);

        // Change reader to use tempfile
        if (reinit_io_cache(tempfile, READ_CACHE, 0L, 0, 0))
          error= 1; /* purecov: inspected */

        DBUG_ASSERT(table->sort.io_cache == NULL);
        /*
          After this assignment, init_read_record() will run, and decide to
          read from sort.io_cache. This cache will be freed when qep_tab is
          destroyed.
         */
        table->sort.io_cache= tempfile;
        qep_tab.set_quick(NULL);
        qep_tab.set_condition(NULL);
        if (error >= 0)
          DBUG_RETURN(error > 0);
      }
      if (used_index < MAX_KEY && covering_keys_for_cond.is_set(used_index))
        table->set_keyread(false);
      table->file->ha_index_or_rnd_end();
    }

    if (thd->lex->is_ignore())
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
  
    if (qep_tab.quick() && (error= qep_tab.quick()->reset()))
    {
      if (table->file->is_fatal_error(error))
        error_flags|= ME_FATALERROR;

      table->file->print_error(error, error_flags);
      DBUG_RETURN(true);
    }

    table->file->try_semi_consistent_read(1);
    if (init_read_record(&info, thd, NULL, &qep_tab, 0, true, false))
      DBUG_RETURN(true);                      /* purecov: inspected */

    /*
      Generate an error (in TRADITIONAL mode) or warning
      when trying to set a NOT NULL field to NULL.
    */
    thd->check_for_truncated_fields= CHECK_FIELD_WARN;
    thd->num_truncated_fields= 0L;
    THD_STAGE_INFO(thd, stage_updating);

    bool will_batch;
    // read_removal is only used by the NDB storage engine
    bool read_removal= false;

    if (has_after_triggers)
    {
      /*
        The table has AFTER UPDATE triggers that might access the subject
        table, and therefore might need the update to be done immediately.
        So we turn off batching.
      */
      (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
      will_batch= false;
    }
    else
    {
      // No after update triggers, attempt to start bulk update
      will_batch= !table->file->start_bulk_update();
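      // start_bulk_update() returns false when the handler supports
      // batched updates (the default implementation returns true),
      // hence the negation.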
    }
    if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL) &&
        !thd->lex->is_ignore() &&
        !using_limit &&
        !has_update_triggers &&
        qep_tab.quick() && qep_tab.quick()->index != MAX_KEY &&
        check_constant_expressions(update_value_list))
      read_removal= table->check_read_removal(qep_tab.quick()->index);
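    /*
      Read-before-write removal (HA_READ_BEFORE_WRITE_REMOVAL, used by
      NDB): the engine can apply the update from the index lookup alone,
      without reading rows first. This is only safe when nothing needs the
      read row, hence the guards above: no IGNORE, no LIMIT, no UPDATE
      triggers, and only constant values being assigned.
    */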

    // If the update is batched, we cannot do partial update, so turn it off.
    if (will_batch)
      table->cleanup_partial_update();          /* purecov: inspected */

    uint dup_key_found;

    while (true)
    {
      error= info.read_record(&info);
      if (error || thd->killed)
        break;
      thd->inc_examined_row_count(1);
      bool skip_record;
      if (qep_tab.skip_record(thd, &skip_record))
      {
        error= 1;
        break;
      }
      if (skip_record)
      {
        table->file->unlock_row();  // Row failed condition check, release lock
        thd->get_stmt_da()->inc_current_row_for_condition();
        continue;
      }
      DBUG_ASSERT(!thd->is_error());

      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      table->clear_partial_update_diffs();

      store_record(table,record[1]);
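      // record[1] now holds the pre-update row image; the handler update
      // call needs both the old (record[1]) and new (record[0]) versions.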
      if (fill_record_n_invoke_before_triggers(thd, &update,
                                               *update_field_list,
                                               *update_value_list, table,
                                               TRG_EVENT_UPDATE, 0))
      {
        error= 1;
        break;
      }
      found_rows++;

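      /*
        Call the storage engine only if the row actually changed.
        records_are_comparable() is false when the engine may return
        partial rows and the read set does not cover all updated columns;
        then a change must be assumed.
      */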
      if (!records_are_comparable(table) || compare_records(table))
      {
        int check_result= table_list->view_check_option(thd);
        if (check_result != VIEW_CHECK_OK)
        {
          found_rows--;
          if (check_result == VIEW_CHECK_SKIP)
            continue;
          else if (check_result == VIEW_CHECK_ERROR)
          {
            error= 1;
            break;
          }
        }

        /*
          In order to keep MySQL legacy behavior, we do this update *after*
          the CHECK OPTION test. Proper behavior is probably to throw an
          error, though.
        */
        update.set_function_defaults(table);
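        // e.g. refreshes TIMESTAMP ... ON UPDATE CURRENT_TIMESTAMP columns
        // that were not assigned explicitly in the SET list.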

        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) it is specifically told to do so,
            2) it is no longer a good idea to batch, or
            3) the batch must be sent for other reasons
               (one such reason is when READs must be performed).

            1) is covered by the exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row may execute the pending updates with or without
            including the row passed in the current call; this is up to the
            handler implementation and can vary from call to call.

            dup_key_found reports the number of duplicate keys found in
            those updates that were actually executed, but only if the
            extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued. If it
            hasn't, the handler returns an error code instead and this
            number can be ignored. Thus any handler that implements
            batching for UPDATE IGNORE must also handle this extra call
            properly.

            If a duplicate key is found on the record included in this
            call, it should be included in the count of dup_key_found,
            and error should be set to 0 (only if these errors are
            ignored).
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated_rows-= dup_key_found;
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
        if (error == 0)
          updated_rows++;
        else if (error == HA_ERR_RECORD_IS_THE_SAME)
          error= 0;
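          // The engine found the old and new row images identical; this is
          // not an error, but the row is not counted as updated.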
        else
        {
          if (table->file->is_fatal_error(error))
            error_flags|= ME_FATALERROR;

          table->file->print_error(error, error_flags);

          // The error can have been downgraded to warning by IGNORE.
          if (thd->is_error())
            break;
        }
      }

      if (!error && has_after_triggers &&
          table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                            TRG_ACTION_AFTER, TRUE))
      {
        error= 1;
        break;
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached the end in the most common situations: no
          batching occurred; batching was enabled but no updates were
          made; or the batch executed without error and without finding
          any duplicate keys. If the batched updates were performed with
          errors, we need to check them, and if there was no error but
          duplicate keys were found, we must continue, since those rows
          are not counted against the limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            DBUG_ASSERT(false);
            /*
              The handler should not report error of duplicate keys if they
              are ignored. This is a requirement on batching handlers.
            */
            if (table->file->is_fatal_error(error))
              error_flags|= ME_FATALERROR;

            table->file->print_error(error, error_flags);
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found; // limit is 0 here, so this restores the dup count
          updated_rows-= dup_key_found;
        }
        else
        {
          error= -1;                            // Simulate end of file
          break;
        }
      }

      thd->get_stmt_da()->inc_current_row_for_condition();
      DBUG_ASSERT(!thd->is_error());
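      // DBUG_ASSERT is compiled out in release builds, so also handle an
      // unexpected error state at runtime.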
      if (thd->is_error())
      {
        error= 1;
        break;
      }
    }

    table->auto_increment_field_not_null= false;
    dup_key_found= 0;
    /*
      Cache the killed status to pass as an argument to the query event
      constructor. The cached value cannot change, whereas the killed
      status can change (externally) from this point on, and such a change
      must not affect binlogging.
      It is assumed that if an error was set in combination with an
      effective killed status, then the error is due to the kill.
    */
    killed_status= thd->killed; // read the volatile member once
    // simulated killing after the loop must be ineffective for binlogging
    DBUG_EXECUTE_IF("simulate_kill_bug27571",
                    {
                      thd->killed= THD::KILL_QUERY;
                    };);
    if (killed_status != THD::NOT_KILLED)
      error= 1;
  
    int loc_error;
    if (error &&
        will_batch &&
        (loc_error= table->file->exec_bulk_update(&dup_key_found)))
      /*
        An error occurred while a batched update was being performed, and
        the handler returned an error indication. It cannot be an allowed
        duplicate key error, since we require the batching handler to
        treat those as normal behavior.

        Otherwise we simply subtract the number of duplicate-key records
        found in the batched update.
      */
    {
      /* purecov: begin inspected */
      error_flags= MYF(0);
      if (table->file->is_fatal_error(loc_error))
        error_flags|= ME_FATALERROR;

      table->file->print_error(loc_error, error_flags);
      error= 1;
      /* purecov: end */
    }
    else
      updated_rows-= dup_key_found;
    if (will_batch)
      table->file->end_bulk_update();
    table->file->try_semi_consistent_read(0);

    if (read_removal)
    {
      /* Only the handler knows how many records were actually written */
      updated_rows= table->file->end_read_removal();
      if (!records_are_comparable(table))
        found_rows= updated_rows;
    }

  } // End of scope for Modification_plan

  if (!transactional_table && updated_rows > 0)
    thd->get_transaction()->mark_modified_non_trans_table(
      Transaction_ctx::STMT);
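  /*
    Changes to a non-transactional table cannot be rolled back; recording
    this fact affects binlogging and rollback handling below.
  */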

  end_read_record(&info);

  /*
    Invalidate the table in the query cache if something changed.
    This must be done before binlog writing and ha_autocommit_or_rollback().
  */
  if (updated_rows > 0)
    query_cache.invalidate_single(thd, update_table_ref, true);
  
  /*
    error < 0 means really no error at all: we processed all rows until the
    last one without error. error > 0 means an error (e.g. unique key
    violation and no IGNORE or REPLACE). error == 0 is also an error (if
    preparing the record or invoking before triggers fails). See
    ha_autocommit_or_rollback(error>=0) and DBUG_RETURN(error>=0) below.
    Sometimes we want to binlog even if we updated no rows, in case the user
    used it to make sure master and slave are in the same state.
  */
  if ((error < 0) || thd->get_transaction()->cannot_safely_rollback(
      Transaction_ctx::STMT))
  {
    if (mysql_bin_log.is_open())
    {
      int errcode= 0;
      if (error < 0)
        thd->clear_error();
      else
        errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);

      if (thd->binlog_query(THD::ROW_QUERY_TYPE,
                            thd->query().str, thd->query().length,
                            transactional_table, FALSE, FALSE, errcode))
      {
        error= 1;                               // Rollback update
      }
    }
  }
  DBUG_ASSERT(transactional_table ||
              updated_rows == 0 ||
              thd->get_transaction()->cannot_safely_rollback(
                Transaction_ctx::STMT));

  // If LAST_INSERT_ID(X) was used, report X
  const ulonglong id= thd->arg_of_last_insert_id_function ?
                      thd->first_successful_insert_id_in_prev_stmt : 0;

  if (error < 0)
  {
    char buff[MYSQL_ERRMSG_SIZE];
    my_snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO),
                (long) found_rows, (long) updated_rows,
                (long) thd->get_stmt_da()->current_statement_cond_count());
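    // With CLIENT_FOUND_ROWS the client is sent the number of rows that
    // matched the WHERE clause; otherwise the number of rows actually
    // changed.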
    my_ok(thd, thd->get_protocol()->has_client_capability(CLIENT_FOUND_ROWS) ?
          found_rows : updated_rows, id, buff);
    DBUG_PRINT("info",("%ld records updated", (long) updated_rows));
  }
  thd->check_for_truncated_fields= CHECK_FIELD_IGNORE;
  thd->current_found_rows= found_rows;
  thd->current_changed_rows= updated_rows;
  // Following test is disabled, as we get RQG errors that are hard to debug
  //DBUG_ASSERT((error >= 0) == thd->is_error());
  DBUG_RETURN(error >= 0 || thd->is_error());
}