int mysql_load()

in sql/sql_load.cc [221:735]

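mysql_load() implements the server side of LOAD DATA [LOCAL] INFILE. In outline it: validates the separators, opens and locks the target table (or updatable view), resolves the column/variable list and the SET clause, locates and opens the input file (or requests it from the client for LOCAL), reads rows via READ_INFO in fixed-length, delimited, or XML format, and finally writes the statement (or its data blocks) to the binary log before reporting the result. The annotations below follow those phases.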

int mysql_load(THD *thd, sql_exchange *ex, TABLE_LIST *table_list,
               List<Item> &fields_vars, List<Item> &set_fields,
               List<Item> &set_values,
               enum enum_duplicates handle_duplicates,
               bool read_file_from_client)
{
  char name[FN_REFLEN];
  File file;
  int error= 0;
  const String *field_term= ex->field.field_term;
  const String *escaped=    ex->field.escaped;
  const String *enclosed=   ex->field.enclosed;
  bool is_fifo=0;
  SELECT_LEX *select= thd->lex->select_lex;
  LOAD_FILE_INFO lf_info;
  THD::killed_state killed_status= THD::NOT_KILLED;
  bool is_concurrent;
  bool transactional_table;
  const char *db = table_list->db;			// This is never null
  /*
    If the file name is given without a path, it is resolved relative to
    the directory of the current database; if no database is selected,
    the directory of the table being loaded is used instead.
  */
  const char *tdb= thd->db().str ? thd->db().str : db; // Result is never null
  ulong skip_lines= ex->skip_lines;
  DBUG_ENTER("mysql_load");

  /*
    Bug #34283
    mysqlbinlog leaves a tmpfile after termination if the binlog contains
    LOAD DATA INFILE, so in mixed mode we switch to row-based format to
    avoid the problem.
  */
  thd->set_current_stmt_binlog_format_row_if_mixed();

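  /* FIELDS ESCAPED BY and ENCLOSED BY accept at most one character each. */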
  if (escaped->length() > 1 || enclosed->length() > 1)
  {
    my_error(ER_WRONG_FIELD_TERMINATORS, MYF(0));
    DBUG_RETURN(TRUE);
  }

  /* Report problems with non-ascii separators */
  if (!escaped->is_ascii() || !enclosed->is_ascii() ||
      !field_term->is_ascii() ||
      !ex->line.line_term->is_ascii() || !ex->line.line_start->is_ascii())
  {
    push_warning(thd, Sql_condition::SL_WARNING,
                 WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED,
                 ER_THD(thd, WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED));
  } 

  if (open_and_lock_tables(thd, table_list, 0))
    DBUG_RETURN(true);

  THD_STAGE_INFO(thd, stage_executing);
  if (select->setup_tables(thd, table_list, false))
    DBUG_RETURN(true);

  if (run_before_dml_hook(thd))
    DBUG_RETURN(true);

  if (table_list->is_view() && select->resolve_derived(thd, false))
    DBUG_RETURN(true);                   /* purecov: inspected */

  TABLE_LIST *const insert_table_ref=
    table_list->is_updatable() &&        // View must be updatable
    !table_list->is_multiple_tables() && // Multi-table view not allowed
    !table_list->is_derived() ?          // derived tables not allowed
    table_list->updatable_base_table() : NULL;

  if (insert_table_ref == NULL ||
      check_key_in_view(thd, table_list, insert_table_ref))
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "LOAD");
    DBUG_RETURN(TRUE);
  }
  if (select->derived_table_count &&
      select->check_view_privileges(thd, INSERT_ACL, SELECT_ACL))
    DBUG_RETURN(true);                   /* purecov: inspected */

  if (table_list->is_merged())
  {
    if (table_list->prepare_check_option(thd))
      DBUG_RETURN(TRUE);

    if (handle_duplicates == DUP_REPLACE &&
        table_list->prepare_replace_filter(thd))
      DBUG_RETURN(true);
  }

  // Pass the check option down to the underlying table:
  insert_table_ref->check_option= table_list->check_option;
  /*
    Emit an error if we are loading data into a table that is also used
    in a subquery in the SET clause, just as we do for INSERT.

    The main thing to fix to remove this restriction is to ensure that the
    table is marked as 'used for insert', in which case we should never
    mark it as a 'const table' (i.e. one that has only one row).
  */
  if (unique_table(insert_table_ref, table_list->next_global, 0))
  {
    my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
    DBUG_RETURN(TRUE);
  }

  TABLE *const table= insert_table_ref->table;

  for (Field **cur_field= table->field; *cur_field; ++cur_field)
    (*cur_field)->reset_warnings();

  transactional_table= table->file->has_transactions();
  is_concurrent= (table_list->lock_descriptor().type ==
                  TL_WRITE_CONCURRENT_INSERT);
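  /* Recorded so that the binlogged statement can reproduce the
     CONCURRENT option when it is re-executed on a slave. */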

  if (!fields_vars.elements)
  {
    Field_iterator_table_ref field_iterator;
    field_iterator.set(table_list);
    for (; !field_iterator.end_of_fields(); field_iterator.next())
    {
      Item *item;
      if (!(item= field_iterator.create_item(thd)))
        DBUG_RETURN(TRUE);

      if (item->field_for_view_update() == NULL)
      {
        my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->item_name.ptr());
        DBUG_RETURN(true);
      }
      fields_vars.push_back(item->real_item());
    }
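    /* No explicit column list was given: every column of the table
       will be assigned from the file. */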
    bitmap_set_all(table->write_set);
    /*
      Let us also prepare the SET clause, although it is probably empty
      in this case.
    */
    if (setup_fields(thd, Ref_item_array(), set_fields, INSERT_ACL, NULL,
                     false, true) ||
        setup_fields(thd, Ref_item_array(), set_values, SELECT_ACL, NULL,
                     false, false))
      DBUG_RETURN(TRUE);
  }
  else
  {						// Part field list
    /*
      Because fields_vars may contain user variables,
      pass false for column_update in first call below.
    */
    if (setup_fields(thd, Ref_item_array(), fields_vars, INSERT_ACL, NULL,
                     false, false) ||
        setup_fields(thd, Ref_item_array(), set_fields, INSERT_ACL, NULL,
                     false, true))
      DBUG_RETURN(TRUE);

    /*
      Special updatability test is needed because fields_vars may contain
      a mix of column references and user variables.
    */
    Item *item;
    List_iterator<Item> it(fields_vars);
    while ((item= it++))
    {
      if ((item->type() == Item::FIELD_ITEM ||
           item->type() == Item::REF_ITEM) &&
          item->field_for_view_update() == NULL)
      {
        my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->item_name.ptr());
        DBUG_RETURN(true);
      }
    }
    /* We explicitly ignore the return value */
    (void)check_that_all_fields_are_given_values(thd, table, table_list);
    /* Fix the expressions in SET clause */
    if (setup_fields(thd, Ref_item_array(), set_values, SELECT_ACL, NULL,
                     false, false))
      DBUG_RETURN(TRUE);
  }

  const int escape_char= (escaped->length() && (ex->escaped_given() ||
                          !(thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)))
                          ? (*escaped)[0] : INT_MAX;
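  /*
    INT_MAX serves as the "no escape character" sentinel: no byte read
    from the input can ever compare equal to it, so escape processing is
    effectively disabled.
  */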

  /*
    * LOAD DATA INFILE fff INTO TABLE xxx SET columns2
      sets all columns, except if the file's row lacks some: in that case,
      defaults are set by read_fixed_length() and read_sep_field(),
      not by COPY_INFO.
    * LOAD DATA INFILE fff INTO TABLE xxx (columns1) SET columns2
      may need a default for columns other than columns1 and columns2.
  */
  const bool manage_defaults= fields_vars.elements != 0;
  COPY_INFO info(COPY_INFO::INSERT_OPERATION,
                 &fields_vars, &set_fields,
                 manage_defaults,
                 handle_duplicates, escape_char);

  if (info.add_function_default_columns(table, table->write_set))
    DBUG_RETURN(TRUE);

  prepare_triggers_for_insert_stmt(thd, table);

  uint tot_length=0;
  bool use_blobs= 0, use_vars= 0;
  List_iterator_fast<Item> it(fields_vars);
  Item *item;

  while ((item= it++))
  {
    Item *real_item= item->real_item();

    if (real_item->type() == Item::FIELD_ITEM)
    {
      Field *field= ((Item_field*)real_item)->field;
      if (field->flags & BLOB_FLAG)
      {
        use_blobs= 1;
        tot_length+= 256;			// Will be extended if needed
      }
      else
        tot_length+= field->field_length;
    }
    else if (item->type() == Item::STRING_ITEM)
      use_vars= 1;
  }
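  /*
    tot_length is only an initial size estimate for the read buffer: BLOB
    columns reserve 256 bytes up front and the buffer is extended on
    demand while reading.
  */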
  if (use_blobs && !ex->line.line_term->length() && !field_term->length())
  {
    my_error(ER_BLOBS_AND_NO_TERMINATED, MYF(0));
    DBUG_RETURN(TRUE);
  }
  if (use_vars && !field_term->length() && !enclosed->length())
  {
    my_error(ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR, MYF(0));
    DBUG_RETURN(TRUE);
  }

  if (read_file_from_client)
  {
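    /*
      LOCAL INFILE: ask the client to stream the file over the
      connection; file stays -1 so the reader pulls data from the
      network instead of a file descriptor.
    */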
    (void)net_request_file(thd->get_protocol_classic()->get_net(),
                           ex->file_name);
    file = -1;
  }
  else
  {
    if (!dirname_length(ex->file_name))
    {
      strxnmov(name, FN_REFLEN-1, mysql_real_data_home, tdb, NullS);
      (void) fn_format(name, ex->file_name, name, "",
                       MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
    }
    else
    {
      (void) fn_format(name, ex->file_name, mysql_real_data_home, "",
                       MY_RELATIVE_PATH | MY_UNPACK_FILENAME |
                       MY_RETURN_REAL_PATH);
    }

    if (thd->slave_thread)
    {
      Relay_log_info* rli= thd->rli_slave->get_c_rli();

      if (strncmp(rli->slave_patternload_file, name,
                  rli->slave_patternload_file_size))
      {
        /*
          LOAD DATA INFILE in the slave SQL Thread can only read from
          --slave-load-tmpdir. This should never happen. Please report a bug.
        */
        LogErr(ERROR_LEVEL, ER_LOAD_DATA_INFILE_FAILED_IN_UNEXPECTED_WAY);
        my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--slave-load-tmpdir");
        DBUG_RETURN(TRUE);
      }
    }
    else if (!is_secure_file_path(name))
    {
      /* Read only allowed from within dir specified by secure_file_priv */
      my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv");
      DBUG_RETURN(TRUE);
    }

#if !defined(_WIN32)
    MY_STAT stat_info;
    if (!my_stat(name, &stat_info, MYF(MY_WME)))
      DBUG_RETURN(TRUE);

    // If we are not in a slave thread, the file must be a non-symlinked
    // regular file or named pipe:
    if (!thd->slave_thread &&
        !((stat_info.st_mode & S_IFLNK) != S_IFLNK &&   // not a symlink
          ((stat_info.st_mode & S_IFREG) == S_IFREG ||  // regular file
           (stat_info.st_mode & S_IFIFO) == S_IFIFO)))  // named pipe
    {
      my_error(ER_TEXTFILE_NOT_READABLE, MYF(0), name);
      DBUG_RETURN(TRUE);
    }
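    /* Remember FIFO-ness: a named pipe is not seekable, so the reader
       must treat it differently from a regular file. */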
    if ((stat_info.st_mode & S_IFIFO) == S_IFIFO)
      is_fifo= 1;
#endif
    if ((file= mysql_file_open(key_file_load,
                               name, O_RDONLY, MYF(MY_WME))) < 0)
      DBUG_RETURN(TRUE);
  }

  READ_INFO read_info(file, tot_length,
                      ex->cs ? ex->cs : thd->variables.collation_database,
                      *field_term, *ex->line.line_start, *ex->line.line_term,
                      *enclosed,
                      info.escape_char, read_file_from_client, is_fifo);
  if (read_info.error)
  {
    if (file >= 0)
      mysql_file_close(file, MYF(0));   // no file is open when reading from the net
    DBUG_RETURN(TRUE);                  // Can't allocate buffers
  }

  if (mysql_bin_log.is_open())
  {
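    /*
      For statement-based logging, hook lf_info into the reader's
      IO_CACHE so each block read from the file can also be written to
      the binary log.
    */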
    lf_info.thd = thd;
    lf_info.logged_data_file = 0;
    lf_info.last_pos_in_file = HA_POS_ERROR;
    lf_info.log_delayed= transactional_table;
    read_info.set_io_cache_arg((void*) &lf_info);
  }

  thd->check_for_truncated_fields= CHECK_FIELD_WARN;
  thd->num_truncated_fields= 0L;
  /* Skip lines if there is a line terminator */
  if (ex->line.line_term->length() && ex->filetype != FILETYPE_XML)
  {
    /* ex->skip_lines needs to be preserved for logging */
    while (skip_lines > 0)
    {
      skip_lines--;
      if (read_info.next_line())
        break;
    }
  }

  if (!(error= read_info.error))
  {
    table->next_number_field= table->found_next_number_field;
    if (thd->lex->is_ignore() ||
        handle_duplicates == DUP_REPLACE)
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    if (handle_duplicates == DUP_REPLACE &&
        (!table->triggers ||
         !table->triggers->has_delete_triggers()))
      table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
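    /* A row-count estimate of 0 means "unknown row count"; the engine
       chooses its bulk-insert strategy accordingly. */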
    if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
      table->file->ha_start_bulk_insert((ha_rows) 0);
    table->copy_blobs=1;

    if (ex->filetype == FILETYPE_XML) /* load xml */
      error= read_xml_field(thd, info, insert_table_ref, fields_vars,
                            set_fields, set_values, read_info,
                            skip_lines);
    else if (!field_term->length() && !enclosed->length())
      error= read_fixed_length(thd, info, insert_table_ref, fields_vars,
                               set_fields, set_values, read_info,
                               skip_lines);
    else
      error= read_sep_field(thd, info, insert_table_ref, fields_vars,
                            set_fields, set_values, read_info,
                            *enclosed, skip_lines);
    if (thd->locked_tables_mode <= LTM_LOCK_TABLES &&
        table->file->ha_end_bulk_insert() && !error)
    {
      table->file->print_error(my_errno(), MYF(0));
      error= 1;
    }
    table->next_number_field=0;
  }
  if (file >= 0)
    mysql_file_close(file, MYF(0));
  free_blobs(table);				/* if pack_blob was used */
  table->copy_blobs=0;
  thd->check_for_truncated_fields= CHECK_FIELD_IGNORE;
  /* 
     simulated killing in the middle of per-row loop
     must be effective for binlogging
  */
  DBUG_EXECUTE_IF("simulate_kill_bug27571",
                  {
                    error=1;
                    thd->killed= THD::KILL_QUERY;
                  };);

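  /*
    Capture the kill status for binlogging: if the load completed without
    error, the logged statement must not carry a kill error code.
  */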
  killed_status= (error == 0) ? THD::NOT_KILLED : thd->killed;

  /*
    We must invalidate the table in query cache before binlog writing and
    ha_autocommit_...
  */
  query_cache.invalidate_single(thd, insert_table_ref, false);
  if (error)
  {
    if (read_file_from_client)
      read_info.skip_data_till_eof();

    if (mysql_bin_log.is_open())
    {
      {
        /*
          Make sure the last block (the one which caused the error) gets
          logged. This is needed because otherwise, after the
          Delete_file_log_event has been written to the binlog (not to
          read_info, which is a cache), the bad block would remain in
          read_info: pre_read is called whenever a new block is read from
          disk, but not at the end of the last block. At the end of
          mysql_load(), the destructor of read_info calls end_io_cache(),
          which flushes read_info, so we would finally have this in the
          binlog:

          Append_block # The last successful block
          Delete_file
          Append_block # The failing block

          which is nonsense. Or, for a small file, we could get:

          Create_file  # The failing block

          which is also nonsense: Delete_file is not written in this case,
          because Create_file has not been written yet; then, when
          read_info is destroyed, end_io_cache() is called, which writes
          Create_file.
        */
        read_info.end_io_cache();
        /* If the file was not empty, logged_data_file is true */
        if (lf_info.logged_data_file)
        {
          int errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);

          /*
            Since there is already an error, a possible failure to write
            to the binary log is ignored.
          */
          if (thd->get_transaction()->cannot_safely_rollback(
              Transaction_ctx::STMT))
            (void) write_execute_load_query_log_event(thd, ex,
                                                      table_list->db,
                                                      table_list->table_name,
                                                      is_concurrent,
                                                      handle_duplicates,
                                                      transactional_table,
                                                      errcode);
          else
          {
            Delete_file_log_event d(thd, db, transactional_table);
            (void) mysql_bin_log.write_event(&d);
          }
        }
      }
    }
    error= -1;				// Error on read
    goto err;
  }

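  /* Reuse the file-name buffer for the info text of the OK packet:
     record, deleted, skipped and warning counts. */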
  my_snprintf(name, sizeof(name),
              ER_THD(thd, ER_LOAD_INFO),
              (long) info.stats.records, (long) info.stats.deleted,
              (long) (info.stats.records - info.stats.copied),
              (long) thd->get_stmt_da()->current_statement_cond_count());

  if (mysql_bin_log.is_open())
  {
    /*
      We need to do the job that is normally done inside
      binlog_query() here, which is to ensure that the pending event
      is written before tables are unlocked and before any other
      events are written.  We also need to update the table map
      version for the binary log to mark that table maps are invalid
      after this point.
     */
    if (thd->is_current_stmt_binlog_format_row())
      error= thd->binlog_flush_pending_rows_event(TRUE, transactional_table);
    else
    {
      /*
        As already explained above, we need to call end_io_cache() or the last
        block will be logged only after Execute_load_query_log_event (which is
        wrong), when read_info is destroyed.
      */
      read_info.end_io_cache();
      if (lf_info.logged_data_file)
      {
        int errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
        error= write_execute_load_query_log_event(thd, ex,
                                                  table_list->db, table_list->table_name,
                                                  is_concurrent,
                                                  handle_duplicates,
                                                  transactional_table,
                                                  errcode);
      }

      /*
        Flushing the IO CACHE while writing the execute load query log event
        may result in error (for instance, because the max_binlog_size has been 
        reached, and rotation of the binary log failed).
      */
      error= error || mysql_bin_log.get_log_file()->error;
    }
    if (error)
      goto err;
  }

  /* The OK is sent to the client only after the binlog write and engine commit. */
  my_ok(thd, info.stats.copied + info.stats.deleted, 0L, name);
err:
  DBUG_ASSERT(table->file->has_transactions() ||
              !(info.stats.copied || info.stats.deleted) ||
              thd->get_transaction()->cannot_safely_rollback(
                Transaction_ctx::STMT));
  table->file->ha_release_auto_increment();
  table->auto_increment_field_not_null= FALSE;
  DBUG_RETURN(error);
}
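
mysql_load() returns 0 on success (after my_ok() has been sent) and non-zero on failure, with the error normally already reported via my_error(). Both paths run through the err: label, so the handler's auto-increment reservation is always released before returning.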