int do_restore()

in storage/ndb/tools/restore/restore_main.cpp [1763:2442]


int do_restore(RestoreThreadData *thrdata) {
  init_progress();

  Vector<BackupConsumer *> &g_consumers = thrdata->m_consumers;
  char threadName[15] = "";
  if (opt_show_part_id)
    BaseString::snprintf(threadName, sizeof(threadName), "[part %u] ",
                         thrdata->m_part_id);
  restoreLogger.setThreadPrefix(threadName);

  /**
   * we must always load meta data, even if we will only print it to stdout
   */

  restoreLogger.log_debug("Start restoring meta data");

  RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId,
                           thrdata->m_part_id, ga_part_count);
#ifdef ERROR_INSERT
  if (_error_insert > 0) {
    metaData.error_insert(_error_insert);
  }
  for (Uint32 i = 0; i < g_consumers.size(); i++) {
    g_consumers[i]->error_insert(_error_insert);
  }
#endif
  restoreLogger.log_info("[restore_metadata] Read meta data file header");

  if (!metaData.readHeader()) {
    restoreLogger.log_error("Failed to read %s", metaData.getFilename());
    return NdbToolsProgramExitCode::FAILED;
  }

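  // Inspect the backup file header: log the backup's version and file format,
  // then reject file versions this restore program cannot handle (the
  // 5.1.3 - 5.1.9 replica-info bug, or backups newer than this program).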
  {
    const BackupFormat::FileHeader &tmp = metaData.getFileHeader();
    const Uint32 backupFileVersion = tmp.BackupVersion;
    const Uint32 backupNdbVersion = tmp.NdbVersion;
    const Uint32 backupMySQLVersion = tmp.MySQLVersion;

    char buf[NDB_VERSION_STRING_BUF_SZ];
    info.setLevel(254);

    if (backupFileVersion >= NDBD_RAW_LCP) {
      restoreLogger.log_info(
          "Backup from version : %s file format : %x",
          ndbGetVersionString(backupNdbVersion, backupMySQLVersion, 0, buf,
                              sizeof(buf)),
          backupFileVersion);
    } else {
      restoreLogger.log_info("Backup file format : %x", backupFileVersion);
    }

    /**
     * check whether we can restore the backup (right version).
     */
    // in these versions there was an error in how replica info was
    // stored on disk
    if (backupFileVersion >= MAKE_VERSION(5, 1, 3) &&
        backupFileVersion <= MAKE_VERSION(5, 1, 9)) {
      char new_buf[NDB_VERSION_STRING_BUF_SZ];
      restoreLogger.log_error(
          "Restore program incompatible with backup file versions between %s "
          "and %s",
          ndbGetVersionString(MAKE_VERSION(5, 1, 3), 0, 0, buf, sizeof(buf)),
          ndbGetVersionString(MAKE_VERSION(5, 1, 9), 0, 0, new_buf,
                              sizeof(new_buf)));
      return NdbToolsProgramExitCode::FAILED;
    }

    if (backupFileVersion > NDB_VERSION) {
      restoreLogger.log_error(
          "Restore program older than backup version. Not supported. Use new "
          "restore program");
      return NdbToolsProgramExitCode::FAILED;
    }
  }

  restoreLogger.log_debug("Load content");
  restoreLogger.log_info("[restore_metadata] Load content");

  int res = metaData.loadContent();

  restoreLogger.log_info("Stop GCP of Backup: %u", metaData.getStopGCP());
  restoreLogger.log_info("Start GCP of Backup: %u", metaData.getStartGCP());

  if (res == 0) {
    restoreLogger.log_error("Restore: Failed to load content");
    return NdbToolsProgramExitCode::FAILED;
  }
  restoreLogger.log_debug("Get number of Tables");
  restoreLogger.log_info("[restore_metadata] Get number of Tables");
  if (metaData.getNoOfTables() == 0) {
    restoreLogger.log_error("The backup contains no tables");
    return NdbToolsProgramExitCode::FAILED;
  }

  if (_print_sql_log && _print_log) {
    restoreLogger.log_debug(
        "Check to ensure that both print-sql-log and print-log options are not "
        "passed");
    restoreLogger.log_error(
        "Both print-sql-log and print-log options passed. Exiting...");
    return NdbToolsProgramExitCode::FAILED;
  }

  if (_print_sql_log) {
    restoreLogger.log_debug(
        "Check for tables with hidden PKs or column of type blob when "
        "print-sql-log option is passed");
    for (Uint32 i = 0; i < metaData.getNoOfTables(); i++) {
      const TableS *table = metaData[i];
      if (!(checkSysTable(table) && checkDbAndTableName(table))) continue;
      /* Blobs are stored as separate tables with names prefixed
       * with NDB$BLOB. This can be used to check if there are
       * any columns of type blob in the tables being restored */
      BaseString tableName(table->getTableName());
      Vector<BaseString> tableNameParts;
      tableName.split(tableNameParts, "/");
      if (tableNameParts[2].starts_with("NDB$BLOB")) {
        restoreLogger.log_error(
            "Found column of type blob with print-sql-log option set. "
            "Exiting...");
        return NdbToolsProgramExitCode::FAILED;
      }
      /* Hidden PKs are stored with the name $PK */
      int noOfPK = table->m_dictTable->getNoOfPrimaryKeys();
      for (int j = 0; j < noOfPK; j++) {
        const char *pkName = table->m_dictTable->getPrimaryKey(j);
        if (strcmp(pkName, "$PK") == 0) {
          restoreLogger.log_error(
              "Found hidden primary key with print-sql-log option set. "
              "Exiting...");
          return NdbToolsProgramExitCode::FAILED;
        }
      }
    }
  }

  restoreLogger.log_debug("Validate Footer");
  restoreLogger.log_info("[restore_metadata] Validate Footer");

  if (!metaData.validateFooter()) {
    restoreLogger.log_error("Restore: Failed to validate footer.");
    return NdbToolsProgramExitCode::FAILED;
  }
  restoreLogger.log_debug("Init Backup objects");
  Uint32 i;
  for (i = 0; i < g_consumers.size(); i++) {
    if (!g_consumers[i]->init(g_tableCompabilityMask)) {
      restoreLogger.log_error("Failed to initialize consumers");
      return NdbToolsProgramExitCode::FAILED;
    }
  }

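  // --exclude-missing-tables: presumably filters out backup tables that no
  // longer exist in the target database (see exclude_missing_tables(), not
  // shown here).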
  if (ga_exclude_missing_tables)
    exclude_missing_tables(metaData, thrdata->m_consumers);

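  // Set up column-value remappings on the backup metadata (presumably for the
  // --remap-column option); failure aborts the restore.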
  if (!setup_column_remappings(metaData)) {
    return NdbToolsProgramExitCode::FAILED;
  }

  /* report to clusterlog if applicable */
  for (i = 0; i < g_consumers.size(); i++)
    g_consumers[i]->report_started(ga_backupId, ga_nodeId);

  /* before syncing on m_barrier, check if any threads have already exited */
  if (ga_error_thread > 0) {
    return NdbToolsProgramExitCode::FAILED;
  }

  if (!thrdata->m_restore_meta) {
    /**
     * Only thread 1 is allowed to restore metadata objects. restore_meta
     * flag is set to true on thread 1, which causes consumer-restore to
     * actually restore the metadata objects,
     * e.g. g_consumer->object(tablespace) restores the tablespace
     *
     * Remaining threads have restore_meta = false, which causes
     * consumer-restore to query metadata objects and save metadata for
     * reference by later phases of restore
     * e.g. g_consumer->object(tablespace) queries+saves tablespace metadata
     *
     * So thread 1 must finish restoring all metadata objects before any other
     * thread is allowed to start metadata restore. Use CyclicBarrier to allow
     * all threads except thread-1 to arrive at barrier. Barrier will not be
     * opened until all threads arrive at it, so all threads will wait till
     * thread 1 arrives at barrier. When thread 1 completes metadata restore,
     * it arrives at barrier, opening barrier and allowing all threads to
     * proceed to next restore-phase.
     */
    if (!thrdata->m_barrier->wait()) {
      ga_error_thread = thrdata->m_part_id;
      return NdbToolsProgramExitCode::FAILED;
    }
  }
  restoreLogger.log_debug("Restore objects (tablespaces, ..)");
  restoreLogger.log_info(
      "[restore_metadata] Restore objects (tablespaces, ..)");
  for (i = 0; i < metaData.getNoOfObjects(); i++) {
    for (Uint32 j = 0; j < g_consumers.size(); j++)
      if (!g_consumers[j]->object(metaData.getObjType(i),
                                  metaData.getObjPtr(i))) {
        restoreLogger.log_error(
            "Restore: Failed to restore table: %s ... Exiting",
            metaData[i]->getTableName());
        return NdbToolsProgramExitCode::FAILED;
      }
    if (check_progress()) {
      info.setLevel(255);
      restoreLogger.log_info(" Object create progress: %u objects out of %u",
                             i + 1, metaData.getNoOfObjects());
    }
  }

  restoreLogger.log_debug("Handling index stat tables");
  for (i = 0; i < g_consumers.size(); i++) {
    if (!g_consumers[i]->handle_index_stat_tables()) {
      restoreLogger.log_error(
          "Restore: Failed to handle index stat tables ... Exiting ");
      return NdbToolsProgramExitCode::FAILED;
    }
  }

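  // One OutputStream per backup table: left NULL for tables that are filtered
  // out, pointed at ndbout.m_out for tables printed to stdout, or at a
  // per-table .txt file when a tab-separated dump directory (tab_path) is used.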
  Vector<OutputStream *> table_output(metaData.getNoOfTables());
  restoreLogger.log_debug("Restoring tables");
  restoreLogger.log_info("[restore_metadata] Restoring tables");

  for (i = 0; i < metaData.getNoOfTables(); i++) {
    const TableS *table = metaData[i];
    table_output.push_back(NULL);
    if (!checkDbAndTableName(table)) continue;
    if (is_included_sys_table(table)) {
      table_output[i] = ndbout.m_out;
    }
    if (checkSysTable(table)) {
      if (!tab_path || isBlobTable(table) || isIndex(table)) {
        table_output[i] = ndbout.m_out;
      } else {
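        // Dump this table's rows to a <table>.txt file under tab_path
        // (presumably set by the --tab option). Table names are stored as
        // <db>/<schema>/<table>, so skip the first two '/' separators to get
        // the bare table name used for the file name.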
        FILE *res;
        char filename[FN_REFLEN], tmp_path[FN_REFLEN];
        const char *table_name;
        table_name = table->getTableName();
        while (*table_name != '/') table_name++;
        table_name++;
        while (*table_name != '/') table_name++;
        table_name++;
        convert_dirname(tmp_path, tab_path, NullS);
        res = my_fopen(fn_format(filename, table_name, tmp_path, ".txt", 4),
                       opt_append ? O_WRONLY | O_APPEND | O_CREAT
                                  : O_WRONLY | O_TRUNC | O_CREAT,
                       MYF(MY_WME));
        if (res == 0) {
          return NdbToolsProgramExitCode::FAILED;
        }
        FileOutputStream *f = new FileOutputStream(res);
        table_output[i] = f;
      }
      for (Uint32 j = 0; j < g_consumers.size(); j++) {
        if (!g_consumers[j]->table(*table)) {
          restoreLogger.log_error(
              "Restore: Failed to restore table: `%s` ... Exiting ",
              table->getTableName());
          return NdbToolsProgramExitCode::FAILED;
        }
      }
    } else {
      for (Uint32 j = 0; j < g_consumers.size(); j++) {
        if (!g_consumers[j]->createSystable(*table)) {
          restoreLogger.log_error(
              "Restore: Failed to restore system table: `%s` ... Exiting",
              table->getTableName());
          return NdbToolsProgramExitCode::FAILED;
        }
      }
    }
    if (check_progress()) {
      info.setLevel(255);
      restoreLogger.log_info("Table create progress: %u tables out of %u",
                             i + 1, metaData.getNoOfTables());
    }
  }

  restoreLogger.log_debug("Save foreign key info");
  restoreLogger.log_info("[restore_metadata] Save foreign key info");
  for (i = 0; i < metaData.getNoOfObjects(); i++) {
    for (Uint32 j = 0; j < g_consumers.size(); j++) {
      if (!g_consumers[j]->fk(metaData.getObjType(i), metaData.getObjPtr(i))) {
        return NdbToolsProgramExitCode::FAILED;
      }
    }
  }

  restoreLogger.log_debug("Close tables");
  for (i = 0; i < g_consumers.size(); i++) {
    if (!g_consumers[i]->endOfTables()) {
      restoreLogger.log_error("Restore: Failed while closing tables");
      return NdbToolsProgramExitCode::FAILED;
    }
    if (!ga_disable_indexes && !ga_rebuild_indexes) {
      if (!g_consumers[i]->endOfTablesFK()) {
        restoreLogger.log_error("Restore: Failed while closing tables FKs");
        return NdbToolsProgramExitCode::FAILED;
      }
    }
  }

  /* before syncing on m_barrier, check if any threads have already exited */
  if (ga_error_thread > 0) {
    return NdbToolsProgramExitCode::FAILED;
  }

  if (thrdata->m_restore_meta) {
    // thread 1 arrives at barrier -> barrier opens -> all threads continue
    if (!thrdata->m_barrier->wait()) {
      ga_error_thread = thrdata->m_part_id;
      return NdbToolsProgramExitCode::FAILED;
    }
  }

  /* report to clusterlog if applicable */
  for (i = 0; i < g_consumers.size(); i++) {
    g_consumers[i]->report_meta_data(ga_backupId, ga_nodeId);
  }
  restoreLogger.log_debug("Iterate over data");
  restoreLogger.log_info("[restore_data] Start restoring table data");
  if (ga_restore || ga_print) {
    Uint32 fragmentsTotal = 0;
    Uint32 fragmentsRestored = 0;
    if (_restore_data || _print_data) {
      // Check table compatibility
      for (i = 0; i < metaData.getNoOfTables(); i++) {
        if ((checkSysTable(metaData, i) && checkDbAndTableName(metaData[i])) ||
            (strcmp(metaData[i]->getTableName(),
                    NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 &&
             ga_with_apply_status)) {
          TableS &tableS = *metaData[i];  // not const
          for (Uint32 j = 0; j < g_consumers.size(); j++) {
            if (!g_consumers[j]->table_compatible_check(tableS)) {
              restoreLogger.log_error(
                  "Restore: Failed to restore data, %s table structure "
                  "incompatible with backup's ... Exiting ",
                  tableS.getTableName());
              return NdbToolsProgramExitCode::FAILED;
            }
            if (tableS.m_staging && !g_consumers[j]->prepare_staging(tableS)) {
              restoreLogger.log_error(
                  "Restore: Failed to restore data, %s failed to prepare "
                  "staging table for data conversion ... Exiting",
                  tableS.getTableName());
              return NdbToolsProgramExitCode::FAILED;
            }
          }
        }
      }
      for (i = 0; i < metaData.getNoOfTables(); i++) {
        if (checkSysTable(metaData, i) && checkDbAndTableName(metaData[i])) {
          // blob table checks use data which is populated by table
          // compatibility checks
          TableS &tableS = *metaData[i];
          if (isBlobTable(&tableS)) {
            for (Uint32 j = 0; j < g_consumers.size(); j++) {
              if (!g_consumers[j]->check_blobs(tableS)) {
                restoreLogger.log_error(
                    "Restore: Failed to restore data, "
                    "%s table's blobs incompatible with backup's ... Exiting ",
                    tableS.getTableName());
                return NdbToolsProgramExitCode::FAILED;
              }
            }
          }
        }
      }

      RestoreDataIterator dataIter(metaData, &free_data_callback,
                                   (void *)thrdata);

      if (!dataIter.validateBackupFile()) {
        restoreLogger.log_error(
            "Unable to allocate memory for BackupFile constructor");
        return NdbToolsProgramExitCode::FAILED;
      }

      if (!dataIter.validateRestoreDataIterator()) {
        restoreLogger.log_error(
            "Unable to allocate memory for RestoreDataIterator constructor");
        return NdbToolsProgramExitCode::FAILED;
      }

      restoreLogger.log_info("[restore_data] Read data file header");

      // Read data file header
      if (!dataIter.readHeader()) {
        restoreLogger.log_error(
            "Failed to read header of data file. Exiting...");
        return NdbToolsProgramExitCode::FAILED;
      }

      restoreLogger.log_info("[restore_data] Restore fragments");

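      // Note: 'res' is presumably taken by reference by readFragmentHeader()
      // and getNextTuple(); the inline 'res = 0' / 'res = 1' resets it before
      // each call, and a negative value afterwards signals a read error
      // (checked after each loop below).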
      Uint32 fragmentCount = 0;
      Uint32 fragmentId;
      while (dataIter.readFragmentHeader(res = 0, &fragmentId)) {
        TableS *table = dataIter.getCurrentTable();
        OutputStream *output = table_output[table->getLocalId()];

        /**
         * Check whether we should skip the entire fragment
         */
        bool skipFragment = true;
        if (output == NULL) {
          restoreLogger.log_info("  Skipping fragment");
        } else {
          fragmentsTotal++;
          skipFragment =
              determine_slice_skip_fragment(table, fragmentId, fragmentCount);
          if (skipFragment) {
            restoreLogger.log_info("  Skipping fragment on this slice");
          } else {
            fragmentsRestored++;
          }
        }

        /**
         * Iterate over all rows stored in the data file for
         * this fragment
         */
        const TupleS *tuple;
#ifdef ERROR_INSERT
        Uint64 rowCount = 0;
#endif
        while ((tuple = dataIter.getNextTuple(res = 1, skipFragment)) != 0) {
          assert(output && !skipFragment);
#ifdef ERROR_INSERT
          if ((_error_insert == NDB_RESTORE_ERROR_INSERT_SKIP_ROWS) &&
              ((++rowCount % 3) == 0)) {
            restoreLogger.log_info("Skipping row on error insertion");
            continue;
          }
#endif
          OutputStream *tmp = ndbout.m_out;
          ndbout.m_out = output;
          for (Uint32 j = 0; j < g_consumers.size(); j++) {
            if (!g_consumers[j]->tuple(*tuple, fragmentId)) {
              restoreLogger.log_error(
                  "Restore: error occurred while restoring data. Exiting...");
              // wait for async transactions to complete
              for (i = 0; i < g_consumers.size(); i++)
                g_consumers[i]->endOfTuples();
              return NdbToolsProgramExitCode::FAILED;
            }
          }
          ndbout.m_out = tmp;
          if (check_progress())
            report_progress("Data file progress: ", dataIter);
        }  // while (tuple != NULL);

        if (res < 0) {
          restoreLogger.log_error(
              "Restore: An error occurred while reading data. Exiting...");
          return NdbToolsProgramExitCode::FAILED;
        }
        if (!dataIter.validateFragmentFooter()) {
          restoreLogger.log_error(
              "Restore: Error validating fragment footer. Exiting...");
          return NdbToolsProgramExitCode::FAILED;
        }
      }  // while (dataIter.readFragmentHeader(res))

      if (res < 0) {
        restoreLogger.log_error(
            "Restore: An error occurred while restoring data."
            "Exiting... res = %u",
            res);
        return NdbToolsProgramExitCode::FAILED;
      }

      dataIter.validateFooter();  // not implemented

      for (i = 0; i < g_consumers.size(); i++) g_consumers[i]->endOfTuples();

      /* report to clusterlog if applicable */
      for (i = 0; i < g_consumers.size(); i++) {
        g_consumers[i]->report_data(ga_backupId, ga_nodeId);
      }
    }

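    // Replay the backup log: row changes recorded while the backup ran are
    // applied through the consumers (or, for the print options, only printed),
    // skipping fragments not covered by this slice.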
    if (_restore_data || _print_log || _print_sql_log) {
      RestoreLogIterator logIter(metaData);

      restoreLogger.log_info("[restore_log] Read log file header");

      if (!logIter.readHeader()) {
        restoreLogger.log_error(
            "Failed to read header of data file. Exiting...");
        return NdbToolsProgramExitCode::FAILED;
      }

      const LogEntry *logEntry = 0;

      restoreLogger.log_info("[restore_log] Restore log entries");

      while ((logEntry = logIter.getNextLogEntry(res = 0)) != 0) {
        const TableS *table = logEntry->m_table;
        OutputStream *output = table_output[table->getLocalId()];
        if (!output) continue;
        if (check_slice_skip_fragment(table, logEntry->m_frag_id)) continue;
        for (Uint32 j = 0; j < g_consumers.size(); j++) {
          if (!g_consumers[j]->logEntry(*logEntry)) {
            restoreLogger.log_error(
                "Restore: Error restoring the data log. Exiting...");
            return NdbToolsProgramExitCode::FAILED;
          }
        }

        if (check_progress()) report_progress("Log file progress: ", logIter);
      }
      if (res < 0) {
        restoreLogger.log_error(
            "Restore: Error reading the data log. Exiting... res = %d", res);
        return NdbToolsProgramExitCode::FAILED;
      }
      logIter.validateFooter();  // not implemented
      for (i = 0; i < g_consumers.size(); i++) g_consumers[i]->endOfLogEntrys();

      /* report to clusterlog if applicable */
      for (i = 0; i < g_consumers.size(); i++) {
        g_consumers[i]->report_log(ga_backupId, ga_nodeId);
      }
    }

    /* move data from staging table to real table */
    if (_restore_data) {
      for (i = 0; i < metaData.getNoOfTables(); i++) {
        const TableS *table = metaData[i];
        if (table->m_staging) {
          for (Uint32 j = 0; j < g_consumers.size(); j++) {
            if (!g_consumers[j]->finalize_staging(*table)) {
              restoreLogger.log_error(
                  "Restore: Failed staging data to table: %s. Exiting...",
                  table->getTableName());
              return NdbToolsProgramExitCode::FAILED;
            }
          }
        }
      }
    }

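    // Final per-table pass: check for data truncations and let each consumer
    // finalize the restored table, then summarize slice coverage.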
    if (_restore_data) {
      for (i = 0; i < metaData.getNoOfTables(); i++) {
        const TableS *table = metaData[i];
        check_data_truncations(table);
        OutputStream *output = table_output[table->getLocalId()];
        if (!output) {
          continue;
        }
        for (Uint32 j = 0; j < g_consumers.size(); j++) {
          if (!g_consumers[j]->finalize_table(*table)) {
            restoreLogger.log_error(
                "Restore: Failed to finalize restore table: %s. Exiting...",
                metaData[i]->getTableName());
            return NdbToolsProgramExitCode::FAILED;
          }
        }
      }
      if (ga_num_slices != 1) {
        restoreLogger.log_info(
            "Restore: Slice id %u/%u restored %u/%u fragments.", ga_slice_id,
            ga_num_slices, fragmentsRestored, fragmentsTotal);
      };
    }
  }

  if (ga_error_thread > 0) {
    restoreLogger.log_error("Thread %u exits on error", thrdata->m_part_id);
    // thread 1 failed to restore metadata, exiting
    return NdbToolsProgramExitCode::FAILED;
  }

  if (ga_with_apply_status) {
    /**
     * Wait for all the threads to finish restoring data before attempting to
     * delete the tuple with server_id = 0 in ndb_apply_status table.
     * Later, the appropriate data for that tuple is generated when ndb_restore
     * is invoked with the restore-epoch option.
     */
    if (!thrdata->m_barrier->wait()) {
      ga_error_thread = thrdata->m_part_id;
      return NdbToolsProgramExitCode::FAILED;
    }
    for (i = 0; i < g_consumers.size(); i++) {
      if (!g_consumers[i]->delete_epoch_tuple()) {
        restoreLogger.log_error(
            "Restore: Failed to delete tuple with server_id=0");
        return NdbToolsProgramExitCode::FAILED;
      }
    }
  }

  if (ga_error_thread > 0) {
    restoreLogger.log_error("Thread %u exits on error", thrdata->m_part_id);
    return NdbToolsProgramExitCode::FAILED;
  }

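  // --restore-epoch: read the snapshot type from the backup log header and let
  // the consumers update ndb_apply_status so replication can be resumed from
  // the correct epoch.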
  if (ga_restore_epoch) {
    restoreLogger.log_info("[restore_epoch] Restoring epoch");
    RestoreLogIterator logIter(metaData);

    if (!logIter.readHeader()) {
      err << "Failed to read snapshot info from log file. Exiting..." << endl;
      return NdbToolsProgramExitCode::FAILED;
    }
    bool snapshotstart = logIter.isSnapshotstartBackup();
    for (i = 0; i < g_consumers.size(); i++)
      if (!g_consumers[i]->update_apply_status(metaData, snapshotstart)) {
        restoreLogger.log_error("Restore: Failed to restore epoch");
        return NdbToolsProgramExitCode::FAILED;
      }
  }

  if (ga_error_thread > 0) {
    restoreLogger.log_error("Thread %u exits on error", thrdata->m_part_id);
    return NdbToolsProgramExitCode::FAILED;
  }

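  // Summarize any temporary errors the consumers encountered (and presumably
  // retried) during the restore.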
  unsigned j;
  for (j = 0; j < g_consumers.size(); j++) {
    g_consumers[j]->log_temp_errors();
  }

  if (ga_rebuild_indexes) {
    /**
     * Index rebuild should not be allowed to start until all threads have
     * finished restoring data and epoch values are sorted out.
     * Wait until all threads have arrived at barrier, then allow all
     * threads to continue. Thread 1 will then rebuild indices, while all
     * other threads do nothing.
     */
    if (!thrdata->m_barrier->wait()) {
      ga_error_thread = thrdata->m_part_id;
      return NdbToolsProgramExitCode::FAILED;
    }
    restoreLogger.log_debug("Rebuilding indexes");
    restoreLogger.log_info("[rebuild_indexes] Rebuilding indexes");

    for (i = 0; i < metaData.getNoOfTables(); i++) {
      const TableS *table = metaData[i];
      if (!rebuildSysTableIdx(table)) continue;
      if (isBlobTable(table) || isIndex(table)) continue;
      for (Uint32 j = 0; j < g_consumers.size(); j++) {
        if (!g_consumers[j]->rebuild_indexes(*table)) {
          return NdbToolsProgramExitCode::FAILED;
        }
      }
    }
    for (Uint32 j = 0; j < g_consumers.size(); j++) {
      if (!g_consumers[j]->endOfTablesFK()) {
        return NdbToolsProgramExitCode::FAILED;
      }
    }
  }

  if (ga_error_thread > 0) {
    restoreLogger.log_error("Thread %u exits on error", thrdata->m_part_id);
    // a restore thread failed in an earlier phase, exiting
    return NdbToolsProgramExitCode::FAILED;
  }

  /* report to clusterlog if applicable */
  for (i = 0; i < g_consumers.size(); i++)
    g_consumers[i]->report_completed(ga_backupId, ga_nodeId);

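  // Close and delete the per-table file streams opened for tab-separated
  // output; entries pointing at stdout are left untouched.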
  for (i = 0; i < metaData.getNoOfTables(); i++) {
    if (table_output[i] && table_output[i] != ndbout.m_out) {
      my_fclose(((FileOutputStream *)table_output[i])->getFile(), MYF(MY_WME));
      delete table_output[i];
      table_output[i] = NULL;
    }
  }
  return NdbToolsProgramExitCode::OK;
}  // do_restore
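
For orientation, below is a minimal, self-contained sketch (not taken from
restore_main.cpp) of the barrier handshake described in the comments inside
do_restore(): every restore thread calls wait(), nobody proceeds until the last
party arrives (at the first synchronization point that is thread 1, after it
has restored the metadata objects), and a cancelled barrier makes wait() return
false, which the callers above turn into NdbToolsProgramExitCode::FAILED. The
ToyBarrier class is hypothetical and only mirrors the bool-returning wait()
used via thrdata->m_barrier; it is not the tool's CyclicBarrier implementation.

// Hypothetical illustration only -- not part of ndb_restore.
#include <condition_variable>
#include <mutex>

class ToyBarrier {
  std::mutex m_mutex;
  std::condition_variable m_cond;
  const unsigned m_parties;      // number of restore threads expected
  unsigned m_waiting = 0;
  unsigned long m_generation = 0;
  bool m_cancelled = false;

 public:
  explicit ToyBarrier(unsigned parties) : m_parties(parties) {}

  // Block until all parties have arrived. Returns false if the barrier was
  // cancelled, which is how a failing thread propagates its error.
  bool wait() {
    std::unique_lock<std::mutex> lock(m_mutex);
    const unsigned long gen = m_generation;
    if (++m_waiting == m_parties) {
      m_waiting = 0;
      ++m_generation;  // last arrival opens the barrier for this round
      m_cond.notify_all();
    } else {
      m_cond.wait(lock, [&] { return m_generation != gen || m_cancelled; });
    }
    return !m_cancelled;
  }

  // Called when a thread fails: wake all waiters with a failure result.
  void cancel() {
    std::lock_guard<std::mutex> lock(m_mutex);
    m_cancelled = true;
    m_cond.notify_all();
  }
};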