def select_table_into_outfile()

in core/lib/payload/copy.py [0:0]


    def select_table_into_outfile(self):
        """Dump the table's data to disk in chunked outfiles (Stage 2).

        When no primary key exists to chunk on, delegates to
        ``select_full_table_into_outfile`` and dumps everything in one
        file. Otherwise it loops, writing one numbered outfile per
        chunk, until a SELECT returns zero rows. Progress is logged at
        each 10% decile (only when the estimated chunk count exceeds
        10), and elapsed time is recorded in ``self.stats``.
        """
        log.info("== Stage 2: Dump ==")
        started_at = time.time()

        # Without an existing PK there is no chunking key, so the whole
        # table goes into a single outfile instead.
        if self.is_full_table_dump:
            log.info("Dumping full table in one go.")
            return self.select_full_table_into_outfile()

        chunk_id = 1
        has_range = False  # first chunk has no lower WHERE bound yet
        last_decile = 0
        while True:
            self.outfile_suffix_end = chunk_id
            chunk_path = "{}.{}".format(self.outfile, chunk_id)
            rows_dumped = self.select_chunk_into_outfile(chunk_path, has_range)
            if rows_dumped:
                # Advance the range window so the next SELECT starts
                # after the rows just written.
                self.refresh_range_start()
                has_range = True
                chunk_id += 1
            self.check_disk_free_space_reserved()
            # Report progress once per 10% decile; skip noisy logging
            # for small dumps (<= 10 estimated chunks).
            pct_done = int((float(chunk_id) / self.eta_chunks) * 100)
            decile = int(pct_done / 10)
            if decile > last_decile and self.eta_chunks > 10:
                log.info(
                    "Dump progress: {}/{}(ETA) chunks".format(
                        chunk_id, self.eta_chunks
                    )
                )
                last_decile = decile
            # The original loop condition checks after a full iteration,
            # so the empty final chunk still runs the disk-space and
            # progress steps above before exiting.
            if not rows_dumped:
                break

        self.commit()
        log.info("Dump finished")
        self.stats["time_in_dump"] = time.time() - started_at