# automation/tinc/main/ext/qautils/gppylib/operations/dump.py
def execute(self):
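    """Locate the newest gp_dump report file, derive its 14-digit timestamp
    key, and verify the status and dump files on the master and on every
    primary segment.

    Returns {'exit_status': 0|1|2, 'timestamp': <key or 'n/a'>}, where 0
    means all checks passed, 1 means one or more post-dump checks failed,
    and 2 means no usable report file was found.
    """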
    # First, get the dump timestamp key from the newest gp_dump report (.rpt) file.
    path = self.report_dir if self.report_dir is not None else self.master_datadir
    path = os.path.join(path, DUMP_DIR, DUMP_DATE)
    reports = ListFilesByPattern(path, "gp_dump_*.rpt").run()
    if not reports:
        logger.error("Could not locate a report file on master.")
        return {'exit_status': 2, 'timestamp': 'n/a'}
    reports.sort(reverse=True)  # timestamped names sort lexicographically, so newest first
    report = reports[0]
    timestamp = report[-18:-4]  # the 14-digit timestamp just before ".rpt"
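    # For example, with an illustrative name "gp_dump_20120601093000.rpt":
    # "gp_dump_20120601093000.rpt"[-18:-4] == "20120601093000"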
    # A report older than this run's start time is left over from a previous dump.
    if int(timestamp) < int(self.timestamp_start):
        logger.error("Could not locate the newly generated report file on master.")
        return {'exit_status': 2, 'timestamp': 'n/a'}
    logger.info("Timestamp key = %s" % timestamp)
    if self.ddboost:
        # DDBoost dumps are not verified with local file checks; report success.
        return {'exit_status': 0, 'timestamp': timestamp}
    # Check the master's status and dump files.
    path = self.backup_dir if self.backup_dir is not None else self.master_datadir
    path = os.path.join(path, DUMP_DIR, timestamp[0:8])
    status_file = os.path.join(path, "%s%s" % (MASTER_STATUS_PREFIX, timestamp))
    dump_file = os.path.join(path, "%s%s" % (MASTER_DBDUMP_PREFIX, timestamp))
    if self.compress:
        dump_file += ".gz"
    try:
        PostDumpSegment(status_file=status_file,
                        dump_file=dump_file).run()
    except NoStatusFile:
        logger.warn('Status file %s not found on master' % status_file)
        return {'exit_status': 1, 'timestamp': timestamp}
    except StatusFileError:
        logger.warn('Status file %s on master indicates errors' % status_file)
        return {'exit_status': 1, 'timestamp': timestamp}
    except NoDumpFile:
        logger.warn('Dump file %s not found on master' % dump_file)
        return {'exit_status': 1, 'timestamp': timestamp}
    else:
        logger.info('Checked master status file and master dump file.')
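    # The master was checked in-process above; each primary segment below gets
    # the same check, wrapped in a RemoteOperation so it runs on the segment's
    # own host, and dispatched in parallel.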
    # Perform similar checks for primary segments
    operations = []
    gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port), utility=True)
    segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
    for seg in segs:
        path = self.backup_dir if self.backup_dir is not None else seg.getSegmentDataDirectory()
        path = os.path.join(path, DUMP_DIR, timestamp[0:8])
        status_file = os.path.join(path, "%s%d_%s" % (SEG_STATUS_PREFIX, seg.getSegmentDbId(), timestamp))
        dump_file = os.path.join(path, "%s%d_%s" % (SEG_DBDUMP_PREFIX, seg.getSegmentDbId(), timestamp))
        if self.compress:
            dump_file += ".gz"
        operations.append(RemoteOperation(PostDumpSegment(status_file=status_file,
                                                          dump_file=dump_file),
                                          seg.getSegmentHostName()))
    ParallelOperation(operations, self.batch_default).run()
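    # get_ret() returns each remote PostDumpSegment's result and re-raises any
    # exception it hit on the segment host, so failures are tallied per host
    # without aborting the loop.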
    success = 0
    for remote in operations:
        host = remote.host
        status_file = remote.operation.status_file
        dump_file = remote.operation.dump_file
        try:
            remote.get_ret()
        except NoStatusFile:
            logger.warn('Status file %s not found on %s' % (status_file, host))
        except StatusFileError:
            logger.warn('Status file %s on %s indicates errors' % (status_file, host))
        except NoDumpFile:
            logger.warn('Dump file %s not found on %s' % (dump_file, host))
        else:
            success += 1
    if success < len(operations):
        logger.warn("Dump was unsuccessful. %d segment(s) failed post-dump checks." % (len(operations) - success))
        return {'exit_status': 1, 'timestamp': timestamp}
    return {'exit_status': 0, 'timestamp': timestamp}
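
# Illustrative only (not part of the original module): a minimal, self-contained
# sketch of the validation PostDumpSegment evidently performs, inferred from the
# three exception types handled above. The *Sketch exception classes and the
# status-file error scan are assumptions for illustration; the real
# implementation lives elsewhere in gppylib.
class NoStatusFileSketch(Exception): pass
class StatusFileErrorSketch(Exception): pass
class NoDumpFileSketch(Exception): pass

def post_dump_check_sketch(status_file, dump_file):
    """Raise if the status file is missing or records errors, or the dump file is missing."""
    if not os.path.exists(status_file):
        raise NoStatusFileSketch(status_file)
    with open(status_file) as f:
        if any('error' in line.lower() for line in f):  # assumed error heuristic
            raise StatusFileErrorSketch(status_file)
    if not os.path.exists(dump_file):
        raise NoDumpFileSketch(dump_file)

# Usage: post_dump_check_sketch(status_path, dump_path) returns None when both
# files pass, and raises one of the sketch exceptions otherwise.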