in core/lib/payload/copy.py [0:0]
def need_checksum(self):
    """
    Decide whether a checksum comparison should run for this copy,
    based on the options given and the schema changes being made
    """
    if self.skip_checksum:
        log.warning("Skip checksum because --skip-checksum is specified")
        return False
    # There's no point running a checksum compare for a selective dump
    if self.where:
        log.warning("Skip checksum because --where is given")
        return False
    # If the collation of a primary key column has been changed, it's
    # very likely that the checksums will mismatch, because the row
    # order produced by ORDER BY on the primary key can differ between
    # collations
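    # (For example, a case-insensitive collation sorts 'a' before 'B',
    # while a binary collation sorts 'B' (0x42) before 'a' (0x61), so
    # the two tables would be walked in different row orders.)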
    for pri_column in self._pk_for_filter:
        # Pick the matching column definition from each schema, or
        # None if the column doesn't exist on that side
        old_column = next(
            (col for col in self._old_table.column_list if col.name == pri_column),
            None,
        )
        new_column = next(
            (col for col in self._new_table.column_list if col.name == pri_column),
            None,
        )
        if old_column and new_column:
            if not is_equal(old_column.collate, new_column.collate):
                log.warning(
                    "Collation of primary key column {} has been "
                    "changed. Skip checksum".format(old_column.name)
                )
                return False
    # There's no way we can run the checksum chunk by chunk if the old
    # primary key is not covered by any index of the new schema
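    # (Chunked checksumming seeks to each chunk boundary by primary key
    # value, which requires an index over those columns on the new
    # table; this rationale is inferred from the checks below.)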
    if not self.validate_post_alter_pk():
        if self.skip_pk_coverage_check:
            log.warning(
                "Skip checksum because no unique index in the new "
                "table schema fully covers the old primary key "
                "combination for lookups"
            )
            return False
    else:
        # Having enough index coverage for the primary key doesn't
        # necessarily mean we can use that index for checksum; it also
        # has to be a unique index. Skip checksum if there's no such
        # index
        if not self.find_coverage_index():
            log.warning(
                "Skip checksum because no unique index in the new "
                "table schema fully covers the old primary key "
                "combination for lookups"
            )
            return False
    return True
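
# NOTE: is_equal() above comes from elsewhere in this package. As an
# illustration only, a null-safe comparison with the semantics the call
# site implies (two unspecified collations compare equal) would be:
#
#     def is_equal(left, right):
#         # Treat None and '' both as "collation not specified"
#         return (left or "") == (right or "")
#
# This sketch is an assumption, not the repo's actual implementation.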