in core/lib/payload/copy.py [0:0]
def validate_post_alter_pk(self):
"""
As we force (primary) when replaying changes, we have to make sure
rows in new table schema can be accessed using old PK combination.
The logic here is to make sure the old table's primary key list equals
to the set which one of the new table's index prefix can form.
Otherwise there'll be a performance issue when replaying changes
based on old primary key combination.
Note that if old PK is (a, b), new PK is (b, a, c) is acceptable,
because for each combination of (a, b), it still can utilize the new
PK for row searching.
Same for old PK being (a, b, c), new PK is (a, b) because new PK is more
strict, so it will always return at most one row when using old PK columns
as WHERE condition.
However if the old PK is (a, b, c), new PK is (b, c, d). Then there's
a chance the changes may not be able to be replay efficiently. Because
using only column (b, c) for row searching may result in a huge number
of matched rows
"""
    # The new PK is an index candidate alongside the secondary indexes
    idx_on_new_table = [self._new_table.primary_key] + self._new_table.indexes
    old_pk_len = len(self._pk_for_filter)
    for idx in idx_on_new_table:
        log.debug("Checking prefix for {}".format(idx.name))
        # Only the leading columns of the index, up to the old PK length,
        # matter for prefix coverage
        idx_prefix = idx.column_list[:old_pk_len]
        idx_name_set = {col.name for col in idx_prefix}
        # An identical column set (order ignored) means this index prefix
        # covers the old PK
        if set(self._pk_for_filter) == idx_name_set:
            log.info("PK prefix on new table can cover PK from old table")
            return True
        # A unique index over a strict subset of the old PK columns also
        # works: the old PK values pin down at most one row
        if idx.is_unique and set(self._pk_for_filter) > idx_name_set:
            log.info("old PK can uniquely identify rows from new schema")
            return True
    return False
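

# A minimal, self-contained sketch of the same coverage check, useful for
# reasoning about the docstring examples above. The Column/Index classes
# here are hypothetical stand-ins, not the real model objects used by
# validate_post_alter_pk.
from typing import List, NamedTuple


class Column(NamedTuple):
    name: str


class Index(NamedTuple):
    name: str
    column_list: List[Column]
    is_unique: bool


def pk_is_covered(old_pk_names, new_indexes):
    # old_pk_names: column names of the old table's PK
    # new_indexes: PK plus secondary indexes on the new table
    old_pk_len = len(old_pk_names)
    for idx in new_indexes:
        prefix_names = {col.name for col in idx.column_list[:old_pk_len]}
        if set(old_pk_names) == prefix_names:
            return True  # index prefix covers the old PK as a set
        if idx.is_unique and set(old_pk_names) > prefix_names:
            return True  # old PK columns uniquely identify one row
    return False


# The three docstring cases:
cols = {c: Column(c) for c in "abcd"}
# old PK (a, b), new PK (b, a, c): covered (set-equal prefix)
assert pk_is_covered(
    ["a", "b"], [Index("PRIMARY", [cols["b"], cols["a"], cols["c"]], True)]
)
# old PK (a, b, c), new PK (a, b): covered (unique subset)
assert pk_is_covered(
    ["a", "b", "c"], [Index("PRIMARY", [cols["a"], cols["b"]], True)]
)
# old PK (a, b, c), new PK (b, c, d): not covered
assert not pk_is_covered(
    ["a", "b", "c"], [Index("PRIMARY", [cols["b"], cols["c"], cols["d"]], True)]
)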