in src/dma/collector/workflows/readiness_check/_postgres/main.py [0:0]
def _check_max_worker_processes(self) -> None:
    """Check the source's `max_worker_processes` setting for migration readiness.

    Compares the collected `max_worker_processes` value against the number of
    databases found during collection and, per configured database variant,
    against that count plus the variant's extra replication subscriptions.
    Saves exactly one rule result per variant: ACTION_REQUIRED when the value
    is below the database count, WARNING when it is below the maximum that
    parallel migration might require, PASS otherwise.
    """
    rule_code = "MAX_WORKER_PROCESSES"
    url_link = "Refer to https://cloud.google.com/database-migration/docs/postgres/create-migration-job#specify-source-connection-profile-info for more info."

    # How many databases were discovered during collection (0 when the
    # collection table yielded no row).
    count_row = self.local_db.sql(
        "select count(*) from extended_collection_postgres_all_databases"
    ).fetchone()
    db_count = 0 if count_row is None else int(count_row[0])

    # Current max_worker_processes value captured from the source settings
    # (0 when the setting row is absent).
    setting_row = self.local_db.sql(
        "select c.setting_value as max_worker_processes from collection_postgres_settings c where c.setting_name='max_worker_processes';"
    ).fetchone()
    max_worker_processes = 0 if setting_row is None else int(setting_row[0])

    for cfg in self.rule_config:
        # Worst case: one subscription per database plus the variant's
        # extra replication subscriptions.
        max_required_subscriptions = db_count + cfg.extra_replication_subscriptions_required

        if max_worker_processes < db_count:
            status = ACTION_REQUIRED
            detail = f"Insufficient `max_worker_processes`: {max_worker_processes}, should be set to at least {db_count}. Up to {cfg.extra_replication_subscriptions_required} additional `worker_processes` might be required depending on the parallelism level set for migration. {url_link}"
        elif max_worker_processes < max_required_subscriptions:
            status = WARNING
            detail = f"`max_worker_processes` current value: {max_worker_processes}, this might need to be increased to {max_required_subscriptions} depending on the parallelism level set for migration. {url_link}"
        else:
            status = PASS
            detail = f"`max_worker_processes` current value: {max_worker_processes}, this meets or exceeds the maximum required value of {max_required_subscriptions}"

        self.save_rule_result(cfg.db_variant, rule_code, status, detail)