// fn main() — excerpt from eden/mononoke/mononoke_hg_sync_job/src/main.rs [1136:1373]

fn main(fb: FacebookInit) -> Result<()> {
    let app = args::MononokeAppBuilder::new("Mononoke -> hg sync job")
        .with_advanced_args_hidden()
        .with_fb303_args()
        .build()
        .arg(
            Arg::with_name("hg-repo-ssh-path")
                .takes_value(true)
                .required(true)
                .help("Remote path to hg repo to replay to. Example: ssh://hg.vip.facebook.com//data/scm/fbsource"),
        )
        .arg(
            Arg::with_name("log-to-scuba")
                .long("log-to-scuba")
                .takes_value(false)
                .required(false)
                .help("If set job will log individual bundle sync states to Scuba"),
        )
        .arg(
            Arg::with_name("lock-on-failure")
                .long("lock-on-failure")
                .takes_value(false)
                .required(false)
                .help("If set, mononoke repo will be locked on sync failure"),
        )
        .arg(
            Arg::with_name("base-retry-delay-ms")
                .long("base-retry-delay-ms")
                .takes_value(true)
                .required(false)
                .help("initial delay between failures. It will be increased on the successive attempts")
        )
        .arg(
            Arg::with_name("retry-num")
                .long("retry-num")
                .takes_value(true)
                .required(false)
                .help("how many times to retry to sync a single bundle")
        )
        .arg(
            Arg::with_name("batch-size")
                .long("batch-size")
                .takes_value(true)
                .required(false)
                .help("maximum number of bundles allowed over a single hg peer")
        )
        .arg(
            Arg::with_name("single-bundle-timeout-ms")
                .long("single-bundle-timeout-ms")
                .takes_value(true)
                .required(false)
                .help("a timeout to send a single bundle to (if exceeded, the peer is restarted)")
        )
        .arg(
            Arg::with_name("verify-server-bookmark-on-failure")
                .long("verify-server-bookmark-on-failure")
                .takes_value(false)
                .required(false)
                .help("if present, check after a failure whether a server bookmark is already in the expected location")
        )
        .arg(
            Arg::with_name("repo-lock-sqlite")
                .long("repo-lock-sqlite")
                .takes_value(false)
                .required(false)
                .help("Enable sqlite for repo_lock access, path is in repo-lock-db-address"),
        )
        .arg(
            Arg::with_name("repo-lock-db-address")
                .long("repo-lock-db-address")
                .takes_value(true)
                .required(false)
                .help("Db with repo_lock table. Will be used to lock/unlock repo"),
        )
        .arg(
            Arg::with_name(HGSQL_GLOBALREVS_USE_SQLITE)
                .long(HGSQL_GLOBALREVS_USE_SQLITE)
                .takes_value(false)
                .required(false)
                .help("Use sqlite for hgsql globalrev sync (use for testing)."),
        )
        .arg(
            Arg::with_name(HGSQL_GLOBALREVS_DB_ADDR)
                .long(HGSQL_GLOBALREVS_DB_ADDR)
                .takes_value(true)
                .required(false)
                .help("unused"),
        )
        .arg(
            Arg::with_name(GENERATE_BUNDLES)
                .long(GENERATE_BUNDLES)
                .takes_value(false)
                .required(false)
                .help("Generate new bundles instead of using bundles that were saved on Mononoke during push"),
        )
        .arg(
            Arg::with_name(ARG_BOOKMARK_REGEX_FORCE_GENERATE_LFS)
                .long(ARG_BOOKMARK_REGEX_FORCE_GENERATE_LFS)
                .takes_value(true)
                .required(false)
                .requires(GENERATE_BUNDLES)
                .help("force generation of lfs bundles for bookmarks that match regex"),
        )
        .arg(
            Arg::with_name("verify-lfs-blob-presence")
                .long("verify-lfs-blob-presence")
                .takes_value(true)
                .required(false)
                .help("If generating bundles, verify lfs blob presence at this batch endpoint"),
        )
        .arg(
            Arg::with_name(ARG_USE_HG_SERVER_BOOKMARK_VALUE_IF_MISMATCH)
                .long(ARG_USE_HG_SERVER_BOOKMARK_VALUE_IF_MISMATCH)
                .takes_value(false)
                .required(false)
                .requires(GENERATE_BUNDLES)
                .help("Every bundle generated by hg sync job tells hg server \
                'move bookmark BM from commit A to commit B' where commit A is the previous \
                value of the bookmark BM and commit B is the new value of the bookmark. \
                Sync job takes commit A from bookmark update log entry. \
                However it's possible that server's bookmark BM doesn't point to the same commit \
                as bookmark update log entry. \
                While usually it's a sign of problem in some cases it's an expected behaviour. \
                If this option is set let's allow sync job to take previous value of bookmark \
                from the server"),
        )
        .arg(
            Arg::with_name(ARG_BOOKMARK_MOVE_ANY_DIRECTION)
                .long(ARG_BOOKMARK_MOVE_ANY_DIRECTION)
                .takes_value(false)
                .required(false)
                .help("This flag controls whether we tell the server to allow \
                the bookmark movement in any direction (adding pushvar NON_FAST_FORWARD=true). \
                However, the server checks its per bookmark configuration before move."),
        )
        .arg(
            Arg::with_name(ARG_DARKSTORM_BACKUP_REPO_ID)
            .long(ARG_DARKSTORM_BACKUP_REPO_ID)
            .takes_value(true)
            .required(false)
            .help("Start hg-sync-job for syncing prod repo and darkstorm backup mononoke repo \
            and use darkstorm-backup-repo-id value as a target for sync."),
        )
        .arg(
            Arg::with_name(ARG_DARKSTORM_BACKUP_REPO_NAME)
            .long(ARG_DARKSTORM_BACKUP_REPO_NAME)
            .takes_value(true)
            .required(false)
            .help("Start hg-sync-job for syncing prod repo and darkstorm backup mononoke repo \
            and use darkstorm-backup-repo-name as a target for sync."),
        )
        .group(
            ArgGroup::with_name(ARG_DARKSTORM_BACKUP_REPO_GROUP)
                .args(&[ARG_DARKSTORM_BACKUP_REPO_ID, ARG_DARKSTORM_BACKUP_REPO_NAME])
        )
        .arg(
            Arg::with_name(ARG_BYPASS_READONLY)
                .long(ARG_BYPASS_READONLY)
                .takes_value(false)
                .required(false)
                .help("This flag make it possible to push bundle into readonly repos \
                (by adding pushvar BYPASS_READONLY=true)."),
        )
        .about(
            "Special job that takes bundles that were sent to Mononoke and \
             applies them to mercurial",
        );

    let sync_once = SubCommand::with_name(MODE_SYNC_ONCE)
        .about("Syncs a single bundle")
        .arg(
            Arg::with_name("start-id")
                .long("start-id")
                .takes_value(true)
                .required(true)
                .help("id in the database table to start sync with"),
        );
    let sync_loop = SubCommand::with_name(MODE_SYNC_LOOP)
        .about("Syncs bundles one by one")
        .arg(
            Arg::with_name("start-id")
                .long("start-id")
                .takes_value(true)
                .required(true)
                .help("if current counter is not set then `start-id` will be used"),
        )
        .arg(
            Arg::with_name("loop-forever")
                .long("loop-forever")
                .takes_value(false)
                .required(false)
                .help(
                    "If set job will loop forever even if there are no new entries in db or \
                     if there was an error",
                ),
        )
        .arg(
            Arg::with_name("bundle-prefetch")
                .long("bundle-prefetch")
                .takes_value(true)
                .required(false)
                .help("How many bundles to prefetch"),
        )
        .arg(
            Arg::with_name("exit-file")
                .long("exit-file")
                .takes_value(true)
                .required(false)
                .help(
                    "If you provide this argument, the sync loop will gracefully exit \
                     once this file exists",
                ),
        )
        .arg(
            Arg::with_name("combine-bundles")
                .long("combine-bundles")
                .takes_value(true)
                .required(false)
                .help("How many bundles to combine into a single bundle before sending to hg"),
        );
    let app = app.subcommand(sync_once).subcommand(sync_loop);

    let matches = app.get_matches(fb)?;
    let logger = matches.logger();

    let ctx = CoreContext::new_with_logger(fb, logger.clone());

    let fut = run(ctx, &matches);

    block_execute(
        fut,
        fb,
        "hg_sync_job",
        logger,
        &matches,
        cmdlib::monitoring::AliveService,
    )
}