scheduler.py [323:347]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Fixed anchor date for the bootstrap observation window; the next
    # `period_day` days of logs are read starting here.
    # NOTE(review): hard-coded date — confirm this is intentional before reuse.
    period_start = datetime.strptime("2024-10-22", "%Y-%m-%d")
    logging.info(f"Preparing the first df starting from {period_start}")

    # Header: start_time: str,job_id,template_id,duration,
    # uown_names,inputDataSize,outputDataSize,cputime, type
    # Load one day of Presto and Spark job logs for each day of the period,
    # then stack them into a single frame (Spark rows first, then Presto).
    df_presto = pd.concat([read_Presto(period_start + timedelta(days=i)) for i in range(period_day)])
    df_spark = pd.concat([read_Spark(period_start + timedelta(days=i)) for i in range(period_day)])
    df = pd.concat([df_spark, df_presto])
    # Total data volume per job row = input size + output size.
    df['totalDataSize'] = df['inputDataSize'] + df['outputDataSize']
    # Mean data volume per table, flattened into a {table: mean totalDataSize}
    # dict — presumably used as a per-table weight further down; verify in the
    # enclosing function.
    weight_group = df.groupby(['table']).agg(
        totalDataSize=('totalDataSize', 'mean')).reset_index()
    weight_lookup = weight_group.set_index('table').to_dict()['totalDataSize']

    logging.info(f"# of jobs: {len(df['job_id'].unique())}")

    # Advance the window start past the bootstrap period just consumed.
    period_start = period_start + timedelta(days=period_day)

    """ to calculate traffic rate per minute, """
    minute_buckets = OrderedDict()  # OrderedDict keeps minute order for easy popping

    # store logs for each period
    period_logs = []

    # Walk `num_of_week` consecutive periods of `period_day` days each.
    # (Loop body continues beyond this excerpt.)
    for period_offset in range(num_of_week):
        start_date = period_start + timedelta(days=period_offset * period_day)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



scheduler.py [486:510]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Fixed anchor date for the bootstrap observation window; the next
    # `period_day` days of logs are read starting here.
    # NOTE(review): hard-coded date — confirm this is intentional before reuse.
    # NOTE(review): this preamble duplicates the one in scheduler.py [323:347]
    # almost verbatim — consider extracting a shared helper.
    period_start = datetime.strptime("2024-10-22", "%Y-%m-%d")
    logging.info(f"Preparing the first df starting from {period_start}")
    # Header: start_time: str,job_id,template_id,duration,
    # uown_names,inputDataSize,outputDataSize,cputime, type
    # Load one day of Presto and Spark job logs for each day of the period,
    # then stack them into a single frame (Spark rows first, then Presto).
    df_presto = pd.concat([read_Presto(period_start + timedelta(days=i)) for i in range(period_day)])
    df_spark = pd.concat([read_Spark(period_start + timedelta(days=i)) for i in range(period_day)])
    df = pd.concat([df_spark, df_presto])
    # Total data volume per job row = input size + output size.
    df['totalDataSize'] = df['inputDataSize'] + df['outputDataSize']
    # df = df.sort_values(['datetime', 'job_id'])  # NOTE(review): dead code — delete if the unsorted order is confirmed correct
    # Mean data volume per table, flattened into a {table: mean totalDataSize}
    # dict — presumably used as a per-table weight further down; verify in the
    # enclosing function.
    weight_group = df.groupby(['table']).agg(
            totalDataSize=('totalDataSize', 'mean')).reset_index()
    weight_lookup = weight_group.set_index('table').to_dict()['totalDataSize']

    logging.info(f"# of jobs: {len(df['job_id'].unique())}")

    # Advance the window start past the bootstrap period just consumed.
    period_start = period_start + timedelta(days=period_day)

    """ to calculate traffic rate per minute, """
    minute_buckets = OrderedDict()  # OrderedDict keeps minute order for easy popping

    # store logs for each period
    period_logs = []

    # Walk `num_of_week` consecutive periods of `period_day` days each.
    # (Loop body continues beyond this excerpt.)
    for period_offset in range(num_of_week):
        start_date = period_start + timedelta(days=period_offset * period_day)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



