# tests.py

import os
import sys
import traceback
from datetime import datetime, timedelta

import pandas as pd

# Assumed to be defined elsewhere in this module: args (parsed CLI arguments),
# network_capacity_gb, egress_gb, storage_gb_week, p_network_gb, read_yugong_df,
# human_readable_size, Ownership, and Query_on_DB_Table.

def test_yugong(compute_on_cloud_pct: int = 30, rep_budget_rate: float = 0.004, num_of_week: int = 2):
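    """Run the Yugong placement benchmark end to end.

    Solves an initial placement from the 2024-10-22..2024-10-28 access trace,
    then re-optimizes weekly for `num_of_week` follow-up weeks starting
    2024-10-29, warm-starting each week from the previous placement.

    Args:
        compute_on_cloud_pct: Target share of compute placed on the cloud;
            must be one of 30, 50, or 70.
        rep_budget_rate: Replication budget rate used to pick the precomputed
            replicated-table list and the optimizer's rep_threshold.
        num_of_week: Number of weekly re-optimization rounds after the
            bootstrap week.
    """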
    original_stdout = sys.stdout  # saved so the redirect below can be undone
    log_file = None
    try:
        # Validate input
        assert compute_on_cloud_pct in [30, 50, 70], "compute_on_cloud_pct must be one of [30, 50, 70]"
        # Set up parameters (not expected to change).
        # avg_bw_usage_ratio (float): fraction of network bandwidth dedicated to Moirai on average.
        avg_bw_usage_ratio = 0.2  # empirical value
        sample_rate = 1
        output_dir = "yugong_results"
        os.makedirs(output_dir, exist_ok=True)

        # Redirect stdout to a per-configuration log file (restored in the `finally` block).
        log_file = open(f"{output_dir}/log_c{compute_on_cloud_pct}.txt", "a")
        sys.stdout = log_file
        print(f"Time: {datetime.now()}", flush=True)

        reserved_bandwidth_gb = avg_bw_usage_ratio * network_capacity_gb
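        # Hypothetical example: if network_capacity_gb (a module-level constant)
        # were 1000 GB, the 0.2 usage ratio above would reserve 200 GB for Moirai.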
        # Compute-placement and storage constraints: a 5-percentage-point band
        # around the target cloud share, plus the complementary on-prem share.
        compute_cloud_min = compute_on_cloud_pct / 100
        compute_cloud_max = compute_on_cloud_pct / 100 + 0.05
        storage_on_prem_min = 1 - compute_on_cloud_pct / 100 - 0.05
        storage_on_prem_max = 1 - compute_on_cloud_pct / 100
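        # E.g. with the default compute_on_cloud_pct=30, compute on cloud must land
        # in [0.30, 0.35] and on-prem storage in [0.65, 0.70].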
        base_path = (f"{output_dir}/test_run_c{compute_on_cloud_pct}"
                     f"_bw{avg_bw_usage_ratio:.2f}_local{100 - compute_on_cloud_pct}")
        last_dir = base_path  # track the last processed directory

        # Initialize the graph only when not in view mode (i.e., not just printing paths for a sanity check).
        view_mode = args.view
        graph = None
        # Trace header: abstractFingerPrint,db_name,table_name,inputDataSize,outputDataSize,cputime
        job_data_access_df, workload_print_info = read_yugong_df(
            datetime.strptime("2024-10-22", "%Y-%m-%d"),
            datetime.strptime("2024-10-28", "%Y-%m-%d"),
        )
        job_data_access_df['totalDataSize'] = job_data_access_df['inputDataSize'] + job_data_access_df['outputDataSize']
        # Aggregate total accessed bytes per project (abstract fingerprint), largest first.
        workload_df = job_data_access_df.groupby('abstractFingerPrint').agg({'totalDataSize': 'sum'}).reset_index()
        workload_df.sort_values('totalDataSize', ascending=False, inplace=True)
print(f"** Workload info **")
for abFP, totalDataSize in zip(workload_df['abstractFingerPrint'], workload_df['totalDataSize']):
print(f"Project {abFP} has access size {human_readable_size(totalDataSize)}", flush=True)
        # Map each table to its owning project via the uown_names column.
        ownership = Ownership()
        table_df = pd.read_csv(
            "report-table-size-20241021.csv",
            dtype={'hive_database_name': str, 'hive_table_name': str, 'uown_names': str},
            na_values=['\\N'],
        )
        table_df['table'] = table_df['hive_database_name'] + '.' + table_df['hive_table_name']
        for table, uown_names in zip(table_df['table'], table_df['uown_names']):
            if pd.isna(uown_names):  # skip tables with no recorded owner
                continue
            ownership.add_table_ownership(table, uown_names)
        table_df['project'] = table_df['table'].apply(ownership.get_table_ownership)
        # Summarize table count and total size per owning project, largest first.
        merged_df = table_df.groupby('project').agg({'table': 'count', 'dir_size': 'sum'}).reset_index()
        merged_df.sort_values('dir_size', ascending=False, inplace=True)
        print("** Table ownership info **")
        for project, table_count, dir_size in zip(merged_df['project'], merged_df['table'], merged_df['dir_size']):
            print(f"Project {project} has {table_count} tables with total size {human_readable_size(dir_size)}", flush=True)
        # Load the precomputed list of tables replicated under this budget rate.
        rep_list = pd.read_csv(
            f"{output_dir}/replicated_tables_{rep_budget_rate:.3f}.csv",
            dtype={'replicated_tables': str},
        )['replicated_tables'].tolist()
        print(f"# of replicated tables: {len(rep_list)}")
        if not view_mode:
            graph = Query_on_DB_Table(
                job_data_access_df,
                workload_print_info,
                'report-table-size-20241021.csv',
                rep_threshold=rep_budget_rate,  # the optimizer derives the actual budget from the data
                k=sample_rate,
                log_dir=output_dir,
                yugong=True,  # enable the Yugong constraint
                ownership=ownership,
                rep_list=rep_list,
            )
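        # In view mode `graph` stays None and every solver call below is skipped;
        # otherwise it holds the query-to-table access structure that the Gurobi
        # model optimizes over.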
        # Solve the initial placement for the bootstrap week, unless results already exist.
        if not view_mode and not os.path.exists(base_path):
            graph.solve_gurobi(
                egress_gb, storage_gb_week, compute_cloud_min, compute_cloud_max, reserved_bandwidth_gb,
                base_path, storage_on_prem_min, storage_on_prem_max, True,
                alpha=1, time_limit=24 * 60 * 60,  # 24 hours
                p_network_gb=p_network_gb * 5,  # TODO: hard-coded for now
            )

        # Verify that the placement file was produced.
        placement_file = os.path.join(base_path, "dataset_placement.csv")
        assert os.path.exists(placement_file), f"File not found: {placement_file}"
        previous_placement = placement_file
        # Re-optimize weekly, warm-starting each week from the previous placement.
        period_start = datetime.strptime("2024-10-29", "%Y-%m-%d")
        for week_offset in range(num_of_week):
            start_date = period_start + timedelta(weeks=week_offset)
            end_date = start_date + timedelta(days=6)
            label = start_date.strftime("%m%d")
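            # E.g. the first iteration covers 2024-10-29 through 2024-11-04 and
            # gets the label "1029".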
            output_path = (f"{output_dir}/test_run_c{compute_on_cloud_pct}_bw{avg_bw_usage_ratio:.2f}"
                           f"_local{100 - compute_on_cloud_pct}_{label}")
            if os.path.exists(output_path):
                # This week's results already exist; reuse its placement and move on.
                previous_placement = os.path.join(output_path, "dataset_placement.csv")
                print(f"Skip {output_path}")
                continue
            print(f"Previous placement: {previous_placement}", flush=True)
            job_data_access_df, workload_print_info = read_yugong_df(start_date, end_date)
            if not view_mode:
                # Restore database table states from the previous placement.
                graph.restore_unique_db_tables(previous_placement, log_dir=last_dir)
                # Update the workload with the new access trace.
                graph.update_workload(job_data_access_df, workload_print_info, log_dir=last_dir)
                # Carry over the previous placement as the warm start.
                graph.update_previous_placement(previous_placement)

            # Optimization parameters.
            alpha = 1  # degree of the penalty for switching a table's placement
print(f"Running optimization for week starting on {label}")
print("----------------------------------------")
print(f"Inputs: days=7, egress_gb={egress_gb}, storage_gb_week={storage_gb_week}, "
f"compute_cloud_min={compute_cloud_min}, compute_cloud_max={compute_cloud_max}, "
f"network_cap_gb={reserved_bandwidth_gb}, "
f"storage_on_prem_min={storage_on_prem_min}, storage_on_prem_max={storage_on_prem_max}")
print(f"penalty degree alpha={alpha}")
print("----------------------------------------", flush=True)
            # Solve the optimization problem for this period.
            if not view_mode:
                graph.solve_gurobi(
                    egress_gb, storage_gb_week, compute_cloud_min, compute_cloud_max, reserved_bandwidth_gb,
                    output_path, storage_on_prem_min, storage_on_prem_max, True,
                    alpha=alpha, time_limit=24 * 60 * 60,  # 24 hours
                    p_network_gb=p_network_gb * 5,  # TODO: hard-coded for now
                )

            # Carry this week's placement into the next iteration.
            previous_placement = os.path.join(output_path, "dataset_placement.csv")
            last_dir = output_path
    except Exception:
        print(f"Error in test_yugong with compute_on_cloud_pct={compute_on_cloud_pct}, "
              f"rep_budget_rate={rep_budget_rate}")
        print("Exception traceback:")
        print(traceback.format_exc())
        raise
    finally:
        # Undo the stdout redirect and close the log file even on failure.
        sys.stdout = original_stdout
        if log_file is not None:
            log_file.close()
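
# Minimal usage sketch: run the default configuration (30% compute on cloud,
# 0.4% replication budget, two follow-up weeks). This assumes the module-level
# globals referenced above (args, network_capacity_gb, egress_gb, storage_gb_week,
# p_network_gb, and the helper functions) are initialized elsewhere in this file.
if __name__ == "__main__":
    test_yugong(compute_on_cloud_pct=30, rep_budget_rate=0.004, num_of_week=2)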