in data_collection/collection_utils/what_ifs/FARSI_what_ifs.py [0:0]
def input_error_output_cost_sensitivity_study(result_folder, sw_hw_database_population, system_workers=(1,1), input_error=False, input_cost=False):
    """Sweep budget/accuracy/effort combinations and run FARSI once per combination.

    Depending on the (input_error, input_cost) flags, conducts one of three case
    studies: "cost_PPA" (neither), "input_error_output_cost" (error only), or
    "input_error_input_cost" (both). The full cross-product of budget, accuracy
    and porting-effort values is statically partitioned across processes, and
    each run's results are written both to a shared summary folder and to a
    per-run folder.

    Parameters
    ----------
    result_folder : str
        Root folder where per-run and summary results are written.
    sw_hw_database_population : dict
        Database population spec; must contain a "hw_graph_mode" entry.
    system_workers : tuple(int, int)
        (current process id, total process count) for work partitioning.
    input_error : bool
        When True, sweep the IP accuracy (input error) dimension.
    input_cost : bool
        When True, sweep the IP porting-effort (input cost) dimension.
    """
    current_process_id = system_workers[0]
    total_process_cnt = system_workers[1]

    # -----------------------
    # set up the case study
    # -----------------------
    if not input_error and not input_cost:
        case_study = "cost_PPA"
        file_prefix = config.FARSI_cost_correlation_study_prefix
    elif input_error and not input_cost:
        case_study = "input_error_output_cost"
        print("input error cost study")
        file_prefix = config.FARSI_input_error_output_cost_sensitivity_study_prefix
    elif input_error and input_cost:
        case_study = "input_error_input_cost"
        file_prefix = config.FARSI_input_error_input_cost_sensitivity_study_prefix
    else:
        print("this study is not supported")
        # was exit(0): an unsupported study is an error, so signal failure
        exit(1)
    print("conducting " + case_study)

    # -----------------------
    # first extract the current (reference) budget from a throw-away database
    # -----------------------
    db_input = database_input_class(sw_hw_database_population)
    budgets_dict = {}  # reference budgets, used as sweep lower bounds
    # NOTE(review): `workloads` is not defined in this function — presumably a
    # module-level global listing the workload names; verify against the module.
    budgets_dict['latency'] = db_input.budgets_dict['glass']['latency'][list(workloads)[0]]
    budgets_dict['power'] = db_input.budgets_dict['glass']['power']
    budgets_dict['area'] = db_input.budgets_dict['glass']['area']

    # -------------------------------------------
    # set sweeping parameters
    # -------------------------------------------
    experiment_repetition_cnt = 1
    budget_cnt = 3
    budget_upper_bound_factor = {"perf": 10, "power": 10, "area": 100}
    if not input_error:
        accuracy_lower_bound = accuracy_upper_bound = 1
        accuracy_cnt = 1  # number of accuracy values to use
    else:
        accuracy_lower_bound = .5
        accuracy_upper_bound = 1
        accuracy_cnt = 3  # number of accuracy values to use
    if not input_cost:
        effort_lower_bound = effort_upper_bound = 100
        effort_cnt = 1  # number of effort values to use
    else:
        effort_lower_bound = 20
        effort_upper_bound = 100
        effort_cnt = 3  # number of effort values to use

    # -------------------------------------------
    # generate all the combinations of the budgets
    # -------------------------------------------
    combination_input = [
        (budgets_dict["latency"], budget_upper_bound_factor["perf"] * budgets_dict["latency"], budget_cnt),
        (budgets_dict["power"], budget_upper_bound_factor["power"] * budgets_dict["power"], budget_cnt),
        (budgets_dict["area"], budget_upper_bound_factor["area"] * budgets_dict["area"], budget_cnt),
        (accuracy_lower_bound, accuracy_upper_bound, accuracy_cnt),
        (effort_lower_bound, effort_upper_bound, effort_cnt),
    ]
    # Materialize ONCE: the original called list(all_combinations) twice, which
    # silently yields an empty second pass (i.e. zero runs) whenever
    # gen_combinations returns a generator rather than a list.
    all_combinations = list(gen_combinations(combination_input))

    # -------------------------------------------
    # distribute the work (static contiguous partition across processes)
    # -------------------------------------------
    combo_cnt = len(all_combinations)
    work_per_process = math.ceil(combo_cnt / total_process_cnt)
    my_combinations = all_combinations[current_process_id * work_per_process:
                                       min((current_process_id + 1) * work_per_process, combo_cnt)]
    run_ctr = 0

    # -------------------------------------------
    # run the combinations and collect the data
    # -------------------------------------------
    # get the budget, set them and run FARSI
    reduction = "most_likely_with_accuracy_percentage"
    for _ in range(experiment_repetition_cnt):
        for latency, power, area, accuracy, effort in my_combinations:
            # iterate through metrics and set the budget
            accuracy_percentage = {}
            # NB: sram/dram/ic/gpp deliberately alias ONE shared dict (as in the
            # original); only the "ip" entry carries accuracy-dependent values.
            accuracy_percentage["sram"] = accuracy_percentage["dram"] = accuracy_percentage["ic"] = \
                accuracy_percentage["gpp"] = {"latency": 1, "energy": 1, "area": 1, "one_over_area": 1}
            accuracy_percentage["ip"] = {"latency": accuracy, "energy": 1 / pow(accuracy, 2),
                                         "area": 1, "one_over_area": 1}
            hw_sampling = {"mode": "exact", "population_size": 1, "reduction": reduction,
                           "accuracy_percentage": accuracy_percentage}
            db_input = database_input_class(sw_hw_database_population)

            # set the budget
            budgets_dict = {"glass": {"latency": {list(workloads)[0]: latency},
                                      "power": power,
                                      "area": area}}
            db_input.set_budgets_dict_directly(budgets_dict)
            db_input.set_porting_effort_for_block("ip", effort)  # only playing with ip now

            unique_suffix = str(total_process_cnt) + "_" + str(current_process_id) + "_" + str(run_ctr)
            print("hw_sampling:" + str(hw_sampling))
            print("budget set to:" + str(db_input.budgets_dict))
            dse_hndlr = run_FARSI(result_folder, unique_suffix, db_input, hw_sampling,
                                  sw_hw_database_population["hw_graph_mode"])
            run_ctr += 1

            # write the results in the general folder
            result_dir_specific = os.path.join(result_folder, "result_summary")
            write_one_results(dse_hndlr.dse.so_far_best_sim_dp, dse_hndlr.dse,
                              dse_hndlr.dse.reason_to_terminate, case_study,
                              result_dir_specific, unique_suffix,
                              file_prefix + "_" + str(current_process_id) + "_" + str(total_process_cnt))

            # write the results in the specific folder
            result_folder_modified = result_folder + "/runs/" + str(run_ctr) + "/"
            # os.makedirs replaces the shell-injectable `os.system("mkdir -p " + ...)`
            os.makedirs(result_folder_modified, exist_ok=True)
            copy_DSE_data(result_folder_modified)
            write_one_results(dse_hndlr.dse.so_far_best_sim_dp, dse_hndlr.dse,
                              dse_hndlr.dse.reason_to_terminate, case_study,
                              result_folder_modified, unique_suffix,
                              file_prefix + "_" + str(current_process_id) + "_" + str(total_process_cnt))