visualization_utils/plotting-ying.py [15:339]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_column_name_number(dir_addr, mode):
    column_name_number_dic = {}
    try:
        if mode == "all":
            file_name = "result_summary/FARSI_simple_run_0_1_all_reults.csv"
        else:
            file_name = "result_summary/FARSI_simple_run_0_1.csv"

        file_full_addr = os.path.join(dir_addr, file_name)
        with open(file_full_addr) as f:
            resultReader = csv.reader(f, delimiter=',', quotechar='|')
            for row in resultReader:
                for idx, el_name in enumerate(row):
                    column_name_number_dic[el_name] = idx
                break
        return column_name_number_dic
    except Exception as e:
        raise e

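# Example usage (a minimal sketch; the run directory below is a placeholder):
#   col_map = get_column_name_number("./data/run1", "all")
#   comm_comp_idx = col_map["comm_comp"]   # zero-based index of the "comm_comp" column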


#


# the function to get the column information of the given category
def columnNum(dirName, fileName, cate, result):
    if result == "all":
        with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
            resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

            for i, row in enumerate(resultReader):
                if i == 0:
                    for j in range(0, len(row)):
                        if row[j] == cate:
                            return j
                    raise Exception("No such category in the list! Check the name: " + cate)
                break
    elif result == "simple":
        with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1.csv", newline='') as csvfile:
            resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

            for i, row in enumerate(resultReader):
                if i == 0:
                    for j in range(0, len(row)):
                        if row[j] == cate:
                            return j
                    raise Exception("No such category in the list! Check the name: " + cate)
                break
    else:
        raise Exception("No such result file! Check the result type! It should be either \"all\" or \"simple\"")

# the function to plot the frequency of all comm_comp in the pie chart
def plotCommCompAll(dirName, fileName, all_res_column_name_number):
    colNum = all_res_column_name_number["comm_comp"]
    trueNum = all_res_column_name_number["move validity"]

    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        commNum = 0
        compNum = 0

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                if row[colNum] == "comm":
                    commNum += 1
                elif row[colNum] == "comp":
                    compNum += 1
                else:
                    raise Exception("comm_comp is not giving comm or comp! The new type: " + row[colNum])

        plt.figure()
        plt.pie([commNum, compNum], labels = ["comm", "comp"])
        plt.title("comm_comp: Frequency")
        plt.savefig(dirName + fileName + "/comm-compFreq-" + fileName + ".png")
        # plt.show()
        plt.close('all')
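# Example usage (a sketch; paths are placeholders). The column map comes from
# get_column_name_number on the same run directory, and the pie chart is saved
# inside that run directory:
#   col_map = get_column_name_number("./data/run1", "all")
#   plotCommCompAll("./data/", "run1", col_map)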

# the function to plot the frequency of all high level optimizations in the pie chart
def plothighLevelOptAll(dirName, fileName, all_res_column_name_number):
    colNum = all_res_column_name_number["high level optimization name"]
    trueNum = all_res_column_name_number["move validity"]

    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        topoNum = 0
        tunNum = 0
        mapNum = 0
        idenOptNum = 0

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                if row[colNum] == "topology":
                    topoNum += 1
                elif row[colNum] == "customization":
                    tunNum += 1
                elif row[colNum] == "mapping":
                    mapNum += 1
                elif row[colNum] == "identity":
                    idenOptNum += 1
                else:
                    raise Exception("high level optimization name is not giving topology or customization or mapping or identity! The new type: " + row[colNum])
        
        plt.figure()
        plt.pie([topoNum, tunNum, mapNum, idenOptNum], labels = ["topology", "customization", "mapping", "identity"])
        plt.title("High Level Optimization: Frequency")
        plt.savefig(dirName + fileName + "/highLevelOpt-" + fileName + ".png")
        # plt.show()
        plt.close('all')

# the function to plot the frequency of all architectural variables to improve in the pie chart
def plotArchVarImpAll(dirName, fileName, colNum, trueNum):
    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        parazNum = 0
        custNum = 0
        localNum = 0
        idenImpNum = 0

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                if row[colNum] == "parallelization":
                    parazNum += 1
                elif row[colNum] == "customization":
                    custNum += 1
                elif row[colNum] == "locality":
                    localNum += 1
                elif row[colNum] == "identity":
                    idenImpNum += 1                
                else:
                    raise Exception("architectural principle is not parallelization or customization or locality or identity! The new type: " + row[colNum])

        plt.figure()
        plt.pie([parazNum, custNum, localNum, idenImpNum], labels = ["parallelization", "customization", "locality", "identity"])
        plt.title("Architectural Principle: Frequency")
        plt.savefig(dirName + fileName + "/archVarImp-" + fileName + ".png")
        # plt.show()
        plt.close('all')

# the function to plot simulation time vs. system block count
def plotSimTimeVSblk(dirName, fileName, blkColNum, simColNum, trueNum):
    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        sysBlkCount = []
        simTime = []

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                sysBlkCount.append(int(row[blkColNum]))
                simTime.append(float(row[simColNum]))

        plt.figure()
        plt.plot(sysBlkCount, simTime)
        plt.xlabel("System Block Count")
        plt.ylabel("Simulation Time")
        plt.title("Simulation Time vs. Sytem Block Count")
        plt.savefig(dirName + fileName + "/simTimeVSblk-" + fileName + ".png")
        # plt.show()
        plt.close('all')

# the function to plot move generation time vs. system block count
def plotMoveGenTimeVSblk(dirName, fileName, blkColNum, movColNum, trueNum):
    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        sysBlkCount = []
        moveGenTime = []

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                sysBlkCount.append(int(row[blkColNum]))
                moveGenTime.append(float(row[movColNum]))
        
        plt.figure()
        plt.plot(sysBlkCount, moveGenTime)
        plt.xlabel("System Block Count")
        plt.ylabel("Move Generation Time")
        plt.title("Move Generation Time vs. System Block Count")
        plt.savefig(dirName + fileName + "/moveGenTimeVSblk-" + fileName + ".png")
        # plt.show()
        plt.close('all')

def get_experiments_workload(all_res_column_name):
    latency_budget = all_res_column_name["latency budget"][:-1]
    workload_latencies = latency_budget.split(";")
    workloads = []
    for workload_latency in workload_latencies:
        workloads.append(workload_latency.split("=")[0])
    return workloads

def get_experiments_name(file_full_addr, all_res_column_name_number):
    with open(file_full_addr, newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
        row1 = next(resultReader)
        row2 = next(resultReader)
        latency_budget = row2[all_res_column_name_number["latency_budget"]]
        power_budget = row2[all_res_column_name_number["power_budget"]]
        area_budget = row2[all_res_column_name_number["area_budget"]]
        try:
            transformation_selection_mode = row2[all_res_column_name_number["transformation_selection_mode"]]
        except (KeyError, IndexError):
            transformation_selection_mode = ""

        workload_latencies = latency_budget[:-1].split(';')
        latency_budget_refined = ""
        for workload_latency in workload_latencies:
            latency_budget_refined += "_" + (workload_latency.split("=")[0][0] + workload_latency.split("=")[1])

        return latency_budget_refined[1:] + "_" + power_budget + "_" + area_budget + "_" + transformation_selection_mode
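# Example (a sketch with made-up budget values): for latency_budget "audio=0.001;video=0.033;",
# power_budget "0.05", and area_budget "0.000015", the derived name looks like
# "a0.001_v0.033_0.05_0.000015_<transformation_selection_mode>".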

def get_all_col_values_of_a_file(file_full_addr, all_res_column_name_number, column_name):
    column_number = all_res_column_name_number[column_name]
    all_values = []
    with open(file_full_addr, newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
        experiment_name = get_experiments_name(file_full_addr, all_res_column_name_number)
        for i, row in enumerate(resultReader):
            if i > 1:
                if row[column_number] != '':
                    value = row[column_number]
                    values = value.split(";")  # in case of multiple values
                    for val in values:
                        if "=" in val:
                            val_splitted = val.split("=")
                            all_values.append(val_splitted[0])
                        else:
                            all_values.append(val)

    return all_values

def get_all_col_values_of_a_folders(input_dir_names, input_all_res_column_name_number, column_name):
    all_values = []
    for dir_name in input_dir_names:
        file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
        all_values.extend(get_all_col_values_of_a_file(file_full_addr, input_all_res_column_name_number, column_name))

    # get rid of duplicates
    all_values_rid_of_duplicates = list(set(all_values))
    return all_values_rid_of_duplicates
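# Example usage (a sketch; the directory list is hypothetical): gather the distinct
# values of one column across several runs, e.g. all comm/comp move kinds.
#   dirs = ["./data/run1", "./data/run2"]
#   col_map = get_column_name_number(dirs[0], "all")
#   kinds = get_all_col_values_of_a_folders(dirs, col_map, "comm_comp")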

def extract_latency_values(values_):
    print("")


def plot_codesign_rate_efficacy_cross_workloads_updated(input_dir_names, res_column_name_number):
    #itrColNum = all_res_column_name_number["iteration cnt"]
    #distColNum = all_res_column_name_number["dist_to_goal_non_cost"]
    trueNum = res_column_name_number["move validity"]
    move_name_number = res_column_name_number["move name"]

    # experiment_names
    file_full_addr_list = []
    for dir_name in input_dir_names:
        file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
        file_full_addr_list.append(file_full_addr)

    axis_font = {'fontname': 'Arial', 'size': '4'}
    x_column_name = "iteration cnt"
    #y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]
    y_column_name_list = ["exact optimization name",  "architectural principle", "comm_comp", "workload"]

    #y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]



    column_co_design_cnt = {}
    column_non_co_design_cnt = {}
    column_co_design_rate = {}
    column_non_co_design_rate = {}
    column_co_design_efficacy_avg = {}
    column_non_co_design_efficacy_rate = {}
    column_non_co_design_efficacy = {}
    column_co_design_dist = {}
    column_co_design_dist_avg = {}
    column_co_design_improvement = {}
    experiment_name_list = []
    last_col_val = ""
    for file_full_addr in file_full_addr_list:
        experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
        experiment_name_list.append(experiment_name)
        column_co_design_dist_avg[experiment_name] = {}
        column_co_design_efficacy_avg[experiment_name] = {}

        column_co_design_cnt = {}
        for y_column_name in y_column_name_list:
            y_column_number = res_column_name_number[y_column_name]
            x_column_number = res_column_name_number[x_column_name]


            dis_to_goal_column_number = res_column_name_number["dist_to_goal_non_cost"]
            ref_des_dis_to_goal_column_number = res_column_name_number["ref_des_dist_to_goal_non_cost"]
            column_co_design_cnt[y_column_name] = []
            column_non_co_design_cnt[y_column_name] = []

            column_non_co_design_efficacy[y_column_name] = []
            column_co_design_dist[y_column_name] = []
            column_co_design_improvement[y_column_name] = []
            column_co_design_rate[y_column_name] = []

            all_values = get_all_col_values_of_a_folders(input_dir_names, res_column_name_number, y_column_name)

            last_row_change = ""
            with open(file_full_addr, newline='') as csvfile:
                resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
                rows = list(resultReader)
                for i, row in enumerate(rows):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



visualization_utils/plotting.py [17:341]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_column_name_number(dir_addr, mode):
    column_name_number_dic = {}
    try:
        if mode == "all":
            file_name = "result_summary/FARSI_simple_run_0_1_all_reults.csv"
        else:
            file_name = "result_summary/FARSI_simple_run_0_1.csv"

        file_full_addr = os.path.join(dir_addr, file_name)
        with open(file_full_addr) as f:
            resultReader = csv.reader(f, delimiter=',', quotechar='|')
            for row in resultReader:
                for idx, el_name in enumerate(row):
                    column_name_number_dic[el_name] = idx
                break
        return column_name_number_dic
    except Exception as e:
        raise e

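# Example usage (a minimal sketch; the run directory below is a placeholder):
#   col_map = get_column_name_number("./data/run1", "all")
#   comm_comp_idx = col_map["comm_comp"]   # zero-based index of the "comm_comp" column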


#


# the function to get the column information of the given category
def columnNum(dirName, fileName, cate, result):
    if result == "all":
        with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
            resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

            for i, row in enumerate(resultReader):
                if i == 0:
                    for j in range(0, len(row)):
                        if row[j] == cate:
                            return j
                    raise Exception("No such category in the list! Check the name: " + cate)
                break
    elif result == "simple":
        with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1.csv", newline='') as csvfile:
            resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

            for i, row in enumerate(resultReader):
                if i == 0:
                    for j in range(0, len(row)):
                        if row[j] == cate:
                            return j
                    raise Exception("No such category in the list! Check the name: " + cate)
                break
    else:
        raise Exception("No such result file! Check the result type! It should be either \"all\" or \"simple\"")

# the function to plot the frequency of all comm_comp in the pie chart
def plotCommCompAll(dirName, fileName, all_res_column_name_number):
    colNum = all_res_column_name_number["comm_comp"]
    trueNum = all_res_column_name_number["move validity"]

    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        commNum = 0
        compNum = 0

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                if row[colNum] == "comm":
                    commNum += 1
                elif row[colNum] == "comp":
                    compNum += 1
                else:
                    raise Exception("comm_comp is not giving comm or comp! The new type: " + row[colNum])

        plt.figure()
        plt.pie([commNum, compNum], labels = ["comm", "comp"])
        plt.title("comm_comp: Frequency")
        plt.savefig(dirName + fileName + "/comm-compFreq-" + fileName + ".png")
        # plt.show()
        plt.close('all')
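# Example usage (a sketch; paths are placeholders). The column map comes from
# get_column_name_number on the same run directory, and the pie chart is saved
# inside that run directory:
#   col_map = get_column_name_number("./data/run1", "all")
#   plotCommCompAll("./data/", "run1", col_map)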

# the function to plot the frequency of all high level optimizations in the pie chart
def plothighLevelOptAll(dirName, fileName, all_res_column_name_number):
    colNum = all_res_column_name_number["high level optimization name"]
    trueNum = all_res_column_name_number["move validity"]

    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        topoNum = 0
        tunNum = 0
        mapNum = 0
        idenOptNum = 0

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                if row[colNum] == "topology":
                    topoNum += 1
                elif row[colNum] == "customization":
                    tunNum += 1
                elif row[colNum] == "mapping":
                    mapNum += 1
                elif row[colNum] == "identity":
                    idenOptNum += 1
                else:
                    raise Exception("high level optimization name is not giving topology or customization or mapping or identity! The new type: " + row[colNum])
        
        plt.figure()
        plt.pie([topoNum, tunNum, mapNum, idenOptNum], labels = ["topology", "customization", "mapping", "identity"])
        plt.title("High Level Optimization: Frequency")
        plt.savefig(dirName + fileName + "/highLevelOpt-" + fileName + ".png")
        # plt.show()
        plt.close('all')

# the function to plot the frequency of all architectural variables to improve in the pie chart
def plotArchVarImpAll(dirName, fileName, colNum, trueNum):
    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        parazNum = 0
        custNum = 0
        localNum = 0
        idenImpNum = 0

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                if row[colNum] == "parallelization":
                    parazNum += 1
                elif row[colNum] == "customization":
                    custNum += 1
                elif row[colNum] == "locality":
                    localNum += 1
                elif row[colNum] == "identity":
                    idenImpNum += 1                
                else:
                    raise Exception("architectural principle is not parallelization or customization or locality or identity! The new type: " + row[colNum])

        plt.figure()
        plt.pie([parazNum, custNum, localNum, idenImpNum], labels = ["parallelization", "customization", "locality", "identity"])
        plt.title("Architectural Principle: Frequency")
        plt.savefig(dirName + fileName + "/archVarImp-" + fileName + ".png")
        # plt.show()
        plt.close('all')

# the function to plot simulation time vs. system block count
def plotSimTimeVSblk(dirName, fileName, blkColNum, simColNum, trueNum):
    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        sysBlkCount = []
        simTime = []

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                sysBlkCount.append(int(row[blkColNum]))
                simTime.append(float(row[simColNum]))

        plt.figure()
        plt.plot(sysBlkCount, simTime)
        plt.xlabel("System Block Count")
        plt.ylabel("Simulation Time")
        plt.title("Simulation Time vs. Sytem Block Count")
        plt.savefig(dirName + fileName + "/simTimeVSblk-" + fileName + ".png")
        # plt.show()
        plt.close('all')

# the function to plot move generation time vs. system block count
def plotMoveGenTimeVSblk(dirName, fileName, blkColNum, movColNum, trueNum):
    with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')

        sysBlkCount = []
        moveGenTime = []

        for i, row in enumerate(resultReader):
            if row[trueNum] != "True":
                continue

            if i > 1:
                sysBlkCount.append(int(row[blkColNum]))
                moveGenTime.append(float(row[movColNum]))
        
        plt.figure()
        plt.plot(sysBlkCount, moveGenTime)
        plt.xlabel("System Block Count")
        plt.ylabel("Move Generation Time")
        plt.title("Move Generation Time vs. System Block Count")
        plt.savefig(dirName + fileName + "/moveGenTimeVSblk-" + fileName + ".png")
        # plt.show()
        plt.close('all')

def get_experiments_workload(all_res_column_name):
    latency_budget = all_res_column_name["latency budget"][:-1]
    workload_latencies = latency_budget.split(";")
    workloads = []
    for workload_latency in workload_latencies:
        workloads.append(workload_latency.split("=")[0])
    return workloads

def get_experiments_name(file_full_addr, all_res_column_name_number):
    with open(file_full_addr, newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
        row1 = next(resultReader)
        row2 = next(resultReader)
        latency_budget = row2[all_res_column_name_number["latency_budget"]]
        power_budget = row2[all_res_column_name_number["power_budget"]]
        area_budget = row2[all_res_column_name_number["area_budget"]]
        try:
            transformation_selection_mode = row2[all_res_column_name_number["transformation_selection_mode"]]
        except (KeyError, IndexError):
            transformation_selection_mode = ""

        workload_latencies = latency_budget[:-1].split(';')
        latency_budget_refined = ""
        for workload_latency in workload_latencies:
            latency_budget_refined += "_" + (workload_latency.split("=")[0][0] + workload_latency.split("=")[1])

        return latency_budget_refined[1:] + "_" + power_budget + "_" + area_budget + "_" + transformation_selection_mode
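# Example (a sketch with made-up budget values): for latency_budget "audio=0.001;video=0.033;",
# power_budget "0.05", and area_budget "0.000015", the derived name looks like
# "a0.001_v0.033_0.05_0.000015_<transformation_selection_mode>".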

def get_all_col_values_of_a_file(file_full_addr, all_res_column_name_number, column_name):
    column_number = all_res_column_name_number[column_name]
    all_values = []
    with open(file_full_addr, newline='') as csvfile:
        resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
        experiment_name = get_experiments_name(file_full_addr, all_res_column_name_number)
        for i, row in enumerate(resultReader):
            if i > 1:
                if row[column_number] != '':
                    value = row[column_number]
                    values = value.split(";")  # in case of multiple values
                    for val in values:
                        if "=" in val:
                            val_splitted = val.split("=")
                            all_values.append(val_splitted[0])
                        else:
                            all_values.append(val)

    return all_values

def get_all_col_values_of_a_folders(input_dir_names, input_all_res_column_name_number, column_name):
    all_values = []
    for dir_name in input_dir_names:
        file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
        all_values.extend(get_all_col_values_of_a_file(file_full_addr, input_all_res_column_name_number, column_name))

    # get rid of duplicates
    all_values_rid_of_duplicates = list(set(all_values))
    return all_values_rid_of_duplicates
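# Example usage (a sketch; the directory list is hypothetical): gather the distinct
# values of one column across several runs, e.g. all comm/comp move kinds.
#   dirs = ["./data/run1", "./data/run2"]
#   col_map = get_column_name_number(dirs[0], "all")
#   kinds = get_all_col_values_of_a_folders(dirs, col_map, "comm_comp")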

def extract_latency_values(values_):
    print("")


def plot_codesign_rate_efficacy_cross_workloads_updated(input_dir_names, res_column_name_number):
    #itrColNum = all_res_column_name_number["iteration cnt"]
    #distColNum = all_res_column_name_number["dist_to_goal_non_cost"]
    trueNum = res_column_name_number["move validity"]
    move_name_number = res_column_name_number["move name"]

    # experiment_names
    file_full_addr_list = []
    for dir_name in input_dir_names:
        file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
        file_full_addr_list.append(file_full_addr)

    axis_font = {'fontname': 'Arial', 'size': '4'}
    x_column_name = "iteration cnt"
    #y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]
    y_column_name_list = ["exact optimization name",  "architectural principle", "comm_comp", "workload"]

    #y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]



    column_co_design_cnt = {}
    column_non_co_design_cnt = {}
    column_co_design_rate = {}
    column_non_co_design_rate = {}
    column_co_design_efficacy_avg = {}
    column_non_co_design_efficacy_rate = {}
    column_non_co_design_efficacy = {}
    column_co_design_dist = {}
    column_co_design_dist_avg = {}
    column_co_design_improvement = {}
    experiment_name_list = []
    last_col_val = ""
    for file_full_addr in file_full_addr_list:
        experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
        experiment_name_list.append(experiment_name)
        column_co_design_dist_avg[experiment_name] = {}
        column_co_design_efficacy_avg[experiment_name] = {}

        column_co_design_cnt = {}
        for y_column_name in y_column_name_list:
            y_column_number = res_column_name_number[y_column_name]
            x_column_number = res_column_name_number[x_column_name]


            dis_to_goal_column_number = res_column_name_number["dist_to_goal_non_cost"]
            ref_des_dis_to_goal_column_number = res_column_name_number["ref_des_dist_to_goal_non_cost"]
            column_co_design_cnt[y_column_name] = []
            column_non_co_design_cnt[y_column_name] = []

            column_non_co_design_efficacy[y_column_name] = []
            column_co_design_dist[y_column_name] = []
            column_co_design_improvement[y_column_name] = []
            column_co_design_rate[y_column_name] = []

            all_values = get_all_col_values_of_a_folders(input_dir_names, res_column_name_number, y_column_name)

            last_row_change = ""
            with open(file_full_addr, newline='') as csvfile:
                resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
                rows = list(resultReader)
                for i, row in enumerate(rows):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



