def clustering_evaluate_industry()

in aiops/ContrastiveLearningLogClustering/utils/evaluation.py [0:0]
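clustering_evaluate_industry compares a predicted clustering of log messages against the ground-truth labels of an industry log dataset. It loads the labelled CSV, drops rows whose label_id is -1 (unlabelled lines), and scores cluster_assignment against the label_id column with rand index, homogeneity, completeness, V-measure, adjusted rand index (ARI), normalized mutual information (NMI), and a grouping-style parsing accuracy. It returns the score dictionary together with the number of ground-truth event types and the number of predicted clusters.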


import pandas as pd
from sklearn import metrics


def clustering_evaluate_industry(file_name, cluster_assignment, clustered_sentences):

    df_log = pd.read_csv(file_name)
    # df_groundtruth = df_log_structured['EventId']
    # keep only labelled lines; label_id == -1 marks logs without ground truth
    df_log = df_log[df_log['label_id'] != -1]
    label_count = df_log['label_id'].value_counts()
    event_amount = len(label_count)            # number of ground-truth event types
    cluster_amount = len(clustered_sentences)  # number of predicted clusters
    print('event amount: ', event_amount)
    print('cluster amount: ', cluster_amount)
    
    # ground-truth label for every kept log line, in file order
    label_true = df_log['label_id'].tolist()

    rand_index = metrics.rand_score(label_true, cluster_assignment) 
    homogeneity = metrics.homogeneity_score(label_true, cluster_assignment)
    completeness = metrics.completeness_score(label_true, cluster_assignment)
    v_measure = metrics.v_measure_score(label_true,cluster_assignment, beta=1) #v = (1 + beta) * homogeneity * completeness / (beta * homogeneity + completeness)
    adj_rand_index = metrics.adjusted_rand_score(label_true, cluster_assignment)
    normalized_mi = metrics.normalized_mutual_info_score(label_true, cluster_assignment)
    
    # print("rand_index: ",rand_index)
    # print('homogeneity score: ',homogeneity) 
    # print('completeness score: ',completeness) 
    # print('v measure score: ',v_measure)
    print('ARI: ', adj_rand_index)
    print('NMI: ', normalized_mi)

    series_parsedlog = pd.Series(cluster_assignment)
    series_groundtruth = pd.Series(label_true)

    series_parsedlog_valuecounts = series_parsedlog.value_counts()
    # series_groundtruth_valuecounts = series_groundtruth.value_counts()

    # Grouping (parsing) accuracy: a predicted cluster counts as correct only if it
    # contains exactly the log lines of a single ground-truth event; the lines of
    # such clusters are accumulated in accurate_events.
    accurate_events = 0  # number of correctly grouped log lines
    for parsed_eventId in series_parsedlog_valuecounts.index:
        logIds = series_parsedlog[series_parsedlog == parsed_eventId].index
        series_groundtruth_logId_valuecounts = series_groundtruth[logIds].value_counts()
        if series_groundtruth_logId_valuecounts.size == 1:
            groundtruth_eventId = series_groundtruth_logId_valuecounts.index[0]
            if logIds.size == series_groundtruth[series_groundtruth == groundtruth_eventId].size:
                accurate_events += logIds.size

    parsing_accuracy = float(accurate_events) / series_groundtruth.size
    
    print("parsing accuarcy: ",parsing_accuracy)

    # F1_measure = metrics.f1_score(label_true,label_pre,average='micro')
    # print("F1 score: ",F1_measure)

    score = {
        'rand index': rand_index,
        'parsing accuracy': parsing_accuracy,
        'homogeneity': homogeneity,
        'completeness': completeness,
        'v measure': v_measure,
        'ARI': adj_rand_index,
        'NMI': normalized_mi,
    }

    return score, event_amount, cluster_amount
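

A minimal usage sketch is shown below. The file name, log lines, labels, cluster ids, and the commented import path are hypothetical; in the actual pipeline cluster_assignment and clustered_sentences would be produced by the clustering step rather than written by hand.

import pandas as pd

# from utils.evaluation import clustering_evaluate_industry  # assumed import path

# Hypothetical labelled industry log; label_id == -1 marks unlabelled lines that
# the function drops before scoring.
df = pd.DataFrame({
    'log': ['connection from 10.0.0.1', 'connection from 10.0.0.2',
            'disk full on /dev/sda1', 'unparsed line'],
    'label_id': [0, 0, 1, -1],
})
df.to_csv('industry_logs.csv', index=False)

# One predicted cluster id per labelled line (label_id != -1), in file order.
cluster_assignment = [0, 0, 1]
# One list of messages per predicted cluster; only len(clustered_sentences) is used.
clustered_sentences = [['connection from 10.0.0.1', 'connection from 10.0.0.2'],
                       ['disk full on /dev/sda1']]

score, event_amount, cluster_amount = clustering_evaluate_industry(
    'industry_logs.csv', cluster_assignment, clustered_sentences)
print(score['ARI'], score['NMI'], score['parsing accuracy'])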