'''
获取所有 PR 信息，找出每个 PR 的创建时间，以及在该创建时刻仍处于 open 状态的 PR，
将该时刻仍处于 open 状态的 PR 列表作为输入 X。
FIFO 算法：按 PR 创建时间先后排序（先创建的排在最前面），对上述 PR 列表排序得到 FIFOY。
真实排序：在该时刻之后，X 中各 PR 被响应、被关闭或被合并等状态发生改变的时间，按该时间先后排序，得到真实排序 TRUEY。
将 FIFOY 与 TRUEY 通过 NDCG 进行比较，评判排序效果。
'''
import json
from datetime import timedelta
from evaluation_index.Kendall_tau_distance import kendall_tau_distance
from evaluation_index.mrr import mrr
import csv
from evaluation_index.ndcg import ndcg
import pandas as pd
# 增加代码的可读性
from utils.path_exist import path_exists_or_create


# 对模型进行调用，同时将数据写入到文件中，方便后续统计
def alg_model_result(result_data_path, top_k, reversed, alg_name, repo_name, start_day, end_day, sum_result):
    result_data = pd.read_excel(result_data_path)
    ndcg_list = []
    day_list = []
    mrr_list = []
    kendall_list = []
    open_pr_count_byday = []

    # 获取每天处于open状态的PR
    day_open_pr_data = {}
    for i in range((end_day - start_day).days + 1):
        day = start_day + timedelta(days=i)
        day_open_pr_data[str(day)] = []

    for index, row in result_data.iterrows():
        pr_result = {}
        pr_number = row['pr_number']
        pr_result[pr_number] = {}
        pr_result[pr_number]['created_time'] = pd.to_datetime(row['created_time'])
        pr_result[pr_number]['closed_time'] = pd.to_datetime(row['closed_time'])
        pr_result[pr_number]['merged_time'] = pd.to_datetime(row['merged_time'])
        pr_result[pr_number][alg_name] = row[alg_name]
        pr_result[pr_number]['original_rank'] = row['original_rank']
        pr_start_day = pr_result[pr_number]['created_time']
        if row['merged_time'].date() is not None and pd.isnull(row['merged_time'].date()) is False:
            pr_end_day = pr_result[pr_number]['merged_time']
        elif row['closed_time'].date() is not None and pd.isnull(row['closed_time'].date()) is False:
            pr_end_day = pr_result[pr_number]['closed_time']
        else:
            pr_end_day = end_day
        # 获取每天处于open状态的PR
        for i in range((pr_end_day - pr_start_day).days + 1):
            day = pr_start_day + timedelta(days=i)
            day = str(day.date()) + " 00:00:00"
            if day in day_open_pr_data.keys():
                day_open_pr_data[day].append(pr_result)
    # 只统计有PR的日期
    day_count = 0
    for day in day_open_pr_data.keys():
        pr_list = day_open_pr_data[day]
        if pr_list.__len__() == 0:
            continue
        day_count = day_count + 1
        # print("当前日期：", day)

        # 存储算法的排序列表
        rank_sort = []
        rank_value_sort = []
        rank_dict = {}
        # 存储真实的排序列表
        true_sort = []
        true_value_sort = []
        true_dict = {}
        for pr in pr_list:
            for key in pr.keys():
                true_dict[key] = pr[key]['original_rank']
                rank_dict[key] = pr[key][alg_name]
        # 将true_dict按照value进行排序，并将排序后的key存储到true_sort中
        true_sort = sorted(true_dict, key=true_dict.__getitem__, reverse=reversed)
        # 将rank_dict按照value进行排序，并将排序后的key存储到rank_sort中
        rank_sort = sorted(rank_dict, key=rank_dict.__getitem__, reverse=reversed)
        # 根据top_k判断是否需要截取，并截断前top_k个的PR——number
        if top_k is not None and top_k < rank_sort.__len__():
            rank_sort = rank_sort[0:top_k]
        for temp_rank_pr in rank_sort:
            rank_value_sort.append(true_dict[temp_rank_pr])
            true_value_sort.append(true_dict[temp_rank_pr])
        true_value_sort.sort(reverse=reversed)

        ndcg_num = ndcg(true_value_sort, rank_value_sort, rank_value_sort.__len__(), form="exp")
        mrr_num = mrr(true_value_sort, rank_value_sort)
        kendall_num = kendall_tau_distance(true_value_sort, rank_value_sort)
        # print("pr_number排序:", sort_result)
        # print("存储算法的排序列表rank_sort:", rank_sort)
        # print("存储真实的排序列表true_sort:", true_sort)
        # print("存储算法的排序列表rank_value_sort:", rank_value_sort)
        # print("存储真实的排序列表true_value_sort:", true_value_sort)
        # print("ndcg_num:", ndcg_num)
        # print("mrr_num:", mrr_num)
        # print("kendall_num:", kendall_num)
        day_list.append(day)
        ndcg_list.append(ndcg_num)
        mrr_list.append(mrr_num)
        kendall_list.append(kendall_num)
        open_pr_count_byday.append(pr_list.__len__())

    headers = ['日期', '当日openPR个数',
               'ndcg',
               'mrr',
               'kendall_tau_distance'
               ]
    row_data = []
    sum_result_tmp = []
    for i in range(len(day_list) + 1):
        tmp = []
        if i < len(day_list):
            tmp.append(day_list[i])
            tmp.append(open_pr_count_byday[i])
            tmp.append(ndcg_list[i])
            tmp.append(mrr_list[i])
            tmp.append(kendall_list[i])
        else:
            tmp.append("平均值")
            sum_result_tmp.append(alg_name)
            tmp.append(" ")
            sum_result_tmp.append(str(reversed))
            tmp.append(sum(ndcg_list) / len(ndcg_list))
            sum_result_tmp.append(sum(ndcg_list) / len(ndcg_list))
            tmp.append(sum(mrr_list) / len(mrr_list))
            sum_result_tmp.append(sum(mrr_list) / len(mrr_list))
            tmp.append(sum(kendall_list) / len(kendall_list))
            sum_result_tmp.append(sum(kendall_list) / len(kendall_list))
        row_data.append(tmp)
    # print(row_data)
    sum_result.append(sum_result_tmp)
    # 保存数据到csv文件
    result_path = "./rank_model/" + repo_name + "/result/rank_eval/"
    path_exists_or_create(result_path)
    with open(result_path + repo_name + "_" + str(reversed) + "_" + alg_name + "_result.csv",
              'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f, dialect='excel')
        writer.writerow(headers)
        for item in row_data:
            writer.writerow(item)
    return None


# 计算仿真的Reality中的Ed1，Ed2，Ec1，Ec2
def sim_reality_result(result_data_path, repo_name, start_day, end_day):
    result_data = pd.read_excel(result_data_path)
    # 获取每天处于open状态的PR
    day_open_pr_data = {}
    # 每天被评审的PR数目
    day_reviewed_pr_data = {}
    # 每天被合的PR数目
    day_merged_pr_data = {}
    # 每天被关闭的PR数目
    day_abandon_pr_data = {}
    # 每天被评审的PR时间
    day_reviewed_pr_time_data = {}
    # 每天应当花费的评审时间
    day_open_pr_time_data = {}
    for i in range((end_day - start_day).days + 1):
        day = start_day + timedelta(days=i)
        day_open_pr_data[str(day)] = []
        day_reviewed_pr_data[str(day)] = []
        day_merged_pr_data[str(day)] = []
        day_abandon_pr_data[str(day)] = []
        day_reviewed_pr_time_data[str(day)] = 0
        day_open_pr_time_data[str(day)] = 0

    for index, row in result_data.iterrows():
        pr_result = {}
        pr_number = row['pr_number']
        pr_result[pr_number] = {}
        pr_result[pr_number]['created_time'] = pd.to_datetime(row['created_time'])
        pr_result[pr_number]['closed_time'] = pd.to_datetime(row['closed_time'])
        pr_result[pr_number]['merged_time'] = pd.to_datetime(row['merged_time'])

        temp_change_line = row['change_line']
        if temp_change_line is None or temp_change_line <= 100:
            pr_result[pr_number]['review_once_time'] = 10
        elif temp_change_line <= 300:
            pr_result[pr_number]['review_once_time'] = 15
        elif temp_change_line <= 500:
            pr_result[pr_number]['review_once_time'] = 20
        elif temp_change_line <= 1000:
            pr_result[pr_number]['review_once_time'] = 30
        else:
            pr_result[pr_number]['review_once_time'] = 45

        if row['comments'] == 0:
            if row['comment_time'] is None or row['comment_time'] == "NaT":
                if row['merged_time'].date() is not None and pd.isnull(row['merged_time'].date()) is False:
                    pr_result[pr_number]['comments'] = [pr_result[pr_number]['merged_time']]
                elif row['closed_time'].date() is not None and pd.isnull(row['closed_time'].date()) is False:
                    pr_result[pr_number]['comments'] = [pr_result[pr_number]['closed_time']]
                else:
                    pr_result[pr_number]['comments'] = [end_day]
            else:
                pr_result[pr_number]['comments'] = [pd.to_datetime(row['comment_time'])]
        else:
            temp_comment_time = row['comment_time']

            temp_comment_time = temp_comment_time.replace("'", "\"")
            tmp_comment_time = json.loads(temp_comment_time)
            pr_result[pr_number]['comments'] = []
            for tmp_time in tmp_comment_time:
                tmp_time_inside = pd.to_datetime(tmp_time.get('created_at'))
                pr_result[pr_number]['comments'].append(tmp_time_inside)
                day = str(tmp_time_inside.date()) + " 00:00:00"
                if day in day_reviewed_pr_time_data.keys():
                    # 这里记录每天评审的PR个数，同时记录每天评审的PR时间
                    day_reviewed_pr_data[day].append(pr_result)
                    day_reviewed_pr_time_data[day] += pr_result[pr_number]['review_once_time']

        pr_start_day = pr_result[pr_number]['created_time']
        if row['merged_time'].date() is not None and pd.isnull(row['merged_time'].date()) is False:
            pr_end_day = pr_result[pr_number]['merged_time']
            day = str(pr_end_day.date()) + " 00:00:00"
            if day in day_merged_pr_data.keys():
                day_merged_pr_data[day].append(pr_result)
        elif row['closed_time'].date() is not None and pd.isnull(row['closed_time'].date()) is False:
            pr_end_day = pr_result[pr_number]['closed_time']
            day = str(pr_end_day.date()) + " 00:00:00"
            if day in day_abandon_pr_data.keys():
                day_abandon_pr_data[day].append(pr_result)
        else:
            pr_end_day = end_day
        # 获取每天处于open状态的PR
        for i in range((pr_end_day - pr_start_day).days + 1):
            day = pr_start_day + timedelta(days=i)
            day = str(day.date()) + " 00:00:00"
            if day in day_open_pr_data.keys():
                day_open_pr_data[day].append(pr_result)

    # 只统计有PR的日期
    day_count = 0
    for day in day_open_pr_data.keys():
        pr_list = day_open_pr_data[day]
        if pr_list.__len__() == 0:
            continue

        for pr in pr_list:
            left_review_times = 0
            pr_number = list(pr.keys())[0]

            for temp_day in pr[pr_number]['comments']:
                temp_day = str(temp_day.date()) + " 00:00:00"
                if pd.to_datetime(temp_day) >= pd.to_datetime(day):
                    left_review_times += 1
            day_open_pr_time_data[day] += left_review_times * pr[pr_number]['review_once_time']

    prEc1EvalList = []
    prEc2EvalList = []
    prEd1EvalList = []
    prEd2EvalList = []
    for day in day_open_pr_data.keys():
        pr_list = day_open_pr_data[day]
        if pr_list.__len__() == 0:
            prEc1EvalList.append(0)
            continue
        open_pr_num = pr_list.__len__()
        review_num = day_reviewed_pr_data[day].__len__()
        prEc1EvalList.append(review_num / open_pr_num)

    for day in day_reviewed_pr_time_data.keys():
        if day_open_pr_time_data[day] == 0:
            prEc2EvalList.append(0)
        else:
            prEc2EvalList.append(day_reviewed_pr_time_data[day] / day_open_pr_time_data[day])
    for day in day_reviewed_pr_data.keys():
        pr_list = day_reviewed_pr_data[day]
        if pr_list.__len__() == 0:
            prEd1EvalList.append(0)
            continue
        review_num = pr_list.__len__()
        merge_num = day_merged_pr_data[day].__len__()
        abandon_num = day_abandon_pr_data[day].__len__()
        prEd1EvalList.append(merge_num / review_num)
        prEd2EvalList.append(abandon_num / review_num)
    dict_eval_result = {}
    dict_eval_result['prEc1EvalList'] = sum(prEc1EvalList) / len(prEc1EvalList)
    dict_eval_result['prEc2EvalList'] = sum(prEc2EvalList) / len(prEc2EvalList)
    dict_eval_result['prEd1EvalList'] = sum(prEd1EvalList) / len(prEd1EvalList)
    dict_eval_result['prEd2EvalList'] = sum(prEd2EvalList) / len(prEd2EvalList)
    dict_eval_result['sumEval'] = sum(prEc1EvalList) / len(prEc1EvalList) + sum(prEd2EvalList) / len(
        prEd2EvalList) + sum(prEd1EvalList) / len(prEd1EvalList) + sum(prEd2EvalList) / len(prEd2EvalList)
    # print("dict_eval_result:", dict_eval_result)
    # 保存数据到json文件
    result_path = "./sim_data/" + repo_name + "/result/"
    path_exists_or_create(result_path)
    with open(result_path + repo_name + "_sim_reality_result.json", 'w', encoding='utf-8') as f:
        json.dump(dict_eval_result, f)
    return None


def run_rank_evaluation(repo_name, start_time_str, end_time_str, top_k, all_alg_dict):
    """Evaluate every ranking algorithm in ``all_alg_dict`` over the window
    and write the aggregated summary CSV for ``repo_name``.

    Each algorithm is scored twice: once with smaller-is-better ordering
    (False) and once with larger-is-better ordering (True).
    """
    start_day = pd.to_datetime(start_time_str)
    end_day = pd.to_datetime(end_time_str)
    summary_rows = []
    for alg_key in all_alg_dict:
        algorithm = all_alg_dict.get(alg_key)
        data_dir = "./sim_data/" + repo_name + "/"
        path_exists_or_create(data_dir)
        excel_path = data_dir + repo_name + "_pr_mP.xlsx"
        # descending=True means a larger score ranks first.
        for descending in (False, True):
            alg_model_result(excel_path, top_k, descending, algorithm,
                             repo_name, start_day, end_day, summary_rows)

    sum_headers = ['算法',
                   "rear（false）——top（true）",
                   'ndcg',
                   'mrr',
                   'kendall_tau_distance'
                   ]
    # Persist one summary row per (algorithm, direction) pair.
    out_dir = "./rank_model/" + repo_name + "/result/rank_eval/"
    path_exists_or_create(out_dir)
    out_file = out_dir + repo_name + "_sum_result.csv"
    with open(out_file, 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f, dialect='excel')
        writer.writerow(sum_headers)
        writer.writerows(summary_rows)
    return None


# Script entry point: run the simulated-reality evaluation for one repo.
if __name__ == '__main__':
    repo_list = ["tensorflow"]
    # The per-algorithm rank evaluation that was once driven from here now
    # lives behind run_rank_evaluation; this script only runs the
    # simulation-reality indicators for opencv over calendar year 2022.
    sim_reality_result("./sim_data/opencv/opencv_pr_mP.xlsx",
                       "opencv",
                       pd.to_datetime("2022-01-1"),
                       pd.to_datetime("2022-12-31"))