import pandas as pd
import numpy as np
import json
import argparse

from tqdm import tqdm

from utils.path_exist import path_exists_or_create


def has_text(body):
    """Detect review-relevant keywords in a PR body.

    Case-insensitively checks whether the body contains the keywords
    "bug", "document", "feature", "improve", "refactor", "test" and an
    "@"-mention.

    Args:
        body: The PR body text. Non-string values (pandas yields float NaN
            or ints for empty/numeric cells) are treated as containing no
            keywords.

    Returns:
        A list of seven 0/1 flags, in order:
        [has_bug, has_document, has_feature, has_improve, has_refactor,
         has_test_code, at_mention]. 0 = keyword absent, 1 = present.
    """
    # "@" marks an @-mention; the others are literal keyword substrings.
    keywords = ("bug", "document", "feature", "improve", "refactor", "test", "@")
    # Missing bodies arrive as NaN (float) from pandas; treat any
    # non-string-like value as "no keywords" instead of crashing on .lower().
    if isinstance(body, (float, int)):
        return [0] * len(keywords)
    lowered = body.lower()
    return [1 if keyword in lowered else 0 for keyword in keywords]


def get_PR_type(hastext):
    """Classify a PR from its keyword flags.

    The trailing at_mention flag is ignored. Categories:
    0 = no keyword, 1 = bug fix, 2 = feature, 3 = documentation/config,
    4 = refactoring, 5 = mixed (multiple categories).
    """
    # Drop at_mention; the remaining six flags drive the classification.
    bug, document, feature, improve, refactor, test_code = hastext[:-1]
    total = bug + document + feature + improve + refactor + test_code

    if total == 0:
        return 0
    if total >= 3:
        return 5
    if total == 1:
        if bug or test_code:
            return 1
        if feature:
            return 2
        if document:
            return 3
        # Only improve or refactor can remain.
        return 4
    # Exactly two flags set: bug+test and improve+refactor are coherent
    # single categories; any other pair is mixed.
    if bug and test_code:
        return 1
    if improve and refactor:
        return 4
    return 5


def get_review_single_time(comment_data, pr_data):
    """Resolve, for each PR, the time of its first (earliest) review comment.

    Args:
        comment_data: DataFrame with columns "belong_to_PR" and "created_at"
            (ISO-8601 strings like "2022-01-05T10:00:00Z").
        pr_data: DataFrame with columns "pr_number", "merged_at",
            "merged_time" and "closed_time".

    Returns:
        Dict mapping pr_number -> Timestamp of the earliest comment.
        PRs without comments fall back to merged_time (if merged) or
        closed_time.
    """
    print("pr_features_pd" + str(len(comment_data)))
    # Group the raw comment timestamp strings by the PR they belong to.
    comment_times = {}
    for _, row in comment_data.iterrows():
        comment_times.setdefault(row["belong_to_PR"], []).append(row["created_at"])

    re_comment_data = {}
    for _, row in pr_data.iterrows():
        pr_number = row['pr_number']
        if pr_number in comment_times:
            parsed = pd.to_datetime(pd.Series(comment_times[pr_number]),
                                    format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                    utc=True).dt.tz_localize(None)
            # BUGFIX: take the true earliest comment. The previous code did
            # sort_values() and then read df['created_at'][0], which accesses
            # index *label* 0 — i.e. the original first row, not the minimum.
            re_comment_data[pr_number] = parsed.min()
        elif pd.notna(row['merged_at']):
            # Merged PR with no review comments: use the merge time.
            # (pd.notna also treats NaN — not just None — as missing,
            # matching get_review_comment_time.)
            re_comment_data[pr_number] = row['merged_time']
        else:
            re_comment_data[pr_number] = row['closed_time']
    return re_comment_data


def get_review_comment_time(comment_data, pr_data):
    """Resolve, for each PR, the list of its review-comment timestamps.

    Comment times are parsed from ISO-8601 strings, sorted ascending, and
    filtered to those on or after 2022-01-01 (the study window).

    Args:
        comment_data: DataFrame with columns "belong_to_PR" and "created_at".
        pr_data: DataFrame with columns "pr_number", "merged_at",
            "merged_time" and "closed_time".

    Returns:
        Dict mapping pr_number -> list of {'created_at': "<timestamp str>"}
        dicts. PRs whose comment list is empty (no comments, or all before
        2022) fall back to merged_time (if merged) or closed_time.
    """
    print("pr_features_pd" + str(len(comment_data)))
    # Group the raw comment timestamp strings by the PR they belong to.
    comment_times = {}
    for _, row in comment_data.iterrows():
        comment_times.setdefault(row["belong_to_PR"], []).append(row["created_at"])

    def fallback(row):
        # Single fallback used for both "no comments" and "all filtered out"
        # (the original duplicated this logic in two branches).
        if pd.notna(row['merged_at']):
            return row['merged_time']
        return row['closed_time']

    re_comment_data = {}
    for _, row in pr_data.iterrows():
        pr_number = row['pr_number']
        if pr_number not in comment_times:
            re_comment_data[pr_number] = fallback(row)
            continue
        parsed = pd.to_datetime(pd.Series(comment_times[pr_number]),
                                format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                utc=True).dt.tz_localize(None).sort_values()
        # Drop comments created before 2022-01-01.
        parsed = parsed[parsed >= pd.to_datetime("2022-01-01 00:00:00")]
        # String-convert each remaining timestamp into a small record.
        times = [{'created_at': str(t)} for t in parsed]
        re_comment_data[pr_number] = times if times else fallback(row)
    return re_comment_data


def run_simulation_pr(repo_name, start_time_str, end_time_str, rank_alg_dict):
    """Build the per-PR simulation dataset for one repository.

    Reads PR and review-comment spreadsheets under ./data/<repo_name>/,
    derives keyword/type/review-time features, filters to the PRs listed in
    the SVM-rank "year" file, remaps pre-2022 creation times into early 2022,
    computes FIFO/SMF baseline priorities plus one priority column per
    ranking algorithm, and writes results under ./sim_data/<repo_name>/.

    Args:
        repo_name: Repository directory name used in all data paths.
        start_time_str: Unused in this function body (kept for the caller's
            interface).
        end_time_str: Unused (only referenced by the commented-out fillna
            code below).
        rank_alg_dict: Mapping of algorithm index -> algorithm name; the
            "bayesian_network" entry is skipped.
    """
    # Read the PR records from pr_info_add_conversation.xlsx.
    data = pd.read_excel("./data/" + repo_name + "/pr_info_add_conversation.xlsx")

    # Parse GitHub's ISO-8601 timestamps and drop the UTC timezone marker.
    data['created_time'] = pd.to_datetime(data['created_at'], format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                          utc=True).dt.tz_localize(None)
    data['closed_time'] = pd.to_datetime(data['closed_at'], format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                         utc=True).dt.tz_localize(None)
    data['merged_time'] = pd.to_datetime(data['merged_at'], format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                         utc=True).dt.tz_localize(None)

    data['pr_number'] = data['number']
    # Keyword-presence flags derived from the PR body.
    data['keyword_in_body'] = data['body'].map(has_text)
    # PR type (0=none, 1=bug, 2=feature, 3=doc, 4=refactor, 5=mixed).
    data['PR_type'] = data['keyword_in_body'].map(get_PR_type)

    comment_data = pd.read_excel("./data/" + repo_name + "/PR_comment_info.xlsx")
    # Real review sequence: first review time per PR.
    temp_dict = get_review_single_time(comment_data, data)
    for index, row in data.iterrows():
        pr_number = row['pr_number']
        # Add a comment_real_time column to data.
        data.loc[index, 'comment_real_time'] = temp_dict[pr_number]
        # row['comment_real_time'] = temp_dict[pr_number]
    # Review times: PRs with multiple reviews get a list of records.
    temp_dict = get_review_comment_time(comment_data, data)
    for index, row in data.iterrows():
        pr_number = row['pr_number']
        # comment_time
        data.loc[index, 'comment_time'] = str(temp_dict[pr_number])
        # Update this row's comments count from comment_time; the value may
        # be a list of records or a single fallback Timestamp.
        if temp_dict[pr_number].__class__ is list:
            data.at[index, 'comments'] = len(temp_dict[pr_number])
        elif temp_dict[pr_number].__class__ is pd._libs.tslibs.timestamps.Timestamp:
            data.at[index, 'comments'] = 1
        else:
            data.at[index, 'comments'] = 0

    # NOTE(review): df is a slice of data; the column assignments below may
    # emit SettingWithCopyWarning — confirm a .copy() isn't needed.
    df = data[['pr_number', 'created_time', 'closed_time', 'merged_time', 'PR_type', 'merged', 'comments',
               'comment_real_time', 'comment_time']]  # review_comments_number->comments
    df['change_line'] = data['additions'] + data['deletions']
    df['change_file'] = data['changed_files']
    # Fill missing values (currently disabled).
    # last_day = pd.to_datetime(end_time_str + " 23:59:59")
    # df['closed_time'].fillna(last_day, inplace=True)
    # df['comment_real_time'].fillna(last_day, inplace=True)
    # df['comment_time'].fillna(last_day, inplace=True)
    # print(df['comment_time'].fillna(last_day))
    # Convert merged from True/False to 1/0.
    df['merged'] = df['merged'].map(lambda x: 1 if x is True else 0)
    # Read <repo>_svm_rank_format_year_data.txt: the number after the final
    # '#' on each line is a pr_number; keep only those PRs in df.
    year_pr_number_list = []
    year_pr_number_dict = {}
    with open("./rank_data/" + repo_name + "/" + repo_name + "_svm_rank_format_year_data.txt", "r") as f:
        lines = f.readlines()
        for line in lines:
            year_pr_number_list.append(int(line.split("#")[-1]))
            # e.g. "3 qid:1 1:1 2:0 3:1 4:0": the token before "qid" is the
            # rank label for this PR.
            year_pr_number_dict[int(line.split("#")[-1])] = line.split(" ")[0]
    df = df[df['pr_number'].isin(year_pr_number_list)]
    for index, row in df.iterrows():
        pr_number = row['pr_number']
        # Original rank label parsed from the year file.
        df.loc[index, 'original_rank'] = str(year_pr_number_dict[pr_number])
    # Find the creation time of the first PR created in 2022.
    first_pr_created_time = df[df['created_time'] >= pd.to_datetime("2022-01-01 00:00:00")].iloc[0]['created_time']
    # Count the PRs created before 2022.
    before_2022_pr_number = df[df['created_time'] < pd.to_datetime("2022-01-01 00:00:00")].shape[0]
    # Split the interval from 2022-01-01 00:30:00 to first_pr_created_time
    # into before_2022_pr_number slices, then replace each pre-2022 PR's
    # created_time with one of those evenly spaced times.
    # NOTE(review): raises ZeroDivisionError if no PR predates 2022 — confirm
    # the input always contains pre-2022 PRs.
    time_interval = (first_pr_created_time - pd.to_datetime("2022-01-01 00:30:00")).total_seconds() / before_2022_pr_number
    row_index = 0
    for index, row in df[df['created_time'] < pd.to_datetime("2022-01-01 00:00:00")].iterrows():
        df.loc[index, 'created_time'] = pd.to_datetime("2022-01-01 00:30:00") + pd.Timedelta(
            seconds=time_interval * row_index)
        row_index += 1

    # Render created_time as a string truncated to whole seconds.
    df['created_time'] = df['created_time'].map(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))

    # Ensure the output directory exists before writing.
    path_exists_or_create("./sim_data/" + repo_name)

    df.to_excel("./sim_data/" + repo_name + "/" + repo_name + "_pr.xlsx", index=False)

    # Read the written sheet back so all types match the on-disk form.
    original_data = pd.read_excel("./sim_data/" + repo_name + "/" + repo_name + "_pr.xlsx")
    # Map pr_number -> created_time, and collect the distinct times/sizes.
    pr_number_created_time_dict = {}
    created_time_set = set()

    change_line_set = set()
    for index, row in original_data.iterrows():
        created_time_temp = pd.to_datetime(row['created_time'], format="%Y-%m-%d %H:%M:%S")
        pr_number_created_time_dict[row['pr_number']] = created_time_temp
        created_time_set.add(created_time_temp)
        change_line_set.add(row['change_line'])
    # Sort the distinct creation times (newest first).
    created_time_list = list(created_time_set)
    created_time_list.sort(reverse=True)
    # Sort the distinct change sizes (largest first).
    change_line_list = list(change_line_set)
    change_line_list.sort(reverse=True)
    # Normalized rank index (0..1) for each distinct creation time.
    # NOTE(review): divides by len-1 — fails if only one distinct time exists.
    time_index_dict = {}
    for index, time in enumerate(created_time_list):
        time_index_dict[time] = index / (len(created_time_list) - 1)

    # FIFO = normalized time rank; SMF = normalized change-size rank.
    for index, row in original_data.iterrows():
        original_data.loc[index, 'FIFO'] = time_index_dict[pr_number_created_time_dict[row['pr_number']]]
        original_data.loc[index, 'SMF'] = change_line_list.index(row['change_line']) / (len(change_line_list) - 1)

    for alg_index in rank_alg_dict.keys():
        alg_name = rank_alg_dict.get(alg_index)
        if alg_name == "bayesian_network":
            continue
        # Path to the sorted test-set ranking result for this algorithm.
        test_sort_result_path = "./rank_model/" + repo_name + "/result/" + repo_name + "_result" + "_" + alg_name + ".txt"
        test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]

        priority = pd.read_excel(f'{test_sort_result_path}_sorted.xlsx')
        print(f'{test_sort_result_path}_sorted.xlsx')

        # Column 4 is presumably the predicted priority score — TODO confirm
        # against the sorted-result sheet layout.
        original_data[alg_name] = priority[4]
        print(original_data[alg_name])

    original_data.to_excel("./sim_data/" + repo_name + "/" + repo_name + "_pr_mP.xlsx", index=False)


if __name__ == '__main__':

    # NOTE(review): this script body largely duplicates run_simulation_pr but
    # diverges (no pre-2022 created_time remapping, inline alg_dict) — it
    # cannot be replaced by a call without changing behavior; consider
    # consolidating the shared steps.
    repo_list = ["tensorflow"]  # ["cdnjs"]
    for repo_name in repo_list:
        # Read the PR records from pr_info_add_conversation.xlsx.
        data = pd.read_excel("./data/" + repo_name + "/pr_info_add_conversation.xlsx")

        # Parse GitHub's ISO-8601 timestamps and drop the UTC timezone marker.
        data['created_time'] = pd.to_datetime(data['created_at'], format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                              utc=True).dt.tz_localize(None)
        data['closed_time'] = pd.to_datetime(data['closed_at'], format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                             utc=True).dt.tz_localize(None)
        data['merged_time'] = pd.to_datetime(data['merged_at'], format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                             utc=True).dt.tz_localize(None)

        data['pr_number'] = data['number']
        # Keyword-presence flags derived from the PR body.
        data['keyword_in_body'] = data['body'].map(has_text)
        # PR type (0=none, 1=bug, 2=feature, 3=doc, 4=refactor, 5=mixed).
        data['PR_type'] = data['keyword_in_body'].map(get_PR_type)

        comment_data = pd.read_excel("./data/" + repo_name + "/PR_comment_info.xlsx")
        # Real review sequence: first review time per PR.
        temp_dict = get_review_single_time(comment_data, data)
        for index, row in data.iterrows():
            pr_number = row['pr_number']
            # Add a comment_real_time column to data.
            data.loc[index, 'comment_real_time'] = temp_dict[pr_number]
            # row['comment_real_time'] = temp_dict[pr_number]
        # Review times: PRs with multiple reviews get a list of records.
        temp_dict = get_review_comment_time(comment_data, data)
        for index, row in data.iterrows():
            pr_number = row['pr_number']
            # comment_time
            data.loc[index, 'comment_time'] = str(temp_dict[pr_number])
            # Update this row's comments count from comment_time; the value
            # may be a list of records or a single fallback Timestamp.
            if temp_dict[pr_number].__class__ is list:
                data.at[index, 'comments'] = len(temp_dict[pr_number])
            elif temp_dict[pr_number].__class__ is pd._libs.tslibs.timestamps.Timestamp:
                data.at[index, 'comments'] = 1
            else:
                data.at[index, 'comments'] = 0


        # NOTE(review): df is a slice of data; the column assignments below
        # may emit SettingWithCopyWarning — confirm a .copy() isn't needed.
        df = data[['pr_number', 'created_time', 'closed_time', 'merged_time', 'PR_type', 'merged', 'comments',
                   'comment_real_time', 'comment_time']]  # review_comments_number->comments
        df['change_line'] = data['additions'] + data['deletions']
        df['change_file'] = data['changed_files']
        # Fill missing values (currently disabled).
        # last_day = pd.to_datetime("2022-12-31 23:59:59")
        # df['closed_time'].fillna(last_day, inplace=True)
        # df['comment_real_time'].fillna(last_day, inplace=True)
        # df['comment_time'].fillna(last_day, inplace=True)
        # print(df['comment_time'].fillna(last_day))
        # Convert merged from True/False to 1/0.
        df['merged'] = df['merged'].map(lambda x: 1 if x is True else 0)
        # Read <repo>_svm_rank_format_year_data.txt: the number after the
        # final '#' on each line is a pr_number; keep only those PRs in df.
        year_pr_number_list = []
        year_pr_number_dict = {}
        with open("./rank_data/" + repo_name + "/" + repo_name + "_svm_rank_format_year_data.txt", "r") as f:
            lines = f.readlines()
            for line in lines:
                year_pr_number_list.append(int(line.split("#")[-1]))
                # e.g. "3 qid:1 1:1 2:0 3:1 4:0": the token before "qid" is
                # the rank label for this PR.
                year_pr_number_dict[int(line.split("#")[-1])] = line.split(" ")[0]
        df = df[df['pr_number'].isin(year_pr_number_list)]
        for index, row in df.iterrows():
            pr_number = row['pr_number']
            # Original rank label parsed from the year file.
            df.loc[index, 'original_rank'] = str(year_pr_number_dict[pr_number])
        # Ensure the output directory exists before writing.
        path_exists_or_create("./sim_data/" + repo_name)

        df.to_excel("./sim_data/" + repo_name + "/" + repo_name + "_pr.xlsx", index=False)

        # Read the written sheet back so all types match the on-disk form.
        original_data = pd.read_excel("./sim_data/" + repo_name + "/" + repo_name + "_pr.xlsx")
        # Map pr_number -> created_time, and collect the distinct times/sizes.
        pr_number_created_time_dict = {}
        created_time_set = set()

        change_line_set = set()
        for index, row in original_data.iterrows():
            created_time_temp = pd.to_datetime(row['created_time'], format="%Y-%m-%d %H:%M:%S")
            pr_number_created_time_dict[row['pr_number']] = created_time_temp
            created_time_set.add(created_time_temp)
            change_line_set.add(row['change_line'])
        # Sort the distinct creation times (newest first).
        created_time_list = list(created_time_set)
        created_time_list.sort(reverse=True)
        # Sort the distinct change sizes (largest first).
        change_line_list = list(change_line_set)
        change_line_list.sort(reverse=True)
        # Normalized rank index (0..1) for each distinct creation time.
        # NOTE(review): divides by len-1 — fails if only one distinct time
        # exists.
        time_index_dict = {}
        for index, time in enumerate(created_time_list):
            time_index_dict[time] = index / (len(created_time_list) - 1)

        # FIFO = normalized time rank; SMF = normalized change-size rank.
        for index, row in original_data.iterrows():
            original_data.loc[index, 'FIFO'] = time_index_dict[pr_number_created_time_dict[row['pr_number']]]
            original_data.loc[index, 'SMF'] = change_line_list.index(row['change_line']) / (len(change_line_list) - 1)

        # Ranking algorithms whose sorted results are merged in; the
        # bayesian_network entry is skipped below.
        alg_dict = {
            0: "MART",
            # 1: "RankNet",
            2: "RankBoost",
            3: "AdaRank",
            4: "Coordinate_Ascent",
            5: "bayesian_network",
            6: "LambdaMART",
            7: "ListNet",
            8: "Random_Forests"
        }
        for alg_index in alg_dict.keys():
            alg_name = alg_dict.get(alg_index)
            if alg_name == "bayesian_network":
                continue
            # Path to the sorted test-set ranking result for this algorithm.
            test_sort_result_path = "./rank_model/" + repo_name + "/result/" + repo_name + "_result" + "_" + alg_name + ".txt"
            test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]

            priority = pd.read_excel(f'{test_sort_result_path}_sorted.xlsx')
            print(f'{test_sort_result_path}_sorted.xlsx')

            # Column 4 is presumably the predicted priority score — TODO
            # confirm against the sorted-result sheet layout.
            original_data[alg_name] = priority[4]
            print(original_data[alg_name])

        original_data.to_excel("./sim_data/" + repo_name + "/" + repo_name + "_pr_mP.xlsx", index=False)