'''
Collect all PR records. For each PR, take its creation time and the set of
PRs still open at that moment; that open set is the input X.
FIFO baseline: sort X by creation time, earliest first, giving FIFOY.
True order: within X, sort by the first subsequent state-change time after
that moment (response, close, or merge), giving the true ranking TRUEY.
Compare FIFOY against TRUEY using NDCG to judge ranking quality.
'''
import math
import re
from datetime import datetime
from baseline.true_order import get_true_order_dict
from utils.path_exist import path_exists_or_create
import pandas as pd
import tqdm
from utils.time_utils import time_reverse


# all_filename holds the full data set, train_filename the training split,
# test_filename the test split; data is the list of rows to write.
def text_save(all_filename, train_filename, test_filename, data, max_train):
    """Write SVMrank-formatted rows to disk, split into train/test files.

    Each element of *data* is a list such as [label, "qid:1", "1:v", "# id"];
    it is flattened into one space-separated line. The first *max_train* rows
    go to *train_filename*, the remainder to *test_filename*, and every row
    is also written to *all_filename*.

    Fix: files are now opened via context managers (the originals leaked file
    handles if a write raised) in plain write mode instead of 'w+'.
    """
    with open(all_filename, 'w') as all_file, \
            open(train_filename, 'w') as train_file, \
            open(test_filename, 'w') as test_file:
        for i, row in enumerate(data):
            # Strip list syntax (brackets, quotes, commas) and add a newline.
            s = str(row).replace('[', '').replace(']', '')
            s = s.replace("'", '').replace(',', '') + '\n'
            if i < max_train:
                train_file.write(s)
            else:
                test_file.write(s)
            all_file.write(s)
    print("保存文件成功")


def get_data_by_repo_name(repo_name, year_data_bool, max_created_at_year_str):
    """Join the four per-PR feature tables of *repo_name* into SVMrank rows.

    Reads the PR/author/project/reviewer feature spreadsheets plus
    pr_info_add_conversation.xlsx from ./data/<repo_name>/, concatenates the
    features per PR, derives a ranking label from merge status, response time
    and conversation count, min-max normalises the feature columns, and emits
    rows of the form [label, "qid:1", "1:v1", ..., "# pr_number"].

    :param repo_name: repository directory name under ./data/.
    :param year_data_bool: when True, additionally split rows into the year
        preceding *max_created_at_year_str* (year_data) and everything
        created before that window (before_year_data).
    :param max_created_at_year_str: upper time bound string, e.g.
        "2022-12-31T23:59:59Z".
    :return: tuple (row_data, year_data, before_year_data); the last two are
        empty lists when year_data_bool is False.
    """
    # Read PR_features.xlsx row by row.
    pr_features_pd = pd.read_excel("./data/" + repo_name + "/PR_features.xlsx")
    pr_features = {}
    print("pr_features_pd" + str(len(pr_features_pd)))
    for index, row in tqdm.tqdm(pr_features_pd.iterrows(), "读取" + repo_name + "仓库的pr_features.xlsx"):
        # The first column is the PR number.
        pr_features[row[0]] = dict(row)
    print("pr_features" + str(len(pr_features)))

    # Read author_features.xlsx row by row.
    author_features_pd = pd.read_excel("./data/" + repo_name + "/author_features.xlsx")
    author_features = {}
    print("author_features_pd" + str(len(author_features_pd)))
    for index, row in tqdm.tqdm(author_features_pd.iterrows(), "读取" + repo_name + "仓库的author_features.xlsx"):
        # The first column is the PR number.
        author_features[row[0]] = dict(row)
    print("author_features" + str(len(author_features)))

    # Read project_features.xlsx row by row.
    project_feature_pd = pd.read_excel("./data/" + repo_name + "/project_features.xlsx")
    project_feature = {}
    print("project_feature_pd" + str(len(project_feature_pd)))
    for index, row in tqdm.tqdm(project_feature_pd.iterrows(), "读取" + repo_name + "仓库的project_feature.xlsx"):
        # The first column is the PR number.
        project_feature[row[0]] = dict(row)
    print("project_feature" + str(len(project_feature)))

    # Read reviewer_features.xlsx row by row.
    reviewer_feature_pd = pd.read_excel("./data/" + repo_name + "/reviewer_features.xlsx")
    reviewer_feature = {}
    print("reviewer_feature_pd" + str(len(reviewer_feature_pd)))
    for index, row in tqdm.tqdm(reviewer_feature_pd.iterrows(), "读取" + repo_name + "仓库的reviewer_feature.xlsx"):
        # The first column is the PR number.
        reviewer_feature[row[0]] = dict(row)
    print("reviewer_feature" + str(len(reviewer_feature)))

    feature_dict = {}
    for pr_number in tqdm.tqdm(pr_features.keys(), desc="拼接四个特征:"):
        # Concatenate the PR features.
        feature_dict[pr_number] = []
        for row_key in pr_features.get(pr_number).keys():
            if row_key == "number" or row_key.__contains__("embedding"):
                continue
            # Keys containing "embedding" hold vector dumps that would need
            # parse_embedding_data(); currently they are skipped entirely.
            # elif row_key.__contains__("embedding") :
            #     embed_list = parse_embedding_data(pr_features.get(pr_number).get(row_key))
            #     for key in embed_list:
            #         feature_dict.get(pr_number).append(key)
            else:
                # Convert booleans to 0/1.
                if isinstance(pr_features.get(pr_number).get(row_key), bool):
                    if pr_features.get(pr_number).get(row_key):
                        feature_dict.get(pr_number).append(1)
                    else:
                        feature_dict.get(pr_number).append(0)
                else:
                    feature_dict.get(pr_number).append(pr_features.get(pr_number).get(row_key))

        # Concatenate the author features.
        for row_key in author_features.get(pr_number).keys():
            if row_key == "number" or row_key == "name" or row_key.__contains__("embedding"):
                continue
            # elif row_key.__contains__("embedding"):
            #     embed_list = parse_embedding_data(author_features.get(pr_number).get(row_key))
            #     for key in embed_list:
            #         feature_dict.get(pr_number).append(key)
            else:
                # Convert booleans to 0/1.
                if isinstance(author_features.get(pr_number).get(row_key), bool):
                    if author_features.get(pr_number).get(row_key):
                        feature_dict.get(pr_number).append(1)
                    else:
                        feature_dict.get(pr_number).append(0)
                # Non-numeric (string) author fields are zeroed out.
                elif isinstance(author_features.get(pr_number).get(row_key), str):
                    feature_dict.get(pr_number).append(0)
                else:
                    feature_dict.get(pr_number).append(author_features.get(pr_number).get(row_key))

        # Concatenate the project features.
        for row_key in project_feature.get(pr_number).keys():
            if row_key == "number" or row_key.__contains__("embedding"):
                continue
            # elif row_key.__contains__("embedding"):
            #     embed_list = parse_embedding_data(project_feature.get(pr_number).get(row_key))
            #     for key in embed_list:
            #         feature_dict.get(pr_number).append(key)
            else:
                feature_dict.get(pr_number).append(project_feature.get(pr_number).get(row_key))

        # Concatenate the reviewer features.
        for row_key in reviewer_feature.get(pr_number).keys():
            if row_key == "number" or row_key == "name" or row_key.__contains__("embedding"):
                continue
            # elif row_key.__contains__("embedding"):
            #     embed_list = parse_embedding_data(reviewer_feature.get(pr_number).get(row_key))
            #     for key in embed_list:
            #         feature_dict.get(pr_number).append(key)
            else:
                # Convert booleans to 0/1; zero out string-valued fields.
                if isinstance(reviewer_feature.get(pr_number).get(row_key), bool):
                    if reviewer_feature.get(pr_number).get(row_key):
                        feature_dict.get(pr_number).append(1)
                    else:
                        feature_dict.get(pr_number).append(0)
                elif isinstance(reviewer_feature.get(pr_number).get(row_key), str):
                    feature_dict.get(pr_number).append(0)
                else:
                    feature_dict.get(pr_number).append(reviewer_feature.get(pr_number).get(row_key))

    # Read pr_info_add_conversation.xlsx row by row.
    pr_info_add_conversation_pd = pd.read_excel("./data/" + repo_name + "/pr_info_add_conversation.xlsx")
    pr_info_add_conversation = {}
    print("pr_info_add_conversation_pd" + str(len(pr_info_add_conversation_pd)))
    for index, row in tqdm.tqdm(pr_info_add_conversation_pd.iterrows(),
                                "读取" + repo_name + "仓库的pr_info_add_conversation.xlsx"):
        # The first column is the PR number.
        pr_info_add_conversation[row[0]] = dict(row)
    print("pr_info_add_conversation" + str(len(pr_info_add_conversation)))

    # Response time per PR: merged_at - created_at; when the PR was never
    # merged, closed_at is used instead, else a fixed cutoff date.

    response_time_dict = {}
    response_time_list = []
    conversation_num_dict = {}
    conversation_num_list = []
    pr_time_dict = {}
    max_created_at_year = time_reverse(max_created_at_year_str)
    for pr_number in pr_info_add_conversation.keys():
        conversation_num_dict[pr_number] = pr_info_add_conversation.get(pr_number).get("conversation")
        conversation_num_list.append(pr_info_add_conversation.get(pr_number).get("conversation"))

        created_at = time_reverse(pr_info_add_conversation.get(pr_number).get("created_at"))
        # NOTE(review): updated_at is parsed but never used below.
        updated_at = time_reverse(pr_info_add_conversation.get(pr_number).get("updated_at"))
        merged_at = time_reverse(pr_info_add_conversation.get(pr_number).get("merged_at"))
        # max_created_at = max(max_created_at, created_at)
        pr_time_dict[pr_number] = {}
        pr_time_dict[pr_number]["created_at"] = pr_info_add_conversation.get(pr_number).get("created_at")
        pr_time_dict[pr_number]["updated_at"] = pr_info_add_conversation.get(pr_number).get("updated_at")
        pr_time_dict[pr_number]["merged_at"] = pr_info_add_conversation.get(pr_number).get("merged_at")
        pr_time_dict[pr_number]["closed_at"] = pr_info_add_conversation.get(pr_number).get("closed_at")
        if merged_at is None:
            closed_at = pr_info_add_conversation.get(pr_number).get("closed_at")
            if closed_at is None or (not isinstance(closed_at, str)):
                # Still-open PRs are charged up to this fixed cutoff date.
                merged_at = time_reverse("2023-05-31T23:59:59Z")
            else:
                merged_at = time_reverse(closed_at)
        # Response time in minutes.
        response_time_dict[pr_number] = (merged_at - created_at).total_seconds() / 60
        response_time_list.append((merged_at - created_at).total_seconds() / 60)
    # Persist the per-PR timestamps for later inspection.
    with open("./data/" + repo_name + "/pr_time_dict.txt", "w") as f:
        f.write(str(pr_time_dict))
    # Shorter response time ranks higher.
    response_time_label = get_true_order_dict(response_time_list, response_time_dict)
    # True speed rank per PR number; fewer conversations rank higher.
    conversation_label_dict = get_true_order_dict(conversation_num_list, conversation_num_dict)
    label_dict = {}
    for pr_number in pr_info_add_conversation.keys():
        label_dict[pr_number] = 0
        # Merged PRs get a +4 label bonus on top of the two rank components.
        if pr_info_add_conversation.get(pr_number).get("merged"):
            label_dict[pr_number] = label_dict[pr_number] + 4
        label_dict[pr_number] = (label_dict[pr_number] + conversation_label_dict[pr_number]
                                 + response_time_label[pr_number])
    # feature_dict maps each PR to a feature list; normalise column-wise.
    feature_pd = pd.DataFrame(feature_dict).T
    # Replace inf/-inf/NaN cells with 0.
    feature_pd = feature_pd.replace([math.inf, -math.inf, math.nan], 0)
    # Zero out any remaining non-numeric (string) cells.
    for col in feature_pd.columns:
        feature_pd[col] = feature_pd[col].apply(lambda x: 0 if isinstance(x, str) else x)

    # Min-max normalise every column of feature_pd.
    for col in feature_pd.columns:
        feature_pd[col] = (feature_pd[col] - feature_pd[col].min()) / (feature_pd[col].max() - feature_pd[col].min())

    row_data = []
    # Emit SVMrank rows: [label, "qid:1", "i:value", ..., "# pr_number"].
    for key_item in label_dict.keys():
        tmp = []
        tmp.append(label_dict.get(key_item))
        tmp.append("qid:1")
        count = 1
        # Values of the row named key_item in feature_pd.
        for x in feature_pd.loc[key_item]:
            if isinstance(x, str) or math.isnan(x) or math.isinf(x):
                tmp.append(count.__str__() + ":0")
            else:
                tmp.append(count.__str__() + ":" + str(x))
            count += 1

        tmp.append("# " + str(key_item))
        row_data.append(tmp)
    year_data = []
    before_year_data = []
    if year_data_bool:
        # Window: one year back from max_created_at.
        min_created_at_years = max_created_at_year - pd.Timedelta(days=365)
        print("============year_data  max_create_date" + str(
            max_created_at_year) + "===============min_created_at_years:" + str(min_created_at_years))
        year_pr_number = []
        # Select PRs merged, closed, or created within the one-year window.
        for pr_number in pr_time_dict.keys():
            created_at_tmp = pr_time_dict[pr_number]["created_at"]
            created_at_tmp = time_reverse(created_at_tmp)
            merged_at_tmp = time_reverse(pr_time_dict[pr_number]["merged_at"])
            closed_at_tmp = time_reverse(pr_time_dict[pr_number]["closed_at"])
            if merged_at_tmp is not None and merged_at_tmp > min_created_at_years and merged_at_tmp < max_created_at_year:
                year_pr_number.append(pr_number)
            elif closed_at_tmp is not None and closed_at_tmp > min_created_at_years and closed_at_tmp < max_created_at_year and created_at_tmp > min_created_at_years:
                year_pr_number.append(pr_number)
            elif created_at_tmp > min_created_at_years and created_at_tmp < max_created_at_year:
                year_pr_number.append(pr_number)
        # Emit SVMrank rows for the in-window PRs.
        for key_item in year_pr_number:
            tmp = []
            tmp.append(label_dict.get(key_item))
            tmp.append("qid:1")
            count = 1
            for x in feature_pd.loc[key_item]:
                if isinstance(x, str) or math.isnan(x) or math.isinf(x):
                    tmp.append(count.__str__() + ":0")
                else:
                    tmp.append(count.__str__() + ":" + str(x))
                count += 1
            tmp.append("# " + str(key_item))
            year_data.append(tmp)
        print("year_pr_number: " + str(len(year_pr_number)))

        # PRs created from project inception up to one year before the cutoff
        # serve as the (before-window) test set.
        before_year_pr_number = []
        for pr_number in pr_time_dict.keys():
            created_at_tmp = pr_time_dict[pr_number]["created_at"]
            created_at_tmp = time_reverse(created_at_tmp)
            if created_at_tmp < min_created_at_years:
                before_year_pr_number.append(pr_number)
        # Emit SVMrank rows for the pre-window PRs.
        for key_item in before_year_pr_number:
            tmp = []
            tmp.append(label_dict.get(key_item))
            tmp.append("qid:1")
            count = 1
            for x in feature_pd.loc[key_item]:
                if isinstance(x, str) or math.isnan(x) or math.isinf(x):
                    tmp.append(count.__str__() + ":0")
                else:
                    tmp.append(count.__str__() + ":" + str(x))
                count += 1
            tmp.append("# " + str(key_item))
            before_year_data.append(tmp)
        print("after_year_pr_number: " + str(len(before_year_pr_number)))
    # return row_data, year_data
    return row_data, year_data, before_year_data


def parse_embedding_data(data):
    """Parse a textual embedding dump into a list of floats.

    :param data: raw spreadsheet cell; ``0`` marks a missing embedding and
        yields a zero vector (assumes a 384-dim model -- confirm upstream).
    :return: list of floats extracted from *data*.
    """
    if data == 0:
        return [0] * 384
    # Extract decimal numbers with a regex. Fix: also accept scientific
    # notation (e.g. "1.2e-05"); the previous pattern -?\d+\.\d+ silently
    # truncated such values to their mantissa, corrupting the embedding.
    numbers = re.findall(r"-?\d+\.\d+(?:[eE][-+]?\d+)?", data)
    # Convert the matched strings to floats.
    numbers = [float(num) for num in numbers]
    return numbers


def pre_data(repo_name, year_data_bool, max_created_at_year_str):
    """Generate SVMrank-format data files for *repo_name*.

    Builds the feature rows via get_data_by_repo_name, then writes:
    the before-window rows entirely as training data, the in-window rows
    entirely as test data, and again the in-window rows entirely as
    "year" training data (each call also rewrites the all-data file).
    """
    base_dir = "./rank_data/" + repo_name + "/"
    path_exists_or_create(base_dir)

    prefix = base_dir + repo_name + "_svm_rank_format_"
    all_path = prefix + "data.txt"
    train_path = prefix + "train_data.txt"
    test_path = prefix + "test_data.txt"
    year_path = prefix + "year_data.txt"
    year_test_path = prefix + "year_test_data.txt"

    row_data, year_data, before_year_data = get_data_by_repo_name(
        repo_name, year_data_bool, max_created_at_year_str)

    # Before-window PRs -> training file; in-window PRs -> test file.
    text_save(all_path, train_path, test_path, before_year_data, len(before_year_data))
    text_save(all_path, year_test_path, test_path, year_data, 0)
    text_save(all_path, year_path, year_test_path, year_data, len(year_data))



# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Repositories to process; other candidates: "django", "laravel",
    # "angular.js", "tensorflow", "react", "opencv", ...
    repo_list = ["rust"]
    year_data_bool = True
    for repo_name in repo_list:
        file_path = "./rank_data/" + repo_name + "/"
        path_exists_or_create(file_path)

        all_filename = file_path + repo_name + "_svm_rank_format_data.txt"
        train_filename = file_path + repo_name + "_svm_rank_format_train_data.txt"
        test_filename = file_path + repo_name + "_svm_rank_format_test_data.txt"
        year_filename = file_path + repo_name + "_svm_rank_format_year_data.txt"
        year_test_filename = file_path + repo_name + "_svm_rank_format_year_test_data.txt"
        # BUG FIX: get_data_by_repo_name returns THREE lists; the previous
        # two-name unpacking raised "ValueError: too many values to unpack"
        # on every run. The before-window split is unused in this driver.
        row_data, year_data, _before_year_data = get_data_by_repo_name(
            repo_name, year_data_bool, "2022-12-31T23:59:59Z")

        # 80/20 split of the full data set into train/test files.
        text_save(all_filename, train_filename, test_filename, row_data, int(len(row_data) * 0.8))
        # In-window rows are written entirely to the year training file.
        text_save(all_filename, year_filename, year_test_filename, year_data, len(year_data))
