import pandas as pd
import numpy as np
import json
import argparse


def has_text(body):
    """Check whether a PR body mentions any of the tracked keywords.

    Parameters
    ----------
    body : str or float
        PR body text. pandas represents a missing body as float NaN,
        which is why a float input is treated as "no body".

    Returns
    -------
    list of int
        Seven 0/1 flags, in order: has_bug, has_document, has_feature,
        has_improve, has_refactor, has_test_code, at_mention.
        1 means the keyword occurs (case-insensitively) in the body.
    """
    # Output order matters: get_PR_type indexes into this list by position.
    keywords = ("bug", "document", "feature", "improve", "refactor", "test", "@")
    # A missing body arrives as float NaN from pandas.read_csv.
    if isinstance(body, float):
        print("body为空")
        return [0] * len(keywords)
    lowered = body.lower()
    return [1 if kw in lowered else 0 for kw in keywords]


def get_PR_type(hastext):
    """Classify a PR from its keyword flags (the trailing at_mention flag is ignored).

    Categories: 0 = no keyword, 1 = bug, 2 = feature,
    3 = documentation/configuration, 4 = refactor, 5 = mixed.
    """
    # Drop at_mention; remaining order: bug, document, feature,
    # improve, refactor, test.
    flags = hastext[:-1]
    total = sum(flags)

    if total == 0:
        return 0
    if total >= 3:
        return 5

    bug, document, feature, improve, refactor, test_code = flags
    if total == 1:
        if bug == 1 or test_code == 1:
            return 1
        if feature == 1:
            return 2
        if document == 1:
            return 3
        if refactor == 1 or improve == 1:
            return 4
    if total == 2:
        if bug == 1 and test_code == 1:
            return 1
        if refactor == 1 and improve == 1:
            return 4
        return 5


def get_review_single_time(row):
    """
    对于一条PR记录，解析一个pr多条评审的真实时间
    """
    review_comments_content = row['review_comments_content']
    try:
        # 有reviewcomment
        if review_comments_content != '[]':
            d = json.loads(review_comments_content, strict=False)
            df = pd.DataFrame(d)
            df['created_at'] = pd.to_datetime(
                df['created_at'], dayfirst=True, utc=True).dt.tz_localize(None)
            df.sort_values(by='created_at', inplace=True)
            # 第一次评论的时间
            return df['created_at'][0]
        else:
            # 合并了（merged为1）
            if row['merged'] == 1:
                return row['merged_time']
            else:
                return row['closed_time']
    # json不完整，解析出错！
    except:
        print('err:',row['pr_number'])
        return []


def get_review_comment_time(row):
    """
    对于一条PR记录，解析一个pr多条评审的真实时间
    """
    review_comments_content = row['review_comments_content']
    try:
        # 有reviewcomment
        if review_comments_content != '[]':
            d = json.loads(review_comments_content, strict=False)
            df = pd.DataFrame(d)
            df['created_at'] = pd.to_datetime(
                df['created_at'], dayfirst=True, utc=True).dt.tz_localize(None)
            df.sort_values(by='created_at', inplace=True)
            # 时间json串
            l = [{'created_at': t} for t in df['created_at'].map(str).values.tolist()]
            return l
        else:
            # 合并了（merged为1）
            if row['merged'] == 1:
                return row['merged_time']
            else:
                return row['closed_time']
    # json不完整，解析出错！
    except:
        print('err:',row['pr_number'])
        return []


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='以pr_number为顺序输出created_time，closed_time，PR_type，Real_time，comment_time')
    parser.add_argument('-f', type=str, nargs=1, help='文件路径')
    args = parser.parse_args()

    # NOTE(review): the -f argument is currently unused; the repo list
    # and paths below are hard-coded.
    repo_list = ["yii2", "netbeans", "kafka", "django", "phoenix"]
    for repo_name in repo_list:
        file_path = "F:\\devops+\\评审仿真\\" + repo_name + "数据\\" + repo_name + ".csv"
        data = pd.read_csv(file_path)
        # Normalize all timestamps to naive datetimes (UTC with tzinfo stripped).
        data['created_time'] = pd.to_datetime(data['created_at'], dayfirst=True, utc=True).dt.tz_localize(None)
        data['closed_time'] = pd.to_datetime(
            data['closed_at'], dayfirst=True, utc=True).dt.tz_localize(None)
        data['merged_time'] = pd.to_datetime(
            data['merged_at'], dayfirst=True, utc=True).dt.tz_localize(None)

        # Keyword presence flags in the PR body, and the derived PR type.
        data['keyword_in_body'] = data['body'].map(has_text)
        data['PR_type'] = data['keyword_in_body'].map(get_PR_type)

        # Real review sequence: earliest review time per PR, and the full
        # list of review times (JSON-style list for multi-comment PRs).
        data['comment_real_time'] = data.apply(get_review_single_time, axis=1)
        data['comment_time'] = data.apply(get_review_comment_time, axis=1)

        # .copy() so the column additions and fillna below operate on an
        # independent frame, not a view of `data` (avoids chained-assignment
        # warnings and silent no-ops under pandas copy-on-write).
        df = data[['pr_number', 'created_time', 'closed_time', 'PR_type', 'merged', 'review_comments_number', 'comment_real_time', 'comment_time']].copy()
        df['change_line'] = data['total_add_line'] + data['total_delete_line']
        df['change_file'] = data['changed_file_num']

        # Replace missing times with the dataset's cut-off date.
        last_day = pd.to_datetime("2021-6-20 00:00:00")
        df['closed_time'] = df['closed_time'].fillna(last_day)
        df['comment_real_time'] = df['comment_real_time'].fillna(last_day)
        df['comment_time'] = df['comment_time'].fillna(last_day)
        print(df['comment_time'].fillna(last_day))

        # Strip the ".csv" extension and write the per-PR sheet.
        postfix = '.' + file_path.split('.')[-1]
        file_path = file_path[:file_path.find(postfix)]
        df.to_excel(f'{file_path}_pr.xlsx', index=False)

        yuanshi = pd.read_excel(f'{file_path}_pr.xlsx')
        alg_dict = {
            0: "MART",
            2: "RankBoost",
            3: "AdaRank",
            4: "Coordinate_Ascent",
            5: "bayesian_network",
            6: "LambdaMART",
            7: "ListNet",
            8: "Random_Forests"
        }
        # Attach each ranking algorithm's priority column to the PR sheet.
        for alg_name in alg_dict.values():
            test_sort_result_path = "F:\\devops+\\评审仿真\\" + repo_name + "数据\\" + repo_name + "_result.txt"
            test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]
            test_sort_result_path = test_sort_result_path + "_" + alg_name
            priority = pd.read_excel(f'{test_sort_result_path}_sorted.xlsx')
            print(f'{test_sort_result_path}_sorted.xlsx')
            # The bayesian_network result file names its column; the other
            # ranker outputs keep priorities in (integer-labelled) column 4.
            if alg_name == "bayesian_network":
                yuanshi[alg_name] = priority["priorities_number"]
            else:
                yuanshi[alg_name] = priority[4]
            print(yuanshi[alg_name])

        yuanshi.to_excel(f'{file_path}_pr_mP.xlsx', index=False)