from datetime import timedelta

import pandas as pd
import json
import argparse

from utils.path_exist import path_exists_or_create


def get_PR_count_byday(pr_data, start_day, end_day):
    """
    Aggregate per-day PR lifecycle counts over the window [start_day, end_day].

    For every day in the window five counters are kept:
      - "submitted"/"accumulated": PRs created that day (both incremented together),
      - "merged": PRs whose merged_time falls on that day,
      - "abandoned": PRs closed without being merged on that day,
      - "open": PRs whose lifetime (created -> merged/closed/end of window)
        covers that day.

    :param pr_data: DataFrame with 'created_time', 'closed_time', 'merged_time'
        datetime columns (NaT allowed for the latter two) and a 'number' column.
    :param start_day: pandas Timestamp, first day of the window (midnight).
    :param end_day: pandas Timestamp, last day of the window (midnight).
    :return: tuple (total submitted count, avg open/day, avg submitted/day,
        avg merged/day, avg abandoned/day) averaged over the window length.
    """
    # Bucket labels as named constants; note the original used a local named
    # `open`, which shadowed the builtin open().
    OPEN = "open"
    ACCUMULATED = "accumulated"
    SUBMITTED = "submitted"
    MERGED = "merged"
    ABANDONED = "abandoned"

    day_count = (end_day - start_day).days + 1
    # Keys are str(Timestamp), i.e. "YYYY-MM-DD 00:00:00", matching the
    # "<date> 00:00:00" lookup keys built per row below.
    counts = {
        str(start_day + timedelta(days=i)): {
            OPEN: 0, ACCUMULATED: 0, SUBMITTED: 0, MERGED: 0, ABANDONED: 0,
        }
        for i in range(day_count)
    }

    for _, row in pr_data.iterrows():
        created = row['created_time'].date()
        created_key = str(created) + " 00:00:00"

        # Resolve the end of the PR's "open" span and record its terminal
        # event, if any. Merged takes precedence over closed; a PR with
        # neither stays open until the end of the analysis window.
        if pd.notna(row['merged_time']):
            pr_end_day = row['merged_time'].date()
            merged_key = str(pr_end_day) + " 00:00:00"
            if merged_key in counts:
                counts[merged_key][MERGED] += 1
        elif pd.notna(row['closed_time']):
            # Closed without a merge => abandoned.
            pr_end_day = row['closed_time'].date()
            closed_key = str(pr_end_day) + " 00:00:00"
            if closed_key in counts:
                counts[closed_key][ABANDONED] += 1
        else:
            pr_end_day = end_day.date()

        # Creation events (only counted when they fall inside the window).
        if created_key in counts:
            counts[created_key][ACCUMULATED] += 1
            counts[created_key][SUBMITTED] += 1

        # Mark every day of the PR's lifetime as having it open.
        for i in range((pr_end_day - created).days + 1):
            day_key = str(created + timedelta(days=i)) + " 00:00:00"
            if day_key in counts:
                counts[day_key][OPEN] += 1

    accumulated_count = sum(c[ACCUMULATED] for c in counts.values())
    submitted_count = sum(c[SUBMITTED] for c in counts.values())
    merged_count = sum(c[MERGED] for c in counts.values())
    abandoned_count = sum(c[ABANDONED] for c in counts.values())
    open_count = sum(c[OPEN] for c in counts.values())

    return (accumulated_count, open_count / day_count, submitted_count / day_count,
            merged_count / day_count, abandoned_count / day_count)


if __name__ == '__main__':

    repo_list = ["tensorflow"]  # ["cdnjs"]#"laravel","angular.js"
    for repo_name in repo_list:
        # Load PR records and normalize GitHub's ISO-8601 UTC timestamps into
        # naive pandas datetimes. (The original passed dayfirst=True, which is
        # ignored whenever an explicit format= is supplied, so it is dropped.)
        data = pd.read_excel("./data/" + repo_name + "/pr_info_add_conversation.xlsx")
        for src_col, dst_col in (("created_at", "created_time"),
                                 ("closed_at", "closed_time"),
                                 ("merged_at", "merged_time")):
            data[dst_col] = pd.to_datetime(data[src_col], format="%Y-%m-%dT%H:%M:%SZ",
                                           utc=True).dt.tz_localize(None)

        # Per-day PR lifecycle statistics over a fixed analysis window.
        accumulated_count, open_count_byday, submitted_count_byday, merged_count_byday, abandoned_count_byday = \
            get_PR_count_byday(data,
                               pd.to_datetime("2015-11-09"),
                               pd.to_datetime("2022-12-31"))

        # Distinct PR authors / reviewers. Building the set directly from the
        # column replaces the original row-by-row iterrows() loop and yields
        # identical contents (NaN entries are included either way).
        author_data = pd.read_excel("./data/" + repo_name + "/author_features.xlsx")
        author_set = set(author_data['name'])

        comment_data = pd.read_excel("./data/" + repo_name + "/PR_comment_info.xlsx")
        reviewer_set = set(comment_data['reviewer'])

        print("accumulated_count=", accumulated_count)
        print("open_count_byday=", open_count_byday)
        print("submitted_count_byday=", submitted_count_byday)
        print("merged_count_byday=", merged_count_byday)
        print("abandoned_count_byday=", abandoned_count_byday)
        print("author_set len=", len(author_set))
        print("reviewer_set len=", len(reviewer_set))

        # Persist the summary statistics as JSON.
        result = {
            'accumulated_count': accumulated_count,
            'open_count_byday': open_count_byday,
            'submitted_count_byday': submitted_count_byday,
            'merged_count_byday': merged_count_byday,
            'abandoned_count_byday': abandoned_count_byday,
            'author_set': len(author_set),
            'reviewer_set': len(reviewer_set),
        }
        path_exists_or_create("./rank_model/" + repo_name + "/result/static")
        with open("./rank_model/" + repo_name + "/result/static/day_count_static.json", "w") as f:
            json.dump(result, f)
