from datetime import timedelta

import pandas as pd
import json
import argparse


def get_delayed_days(row, horizon=None):
    """Return the list of days on which this PR sat open (backlog days).

    Builds a left-closed/right-open daily range from the creation day up to
    (but excluding) the closing day, so a PR closed the same day it was
    created contributes no backlog.

    Parameters
    ----------
    row : mapping with 'created_time' and 'closed_time' pandas Timestamps;
        'closed_time' may be NaT for a PR that was never closed.
    horizon : datetime-like, optional
        End of the observation window for still-open PRs. Defaults to the
        module-level ``end_time`` computed in ``__main__``.

    Returns
    -------
    list of pandas.Timestamp, normalized to midnight; possibly empty.
    """
    # Decide on missingness first: calling .date() on NaT (as the old
    # same-day check effectively did) only worked by accident.
    if pd.isna(row['closed_time']):
        end = horizon if horizon is not None else end_time
        return pd.date_range(start=row['created_time'], end=end,
                             inclusive='left', normalize=True).tolist()
    # Closed on the creation day: no backlog at all.
    if row['created_time'].date() == row['closed_time'].date():
        return []
    # Left-closed/right-open; normalize=True snaps both bounds to midnight.
    return pd.date_range(start=row['created_time'], end=row['closed_time'],
                         inclusive='left', normalize=True).tolist()


def get_review_comments(row):
    """
    对于一条记录，从review_comments_content解析一个pr多条评审的创建时间，赋值给review_comments_time字段
    """
    review_comments_content = row['review_comments_content']
    try:
        # 有reviewcomment
        if review_comments_content != '[]':
            d = json.loads(review_comments_content, strict=False)
            df = pd.DataFrame(d)
            df['created_at'] = pd.to_datetime(
                df['created_at'], dayfirst=True).dt.tz_localize(None)
            time_list = df['created_at'].to_list()
            # 合并了（merged为1）
            if row['merged'] == 1:
                time_list.append(row['merged_time'])
            else:
                time_list.append(row['closed_time'])
            return time_list
        else:
            # 合并了（merged为1）
            if row['merged'] == 1:
                return [row['merged_time']]
            else:
                return [row['closed_time']]
    # json不完整，解析出错！
    except:
        print('err:', row['pr_number'])
        return []


def get_review_comment_count_byday(comment_data, pr_data, start_day, end_day):
    """Count review events per calendar day over [start_day, end_day].

    Every day in the window maps to the number of review comments created
    that day; a PR without any review comments contributes its terminal
    event (merge or close) instead.

    Parameters
    ----------
    comment_data : DataFrame with 'belong_to_PR' and 'created_at' columns.
    pr_data : DataFrame with 'number', 'merged_at' and 'closed_time' columns.
    start_day, end_day : datetime-like window bounds, both inclusive.

    Returns
    -------
    dict mapping 'YYYY-MM-DD 00:00:00' -> event count.
    """
    re_comment_count_byday = {}
    # Pre-seed every day in the window with 0 so days without events
    # still appear in the output.
    for offset in range((end_day - start_day).days + 1):
        day = start_day + timedelta(days=offset)
        re_comment_count_byday[str(day)] = 0

    # Group comment creation times by the PR they belong to.
    comment_data_dict = {}
    print("pr_features_pd" + str(len(comment_data)))
    for _, comment_row in comment_data.iterrows():
        comment_data_dict.setdefault(comment_row["belong_to_PR"], []).append(
            comment_row["created_at"])

    for _, row in pr_data.iterrows():
        pr_number = row['number']
        if pr_number in comment_data_dict:
            # Parse the raw ISO strings, strip the timezone, and bump the
            # bucket for each comment's calendar day.
            times = pd.to_datetime(pd.Series(comment_data_dict[pr_number]),
                                   format="%Y-%m-%dT%H:%M:%SZ", dayfirst=True,
                                   utc=True).dt.tz_localize(None).sort_values()
            for ts in times:
                _bump_day(re_comment_count_byday, ts)
        # No review comments at all: count the terminal event instead.
        # BUG FIX: pandas stores missing merged_at as NaN, never None, so the
        # original `is not None` test always took the merged branch and
        # silently dropped unmerged PRs (NaT-dated key never matched).
        elif pd.notna(row['merged_at']):
            _bump_day(re_comment_count_byday, row['merged_at'])
        else:
            _bump_day(re_comment_count_byday, row['closed_time'])
    return re_comment_count_byday


def _bump_day(counts, when):
    """Increment *counts* for the calendar day of *when*, if inside the window."""
    # NaT yields the key 'NaT 00:00:00', which the membership test rejects.
    key = str(pd.to_datetime(when).date()) + " 00:00:00"
    if key in counts:
        counts[key] += 1


if __name__ == '__main__':

    repo_list = ["laravel"]
    for repo_name in repo_list:
        data = pd.read_excel("./data/" + repo_name + "/pr_info_add_conversation.xlsx")
        # Parse the raw ISO strings into timezone-naive timestamp columns.
        for new_col, raw_col in (('created_time', 'created_at'),
                                 ('closed_time', 'closed_at'),
                                 ('merged_time', 'merged_at')):
            data[new_col] = pd.to_datetime(data[raw_col], format="%Y-%m-%dT%H:%M:%SZ",
                                           dayfirst=True, utc=True).dt.tz_localize(None)
        # Observation horizon used by get_delayed_days for never-closed PRs.
        end_time = max(data['closed_time'].max(), data['merged_time'].max())

        # Backlog-days statistics (kept for reference, currently disabled):
        # delayed_days_list = data.apply(get_delayed_days, axis=1).to_list()
        # time_stamps = []
        # for ts in delayed_days_list:
        #     time_stamps += ts
        # s = pd.Series(range(len(time_stamps)), index=time_stamps)
        # resample_obj = s.resample('1d', label='left')
        # day_count = resample_obj.count()
        #
        # day_count.to_excel(f'{file_path}_delayed_days_count_no_closed.xlsx', sheet_name='Sheet1')

        # Daily/weekly review-comment counts.
        # comment_df = data.apply(get_review_comments, axis=1).to_list()
        comment_data = pd.read_excel("./data/" + repo_name + "/PR_comment_info.xlsx")
        # Real review sequence: one bucket per day of 2022.
        day_counts = get_review_comment_count_byday(comment_data, data,
                                                    pd.to_datetime("2022-01-01"),
                                                    pd.to_datetime("2022-12-31"))
        # Dump the {day: count} mapping as a two-column sheet, without the
        # DataFrame index column.
        result = pd.DataFrame(day_counts.items(), columns=['day', 'count'])
        result.to_excel("./sim_data/" + repo_name + "/day_count_comment_+_1.xlsx",
                        sheet_name='Sheet1', index=False)
