import pandas as pd
import json
import argparse


def get_delayed_days(row):
    """Return the calendar days on which this PR sat open (its backlog days).

    A PR opened and closed on the same calendar day contributes no backlog.
    Otherwise every day from the creation date up to, but excluding, the
    close date counts as one delayed day.  For PRs that were never closed
    (``closed_time`` is NaT) the module-level ``end_time`` — the latest
    close/merge timestamp in the dataset, set by the ``__main__`` driver —
    is used as the open end of the range.

    Parameters
    ----------
    row : pandas.Series (or mapping) with tz-naive Timestamp fields
        ``created_time`` and ``closed_time`` (``closed_time`` may be NaT).

    Returns
    -------
    list of pandas.Timestamp
        One midnight-normalized timestamp per delayed day (possibly empty).
    """
    closed = row['closed_time']
    # Special case first: PR was never closed.  pd.isna() is more robust
    # than an identity check against the pd.NaT singleton.
    if pd.isna(closed):
        # NOTE: relies on the module-level `end_time` global set by the caller.
        return pd.date_range(start=row['created_time'], end=end_time,
                             inclusive='left', normalize=True).tolist()
    # Opened and closed on the same day: nothing was backlogged.
    if row['created_time'].date() == closed.date():
        return []
    # Left-closed/right-open range; normalize=True snaps both endpoints
    # to midnight so each element represents one whole calendar day.
    return pd.date_range(start=row['created_time'], end=closed,
                         inclusive='left', normalize=True).tolist()


def get_review_comments(row):
    """
    对于一条记录，从review_comments_content解析一个pr多条评审的创建时间，赋值给review_comments_time字段
    """
    review_comments_content = row['review_comments_content']
    try:
        # 有reviewcomment
        if review_comments_content != '[]':
            d = json.loads(review_comments_content, strict=False)
            df = pd.DataFrame(d)
            df['created_at'] = pd.to_datetime(
                df['created_at'], dayfirst=True).dt.tz_localize(None)
            time_list = df['created_at'].to_list()
            # 合并了（merged为1）
            if row['merged'] == 1:
                time_list.append(row['merged_time'])
            else:
                time_list.append(row['closed_time'])
            return time_list
        else:
            # 合并了（merged为1）
            if row['merged'] == 1:
                return [row['merged_time']]
            else:
                return [row['closed_time']]
    # json不完整，解析出错！
    except:
        print('err:', row['pr_number'])
        return []


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='以日期为顺序输出每日评审次数day_count_coment_+_1.csv，delayed_days_count_no_closed.csv')
    parser.add_argument('-f', type=str, nargs=1, help='文件路径')
    args = parser.parse_args()
    # NOTE(review): the -f argument is parsed but currently ignored; the
    # hard-coded repo list below drives which files are processed.

    def _daily_counts(timestamps):
        """Count events per calendar day from a flat list of timestamps."""
        s = pd.Series(range(len(timestamps)), index=timestamps)
        # Left-labelled 1-day bins: each row is "events on that day".
        return s.resample('1d', label='left').count()

    repo_list = ["yii2", "netbeans", "kafka", "django", "phoenix"]
    for repo_name in repo_list:
        file_path = "F:\\devops+\\评审仿真\\" + repo_name + "数据\\" + repo_name + ".csv"
        data = pd.read_csv(file_path)
        # Parse the raw timestamp columns into tz-naive datetimes.
        data['created_time'] = pd.to_datetime(
            data['created_at'], dayfirst=True, utc=True).dt.tz_localize(None)
        data['closed_time'] = pd.to_datetime(
            data['closed_at'], dayfirst=True, utc=True).dt.tz_localize(None)
        data['merged_time'] = pd.to_datetime(
            data['merged_at'], dayfirst=True, utc=True).dt.tz_localize(None)
        # Module-level global consumed by get_delayed_days for PRs that
        # were never closed; must be set before the apply() below.
        end_time = max(data['closed_time'].max(), data['merged_time'].max())

        # Backlog: one timestamp per (PR, delayed day), counted per day.
        delayed_lists = data.apply(get_delayed_days, axis=1).to_list()
        day_count = _daily_counts([t for ts in delayed_lists for t in ts])
        day_count.to_excel(f'{file_path}_delayed_days_count_no_closed.xlsx',
                           sheet_name='Sheet1')

        # Review activity: one timestamp per review event, counted per day.
        comment_lists = data.apply(get_review_comments, axis=1).to_list()
        day_count = _daily_counts([t for ts in comment_lists for t in ts])
        day_count.to_excel(f'{file_path}_day_count_coment_+_1.xlsx',
                           sheet_name='Sheet1')