import argparse
import pickle

def get_pctl(sorted_list, p):
    """Return the p-quantile of an ascending-sorted list, with p in [0, 1].

    An empty list yields 0.0; an index that lands past the end of the
    list (e.g. p == 1.0) is clamped to the last element.
    """
    if not sorted_list:
        return 0.0
    pos = min(int(len(sorted_list) * p), len(sorted_list) - 1)
    return sorted_list[pos]

if __name__=="__main__":
    # Command-line interface.
    parser = argparse.ArgumentParser(description='Analyze FCT data with noise removal.')
    parser.add_argument('-p', dest='prefix', action='store', default='fct_fat',
                        help="Specify the prefix of the fct file. Usually like fct_<topology>_<trace>")
    parser.add_argument('-s', dest='step', action='store', default='5',
                        help="granularity step (percentage chunk size)")
    parser.add_argument('-T', dest='time_limit', action='store', type=int, default=3000000000,
                        help="only consider flows that finish before T (start_time + actual_fct < T)")
    parser.add_argument('-c', dest='ccs', action='store', default='dcqcn_0 dcqcn_1',
                        help="congestion control algorithms (separated by space)")
    args = parser.parse_args()

    # Unpack the parsed options.
    ccs_list = args.ccs.split()
    step = int(args.step)
    time_limit = args.time_limit

    # Derive the CC family name from the file prefix (used for the pickle filename).
    if "hp95" in args.prefix:
        cc_name = "hp95"
    elif "dcqcn" in args.prefix:
        cc_name = "dcqcn"
    else:
        cc_name = "UnKnown"

    # Bucket start fractions: 0.00, 0.05, ..., 0.95 (for the default step=5).
    fraction_list = [i / 100. for i in range(0, 100, step)]
    # One result row per bucket; element 0 is the fraction value, then the
    # per-CC statistics are appended in turn (7 values per CC).
    res = [[frac] for frac in fraction_list]

    for cc in ccs_list:
        # Per-CC trace file: ../simulation/output/<prefix>_0_<cc>.txt
        file_path = f"../simulation/output/{args.prefix}_0_{cc}.txt"
        print("Read ", file_path)

        # 1) Load every flow that finished before the time limit.
        raw_flows = []
        with open(file_path, 'r') as f:
            for line in f:
                tokens = line.split()
                if len(tokens) < 8:
                    # Malformed / short line — skip.
                    continue
                # Columns used: [4]=flow size, [5]=start time,
                # [6]=actual FCT, [7]=ideal (best-case) FCT.
                flow_size = int(tokens[4])
                start_time = int(tokens[5])
                actual_fct = int(tokens[6])
                best_fct = int(tokens[7])

                if start_time + actual_fct >= time_limit:
                    # Finished after the cutoff — discard.
                    continue

                raw_flows.append((start_time, flow_size, actual_fct, best_fct))

        if not raw_flows:
            print("No Flows, Error!")
            exit(-1)

        # 2) Sort by start time and drop the first and last 10% of flows
        #    (warm-up / drain noise removal).
        raw_flows.sort(key=lambda x: x[0])  # x[0] = start_time
        N = len(raw_flows)
        filtered_flows = raw_flows[int(0.1 * N):int(0.9 * N)]

        # 3) Slowdown = actual_fct / best_fct, clamped to >= 1.
        #    FIX: guard best_fct == 0 (the original divided unconditionally
        #    and would raise ZeroDivisionError despite the comment claiming
        #    the case was handled).
        flows_for_analysis = []
        for (st, sz, act, best) in filtered_flows:
            slow = act / best if best else 1.0
            if slow < 1:
                slow = 1
            flows_for_analysis.append((sz, slow))

        # 4) Sort by flow size so the buckets below are size percentiles.
        flows_for_analysis.sort(key=lambda x: x[0])
        total_flows = len(flows_for_analysis)

        # 5) Bucket statistics over 0-5%, 5-10%, ... of flows by size.
        for bucket_i, frac in enumerate(fraction_list):
            l = int(frac * total_flows)
            r = min(int((frac + step / 100.0) * total_flows), total_flows)

            bucket_data = flows_for_analysis[l:r]
            # NOTE(review): an empty bucket (only possible for very small
            # inputs) would raise IndexError / ZeroDivisionError below —
            # assumed not to happen with realistic trace sizes.
            flow_sizes = [x[0] for x in bucket_data]
            slows = [x[1] for x in bucket_data]

            slows_sorted = sorted(slows)
            res[bucket_i].extend([
                flow_sizes[-1],                      # largest flow size in the bucket
                sum(flow_sizes) / len(flow_sizes),   # mean flow size
                len(flow_sizes),                     # number of flows
                sum(slows) / len(slows),             # mean slowdown
                get_pctl(slows_sorted, 0.5),
                get_pctl(slows_sorted, 0.95),
                get_pctl(slows_sorted, 0.99),
            ])  # appended per CC: 7 stats each

    # Number of statistics each CC contributes per row of `res`.
    group_size = 7

    # One entry per non-baseline CC, 12 accumulators each:
    #   [0:4]  mean improvement over the mid-range buckets (10%-90%),
    #   [4:8]  improvement in the last (largest-flow) bucket,
    #   [8:12] mean improvement over all buckets,
    # each quartet being (avg_slow, p50, p95, p99) relative to the baseline.
    compare = [[0] * 12 for _ in range(len(ccs_list) - 1)]

    # Bucket counts used to turn sums into means. The original hard-coded
    # 16 and 20, which is only correct for step=5; derive them instead.
    mid_buckets = max(sum(1 for f in fraction_list if 0.10 <= f < 0.90), 1)
    all_buckets = len(fraction_list)

    for row in res:
        # ccs_list[0] serves as the baseline CC (e.g. plain DCQCN).
        fraction_val, sz, avg_sz, numf, st_avg_sl, st_p50, st_p95, st_p99 = row[0: group_size + 1]
        stats = row[1:]

        # Walk each non-baseline CC's group of statistics in this row.
        for idx in range(group_size, len(stats), group_size):
            cc_idx = (idx // group_size) - 1
            _sz, _avg_sz, _numf, avg_sl, p50, p95, p99 = stats[idx:idx + group_size]

            # Relative improvement vs. baseline (positive = faster).
            pro_avg_sl = (st_avg_sl - avg_sl) / st_avg_sl
            pro_p50 = (st_p50 - p50) / st_p50
            pro_p95 = (st_p95 - p95) / st_p95
            pro_p99 = (st_p99 - p99) / st_p99

            if 0.10 <= fraction_val < 0.90:
                compare[cc_idx][0] += pro_avg_sl
                compare[cc_idx][1] += pro_p50
                compare[cc_idx][2] += pro_p95
                compare[cc_idx][3] += pro_p99

            # Last bucket (0.95 for step=5). Comparing against the list's own
            # element avoids a fragile float-literal equality.
            if fraction_val == fraction_list[-1]:
                compare[cc_idx][4] = pro_avg_sl
                compare[cc_idx][5] = pro_p50
                compare[cc_idx][6] = pro_p95
                compare[cc_idx][7] = pro_p99

            compare[cc_idx][8] += pro_avg_sl
            compare[cc_idx][9] += pro_p50
            compare[cc_idx][10] += pro_p95
            compare[cc_idx][11] += pro_p99

    # Convert accumulated sums into means.
    for i in range(len(compare)):
        compare[i][0:4] = [x / mid_buckets for x in compare[i][0:4]]
        compare[i][8:12] = [x / all_buckets for x in compare[i][8:12]]

    # Map each non-baseline CC label to an alpha value.
    # NOTE(review): this assumes the CC labels are numeric strings such as
    # "005" (-> 0.05) or "05" (-> 0.5). With labels like the default
    # "dcqcn_1", int(cc) raises ValueError, and labels matching neither
    # substring leave `alpha` shorter than `compare` (IndexError below) —
    # confirm the expected -c label format.
    alpha = []
    for cc in ccs_list[1:]:
        if "00" in cc:
            alpha.append(round(int(cc) * 0.01, 2))
        elif "0" in cc:
            alpha.append(round(int(cc) * 0.1, 2))

    # Print one line per CC: alpha followed by its 12 comparison stats as
    # signed percentages, and collect the same rows for pickling.
    saved_res = []
    for i, row in enumerate(compare):
        print(alpha[i], end=" ")
        for c in row:
            print(f"{c*100:+06.2f}", end=" ")
        print()  # newline after each CC's row
        saved_res.append([alpha[i]] + row)

    # Persist the [alpha, stats...] rows for later plotting.
    with open(f'../simulation/data/alpha_{cc_name}.pkl', 'wb') as f:
        pickle.dump(saved_res, f)