import argparse
import pickle

def get_pctl(sorted_list, p):
    """Return the p-th quantile element of *sorted_list* (p in [0, 1]).

    Returns 0.0 for an empty list; the computed index is clamped to the
    last element when len(sorted_list) * p lands past the end (p == 1.0).
    """
    n = len(sorted_list)
    if n == 0:
        return 0.0
    return sorted_list[min(int(n * p), n - 1)]

if __name__ == "__main__":
    # Command-line interface.
    parser = argparse.ArgumentParser(description='Analyze FCT data with noise removal.')
    parser.add_argument('-p', dest='prefix', action='store', default='fct_fat',
                        help="Specify the prefix of the fct file. Usually like fct_<topology>_<trace>")
    parser.add_argument('-s', dest='step', action='store', default='5',
                        help="granularity step (percentage chunk size)")
    parser.add_argument('-T', dest='time_limit', action='store', type=int, default=3000000000,
                        help="only consider flows that finish before T (start_time + actual_fct < T)")
    parser.add_argument('-c', dest='ccs', action='store', default='dcqcn_0 dcqcn_1',
                        help="congestion control algorithms (separated by space)")
    args = parser.parse_args()

    # Unpack the parsed options.
    time_limit = args.time_limit
    step = int(args.step)
    ccs_list = args.ccs.split()

    # Derive a short CC label from the file prefix ("hp95" checked first).
    cc_name = next((name for name in ("hp95", "dcqcn") if name in args.prefix),
                   "UnKnown")


    # all_wl_alpha_res[workload] -> list of [alpha, 8 comparison stats] rows,
    # one row per non-baseline CC.
    all_wl_alpha_res = {}
    for wl in [10,20,30,40,50,60,70,80,90]:
        # Quantile-bucket start points: 0.00, 0.05, 0.10, ..., 0.95 for step=5.
        fraction_list = [i/100. for i in range(0, 100, step)]
        # One row per bucket; element 0 is the fraction value, then a
        # 7-value statistics group is appended for each CC in turn.
        res = [[frac] for frac in fraction_list]

        for cc in ccs_list:
            # Input file: ../simulation/output/<prefix>_<workload>_0_<cc>.txt
            file_path = f"../simulation/output/{args.prefix}_{wl}_0_{cc}.txt"
            print("Read ", file_path)

            # 1) Load every flow that finishes before the time limit.
            raw_flows = []
            with open(file_path, 'r') as f:
                for line in f:
                    tokens = line.strip().split()
                    if len(tokens) < 8:
                        # Malformed / short line: skip.
                        continue
                    # Column layout (0-based): 4=flow size, 5=start time,
                    # 6=actual FCT, 7=ideal (best) FCT.
                    flow_size = int(tokens[4])
                    start_time = int(tokens[5])
                    actual_fct = int(tokens[6])
                    best_fct = int(tokens[7])

                    if start_time + actual_fct >= time_limit:
                        # Finishes past the cutoff: drop.
                        continue

                    raw_flows.append((start_time, flow_size, actual_fct, best_fct))

            if not raw_flows:
                print("No Flows, Error!")
                exit(-1)

            # 2) Sort by start time and trim the first and last 10% of flows
            #    (warm-up / cool-down noise removal).
            raw_flows.sort(key=lambda x: x[0])  # x[0] = start_time
            N = len(raw_flows)
            filtered_flows = raw_flows[int(0.1 * N):int(0.9 * N)]

            # 3) slowdown = actual_fct / best_fct, clamped to >= 1.
            #    BUG FIX: best_fct == 0 previously raised ZeroDivisionError;
            #    the stated intent ("if best_fct=0 or slow<1, set to 1") is
            #    now actually implemented.
            flows_for_analysis = []
            for (st, sz, act, best) in filtered_flows:
                slow = act / best if best > 0 else 1
                if slow < 1:
                    slow = 1
                flows_for_analysis.append((sz, slow))

            # 4) Sort by flow size so buckets become size quantiles.
            flows_for_analysis.sort(key=lambda x: x[0])
            total_flows = len(flows_for_analysis)

            # 5) Statistics per size-quantile bucket (0-5%, 5-10%, ...).
            for bucket_i, frac in enumerate(fraction_list):
                l = int(frac * total_flows)
                r = min(int((frac + step / 100.0) * total_flows), total_flows)

                bucket_data = flows_for_analysis[l:r]
                if not bucket_data:
                    # Degenerate bucket (only possible with very few flows,
                    # where the old code raised IndexError). Record zeros so
                    # every row keeps the same 7-value-per-CC layout.
                    res[bucket_i].extend([0, 0.0, 0, 0.0, 0.0, 0.0, 0.0])
                    continue

                flow_sizes = [x[0] for x in bucket_data]
                slows = [x[1] for x in bucket_data]
                slows_sorted = sorted(slows)

                res[bucket_i].extend([
                    flow_sizes[-1],                      # largest flow size in the bucket
                    sum(flow_sizes) / len(flow_sizes),   # mean flow size
                    len(flow_sizes),                     # number of flows
                    sum(slows) / len(slows),             # mean slowdown
                    get_pctl(slows_sorted, 0.5),
                    get_pctl(slows_sorted, 0.95),
                    get_pctl(slows_sorted, 0.99),
                ])

        # Each CC contributed 7 values per row (after the leading fraction).
        group_size = 7
        # compare[cc_idx] accumulates the relative improvement of each
        # non-baseline CC over the baseline (the first CC in ccs_list):
        # [medium avg, medium p50, medium p95, medium p99,
        #  all avg, all p50, all p95, all p99]
        compare = [[0.0] * 8 for _ in range(len(ccs_list) - 1)]

        # Averaging denominators, generalized from the previously hard-coded
        # 16 / 20, which were only correct for step=5.
        medium_buckets = sum(1 for f in fraction_list if 0.10 <= f < 0.90)
        all_buckets = len(fraction_list)

        for row in res:
            fraction_val = row[0]
            stats = row[1:]
            # Baseline CC statistics: the first 7-value group.
            _, _, _, st_avg_sl, st_p50, st_p95, st_p99 = stats[0:group_size]
            if st_avg_sl == 0:
                # Empty baseline bucket: no meaningful comparison possible.
                continue

            # Walk the remaining 7-value groups, one per non-baseline CC.
            for idx in range(group_size, len(stats), group_size):
                cc_idx = (idx // group_size) - 1
                _sz, _avg_sz, _numf, avg_sl, p50, p95, p99 = stats[idx:idx + group_size]

                # Relative improvement over the baseline (positive = faster).
                pro_avg_sl = (st_avg_sl - avg_sl) / st_avg_sl
                pro_p50 = (st_p50 - p50) / st_p50
                pro_p95 = (st_p95 - p95) / st_p95
                pro_p99 = (st_p99 - p99) / st_p99

                if 0.10 <= fraction_val < 0.90:
                    # Medium-sized flows only.
                    compare[cc_idx][0] += pro_avg_sl
                    compare[cc_idx][1] += pro_p50
                    compare[cc_idx][2] += pro_p95
                    compare[cc_idx][3] += pro_p99

                # All flows.
                compare[cc_idx][4] += pro_avg_sl
                compare[cc_idx][5] += pro_p50
                compare[cc_idx][6] += pro_p95
                compare[cc_idx][7] += pro_p99

        for i in range(len(compare)):
            compare[i][0:4] = [x / medium_buckets for x in compare[i][0:4]]  # medium-flows
            compare[i][4:8] = [x / all_buckets for x in compare[i][4:8]]     # all-flows

        # Parse the alpha value encoded in each non-baseline CC name.
        # NOTE(review): int(cc) assumes CC names are pure numbers such as
        # "10" or "100"; a name like "dcqcn_1" would raise ValueError (or be
        # skipped entirely, leaving alpha shorter than compare) — confirm
        # against the expected -c argument format.
        alpha = []
        for cc in ccs_list[1:]:
            if "00" in cc:
                alpha.append(round(int(cc) * 0.01, 2))
            elif "0" in cc:
                alpha.append(round(int(cc) * 0.1, 2))

        saved_res = []
        for i, row in enumerate(compare):
            print(alpha[i], end=" ")
            for c in row:
                print(f"{c*100:+06.2f}", end=" ")
            print()  # newline after each row
            saved_res.append([alpha[i]] + row)

        all_wl_alpha_res[wl] = saved_res

    # Persist the raw per-workload comparison results.
    with open(f'../simulation/data/best_alpha_2dim_{cc_name}_raw.pkl', 'wb') as f:
        pickle.dump(all_wl_alpha_res, f)


    # Reshape the per-workload results into the 2-D matrices required for
    # the 3-D surface plot: rows = workload, columns = alpha.
    x = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]         # alpha
    y = [40, 50, 60, 70, 80, 90]  # workload
    z_avg = []
    z_99 = []

    for wl in y:
        fct_avg = []
        fct_99 = []
        for fct in all_wl_alpha_res[wl]:
            # NOTE(review): saved_res rows are [alpha, med avg, med p50,
            # med p95, med p99, all avg, all p50, all p95, all p99], so
            # indices 1 and 4 select the MEDIUM-flow avg and p99 columns
            # (the all-flow columns would be 5 and 8) despite the old
            # "only all-sized flows" comments — confirm which is intended.
            fct_avg.append(fct[1])
            fct_99.append(fct[4])
        z_avg.append(fct_avg)
        z_99.append(fct_99)

    print(z_avg)
    print(z_99)

    # BUG FIX: both dict entries previously used the key "fct_avg", so the
    # average-FCT matrix was silently overwritten by the p99 matrix.
    pic_data = {"fct_avg": {"x": x, "y": y, "z": z_avg},
                "fct_99": {"x": x, "y": y, "z": z_99}}
    with open(f'../simulation/data/best_alpha_2dim_{cc_name}_matrix.pkl', 'wb') as f:
        pickle.dump(pic_data, f)