import argparse
import pickle

def get_pctl(sorted_list, p):
    """Return the p-quantile element of *sorted_list* (p in [0, 1]).

    Uses the nearest-rank index ``int(len * p)``, clamped to the last
    valid position; an empty list yields 0.0.
    """
    if not sorted_list:
        return 0.0
    rank = min(int(len(sorted_list) * p), len(sorted_list) - 1)
    return sorted_list[rank]

if __name__=="__main__":
    parser = argparse.ArgumentParser(description='Analyze FCT data with noise removal.')
    parser.add_argument('-p', dest='prefix', action='store', default='fct_fat',
                        help="Specify the prefix of the fct file. Usually like fct_<topology>_<trace>")
    parser.add_argument('-s', dest='step', action='store', default='5',
                        help="granularity step (percentage chunk size)")
    parser.add_argument('-T', dest='time_limit', action='store', type=int, default=3000000000,
                        help="only consider flows that finish before T (start_time + actual_fct < T)")
    parser.add_argument('-c', dest='ccs', action='store', default='dcqcn_0 dcqcn_1',
                        help="congestion control algorithms (separated by space)")
    args = parser.parse_args()

    # Parse arguments.
    ccs_list = args.ccs.split()
    step = int(args.step)
    time_limit = args.time_limit

    # Derive the CC family name from the prefix; only used to name the
    # output pickle file.
    cc_name = "UnKnown"
    if "hp95" in args.prefix:
        cc_name = "hp95"
    elif "dcqcn" in args.prefix:
        cc_name = "dcqcn"

    # Start fraction of each percentile bucket: 0.00, 0.05, ..., 0.95
    # (for step=5).
    fraction_list = [i/100. for i in range(0, 100, step)]
    # One row per bucket; element 0 is the fraction value, then 7 stats
    # are appended per congestion-control algorithm.
    res = [[frac] for frac in fraction_list]

    for cc in ccs_list:
        # Input file: ../simulation/output/<prefix>_<cc>.txt
        file_path = f"../simulation/output/{args.prefix}_{cc}.txt"
        print("Read ", file_path)

        # 1) Load all flows that finish before the time limit.
        raw_flows = []
        with open(file_path, 'r') as f:
            for line in f:
                tokens = line.strip().split()
                if len(tokens) < 8:
                    # Malformed line -- skip.
                    continue
                # Columns: tokens[4]=size, [5]=start time, [6]=actual FCT,
                # [7]=ideal (best-case) FCT.
                flow_size = int(tokens[4])
                start_time = int(tokens[5])
                actual_fct = int(tokens[6])
                best_fct = int(tokens[7])

                if start_time + actual_fct >= time_limit:
                    # Finishes past the cutoff -- exclude.
                    continue

                raw_flows.append((start_time, flow_size, actual_fct, best_fct))

        if not raw_flows:
            print("No Flows, Error!")
            exit(-1)

        # 2) Sort by start time and drop the first and last 10% of flows
        #    (warm-up / cool-down noise removal).
        raw_flows.sort(key=lambda x: x[0])  # x[0] = start_time
        N = len(raw_flows)
        filtered_flows = raw_flows[int(0.1 * N):int(0.9 * N)]

        # 3) Slowdown = actual_fct / best_fct, floored at 1.
        #    BUG FIX: guard best_fct == 0 -- the original divided
        #    unconditionally and raised ZeroDivisionError even though the
        #    comment claimed the case was set to 1.
        flows_for_analysis = []
        for (st, sz, act, best) in filtered_flows:
            slow = act / best if best > 0 else 1
            if slow < 1:
                slow = 1
            flows_for_analysis.append((sz, slow))

        # 4) Sort by flow size.
        flows_for_analysis.sort(key=lambda x: x[0])
        total_flows = len(flows_for_analysis)

        # 5) Bucket statistics over 0%-5%, 5%-10%, ... size percentiles.
        for bucket_i, frac in enumerate(fraction_list):
            l = int(frac * total_flows)
            r = min(int((frac + step/100.0) * total_flows), total_flows)

            bucket_data = flows_for_analysis[l:r]
            # NOTE(review): assumes every bucket is non-empty; with a very
            # small input an empty bucket raises IndexError here (same as
            # the original behavior).
            flow_sizes = [x[0] for x in bucket_data]
            slows = [x[1] for x in bucket_data]

            last_flow_size = flow_sizes[-1]  # size of the largest flow in the bucket
            avg_size = sum(flow_sizes) / len(flow_sizes)
            num_flows = len(flow_sizes)
            avg_slow = sum(slows) / len(slows)

            slows_sorted = sorted(slows)
            res[bucket_i].extend([
                last_flow_size,
                avg_size,
                num_flows,
                avg_slow,
                get_pctl(slows_sorted, 0.5),
                get_pctl(slows_sorted, 0.95),
                get_pctl(slows_sorted, 0.99),
            ])

    # Each CC contributes group_size stats per row; CCs are consumed in
    # (baseline, experiment) pairs, i.e. 2 * group_size values at a time.
    group_size = 7
    fcts = [[],[],[],[]] # improvement series: avg, p50, p95, p99
    for i in range(len(fcts)):
        for j in range(len(ccs_list) // 2):
            fcts[i].append([])
    x = []

    for row in res:
        fraction_val, sz, avg_sz = row[0:3]
        stats = row[1:]

        # Process each (baseline, experiment) CC pair of this row.
        x.append(avg_sz)
        for idx in range(0, len(stats), group_size * 2):
            cc_idx = (idx // group_size)
            # CC names are expected as "<workload>_<run-id>", e.g. "30_0";
            # the numeric workload selects the output series.
            # NOTE(review): the default -c value ("dcqcn_0 dcqcn_1") is not
            # numeric and would fail int() below -- confirm intended usage.
            workload, _run_id = ccs_list[cc_idx].split("_")
            workload_idx = int((int(workload) / 10) - 1)
            _sz, _avg_sz, _numf, st_avg, st_p50, st_p95, st_p99 = stats[idx:idx+group_size]
            _sz, _avg_sz, _numf, ex_avg, ex_p50, ex_p95, ex_p99 = stats[idx+group_size:idx+group_size*2]

            # Relative improvement of the experiment over the baseline.
            pro_avg = (st_avg - ex_avg) / st_avg
            pro_p50 = (st_p50 - ex_p50) / st_p50
            pro_p95 = (st_p95 - ex_p95) / st_p95
            pro_p99 = (st_p99 - ex_p99) / st_p99

            fcts[0][workload_idx].append(pro_avg)
            fcts[1][workload_idx].append(pro_p50)
            fcts[2][workload_idx].append(pro_p95)
            fcts[3][workload_idx].append(pro_p99)

    # Persist x (average bucket sizes) and y (improvement series) for plotting.
    with open(f'../simulation/data/workload_{cc_name}.pkl', 'wb') as f:
        pickle.dump({"x":x, "y":fcts}, f)