import argparse
import sys

def get_pctl(sorted_list, p):
    """Return the p-th percentile (p in [0, 1]) of an already-sorted list.

    Uses simple truncation: the element at index floor(len * p), clamped to
    the last element so p == 1.0 is valid. An empty list yields 0.0.
    """
    if not sorted_list:
        return 0.0
    pos = min(int(len(sorted_list) * p), len(sorted_list) - 1)
    return sorted_list[pos]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Analyze FCT data with noise removal.')
    parser.add_argument('-p', dest='prefix', action='store', default='fct_fat',
                        help="Specify the prefix of the fct file. Usually like fct_<topology>_<trace>")
    parser.add_argument('-s', dest='step', action='store', default='5',
                        help="granularity step (percentage chunk size)")
    parser.add_argument('-t', dest='type', action='store', type=int, default=0,
                        help="0: normal (dstPort=100), 1: incast (dstPort=200), 2: all")
    parser.add_argument('-T', dest='time_limit', action='store', type=int, default=3000000000,
                        help="only consider flows that finish before T (start_time + actual_fct < T)")
    parser.add_argument('-b', dest='bw', action='store', type=int, default=25,
                        help="bandwidth of edge link (Gbps); not used in this script, keep for reference")
    parser.add_argument('-c', dest='ccs', action='store', default='dcqcn_0 dcqcn_1',
                        help="congestion control algorithms (separated by space)")
    args = parser.parse_args()

    # Parse arguments.
    ccs_list = args.ccs.split()
    step = int(args.step)
    flow_type = args.type
    time_limit = args.time_limit

    # Starting fractions of the percentile buckets: 0.00, 0.05, ..., 0.95 (for step=5).
    fraction_list = [i / 100. for i in range(0, 100, step)]
    # One row per bucket; element 0 of a row is the fraction value, followed by
    # one group of 7 statistics for each congestion-control algorithm.
    res = [[frac] for frac in fraction_list]

    for cc in ccs_list:
        # Read the file ../simulation/output/<prefix>_<cc>.txt
        file_path = f"../simulation/output/{args.prefix}_{cc}.txt"
        print(file_path)
        # 1) Load all flows.
        raw_flows = []
        with open(file_path, 'r') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                tokens = line.split()
                if len(tokens) < 8:
                    # Invalid / truncated line.
                    continue
                # Parse the columns.
                src_ip = tokens[0]
                dst_ip = tokens[1]
                src_port = int(tokens[2])
                dst_port = int(tokens[3])
                flow_size = int(tokens[4])
                start_time = int(tokens[5])
                actual_fct = int(tokens[6])
                best_fct = int(tokens[7])

                # Initial filtering by type and time_limit:
                #   type=0: keep only dst_port == 100
                #   type=1: keep only dst_port == 200
                #   type=2: keep everything
                if flow_type == 0 and dst_port != 100:
                    continue
                if flow_type == 1 and dst_port != 200:
                    continue
                if start_time + actual_fct >= time_limit:
                    # Finishes past the deadline; drop it.
                    continue

                raw_flows.append((start_time, flow_size, actual_fct, best_fct))

        if not raw_flows:
            # No data: emit placeholder stats for every bucket.
            for i in range(len(res)):
                # flow_size, avg_flow_size, num_flows, avg_slow, p50, p95, p99
                res[i].extend([0, 0, 0, 0, 0, 0, 0])
            continue

        # 2) Sort by start time and drop the first 10% and last 10%
        #    (warm-up / cool-down noise removal).
        raw_flows.sort(key=lambda x: x[0])  # x[0] = start_time
        N = len(raw_flows)
        cut_front = int(0.1 * N)  # first 10%
        cut_back  = int(0.9 * N)  # last 10%
        if cut_front >= cut_back:
            # Too few samples or a degenerate case: keep everything.
            filtered_flows = raw_flows
        else:
            filtered_flows = raw_flows[cut_front:cut_back]

        if not filtered_flows:
            for i in range(len(res)):
                res[i].extend([0, 0, 0, 0, 0, 0, 0])
            continue

        # 3) Compute slow = actual_fct / best_fct (force 1 when best_fct=0 or slow<1).
        flows_for_analysis = []
        for (st, sz, act, best) in filtered_flows:
            slow = act / best if best != 0 else 1
            if slow < 1:
                slow = 1
            flows_for_analysis.append((sz, slow))

        # 4) Sort by flow size (sz).
        flows_for_analysis.sort(key=lambda x: x[0])
        total_flows = len(flows_for_analysis)

        # 5) Per-bucket statistics over 0%-5%, 5%-10%, ...
        for bucket_i in range(len(fraction_list)):
            frac = fraction_list[bucket_i]
            l = int(frac * total_flows)
            r = int((frac + step / 100.0) * total_flows)
            if r > total_flows:
                r = total_flows

            if l >= r:
                # No flows land in this bucket.
                res[bucket_i].extend([0, 0, 0, 0, 0, 0, 0])
                continue

            bucket_data = flows_for_analysis[l:r]
            flow_sizes = [x[0] for x in bucket_data]
            slows = [x[1] for x in bucket_data]

            last_flow_size = flow_sizes[-1]  # size of the largest flow in the bucket (could use the mean instead)
            avg_size = sum(flow_sizes) / len(flow_sizes) if len(flow_sizes) else 0
            num_flows = len(flow_sizes)
            avg_slow = sum(slows) / len(slows) if len(slows) else 0

            slows_sorted = sorted(slows)
            pct50 = get_pctl(slows_sorted, 0.5)
            pct95 = get_pctl(slows_sorted, 0.95)
            pct99 = get_pctl(slows_sorted, 0.99)

            res[bucket_i].extend([
                last_flow_size,
                avg_size,
                num_flows,
                avg_slow,
                pct50,
                pct95,
                pct99
            ])

    # Final output: fixed-width fields.
    #
    # Each row is: [fraction, (7 stats for cc1), (7 stats for cc2), ...],
    # i.e. k CCs => k*7 trailing fields.
    #
    # The 7 stats per CC group are:
    #   sz, avg_sz, numf, avg_slow, p50, p95, p99
    # Adjust widths/precision per field as needed.

    group_size = 7
    for row in res:
        # The size/count columns are taken from the first CC group and printed once.
        fraction_val, sz, avg_sz, numf = row[0:4]
        stats = row[1:]

        # Fraction: right-aligned, 3 decimal places; then size, avg size, flow count.
        line_str = f"{fraction_val:>.3f}{sz:>12d}{avg_sz:>12.0f}{numf:>7d}\t"

        # Append the slowdown statistics of every CC group in turn.
        for idx in range(0, len(stats), group_size):
            subgroup = stats[idx:idx + group_size]
            if len(subgroup) < group_size:
                # Defensive: skip a trailing group that is short of 7 fields.
                continue
            _sz, _avg_sl_size, _numf, avg_sl, p50, p95, p99 = subgroup
            if _sz != sz:
                # Every CC is expected to report the same bucket flow size.
                # Bug fix: was exit(0), which signalled success on an error path;
                # use sys.exit(1) so callers/scripts can detect the failure.
                print("Data not Consist!")
                sys.exit(1)

            # Append the 7 slowdown fields of this group to the line.
            line_str += (
                f"{avg_sl:>8.3f}"    # mean slowdown
                f"{p50:>8.3f}"       # 50th percentile
                f"{p95:>8.3f}"       # 95th percentile
                f"{p99:>8.3f}\t"     # 99th percentile
            )

        print(line_str)
