# -*- coding: utf-8 -*-
"""
统计 comm_type == 51 的程序总带宽：
- 对于每行：bytes_row = total_size * count
  count 优先取 'count'；若不存在则取 'appearance_time'；都不存在则视为 1
- all_proc_total_size = Σ bytes_row（单位：B）
- all_comm_time = Σ comm_time(us) * count（单位：us）
- 带宽（B/us）= all_proc_total_size / all_comm_time

支持：
- 单文件计算（csv_dir）
- 批量计算（base_dir 下递归查找每个子文件夹的 log-0_processed.csv）

输出：
- target_dir/bandwidth_summary.csv，包含子文件夹名与带宽（以及总字节与总时间）
"""

import os
import sys
import pandas as pd
from typing import Optional, Tuple

REQUIRED_COLUMNS = [
    "comm_id","comm_size","comm_type","rank","root","time_stamp",
    "comm_time(us)","sendsize","sendcount","recvsize","recvcount",
    "src","dst","total_size","appearance_time"
]

def validate_columns(df: pd.DataFrame):
    """Print a warning for any expected column absent from df (never raises)."""
    present = set(df.columns)
    missing = [name for name in REQUIRED_COLUMNS if name not in present]
    if missing:
        # Warn only — some files legitimately lack appearance_time or other columns.
        print(f"警告：缺少列 {missing}；将尝试降级处理。")

def pick_count_column(df: pd.DataFrame) -> Optional[str]:
    """Return the per-row repetition-count column name.

    Preference order: 'count', then 'appearance_time'; None if neither exists.
    """
    for candidate in ("count", "appearance_time"):
        if candidate in df.columns:
            return candidate
    return None

def compute_bandwidth_from_csv(
    csv_path: str,
    comm_type_filter: int = 51,
    sum_time_with_count: bool = True
) -> Tuple[float, int, float, int]:
    """Compute the bandwidth of one CSV, using only rows with comm_type == comm_type_filter.

    Per row: bytes = total_size * count, where count comes from the 'count'
    column, else 'appearance_time', else defaults to 1 per row.

    Args:
        csv_path: Path to the processed CSV file.
        comm_type_filter: Only rows whose comm_type equals this value are used.
        sum_time_with_count:
            True  -> all_comm_time = sum(comm_time(us) * count)
            False -> all_comm_time = sum(comm_time(us))

    Returns:
        (bandwidth_B_per_us, all_proc_total_size_B, all_comm_time_us, used_rows)
        All zeros when no usable rows exist or essential columns are missing.
    """
    try:
        df = pd.read_csv(csv_path)
    except UnicodeDecodeError:
        # Retry with BOM-tolerant decoding (e.g. files re-saved by Excel).
        df = pd.read_csv(csv_path, encoding="utf-8-sig")

    validate_columns(df)

    # Fix: validate_columns only warns about missing columns, but the code
    # below previously raised KeyError when an essential column was absent.
    # Degrade gracefully instead, as the warning promises.
    essential = ["comm_type", "total_size", "comm_time(us)"]
    if any(col not in df.columns for col in essential):
        return (0.0, 0, 0.0, 0)

    # Keep only the requested communication type.
    df = df[df["comm_type"] == comm_type_filter].copy()
    if df.empty:
        return (0.0, 0, 0.0, 0)

    # Coerce numeric columns; drop rows that cannot be interpreted.
    for col in ["total_size", "comm_time(us)"]:
        df[col] = pd.to_numeric(df[col], errors="coerce")
    df = df.dropna(subset=["total_size", "comm_time(us)"])

    # Per-row repetition count ('count' preferred over 'appearance_time').
    count_col = pick_count_column(df)
    if count_col is not None:
        # NOTE(review): unparseable counts become 0, i.e. such a row
        # contributes nothing — confirm this is intended rather than
        # defaulting them to 1 like a missing column.
        df[count_col] = pd.to_numeric(df[count_col], errors="coerce").fillna(0).astype(int)
        counts = df[count_col].values
    else:
        # No count column at all: treat every row as occurring once.
        counts = pd.Series([1] * len(df)).values

    # Totals: bytes = total_size * count; time optionally weighted by count.
    total_sizes = df["total_size"].values
    times_us = df["comm_time(us)"].values

    all_proc_total_size = int((total_sizes * counts).sum())
    if sum_time_with_count:
        all_comm_time = float((times_us * counts).sum())
    else:
        all_comm_time = float(times_us.sum())

    # Bandwidth in B/us; guard against division by zero.
    bandwidth = (all_proc_total_size / all_comm_time) if all_comm_time > 0 else 0.0
    return (bandwidth, all_proc_total_size, all_comm_time, len(df))


def find_processed_csvs(base_dir: str) -> pd.DataFrame:
    """Recursively locate every log-0_processed.csv under base_dir.

    Returns a DataFrame with columns ['folder_name', 'csv_path'], one row
    per directory that contains the target file.
    """
    target = "log-0_processed.csv"
    rows = []
    for dirpath, _subdirs, filenames in os.walk(base_dir):
        if target not in filenames:
            continue
        rows.append({
            "folder_name": os.path.basename(dirpath),
            "csv_path": os.path.join(dirpath, target),
        })
    return pd.DataFrame(rows)


def compute_bandwidths_in_base_dir(
    base_dir: str,
    target_dir: str,
    comm_type_filter: int = 51,
    sum_time_with_count: bool = True
) -> str:
    """Compute bandwidth for every log-0_processed.csv under base_dir.

    Writes one row per discovered folder to target_dir/bandwidth_summary.csv
    and returns the path of that output file. Folders whose CSV fails to
    process are recorded with zeroed metrics instead of aborting the batch.
    """
    os.makedirs(target_dir, exist_ok=True)
    out_file = os.path.join(target_dir, "bandwidth_summary.csv")
    # One schema for both the empty and the populated summary.
    summary_columns = [
        "folder_name", "bandwidth_B_per_us", "total_bytes_B",
        "total_time_us", "used_rows"
    ]

    df_paths = find_processed_csvs(base_dir)
    if df_paths.empty:
        # Fix: the empty summary previously omitted the 'used_rows' column,
        # so its header disagreed with the populated file's header.
        pd.DataFrame(columns=summary_columns).to_csv(out_file, index=False)
        print(f"未找到任何 processed CSV。已生成空文件：{out_file}")
        return out_file

    results = []
    for _, row in df_paths.iterrows():
        folder_name = row["folder_name"]
        csv_path = row["csv_path"]
        try:
            bandwidth, total_bytes, total_time_us, used_rows = compute_bandwidth_from_csv(
                csv_path, comm_type_filter=comm_type_filter, sum_time_with_count=sum_time_with_count
            )
        except Exception as e:
            # Best-effort batch: log the failure and emit a zeroed record.
            print(f"处理 {csv_path} 出错：{e}")
            bandwidth, total_bytes, total_time_us, used_rows = 0.0, 0, 0.0, 0
        results.append({
            "folder_name": folder_name,
            "bandwidth_B_per_us": bandwidth,
            "total_bytes_B": total_bytes,
            "total_time_us": total_time_us,
            "used_rows": used_rows
        })

    pd.DataFrame(results).to_csv(out_file, index=False)
    print(f"结果已保存：{out_file}")
    return out_file


def main():
    """Entry point: run the batch bandwidth computation with hard-coded paths."""
    # Single-file input (currently unused — single-file mode is disabled below)
    # and the output directory for the summary CSV.
    csv_dir = r"F:\PostGraduate\Point-to-Point-DATA\DIFF_ATOM\2node\2node-32proc-10interation-2000000atom-20250925_155735\log-0_processed.csv"
    target_dir = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\code\tools_output\get_bandwidth_from_one_csv\500atom\64node"

    # Batch mode: every subfolder of base_dir holding a log-0_processed.csv.
    # before base_dir = r"F:\PostGraduate\Point-to-Point-DATA\DIFF_ATOM\2node"
    base_dir = r"F:\PostGraduate\Point-to-Point-DATA\WEAK_SCALING\weak_scaling_data_from_new_config_500atom_per_proc\64node"

    # Batch computation. Single-file computation (compute_bandwidth_from_csv
    # on csv_dir) is available but intentionally not invoked here.
    compute_bandwidths_in_base_dir(base_dir, target_dir, comm_type_filter=51, sum_time_with_count=True)


if __name__ == "__main__":
    main()
