# -*- coding: utf-8 -*-
from __future__ import annotations
import math
import os
import re
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Union

import pandas as pd


# ========== Tunable configuration (the script runs as-is without changes) ==========
# language -> list of test sets (list order == column order in the sheet)


LANG_TO_TESTS: Dict[str, List[str]] = {
    "cn": ["wenetspeech_meeting", "aishell2_android", "aishell2_ios",
           "aishell2_mic", "testnet_test","aishell4_test","aishell1_test","fleurs_zh"],
    "en": ["librispeech_clean","librispeech_other", "gigaspeech_test", "fleurs_en","common_voice_en_test"],
}

# the 4 decoding methods (list order == column order in the sheet)
DECODE_METHODS: List[str] = ["ctc_greedy_search" ,"ctc_prefix_beam_search" ,"attention" ,"attention_rescoring"]

# output Excel path
EXCEL_PATH = Path("wer_log.xlsx")


# Sample WER values for a single write. Original note claimed percentages or
# fractions are both accepted and fractions get multiplied by 100 —
# NOTE(review): no such x100 scaling is visible anywhere in this file; verify.
# Structure: lang -> test_set -> decode_method -> wer
wer_data: Dict[str, Dict[str, Dict[str, float]]] = {
    "cn": {
        "wenetspeech_meeting": {"ctc_greedy_search": 4.1, "ctc_prefix_beam_search": 3.9, "attention": 3.7, "attention_rescoring": 3.5},
        "aishell2_android": {"ctc_greedy_search": 7.2, "ctc_prefix_beam_search": 6.8, "attention": 6.5, "attention_rescoring": 6.2},
        "aishell2_ios": {"ctc_greedy_search": 9.1, "ctc_prefix_beam_search": 8.7, "attention": 8.4, "attention_rescoring": 8.0},
    },
    "en": {
        "librispeech_clean": {"ctc_greedy_search": 2.6, "ctc_prefix_beam_search": 2.4, "attention": 2.3, "attention_rescoring": 2.1},
        "librispeech_other": {"ctc_greedy_search": 5.8, "ctc_prefix_beam_search": 5.5, "attention": 5.3, "attention_rescoring": 5.0},
        "gigaspeech_test": {"ctc_greedy_search": 8.9, "ctc_prefix_beam_search": 8.5, "attention": 8.2, "attention_rescoring": 7.9},
    },
}
# =======================================================


# Left-most meta columns ("write time", "model description") — the Chinese
# literals are the actual header strings written into the sheet; keep them.
META_COLS = ("写入时间", "模型描述")
# 统计区：三层（统计 / 平均WER / 对应解码方式）
def avg_block_for_decode(dec: str) -> Tuple[str, str, str]:
    """Return the 3-level column key for the average-WER stats column of *dec*."""
    stats_key = ("统计", "平均WER", dec)
    return stats_key


def build_columns(lang_to_tests: Dict[str, List[str]], decodes: List[str]) -> pd.MultiIndex:
    """Assemble the sheet's 3-level column index: meta block, WER block, stats block."""
    # Left meta block: both upper levels carry the literal 'meta'.
    tuples: List[Tuple[Optional[str], Optional[str], str]] = [
        ('meta', 'meta', name) for name in META_COLS
    ]
    # Middle WER block: one column per (language, test set, decode method).
    tuples.extend(
        (lang, test, dec)
        for lang, tests in lang_to_tests.items()
        for test in tests
        for dec in decodes
    )
    # Right stats block: one average-WER column per decode method.
    tuples.extend(avg_block_for_decode(dec) for dec in decodes)
    return pd.MultiIndex.from_tuples(tuples)

def ensure_header(excel_path: Path, columns: pd.MultiIndex, append: bool = True) -> pd.DataFrame:
    """Return the sheet to write into, aligned to *columns*.

    If the workbook exists and ``append`` is true, previous rows are read back
    and re-indexed as "current columns first, then any legacy columns" so old
    data survives schema changes. Otherwise an empty DataFrame with *columns*
    is returned.

    :param excel_path: path of the workbook (3-level header, index in col 0)
    :param columns: the current 3-level column schema
    :param append: when False, always start from an empty sheet
    """
    if not (excel_path.exists() and append):
        return pd.DataFrame(columns=columns)

    df = pd.read_excel(excel_path, header=[0, 1, 2], index_col=0)
    wanted = columns.tolist()
    # Hoisted: the original recomputed columns.tolist() for every column
    # during the membership test (accidental O(n^2)).
    wanted_set = set(wanted)

    def _sort_key(t):
        # None levels sort as empty strings, matching the original ordering.
        return tuple("" if part is None else str(part) for part in t)

    # Legacy columns no longer in the current schema, appended at the end in
    # a stable stringified order.
    extra = sorted(set(df.columns.tolist()) - wanted_set, key=_sort_key)
    final_cols = pd.MultiIndex.from_tuples(wanted + extra)
    return df.reindex(columns=final_cols)



def flatten_wer_row(
    lang_to_tests: Dict[str, List[str]],
    decodes: List[str],
    wer_dict: Dict[str, Dict[str, Dict[str, float]]],
    model_desc: str = "xixixixihahaha",
) -> Dict[Tuple[Optional[str], Optional[str], str], Union[str, float, None]]:
    """Flatten nested WER results into a single {column-key: cell-value} row.

    Missing (lang, test, decode) cells become None. Cell values may be numbers
    or display strings of the form "<wer>|S..:D..:I.."; only the leading number
    is used for averaging.

    BUGFIX: per-decode averages now ignore missing cells, as the original
    comment intended. Previously None placeholders were summed, raising
    TypeError, which the broad except turned into a None average even when
    valid values existed.

    :param lang_to_tests: language -> ordered test-set names
    :param decodes: ordered decode-method names
    :param wer_dict: lang -> test_set -> decode_method -> wer (float or str)
    :param model_desc: free-text model description for the meta column
    :return: mapping from 3-level column tuples to cell values
    """
    # Meta cells: write timestamp plus the model description.
    row: Dict[Tuple[Optional[str], Optional[str], str], Union[str, float, None]] = {
        ('meta', 'meta', META_COLS[0]): datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        ('meta', 'meta', META_COLS[1]): model_desc,
    }

    # Collect numeric values per decode method so each average is independent.
    per_decode_values: Dict[str, List[float]] = {d: [] for d in decodes}
    for lang, tests in lang_to_tests.items():
        for test in tests:
            for dec in decodes:
                v = wer_dict.get(lang, {}).get(test, {}).get(dec)
                row[(lang, test, dec)] = v
                if v is None:
                    continue  # missing cell: excluded from the average
                if isinstance(v, str):
                    # "wer|S..:D..:I.." display strings carry the number first.
                    v = float(v.split('|')[0])
                per_decode_values[dec].append(v)

    # Average per decode method over the cells that were actually present.
    for dec in decodes:
        vals = per_decode_values[dec]
        row[avg_block_for_decode(dec)] = f'{sum(vals) / len(vals):.2f}' if vals else None

    return row

def append_wer_row(
    excel_path: Path,
    wer_dict: Dict[str, Dict[str, Dict[str, float]]],
    model_desc: str = "xixixixihahaha",
    append: bool = True,
) -> None:
    """Write one result row to the workbook.

    With append=True existing rows are kept; with append=False the sheet is
    rebuilt from scratch before the row is written.
    """
    columns = build_columns(LANG_TO_TESTS, DECODE_METHODS)
    sheet = ensure_header(excel_path, columns, append)
    flat_row = flatten_wer_row(LANG_TO_TESTS, DECODE_METHODS, wer_dict, model_desc)
    row_frame = pd.DataFrame([flat_row]).reindex(columns=sheet.columns)
    combined = pd.concat([sheet, row_frame])
    combined.to_excel(excel_path, index=True)
    print(f"已写入：{excel_path.resolve()}")

# Membership sets used to classify a scanned test-set name as "cn" or "en".
# Contents mirror LANG_TO_TESTS above, stored as sets for O(1) lookup.
cn_set = {
    "wenetspeech_meeting",
    "aishell2_android",
    "aishell2_ios",
    "aishell2_mic",
    "testnet_test",
    "aishell4_test",
    "aishell1_test",
    "fleurs_zh",
}
en_set = {
    "librispeech_clean",
    "librispeech_other",
    "gigaspeech_test",
    "fleurs_en",
    "common_voice_en_test",
}


def extract_info_from_dir_name(dir_name):
    """Parse a decoding result directory name into its components.

    Expected layout:
        <testset>_decoding_chunk_size_<int>_ctc_weight_<float>_reverse_weight_<float>
    chunk_size may be negative (e.g. -1 for full-context decoding).

    :param dir_name: raw directory name
    :return: dict with 'testset_name', 'chunk_size', 'ctc_weight',
             'reverse_weight'; None when the name does not match.
    """
    # chunk_size uses -?\d+ so negative values parse correctly.
    pattern = (
        r'^(?P<testset_name>\w+)_decoding_chunk_size_(?P<chunk_size>-?\d+)_ctc_weight_(?P<ctc_weight>\d+\.?\d*)_reverse_weight_(?P<reverse_weight>\d+\.?\d*)$'
    )
    parsed = re.match(pattern, dir_name)
    if parsed is None:
        print(f"警告：目录名 '{dir_name}' 不符合预期的格式。")
        return None
    return {
        'testset_name': parsed.group('testset_name'),
        'chunk_size': int(parsed.group('chunk_size')),  # int() handles the sign
        'ctc_weight': float(parsed.group('ctc_weight')),
        'reverse_weight': float(parsed.group('reverse_weight')),
    }

from gxl_ai_utils.utils import utils_file
def get_wer_all_from_wer_file(filepath):
    """Extract the overall WER numbers from a 'wer' result file.

    Looks for a line like "Overall -> X.XX % ... S=a D=b I=c".

    :param filepath: path of the wer file (UTF-8 text)
    :return: [overall_wer, S, D, I] as floats, or -1 when no match is found.
    """
    with open(filepath, 'r', encoding='utf-8') as fh:
        text = fh.read()
    found = re.search(r'Overall -> (\d+\.?\d*) %.*S=(\d+) D=(\d+) I=(\d+)', text)
    if not found:
        utils_file.logging_warning(f'no find wer num in {filepath}')
        return -1
    # groups() yields the four captures in order, matching range(1, 5).
    return [float(g) for g in found.groups()]

def do_get_wer_dict_by_param(dir, ckpt_name, decoding_chunk_size_now, ctc_weight_now, reverse_weight_now):
    """Scan <dir>/<ckpt_name>/* decoding result dirs and collect WER display strings.

    Only directories whose (chunk_size, ctc_weight, reverse_weight) match the
    given parameters contribute; other dirs are skipped with a warning.

    :param dir: checkpoint root directory
    :param ckpt_name: checkpoint sub-directory name
    :param decoding_chunk_size_now: chunk size to select (may be negative)
    :param ctc_weight_now: ctc weight to select
    :param reverse_weight_now: reverse weight to select
    :return: lang -> testset -> decode_method -> "wer|S..:D..:I.." string,
             in the shape append_wer_row() expects
    """
    root_dir = os.path.join(dir, ckpt_name)
    wer_raw_dict = {}
    for dirname in os.listdir(root_dir):
        info = extract_info_from_dir_name(dirname)
        if info is None:
            # BUGFIX: extract_info_from_dir_name returns None for names that
            # don't match its pattern; subscripting it unconditionally crashed
            # with TypeError on any unexpected directory.
            continue
        testset_name = info['testset_name']
        if (info['chunk_size'] != decoding_chunk_size_now
                or info['ctc_weight'] != ctc_weight_now
                or info['reverse_weight'] != reverse_weight_now):
            utils_file.logging_warning(f'param not match')
            continue
        if testset_name not in cn_set and testset_name not in en_set:
            utils_file.logging_warning(f'testset_name {testset_name} not in cn_set {cn_set} and en_set {en_set}')
            continue
        cn_str = "cn" if testset_name in cn_set else "en"
        for decode_method in DECODE_METHODS:
            leaf_dir = os.path.join(str(root_dir), dirname, decode_method)
            if not os.path.exists(leaf_dir):
                utils_file.logging_warning(f'{leaf_dir} not exist')
                continue
            wer_file = os.path.join(leaf_dir, "wer")
            num_list = get_wer_all_from_wer_file(wer_file)
            if num_list == -1:
                utils_file.logging_warning(f'{wer_file} 中无有效信息')
                continue
            wer, s, d, i = num_list
            show_str = f'{wer:.2f}|S{int(s)}:D{int(d)}:I{int(i)}'
            # setdefault replaces the original two nested "if not in" inserts.
            wer_raw_dict.setdefault(cn_str, {}).setdefault(testset_name, {})[decode_method] = show_str
    return wer_raw_dict
def do_get_wer_info_from_dir():
    """Collect WER results for a fixed list of checkpoints and write them to Excel.

    For each (checkpoint root dir, ckpt name, row label) entry, results are
    gathered twice: once with chunk size 1 and once with chunk size -1
    (full context). The very first write uses append=False so the workbook is
    recreated; every later write appends a row.

    Refactor: the original repeated the same 13-line stanza six times; the
    run list + loop below produces the identical call sequence.
    """
    ctc_weight_now = 0.3
    reverse_weight_now = 0.5
    # (checkpoint root dir, checkpoint name, human-readable row label)
    runs = [
        ("/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage3_true_hq",
         "step_61999", "zh8W.hq"),
        ("/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage4_true_hq_with_en",
         "step_37999", "zh8W.en3W.hq"),
        ("/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage4_true_hq_with_en",
         "step_59999", "zh8W.en3W.hq"),
    ]
    first_write = True
    for ckpt_dir, ckpt_name, row_name in runs:
        for decoding_chunk_size_now in (1, -1):
            model_desc = f"{row_name}|{ckpt_name}|chunk{decoding_chunk_size_now}|ctc{ctc_weight_now}|reverse{reverse_weight_now}"
            wer_raw_dict = do_get_wer_dict_by_param(
                ckpt_dir, ckpt_name, decoding_chunk_size_now, ctc_weight_now, reverse_weight_now
            )
            # Only the very first write recreates the workbook (append=False).
            append_wer_row(EXCEL_PATH, wer_raw_dict, model_desc, append=not first_write)
            first_write = False







if __name__ == "__main__":
    # Entry point: scan the configured checkpoint dirs and write wer_log.xlsx.
    # (To write the in-file sample data instead, call
    # append_wer_row(EXCEL_PATH, wer_data).)
    do_get_wer_info_from_dir()
