import argparse
import json
import os
import re
from collections import namedtuple
from pprint import pprint
from typing import Dict, List, Optional, Tuple

import pandas as pd


def print_operator_context_stack(oper_info, df: pd.DataFrame,
                                 stack_info: Dict[str, List[str]], context_size: int = 10):
    r"""Print the surrounding rows (context) and the call stack of one operator.

    Args:
        oper_info: A namedtuple-like object exposing ``index`` (row position in
            ``df``) and ``name`` (operator name), e.g.
            ``OperInfo(index=10, name="Tensor.__mul__.0.forward.input.1")``.
            NOTE: a plain ``Tuple[int, str]`` does NOT work here, because
            ``tuple.index`` is a bound method, not the row number — hence no
            tuple annotation on this parameter.
        df (pd.DataFrame): Loaded from the compare-result excel/csv file.
        stack_info (Dict[str, List[str]]): Loaded from the stack json file;
            maps ``"<op>.forward" / "<op>.backward"`` to the captured stack lines.
        context_size (int, optional): Number of context rows to show. Defaults to 10.
    """
    # "<op>.(forward|backward).<suffix>" — groups 1 and 2 rebuild the key used
    # to look up stack_info (the trailing ".input.N"/".output.N" is dropped).
    oper_pattern = re.compile(r"(.*)\.(forward|backward)\.(.*)")
    # Window is [start, end): half the context before the operator row,
    # clamped to the DataFrame bounds.
    row_index_start = max(0, oper_info.index - context_size // 2)
    row_index_end = min(len(df), row_index_start + context_size)
    column_name_list = ["NPU Name", "Cosine", "MaxAbsErr"]
    row_indices_list = list(range(row_index_start, row_index_end))
    show_context_df = df.iloc[row_indices_list][column_name_list].reset_index()
    # Highlight the operator row itself with a ">>>" marker.
    show_context_df["index"] = show_context_df["index"].apply(
        lambda x: f">>> {x}" if x == oper_info.index else str(x))
    print(show_context_df.to_string(index=False))

    print("\n\n")
    matched = oper_pattern.search(oper_info.name)
    if not matched:
        print(f"Cannot recognize the operator: `{oper_info.name}`")
        return
    oper_base_name = f"{matched.group(1)}.{matched.group(2)}"
    if not (stack_info and oper_base_name in stack_info):
        print(f"Cannot find the stack-infos of operator: `{oper_info.name}`")
        return
    for line in stack_info[oper_base_name]:
        print(line)


def main(
    compare_result_file: str,
    cosine_min_tol: float=0.5,
    mae_max_tol: float=10.0,
    show_topk: int=1,
    context_size: int=20,
    stack_file: Optional[str]=None,
    *args, **kwargs,
):
    r"""Read a compare_result_xxx.csv/.xlsx file, pick the operators whose
    accuracy is not reached, and filter them by the Cosine / MaxAbsErr thresholds.

        e.g. (Cosine < cosine_min_tol) or (MaxAbsErr > mae_max_tol)

    Args:
        compare_result_file (str): The compare-result excel/csv file.
        cosine_min_tol (float, optional): Cosine filtering threshold. Defaults to 0.5.
        mae_max_tol (float, optional): MaxAbsErr filtering threshold. Defaults to 10.0.
        show_topk (int, optional): Show at most the first N problematic operators.
            Defaults to 1.
        context_size (int, optional): Context size shown around each operator.
            Defaults to 20.
        stack_file (Optional[str], optional): e.g. xxx/npu_dump/step0/rank0/stack.json

    Raises:
        FileNotFoundError: If ``compare_result_file`` is not an existing regular file.
        ValueError: If the file extension is neither ``.xlsx`` nor ``.csv``.
    """
    # `raise` instead of `assert` so the check survives `python -O`; isfile()
    # already implies existence, so the former exists() check was redundant.
    if not os.path.isfile(compare_result_file):
        raise FileNotFoundError(f"Error Compare Result File, {compare_result_file}")

    print("[Prepare] Start to Parse CompareResultFile & StackFile.")
    if compare_result_file.endswith(".xlsx"):
        df = pd.read_excel(compare_result_file)
    elif compare_result_file.endswith(".csv"):
        df = pd.read_csv(compare_result_file)
    else:
        raise ValueError(f"Not Support FileType: {os.path.basename(compare_result_file)}")

    # Stack info is optional; fall back to an empty mapping when absent.
    if stack_file and os.path.isfile(stack_file):
        with open(stack_file, "r", encoding="utf8") as fin:
            stack_info = json.load(fin)
    else:
        stack_info = {}
    print("[Prepare] CompareResultFile & StackFile Load Successfully.")
    check_column_list = ["NPU Name", "Accuracy Reached or Not", "Cosine", "MaxAbsErr"]
    assert all(col in df for col in check_column_list), check_column_list

    # Whether x can be interpreted as a number; compatible with both csv
    # (string cells) and xlsx (native numeric cells).
    def check_number(x) -> bool:
        if isinstance(x, (int, float)): return True
        if not isinstance(x, str): return False
        try:
            float(x)
            return True
        except (TypeError, ValueError):  # narrowed from a bare `except`
            return False

    # Rough filter for precision-problematic operators NOQA
    # NOTE: `row` is a pandas Series (one row via apply(axis=1)), not a DataFrame.
    def oper_legality_precision(row: pd.Series) -> bool:
        if not check_number(row["Cosine"]):         # NAN, unsupported
            if not check_number(row["MaxAbsErr"]):  # NAN, unsupported
                return False
            return float(row["MaxAbsErr"]) > mae_max_tol
        if not check_number(row["MaxAbsErr"]):
            return float(row["Cosine"]) < cosine_min_tol
        return (float(row["MaxAbsErr"]) > mae_max_tol) or (float(row["Cosine"]) < cosine_min_tol)

    acc_not_reached_df = df[~(df["Accuracy Reached or Not"] == "Yes")]  # Yes, No, None
    rule_filtered_mask = acc_not_reached_df.apply(oper_legality_precision, axis=1)
    rule_filtered_df = acc_not_reached_df[rule_filtered_mask]
    oper_row_indices = rule_filtered_df.index.to_list()
    OperInfo = namedtuple("OperInfo", ["index", "name"])

    # Clamp to the number of operators actually found; a large --show-topk
    # used to raise IndexError here.
    for idx in range(min(show_topk, len(oper_row_indices))):
        print("\n\n")
        oper_info = OperInfo(index=oper_row_indices[idx],
                             name=df.loc[oper_row_indices[idx], "NPU Name"])
        print_operator_context_stack(oper_info=oper_info, df=df, stack_info=stack_info, context_size=context_size)


def get_args():
    """Parse the command-line options of this precision-inspection tool."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-f", "--compare-result-file", type=str,
                     help="e.g. compare_result_xxx.csv")
    cli.add_argument("--cosine-min-tol", type=float, default=0.5,
                     help="cosine minimum tolerance")
    cli.add_argument("--mae-max-tol", type=float, default=10.0,
                     help="max-abs-err maximum tolerance")
    cli.add_argument("--show-topk", type=int, default=1,
                     help="show topk operators")
    cli.add_argument("--context-size", type=int, default=15,
                     help="show context size of each operator")
    cli.add_argument("--stack-file", type=str, default=None,
                     help="e.g. xxx/step0/rank0/stack.json")
    return cli.parse_args()


if __name__ == "__main__":
    # Script entry point: parse CLI options, echo them for traceability, then
    # forward them to `main` as keyword arguments (extra keys are absorbed by
    # its **kwargs).
    args = get_args()
    pprint(vars(args))
    main(**vars(args))