import os
import json
import pandas as pd
from datasets import load_dataset, Dataset
import pandas as pd
import numpy as np
import re
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
from io import StringIO

from vllm import LLM, SamplingParams

import restore_metirc as rm


def prepare_dataset(data_dir, test_file, tokenizer):
    """Load a JSONL chat dataset and attach a rendered prompt plus gold label.

    Every message except the final (assistant) turn is rendered through the
    tokenizer's chat template with the generation prompt appended; the final
    turn's content becomes the reference label.

    Args:
        data_dir: directory containing the evaluation file.
        test_file: JSONL file name with a ``messages`` field per sample.
        tokenizer: tokenizer exposing ``apply_chat_template``.

    Returns:
        The dataset with added ``prompt`` and ``label`` columns.
    """

    def attach_prompt_and_label(sample):
        rendered = tokenizer.apply_chat_template(
            sample['messages'][:-1], tokenize=False, add_generation_prompt=True
        )
        # The last turn holds the reference answer text.
        return {'prompt': rendered, 'label': sample['messages'][-1]['content']}

    data_path = os.path.join(data_dir, test_file)
    return load_dataset('json', data_files=data_path, split='train').map(attach_prompt_and_label)


def save_pred_label_json(pred, label, idx, save_dir):
    """Write one sample's prediction/label pair to ``<save_dir>/<idx>.json``.

    Creates *save_dir* if it does not exist; the JSON is written as UTF-8
    with non-ASCII characters preserved and indented for readability.
    """
    os.makedirs(save_dir, exist_ok=True)
    target = os.path.join(save_dir, f"{idx}.json")
    with open(target, "w", encoding="utf-8") as fp:
        json.dump({"pred": pred, "label": label}, fp, ensure_ascii=False, indent=2)


def append_mean_row(results, mean_label='mean', sample_col='sample'):
    """Return *results* as a DataFrame with a trailing row of column means.

    Args:
        results: per-sample records (anything ``pd.DataFrame`` accepts).
        mean_label: value placed in *sample_col* for the appended mean row.
        sample_col: column used to index samples; the data rows get ids
            ``0..len-1`` and the mean row gets *mean_label*.

    Returns:
        pandas.DataFrame with one extra row holding numeric-column means.
    """
    frame = pd.DataFrame(results)
    # Means are taken over numeric columns only, before sample ids are
    # (re)assigned below.
    summary = frame.mean(numeric_only=True).to_dict()
    summary[sample_col] = mean_label
    frame[sample_col] = range(len(frame))
    return pd.concat([frame, pd.DataFrame([summary])], ignore_index=True)


def evaluate_outputs(outputs, dataset, save_dir=None):
    """Score generated switch-action plans against restoration metrics.

    For each generation, the matching prompt is re-parsed into branch, load
    and power-source tables, the predicted switch actions are extracted with
    ``rm.parse_action_switch``, and invalid / cycle / unsupplied-load losses
    are computed.  Per-sample prediction/label JSON files, a per-sample
    results table (``results.json``) and an aggregate ``summary.json`` are
    written under *save_dir*.

    Args:
        outputs: vLLM generation results, index-aligned with *dataset*.
        dataset: evaluation dataset; each item carries ``messages`` with the
            table-bearing prompt at index 1 and the reference answer last.
        save_dir: output directory.  NOTE(review): despite the ``None``
            default, a real path is required — saving is unconditional and
            ``os.path.join(None, ...)`` would raise.

    Returns:
        pandas.DataFrame with one record per evaluated sample.
    """

    def parse_data_to_dfs(data_string):
        """Extract the three tables embedded in a prompt string.

        Args:
            data_string: prompt text containing the line-impedance table,
                node-load table and power-source-capacity table.

        Returns:
            Tuple ``(line_impedance, node_load, power_source, source_buses)``.
            A table is ``None`` when its section is not found; *source_buses*
            is then an empty list.
        """
        # --- line impedance table ---
        line_impedance_match = re.search(
            r"line impedance table is as follows:\s*(.*?)(?=\npower source capacity table is as follows:|\Z)",
            data_string, re.DOTALL)
        if line_impedance_match:
            line_impedance_str = line_impedance_match.group(1).strip()
            # Upper-case 'j' so complex() parses the impedance strings.
            line_impedance_str = line_impedance_str.replace('j', 'J')
            line_impedance = pd.read_csv(StringIO(line_impedance_str), sep='|', skipinitialspace=True)
            line_impedance.columns = line_impedance.columns.str.strip()
            # Convert the Impedance column: "a+bJ" strings become complex,
            # anything else becomes float.
            line_impedance['Impedance'] = line_impedance['Impedance'].apply(
                lambda x: complex(x) if isinstance(x, str) and '+' in x else float(x))
            line_impedance = line_impedance.rename(columns={
                'From': 'F-Node-map',
                'To': 'T-Node-map',
                'Status': 'SwitchState',
            })
            # Zero impedance (real and imaginary) marks a switchable branch.
            line_impedance['Switch'] = line_impedance['Impedance'].apply(
                lambda x: 'Y' if (isinstance(x, (int, float)) and x == 0)
                or (isinstance(x, complex) and x.real == 0 and x.imag == 0) else 'N'
            )
        else:
            line_impedance = None

        # --- node load table ---
        node_load_match = re.search(
            r"node load table is as follows:\s*(.*?)(?=\nThe fault occurs at line between bus|\Z)",
            data_string, re.DOTALL)
        if node_load_match:
            node_load_str = node_load_match.group(1).strip()
            node_load = pd.read_csv(StringIO(node_load_str), sep='|', skipinitialspace=True)
            node_load.columns = node_load.columns.str.strip()
            node_load = node_load.rename(columns={
                'Bus': 'Node-map',
                'Pd': 'P',
                'Qd': 'Q',
            })
        else:
            node_load = None

        # --- power source capacity table ---
        # Expand escaped newlines before this section is matched (the earlier
        # sections were intentionally matched against the raw string).
        data_string = data_string.replace('\\n', '\n\n')
        power_source_match = re.search(
            r"power source capacity table is as follows:\s*(.*?)(?=\nnode load table is as follows:|\Z)",
            data_string, re.DOTALL)
        if power_source_match:
            power_source_str = power_source_match.group(1).strip()
            power_source = pd.read_csv(StringIO(power_source_str), sep='|', skipinitialspace=True)
            power_source.columns = power_source.columns.str.strip()
            power_source = power_source.rename(columns={
                'Bus': 'id-map',
                'Capacity': 'capacity',
            })
        else:
            power_source = None

        # Guard against a missing power-source section: previously this
        # unconditionally dereferenced power_source and crashed on None.
        source_buses = power_source['id-map'].tolist() if power_source is not None else []

        return line_impedance, node_load, power_source, source_buses

    # Aggregate counters over the whole evaluation run.
    format_errors = 0
    has_invalid = 0
    has_cycles = 0
    stat_records = []

    for i, output in enumerate(outputs):
        df_branch, df_loads, df_src, source_buses = parse_data_to_dfs(dataset[i]['messages'][1]['content'])
        gen_str = output.outputs[0].text
        gt_str = dataset[i]['messages'][-1]['content']

        save_pred_label_json(gen_str, gt_str, i, save_dir)

        actions, action_count, has_action = rm.parse_action_switch(gen_str)

        # No parseable action: record a format error and skip metric computation.
        if not has_action:
            format_errors += 1
            stat_records.append({
                "index": i,
                "actions": [],
                "cycles_loss": -1,
                "invalid_loss": -1,
                "unsupply_loss": -1,
                "format_error": True,
                "prediction": gen_str
            })
            continue

        # Compute per-sample restoration metrics.
        invalid_loss = rm.compute_dis_invalid_loss(df_branch, actions)
        cycles_loss, unsupply_loss = rm.compute_dis_cycles_and_unsupply(
            df_branch, actions, df_loads, source_buses
        )
        if invalid_loss > 0:
            has_invalid += 1
        if cycles_loss > 0:
            has_cycles += 1

        stat_records.append({
            "index": i,
            "actions": actions,
            "cycles_loss": cycles_loss,
            "invalid_loss": invalid_loss,
            "unsupply_loss": unsupply_loss,
            "format_error": False,
            "prediction": gen_str
        })

    df_results = pd.DataFrame(stat_records)
    # orient='records' never writes the index; passing index=False here
    # raises ValueError on pandas < 2.0, so it is omitted.
    df_results.to_json(os.path.join(save_dir, "results.json"), orient='records')
    summary = {
        "total_examples": len(dataset),
        "format errors": format_errors,
        "invalid num": has_invalid,
        "cycles num": has_cycles,
    }
    # Use a context manager so the file handle is not leaked.
    with open(os.path.join(save_dir, "summary.json"), 'w') as f:
        json.dump(summary, f, indent=4)

    return df_results


def evaluate(
    model_dir,
    data_dir,
    save_dir=None,
    test_file='eval_data.jsonl',
    cuda_visible_devices='7',
    max_seq_length=8192,
    tensor_parallel_size=1,
    dtype="bfloat16",
    temperature=1.0,
    top_k=1,
    seed=3407,
    max_tokens=512,
):
    """Run vLLM inference on the evaluation split and score the generations.

    Args:
        model_dir: path to the model checkpoint (also used as tokenizer path).
        data_dir: directory containing *test_file*.
        save_dir: directory where evaluation artifacts are written.
        test_file: JSONL evaluation file name.
        cuda_visible_devices: value for ``CUDA_VISIBLE_DEVICES``.
        max_seq_length: maximum model context length.
        tensor_parallel_size: vLLM tensor-parallel degree.
        dtype: model weight/activation dtype.
        temperature: sampling temperature (top_k=1 makes decoding greedy).
        top_k: sampling top-k.
        seed: sampling seed.
        max_tokens: generation length cap.

    Returns:
        pandas.DataFrame of per-sample evaluation records.
    """
    # Pin the GPUs before the engine is constructed.
    os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices

    engine = LLM(
        model=model_dir,
        tokenizer=model_dir,
        trust_remote_code=True,
        tensor_parallel_size=tensor_parallel_size,
        dtype=dtype,
        max_model_len=max_seq_length,
    )

    eval_set = prepare_dataset(data_dir, test_file, engine.get_tokenizer())

    params = SamplingParams(
        temperature=temperature,
        top_k=top_k,
        seed=seed,
        # NOTE(review): these stop ids look model-family specific — confirm
        # they match the tokenizer of model_dir before reuse.
        stop_token_ids=[151329, 151336, 151338],
        max_tokens=max_tokens,
    )
    generations = engine.generate(eval_set["prompt"], params)

    return evaluate_outputs(generations, eval_set, save_dir=save_dir)


if __name__ == "__main__":
    # Evaluate a series of mix-train checkpoints on the emergency-transfer set.
    checkpoint_steps = [311, 622, 933, 1244, 1555, 1866, 2177, 2488, 2799, 3112]
    for step in checkpoint_steps:
        # Checkpoint directories use 4-digit zero-padded iteration numbers.
        tag = f"{step:04d}"

        result = evaluate(
            model_dir=f'/data/ckpts/0827/iter-{tag}/',
            data_dir='/data/public/datasets/proj_csg/emergency_transfer_10665_1186_40_6.5k_200/',
            save_dir=f'/data/lwk/code/proj_csg/output/eval/mix_train/0827/emergency_transfer/iter-{tag}/',
            cuda_visible_devices='6',
            temperature=0,
        )
        print(result)
