import argparse

import pandas as pd
import torch
from datasets import load_dataset
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# NOTE: `import .restore_metirc` is a SyntaxError; this script is run directly
# (not as a package member), so an absolute import is the correct form.
# Module name "restore_metirc" (sic) kept to match the on-disk filename.
import restore_metirc as rm

"""
调用命令： python eval_lora_metric.py \
    --adapter_dir ./output/checkpoint-2000 \
    --test_file ./data/eval_data.jsonl \
    --max_length 12288 \
    --max_new_tokens 4096
"""
    



def main():
    """Evaluate a LoRA-fine-tuned causal LM on a JSONL chat test set.

    Loads the adapter config to locate the base model, attaches the LoRA
    weights, generates a completion for every example, scores each
    prediction with the `restore_metirc` helpers against grid-topology
    reference spreadsheets, and writes per-example results to
    ``evaluation_result.csv``.
    """
    parser = argparse.ArgumentParser(
        description="Load a fine-tuned LoRA adapter and run inference on a JSONL test set, printing only reference and prediction."
    )
    parser.add_argument(
        "--adapter_dir", type=str, required=True,
        help="Path to the LoRA adapter directory (must contain adapter_config.json)"
    )
    parser.add_argument(
        "--test_file", type=str, required=True,
        help='Path to the JSONL test file (each line a JSON object with a "messages" array)'
    )
    parser.add_argument(
        "--max_length", type=int, default=8040,
        help="Max length for input tokens (should match training max_input_length)"
    )
    parser.add_argument(
        "--max_new_tokens", type=int, default=50,
        help="Max number of tokens to generate (should match training max_output_length)"
    )
    args = parser.parse_args()

    # 1. Load the LoRA configuration from the local adapter directory to
    #    discover which base model it was trained on.
    peft_config = PeftConfig.from_pretrained(
        args.adapter_dir,
        local_files_only=True
    )
    base_model_name_or_path = peft_config.base_model_name_or_path

    # 2. Load tokenizer and base model, then attach the LoRA adapter.
    tokenizer = AutoTokenizer.from_pretrained(
        base_model_name_or_path,
        padding_side="left",
        trust_remote_code=True
    )
    # Some causal-LM tokenizers ship without a pad token; fall back to EOS so
    # generate() has a valid pad_token_id (this was previously left as
    # commented-out code in the generate() call).
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        base_model_name_or_path,
        torch_dtype=torch.bfloat16
    )
    model = PeftModel.from_pretrained(
        model,
        args.adapter_dir,
        local_files_only=True
    )
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # 3. Load the test set.
    dataset = load_dataset(
        "json",
        data_files={"test": args.test_file},
        split="test"
    )
    print(f"Loaded {len(dataset)} examples from {args.test_file}")

    # 4. Load the grid-topology reference spreadsheets used by the metric
    #    helpers. Forward slashes are portable across Windows and POSIX;
    #    the original backslash paths only worked on Windows.
    df_branch = pd.read_excel("dataset/运行方式1-branch_reordered.xlsx")
    df_loads = pd.read_excel("dataset/all_load_node_mapped.xlsx")
    df_src = pd.read_excel("dataset/capacity.xlsx")
    source_buses = df_src["Node-map"].tolist()

    # Running statistics.
    total_examples = 0
    format_errors = 0
    stat_records = []

    # 5. Iterate over examples: build the prompt from all but the last
    #    message (the reference answer), generate, and score.
    for idx, example in enumerate(dataset):
        total_examples += 1
        reference = example['messages'][-1]['content'].strip()
        chat_ids = tokenizer.apply_chat_template(
            example['messages'][:-1],
            tokenize=True,
            add_generation_prompt=True,
            # Honor --max_length (it was previously parsed but unused).
            truncation=True,
            max_length=args.max_length,
        )
        input_ids = torch.tensor([chat_ids], device=device)
        attention_mask = torch.ones_like(input_ids)

        # 6. Generate without gradients (inference only).
        with torch.no_grad():
            outputs = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=args.max_new_tokens,
                pad_token_id=tokenizer.pad_token_id,
            )

        # generate() returns prompt + completion; decode only the newly
        # generated tokens so the prompt text does not leak into the
        # prediction (and into the action parser below).
        generated_ids = outputs[0][input_ids.shape[1]:]
        prediction = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()

        actions, action_count, has_action = rm.parse_action_switch(prediction)
        if not has_action:
            # Output did not contain a parseable action list: record a
            # format error with sentinel losses and move on.
            format_errors += 1
            stat_records.append({
                "index": idx,
                "actions": [],
                "cycles_loss": -1,
                "invalid_loss": -1,
                "unsupply_loss": -1,
                "format_error": True,
                "prediction": prediction
            })
            continue

        # Compute the individual metrics for the parsed switch actions.
        invalid_loss = rm.compute_dis_invalid_loss(df_branch, actions)
        cycles_loss, unsupply_loss = rm.compute_dis_cycles_and_unsupply(
            df_branch, actions, df_loads, source_buses
        )

        stat_records.append({
            "index": idx,
            "actions": actions,
            "cycles_loss": cycles_loss,
            "invalid_loss": invalid_loss,
            "unsupply_loss": unsupply_loss,
            "format_error": False,
            "reference": reference,
            "prediction": prediction
        })

    # 7. Persist per-example results and print a short summary.
    df_stats = pd.DataFrame(stat_records)
    output_file = "evaluation_result.csv"
    df_stats.to_csv(output_file, index=False)
    print(f"Evaluated {total_examples} examples; {format_errors} format error(s).")
    print(f"\nDetailed results saved to {output_file}")



if __name__ == "__main__":
    main()