import os
os.environ["CUDA_VISIBLE_DEVICES"] = "7"  # Pin the process to GPU 7; must run before torch is imported below

import json
import torch
from datasets import Dataset
from safetensors.torch import load_file
from transformers import AutoTokenizer
from seqeval.metrics import classification_report
from component.model import BertLSTMCRF
from tqdm import tqdm
# ===== Configuration =====
model_dir = "adapter/full/bert-base-chinese/bert-lstm-crf-event"  # directory holding the fine-tuned weights
eval_file = "data/init_data/event/test_100.json"                  # evaluation set (list of {text, entities})
label_list = ["B-subject", "B-trigger", "I-subject", "I-trigger", "O"]  # BIO tag set for the two entity types
label2id = {l: i for i, l in enumerate(label_list)}
id2label = {i: l for l, i in label2id.items()}
max_length = 512  # tokenizer truncation/padding length
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# === Load model and tokenizer ===
safetensor_path = f"{model_dir}/model.safetensors"
num_labels = len(label_list)
model_name = "model/bert-base-chinese"  # local path to the base checkpoint
# Fast tokenizer is required: the inference loop relies on encoding.word_ids().
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = BertLSTMCRF(model_name=model_name, num_labels=num_labels)
model.load_state_dict(load_file(safetensor_path))
model.to(device).eval()  # eval mode: disable dropout for deterministic inference
print("✅ 成功加载 model.safetensors 中的模型权重")

# ===== 数据转换函数 =====
# ===== Data conversion =====
def convert_to_bio(text, entities):
    """Convert a text and its entity spans to character-level BIO tags.

    Args:
        text: the raw sentence; each character becomes one token.
        entities: iterable of dicts with keys "type", "start", "end",
            where [start, end) is a character span into ``text``.

    Returns:
        A ``(tokens, labels)`` pair: ``tokens`` is ``list(text)`` and
        ``labels`` is a same-length list of "B-<type>"/"I-<type>"/"O" tags.
    """
    tokens = list(text)
    labels = ["O"] * len(tokens)
    for ent in entities:
        ent_type = ent["type"]
        start = ent["start"]
        end = ent["end"]
        # Skip empty or out-of-range spans: the unchecked version raised
        # IndexError past the end of the text, and a negative start would
        # silently tag the wrong characters via negative indexing.
        if not (0 <= start < end <= len(tokens)):
            continue
        labels[start] = f"B-{ent_type}"
        for i in range(start + 1, end):
            labels[i] = f"I-{ent_type}"
    return tokens, labels

# ===== Load data =====
# Read the raw test set and convert each sample to character-level BIO form.
with open(eval_file, encoding="utf-8") as f:
    raw_data = json.load(f)

processed_data = []
for sample in raw_data:
    chars, bio_tags = convert_to_bio(sample["text"], sample["entities"])
    record = {"tokens": chars, "labels": bio_tags, "raw": sample["text"]}
    processed_data.append(record)

dataset = Dataset.from_list(processed_data)  # original test set; swap in raw_data here if needed

# ===== Inference and result collection =====
all_preds = []   # per-sentence predicted tag sequences (for seqeval)
all_labels = []  # per-sentence gold tag sequences (for seqeval)
results = []     # structured per-sentence records for predictions.json

for item in tqdm(dataset):
    tokens = item["tokens"]  # character list produced by convert_to_bio
    labels = item["labels"]  # gold BIO tags, one per character
    text = item["raw"]       # original sentence string

    # Re-run the tokenizer to obtain word_ids for subword->character alignment.
    encoding = tokenizer(
        tokens,
        is_split_into_words=True,
        return_tensors="pt",
        return_offsets_mapping=True,
        padding="max_length",
        truncation=True,
        max_length=max_length,
    ).to(device)

    word_ids = encoding.word_ids(batch_index=0)
    with torch.no_grad():
        outputs = model(**encoding)
        # NOTE(review): assumes outputs["logits"][0] is already a sequence of
        # integer label ids (e.g. CRF decode output), not raw per-class scores,
        # since entries are used directly as id2label keys below — confirm
        # against BertLSTMCRF.forward.
        preds = outputs["logits"][0]

    # Align gold and predicted tags to subword positions; special tokens and
    # padding (word_id is None) are skipped.
    true_labels = []
    pred_labels = []
    for i, word_id in enumerate(word_ids):
        if word_id is None:
            continue
        # NOTE(review): a character split into several subwords is counted
        # once per subword here — presumably 1:1 for Chinese chars; verify.
        true_labels.append(labels[word_id])
        pred_labels.append(id2label[preds[i]])

    all_labels.append(true_labels)
    all_preds.append(pred_labels)

    # ========= Extract entity spans from the predicted BIO sequence =========
    # idx indexes the aligned (possibly truncated) tag sequence, so the
    # recorded start/end are tag positions, not guaranteed char offsets.
    entities = []
    current_entity = None
    for idx, label in enumerate(pred_labels):
        tok = tokens[idx] if idx < len(tokens) else ""
        if label.startswith("B-"):
            # A new entity begins; close any entity still open.
            if current_entity:
                entities.append(current_entity)
            current_entity = {
                "label": label[2:],
                "text": tok,
                "start": idx,
                "end": idx + 1,
            }
        elif label.startswith("I-") and current_entity and current_entity["label"] == label[2:]:
            # Continuation of the open entity: extend its text and end index.
            current_entity["text"] += tok
            current_entity["end"] = idx + 1
        else:
            # "O", or an I- tag that does not continue the open entity:
            # close the open entity (a dangling I- token is dropped).
            if current_entity:
                entities.append(current_entity)
                current_entity = None
    if current_entity:
        entities.append(current_entity)

    results.append({
        "text": text,
        "tokens": tokens,
        "true_labels": true_labels,
        "pred_labels": pred_labels,
        "entities": entities
    })

# ===== Print evaluation report =====
print("=== SeqEval Evaluation ===")
print(classification_report(all_labels, all_preds, digits=4))

# ===== Save results =====
with open("predictions.txt", "w", encoding="utf-8") as txt_out:
    for sent_tokens, gold_seq, pred_seq in zip(dataset["tokens"], all_labels, all_preds):
        # NOTE(review): zip stops at the shortest sequence, so tokens beyond
        # the aligned/truncated tag length are silently dropped — confirm
        # this is intended.
        rows = [f"{t}\t{l}\t{p}\n" for t, l, p in zip(sent_tokens, gold_seq, pred_seq)]
        txt_out.writelines(rows)
        txt_out.write("\n")
    print("✅ Saved token-level results to predictions.txt")

with open("predictions.json", "w", encoding="utf-8") as json_out:
    json.dump(results, json_out, ensure_ascii=False)
    print("✅ Saved structured results to predictions.json")