import json
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "7"  # Restrict CUDA to GPU 7 only; set before transformers/torch are imported so it takes effect
from datasets import Dataset
from transformers import AutoTokenizer, TrainingArguments
from component.model import BertLSTMCRF
from component.trainer import CRFTrainer

# 加载数据
def load_dataset(file_path):
    """Read a UTF-8 encoded JSON file and return the parsed Python object."""
    with open(file_path, encoding="utf-8") as handle:
        return json.load(handle)

# Load raw train/test splits (each a list of {"tokens": [...], "labels": [...]} records).
train_data = load_dataset("data/init_data/event/train.json")
test_data = load_dataset("data/init_data/event/test.json")

# Wrap the raw lists as HuggingFace Dataset objects.
train_ds = Dataset.from_list(train_data)
test_ds = Dataset.from_list(test_data)
dataset = {"train": train_ds, "test": test_ds}

# Build the label vocabulary (sorted for a deterministic id assignment).
# NOTE(review): labels are collected from the TRAIN split only — a tag that
# appears only in test.json would raise KeyError during label alignment; confirm
# the splits share the same tag set.
label_list = sorted(list({label for d in train_data for label in d["labels"]}))
label2id = {l: i for i, l in enumerate(label_list)}
id2label = {i: l for l, i in label2id.items()}

# Initialize the tokenizer and the BERT+LSTM+CRF model from a local checkpoint dir.
model_name = "model/bert-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = BertLSTMCRF(model_name, num_labels=len(label_list))

def tokenize_and_align_labels(examples):
    """Tokenize pre-split token lists and align word-level BIO labels to sub-tokens.

    Alignment rules:
    - Special/padding tokens (word_id is None) get the "O" label: a CRF consumes
      every position in the sequence, so the usual -100 "ignore" sentinel cannot
      be used here.
    - The first sub-token of a word gets the word's original label.
    - Continuation sub-tokens of a word demote a "B-X" tag to "I-X" (when "I-X"
      exists in the label set); repeating "B-X" would teach the CRF the invalid
      transition B-X -> B-X inside a single entity.

    Args:
        examples: batched mapping with "tokens" (list of word lists) and
            "labels" (list of word-level tag lists), as produced by Dataset.map.

    Returns:
        The tokenizer output dict with an added "labels" key of per-token ids.
    """
    tokenized = tokenizer(
        examples["tokens"],
        is_split_into_words=True,
        truncation=True,
        padding="max_length",
        max_length=128,
    )
    labels = []
    for i, word_labels in enumerate(examples["labels"]):
        word_ids = tokenized.word_ids(batch_index=i)
        label_ids = []
        prev_word_id = None
        for word_id in word_ids:
            if word_id is None:
                # Special/padding token: use "O" instead of -100 (CRF cannot skip positions).
                label_ids.append(label2id["O"])
            elif word_id != prev_word_id:
                # First sub-token of a word keeps the word's label as-is.
                label_ids.append(label2id[word_labels[word_id]])
            else:
                # Continuation sub-token: turn B-X into I-X where possible,
                # falling back to the original tag if no I- counterpart exists.
                tag = word_labels[word_id]
                if tag.startswith("B-"):
                    label_ids.append(label2id.get("I-" + tag[2:], label2id[tag]))
                else:
                    label_ids.append(label2id[tag])
            prev_word_id = word_id
        labels.append(label_ids)
    tokenized["labels"] = labels
    return tokenized

# Encode both splits: tokenize and align word-level labels to sub-token ids.
tokenized_dataset = {
    "train": dataset["train"].map(tokenize_and_align_labels, batched=True),
    "test": dataset["test"].map(tokenize_and_align_labels, batched=True),
}

output_dir = 'adapter/full/bert-base-chinese/bert-lstm-crf-event'

# Training hyperparameters.
# NOTE(review): no evaluation strategy is configured, so the eval_dataset passed
# to the trainer is never evaluated during training — add evaluation_strategy
# (or eval_strategy on newer transformers) if per-epoch eval is intended.
args = TrainingArguments(
    output_dir=output_dir,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=1,
    learning_rate=2e-5,
    save_strategy="epoch",  # checkpoint once per epoch
    logging_dir=f"{output_dir}/logs",
)

# Custom Trainer subclass that handles the CRF loss/decoding (see component.trainer).
trainer = CRFTrainer(
    model=model,
    args=args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["test"],
    tokenizer=tokenizer,
)

# Run training, then persist the final model (and tokenizer) to output_dir.
trainer.train()
trainer.save_model(output_dir)