import os
import numpy as np
import torch
from transformers import BertTokenizer, TrainingArguments, Trainer, DataCollatorWithPadding
from datasets import load_dataset
from model.base_model import get_lora_model
import evaluate

def compute_metrics(eval_pred):
    """Compute accuracy and macro-F1 for a Trainer evaluation batch.

    Implemented with plain NumPy so evaluation does not depend on
    downloading metric scripts via ``evaluate.load`` at runtime, and so
    F1 works for the multiclass setting (``evaluate``'s ``f1`` defaults
    to binary averaging and fails for more than two classes).

    Args:
        eval_pred: ``(logits, labels)`` tuple as supplied by ``Trainer``;
            logits have shape (n_samples, num_labels).

    Returns:
        dict with float ``accuracy`` and macro-averaged ``f1``.
    """
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    labels = np.asarray(labels)

    # Guard against an empty evaluation set rather than dividing by zero.
    accuracy = float((predictions == labels).mean()) if labels.size else 0.0

    # Macro-F1: unweighted mean of per-class F1 over every class that
    # appears in either the references or the predictions.
    f1_scores = []
    for cls in np.union1d(labels, predictions):
        tp = np.sum((predictions == cls) & (labels == cls))
        fp = np.sum((predictions == cls) & (labels != cls))
        fn = np.sum((predictions != cls) & (labels == cls))
        denom = 2 * tp + fp + fn
        f1_scores.append(2 * tp / denom if denom else 0.0)
    f1 = float(np.mean(f1_scores)) if f1_scores else 0.0

    return {"accuracy": accuracy, "f1": f1}

def main():
    """Fine-tune a LoRA-adapted ``bert-base-chinese`` classifier and evaluate it.

    Loads preprocessed/augmented JSON datasets from ``data/``, trains with
    the HuggingFace ``Trainer``, evaluates on the test split, and writes
    the test metrics to ``model/saved_models/test_results.txt``.
    """
    # Configuration
    model_name = "bert-base-chinese"
    # Number of classes comes from the label-classes array saved during
    # preprocessing; allow_pickle is required because it stores Python objects.
    num_labels = len(np.load("data/label_classes.npy", allow_pickle=True))
    output_dir = "model/saved_models"
    batch_size = 32
    learning_rate = 3e-4  # a relatively high LR — typical when only LoRA adapters train
    num_epochs = 10
    
    # Create the output directory if it does not exist yet
    os.makedirs(output_dir, exist_ok=True)
    
    # Load the tokenizer
    tokenizer = BertTokenizer.from_pretrained(model_name)
    
    # Load the datasets; the train split concatenates the original and
    # augmented training files.
    dataset = load_dataset('json', data_files={
        'train': ['data/processed/train.json', 'data/augmented/augmented_train.json'],
        'val': 'data/processed/val.json',
        'test': 'data/processed/test.json'
    },
    cache_dir='data/cache',  # cache directory for processed datasets
    ignore_verifications=True)  # NOTE(review): parameter removed in recent `datasets` releases (replaced by verification_mode) — confirm installed version
    
    # Tokenization: truncate to 128 tokens; padding is deferred to the
    # data collator so each batch is only padded to its own longest sequence.
    def tokenize_function(examples):
        return tokenizer(
            examples['text'],
            truncation=True,
            max_length=128
        )
    
    tokenized_dataset = dataset.map(tokenize_function, batched=True)
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    
    # Load the LoRA-wrapped model and report how many parameters are trainable
    model = get_lora_model(model_name, num_labels)
    model.print_trainable_parameters()
    
    # Training configuration
    training_args = TrainingArguments(
        output_dir=output_dir,
        learning_rate=learning_rate,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        num_train_epochs=num_epochs,
        logging_dir='./logs',
        logging_steps=100,
        evaluation_strategy='epoch',  # NOTE(review): renamed to `eval_strategy` in recent transformers — confirm installed version
        save_strategy='epoch',
        load_best_model_at_end=True,
        metric_for_best_model='f1',  # requires compute_metrics to report an 'f1' key
        fp16=True,  # mixed-precision training — NOTE(review): errors on CPU-only machines; confirm a CUDA GPU is available
        gradient_accumulation_steps=2,  # effective train batch size = 32 * 2 = 64 per device
        weight_decay=0.01,
        warmup_ratio=0.1,
    )
    
    # Initialize the Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_dataset['train'],
        eval_dataset=tokenized_dataset['val'],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,  # must be defined at module scope
    )
    
    # Run training
    trainer.train()
    
    # Evaluate on the held-out test split
    test_results = trainer.evaluate(tokenized_dataset['test'])
    print(f"Test results: {test_results}")
    
    # Persist the evaluation results as plain key: value lines
    with open(os.path.join(output_dir, "test_results.txt"), "w") as f:
        for key, value in test_results.items():
            f.write(f"{key}: {value}\n")

# Run training only when executed as a script, not when imported.
if __name__ == "__main__":
    main()