import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm

from bt_config import BT_Config
from model.bt_model import BT_model
from data_handle.data_loader import create_data_loader
from utils.logger import setup_logger
from utils.checkpoint import save_checkpoint, load_checkpoint
import logging


def train_epoch(model: BT_model, data_loader, optimizer, criterion, device):
    """Run one full training pass over *data_loader*.

    Args:
        model: the BT_model being trained (already on *device*).
        data_loader: yields dict batches with 'input_ids', 'attention_mask',
            'labels' tensors — assumed CPU tensors moved here; TODO confirm.
        optimizer: optimizer stepping the model's trainable parameters.
        criterion: loss taking (logits, labels), e.g. CrossEntropyLoss.
        device: torch device the batch tensors are moved to.
    """
    model.train()
    for step, batch in enumerate(tqdm(data_loader)):
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)

        # Call the module itself, not .forward(): going through __call__
        # runs registered forward/backward hooks, which .forward() skips.
        logits = model(input_ids, attention_mask)
        loss = criterion(logits, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 5 == 0:
            # Lazy %-args avoid building the string when the level is disabled.
            logging.info("Step [%d/%d], Loss: %.4f", step, len(data_loader), loss.item())


def train(model, config: BT_Config, train_loader, optimizer, criterion, start_epoch):
    """Drive training from *start_epoch* to config.epochs, one epoch at a time.

    After every epoch the model/optimizer state is checkpointed to
    config.save_model_dir so an interrupted run can resume.
    """
    epoch = start_epoch
    while epoch < config.epochs:
        logging.info(f"Epoch [{epoch}/{config.epochs}] 开始训练...")
        train_epoch(model, train_loader, optimizer, criterion, config.device)
        save_checkpoint(model, optimizer, epoch, config.save_model_dir)
        logging.info(f"Epoch [{epoch}] 完成，模型已保存。")
        epoch += 1


def main():
    """Entry point: wire up config, logging, model, data, and optimizer, then train."""
    # Load run configuration.
    cfg = BT_Config()

    # Set up logging first so every later step is captured in train.log.
    _ = setup_logger(cfg.log_dir, "train.log")

    # Build the custom model and move it to the configured device.
    model = BT_model(cfg)
    model.to(cfg.device)

    # Data pipeline.
    train_loader = create_data_loader(cfg.train_json_path, cfg.batch_size)

    # Only model.fc's parameters are handed to the optimizer — presumably a
    # frozen-backbone fine-tuning setup; verify this is intentional.
    optimizer = optim.Adam(model.fc.parameters(), lr=cfg.lr)
    criterion = nn.CrossEntropyLoss()

    # Resume-from-checkpoint: restores model/optimizer state and the epoch to
    # continue from.
    model, optimizer, start_epoch = load_checkpoint(model, optimizer, cfg.save_model_dir, cfg.device)

    logging.info("训练开始...")
    train(model, cfg, train_loader, optimizer, criterion, start_epoch)


if __name__ == "__main__":
    main()
