# -*- coding: UTF-8 -*-
'''
@File ：train_cls.py
@IDE ：PyCharm
@Author ：chaojie
@Date ：2025/11/4 
@Introduce:  训练
'''
# -*- coding: UTF-8 -*-

import argparse
import os
import torch
from torch import nn, optim
from log import set_log
import importlib
from my_datasets.cls_dataset import get_dataloader
import sys


# Select GPU 0 when CUDA is available, otherwise fall back to CPU.
DEVICE = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

# Module-level logger; initialised in main() via set_log() before training starts.
log = None


def acc(y_, y):
    """Return the fraction of predictions in ``y_`` matching labels ``y`` (0-dim tensor)."""
    n_correct = torch.eq(y_, y).sum()
    return n_correct / y.shape[0]

def train(model, train_loader, val_loader, optimizer, scheduler, loss_fn, epochs):
    """Run the train/eval loop for ``epochs`` epochs.

    Saves a "last" checkpoint every epoch and a "best" checkpoint whenever the
    validation accuracy improves.

    Args:
        model: network already moved to DEVICE.
        train_loader / val_loader: yield ``(x, y)`` batches of tensors.
        optimizer: torch optimizer over ``model.parameters()``.
        scheduler: LR scheduler, stepped once per epoch.
        loss_fn: criterion taking ``(logits, labels)``.
        epochs: number of epochs to run.
    """
    best_acc = 0.0

    for epoch in range(epochs):
        train_loss, train_acc = [], []

        model.train()
        for batch, (x, y) in enumerate(train_loader):
            x = x.to(DEVICE)
            y = y.to(DEVICE)
            y_ = model(x)

            loss = loss_fn(y_, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Store plain floats (not GPU tensors) so the epoch means stay on CPU
            # and do not keep computation graphs / device memory alive.
            train_loss.append(loss.item())

            y_label = torch.argmax(y_, dim=1)
            acc_v = acc(y_label, y)
            train_acc.append(acc_v.item())
            if batch % 10 == 0:
                print(f"epoch{epoch + 1} batch: {batch + 1} Loss: {loss.item():.3f} acc: {acc_v.item():.3f}")
        scheduler.step()

        # max(..., 1) guards against an empty training loader.
        mean_loss = sum(train_loss) / max(len(train_loss), 1)
        mean_acc = sum(train_acc) / max(len(train_acc), 1)
        log.info(f"train: Epoch: {epoch + 1} Mean Loss: {mean_loss:.3f} Mean acc: {mean_acc:.3f} lr: {optimizer.param_groups[0]['lr']}")

        # eval — no_grad avoids building autograd graphs during validation.
        model.eval()
        correct, total = 0, 0
        with torch.no_grad():
            for batch, (x, y) in enumerate(val_loader):
                x = x.to(DEVICE)
                y = y.to(DEVICE)
                y_ = model(x)
                y_label = torch.argmax(y_, dim=1)
                correct += (y_label == y).sum().item()
                total += y.shape[0]
        mean_acc = correct / max(total, 1)

        log.info(f"val: Epoch: {epoch + 1}  Mean acc: {mean_acc:.3f}")
        log.info('\n----------------------------\n')

        save_mode(model, epoch, best_acc, optimizer, scheduler, 'last')

        if mean_acc > best_acc:
            # BUGFIX: record the new best accuracy. The original never updated
            # best_acc, so the "best" checkpoint was overwritten every epoch
            # and the final "best acc" log always reported 0.
            best_acc = mean_acc
            save_mode(model, epoch, best_acc, optimizer, scheduler, 'best')

    log.info(f'best acc: {best_acc}')

def save_mode(model, epoch, best_acc, optimizer, scheduler, name, out_dir=None):
    """Save a resumable training checkpoint to ``model_{name}.pth``.

    Args:
        model / optimizer / scheduler: their ``state_dict()`` is stored so
            training can be resumed exactly.
        epoch: index of the last finished epoch.
        best_acc: best validation accuracy seen so far.
        name: checkpoint tag, e.g. ``'last'`` or ``'best'``.
        out_dir: target directory; when omitted, falls back to the module-global
            ``output_path`` set in main() (backward compatible with old callers).
    """
    target_dir = output_path if out_dir is None else out_dir
    ckpt = {
        "epoch": epoch,
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "best_acc": best_acc,
        "scheduler_state_dict": scheduler.state_dict(),
    }

    torch.save(ckpt, os.path.join(target_dir, f"model_{name}.pth"))


def main():
    """CLI entry point: parse arguments, set up logging, data, model, then train."""
    # 1. command-line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("--vocab_file", type=str, default=r"datas/cls/vocab.json")
    ap.add_argument("--label2id_file", type=str, default=r"datas/cls/label_2_id.csv")
    ap.add_argument("--train_path", type=str, default=r"datas/cls/train.csv")
    ap.add_argument("--val_path", type=str, default=r"datas/cls/val.csv")
    ap.add_argument("--model", type=str, default=r"text_cls_model_rnn")
    # alternative model class: text_cls_model_lstm
    ap.add_argument("--batch", type=int, default=128)
    ap.add_argument("--epochs", type=int, default=300)
    ap.add_argument("--lr", type=float, default=0.001)
    ap.add_argument("--out_dir", type=str, default=r"cls")
    args = ap.parse_args()

    # logger + checkpoint directory (shared with save_mode via module globals)
    global log, output_path
    log, output_path = set_log(args.out_dir)

    # echo the configuration into the log
    log.info(f"****************** 参数: ******************** \n {args} \n ")

    # data loaders (validation data is not shuffled)
    train_loader = get_dataloader(args.vocab_file, args.label2id_file, args.train_path, args.batch)
    val_loader = get_dataloader(args.vocab_file, args.label2id_file, args.val_path, args.batch, shuffle=False)

    # resolve the model class by name from the `models` package and instantiate it
    model_cls = getattr(importlib.import_module("models"), args.model)
    model = model_cls(512, train_loader.dataset.vocab_size, 12, hidden_size=512, num_layers=8)
    model.to(DEVICE)
    log.info(f"model:\n{model}")

    # loss / optimizer / LR schedule
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.9)

    train(model, train_loader, val_loader, optimizer, scheduler, loss_fn, args.epochs)


# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
