import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import time
from dataset import get_dataloader
from model import ReviewAnalyzeModel
from config import LOGS_DIR, MODELS_DIR, LEARNING_RATE, EPOCHS


def train_one_epoch(dataloader, model, loss_func, optimizer, device):
    """Run one full training pass over the dataloader and return the mean batch loss.

    Args:
        dataloader: iterable yielding dict batches with keys
            'input_ids', 'attention_mask', 'label'.
        model: the network to train; switched to train mode here.
        loss_func: loss callable applied as loss_func(output, label).
        optimizer: optimizer updating the model parameters.
        device: torch device every tensor is moved to before the forward pass.

    Returns:
        float: sum of per-batch losses divided by the number of batches.
    """
    model.train()
    running_loss = 0.0

    for batch in tqdm(dataloader, desc="train"):
        # Move the current batch onto the target device.
        ids = batch['input_ids'].to(device)          # [batch_size, seq_len]
        mask = batch['attention_mask'].to(device)    # [batch_size, seq_len]
        targets = batch['label'].float().to(device)  # [batch_size]

        # Standard step: clear grads -> forward -> loss -> backward -> update.
        optimizer.zero_grad()
        logits = model(input_ids=ids, attention_mask=mask)  # [batch_size]
        batch_loss = loss_func(logits, targets)
        batch_loss.backward()
        optimizer.step()

        running_loss += batch_loss.item()

    return running_loss / len(dataloader)


def train():
    """Main training entry point.

    Builds the dataloader, model, loss and optimizer, trains for EPOCHS
    epochs, logs the per-epoch average loss to TensorBoard, and saves the
    model weights whenever the epoch loss improves on the best seen so far.
    """
    # Prefer GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    dataloader = get_dataloader()  # data pipeline

    # Model with the BERT backbone frozen (per the freeze_bert flag;
    # exact freezing semantics live in model.py).
    model = ReviewAnalyzeModel(freeze_bert=True).to(device)

    # Binary cross-entropy on raw logits, optimized with Adam.
    loss_func = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

    # One timestamped TensorBoard run directory per invocation.
    writer = SummaryWriter(log_dir=LOGS_DIR / time.strftime("%Y-%m-%d-%H-%M-%S"))

    best_loss = float('inf')  # lowest epoch loss observed so far

    try:
        for epoch in range(1, EPOCHS + 1):
            avg_loss = train_one_epoch(dataloader, model, loss_func, optimizer, device)
            print(f'epoch: {epoch}, avg_loss: {avg_loss:.4f}')

            # Record the epoch average loss to TensorBoard.
            writer.add_scalar('Loss/train', avg_loss, epoch)

            # Checkpoint only when the epoch loss improves.
            if avg_loss < best_loss:
                best_loss = avg_loss
                torch.save(model.state_dict(), MODELS_DIR / 'model.pt')
    finally:
        # Flush and close the writer so buffered events are not lost
        # (the original never closed it, which can drop pending scalars).
        writer.close()


# Script entry point: run training only when this file is executed directly.
if __name__ == '__main__':
    train()
