import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import time
from preprocess.dataset import get_dataloader, DataType
from model.bert_classifier import BertClassifier
from configuration import config


class EarlyStopping:
    """Stop training when the validation loss stops improving.

    Tracks the best (lowest) validation loss seen so far. Each time the
    loss improves, the model is saved to ``path`` and the patience counter
    resets; otherwise the counter increments, and once it reaches
    ``patience`` the ``early_stop`` flag is set.

    Args:
        patience: Number of consecutive non-improving epochs tolerated
            before stopping.
        path: Destination file for the best model's ``state_dict``.
        delta: Minimum decrease in validation loss required to count as
            an improvement. Default 0.0 preserves the original behavior
            (any strict decrease counts).
    """

    def __init__(self, patience=2, path=None, delta=0.0):
        self.patience = patience
        self.counter = 0
        self.best_score = float('inf')
        self.early_stop = False
        self.path = path
        self.delta = delta

    def __call__(self, valid_loss, model):
        """Record this epoch's validation loss and update the stop state.

        Args:
            valid_loss: Validation loss of the epoch just finished.
            model: Model whose weights are checkpointed on improvement.
        """
        if valid_loss < self.best_score - self.delta:
            # Improvement: remember the new best, reset patience, persist weights.
            self.best_score = valid_loss
            self.counter = 0
            self.save_model(model)
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True

    def save_model(self, model):
        """Save the model's state_dict to ``self.path``."""
        torch.save(model.state_dict(), self.path)


def run_one_epoch(model, dataloader, loss_fn, device, scaler, optimizer=None, is_train=True):
    """Run one full pass over ``dataloader`` and return the mean batch loss.

    In training mode the model is updated with AMP (autocast + dynamic loss
    scaling); in evaluation mode gradients are disabled and the optimizer is
    untouched.

    Args:
        model: Classifier taking ``input_ids`` and ``attention_mask``.
        dataloader: Yields dicts with 'input_ids', 'attention_mask', 'label'.
        loss_fn: Loss applied to (logits, labels).
        device: Target device; also selects the autocast dtype.
        scaler: ``torch.amp.GradScaler`` used for loss scaling when training.
        optimizer: Required when ``is_train`` is True.
        is_train: Whether to update model weights.

    Returns:
        Average loss per batch over the epoch.

    Raises:
        ValueError: If ``is_train`` is True but no optimizer was given.
    """
    if is_train and optimizer is None:
        raise ValueError("optimizer must be provided when is_train=True")

    model.train() if is_train else model.eval()
    total_loss = 0

    # CPU autocast only supports bfloat16; float16 autocast requires CUDA.
    amp_dtype = torch.float16 if device.type == 'cuda' else torch.bfloat16

    with torch.set_grad_enabled(is_train):
        for batch in tqdm(dataloader, desc='train' if is_train else 'valid'):
            input_ids = batch['input_ids'].to(device)  # [batch_size, seq_len]
            attention_mask = batch['attention_mask'].to(device)  # [batch_size, seq_len]
            labels = batch['label'].to(device)  # [batch_size]

            # Automatic mixed-precision context.
            with torch.autocast(device_type=device.type, dtype=amp_dtype):
                outputs = model(input_ids=input_ids, attention_mask=attention_mask)  # [batch_size, num_classes]
                loss = loss_fn(outputs, labels)

            # Dynamic loss scaling: scale, step, update, then clear grads.
            if is_train:
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()

            total_loss += loss.item()

    return total_loss / len(dataloader)


def train():
    """Train the BERT classifier with AMP, early stopping, and resumable checkpoints.

    Runs up to ``config.EPOCHS`` epochs, logging train/valid loss to
    TensorBoard. The best model (lowest validation loss) is written by the
    early-stopping hook; a full resume checkpoint (model, optimizer, scaler,
    early-stop state, epoch) is written after every completed epoch.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    train_loader = get_dataloader(type=DataType.TRAIN)
    valid_loader = get_dataloader(type=DataType.VALID)

    model = BertClassifier(freeze_bert=True).to(device)

    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)

    writer = SummaryWriter(config.LOGS_DIR / time.strftime("%Y%m%d-%H%M%S"))

    # Early-stopping hook: saves the best model and signals when to stop.
    early_stopping = EarlyStopping(patience=2, path=config.MODELS_DIR / 'model.pt')

    # Gradient scaler for mixed-precision training.
    scaler = torch.amp.GradScaler()

    # Resume from checkpoint if one exists.
    start_epoch = 1
    checkpoint_path = config.MODELS_DIR / 'checkpoint.pt'

    if checkpoint_path.exists():
        # map_location is required so a checkpoint saved on GPU can be
        # resumed on a CPU-only machine (and vice versa).
        checkpoint = torch.load(checkpoint_path, map_location=device)

        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scaler.load_state_dict(checkpoint['scaler'])

        # Restore early-stopping state; .get keeps old checkpoints loadable.
        early_stopping.best_score = checkpoint.get('best_score', float('inf'))
        early_stopping.counter = checkpoint.get('counter', 0)

        start_epoch = checkpoint['epoch'] + 1

    for epoch in range(start_epoch, config.EPOCHS + 1):
        train_loss = run_one_epoch(model, train_loader, loss_fn, device, scaler, optimizer, is_train=True)
        valid_loss = run_one_epoch(model, valid_loader, loss_fn, device, scaler, is_train=False)

        print("epoch:{}, train_loss:{:4f}".format(epoch, train_loss))
        print("epoch:{}, valid_loss:{:4f}".format(epoch, valid_loss))

        writer.add_scalar('Loss/train', train_loss, epoch)
        writer.add_scalar('Loss/valid', valid_loss, epoch)

        # Check the early-stopping condition (also saves the best model).
        early_stopping(valid_loss, model)
        if early_stopping.early_stop:
            break

        # Persist everything needed to resume from the next epoch.
        checkpoint = {
            'epoch': epoch,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scaler': scaler.state_dict(),
            'best_score': early_stopping.best_score,
            'counter': early_stopping.counter
        }
        torch.save(checkpoint, checkpoint_path)

    writer.close()
