import torch
from torch import nn
from torch import optim
import os
from sklearn.model_selection import train_test_split

from dataset import build_dataloader
from utils import get_score, get_logger, get_args, prepare
from model import build_lstm



def train(epoch, model, criterion, optimizer, dataloader, logger):
    """Run one training epoch and log the loss plus epoch-level metrics.

    Predictions/targets are accumulated over the whole epoch and scored
    once at the end via ``get_score`` (F1 / precision / recall).
    """
    model.train()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    all_preds, all_labels = [], []

    for step, (inputs, targets) in enumerate(dataloader):
        inputs = inputs.to(device)
        targets = targets.to(device)

        # Forward pass
        logits = model(inputs)
        loss = criterion(logits, targets)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Collect hard predictions for epoch-level metrics
        all_preds.extend(logits.argmax(dim=1).cpu().tolist())
        all_labels.extend(targets.cpu().tolist())

        if step % 100 == 0:
            logger.info(f'Epoch[{epoch}][{step}/{len(dataloader)}], Loss: {loss.item():.4f}')

    f1, p, r = get_score(all_labels, all_preds)
    logger.info(f'Epoch[{epoch}](train)\tf1: {f1:.2f}\tprecision: {p:.2f},\trecall: {r:.2f}')

# Best validation F1 seen so far; updated in-place by evaluate().
best_f1 = 0
@torch.no_grad()
def evaluate(epoch, model, dataloader, logger):
    """Score the model on the validation loader and checkpoint on improvement.

    Updates the module-level ``best_f1`` and saves weights to
    ``best_ckpt_path`` whenever the epoch F1 beats the previous best.
    NOTE(review): ``best_ckpt_path`` is only defined under the
    ``__main__`` guard — this function assumes the file is run as a script.
    """
    global best_f1
    model.eval()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    preds_all, labels_all = [], []
    for inputs, targets in dataloader:
        logits = model(inputs.to(device))
        preds_all.extend(logits.argmax(dim=1).cpu().tolist())
        labels_all.extend(targets.tolist())

    f1, p, r = get_score(labels_all, preds_all)
    logger.info(f'Epoch[{epoch}](val)\tf1: {f1:.2f}\tprecision: {p:.2f},\trecall: {r:.2f}')
    if f1 > best_f1:
        best_f1 = f1
        torch.save(model.state_dict(), best_ckpt_path)
        logger.info(f'Save best model(F1: {f1:.2f})')


@torch.no_grad()
def inference(model, loader, logger, checkpoint):
    """Load weights from `checkpoint`, predict over `loader`, and dump results.

    Writes the predicted class indices to ./ckpt/lstm-cls-result.csv
    (one label per row under a 'label' header) and the raw logits to
    ./ckpt/lstm-cls-logits.pt.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # map_location ensures a checkpoint saved on GPU loads on a CPU-only host.
    model.load_state_dict(torch.load(checkpoint, map_location=device, weights_only=True))
    model.eval()
    total_pred = []
    logits = []
    for i, batch_data in enumerate(loader):
        batch_data = batch_data.to(device)

        outputs = model(batch_data)
        logits.extend(outputs.cpu().tolist())
        _, preds = torch.max(outputs, dim=1)
        total_pred.extend(preds.cpu().tolist())
        if i % 100 == 0:
            logger.info(f'Test [{i}/{len(loader)}]')

    # torch.save / open do not create missing directories — ensure it exists.
    os.makedirs('./ckpt', exist_ok=True)
    with open('./ckpt/lstm-cls-result.csv', 'w') as f:
        f.write('label\n')
        for pred in total_pred:
            f.write(str(pred) + '\n')

    torch.save(logits, './ckpt/lstm-cls-logits.pt')


def train_val(model, train_loader, val_loader, logger,
              num_epochs=8, lr=0.001, weight_decay=1e-4):
    """Train `model` for `num_epochs` epochs, validating after each one.

    Args:
        model: the network to optimize (already moved to its device).
        train_loader / val_loader: iterables of (inputs, targets) batches.
        logger: logger used for progress and metric messages.
        num_epochs: number of epochs (default preserves the original 8).
        lr: initial Adam learning rate (default preserves the original 1e-3).
        weight_decay: Adam weight decay (default preserves the original 1e-4).

    Uses cosine-annealed LR down to 5e-6, and saves the final-epoch weights
    to the module-level `last_ckpt_path` (the best ones are saved by
    `evaluate` as training proceeds).
    """
    criterion = nn.CrossEntropyLoss()  # multi-class classification loss
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=5e-6)
    for epoch in range(num_epochs):
        train(epoch, model, criterion, optimizer, train_loader, logger)
        evaluate(epoch, model, val_loader, logger)
        scheduler.step()

    # Keep the last-epoch weights alongside the best checkpoint.
    torch.save(model.state_dict(), last_ckpt_path)



def main():
    """Entry point: build loaders and model, then either run inference or train."""
    logger = get_logger()

    # prepare() returns the data loaders plus the model hyper-parameters
    # (vocab size, padding index, number of classes) derived from the data.
    if args.test:
        test_loader, model_params = prepare(train=False)
        train_loader = val_loader = None
    else:
        train_loader, val_loader, model_params = prepare(train=True)

    model = build_lstm(model_params['vocab_size'],
                       model_params['padding_idx'],
                       model_params['num_classes'])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    if args.test:
        # Inference always restores the best validation checkpoint.
        inference(model, test_loader, logger, best_ckpt_path)
    else:
        train_val(model, train_loader, val_loader, logger)


if __name__ == '__main__':
    # Checkpoint locations shared by evaluate()/train_val()/inference().
    best_ckpt_path = './ckpt/lstm-best-model.pt'
    last_ckpt_path = './ckpt/lstm-last-model.pt'
    # torch.save and open() do not create missing directories — without this,
    # the first checkpoint save crashes on a fresh clone.
    os.makedirs('./ckpt', exist_ok=True)
    args = get_args()
    main()