import torch
import os
from torch.amp import GradScaler

from utils import get_logger, prepare, get_args, get_score
from model import build_bert


def align_attention_mask(inputs_id, padding_idx):
    """Build a 0/1 attention mask from token ids.

    Positions holding `padding_idx` get 0; every real token gets 1.
    Returned tensor has the same shape as `inputs_id` and dtype int64.
    """
    return inputs_id.ne(padding_idx).long()


def train(epoch, model, dataloader, optimizer, scaler, logger):
    """Run one mixed-precision training epoch.

    Args:
        epoch: current epoch index (logging only).
        model: classifier whose forward returns an object with `.loss` and `.logits`.
        dataloader: yields (input_ids, labels); its dataset exposes `padding_idx`.
        optimizer: optimizer stepped through the GradScaler.
        scaler: torch.amp.GradScaler used for loss scaling.
        logger: logger for periodic progress messages.
    """
    model.train()
    total_pred, total_target = [], []
    padding_idx = dataloader.dataset.padding_idx
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    for i, (input_ids, labels) in enumerate(dataloader):
        input_ids = input_ids.to(device)
        # Mask is derived from the already-moved ids, so it is born on `device`;
        # the original's extra `.to(device)` round-trip is unnecessary.
        attention_mask = align_attention_mask(input_ids, padding_idx)
        labels = labels.to(device)

        optimizer.zero_grad()
        # Use the actual device type: hard-coding "cuda" makes autocast
        # warn and disable itself on CPU-only hosts.
        with torch.amp.autocast(device_type=device.type):
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        # Predicted class = argmax over the class dimension.
        _, preds = torch.max(outputs.logits, dim=1)
        total_pred.extend(preds.cpu().tolist())
        total_target.extend(labels.cpu().tolist())

        if i % 100 == 0:
            # Running metrics over everything seen so far this epoch.
            f1, p, r = get_score(total_target, total_pred)
            logger.info(f'Epoch[{epoch}][{i}/{len(dataloader)}] Loss: {loss.item():.4f} f1: {f1:.2f} precision: {p:.2f} recall: {r:.2f}')

    f1, p, r = get_score(total_target, total_pred)
    logger.info(f'Epoch[{epoch}](train) f1: {f1:.2f}, precision: {p:.2f}, recall: {r:.2f}')

# Best validation F1 seen so far; mutated by evaluate() across epochs.
best_score = 0

@torch.no_grad()
def evaluate(epoch, device, model, dataloader, logger):
    """Evaluate on the validation set and checkpoint the model when F1 improves.

    Args:
        epoch: current epoch index (logging only).
        device: torch.device inputs are moved to.
        model: classifier whose forward returns an object with `.logits`.
        dataloader: yields (input_ids, labels); its dataset exposes `padding_idx`.
        logger: logger for progress and result messages.

    Side effects: updates the module-level `best_score` and saves the model's
    state_dict to `best_ckpt_path` (a module-level global) on improvement.
    """
    global best_score
    model.eval()
    total_pred, total_target = [], []
    padding_idx = dataloader.dataset.padding_idx
    for i, (input_ids, labels) in enumerate(dataloader):
        input_ids = input_ids.to(device)
        # Mask inherits `device` from the already-moved ids; no extra transfer needed.
        attention_mask = align_attention_mask(input_ids, padding_idx)
        labels = labels.to(device)

        outputs = model(input_ids, attention_mask=attention_mask)
        _, preds = torch.max(outputs.logits, dim=1)
        total_pred.extend(preds.cpu().tolist())
        # Explicit .cpu() for consistency with train(); tolist() result is identical.
        total_target.extend(labels.cpu().tolist())
        if i % 100 == 0:
            logger.info(f'Epoch[{epoch}][{i}/{len(dataloader)}]')

    f1, p, r = get_score(total_target, total_pred)
    logger.info(f'Epoch[{epoch}](val) f1: {f1:.2f}, precision: {p:.2f}, recall: {r:.2f}')
    if f1 > best_score:
        best_score = f1
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists + os.makedirs pattern.
        os.makedirs('./ckpt', exist_ok=True)
        torch.save(model.state_dict(), best_ckpt_path)
        logger.info(f'Save best model(F1: {f1:.2f})')

def train_val(model, train_loader, val_loader, logger):
    """Train for a fixed number of epochs with per-epoch validation.

    Uses AdamW + cosine LR annealing and AMP gradient scaling. Saves the
    final weights to `last_ckpt_path` (a module-level global); the best
    checkpoint is handled inside evaluate().
    """
    num_epochs = 5
    lr = 1e-5
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=1e-5)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=5e-6)
    # GradScaler defaults to CUDA and warns on CPU-only hosts; a disabled
    # scaler is a transparent no-op, so training still works on CPU.
    scaler = GradScaler(enabled=(device.type == 'cuda'))
    for i in range(num_epochs):
        train(i, model, train_loader, optimizer, scaler, logger)
        evaluate(i, device, model, val_loader, logger)
        scheduler.step()
    torch.save(model.state_dict(), last_ckpt_path)



@torch.no_grad()
def inference(model, dataloader, logger, ckpt_path):
    """Load a checkpoint, predict over the test set, and write results.

    Args:
        model: classifier whose forward returns an object with `.logits`;
            assumed to already be on the compute device (main() moves it).
        dataloader: yields batches of input_ids; its dataset exposes `padding_idx`.
        logger: logger for progress messages.
        ckpt_path: path to the state_dict to load (weights_only for safety).

    Side effects: writes predictions to ./ckpt/bert-cls-result.csv and raw
    logits to ./ckpt/bert-cls-logits.pt.
    """
    model.load_state_dict(torch.load(ckpt_path, weights_only=True))
    model.eval()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    padding_idx = dataloader.dataset.padding_idx
    total_pred = []
    logits = []
    for i, batch in enumerate(dataloader):
        input_ids = batch.to(device)
        # Mask is created on `device` via the moved ids; no extra transfer needed.
        attention_mask = align_attention_mask(input_ids, padding_idx)

        outputs = model(input_ids, attention_mask=attention_mask)
        logits.extend(outputs.logits.cpu().tolist())
        _, preds = torch.max(outputs.logits, dim=1)
        total_pred.extend(preds.cpu().tolist())

        if i % 100 == 0:
            logger.info(f'Test [{i}/{len(dataloader)}]')
    # Ensure the output directory exists: evaluate() only creates it during
    # training, and ckpt_path may point somewhere else entirely.
    os.makedirs('./ckpt', exist_ok=True)
    with open('./ckpt/bert-cls-result.csv', 'w') as f:
        f.write('label\n')
        for pred in total_pred:
            f.write(str(pred) + '\n')
    torch.save(logits, './ckpt/bert-cls-logits.pt')

def main():
    """Entry point: prepare data, build the model, then run inference or training."""
    logger = get_logger()
    run_test = args.test

    if run_test:
        test_loader, model_params = prepare(train=False)
    else:
        train_loader, val_loader, model_params = prepare(train=True)

    model = build_bert(**model_params)
    target = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(target)

    if run_test:
        inference(model, test_loader, logger, best_ckpt_path)
    else:
        train_val(model, train_loader, val_loader, logger)


if __name__ == '__main__':
    # Checkpoint paths are module-level globals read by evaluate(),
    # train_val(), and main(); they exist only when run as a script.
    best_ckpt_path = './ckpt/bert-best-model.pt'
    last_ckpt_path = './ckpt/bert-last-model.pt'
    args = get_args()
    main()