from torch import Tensor, save
from torch.nn.utils import clip_grad_norm_
from torch.optim import AdamW
from tqdm import tqdm
from os import makedirs
from time import time
from rich import print
from core import (
    get_dataloader,
    MovieBiLSTM_CRF
)
from utils import conf

def train() -> None:
    """Train the BiLSTM-CRF model and save its state dict to ``conf.model.path_torch``.

    Uses the train split from ``get_dataloader()``, optimizes the CRF
    log-likelihood loss with AdamW, clips gradients to norm 1.0, and
    reports per-batch loss on a tqdm progress bar.
    """
    # Get train dataloader (validation loader is unused here)
    train_dl, _ = get_dataloader()
    # Initialize model on the configured device
    model = MovieBiLSTM_CRF().to(conf.device)
    optimizer = AdamW(
        model.parameters(),
        lr=conf.model.train.lr
    )
    print('[bold gold1]Training model...[/]')
    # Get start time
    start = time()
    # Set model to train mode once — nothing in this loop switches it back,
    # so re-calling it per batch is redundant
    model.train()
    epochs = conf.model.train.epochs
    # Width for zero-padding the epoch counter in the progress-bar label
    width = len(str(epochs))
    for epoch in range(epochs):
        desc = f'\033[1;32mEpoch\033[0m {epoch + 1:0{width}d}/{epochs}'
        pbar = tqdm(train_dl, desc=desc)
        for input_ids, labels, mask in pbar:
            # Move batch tensors to the configured device
            input_ids: Tensor = input_ids.to(conf.device)
            labels: Tensor = labels.to(conf.device)
            mask: Tensor = mask.to(conf.device)
            # CRF negative log-likelihood loss (see MovieBiLSTM_CRF.log_likelihood)
            loss = model.log_likelihood(input_ids, labels, mask)
            loss.backward()
            # Clip gradient norm to 1.0 to stabilize RNN training
            clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            optimizer.zero_grad()
            # Show current batch loss on the progress bar
            pbar.set_postfix(loss=f'\033[1;35m{loss.item():.6f}\033[0m')
    # Save model checkpoint
    makedirs(conf.model.path_torch.parent, exist_ok=True)
    save(model.state_dict(), conf.model.path_torch)
    print(f'[bold green]{conf.model.path_torch}[/] saved!')
    # NOTE: end time is taken after saving, so the reported duration
    # includes checkpoint serialization (matches original behavior)
    end = time()
    print(f'[bold green]Time taken: [bold white]{end - start:.2f}s[/]')
