import sys, os
from torch.utils.data import DataLoader
import torch

from tf_gridnet import TF_GridNet

sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from data_loader import getXY
from data_loader import AudioData
from si_snr_se_loss import loss
import config


# Runtime configuration shared by train()/test()/main() below.
device = "cuda"  # NOTE(review): hard-coded; assumes a CUDA-capable host — confirm
batch_size = 2  # small batch; TF-GridNet is memory-heavy
path = "tf_gridnet.pth"  # checkpoint file: read to resume, rewritten every epoch


def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch over `dataloader`.

    Args:
        dataloader: yields (X, y) batches of input/target audio tensors.
        model: the network being trained; left in train mode on return.
        loss_fn: callable ``(pred, target, name=...) -> scalar loss tensor``.
        optimizer: optimizer covering ``model.parameters()``.
    """
    # Number of batches, not samples; used only for the progress estimate below.
    num_batches = len(dataloader)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        # NOTE(review): relies on the module-level `device` global.
        X, y = X.to(device), y.to(device)
        pred = model(X)
        # Renamed from `loss`: the original shadowed the imported `loss` function
        # and then rebound the name to a float inside the print branch.
        batch_loss = loss_fn(pred, y, name="si_snr_se")

        batch_loss.backward()
        # Clip gradient norm to 1 to stabilise training of the deep stack.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
        optimizer.step()
        optimizer.zero_grad()

        # Periodic progress report (~every 1000 samples; always fires at batch 0).
        if batch * len(X) % 1000 == 0:
            current = (batch + 1) * len(X)
            # num_batches * len(X) over-counts when the final batch is short;
            # this is only a progress estimate, matching the original output.
            print(f"loss: {batch_loss.item():>7f}  [{current:>5d}/{(num_batches * len(X)):>5d}]")


def test(dataloader, model, loss_fn):
    """Evaluate `model` on `dataloader`; print and return the mean si_snr loss."""
    model.eval()
    num_batches = len(dataloader)
    total_loss = 0.0
    # No gradients needed for evaluation.
    with torch.no_grad():
        for features, targets in dataloader:
            features = features.to(device)
            targets = targets.to(device)
            estimate = model(features)
            total_loss += loss_fn(estimate, targets, name="si_snr").item()
    avg_loss = total_loss / num_batches
    print(f"Avg loss: {avg_loss:>8f} \n")
    return avg_loss


def main():
    """Train TF-GridNet with per-epoch checkpointing, resume support, and
    learning-rate halving after two consecutive epochs of rising validation loss."""
    x_train, y_train, x_cv, y_cv = getXY(config.tr_directory, config.cv_directory)
    train_dataloader = DataLoader(
        AudioData(x_train, y_train, device), batch_size=batch_size
    )
    test_dataloader = DataLoader(AudioData(x_cv, y_cv, device), batch_size=batch_size)
    # Build the model and optimizer.
    model = TF_GridNet(B=6).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    loss_array = []  # per-epoch validation loss history; persisted in the checkpoint
    epoch_start = 0
    epoch = 100  # total number of epochs to train
    loss_fn = loss
    if os.path.exists(path):
        # Resume: map tensors to the current device so checkpoints saved on a
        # different device (e.g. GPU) still load.
        checkpoint = torch.load(path, map_location=device)
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        loss_array = checkpoint["loss"]
        # BUG FIX: the checkpoint stores the index of the last *completed* epoch,
        # so training must resume at the next one. Resuming at the stored index
        # re-ran an epoch and misaligned the loss_array[e] indexing below.
        epoch_start = checkpoint["epoch"] + 1
        print(f"load model from epoch {epoch_start}")
    for e in range(epoch_start, epoch):
        print(f"Epoch {e+1}\n-------------------------------")
        train(train_dataloader, model, loss_fn, optimizer)
        loss_array.append(test(test_dataloader, model, loss_fn))
        # Checkpoint after every epoch; "epoch" records the completed epoch index.
        torch.save(
            {
                "epoch": e,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "loss": loss_array,
            },
            path,
        )
        # Halve the learning rate when validation loss rose two epochs in a row.
        if e > 1 and loss_array[e] > loss_array[e - 1] > loss_array[e - 2]:
            for g in optimizer.param_groups:
                g["lr"] = 0.5 * g["lr"]
            # Print once, not once per param group as the original did.
            print(f"half the learing rate at epoch {e+1}")


if __name__ == "__main__":
    # Restrict CUDA to the chosen physical GPUs before any CUDA context exists.
    visible_gpus = [5, 6, 7]
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gpu) for gpu in visible_gpus)
    main()
