import sys
import os
import torch
import torchaudio

sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))

from config import tr_directory as tr_dir
from config import cv_directory as cv_dir
from config import source_directory as sr_dir
from config import device
from spex_data import SpexData
from spex import Spex
from torch.utils.data import DataLoader
from loss_spex import loss_fn
from loss import loss


def load_data(tr_dir, cv_dir, sr_dir, spk=2):
    """Build the training and cross-validation datasets.

    Args:
        tr_dir: directory with the training mixtures.
        cv_dir: directory with the cross-validation mixtures.
        sr_dir: source root; reference speech is expected under
            ``<sr_dir>/si_tr_s8k``.
        spk: number of speakers in each mixture; only 2 is supported.

    Returns:
        (tr_data, cv_data) tuple of ``SpexData`` datasets.

    Raises:
        ValueError: if ``spk`` is not 2.
    """
    # Raise instead of assert: asserts are stripped under `python -O`.
    if spk != 2:
        raise ValueError(f"only 2-speaker mixtures are supported, got spk={spk}")
    ## si_tr_s directory holding the 8 kHz reference (target-speaker) audio
    si_dir_8k = os.path.join(sr_dir, "si_tr_s8k")
    tr_data = SpexData(tr_dir, si_dir_8k)
    # Sanity-check one sample from each split so shape problems surface early.
    for mix, encoder, output, embedding in tr_data:
        print(mix.shape)
        print(encoder.shape)
        print(output.shape)
        print(embedding)
        break
    cv_data = SpexData(cv_dir, si_dir_8k)
    for mix, encoder, output, embedding in cv_data:
        print(mix.shape)
        print(encoder.shape)
        print(output.shape)
        print(embedding)
        break
    return tr_data, cv_data


def train(train_dataloader, optimizer, model, alpha, beta, l):
    """Run one training epoch over `train_dataloader`.

    Args:
        train_dataloader: yields (mixture, enrollment, target, speaker_label)
            batches.
        optimizer: optimizer holding `model`'s parameters.
        model: SpEx model; `model(X, en)` returns (s1, s2, s3, emb).
        alpha, beta: weights for the multi-scale separation losses.
        l: weight for the speaker-classification (embedding) loss.
    """
    size = len(train_dataloader.dataset)
    model.train()
    for batch, (X, en, y, emb_true) in enumerate(train_dataloader):
        X, y, en, emb_true = (
            X.to(device),
            y.to(device),
            en.to(device),
            emb_true.to(device),
        )
        s1, s2, s3, emb = model(X, en)
        # `batch_loss` (not `loss`) so we don't shadow the imported `loss` fn.
        batch_loss = loss_fn(alpha, beta, l, s1, s2, s3, emb, y, emb_true)

        # Canonical order: clear stale grads, backprop, then step.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            current = (batch + 1) * len(X)
            print(f"train loss: {batch_loss.item():>7f}  [{current:>5d}/{size:>5d}]")


def test(test_dataloader, model):
    """Evaluate `model` on `test_dataloader` and return the mean batch loss.

    Only the first output of the model (the primary separated source) is
    scored; the speaker embedding and auxiliary scales are ignored here.

    Returns:
        float: average loss across batches (printed as a side effect).
    """
    num_batches = len(test_dataloader)
    model.eval()
    test_loss = 0.0
    with torch.no_grad():
        # The speaker label (4th element) is not needed for evaluation.
        for X, en, y, _emb_true in test_dataloader:
            X, en, y = X.to(device), en.to(device), y.to(device)
            pred = model(X, en)[0]
            test_loss += loss(pred, y).item()
    test_loss /= num_batches
    print(f"test loss: {test_loss}")
    return test_loss


def main():
    """Train the SpEx model with checkpoint-based resume.

    Loads data, (re)builds model + optimizer, resumes from ``spex.pth`` if
    present, and after every epoch saves a checkpoint and halves the learning
    rate when the validation loss has risen for two consecutive epochs.
    """
    ## load data
    tr_data, cv_data = load_data(tr_dir, cv_dir, sr_dir)
    batch_size = 15
    alpha, beta, l = 0.1, 0.1, 0.5
    train_dataloader = DataLoader(tr_data, batch_size=batch_size)
    test_dataloader = DataLoader(cv_data, batch_size=batch_size)
    model = Spex().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    epochs = 100
    path = "spex.pth"
    # `loss_history` (not `loss`) so we don't shadow the imported `loss` fn;
    # loss_history[t] is the validation loss of epoch t.
    loss_history = []
    epoch_start = 0
    ## load the model before training
    if os.path.exists(path):
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints you trust.
        checkpoint = torch.load(path)
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        # The checkpoint stores the last *completed* epoch, so resume at the
        # next one (resuming at checkpoint["epoch"] would re-run a finished
        # epoch and desynchronize loss_history's epoch indexing).
        epoch_start = checkpoint["epoch"] + 1
        loss_history = checkpoint["loss"]
        print(f"load model from {path}, start training from epoch {epoch_start} ")

    for t in range(epoch_start, epochs):
        print(f"Epoch {t+1}\n-------------------------------")
        train(
            train_dataloader=train_dataloader,
            optimizer=optimizer,
            model=model,
            alpha=alpha,
            beta=beta,
            l=l,
        )
        loss_history.append(test(test_dataloader, model))
        ## save a checkpoint after every epoch; "epoch" is the epoch just
        ## completed.
        torch.save(
            {
                "epoch": t,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "loss": loss_history,
            },
            path,
        )

        ## halve the learning rate when validation loss rose two epochs in a row
        if t > 1 and loss_history[t - 2] < loss_history[t - 1] < loss_history[t]:
            for g in optimizer.param_groups:
                g["lr"] = 0.5 * g["lr"]


# Script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()
