import sys, os
from torch.utils.data import DataLoader
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
import torch.multiprocessing as mp
import torch.distributed as dist

from tf_gridnet import TF_GridNet

sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from data_loader import getXY
from data_loader import AudioData
from si_snr_se_loss import loss
import config

# Per-process batch size; each spawned rank builds its own DataLoader, so the
# effective global batch is batch_size * world_size.
batch_size = 2
# Checkpoint file: written by rank 0 each epoch, loaded by every rank on resume.
path = "tf_gridnet.pth"
# Number of TF-GridNet blocks (passed as B to the model constructor).
block_num = 5


def train(dataloader, model, loss_fn, optimizer, rank):
    """Run one training epoch over `dataloader`.

    Args:
        dataloader: yields (X, y) batches; X is the input mixture, y the target.
        model: network (typically DDP-wrapped) called as ``model(X)``.
        loss_fn: callable ``loss_fn(pred, y, name=..., device=...)`` returning
            a scalar loss tensor.
        optimizer: optimizer over ``model.parameters()``.
        rank: device index (or device string) batches are moved to.
    """
    num_batches = len(dataloader)  # number of batches, not samples
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(rank), y.to(rank)
        pred = model(X)
        # Named batch_loss so it does not shadow the imported `loss` function
        # and is not rebound from tensor to float inside the loop.
        batch_loss = loss_fn(pred, y, name="si_snr_se", device=rank)

        batch_loss.backward()
        # Clip gradients to stabilize training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
        optimizer.step()
        optimizer.zero_grad()

        # Periodic progress report (triggers whenever samples seen so far is a
        # multiple of 1000, including batch 0).
        if batch * len(X) % 1000 == 0:
            current = (batch + 1) * len(X)
            print(f"loss: {batch_loss.item():>7f}  [{current:>5d}/{(num_batches*len(X)):>5d}]")


def test(dataloader, model, loss_fn, rank):
    """Evaluate `model` on `dataloader` and return the mean per-batch loss.

    Runs under ``torch.no_grad`` with the model in eval mode; uses the
    "si_snr" variant of `loss_fn`.
    """
    model.eval()
    total_loss = 0.0
    batch_count = len(dataloader)
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(rank), y.to(rank)
            total_loss += loss_fn(model(X), y, name="si_snr", device=rank).item()
    avg = total_loss / batch_count
    print(f"Avg loss: {avg:>8f} \n")
    return avg

## ddp process
def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '12355'

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

def cleanup():
    """Tear down the default process group created by setup()."""
    dist.destroy_process_group()


def main(rank, world_size):
    """Per-process DDP training entry point, spawned once per GPU.

    Args:
        rank: this process's rank; also used as its CUDA device index
            (CUDA_VISIBLE_DEVICES remaps the physical cards).
        world_size: total number of spawned processes.
    """
    print(f"rank is {rank}")
    setup(rank, world_size)
    # Bind this process to its GPU before any CUDA allocation happens.
    torch.cuda.set_device(rank)
    torch.cuda.empty_cache()
    print(f"after rank: {rank}")

    x_train, y_train, x_cv, y_cv = getXY(config.tr_directory, config.cv_directory)
    train_dataset = AudioData(x_train, y_train, rank)
    test_dataset = AudioData(x_cv, y_cv, rank)
    # DistributedSampler shards the data across ranks; shuffle stays False on
    # the DataLoader because the sampler controls ordering.
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=False,
        sampler=DistributedSampler(dataset=train_dataset),
    )
    test_dataloader = DataLoader(
        test_dataset,
        batch_size=batch_size,
        sampler=DistributedSampler(dataset=test_dataset),
    )
    ## load model
    print ("initializing model")
    model_base = TF_GridNet(B=block_num, device=rank).to(rank)
    model = DDP(model_base, device_ids=[rank])
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    loss_array = []
    epoch_start = 0
    epoch = 100
    loss_fn = loss
    if os.path.exists(path):
        checkpoint = torch.load(path, map_location="cpu")
        model.module.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        loss_array = checkpoint["loss"]
        # The checkpoint stores the last *completed* epoch, so resume at the
        # next one (previously the saved epoch was re-trained on resume).
        epoch_start = checkpoint["epoch"] + 1
        print(f"load model from epoch {epoch_start}")
    for e in range(epoch_start, epoch):
        print(f"Epoch {e+1}\n-------------------------------")
        # Reshuffle each rank's shard per epoch; without this every epoch
        # iterates the data in the same order.
        train_dataloader.sampler.set_epoch(e)
        train(train_dataloader, model, loss_fn, optimizer, rank)
        loss_array.append(test(test_dataloader, model, loss_fn, rank))
        if rank == 0:
            print ("saving model")
            torch.save(
                {
                    "epoch": e,
                    "model_state_dict": model.module.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                    "loss": loss_array,
                },
                path,
            )
        # Keep ranks in lockstep so none races past the checkpoint write.
        dist.barrier()
        # Halve the learning rate once validation loss has risen two epochs
        # in a row; print once, not once per param group.
        if e > 1 and loss_array[e] > loss_array[e - 1] > loss_array[e - 2]:
            for g in optimizer.param_groups:
                g["lr"] = 0.5 * g["lr"]
            print(f"half the learning rate at epoch {e+1}")
    cleanup()

if __name__ == "__main__":
    print("running with ddp")
    """
    Set up the available gpu card
    """

    device_array = [5,6,7]
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in device_array])
    world_size = len(device_array)
    mp.spawn(main,
             args=(world_size,),
             nprocs=world_size,
             join=True)
    pass
