import os
import torch
from argparse import ArgumentParser

from pytorch_lightning import seed_everything, Trainer

from pl_bolts.models.self_supervised.ssl_finetuner import SSLFineTuner
from train import SwAV
from module.loader import SPK_datamodule
from pl_bolts.models.self_supervised.swav.transforms import SwAVFinetuneTransform
from pl_bolts.transforms.dataset_normalizations import imagenet_normalization, stl10_normalization

from module.transform import Train_Transform, Mel_Spectrogram

def cli_main():  # pragma: no cover
    """Extract speaker embeddings from a pretrained SwAV backbone.

    Parses the checkpoint path plus the SwAV-specific CLI arguments, builds
    the speaker datamodule, runs the frozen backbone over the training set
    (no gradients), and collects the embeddings and their labels.

    Returns:
        (embeddings, labels): two parallel Python lists, one entry per
        training sample. ``embeddings`` holds plain nested lists (moved to
        CPU and converted via ``.numpy().tolist()``).
    """
    seed_everything(1234)

    parser = ArgumentParser()
    parser.add_argument('--ckpt_path', type=str, help='path to ckpt')

    # batch_size, num_workers, gpus, maxpool1, first_conv, dataset, etc.
    # are all registered by the model's own argument hook.
    parser = SwAV.add_model_specific_args(parser)

    args = parser.parse_args()
    args.num_samples = 1000

    dm = SPK_datamodule(train_csv_path="data/train.csv", trial_path="data/vox1_clean.txt",
            batch_size=args.batch_size, num_workers=args.num_workers)

    # NOTE(review): `load_from_checkpoint` is a classmethod, so the freshly
    # constructed SwAV instance is discarded and its constructor kwargs are
    # ignored — the hyper-parameters come from the checkpoint itself.
    # Kept as-is to avoid changing checkpoint-restore semantics; consider
    # `SwAV.load_from_checkpoint(args.ckpt_path, strict=False)` directly.
    backbone = SwAV(
        gpus=args.gpus,
        nodes=1,
        num_samples=args.num_samples,
        batch_size=args.batch_size,
        maxpool1=args.maxpool1,
        first_conv=args.first_conv,
        dataset=args.dataset,
    ).load_from_checkpoint(args.ckpt_path, strict=False)

    backbone = backbone.eval().cuda()
    trans = Mel_Spectrogram()

    embeddings = []
    labels = []
    with torch.no_grad():
        for data, label in dm.train_dataloader():
            data = data.cuda()
            x = trans(data)
            embedding = backbone(x)
            # Move results off the GPU and accumulate as plain Python lists.
            # (Previously the results were fed to interactive `input()` debug
            # calls and the accumulator lists were never filled.)
            embeddings.extend(embedding.detach().cpu().numpy().tolist())
            # labels may arrive as a tensor or as a plain sequence — handle both.
            labels.extend(label.tolist() if torch.is_tensor(label) else list(label))

    return embeddings, labels


# Script entry point: run the embedding-extraction CLI when executed directly.
if __name__ == "__main__":
    cli_main()
