import argparse
import numpy as np
from pytorch_lightning.loggers.wandb import WandbLogger
from disentanglement_lib.methods.shared import architectures, losses
from disentanglement_lib.data.ground_truth import named_data
from disentanglement_lib.methods.unsupervised import train, model, callbacks
from disentanglement_lib.evaluation.metrics import mig
import pytorch_lightning as pl
import torch
import gin
# from pytorch_lightning.utilities import data, cloud_io
from argparse import *

def get_parser(*param):
    """Parse command-line arguments for the DEFT training script.

    Args:
        *param: Optional argument list forwarded to ``ArgumentParser.parse_args``;
            when omitted, arguments are read from ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with fields ``betas``, ``stage_steps``, ``data``
        and ``random``.
    """
    parser = argparse.ArgumentParser()
    # Required: without a value, main() would inject the literal string
    # "deft.betas=None" into the gin config and misconfigure training.
    parser.add_argument('--betas', type=str, required=True,
                        help='Beta schedule bound to deft.betas (gin literal).')
    parser.add_argument('-s', '--stage_steps', type=int, default=10000,
                        help='Number of training steps per DEFT stage.')
    parser.add_argument('-d', '--data', type=str, default='dsprites_full',
                        help='Name of the ground-truth dataset to train on.')
    parser.add_argument('-r', '--random', type=int, default=0,
                        help='Random seed passed to pl.seed_everything.')
    return parser.parse_args(*param)

def main(args):
    """Bind the CLI arguments into gin and run a single DEFT training job.

    Args:
        args: Namespace produced by ``get_parser`` (betas, stage_steps,
            data, random).
    """
    # Translate CLI options into gin bindings consumed by the
    # disentanglement_lib model/dataset factories.
    gin.parse_config([
        f"deft.betas={args.betas}",
        f"deft.stage_steps={args.stage_steps}",
        f"dataset.name='{args.data}'",
        f"model.encoder_fn=@frac_encoder",
        "frac_encoder.G=5"
        ])
    ground_truth = named_data.get_named_ground_truth_data()
    loader = torch.utils.data.DataLoader(
        train.Iterate(ground_truth), 64, num_workers=4)
    pl.seed_everything(args.random)

    lit_model = train.PLModel(regularizers=[model.DEFT()])
    wandb_logger = WandbLogger()
    trainer = pl.Trainer(
        wandb_logger,
        progress_bar_refresh_rate=100,  # refresh progress bar every 100 batches
        max_steps=40000,
        checkpoint_callback=False,      # no checkpoints are written
        callbacks=[
            # NOTE(review): MIG is recomputed every 1000 steps, latent
            # visualizations every 10000 — confirm intervals against the
            # callbacks module if they matter.
            callbacks.ComputeMetric(1000, mig.compute_mig),
            callbacks.Visualization(10000),
        ],
        gpus=1,
    )
    trainer.fit(lit_model, loader)

if __name__ == '__main__':
    # Echo the parsed configuration before launching training.
    cli_args = get_parser()
    print(cli_args)
    main(cli_args)