import argparse
from typing import OrderedDict
import copy
from floorplangan_utils import *

def remove_module_in_state_dict(state_dict):
    """Strip a leading ``'module.'`` prefix from every key of *state_dict*.

    ``nn.DataParallel`` / ``DistributedDataParallel`` wrap a model and prefix
    every parameter name with ``'module.'`` when saving; this undoes that so
    the checkpoint loads into an unwrapped model.

    Args:
        state_dict: mapping of parameter names to tensors, as produced by
            ``model.state_dict()`` (possibly from a DataParallel-wrapped model).

    Returns:
        OrderedDict with the same values and insertion order, where keys that
        began with ``'module.'`` have that prefix removed.
    """
    stripped = OrderedDict()
    for key, value in state_dict.items():
        # Bug fix: the previous `key.split('module.')[-1]` also mangled keys
        # that merely *contain* the substring anywhere (e.g.
        # 'encoder.submodule.weight' -> 'weight'), silently corrupting the
        # state dict. Only remove the prefix at the start of the key.
        if key.startswith('module.'):
            key = key[len('module.'):]
        stripped[key] = value
    return stripped

if __name__=="__main__":
    # Evaluation-time probing script: loads a pretrained FloorplanGAN
    # generator/discriminator checkpoint, freezes them in eval mode, then
    # trains linear classifier probes on the discriminator's intermediate
    # features (helpers presumably come from the star-import of
    # floorplangan_utils — verify against that module).

    # Remember the launch directory; we chdir into ./FloorplanGAN/ below and
    # chdir back before writing the probe-training results.
    root_path = os.path.abspath(os.curdir)

    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu_id", type=str, default="1", help="gpu id")
    opt = parser.parse_args()
    print(opt)

    # Restrict CUDA to the requested physical GPU *before* any device handle
    # is created, so "cuda:0" below maps onto that GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
    print('os.environ["CUDA_VISIBLE_DEVICES"]',os.environ["CUDA_VISIBLE_DEVICES"])
    device = torch.device("cuda:0")

    # Config and dataset paths are relative to the FloorplanGAN checkout.
    os.chdir("./FloorplanGAN/")
    cfg = setup()
    print('dataset:\t{}'.format(cfg.DATASET.NAME))
    print('hostname:\t{}'.format(cfg.MANUAL.HOSTNAME))
    print('date:\t\t{}'.format(cfg.MANUAL.DATE))

    # Training split / dataloader (RPLAN wireframe dataset).
    train_dataset = wireframeDataset_Rplan(cfg)
    #checkpoint = './FloorplanGAN/params/params_rplan_{0}.pkl'.format(cfg.MANUAL.DATE)
    batch_size = cfg.DATASET.BATCHSIZE
    train_dataloader = DataLoader(
        train_dataset, batch_size, shuffle=True,
        num_workers=cfg.SYSTEM.NUM_WORKERS, drop_last=True, pin_memory=True)
    # Validation split: deep-copy the config and point SUBSET at a held-out
    # pickle (the 0-1_1-1_... suffix presumably encodes per-class counts —
    # TODO confirm against the dataset code).
    cfg_valid = copy.deepcopy(cfg)
    cfg_valid.defrost()
    cfg_valid.DATASET.SUBSET = "namestest_0-1_1-1_2-1_3-1_4-0_5-0_6-0_7-1_8-0_9-1.pkl"
    valid_dataset = wireframeDataset_Rplan(cfg_valid)
    valid_dataloader = DataLoader(
        valid_dataset, batch_size, shuffle=True,
        num_workers=cfg.SYSTEM.NUM_WORKERS, drop_last=True, pin_memory=True)

    # NOTE(review): checkpoint path is hard-coded to a specific date rather
    # than derived from cfg.MANUAL.DATE (see commented line above) — confirm
    # this is the intended checkpoint.
    checkpoint = "./params/params_rplan_2021-12-04.pkl"
    renderer = renderer_g2v(
        render_size=cfg.MODEL.RENDERER.RENDERING_SIZE, 
        class_num=train_dataset.enc_len
        )

    generator = Generator(dataset=train_dataset).to(device)
    discriminator = WireframeDiscriminator(dataset=train_dataset, renderer=renderer, cfg=cfg).to(device)

    # Load pretrained weights; the saved state dicts carry DataParallel's
    # 'module.' prefix, which remove_module_in_state_dict strips.
    checkpoint = torch.load(checkpoint)
    generator.load_state_dict(remove_module_in_state_dict(checkpoint['generator_state_dict']))
    #generator_optimizer.load_state_dict(checkpoint['generator_optimizer_state_dict'])
    discriminator.load_state_dict(remove_module_in_state_dict(checkpoint['discriminator_state_dict']))
    #discriminator_optimizer.load_state_dict(checkpoint['discriminator_optimizer_state_dict'])
    epoch_last = checkpoint['epoch']
    n_iter = checkpoint['n_iter']
    print('load_previous_model')
    print('amount of parameters in generator:\t',sum(p.numel() for p in generator.parameters() if p.requires_grad))
    print('amount of parameters in discriminator:\t',sum(p.numel() for p in discriminator.parameters() if p.requires_grad))

    # Initialize optimizers.
    # NOTE(review): these GAN optimizers are created but never stepped below
    # (both models are put in eval mode) — possibly leftover from the
    # training script this was derived from.
    generator_optimizer = torch.optim.Adam(
        generator.parameters(), cfg.MODEL.GENERATOR.LEARNING_RATE)
    discriminator_optimizer = torch.optim.Adam(
        discriminator.parameters(), cfg.MODEL.DISCRIMINATOR.LEARNING_RATE)
    
    # Back to the launch directory so the results path below resolves there.
    os.chdir(root_path)
    generator.eval()
    discriminator.eval()

    print("init probes...")
    # One linear probe (feature_length -> 2 classes) per discriminator
    # mid-feature, each with its own Adam optimizer.
    # DISCRIMINATOR_MIDFEATURE_SIZE presumably maps feature name -> feature
    # dimensionality — TODO confirm in floorplangan_utils.
    probes = dict()
    probe_optimizers = dict()
    learning_rate =3e-4
    n_epochs=10000
    for feature_name, feature_length in DISCRIMINATOR_MIDFEATURE_SIZE.items():
        probes[feature_name] = LinearClassifierProbe(n_in_features=feature_length, n_out_features=2, softmax_dim=-1)
        probes[feature_name].to(device)
        probes[feature_name].train()
        probe_optimizers[feature_name] = torch.optim.Adam(probes[feature_name].parameters(), lr=learning_rate)
    print("training probes...")
    print(f"lr = {learning_rate}")
    print(f"n_epochs = {n_epochs}")
    # Train the probes against frozen generator/discriminator; returns
    # per-epoch losses, error rates and early-stop info (signature assumed
    # from floorplangan_utils.train_probes — verify there).
    loss_epochs, error_epochs, probe_stop = train_probes(generator,discriminator,train_dataloader,valid_dataloader, \
        probes, probe_optimizers, n_epochs=n_epochs)
    # Persist results; assumes the output directory already exists — TODO
    # confirm (os.makedirs is not called here).
    with open("FloorplanGAN_probe_training_result/losses_and_errors_and_stops_last.pkl","wb") as f:
        pickle.dump([loss_epochs, error_epochs, probe_stop],f)