import os
import os.path
import sys
import math
import argparse
import time
import random
import numpy as np
import utils

from torchvision.utils import save_image, make_grid
from collections import OrderedDict
from tqdm import tqdm

import torch
import torchvision
import torch.optim as optim
import models.blocks as B

from datasets import create_dataloader, create_dataset
from models import create_network
from losses import build_forward_loss, build_feedback_loss
from torch.autograd import Variable
from math import log10
from torch.optim.lr_scheduler import StepLR
from tensorboardX import SummaryWriter

# Hyper-parameters for the training run.
END_EPOCH = 200
# Epoch of the checkpoint we resume from; training restarts at the next epoch.
LAST_EPOCH = 40
START_EPOCH = LAST_EPOCH + 1

# Resume settings: paths of the checkpoints written by the previous run.
CONTINUE = True
LAST_EXP = "NaturalScene"
FORWARD_NAME = f"epoches/{LAST_EXP}/forward_net_epoch_{LAST_EPOCH}.pth"
FEEDBACK_NAME = f"epoches/{LAST_EXP}/feedback_net_epoch_{LAST_EPOCH}.pth"
DISCRIMINATOR_NAME = f"epoches/{LAST_EXP}/discriminator_net_epoch_{LAST_EPOCH}.pth"

# Name of the current experiment (output directories and TensorBoard run name).
EXPERENT = "NaturalScene"
SAVEFREQUENCY = 1          # save checkpoints every N epochs
STEPFREQUENCY = 8          # LR is halved every N epochs (StepLR gamma=0.5)
FEEDBACK_RATE = 0.1        # weight of the feedback loss in the total loss
# Reconstruct the already-decayed learning rate so resuming matches the schedule.
LEARNING_RATE = 0.0001 / (2 ** (LAST_EPOCH // STEPFREQUENCY))

if __name__ == "__main__":

    seed = random.randint(1, 10000)
    utils.set_random_seed(seed)

    # create writer
    writer = SummaryWriter("runs/"+EXPERENT)
    # create dis
    if not os.path.exists("epoches/"+EXPERENT):
        os.makedirs("epoches/"+EXPERENT)
    if not os.path.exists("results/"+EXPERENT):
        os.makedirs("results/"+EXPERENT)

    # create dataloader
    train_set = create_dataset("LRHR", "train", EXPERENT)
    train_loader = create_dataloader(train_set, "train")

    # create model
    forward_net, feedback_net, discriminator = create_network()

    # create loss function
    forward_loss_generator = build_forward_loss()
    feedback_loss_generator = build_feedback_loss()

    if torch.cuda.is_available():
        forward_net.cuda()
        feedback_net.cuda()
        discriminator.cuda()
        forward_loss_generator.cuda()
        feedback_loss_generator.cuda()
        if CONTINUE:
            forward_net.load_state_dict(torch.load(FORWARD_NAME))
            feedback_net.load_state_dict(torch.load(FEEDBACK_NAME))
            # discriminator.load_state_dict(torch.load(DISCRIMINATOR_NAME))
            print("Models have been loaded.")

    # create optimizer
    opt_forward = optim.Adam(forward_net.parameters(), lr=LEARNING_RATE)
    opt_feedback = optim.Adam(feedback_net.parameters(), lr=LEARNING_RATE)
    opt_discriminator = optim.Adam(
        discriminator.parameters(), lr=LEARNING_RATE)
    s_forward = StepLR(opt_forward, step_size=STEPFREQUENCY, gamma=0.5)
    s_feedback = StepLR(opt_feedback, step_size=STEPFREQUENCY, gamma=0.5)
    s_discriminator = StepLR(
        opt_discriminator, step_size=STEPFREQUENCY, gamma=0.5)

    # training
    total_steps = 0
    for epoch in range(START_EPOCH, END_EPOCH+1):
        train_bar = tqdm(train_loader, desc="[start training]")
        running_results = {"batch_size": 0, "forward_loss": 0,
                           "feedback_loss": 0, "total_loss": 0, "D_loss": 0}

        forward_net.train()
        feedback_net.train()
        discriminator.train()

        for img_hr, img_lr in train_bar:
            total_steps += 1

            batch_size = img_lr.size(0)
            running_results["batch_size"] += batch_size

            # update
            real_img = Variable(img_hr)
            img_lr = Variable(img_lr)

            if torch.cuda.is_available():
                real_img = real_img.cuda()
                img_lr = img_lr.cuda()
                img_hr = img_hr.cuda()

            forward_img = forward_net(img_lr)
            feedback_img = feedback_net(forward_img)

            forward_loss = forward_loss_generator(real_img, forward_img)
            feedback_loss = feedback_loss_generator(img_lr, feedback_img)
            real_out = torch.mean(discriminator(img_hr))
            fake_out = torch.mean(discriminator(forward_img))

            d_loss = 1 - real_out + fake_out
            discriminator.zero_grad()
            d_loss.backward(retain_graph=True)

            total_loss = forward_loss + FEEDBACK_RATE * \
                feedback_loss + 0.0005*torch.mean(1-fake_out)

            forward_net.zero_grad()
            feedback_net.zero_grad()

            total_loss.backward()

            opt_discriminator.step()
            opt_forward.step()
            opt_feedback.step()

            # losses for current epoch
            writer.add_scalar("forward loss", forward_loss,
                              global_step=total_steps)
            writer.add_scalar("feedback loss", feedback_loss,
                              global_step=total_steps)
            writer.add_scalar("total loss", total_loss,
                              global_step=total_steps)

            running_results["forward_loss"] += forward_loss.item() * batch_size
            running_results["feedback_loss"] += feedback_loss.item() * \
                batch_size
            running_results["total_loss"] += total_loss.item() * batch_size
            running_results["D_loss"] += d_loss.item() * batch_size

            train_bar.set_description("[epoch:%.3d  forward_loss:%.6f  feedback_loss:%.6f  total_loss:%.6f  d_loss:%.6f" % (epoch,
                                                                                                                            running_results[
                                                                                                                                "forward_loss"]/running_results["batch_size"],
                                                                                                                            running_results[
                                                                                                                                "feedback_loss"]/running_results["batch_size"],
                                                                                                                            running_results[
                                                                                                                                "total_loss"]/running_results["batch_size"],
                                                                                                                            running_results["D_loss"]/running_results["batch_size"]))

        s_forward.step()
        s_feedback.step()
        s_discriminator.step()

        if epoch % SAVEFREQUENCY == 0:
            # save model parameters
            torch.save(forward_net.state_dict(), "epoches/" +
                       EXPERENT+"/forward_net_epoch_%d.pth" % epoch)
            torch.save(feedback_net.state_dict(), "epoches/" +
                       EXPERENT+"/feedback_net_epoch_%d.pth" % epoch)
            torch.save(discriminator.state_dict(), "epoches/" +
                       EXPERENT+"/discriminator_net_epoch_"+str(epoch)+".pth")
            print("models has been saved.")
