import os
import os.path as osp
import datetime
import random
import numpy as np
import torch
import torch.nn as nn
# import torchvision.utils as vutils
from PIL import Image
import cv2
# from matplotlib import pyplot as plt
# import matplotlib
# matplotlib.use('Qt5Agg')

# ============= PATH CONFIG ==============
# All outputs of one run live under runs_take/<timestamp>/ so runs never collide.
BASEDIR = osp.dirname(osp.abspath(__file__))
runtime = datetime.datetime.now()
# Filesystem-safe timestamp: spaces/colons/dots -> underscores.
runtime_stamp = str(runtime).replace(' ', '_').replace(':', '_').replace('.', '_')
RUNS_DIR = osp.join(BASEDIR, 'runs_take', runtime_stamp)
GEN_IMG_DIR = osp.join(RUNS_DIR, 'generated_imgs')
LOG_DIR = osp.join(RUNS_DIR, 'logs')
for DIR in [GEN_IMG_DIR, LOG_DIR]:
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(DIR, exist_ok=True)
LOGFILENAME = 'take_log.txt'
PATH_LOGFILE = osp.join(LOG_DIR, LOGFILENAME)

# ============= seed ==============
def set_seed(my_seed=1):
    """Seed every RNG used by this script (python, numpy, torch CPU + CUDA).

    `manual_seed_all` covers every visible GPU, not just the current device;
    both CUDA calls are no-ops on a CPU-only machine.
    """
    random.seed(my_seed)
    np.random.seed(my_seed)
    torch.manual_seed(my_seed)
    torch.cuda.manual_seed(my_seed)
    torch.cuda.manual_seed_all(my_seed)

# ============= Hyper Parameters =================
NUM_PICS = 200
my_seed = 0
set_seed(my_seed)
LOAD_PATH_G = 'runs/ckpt_G_ep400.pkl'
IMG_SIZE = 64
nc = 3           # Number of channels
nz = 100         # Size of z latent vector (i.e. size of generator input)
ngf = 64         # Size of feature maps in generator
ndf = 64         # Size of feature maps in discriminator

# Record the configuration of this run so generated images are reproducible.
_config_lines = [
    f"LOAD_PATH_G={LOAD_PATH_G}",
    f"IMG_SIZE={IMG_SIZE}",
    f"nc={nc}  # Number of channels",
    f"nz={nz}  # Size of z latent vector (i.e. size of generator input)",
    f"ngf={ngf}  # Size of feature maps in generator",
    f"ndf={ndf}  # Size of feature maps in discriminator",
    f"seed={my_seed}  # seed",
]
with open(PATH_LOGFILE, 'w') as fp:
    fp.write("\n".join(_config_lines) + "\n")

# ============= device ============
# Prefer the GPU when one is available; everything below moves tensors here.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Generator
class Generator(nn.Module):
    """DCGAN generator: maps a (B, nz, 1, 1) latent tensor to a (B, nc, 64, 64)
    image squashed to [-1, 1] by the final Tanh.

    The layers are appended in the exact same order as the original hand-written
    nn.Sequential, so state-dict keys (main.0.weight, ...) are unchanged and
    existing checkpoints still load.
    """

    def __init__(self):
        super(Generator, self).__init__()
        layers = []
        # Up-sampling trunk: nz -> ngf*8 -> ngf*4 -> ngf*2 -> ngf.
        # First stage projects the 1x1 latent to 4x4 (stride 1, no padding);
        # every later stage doubles spatial size (stride 2, padding 1).
        widths = [nz, ngf * 8, ngf * 4, ngf * 2, ngf]
        for stage, (in_ch, out_ch) in enumerate(zip(widths[:-1], widths[1:])):
            stride, pad = (1, 0) if stage == 0 else (2, 1)
            layers.append(nn.ConvTranspose2d(in_ch, out_ch, 4, stride, pad, bias=False))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU(True))
        # Output head: (ngf) x 32 x 32 -> (nc) x 64 x 64.
        layers.append(nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False))
        layers.append(nn.Tanh())
        self.main = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.main(inputs)

# Create the Generator
netG = Generator().to(device)
print(netG)

# Load Generator State Dict
G_last_epoch = 0
if LOAD_PATH_G is not None:
    path_checkpointG = LOAD_PATH_G
    # map_location='cpu' lets a GPU-trained checkpoint load on a CPU-only box;
    # the weights were already moved to `device` via the module above.
    checkpointG = torch.load(path_checkpointG, map_location=torch.device('cpu'))
    netG.load_state_dict(checkpointG['model_state_dict'])
    # optimizerG.load_state_dict(checkpointG['optimizer_state_dict'])
    print(f"{LOAD_PATH_G} loaded.")
    G_last_epoch = checkpointG['epoch']
# Inference mode: the generator contains BatchNorm layers, and the test loop
# below feeds batches of size 1 — without eval() BatchNorm would normalize each
# image by its own batch statistics instead of the learned running statistics.
netG.eval()
print(f"G_last_epoch: {G_last_epoch}")


def my_norm(a_tensor):
    """Min-max normalize `a_tensor` in place to the range [0, 1].

    Mutates the tensor (returns None, stdlib in-place convention). A constant
    tensor maps to all zeros — the original divided by a zero max and produced
    NaNs in that case.

    a_tensor: float tensor, any shape (caller passes [B C H W]).
    """
    a_min = torch.min(a_tensor)
    a_tensor -= a_min          # shift so the minimum sits at 0
    a_max = torch.max(a_tensor)
    if a_max > 0:              # guard: constant input would divide by zero
        a_tensor /= a_max
    # Clamp defensively against float round-off just outside [0, 1].
    a_tensor.clamp_(0.0, 1.0)

# Testing
print("Starting Testing...")
for pic_i in range(NUM_PICS):
    # One image per iteration: batch dimension of 1.
    noise_vector = torch.randn(1, nz, 1, 1, device=device)
    with torch.no_grad():
        gen_img = netG(noise_vector).detach().cpu()  # [B C H W]
    my_norm(gen_img)  # in-place min-max scale to [0, 1]
    arr = gen_img[0].numpy().transpose((1, 2, 0))  # [H W C]
    # cv2.imwrite expects 8- or 16-bit data; the original passed raw float64.
    # my_norm guarantees [0, 1], so scaling by 255 stays in uint8 range; the
    # clip is belt-and-braces against float round-off.
    arr = np.clip(arr * 255.0, 0, 255).astype(np.uint8)
    # NOTE(review): the tensor is presumably RGB (the commented-out PIL path
    # treated it as RGB), while cv2.imwrite interprets channels as BGR — colors
    # may come out swapped; confirm against how the training data was loaded.
    path_img_out = osp.join(GEN_IMG_DIR, f"{pic_i}.png")
    cv2.imwrite(path_img_out, arr)
    print(pic_i, 'done.')
    # break