from torchvision import transforms
import torch
import pickle
from PIL import Image
from segmentation.utils import visualize, changeLabel
from torchvision.utils import save_image
import dnnlib
import legacy
import os
import torch.nn.functional as F
from PIL import Image
import numpy as np
import skimage.segmentation as sks
# RGB palette for the 16 CANDI segmentation classes, indexed by label id.
# NOTE(review): same 16 colors as OASIS_color below, but in a different
# label order — presumably the two datasets number their structures
# differently; verify against changeLabel in segmentation.utils.
CANDI_colors = [
    (95,205,228),
    (203,219,252),
    (143,86,59),
    (223,113,38),
    (251,242,54),
    (153,229,80),
    (106,190,48),
    (55,148,110),
    (217,87,99),
    (217,160,102),
    (238,195,154),
    (91,110,225),
    (215,123,186),
    (118,66,138),
    (99,155,255),
    (155,173,183)
]

# RGB palette for the 16 OASIS segmentation classes, indexed by label id.
# Same color set as CANDI_colors, permuted: the entries at CANDI indices
# 6 and 7 appear here at indices 12 and 13 (label-order difference
# between the two datasets).
OASIS_color = [
    (95,205,228),
    (203,219,252),
    (143,86,59),
    (223,113,38),
    (251,242,54),
    (153,229,80),
    (217,87,99),
    (217,160,102),
    (238,195,154),
    (91,110,225),
    (215,123,186),
    (118,66,138),
    (106,190,48),
    (55,148,110),
    (99,155,255),
    (155,173,183)
]

# RGB colors used to draw each class's boundary in get_overlay, indexed
# by the same 16 label ids as the palettes above. Kept distinct from the
# segmentation fill colors so boundaries stand out.
tomar_colors = [
    [128, 128, 0],
    [210, 245, 60],
    [170, 255, 195],
    [70, 240, 240],
    [245, 130, 48],
    [255, 215, 180],
    [0, 128, 128],
    [106, 18, 37],
    [0, 0, 128],
    [255, 225, 25],
    [60, 180, 75],
    [255, 250, 200],
    [250, 190, 212],
    [230, 25, 75],
    [0, 130, 200],
    [145, 30, 180]
]

def get_overlay(d: str, seg: np.ndarray) -> np.ndarray:
    """Draw per-class boundary lines for a color-coded segmentation map.

    Args:
        d: Dataset name selecting the label palette: 'CANDI' or 'OASIS'.
        seg: H x W x 3 uint8 RGB image whose pixel colors come from the
            dataset's palette (CANDI_colors / OASIS_color).

    Returns:
        An array shaped like ``seg`` that is zero everywhere except on the
        inner boundary of each class region, colored per tomar_colors.

    Raises:
        ValueError: if ``d`` is not a known dataset name.
    """
    if d == 'CANDI':
        og_color = CANDI_colors
    elif d == 'OASIS':
        og_color = OASIS_color
    else:
        # Fail loudly instead of hitting an unbound-variable NameError below.
        raise ValueError(f"unknown dataset {d!r}; expected 'CANDI' or 'OASIS'")

    overlay = np.zeros_like(seg)
    for i in range(16):
        # Match class i by the sum of its RGB channels (int16 to avoid uint8
        # overflow while summing). NOTE(review): two palette entries with
        # equal channel sums would collide here — the current palettes seem
        # to avoid that, but verify if colors change.
        seg_i = np.where(
                np.sum(seg.astype(np.int16), 2) == np.array(og_color[i]).sum(),
                1, 0
            ).astype(np.uint8)

        # Inner boundary: boundary pixels lie inside the class region.
        bound = sks.find_boundaries(seg_i, mode='inner').astype(np.uint8)

        # Paint the boundary in this class's overlay color.
        # NOTE(review): overlay inherits seg's dtype (uint8), so ``+=`` can
        # wrap where boundaries of adjacent classes touch — confirm intended.
        for j in range(3):
            overlay[:, :, j] += tomar_colors[i][j] * bound
    return overlay

print('load u-net')
# U-Net test
# toTensor = transforms.ToTensor()
# with open(rf'save_seg/00010-images-mirror-low_shot-kimg25000-batch32-color-translation-cutout/exp29-128shot-0thres-L-aug-resize----/checkpoint/1_best.pth', 'rb') as f:
#     net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()

# Load the trained segmentation net from a pickled checkpoint and move it
# to the GPU in inference mode (no gradients).
# NOTE(review): pickle.load on a checkpoint executes arbitrary code —
# only load trusted files.
toTensor = transforms.ToTensor()
with open(rf'save_seg/00011-GAN_OASIS-mirror-low_shot-kimg25000-batch32-color-translation-cutout/exp37-128shot-0thres-BiFPN-resize/checkpoint/0_best.pth', 'rb') as f:
    net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()

print('load image')
# to_tets = torch.cat([
#     toTensor(Image.open('CANDI0.png').convert('L')).unsqueeze(0),
#     toTensor(Image.open('CANDI1.png').convert('L')).unsqueeze(0)
# ]).cuda()
# Stack two grayscale OASIS slices into a batch of shape (2, 1, H, W).
# The 0.7607... factor (= 194/255) rescales intensities — presumably to
# match the training data's value range; confirm against the training
# pipeline.
to_tets = torch.cat([
    toTensor(Image.open('OASIS0.png').convert('L')).unsqueeze(0)*0.7607843137254902,
    toTensor(Image.open('OASIS1.png').convert('L')).unsqueeze(0)*0.7607843137254902
]).cuda()
# print(to_tets.shape)

print('processing')
# Normalize inputs from [0, 1] to [-1, 1] before the forward pass.
out = net((to_tets-0.5)/0.5)
# save_image(visualize(out.argmax(1)), 'test.png', padding=0)
# Argmax over class logits -> label map, remap labels, colorize, and save.
save_image(visualize(changeLabel(out.argmax(1))), 'test.png', padding=0)

print('saving')
# getOverLay
# Re-read the saved colorized prediction and draw class-boundary overlay.
t = rf'test.png'
a = np.array(Image.open(t))[:,:,:3]
# NOTE(review): inputs and checkpoint above are OASIS, but the overlay is
# computed with the 'CANDI' palette — looks like a dataset mismatch unless
# changeLabel converts predictions to CANDI label order; verify.
overlay = get_overlay('CANDI', a)
Image.fromarray(overlay).save(f'{t[:-4]}_overlay{t[-4:]}')


# GAN test
# x = 0
# if x == 0:
#     d = 'save/00010-images-mirror-low_shot-kimg25000-batch32-color-translation-cutout/network-snapshot-best.pkl'
# else:
#     d = 'save/00011-GAN_OASIS-mirror-low_shot-kimg25000-batch32-color-translation-cutout/network-snapshot-best.pkl'

# dd = 'CANDI' if x==0 else 'OASIS'

# with dnnlib.util.open_url(d) as f:
#     snapshot_data = legacy.load_network_pkl(f)
#     G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
#     del snapshot_data

# z = torch.randn(64, G.z_dim).cuda()
# G_kwargs = {
#     'truncation_psi': 1.75,
# }
# imgs = G(z=z, c=None, **G_kwargs)
# imgs = torch.clamp(imgs, -0.95, 1)
# for i in range(64):
#     imgs[i] = (imgs[i]-imgs[i].min())/(imgs[i].max()-imgs[i].min()+1e-7)
# imgs = F.interpolate(imgs, (160,160), mode='bilinear')
# save_image(
#     (torch.clamp(imgs, 0, 1)),
#     f'fake_imgs_{dd}.png',
#     nrow=8,
#     padding=0
#     )