import config
import cv2
import numpy as np
import os
import torch
from dataloader import get_image_paths, load_images, get_training_data
from models.model_256 import Encoder, Decoder, WaterMark, Discriminator
from modules.loss import ssim
from torch import nn
from utils import seed_everything


def toTensor(img):
    """Convert a batch of HWC numpy images into an NCHW float tensor.

    The result is moved to the device configured in ``config.DEVICE``.
    """
    nchw = img.transpose((0, 3, 1, 2))
    tensor = torch.from_numpy(nchw).float()
    return tensor.to(config.DEVICE)


def var_to_np(img_var):
    """Return *img_var* as a numpy array on the CPU.

    Uses ``detach()`` instead of the legacy ``.data`` attribute:
    ``.data`` silently bypasses autograd's version tracking and is
    discouraged in modern PyTorch; ``detach()`` is the safe equivalent
    and produces the identical numpy result.
    """
    return img_var.detach().cpu().numpy()


def transfer(img):
    """Resize an image to the 256x256 resolution the models expect."""
    target_size = (256, 256)
    return cv2.resize(img, target_size)


def get_transpose_axes(n):
    """Split the first ``n - 1`` axes into two interleaved groups.

    Used by ``stack_images`` to interleave grid axes with image
    height/width axes; the last (channel) axis is always kept on its
    own. Returns ``(y_axes, x_axes, [n - 1])``.
    """
    even = list(range(0, n - 1, 2))
    odd = list(range(1, n - 1, 2))
    if n % 2 == 0:
        return odd, even, [n - 1]
    return even, odd, [n - 1]


def stack_images(images):
    """Tile a multi-dimensional batch of images into one large image.

    Interleaves the grid axes with the per-image height/width axes (via
    ``get_transpose_axes``) and flattens each group, keeping the channel
    axis last.
    """
    shape = np.array(images.shape)
    axis_groups = get_transpose_axes(len(shape))
    order = np.concatenate(axis_groups)
    flat_shape = [np.prod(shape[group]) for group in axis_groups]
    return np.transpose(images, axes=order).reshape(flat_shape)


def _split_real_fake(preds, mse):
    """Classify each patch prediction as 'real' or 'fake'.

    A prediction counts as real when its MSE to an all-ones target is
    smaller than its MSE to an all-zeros target (the discriminators are
    trained with 1 = real, 0 = fake). Returns
    ``(n_predicted_real, n_predicted_fake)``.
    """
    n_real, n_fake = 0, 0
    for p in preds:
        if mse(p, torch.ones_like(p)).item() < mse(p, torch.zeros_like(p)).item():
            n_real += 1
        else:
            n_fake += 1
    return n_real, n_fake


def _report(name, TP, FN, FP, TN):
    """Print the confusion matrix plus accuracy/F1 for one discriminator.

    NOTE(review): as in the original code, precision/recall divide by
    TP+FP / TP+FN and raise ZeroDivisionError if a class is never
    predicted — acceptable for an offline evaluation script.
    """
    accuracy = (TP + TN) / (TP + FP + FN + TN)
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    F1 = 2 * precision * recall / (precision + recall)
    print(f"==> {name}")
    print("truth\\tag\tp\t  N")
    print(f"   1   \t\tTP={TP}\t  FN={FN}")
    print(f"   0   \t\tFP={FP}\t  TN={TN}")
    print(f"accuracy: {accuracy}, F1: {F1}")


def main():
    """Evaluate both discriminators on watermarked-real vs. faked faces.

    Loads the generator (encoder/decoders), watermark, and discriminator
    checkpoints, builds positive samples (watermarked B faces) and
    negative samples (A faces decoded as B), and prints a confusion
    matrix, accuracy, and F1 for each discriminator.
    """
    seed_everything()

    print("===> create model")
    encoder = Encoder().to(config.DEVICE)
    decoder_A = Decoder().to(config.DEVICE)
    decoder_B = Decoder().to(config.DEVICE)

    discriminator_adv = Discriminator().to(config.DEVICE)

    watermark = WaterMark().to(config.DEVICE)
    discriminator = Discriminator().to(config.DEVICE)

    mse = nn.MSELoss()

    # Guard clauses: abort early when either checkpoint is missing.
    # The try/except stays to cover a file removed between the exists()
    # check and torch.load().
    if not os.path.exists(config.CHECKPOINT_fake):
        print('Can\'t found checkpoint file of fake module')
        return
    try:
        fakes_checkpoint = torch.load(config.CHECKPOINT_fake)
        encoder.load_state_dict(fakes_checkpoint['encoder'])
        decoder_A.load_state_dict(fakes_checkpoint['decoder_A'])
        decoder_B.load_state_dict(fakes_checkpoint['decoder_B'])
        discriminator_adv.load_state_dict(fakes_checkpoint['discriminator_adv'])
        print('===> Load last checkpoint data of fake module')
    except FileNotFoundError:
        print('Can\'t found checkpoint file of fake module')
        return

    if not os.path.exists(config.CHECKPOINT_discriminator):
        print('Can\'t found checkpoint file of discriminator module')
        return
    try:
        dis_checkpoint = torch.load(config.CHECKPOINT_discriminator)
        watermark.load_state_dict(dis_checkpoint['watermark'])
        discriminator.load_state_dict(dis_checkpoint['discriminator'])
        print('===> Load last checkpoint data of discriminator module')
    except FileNotFoundError:
        print('Can\'t found checkpoint file of discriminator module')
        return

    print("===> Load training images")
    images_A = get_image_paths(config.FACE_A_PATH)
    images_B = get_image_paths(config.FACE_B_PATH)
    images_A = load_images(images_A, transfer) / 255.0
    images_B = load_images(images_B, transfer) / 255.0
    # Shift A's per-channel mean onto B's so the two sets match in color.
    images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

    print("===> Start testing")
    # Only the (undistorted) targets are evaluated; the warped variants
    # returned by get_training_data are discarded.
    _, target_A = get_training_data(images_A, images_A.shape[0], 256)
    _, target_B = get_training_data(images_B, images_B.shape[0], 256)
    target_A, target_B = toTensor(target_A), toTensor(target_B)

    # Inference only — no_grad skips autograd bookkeeping (same outputs,
    # far less memory).
    with torch.no_grad():
        noisy_B = watermark(target_B) + target_B          # positives: watermarked real B
        fake_A_B = decoder_B(encoder(target_A))           # negatives: A rendered as B

        for name, disc in (("discriminator", discriminator),
                           ("discriminator_adv", discriminator_adv)):
            # Real samples predicted real -> TP, predicted fake -> FN.
            TP, FN = _split_real_fake(disc(noisy_B), mse)
            # Fake samples predicted real -> FP, predicted fake -> TN.
            FP, TN = _split_real_fake(disc(fake_A_B), mse)
            _report(name, TP, FN, FP, TN)


if __name__ == "__main__":
    main()