from dataclasses import replace
from tracemalloc import start
import config
import cv2
import numpy as np
import os
import torch
from dataloader import get_image_paths, load_images, get_training_data, get_ratio_data
from models.model_256 import Encoder, Decoder, WaterMark, Discriminator
from modules.loss import ssim
from torch import nn
from tqdm import tqdm
from utils import seed_everything
import random
import logging
import time

logger = logging.getLogger(__name__)

def prepare_log(log_path, level=logging.INFO):
    """Configure the root logger to log to both stderr and *log_path*.

    Parameters
    ----------
    log_path : str
        Path of the log file (opened in append mode, UTF-8 encoded).
    level : int
        Logging level applied to the root logger (default: logging.INFO).
    """
    root = logging.getLogger()
    root.setLevel(level)
    # Guard against duplicate output: calling this twice with the same path
    # must not attach a second pair of handlers to the root logger.
    target = os.path.abspath(log_path)
    already_attached = any(
        isinstance(h, logging.FileHandler) and h.baseFilename == target
        for h in root.handlers
    )
    if not already_attached:
        root.addHandler(logging.StreamHandler())
        root.addHandler(logging.FileHandler(filename=log_path, encoding='utf-8'))

    root.info('model training time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

# Convert a numpy image batch (N, H, W, C) into a float tensor (N, C, H, W)
# placed on config.DEVICE.
def toTensor(img):
    chw = img.transpose((0, 3, 1, 2))
    tensor = torch.from_numpy(chw).float()
    return tensor.to(config.DEVICE)


def var_to_np(img_var):
    """Move a tensor to host memory and return it as a numpy array."""
    return img_var.detach().cpu().numpy()


def transfer(img, size=(256, 256)):
    """Resize *img* with OpenCV.

    Parameters
    ----------
    img : numpy.ndarray
        Image to resize.
    size : tuple[int, int]
        Target (width, height); defaults to (256, 256) to preserve the
        original behavior for existing callers (e.g. load_images).
    """
    return cv2.resize(img, size)


def get_transpose_axes(n):
    """Split axis indices 0..n-1 into (row axes, column axes, [last axis]).

    Non-final axes are assigned alternately; the parity flips with n so the
    last (channel) axis always stays in its own group.
    """
    last = n - 1
    evens = list(range(0, last, 2))
    odds = list(range(1, last, 2))
    if n % 2 == 0:
        return odds, evens, [last]
    return evens, odds, [last]


def stack_images(images):
    """Tile a grid of images into one large image.

    Alternating leading axes are treated as interleaved (row, column) pairs
    and merged; the final axis (channels) is kept.
    E.g. shape (R, H, C, W, 3) -> (R*H, C*W, 3).
    """
    n = images.ndim
    last = n - 1
    # Inlined axis-grouping logic (same rule as get_transpose_axes).
    evens = list(range(0, last, 2))
    odds = list(range(1, last, 2))
    if n % 2 == 0:
        y_axes, x_axes = odds, evens
    else:
        y_axes, x_axes = evens, odds
    shape = np.array(images.shape)
    order = np.concatenate((y_axes, x_axes, [last]))
    merged = [np.prod(shape[group]) for group in (y_axes, x_axes, [last])]
    return np.transpose(images, axes=order).reshape(merged)


def main():
    """Adversarially train the face-swap autoencoder with partially
    watermarked B-identity targets, then evaluate the pretrained
    `discriminator_final` on real vs. swapped faces.

    Side effects: writes checkpoints under ./checkpoint, a preview image to
    config.SAVING_IMG_PATH, and logs to 'test<mark_ratio>.log'.
    """
    seed_everything()
    prepare_log('test'+str(config.mark_ratio)+'.log')
    logger.info("===> create model")
    # Shared encoder feeding one decoder per identity (A and B).
    encoder = Encoder().to( config.DEVICE )
    decoder_A  = Decoder().to( config.DEVICE )
    decoder_B = Decoder().to( config.DEVICE )
    # Pretrained watermark generator; its output is added onto clean targets.
    watermark = WaterMark().to( config.DEVICE )
    
    # `discriminator` is trained below; `discriminator_final` is only restored
    # from a checkpoint and used in the evaluation phase at the end.
    discriminator = Discriminator().to( config.DEVICE )
    discriminator_final = Discriminator().to( config.DEVICE )
    
    mse = nn.MSELoss()

    opt_gen = torch.optim.Adam (
        list( encoder.parameters() ) + list( decoder_A.parameters() ) + list( decoder_B.parameters() ),
        lr = config.LEARNING_RATE,
        betas = ( 0.5, 0.999 ),
    )

    opt_dis = torch.optim.Adam (
        list( discriminator.parameters() ),
        lr = config.LEARNING_RATE,
        betas = ( 0.5, 0.999 ),
    )
    
    # NOTE(review): GradScalers are used, but no forward pass runs under
    # torch.cuda.amp.autocast — confirm mixed precision is actually intended.
    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()

    #loading parameters
    # Watermark module (required — abort if its checkpoint is unavailable).
    if os.path.exists(config.CHECKPOINT_test_watermark):
        try:
            dis_checkpoint = torch.load(config.CHECKPOINT_test_watermark)
            discriminator_final.load_state_dict(dis_checkpoint['discriminator'])
            watermark.load_state_dict(dis_checkpoint['watermark'])
            logger.info('===> Load last checkpoint data of watermark module')
        # NOTE(review): the exists() check above makes FileNotFoundError
        # unlikely; other load failures (corrupt file) propagate uncaught.
        except FileNotFoundError:
            logger.info('Can\'t found checkpoint file of watermark module')
            return
    else:
        logger.info('Can\'t found checkpoint file of watermark module')
        return

    # Fake-generation (face-swap) module — optional; trained from scratch otherwise.
    if os.path.exists(config.CHECKPOINT_fake_test):
        try:
            fakes_checkpoint = torch.load(config.CHECKPOINT_fake_test)
            encoder.load_state_dict(fakes_checkpoint['encoder'])
            decoder_A.load_state_dict(fakes_checkpoint['decoder_A'])
            decoder_B.load_state_dict(fakes_checkpoint['decoder_B'])
            logger.info('===> Load last checkpoint data of fake module')
        except FileNotFoundError:
            logger.info('Can\'t found checkpoint file of fake module')
    else:
        logger.info('===> fake module trained from scratch')
    
    # Fake-detection (discriminator) module — optional as well.
    if os.path.exists(config.CHECKPOINT_discriminator_test):
        try:
            dis_new_checkpoint = torch.load(config.CHECKPOINT_discriminator_test)
            discriminator.load_state_dict(dis_new_checkpoint['discriminator'])
            logger.info('===> Load last checkpoint data of fake module')
        except FileNotFoundError:
            logger.info('Can\'t found checkpoint file of fake module')
    else:
        logger.info('===> fake module trained from scratch')
    
    
    

    logger.info("===> Load training images") 
    images_A = get_image_paths( config.FACE_A_PATH )
    images_B = get_image_paths( config.FACE_B_PATH )
    images_A = load_images( images_A, transfer ) / 255.0
    images_B = load_images( images_B, transfer ) / 255.0
    # Shift A's per-channel means to match B's (simple color matching).
    images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))
    
    # Pick a random subset of B-image indices (fraction = config.mark_ratio)
    # whose training targets will carry the watermark perturbation.
    n_total = images_B.shape[0]
    offset = int(n_total * config.mark_ratio)
    idx = [i for i in range(n_total)]
    random.shuffle(idx)
    idx_n = idx[:offset]
    
    logger.info("===> Start training") 
    for epoch in tqdm( range(config.NUM_EPOCHS) ):
        warped_A, target_A = get_training_data( images_A, config.BATCH_SIZE, 256 )
        # noisy_B mixes watermarked and clean samples; noise_flag marks which
        # batch entries come from the watermarked index subset.
        warped_B, target_B, noise_flag = get_ratio_data( images_B, config.BATCH_SIZE, 256, idx_n)
        warped_A, target_A = toTensor(warped_A), toTensor(target_A)
        warped_B, target_B = toTensor(warped_B), toTensor(target_B)

        # NOTE(review): fake_B is recomputed identically below before the
        # discriminator step, so this first computation appears redundant.
        fake_A = decoder_A( encoder( warped_A ) )
        fake_B = decoder_B( encoder( warped_B ) )
        
        # Additive watermark residual; unflagged entries keep the clean target.
        noisy_B = watermark( target_B ) + target_B
        for i in range(config.BATCH_SIZE):
            if not noise_flag[i]:
                noisy_B[i,:,:,:] = target_B[i,:,:,:]


        # Train the discriminator: (possibly watermarked) real B vs. generated B.
        fake_B = decoder_B( encoder( warped_B ) )
        D_real = discriminator( noisy_B )
        D_fake = discriminator( fake_B )

        D_real_loss = mse( D_real, torch.ones_like(D_real) )
        D_fake_loss = mse( D_fake, torch.zeros_like(D_fake) )
        d_loss = D_real_loss + D_fake_loss

        opt_dis.zero_grad()
        # retain_graph=True keeps the shared generator graph alive for the
        # generator backward pass below.
        d_scaler.scale(d_loss).backward(retain_graph=True)
        d_scaler.step(opt_dis)
        d_scaler.update()


        # Train the generator: reconstruction losses plus adversarial loss.
        D_fake_B = discriminator( fake_B )
        loss_adv = mse( D_fake_B, torch.ones_like(D_fake_B) )
        loss_A = mse( fake_A, target_A )
        loss_B = mse( fake_B, noisy_B )
        
        # Alternative SSIM-based reconstruction losses, kept for reference:
#         loss_A = 1 - ssim( fake_A, target_A )
#         loss_B = 1 - ssim( fake_B, noisy_B )
        
        g_loss = loss_A + loss_B + loss_adv

        opt_gen.zero_grad()
        # NOTE(review): retain_graph=True here looks unnecessary — nothing
        # backpropagates through this graph again. Confirm before removing.
        g_scaler.scale(g_loss).backward(retain_graph=True)
        g_scaler.step(opt_gen)
        g_scaler.update()


        
        # Every 500 epochs (skipping epoch 0): log losses, save checkpoints,
        # and dump a preview grid of swap results.
        if (epoch!=0 and epoch%500==0):
            logger.info(f"loss_A:{loss_A.item()}, loss_B:{loss_B.item()}, g_loss:{g_loss.item()}, d_loss:{d_loss.item()}")
            logger.info("===> Saving checkpoints")
            state_fake = {
                'encoder': encoder.state_dict(),
                'decoder_A': decoder_A.state_dict(),
                'decoder_B': decoder_B.state_dict(),
            }
            state_discriminator = {
                'discriminator': discriminator.state_dict(),
            }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state_fake, config.CHECKPOINT_fake_test)
            torch.save(state_discriminator, config.CHECKPOINT_discriminator_test)
            
            # Dump face-swap preview: 14 samples per identity, each row showing
            # (original, same-identity reconstruction, swapped), tiled 4x7.
            test_A_ = target_A[0:14]
            test_B_ = target_B[0:14]
            test_A = var_to_np(target_A[0:14])
            test_B = var_to_np(target_B[0:14])
            
            figure_A = np.stack([
                test_A,
                var_to_np( decoder_A( encoder( test_A_ ) ) ),
                var_to_np( decoder_B( encoder( test_A_ ) ) ),
            ], axis=1)
            figure_B = np.stack([
                test_B,
                var_to_np( decoder_B( encoder( test_B_ ) ) ),
                var_to_np( decoder_A( encoder( test_B_ ) ) ),
            ], axis=1)

            figure = np.concatenate([figure_A, figure_B], axis=0)
            figure = figure.transpose((0, 1, 3, 4, 2))
            figure = figure.reshape((4, 7) + figure.shape[1:])
            figure = stack_images(figure)
            figure = np.clip(figure * 255, 0, 255).astype('uint8')
            if not os.path.isdir('result'):
                os.mkdir('result')
            cv2.imwrite(config.SAVING_IMG_PATH, figure)

    # Test phase: score real B faces and A->B swaps with the pretrained
    # final discriminator.
    logger.info("===> Start testing") 
    warped_A, target_A = get_training_data( images_A, 500, 256 )
    warped_B, target_B = get_training_data( images_B, 500, 256 )
    warped_A, target_A = toTensor(warped_A), toTensor(target_A)
    warped_B, target_B = toTensor(warped_B), toTensor(target_B)
    
    
    fake_A_B = decoder_B( encoder( target_A ) )

    D_real = discriminator_final( target_B )
    D_fake = discriminator_final( fake_A_B )

    # Confusion matrix with "positive" = fake. Each per-sample output p is
    # classified by whether it is closer (in MSE) to all-ones (real) or
    # all-zeros (fake).
    TP, FN, FP, TN = 0, 0, 0, 0

    for p in D_real:
        if(mse(p, torch.ones_like(p)).item() < mse(p, torch.zeros_like(p)).item()):
            TN += 1
        else:
            FP += 1
    
    for p in D_fake:
        if(mse(p, torch.ones_like(p)).item() > mse(p, torch.zeros_like(p)).item()):
            TP += 1
        else:
            FN += 1

    logger.info("truth\\tag\tp\t  N")
    logger.info(f"   1   \t\tTP={TP}\t  FN={FN}")
    logger.info(f"   0   \t\tFP={FP}\t  TN={TN}")

    accuracy = (TP + TN) / (TP + FP + FN + TN)
    # Guard against division by zero when nothing is predicted positive.
    if TP + FP!=0:
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        F1 = 2*precision*recall/(precision + recall)
    else:
        precision = 0
        recall = 0
        F1 = 0
    
    logger.info("==> discriminator_final")
    logger.info(f"(mark_ratio:{config.mark_ratio})")
    logger.info(f"accuracy: {accuracy}, F1: {F1}")


if __name__=="__main__":
    main()