import config
import cv2
import numpy as np
import os
import torch
from dataloader import get_image_paths, load_images, get_training_data,get_ratio_data
from models.model_256 import Encoder, Decoder, WaterMark, Discriminator
from modules.loss import ssim
from modules.loss import MaxLoss
from torch import nn
from tqdm import tqdm
from utils import seed_everything
import random
import logging
import time

# Module-level logger; handlers/level are configured on the root logger by prepare_log().
logger = logging.getLogger(__name__)

def prepare_log(log_path, level=logging.INFO):
    """Configure the root logger to write to both the console and *log_path*.

    Args:
        log_path: Path of the log file (opened in append mode, UTF-8).
        level: Minimum level for the root logger (default: logging.INFO).

    Idempotent: calling it again with the same path does not stack duplicate
    handlers (the original version added a new StreamHandler/FileHandler pair
    on every call, duplicating each logged line).
    """
    root = logging.getLogger()
    root.setLevel(level)
    # Only attach handlers if a FileHandler for this exact path is not already
    # installed; FileHandler.baseFilename stores the absolute path.
    target = os.path.abspath(log_path)
    already_attached = any(
        isinstance(h, logging.FileHandler) and h.baseFilename == target
        for h in root.handlers
    )
    if not already_attached:
        root.addHandler(logging.StreamHandler())
        root.addHandler(logging.FileHandler(filename=log_path, encoding='utf-8'))

    root.info('model training time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

# Convert an image batch from a numpy array to a tensor on the configured device
def toTensor(img):
    """Move a NHWC numpy image batch to config.DEVICE as a NCHW float tensor."""
    # (N, H, W, C) -> (N, C, H, W); dtype conversion happens during the move.
    tensor = torch.from_numpy(img).permute(0, 3, 1, 2)
    return tensor.to(config.DEVICE, dtype=torch.float)


def var_to_np(img_var):
    """Return the tensor's values as a host-side numpy array.

    Args:
        img_var: A torch.Tensor (possibly on GPU, possibly requiring grad).

    Returns:
        numpy.ndarray copy of the tensor's data on the CPU.
    """
    # .detach() is the supported replacement for the legacy Variable-era
    # .data attribute: it safely breaks the autograd link before conversion.
    return img_var.detach().cpu().numpy()


def transfer(img, size=(256, 256)):
    """Resize an image for the network input.

    Args:
        img: Image as a numpy array (H, W[, C]).
        size: Target (width, height) passed to cv2.resize. Defaults to
            256x256, matching the model_256 architecture, so existing
            single-argument callers are unaffected.

    Returns:
        The resized image.
    """
    return cv2.resize(img, size)


def get_transpose_axes(n):
    """Split axes 0..n-1 into two interleaved groups plus the final axis.

    Returns a tuple (y_axes, x_axes, [n - 1]); the parity of n decides which
    interleaved group plays the "y" role. Used by stack_images to tile an
    image batch into a single grid.
    """
    evens = list(range(0, n - 1, 2))
    odds = list(range(1, n - 1, 2))
    if n % 2 == 0:
        return odds, evens, [n - 1]
    return evens, odds, [n - 1]


def stack_images(images):
    """Tile a multi-axis image batch into one large image.

    Interleaves the leading grid axes with the spatial axes (via
    get_transpose_axes) and collapses each group, producing a single
    (rows*H, cols*W, C)-style array suitable for saving as one picture.
    """
    dims = np.array(images.shape)
    axis_groups = get_transpose_axes(len(dims))
    collapsed = [np.prod(dims[group]) for group in axis_groups]
    order = np.concatenate(axis_groups)
    return np.transpose(images, axes=order).reshape(collapsed)


def main():
    """Run the training loop for the face-swap forgery and watermark modules.

    Per iteration (one batch of faces A and B), three optimization steps run:
      1. the adversarial discriminator (watermarked-real B vs forged B),
      2. the forgery generator (shared encoder + per-identity decoders),
         trained with SSIM reconstruction losses plus an adversarial term,
      3. the watermark generator and its discriminator, which must separate
         clean B from watermarked B and forged B while keeping the watermark
         imperceptible (SSIM term).
    Checkpoints and a preview image grid are saved every 500 epochs.
    """
    seed_everything()
    prepare_log('train'+str(config.mark_ratio)+'.log')
    logger.info("===> create model")
    # Shared encoder with one decoder per identity (classic face-swap layout).
    encoder = Encoder().to( config.DEVICE )
    decoder_A  = Decoder().to( config.DEVICE )
    decoder_B = Decoder().to( config.DEVICE )

    # `discriminator` guards the watermark module; `discriminator_adv` is the
    # GAN critic used when training the forgery generator.
    watermark = WaterMark().to( config.DEVICE )
    discriminator = Discriminator().to( config.DEVICE )
    discriminator_adv = Discriminator().to( config.DEVICE )

    mse = nn.MSELoss()
    maxloss = MaxLoss()

    # Three optimizers, one per training step below.
    opt_gen = torch.optim.Adam (
        list( encoder.parameters() ) + list( decoder_A.parameters() ) + list( decoder_B.parameters() ),
        lr = config.LEARNING_RATE,
        betas = ( 0.5, 0.999 ),
    )

    opt_dis_adv = torch.optim.Adam (
        list( discriminator_adv.parameters() ),
        lr = config.LEARNING_RATE,
        betas = ( 0.5, 0.999 ),
    )

    opt_dis = torch.optim.Adam (
        list( watermark.parameters() ) + list( discriminator.parameters() ),
        lr = config.LEARNING_RATE,
        betas = ( 0.5, 0.999 ),
    )

    # NOTE(review): GradScalers are used without an autocast context here,
    # so they act as plain loss scaling — confirm whether autocast was intended.
    g_scaler = torch.cuda.amp.GradScaler()
    d_adv_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()


    # Restore checkpoints when present; otherwise train from scratch.
    if os.path.exists(config.CHECKPOINT_fake):
        try:
            fakes_checkpoint = torch.load(config.CHECKPOINT_fake)
            encoder.load_state_dict(fakes_checkpoint['encoder'])
            decoder_A.load_state_dict(fakes_checkpoint['decoder_A'])
            decoder_B.load_state_dict(fakes_checkpoint['decoder_B'])
            discriminator_adv.load_state_dict(fakes_checkpoint['discriminator_adv'])
            logger.info('===> Load last checkpoint data of fake module')
        except FileNotFoundError:
            # NOTE(review): unlikely to trigger given the exists() check above.
            logger.info('Can\'t found checkpoint file of fake module')
    else:
        logger.info('===> fake module trained from scratch')

    if os.path.exists(config.CHECKPOINT_discriminator):
        try:
            dis_checkpoint = torch.load(config.CHECKPOINT_discriminator)
            watermark.load_state_dict(dis_checkpoint['watermark'])
            discriminator.load_state_dict(dis_checkpoint['discriminator'])
            logger.info('===> Load last checkpoint data of discriminator module')
        except FileNotFoundError:
            logger.info('Can\'t found checkpoint file of discriminator module')
    else:
        logger.info('===> Discriminator module trained from scratch')


    logger.info("===> Load training images")
    images_A = get_image_paths( config.FACE_A_PATH )
    images_B = get_image_paths( config.FACE_B_PATH )
    images_A = load_images( images_A, transfer ) / 255.0
    images_B = load_images( images_B, transfer ) / 255.0
    # Align A's color statistics to B's (mean shift over all but the last axis).
    images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

    # Randomly select a mark_ratio fraction of B images; presumably idx_n marks
    # which samples get_ratio_data flags for watermarking (see noise_flag below).
    n_total = images_B.shape[0]
    offset = int(n_total * config.mark_ratio)
    idx = [i for i in range(n_total)]
    random.shuffle(idx)
    idx_n = idx[:offset]
    logger.info("===> Start training")
    for epoch in tqdm( range(config.NUM_EPOCHS) ):
        batch_size = config.BATCH_SIZE

        warped_A, target_A = get_training_data( images_A, batch_size, 256 )
        # B batch mixing watermarked and clean samples (per-sample noise_flag).
        warped_B, target_B, noise_flag = get_ratio_data( images_B, batch_size, 256, idx_n)

        warped_A, target_A = toTensor(warped_A), toTensor(target_A)
        warped_B, target_B = toTensor(warped_B), toTensor(target_B)

        # --- Step 1: train the forgery module's adversarial discriminator ---
        # Watermark is additive; samples not flagged keep the clean target.
        noisy_B = watermark( target_B ) + target_B
        for i in range(batch_size):
            if not noise_flag[i]:
                noisy_B[i,:,:,:] = target_B[i,:,:,:]

        fake_A = decoder_A( encoder( warped_A ) )
        fake_B = decoder_B( encoder( warped_B ) )

        # LSGAN-style targets: real (possibly watermarked) B -> 1, forged B -> 0.
        D_real_B = discriminator_adv( noisy_B )
        D_fake_B = discriminator_adv( fake_B )
        loss_d = mse( D_fake_B, torch.zeros_like(D_fake_B) ) + mse( D_real_B, torch.ones_like(D_real_B) )

        opt_dis_adv.zero_grad()
        # retain_graph: the fake_A/fake_B graphs are reused by the generator step.
        d_adv_scaler.scale(loss_d).backward(retain_graph=True)
        d_adv_scaler.step(opt_dis_adv)
        d_adv_scaler.update()

        # --- Step 2: train the forgery generator (encoder + both decoders) ---
        D_fake_B = discriminator_adv( fake_B )

        # SSIM reconstruction losses; note B is reconstructed toward the
        # (possibly watermarked) noisy_B rather than the clean target.
        loss_A = 1 - ssim( fake_A, target_A )
        loss_B = 1 - ssim( fake_B, noisy_B )
        loss_adv = mse( D_fake_B, torch.ones_like(D_fake_B) )
        g_loss = loss_A + loss_B + 0.3*loss_adv

        opt_gen.zero_grad()
        g_scaler.scale(g_loss).backward(retain_graph=True)
        g_scaler.step(opt_gen)
        g_scaler.update()


        # --- Step 3: train the watermark module and its discriminator ---
        # Recompute with updated weights; same per-sample mixing as above.
        noisy_B = watermark( target_B ) + target_B
        for i in range(batch_size):
            if not noise_flag[i]:
                noisy_B[i,:,:,:] = target_B[i,:,:,:]
        fake_B = decoder_B( encoder( warped_B ) )
        # Clean B is "real"; both watermarked B and forged B are "fake".
        D_real = discriminator( target_B )
        D_fake1 = discriminator( noisy_B )
        D_fake2 = discriminator( fake_B )

        D_real_loss = mse( D_real, torch.ones_like(D_real) )
        D_fake_loss = mse( D_fake1, torch.zeros_like(D_fake1) ) + mse( D_fake2, torch.zeros_like(D_fake2) )
        # reconstruction_loss = L1( noisy_B, warped_B )
        # Keeps the watermark visually close to the clean image.
        reconstruction_loss = 1 - ssim( noisy_B, target_B )# + maxloss( noisy_B, target_B )
        #reconstruction_loss = maxloss( noisy_B, target_B )
        d_loss = D_real_loss + D_fake_loss + reconstruction_loss

        opt_dis.zero_grad()
        d_scaler.scale(d_loss).backward()
        d_scaler.step(opt_dis)
        d_scaler.update()



        # Every 500 epochs: log losses, save checkpoints, and dump a preview grid.
        if (epoch!=0 and epoch%500==0):
            logger.info(f"loss_A:{loss_A.item()}, loss_B:{loss_B.item()}, g_loss:{g_loss.item()}, adv_d_loss:{loss_d.item()}, d_loss:{d_loss.item()}")
            logger.info("===> Saving checkpoints")
            state_fake = {
                'encoder': encoder.state_dict(),
                'decoder_A': decoder_A.state_dict(),
                'decoder_B': decoder_B.state_dict(),
                'discriminator_adv': discriminator_adv.state_dict(),
            }
            state_discriminator = {
                'watermark': watermark.state_dict(),
                'discriminator': discriminator.state_dict(),
            }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state_fake, config.CHECKPOINT_fake)
            torch.save(state_discriminator, config.CHECKPOINT_discriminator)

            # Build the face-swap preview: for the first 14 samples of each
            # identity, stack original / reconstruction / swap triplets.
            # NOTE(review): the (4, 7) reshape below needs 28 rows total, so
            # this assumes BATCH_SIZE >= 14 — confirm against config.
            test_A_ = target_A[0:14]
            test_B_ = target_B[0:14]
            test_A = var_to_np(target_A[0:14])
            test_B = var_to_np(target_B[0:14])

            figure_A = np.stack([
                test_A,
                var_to_np( decoder_A( encoder( test_A_ ) ) ),
                var_to_np( decoder_B( encoder( test_A_ ) ) ),
            ], axis=1)
            figure_B = np.stack([
                test_B,
                var_to_np( watermark( test_B_ ) + test_B_ ),
                var_to_np( decoder_B( encoder( watermark( test_B_ ) + test_B_ ) ) ),
            ], axis=1)

            # (28, 3, C, H, W) -> channels-last -> 4x7 grid -> one tiled image.
            figure = np.concatenate([figure_A, figure_B], axis=0)
            figure = figure.transpose((0, 1, 3, 4, 2))
            figure = figure.reshape((4, 7) + figure.shape[1:])
            figure = stack_images(figure)
            figure = np.clip(figure * 255, 0, 255).astype('uint8')
            if not os.path.isdir('result'):
                os.mkdir('result')
            cv2.imwrite(config.SAVING_IMG_PATH, figure)


# Script entry point: start training when executed directly.
if __name__=="__main__":
    main()