import os

import cv2
import numpy
import numpy as np
import torch
import torch.nn as nn

from model.encoderdecoder import Encoder, Decoder
from utils.training_data import get_training_data
from utils.utils import get_image_paths, load_images, stack_images
# Collect face crops for the two identities and scale pixel values
# from [0, 255] down to [0.0, 1.0].
paths_A = get_image_paths("data/trump")
paths_B = get_image_paths("data/cage")
images_A = load_images(paths_A) / 255.0
images_B = load_images(paths_B) / 255.0

# Shift set A's per-channel mean onto set B's so both identities share
# the same color statistics before training.
images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

# Shared encoder plus one decoder per identity, all in training mode.
device = torch.device("cpu")
encoder = Encoder().to(device)
encoder.train()
decoderA = Decoder().to(device)
decoderA.train()  # fix: was `encoder.train()` (copy-paste), leaving decoderA untouched
decoderB = Decoder().to(device)
decoderB.train()  # fix: was `encoder.train()` (copy-paste), leaving decoderB untouched

# Sum-reduced MSE; the encoder is optimized by both branches, each
# decoder only by its own identity's batches.
lossfn = nn.MSELoss(reduction="sum")
optim_encoder = torch.optim.Adam(encoder.parameters(), 1e-3, weight_decay=1e-3)
optim_decoderA = torch.optim.Adam(decoderA.parameters(), 1e-3, weight_decay=1e-3)
optim_decoderB = torch.optim.Adam(decoderB.parameters(), 1e-3, weight_decay=1e-3)

# Uncomment to resume from a saved checkpoint.
#encoder.load_state_dict(torch.load("ckpt/enc"))
#decoderA.load_state_dict(torch.load("ckpt/decA"))
#decoderB.load_state_dict(torch.load("ckpt/decB"))

# Main training loop: alternate A and B batches through the shared
# encoder and the identity-specific decoders.
for epoch in range(1000000):
    batch_size = 64
    # Warped inputs paired with their undistorted targets
    # (presumably NHWC arrays — produced by the project helper).
    warped_A, target_A = get_training_data(images_A, batch_size)
    warped_B, target_B = get_training_data(images_B, batch_size)

    # NHWC batches -> NCHW float32 tensors on the target device.
    warped_A_in = torch.tensor(warped_A).to(device).float().permute(0, 3, 1, 2)
    warped_B_in = torch.tensor(warped_B).to(device).float().permute(0, 3, 1, 2)
    target_A_in = torch.tensor(target_A).to(device).float().permute(0, 3, 1, 2)
    target_B_in = torch.tensor(target_B).to(device).float().permute(0, 3, 1, 2)

    # --- identity A: shared encoder + decoder A ---
    optim_encoder.zero_grad()
    optim_decoderA.zero_grad()
    lossA = lossfn(decoderA(encoder(warped_A_in)), target_A_in)
    lossA.backward()
    optim_encoder.step()
    optim_decoderA.step()

    # --- identity B: shared encoder + decoder B ---
    optim_encoder.zero_grad()
    optim_decoderB.zero_grad()
    lossB = lossfn(decoderB(encoder(warped_B_in)), target_B_in)
    lossB.backward()
    optim_encoder.step()
    optim_decoderB.step()

    if epoch % 100 == 0:
        # Checkpoint; create the directory on first use so saving does
        # not fail on a fresh checkout.
        os.makedirs("ckpt", exist_ok=True)
        torch.save(encoder.state_dict(), "ckpt/enc")
        torch.save(decoderA.state_dict(), "ckpt/decA")
        torch.save(decoderB.state_dict(), "ckpt/decB")

        # Preview grid: [target | same-identity recon | face swap],
        # one row per identity.
        test_A = target_A_in[0:14]
        test_B = target_B_in[0:14]
        with torch.no_grad():
            figure_A = torch.cat([
                test_A,
                decoderA(encoder(test_A)),
                decoderB(encoder(test_A)),
                ], dim=-1)
            figure_B = torch.cat([
                test_B,
                decoderA(encoder(test_B)),
                decoderB(encoder(test_B)),
                ], dim=-1)
            figA = figure_A.permute(0, 2, 3, 1).cpu().numpy()
            figB = figure_B.permute(0, 2, 3, 1).cpu().numpy()
            figs = np.concatenate([figA, figB], axis=1)
        # .item() prints scalar losses instead of full tensor reprs.
        print(lossA.item(), lossB.item())
        # Clip before the uint8 cast: decoder outputs can fall outside
        # [0, 1] and would otherwise wrap around modulo 256.
        cv2.imwrite("demo.jpg", (np.clip(figs[0], 0.0, 1.0) * 255).astype(np.uint8))

