import torch 
from models.mtcnn import MTCNN 
import cv2 
import numpy as np 
from model.encoderdecoder import Encoder, Decoder
# Run on the first GPU when CUDA is available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')
print('Running on device: {}'.format(device))

# Face detector producing 64x64 crops for every face in the frame.
# post_process=False keeps pixel values in the raw 0-255 range;
# keep_all=True returns every detected face, not just the largest one.
mtcnn = MTCNN(
    image_size=64,
    margin=0,
    min_face_size=20,
    thresholds=[0.6, 0.8, 0.9],
    factor=0.709,
    post_process=False,
    keep_all=True,
    device=device,
)

# Build the shared encoder and the two identity-specific decoders on the
# selected device, restore their checkpoints, then switch them all to
# inference mode.
encoder = Encoder().to(device)
decoderA = Decoder().to(device)
decoderB = Decoder().to(device)

encoder.load_state_dict(torch.load("ckpt/enc", map_location=device))
decoderA.load_state_dict(torch.load("ckpt/decA", map_location=device))
decoderB.load_state_dict(torch.load("ckpt/decB", map_location=device))

# BUG FIX: the original called encoder.eval() three times and never put the
# decoders into eval mode, so their BatchNorm/Dropout layers stayed in
# training behavior during inference. eval() is also now called after the
# weights are loaded.
encoder.eval()
decoderA.eval()
decoderB.eval()

# Detect faces in the input image, run each crop through encoder + decoderB
# to swap the identity, then paste the translated faces back into the frame
# and annotate it with detection scores and landmarks.
img = cv2.imread("data/trump.jpg")
if img is None:
    # cv2.imread returns None instead of raising on a missing/unreadable file.
    raise FileNotFoundError("could not read data/trump.jpg")

# NOTE(review): cv2.imread yields BGR channel order; if the MTCNN/encoder
# weights were trained on RGB input, a cv2.cvtColor conversion belongs here
# and before writing back — confirm against the training pipeline.
boxes, scores, landmarks = mtcnn.detect(img, True)
faceimg = mtcnn(img)

# mtcnn returns None when no face is found; guard instead of crashing.
if boxes is not None and faceimg is not None:
    # post_process=False leaves crops in [0, 255]; scale to [0, 1] for the model.
    faceimg = faceimg / 255.0
    with torch.no_grad():
        trans = decoderB(encoder(faceimg)).permute(0, 2, 3, 1).cpu().numpy()
    print(faceimg.shape, faceimg.max(), faceimg.min())

    img_h, img_w = img.shape[:2]
    for box, score, landmark, timg in zip(boxes, scores, landmarks, trans):
        # MTCNN boxes may extend past the frame (negative or > image size);
        # clamp so the slice below matches the resized patch exactly.
        x1 = max(int(box[0]), 0)
        y1 = max(int(box[1]), 0)
        x2 = min(int(box[2]), img_w)
        y2 = min(int(box[3]), img_h)
        if x2 <= x1 or y2 <= y1:
            continue
        # Clip before the uint8 cast: decoder outputs slightly outside [0, 1]
        # would otherwise wrap around (e.g. 1.01 -> 2) instead of saturating.
        patch = np.clip(timg * 255.0, 0, 255).astype(np.uint8)
        patch = cv2.resize(patch, (x2 - x1, y2 - y1))
        img[y1:y2, x1:x2, :] = patch
        cv2.putText(img, f"{score:.2f}", (x1, y1),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0))
        for p in landmark:
            p = p.astype(np.int32)
            cv2.circle(img, (int(p[0]), int(p[1])), 3, (0, 0, 255), thickness=3)

cv2.imwrite("tp2cage.jpg", img)
cv2.imshow("imgs", img)

cv2.waitKey(0)