import os
import cv2
import numpy as np
import torch
import math
from torch.autograd import Variable
import matplotlib.pyplot as plt
import argparse
from dataloader.u2net_data_loader import SalObjDataset
from dataloader.u2net_data_loader import RescaleT
from dataloader.u2net_data_loader import ToTensorLab
from torchvision import transforms
import mediapipe as mp
from model.photo2cartoon import ResnetGenerator as Photo2Cartoon_ResnetGenerator
from torch.utils.data import DataLoader
from PIL import Image
from utils.transform import scaling_image, put_text, blend_images
from utils.face_seg import FaceSeg
from model.u2net import U2NET


def get_face(image, scale, img_name):
    """Detect face landmarks with MediaPipe FaceMesh and draw them on the image.

    Args:
        image: BGR image (numpy array); annotated in place when a face is found.
        scale: unused in this implementation (kept for interface compatibility).
        img_name: unused in this implementation (kept for interface compatibility).

    Returns:
        The input image, with the face mesh drawn on it when a face was detected.
    """
    face_mesh_module = mp.solutions.face_mesh
    drawing_utils = mp.solutions.drawing_utils

    # MediaPipe expects RGB input, while OpenCV images are BGR.
    rgb_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    with face_mesh_module.FaceMesh(min_detection_confidence=0.5,
                                   min_tracking_confidence=0.5) as mesh:
        detection = mesh.process(rgb_frame)

    if not detection.multi_face_landmarks:
        print("No face detected!")
        return image

    line_spec = drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=1,
                                          circle_radius=1)
    for landmarks in detection.multi_face_landmarks:
        # Overlay the tesselated face mesh on the original (BGR) image.
        drawing_utils.draw_landmarks(image=image,
                                     landmark_list=landmarks,
                                     connections=face_mesh_module.FACEMESH_TESSELATION,
                                     landmark_drawing_spec=None,
                                     connection_drawing_spec=line_spec)
    return image


def normPRED(d):
    """Min-max normalize a prediction tensor to the [0, 1] range.

    Args:
        d: torch.Tensor of raw saliency predictions.

    Returns:
        Tensor of the same shape, linearly rescaled so min -> 0 and max -> 1.
        A constant-valued input returns all zeros instead of NaN (the original
        divided by zero in that case).
    """
    ma = torch.max(d)
    mi = torch.min(d)
    if ma == mi:
        # Guard against division by zero on a constant tensor.
        return torch.zeros_like(d)
    return (d - mi) / (ma - mi)


def save_output(image_name, pred, d_dir):
    """Resize a saliency prediction to the source image size and save it as a binary mask.

    Args:
        image_name: path of the original input image (read only for its dimensions).
        pred: torch tensor of shape (1, H, W) with values in [0, 1].
        d_dir: output file path where the mask is written.

    Returns:
        The Otsu-binarized mask as a numpy array (values 0/255).
    """
    predict_np = pred.squeeze().cpu().data.numpy()  # (320, 320)

    # Scale the probability map to 0-255 and resize back to the original image size.
    im = Image.fromarray(predict_np * 255).convert('RGB')
    image = cv2.imread(image_name)  # original image, used only for its shape
    imo = im.resize((image.shape[1], image.shape[0]), resample=Image.BILINEAR)

    # Round-trip through disk so OpenCV can re-read and threshold the saved mask.
    # (The original also built an unused base-name string here; removed as dead code.)
    imo.save(d_dir)
    imo = cv2.imread(d_dir)
    imo_gray = cv2.cvtColor(imo, cv2.COLOR_RGB2GRAY)
    # Otsu's method picks the threshold automatically, yielding a hard 0/255 mask.
    ret, binary = cv2.threshold(imo_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    cv2.imwrite(d_dir, binary)

    plt.title('face_mask')
    plt.imshow(binary)
    plt.show()
    return binary


def get_mask_U2net(face, img_name):
    """Segment the person/face with U2-Net and save the resulting binary mask.

    Args:
        face: unused here; the image is re-read from dataset/face/<img_name>.
        img_name: filename of the input image under dataset/face/.

    Returns:
        The binarized mask produced by save_output, or None if the
        dataloader yields no items.
    """
    # Pre-trained model weights.
    model_dir = os.path.join(os.getcwd(), 'save_model', 'u2net_human_seg.pth')
    # Input image path.
    img_name_list = [os.path.join(os.getcwd(), 'dataset', 'face', img_name), ]
    # Output mask path.
    prediction_dir = os.path.join(os.getcwd(), 'dataset', 'result_seg', img_name)

    # Dataset / dataloader over the single input image.
    test_salobj_dataset = SalObjDataset(img_name_list=img_name_list,
                                        lbl_name_list=[],
                                        transform=transforms.Compose([RescaleT(320),
                                                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    u2net = U2NET(3, 1)
    u2net.load_state_dict(torch.load(model_dir, map_location='cpu'))
    # Bug fix: only move the model to GPU when one is available. The original
    # called .cuda() unconditionally, crashing on CPU-only machines even
    # though the input tensors were guarded by torch.cuda.is_available().
    if torch.cuda.is_available():
        u2net = u2net.cuda()
    u2net.eval()

    # Inference over the (single-image) dataloader.
    for i_test, data_test in enumerate(test_salobj_dataloader):
        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)
        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)
        d1, d2, d3, d4, d5, d6, d7 = u2net(inputs_test)

        # d1 is the fused side output; take its single channel. (1, 320, 320)
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)

        pred = save_output(img_name_list[i_test], pred, prediction_dir)
        return pred


def get_mask_FCN(face, img_name):
    """Segment the face with the FCN-based FaceSeg model and save the mask.

    Args:
        face: face image (numpy array) to segment.
        img_name: filename used for the saved mask under dataset/result_seg/.

    Returns:
        The segmentation mask produced by FaceSeg.
    """
    out_path = os.path.join(os.getcwd(), 'dataset', 'result_seg', img_name)
    mask = FaceSeg().get_mask(face)
    cv2.imwrite(out_path, mask)

    # Preview the mask.
    plt.title('face_mask')
    plt.imshow(mask)
    plt.show()
    return mask


def get_face_white_bg_photo2cartoon(face_rgba, img_name):
    """Composite the RGBA face onto a white background and normalize for the generator.

    Args:
        face_rgba: (H, W, 4) array; channel 3 is the segmentation mask (0-255).
        img_name: filename used when saving the white-background preview image.

    Returns:
        Float array scaled to [-1, 1], as expected by the photo2cartoon model.
    """
    # Extract RGB and alpha once (the original duplicated this computation).
    face = face_rgba[:, :, :3].astype(np.float32)
    mask = face_rgba[:, :, 3:].astype(np.float32) / 255.
    composite = face * mask + (1 - mask) * 255  # white wherever the mask is 0

    plt.title('face_white_bg')
    plt.imshow(composite.astype(np.uint8))
    plt.show()
    cv2.imwrite(os.path.join(os.getcwd(), 'dataset', 'result_white_bg', img_name),
                cv2.cvtColor(composite.astype(np.uint8), cv2.COLOR_RGB2BGR))

    # Rescale from [0, 255] to [-1, 1] for the generator input.
    face_white_bg = composite / 127.5 - 1
    return face_white_bg


def get_cartoon_face_photo2cartoon(face_white_bg, mask, img_name):
    """Run the photo2cartoon generator and composite the output with the face mask.

    Args:
        face_white_bg: face on white background, values in [-1, 1].
        mask: grayscale face mask (values 0-255).
        img_name: filename used for the saved cartoon under dataset/cartoon_face/.

    Returns:
        The cartoonized face as a (512, 512, 3) RGB uint8 array.
    """
    mask = cv2.resize(mask, (256, 256))
    mask = mask[:, :, np.newaxis].copy() / 255.

    # HWC -> NCHW float32 tensor at the generator's native 256x256 resolution.
    face_white_bg = cv2.resize(face_white_bg, (256, 256), interpolation=cv2.INTER_AREA)
    face_white_bg = np.transpose(face_white_bg[np.newaxis, :, :, :], (0, 3, 1, 2)).astype(np.float32)
    face_white_bg = torch.from_numpy(face_white_bg)

    model_dir = os.path.join(os.getcwd(), 'models', 'photo2cartoon_weights.pt')

    net = Photo2Cartoon_ResnetGenerator(ngf=32, img_size=256, light=True)
    params = torch.load(model_dir, map_location='cpu')
    net.load_state_dict(params['genA2B'])
    # Bug fix: run in inference mode. The original left the network in train
    # mode and tracked gradients, which both wastes memory and can change
    # the output of dropout/normalization layers.
    net.eval()
    with torch.no_grad():
        cartoon = net(face_white_bg)[0][0]

    # CHW -> HWC, rescale [-1, 1] back to [0, 255].
    cartoon = np.transpose(cartoon.cpu().detach().numpy(), (1, 2, 0))
    cartoon = (cartoon + 1) * 127.5
    # Keep only the masked face region; white elsewhere.
    cartoon = (cartoon * mask + 255 * (1 - mask)).astype(np.uint8)
    cartoon = cv2.cvtColor(cartoon, cv2.COLOR_RGB2BGR)
    cartoon = cv2.resize(cartoon, (512, 512))[:, :, ::-1]

    plt.imshow(cartoon)
    plt.title('cartoon')
    plt.show()

    cv2.imwrite(os.path.join(os.getcwd(), 'dataset', 'cartoon_face', img_name),
                cartoon[:, :, ::-1])
    return cartoon


def merge_process(im, bg, mask, output_path):
    """Alpha-blend the cartoon image onto the bottom-center of a background.

    Args:
        im: cartoon image (h, w, 3) in RGB order.
        bg: background image (BGR, as read by cv2); enlarged if smaller than im.
        mask: grayscale alpha mask (h, w), values 0-255.
        output_path: file path where the merged result is written.

    Returns:
        The background image with the cartoon blended in (BGR).
    """
    h, w = im.shape[:2]    # cartoon size
    bh, bw = bg.shape[:2]  # background size

    # Enlarge the background when the cartoon exceeds it in either dimension.
    wratio = w / bw
    hratio = h / bh
    ratio = max(wratio, hratio)
    if ratio > 1:
        bg = cv2.resize(src=bg, dsize=(math.ceil(bw * ratio), math.ceil(bh * ratio)),
                        interpolation=cv2.INTER_CUBIC)

    im = np.array(im, np.float32)[:, :, ::-1]  # RGB -> BGR to match the background
    bg_h, bg_w = bg.shape[:2]  # dimensions after the possible resize
    x = max(0, int((bg_w - w) / 2))  # horizontal offset to center the cartoon

    # Bottom-aligned, horizontally-centered background region.
    # Bug fix: the original sliced with the pre-resize height (bh) and used the
    # vertical centering offset on the horizontal axis, mis-cropping the
    # background whenever it had been resized or was not square.
    crop = np.array(bg[bg_h - h:bg_h, x:x + w], np.float32)

    alpha = np.zeros((h, w, 1), np.float32)
    alpha[:, :, 0] = mask / 255.

    # Standard alpha compositing, then paste back into the background.
    blended = alpha * im + (1 - alpha) * crop
    bg[bg_h - h:bg_h, x:x + w] = blended

    cv2.imwrite(output_path, bg)
    plt.imshow((bg / 255)[:, :, ::-1])
    plt.title('merge')
    plt.show()

    return bg


def _parse_xy(value):
    """Parse an 'x,y' command-line string into an (int, int) tuple."""
    x, y = value.split(',')
    return int(x), int(y)


def parse_opt():
    """Build and parse the command-line options for the photo2cartoon pipeline.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Photo2Cartoon')

    parser.add_argument('--img-name', type=str, default='nini.png', help='Image name')
    parser.add_argument('--background-name', type=str, default='yourname2.jpeg', help='Background image name')
    parser.add_argument('--text-content', type=str, default='nini', help='The words written on the cartoon picture')
    parser.add_argument('--text-scale', type=int, default=70, help='The words size')
    # Bug fix: type=tuple would split a CLI string into individual characters;
    # parse an "x,y" pair instead. The default tuple is unchanged.
    parser.add_argument('--text-location', type=_parse_xy, default=(220, 30),
                        help='The words location, given as "x,y"')
    parser.add_argument('--fusion-method', type=str, default='pre_fusion', help='[pre_fusion/back_fusion]')
    # Bug fix: the default is a float, so the type must be float
    # (type=int rejected any fractional CLI value such as "0.8").
    parser.add_argument('--shear-rate', type=float, default=0.8, help='Head cut rate')
    parser.add_argument('--segment-model', type=str, default='U2net', help='[FCN/U2net]')
    parser.add_argument('--migration-method', type=str, default='Photo2cartoon',
                        help='[Photo2cartoon/U-GAT-IT/Pix2pix]')

    opt = parser.parse_args()

    return opt


if __name__ == "__main__":
    opt = parse_opt()

    image_path = 'testJPG/1.jpg'  # sample input path
    img = cv2.imread(image_path)
    if img is None:
        # Bug fix: abort immediately — the original only printed the error and
        # then crashed later on plt.imshow(None).
        raise SystemExit("Error: Image not found or failed to load.")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    plt.title('img')
    plt.imshow(img)
    plt.show()

    # Detect face landmarks and annotate the face.
    face = get_face(img, opt.shear_rate, opt.img_name)

    # Segment the face from the background.
    if opt.segment_model == 'U2net':
        mask = get_mask_U2net(face, opt.img_name)
    else:
        mask = get_mask_FCN(face, opt.img_name)

    # Cartoon style transfer with the photo2cartoon model.
    face_white_bg = get_face_white_bg_photo2cartoon(np.dstack((face, mask)), opt.img_name)
    cartoon_face = get_cartoon_face_photo2cartoon(face_white_bg, mask, opt.img_name)

    # Foreground fusion.
    if opt.fusion_method == 'pre_fusion':
        background_img = cv2.imread(os.path.join(os.getcwd(), 'dataset', 'back_ground', opt.background_name))
        output_path = 'dataset/pre_fuse_output/' + opt.img_name[:-4] + '-' + opt.background_name[:-4] + '_cartoon.jpg'
        background = scaling_image(background_img, opt.background_name)
        background_text = put_text('background_resize', opt.background_name, opt.text_content, opt.text_scale,
                                   opt.text_location)  # overlay the caption text
        blend_images(cartoon_face, background_text, output_path)  # blend cartoon with background
    else:
        output_path = 'dataset/back_fuse_output/' + opt.img_name[:-4] + '-' + opt.background_name[:-4] + '_cartoon.jpg'
        # Bug fix: cv2.imread's second argument is an IMREAD_* flag, not a color
        # conversion code — passing cv2.COLOR_BGR2RGB (== 4) silently selected
        # cv2.IMREAD_ANYCOLOR. Use the default color read instead.
        background_img = cv2.imread(os.path.join(os.getcwd(), 'dataset', 'back_ground', opt.background_name))
        background_img = cv2.resize(background_img, (512, 512))
        cv2.imwrite(os.path.join(os.getcwd(), 'dataset', 'background_resize', opt.background_name), background_img)
        background_text = put_text('background_resize', opt.background_name, opt.text_content, opt.text_scale,
                                   opt.text_location)  # overlay the caption text

        background_text = cv2.imread(os.path.join(os.getcwd(), 'dataset', 'background_text', opt.background_name))
        cartoon_face = cv2.resize(cartoon_face, (384, 384))
        mask = cv2.resize(mask, (384, 384))
        merge_cartoon = merge_process(cartoon_face, background_text, mask, output_path)
