import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pathlib import Path

import cv2
import numpy as np

from tqdm import tqdm
from utity import save, _load

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf


# class suppress_stdout_stderr(object):
#     '''
#     A context manager for doing a "deep suppression" of stdout and stderr in
#     Python, i.e. will suppress all print, even if the print originates in a
#     compiled C/Fortran sub-function.
#        This will not suppress raised exceptions, since exceptions are printed
#     to stderr just before a script exits, and after the context manager has
#     exited (at least, I think that is why it lets exceptions through).
#
#     '''
#
#     def __init__(self):
#         # Open a pair of null files
#         self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
#         # Save the actual stdout (1) and stderr (2) file descriptors.
#         self.save_fds = (os.dup(1), os.dup(2))
#
#     def __enter__(self):
#         # Assign the null pointers to stdout and stderr.
#         os.dup2(self.null_fds[0], 1)
#         os.dup2(self.null_fds[1], 2)
#
#     def __exit__(self, *_):
#         # Re-assign the real stdout/stderr back to (1) and (2)
#         os.dup2(self.save_fds[0], 1)
#         os.dup2(self.save_fds[1], 2)
#         # Close the null files
#         os.close(self.null_fds[0])
#         os.close(self.null_fds[1])


def generate_lm(dfa_fp='D:/pyProject/Datasets/3DFFA/train.configs/',
                img_fp='D:/pyProject/Datasets/3DFFA/train_aug_120x120/',
                dst='D:/pyProject/3ddfa_my/FaceRecon3dM/dataset/'):
    """Detect 68 facial landmarks for every train/val image and save them.

    Loads the frozen landmark-detector graph (./68lm_detector.pb), runs it
    over every image listed in the 3DDFA train/val list files, converts the
    raw network output to 120x120-image coordinates, and persists the two
    landmark lists with the project's `save` helper.

    Args:
        dfa_fp: Directory containing the 3DDFA list files
            (train_aug_120x120.list.train / .val).
            (Linux box equivalents: .../3DDFA-master/train.configs/ etc.)
        img_fp: Directory containing the 120x120 training images.
        dst: Output directory; writes `lm_train` and `lm_val` there.

    Side effects: reads ./test_mean_face.txt and ./68lm_detector.pb from the
    working directory; writes two files under `dst`; prints progress.
    """
    train_names = Path(dfa_fp + 'train_aug_120x120.list.train').read_text().strip().split('\n')
    val_names = Path(dfa_fp + 'train_aug_120x120.list.val').read_text().strip().split('\n')
    print(f'文件名-训练集：{len(train_names)}, 验证集：{len(val_names)}')

    # Mean face offsets: the network predicts residuals relative to this.
    mean_face = np.loadtxt('./test_mean_face.txt').reshape([68, 2])

    # Load the frozen graph and open a session bound to it.
    graph_filename = './68lm_detector.pb'
    with tf.compat.v2.io.gfile.GFile(graph_filename, 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='net')
        img_224 = graph.get_tensor_by_name('net/input_imgs:0')
        output_lm = graph.get_tensor_by_name('net/lm:0')
        lm_sess = tf.compat.v1.Session(graph=graph)

    try:
        param_train = _detect_split(lm_sess, img_224, output_lm, mean_face,
                                    img_fp, train_names, desc='Train_param: ')
        # BUGFIX: this bar used to be mislabeled 'Train_param: ' as well.
        param_val = _detect_split(lm_sess, img_224, output_lm, mean_face,
                                  img_fp, val_names, desc='Val_param: ')
    finally:
        # The session was leaked previously; close it explicitly.
        lm_sess.close()

    save(dst + 'lm_train', param_train)
    save(dst + 'lm_val', param_val)


def _detect_split(lm_sess, img_224, output_lm, mean_face, img_fp, names, desc):
    """Run the landmark detector over one image-list split.

    Args:
        lm_sess: Open tf.compat.v1.Session bound to the detector graph.
        img_224: Input placeholder tensor ('net/input_imgs:0').
        output_lm: Output landmark tensor ('net/lm:0').
        mean_face: (68, 2) array of mean-face offsets added to the raw output.
        img_fp: Directory prefix for the image files.
        names: Iterable of image file names (one per line of the list file).
        desc: tqdm progress-bar label.

    Returns:
        List (one entry per image) of 136 floats: the x then y landmark
        coordinates (column-major flatten) scaled from 224x224 down to the
        120x120 image frame.
    """
    GEN_SIZE = 224   # network input resolution
    STD_SIZE = 120   # resolution the 3DDFA dataset images use
    params = []
    with tqdm(total=len(names), desc=desc) as pbar:
        for name in names:
            image = cv2.imread(img_fp + name)
            img = cv2.resize(image, dsize=(GEN_SIZE, GEN_SIZE),
                             interpolation=cv2.INTER_LINEAR)
            # NOTE: an MTCNN-based alignment step (align_for_lm) used to sit
            # here; with it disabled the whole resized frame is fed directly,
            # so the former scale=1.0 / bbox=[0,0] corrections are no-ops
            # and have been dropped.
            input_img = np.reshape(img, [1, GEN_SIZE, GEN_SIZE, 3]).astype(np.float32)
            landmark = lm_sess.run(output_lm, feed_dict={img_224: input_img})

            landmark = landmark.reshape([68, 2]) + mean_face
            # Detector y-axis is bottom-up; flip into image coordinates.
            landmark[:, 1] = (GEN_SIZE - 1) - landmark[:, 1]
            # Column-major flatten -> [x0..x67, y0..y67], rescaled to 120x120.
            kp = landmark.flatten(order='F')
            params.append([float(v) * STD_SIZE / GEN_SIZE for v in kp])
            pbar.update(1)
    return params


# Script entry point: generate and save landmark files for both splits.
if __name__ == '__main__':
    generate_lm()
    print("finish")
