import json
import os.path
import random
from pathlib import Path
import logging

import cv2
from tqdm import tqdm
import pickle
import scipy.io as sio

from dataLoader import _load
from loss.param import *
from utity import save

"""
预处理，获得数据集图片文件名列表和参数文件，以及bfm相关参数，从3ddfa/configs中获得keypoints_sim.npy
300W-LP: http://www.cbsr.ia.ac.cn/users/xiangyuzhu/projects/3DDFA/main.htm
BFM: https://faces.dmi.unibas.ch/bfm/main.php?nav=1-2&id=downloads
"""


def random_list(paths, labels):
    """Shuffle two parallel lists in unison, preserving their pairing.

    Args:
        paths: list of items (e.g. file names).
        labels: list of the same length, paired element-wise with paths.
    Returns:
        (paths, labels) as two new lists, shuffled with the same permutation.
        Empty inputs return ([], []) instead of raising on unpacking.
    """
    paired_lists = list(zip(paths, labels))
    random.shuffle(paired_lists)
    # zip(*[]) would raise ValueError: guard the empty case explicitly
    if not paired_lists:
        return [], []
    l1, l2 = zip(*paired_lists)
    return list(l1), list(l2)


def load_BFM(model_path):
    """ load BFM 3DMM model
    Args:
        model_path: path to BFM model.
    Returns:
        model: (nver = 53215, ntri = 105840). nver: number of vertices. ntri: number of triangles.
            'shapeMU': [3*nver, 1]
            'shapePC': [3*nver, 199]
            'shapeEV': [199, 1]
            'expMU': [3*nver, 1]
            'expPC': [3*nver, 29]
            'expEV': [29, 1]
            'texMU': [3*nver, 1]
            'texPC': [3*nver, 199]
            'texEV': [199, 1]
            'tri': [ntri, 3] (start from 1, should sub 1 in python and c++)
            'tri_mouth': [114, 3] (start from 1, as a supplement to mouth triangles)
            'kpt_ind': [68,] (start from 1)
    PS:
        You can change codes according to your own saved data.
        Just make sure the model has corresponding attributes.

    Note: the returned record is modified in place below — 'shapeMU' becomes
    shapeMU + expMU, triangles become 0-based int32, and 'kpt_ind' is expanded
    to [3*68] flattened x/y/z indices into the 3*nver vertex vector.
    """
    C = _load(model_path)
    # loadmat wraps the struct in an object array; [0, 0] unwraps the record
    model = C['model']
    model = model[0, 0]

    # change dtype from double(np.float64) to np.float32,
    # since big matrix process(especially matrix dot) is too slow in python.
    # The expression mean is folded into the shape mean here.
    model['shapeMU'] = (model['shapeMU'] + model['expMU']).astype(np.float32)
    model['shapePC'] = model['shapePC'].astype(np.float32)
    model['shapeEV'] = model['shapeEV'].astype(np.float32)
    model['expEV'] = model['expEV'].astype(np.float32)
    model['expPC'] = model['expPC'].astype(np.float32)

    # matlab start with 1. change to 0 in python.
    # .T.copy(order='C') makes the transposed triangle lists C-contiguous.
    model['tri'] = model['tri'].T.copy(order='C').astype(np.int32) - 1
    model['tri_mouth'] = model['tri_mouth'].T.copy(order='C').astype(np.int32) - 1

    # kpt ind: convert the 68 1-based vertex indices to 0-based, then expand
    # each index i to (3i, 3i+1, 3i+2) so keypoint x/y/z can be gathered
    # directly from the flattened [3*nver] shape vector.
    model['kpt_ind'] = (np.squeeze(model['kpt_ind']) - 1).astype(np.int64)
    k = []
    for ind in model['kpt_ind']:
        k.append(ind * 3)
        k.append(ind * 3 + 1)
        k.append(ind * 3 + 2)

    model['kpt_ind'] = np.array(k, dtype=np.int64)

    return model


def _parse_param(param):
    """Split a flat parameter vector into pose, offset, shape and expression.

    Work for both numpy and tensor.

    Args:
        param: flat vector; first 12 entries are the 3x4 pose matrix [R | t],
            followed by `dim_shp` shape coefficients and the remaining
            expression coefficients.
    Returns:
        p: (3, 3) rotation/scale part of the pose.
        offset: (3, 1) translation column with its z-component zeroed.
        alpha_shp: (dim_shp, 1) shape coefficients.
        alpha_exp: (-1, 1) expression coefficients.
    """
    p_ = param[:12].reshape(3, -1)
    p = p_[:, :3]
    # `* 1` forces a fresh array/tensor (works for both numpy and torch),
    # so zeroing the z-offset below no longer mutates the caller's `param`
    # through the reshape view — the original wrote back into `param`.
    offset = p_[:, -1].reshape(3, 1) * 1
    offset[2] = 0
    alpha_shp = param[12:12 + dim_shp].reshape(-1, 1)
    alpha_exp = param[12 + dim_shp:].reshape(-1, 1)
    return p, offset, alpha_shp, alpha_exp


def bfm_generate(fp='bfm/ori/BFM.mat', dst='bfm/generate/'):
    """Export BFM-derived assets (u, w_exp, w_shp, tri, keypoints_sim).

    Args:
        fp: BFM.mat, produced as in https://github.com/yfeng95/face3d.
        dst: output directory for the generated files.
    Returns:
        None. Currently only keypoints_sim (the flattened 68-keypoint
        indices) is written; the other exports are kept below, disabled.
    """
    # makedirs creates missing parents and is race-free with exist_ok,
    # unlike the exists()+mkdir() pair it replaces.
    os.makedirs(dst, exist_ok=True)

    bfm = load_BFM(fp)
    # save(dst + 'u', bfm['shapeMU'], True)
    # save(dst + 'w_shp', bfm['shapePC'], True)
    # save(dst + 'w_exp', bfm['expPC'], True)
    # save(dst + 'tri', bfm['tri'], True)
    save(dst + 'keypoints_sim', bfm['kpt_ind'])


def param_tran(ori_param, dfa_param):
    """Assemble a 240-dim parameter vector: 12 pose + 199 shape + 29 exp.

    Args:
        ori_param: dict loaded from a 300W-LP .mat file, providing the raw
            'Shape_Para' and 'Exp_Para' columns.
        dfa_param: de-whitened 62-dim 3DDFA parameter vector; only its first
            12 (pose) entries are used.
    Returns:
        1-D array laid out as [pose(12), shape(199), exp(29)].
    """
    shape_coeffs = ori_param.get('Shape_Para').squeeze()
    exp_coeffs = ori_param.get('Exp_Para').squeeze()
    pose = dfa_param[:12]
    return np.concatenate((pose, shape_coeffs, exp_coeffs))


def _recover_params(names, dfa_params, mean, std, ori_fp, desc):
    """Rebuild the full 240-dim parameters (12 pose + 199 shp + 29 exp) per image.

    Each name looks like '<FOLDER>_<stem>_<augIdx>.jpg': the first token picks
    the 300W-LP folder ('Flip' expands to '_Flip'), the trailing token is an
    augmentation index, and the middle part is the original .mat file stem.

    Args:
        names: image file names from a 3DDFA list file.
        dfa_params: whitened 62-dim 3DDFA parameters, one row per name.
        mean: whitening mean used to de-normalize dfa_params.
        std: whitening std used to de-normalize dfa_params.
        ori_fp: root directory of the 300W-LP dataset.
        desc: tqdm progress-bar label.
    Returns:
        list of 240-dim parameter vectors, in the same order as names.
    """
    params = []
    with tqdm(total=len(names), desc=desc) as pbar:
        for i, name in enumerate(names):
            stem = name.replace('.jpg', '')
            tokens = stem.split('_')
            # strip the leading dataset tag and trailing augmentation index
            mat_stem = stem[len(tokens[0]) + 1:len(stem) - len(tokens[-1]) - 1]
            folder = tokens[0].replace('Flip', '_Flip')
            ori_param = sio.loadmat(ori_fp + folder + '/' + mat_stem + '.mat')
            # undo the whitening to recover the raw 62-dim 3DDFA parameter
            dfa_param = dfa_params[i] * std + mean
            params.append(param_tran(ori_param, dfa_param))
            pbar.update(1)
    return params


def data_generate(dfa_fp='D:/pyProject/Datasets/3DFFA/train.configs/', ori_fp='D:/pyProject/3ddfa_my/300W_LP/',
                  dst="dataset/"):
    """Produce the training/validation name lists and parameter files.

    Args:
        dst: output directory.
        dfa_fp: 3DDFA train.configs directory — put param_all_norm.pkl,
            param_all_norm_val.pkl, param_whitening.pkl and the two .list
            files here (see https://github.com/cleardusk/3DDFA). Its params
            keep only 40+10 coefficients, so the full 199+29 are re-read
            from the original 300W-LP .mat annotations.
        ori_fp: root directory of the 300W-LP dataset.
    Returns:
        None; writes the name lists (and, when enabled, the parameter
        pickles) into dst.
    """
    # IBUG and IBUG_Flip contain file names with spaces; strip them first so
    # the name-based .mat lookups below succeed.
    for folder_path in [ori_fp + 'IBUG', ori_fp + 'IBUG_Flip']:
        for filename in os.listdir(folder_path):
            old_file_path = os.path.join(folder_path, filename)
            if os.path.isfile(old_file_path) and ' ' in filename:
                new_file_path = os.path.join(folder_path, filename.replace(' ', ''))
                os.rename(old_file_path, new_file_path)

    if not os.path.exists(dst):
        os.mkdir(dst)

    train_names = Path(dfa_fp + 'train_aug_120x120.list.train').read_text().strip().split('\n')
    val_names = Path(dfa_fp + 'train_aug_120x120.list.val').read_text().strip().split('\n')
    print(f'文件名-训练集：{len(train_names)}, 验证集：{len(val_names)}')
    dfa_param_train = _load(dfa_fp + 'param_all_norm.pkl')
    dfa_param_val = _load(dfa_fp + 'param_all_norm_val.pkl')
    mean_std = _load(dfa_fp + 'param_whitening.pkl')
    mean = mean_std.get("param_mean")
    std = mean_std.get("param_std")

    # persist the file-name lists
    save(dst + 'train', train_names)
    save(dst + 'val', val_names)

    # rebuild full parameters from the original 300W-LP annotations
    # (the train/val loops were duplicates — now shared via _recover_params)
    param_train = _recover_params(train_names, dfa_param_train, mean, std, ori_fp, 'Train_param: ')
    param_val = _recover_params(val_names, dfa_param_val, mean, std, ori_fp, 'Val_param: ')
    print(f'参数-训练集：{len(param_train)}, 验证集：{len(param_val)}')
    # save(dst + 'param_train', param_train)
    # save(dst + 'param_val', param_val)


def change_light(image, percetage=0.9):
    """Scale image brightness channel-wise by `percetage`, clamping at 255.

    Vectorized replacement for the original per-pixel Python loop (O(w*h)
    interpreter-level work): each of the first three channels is multiplied
    by the factor, truncated to an integer (matching int()), and capped
    at 255.

    Args:
        image: HxWxC array with C >= 3 — presumably uint8 BGR from cv2;
            TODO confirm callers never pass float images.
        percetage: brightness factor; <1 darkens, >1 brightens.
    Returns:
        A new array with the same shape/dtype as image; any channels beyond
        the third are copied unchanged.
    """
    image_copy = image.copy()
    # clamp in float first, then astype truncates toward zero like int()
    scaled = np.minimum(image[:, :, :3].astype(np.float64) * percetage, 255)
    image_copy[:, :, :3] = scaled.astype(image_copy.dtype)
    return image_copy


def augmentation(img_fp='/home/chenxianhao/train_aug_120x120/',
                 dst='/home/chenxianhao/FaceRecon3dM/dataset/',
                 img_224='/home/chenxianhao/train_aug_224x224/'):
    """Brightness-augment the training images and build 224x224 copies.

    For every name in train.pkl: write a 224x224 resize of the original,
    then create three brightness-scaled variants (random factor drawn from
    [0.4*(j+1), 0.49*(j+1)] for j = 0..2, i.e. ~0.4-0.49, ~0.8-0.98 and
    ~1.2-1.47), each reusing the source image's parameters. The variants
    are written at both 120x120 (img_fp) and 224x224 (img_224). Finally the
    combined (original + augmented) name/param lists are shuffled together
    and saved as train_a / param_train_n_a; the new-only lists are also
    saved as new_name / new_param.

    Args:
        img_fp: directory of the 120x120 training images; variants are
            written back into it.
        dst: dataset directory holding the pickled name/param lists.
        img_224: output directory for the 224x224 resizes.
    """
    # Alternative (Windows) defaults kept for reference:
    # def augmentation(img_fp='D:/pyProject/Datasets/3DFFA/train_aug_120x120/',
    #                  dst='D:/pyProject/3ddfa_my/FaceRecon3dM/dataset/'):
    # Log to both a file and stdout so long runs can be monitored.
    logging.basicConfig(
        format='[%(asctime)s] [p%(process)s] [%(pathname)s:%(lineno)d] %(message)s',
        level=logging.INFO,
        handlers=[
            logging.FileHandler('/home/chenxianhao/FaceRecon3dM/' + 'data_a.log', mode='w'),
            logging.StreamHandler()
        ]
    )
    old_train = _load(dst + 'param_train_n.pkl')
    old_t_name = _load(dst + 'train.pkl')
    new_name = []
    new_param = []
    logging.info('augmentation')
    # with tqdm(total=len(old_t_name), desc='Train_param: ') as pbar:
    for i in range(len(old_t_name)):
        name = old_t_name[i]
        image = cv2.imread(img_fp + name)
        # 224x224 copy of the unmodified image
        img = cv2.resize(image, dsize=(224, 224), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(img_224 + name, img)
        for j in range(3):
            random_number = random.uniform(0.4 * (j + 1), 0.49 * (j + 1))
            image_a = change_light(image, random_number)
            n_n = str(name).replace('.jpg', f'_{j}.jpg')
            cv2.imwrite(img_fp + n_n, image_a)
            new_name.append(n_n)
            # an augmented view shares its source image's parameters
            new_param.append(old_train[i])
            img = cv2.resize(image_a, dsize=(224, 224), interpolation=cv2.INTER_LINEAR)
            cv2.imwrite(img_224 + n_n, img)
        if i % 200 == 0:
            logging.info(f'process: {i + 1}')
    save(dst + 'new_name', new_name)
    save(dst + 'new_param', new_param)
    old_train.extend(new_param)
    old_t_name.extend(new_name)

    names, train = random_list(old_t_name, old_train)
    save(dst + 'param_train_n_a', train)
    save(dst + 'train_a', names)


def add_aug(dst='/home/chenxianhao/FaceRecon3dM/dataset/'):
    """Mix a random 30% of the augmented samples into the base training set.

    Loads the base name/param lists and the augmented pool produced by
    augmentation(), shuffles the pool, takes the first 30% (of the base set's
    size), appends it to the base lists, reshuffles the combined lists, and
    saves them as train_a / param_train_n_a.

    Args:
        dst: dataset directory containing the pickled lists.
    """
    base_params = _load(dst + 'param_train_n.pkl')
    base_names = _load(dst + 'train.pkl')
    aug_names = _load(dst + 'new_name.pkl')
    aug_params = _load(dst + 'new_param.pkl')

    shuffled_names, shuffled_params = random_list(aug_names, aug_params)
    take = int(len(base_params) * 0.3)
    base_params.extend(shuffled_params[:take])
    base_names.extend(shuffled_names[:take])
    final_names, final_params = random_list(base_names, base_params)

    save(dst + 'param_train_n_a', final_params)
    save(dst + 'train_a', final_names)


def normalize(fp='dataset/'):
    """Mean/std-normalize the training parameters and persist the statistics.

    Args:
        fp: dataset directory.
    Writes:
        param_train_normal_a.pkl (normalized params) and mean_std.pkl
        ({'mean', 'std'}) under fp. Validation normalization is currently
        disabled.
    """
    params = np.array(_load(fp + 'param_train_n_a.pkl'))
    mean = params.mean(0)
    std = params.std(0)
    params = (params - mean) / std

    # param_val = _load(fp + 'param_val.pkl')
    # param_val -= mean
    # param_val /= std

    save(fp + 'param_train_normal_a', params)
    # save(fp + 'param_val_normal', param_val)
    save(fp + 'mean_std', {'mean': mean, "std": std})


def test():
    """Print the saved mean/std statistics as a quick sanity check."""
    print(_load("dataset/mean_std.pkl"))


if __name__ == '__main__':
    # Pipeline stages — uncomment the ones to run:
    # bfm_generate()  # export BFM keypoint indices
    # data_generate()  # build name lists and raw parameter files

    # test()  # inspect saved mean/std
    # augmentation()  # brightness-augment the training images
    add_aug()
    normalize()
    print("finish")
