""" SynergyNet eval script. """
import argparse
import time
import os
import glob
import math
from math import cos, atan2, asin
import cv2
import numpy as np
import mindspore as ms
import mindspore.dataset as ds
import sys
sys.path.append('./')
sys.path.append("/home/qinsiyi/codes/mind3d")
from mindspore import context, Tensor, load_checkpoint, load_param_into_net
import mindspore.ops as ops
from mind3d.utils.synergynet_builder import build_model, build_dataset
from mind3d.utils.synergynet_eval_utils import calc_nme as calc_nme_alfw2000
from mind3d.utils.synergynet_eval_utils import ana_msg as ana_alfw2000
from mind3d.utils.load_yaml import load_yaml

from mind3d.utils.synergynet_util import ParamsPack

# Module-wide 3DMM statistics pack: provides param_mean/param_std for
# de-whitening, the u/w_shp/w_exp basis matrices, and std_size (used below).
param_pack = ParamsPack()


def parse_pose(param):
    '''De-whiten a 3DMM parameter vector and extract the camera pose.

    Returns (p, pose): the 3x4 pose matrix without scale, and the Euler
    angles [yaw, pitch, roll] in degrees.
    '''
    # De-whitening: restore the raw parameter scale from the normalized one.
    std, mean = param_pack.param_std, param_pack.param_mean
    if len(param) == 62:
        std, mean = std[:62], mean[:62]
    param = param * std + mean
    camera = param[:12].reshape(3, -1)  # 3x4 camera matrix
    _, rot, t3d = p2srt(camera)
    p = np.concatenate((rot, t3d.reshape(3, -1)), axis=1)  # without scale
    pose = matrix2angle(rot)  # yaw, pitch, roll
    return p, pose


def p2srt(p):
    '''Decompose a 3x4 camera matrix into scale, rotation and translation.

    Returns (s, r, t3d): scalar scale, 3x3 rotation matrix, 3-vector
    translation.
    '''
    t3d = p[:, 3]  # translation lives in the last column
    row1, row2 = p[0:1, :3], p[1:2, :3]
    n1, n2 = np.linalg.norm(row1), np.linalg.norm(row2)
    s = (n1 + n2) / 2.0  # average row norm approximates the scale
    row1, row2 = row1 / n1, row2 / n2
    # The third rotation row is orthogonal to the first two.
    r = np.concatenate((row1, row2, np.cross(row1, row2)), 0)
    return s, r, t3d


def matrix2angle(r):
    '''Convert a 3x3 rotation matrix to Euler angles [rx, ry, rz] in degrees.'''
    s = r[2, 0]
    if s == 1 or s == -1:
        # Gimbal lock: cos(x) == 0, so z is unconstrained; pick z = 0.
        z = 0
        if s == -1:
            x = np.pi / 2
            y = z + atan2(r[0, 1], r[0, 2])
        else:
            x = -np.pi / 2
            y = -z + atan2(-r[0, 1], -r[0, 2])
    else:
        x = asin(s)
        cx = cos(x)
        y = atan2(r[1, 2] / cx, r[2, 2] / cx)
        z = atan2(r[0, 1] / cx, r[0, 0] / cx)

    return [x * 180 / np.pi, y * 180 / np.pi, z * 180 / np.pi]


def parsing(param):
    """Split a batch of 62-d parameter vectors into pose/shape/expression parts."""
    camera = param[:, :12].reshape(-1, 3, 4)
    return (
        camera[:, :, :3],                    # rotation (with scale)
        camera[:, :, -1].reshape(-1, 3, 1),  # translation offset
        param[:, 12:52].reshape(-1, 40, 1),  # shape coefficients
        param[:, 52:62].reshape(-1, 10, 1),  # expression coefficients
    )


def reconstruct_vertex(param, data_param, whitening=True, transform=True, lmk_pts=68):
    """
    De-whiten 3DMM parameters, reconstruct the sparse landmarks, and
    optionally map from (x, y) coordinate space to (u, v) image space.

    Args:
        param: (batch, 62) predicted parameters.
        data_param: [param_mean, param_std, w_shp_base, u_base, w_exp_base].
        whitening: if True, undo the mean/std normalization first.
        transform: if True, flip the y-axis into image space.
        lmk_pts: number of landmark points (68 by default).

    Raises:
        NotImplementedError: if the parameter length is not 62.
    """
    param_mean, param_std, w_shp_base, u_base, w_exp_base = [
        Tensor(t, dtype=ms.float32) for t in data_param
    ]

    if param.shape[1] != 62:
        raise NotImplementedError("Parameter length must be 62")

    if whitening:
        param = param * param_std[:62] + param_mean[:62]

    p, offset, alpha_shp, alpha_exp = parsing(param)

    # Sparse landmarks: mean face plus shape and expression deformations.
    shape_part = ops.matmul(w_shp_base, alpha_shp)
    exp_part = ops.matmul(w_exp_base, alpha_exp)
    vertex = (u_base + shape_part + exp_part).view(-1, lmk_pts, 3)
    vertex = ops.Transpose()(vertex, (0, 2, 1))
    vertex = ops.matmul(p, vertex) + offset
    if transform:
        # Image space has the y-axis pointing down.
        vertex[:, 1, :] = param_pack.std_size + 1 - vertex[:, 1, :]

    return vertex


def extract_param(opt):
    """
    Run the trained model over the test split and collect predicted 3DMM params.

    Args:
        opt: parsed YAML config; reads opt['val'] for img_size, checkpoint_fp,
            batch_size.

    Returns:
        (outputs, data_param): stacked parameter predictions as a numpy array,
        and the de-whitening statistics / basis tensors needed downstream.
    """
    model = build_model(img_size=opt['val']['img_size'], mode="test")
    load_param_into_net(model, load_checkpoint(opt['val']['checkpoint_fp']))

    generator = build_dataset(opt, "test")
    dataset = ds.GeneratorDataset(generator, ["data"], shuffle=False)
    dataset = dataset.batch(batch_size=opt['val']['batch_size'], drop_remainder=True)

    # De-whitening statistics and morphable-model bases as tensors.
    data_param = [
        Tensor(param_pack.param_mean, dtype=ms.float32),
        Tensor(param_pack.param_std, dtype=ms.float32),
        Tensor(param_pack.w_shp_base, dtype=ms.float32),
        Tensor(param_pack.u_base, dtype=ms.float32),
        Tensor(param_pack.w_exp_base, dtype=ms.float32),
    ]

    start = time.time()
    batches = [model(item['data']).asnumpy() for item in dataset.create_dict_iterator()]
    outputs = np.concatenate(batches, axis=0)

    print('Extracting params take {: .3f}s'.format(time.time() - start))
    return outputs, data_param


def _benchmark_aflw2000(outputs):
    '''Calculate the error statistics.'''
    nme_stats = calc_nme_alfw2000(outputs, option='ori')
    return ana_alfw2000(nme_stats)


# AFLW2000 facial alignment
# NOTE(review): hard-coded absolute dataset path — consider reading it from
# the YAML config like the other data locations. Sorted presumably so the
# ordering matches the ground-truth files — verify against the eval utils.
img_list = sorted(glob.glob('/data1/hujingsong/synergynet/aflw2000_data/AFLW2000-3D_crop/*.jpg'))


def benchmark_aflw2000_params(params, data_param):
    '''Reconstruct the landmark points and calculate the statistics.

    Args:
        params: (N, 62) predicted 3DMM parameters (numpy array).
        data_param: de-whitening statistics and basis tensors from
            extract_param().

    Returns:
        The NME analysis message produced by the AFLW2000 eval utilities.
    '''
    outputs = []
    params = Tensor(params, dtype=ms.float32)

    batch_size = 50
    num_samples = params.shape[0]
    iter_num = math.floor(num_samples / batch_size)
    residual = num_samples % batch_size
    # The first batch is also rendered to ./results for visual validation;
    # make sure the directory exists so cv2.imwrite does not silently fail.
    os.makedirs('./results', exist_ok=True)
    for i in range(iter_num + 1):
        if i == iter_num:
            # Trailing partial batch (if any).
            if residual == 0:
                break
            batch_data = params[i * batch_size: i * batch_size + residual]
            lm = reconstruct_vertex(batch_data, data_param, lmk_pts=68)
            lm = lm.asnumpy()
            for j in range(residual):
                outputs.append(lm[j, :2, :])
        else:
            batch_data = params[i * batch_size: (i + 1) * batch_size]
            lm = reconstruct_vertex(batch_data, data_param, lmk_pts=68)
            lm = lm.asnumpy()
            for j in range(batch_size):
                if i == 0:
                    # Plot the first 50 samples for validation.
                    bkg = cv2.imread(img_list[i * batch_size + j], -1)
                    lm_sample = lm[j]
                    # np.int was removed in NumPy 1.24 — use the builtin int.
                    # Clipped to [0, 119]: assumes 120x120 crops (TODO confirm
                    # against param_pack.std_size).
                    c0 = np.clip((lm_sample[1, :]).astype(int), 0, 119)
                    c1 = np.clip((lm_sample[0, :]).astype(int), 0, 119)
                    # Paint a 2x2 dot per landmark.
                    for y, x, in zip([c0, c0, c0 - 1, c0 - 1], [c1, c1 - 1, c1, c1 - 1]):
                        bkg[y, x, :] = np.array([233, 193, 133])
                    cv2.imwrite(f'./results/{i * batch_size + j}.png', bkg)

                outputs.append(lm[j, :2, :])
    return _benchmark_aflw2000(outputs)


# AFLW2000 face orientation estimation
def benchmark_foe(params):
    """
    Face-orientation-estimation benchmark. Only ground truth whose yaw angle
    lies within [-99, 99] is evaluated, following FSA-Net
    (https://github.com/shamangary/FSA-Net).

    Returns:
        Mean absolute error (degrees) over yaw, pitch and roll.
    """
    # AFLW2000 ground truth plus indices to skip (yaw outside [-99, 99]).
    exclude_aflw2000 = '/data1/hujingsong/synergynet/aflw2000_data/eval/ALFW2000-3D_pose_3ANG_excl.npy'
    skip_aflw2000 = '/data1/hujingsong/synergynet/aflw2000_data/eval/ALFW2000-3D_pose_3ANG_skip.npy'

    if not (os.path.isfile(exclude_aflw2000) and os.path.isfile(skip_aflw2000)):
        raise RuntimeError('Missing data')

    pose_gt = np.load(exclude_aflw2000)
    skip_indices = np.load(skip_aflw2000)
    pose_mat = np.ones((pose_gt.shape[0], 3))

    row = 0
    for i in range(params.shape[0]):
        if i in skip_indices:
            continue
        _, angles = parse_pose(params[i])
        # parse_pose decodes yaw-pitch-roll; GT uses pitch-yaw-roll, so swap.
        angles[0], angles[1] = angles[1], angles[0]
        pose_mat[row, :] = np.array(angles)
        row += 1

    abs_err = np.mean(np.abs(pose_mat - pose_gt), axis=0)
    mae = np.mean(abs_err)
    pitch, yaw, roll = abs_err[0], abs_err[1], abs_err[2]
    msg = 'Mean MAE = %3.3f (in deg), [yaw,pitch,roll] = [%3.3f, %3.3f, %3.3f]' % (mae, yaw, pitch, roll)
    print('\nFace orientation estimation:')
    print(msg)
    return mae


def synergynet_eval(opt):
    '''SynergyNet benchmark validation pipeline: alignment then orientation.'''
    context.set_context(
        max_call_depth=100000,
        device_id=opt['device_id'],
        mode=context.GRAPH_MODE,
        device_target=opt['device_target'],
    )

    if not os.path.isdir(opt['val']['root']):
        raise RuntimeError('check if the testing data exist')

    params, data_param = extract_param(opt)

    # Facial alignment benchmark.
    print(benchmark_aflw2000_params(params, data_param))
    # Face orientation estimation benchmark (prints its own summary).
    benchmark_foe(params)



def main():
    '''Parse the CLI option-file path and launch the evaluation.'''
    parser = argparse.ArgumentParser(description='SynergyNet eval.')
    parser.add_argument(
        '-opt',
        type=str,
        default='mind3d/configs/synergy_net/synergynet.yaml',
        help='Path to option YAML file.',
    )
    args, _ = parser.parse_known_args()
    synergynet_eval(load_yaml(args.opt))


# Script entry point.
if __name__ == '__main__':
    main()
