# -*- coding: utf-8 -*-
# @Author  : Zhang.Jingyi
import os
import numpy as np
import json
from tqdm import tqdm


def _read_ply_points(path, start, stop):
    """Read rows ``start:stop`` of an ASCII .ply file as an (N, 3) float32 array.

    ``start``/``stop`` are 0-based slice bounds over the file's newline-split
    lines (``stop`` may be negative, e.g. ``-1`` to drop a trailing blank
    line); each selected row is expected to hold space-separated x/y/z
    coordinates (extra columns beyond the first three are ignored).
    """
    with open(path, 'r') as f:
        rows = f.read().split('\n')[start:stop]
    coords = []
    for row in rows:
        xyz = row.strip("'").strip(' ').split(' ')
        coords.extend(xyz[:3])
    return np.array(coords, dtype=np.float32).reshape(-1, 3)


def ped_extract(dataset_path, out_path, mode):
    """Gather PedX pedestrian annotations for one split into a single .npz.

    Walks the ``<mode>_human_list`` section of ``train_val.json`` and, for
    every 2D bbox whose image, 2D polygon, SMPL pose json, human mesh .ply
    and segmented point-cloud .ply all exist on disk, collects the sample.
    The result is written to ``<out_path>/extras_py3/pedx_<mode>.npz``.

    Args:
        dataset_path: dataset root (contains ``train_val.json``, ``images/``
            and ``labels/``).
        out_path: directory under which ``extras_py3/`` is created.
        mode: split name, e.g. ``'train'`` or ``'val'``.

    Raises:
        AssertionError: if no valid sample was found for the split.
    """
    try:  # tqdm is purely cosmetic (progress bar); degrade gracefully
        from tqdm import tqdm as _progress
    except ImportError:
        def _progress(it):
            return it

    # per-sample accumulators; all of them are appended in lockstep
    imgnames_, scales_, centers_ = [], [], []
    poses_, shapes_, trans_ = [], [], []
    human_vertex_, point_clouds_ = [], []
    timestamps_, tracking_id_ = [], []
    mesh_path_, points_path_ = [], []
    top_x_, top_y_, down_x_, down_y_ = [], [], [], []

    # collect every annotated 2D-bbox file name for this split
    train_val_json_file = os.path.join(dataset_path, 'train_val.json')
    key = mode + '_human_list'
    with open(train_val_json_file, 'r') as file:
        split = json.load(file)[key]
    bboxs = [
        frame_info['2D_bbox']
        for frames in split.values()
        for frame_info in frames.values()
        if frame_info['2D_bbox'] != 'None'
    ]
    # the 'polygon' field of a 2D label may hold these placeholder values
    ignored_values = ["null", " ", None]

    for bbox_i in _progress(bboxs):
        # bbox file names follow <capture>_<camera>_<timestamp>_<track>.json
        info = bbox_i.split('_')
        info[0] = info[0].split('/')[-1]
        if len(info) < 4:  # malformed name; info[1..3] are needed below
            continue
        # Tracking id without the '.json' extension.  NOTE: the previous
        # code used str.strip('.json'), which strips a *character set*
        # (e.g. 'no.json' -> '') — splitext removes only the extension.
        tracking_id = os.path.splitext(info[3])[0]

        # image name
        img_name = os.path.join(dataset_path, 'images', info[0], info[1],
                                '{}_{}_{}.jpg'.format(info[0], info[1], info[2]))
        if not os.path.exists(img_name):
            continue

        # scale and center from the 2D polygon
        bbox_filename = os.path.join(
            dataset_path, 'labels/2d', info[0], bbox_i)
        if not os.path.exists(bbox_filename):
            continue
        with open(bbox_filename, 'r') as f:
            polygon = json.load(f)['polygon']
        if polygon in ignored_values:
            continue
        points2d = np.array(polygon).astype(np.float32)
        top_x, top_y = points2d[:, 0].min(), points2d[:, 1].min()
        down_x, down_y = points2d[:, 0].max(), points2d[:, 1].max()
        # [x, y, w, h] of the axis-aligned polygon bounds
        bbox = np.array([top_x, top_y, down_x - top_x, down_y - top_y])
        center = bbox[:2] + 0.5 * bbox[2:]
        # scale relative to a 200px reference box, from the longer side
        scale = max(bbox[2], bbox[3]) / 200.0

        # SMPL pose/shape/translation parameters
        json_file = os.path.join(dataset_path, 'labels/3d/pose', info[0],
                                 '{}_{}_{}'.format(info[0], info[2], info[3]))
        if not os.path.exists(json_file):
            continue
        with open(json_file, 'r') as f1:
            smpl = json.load(f1)
        pose = np.array(smpl['pose']).astype(np.float32)
        shape = np.array(smpl['betas']).astype(np.float32)
        trans = np.array(smpl['trans']).astype(np.float32)

        # posed human mesh (ASCII .ply: 9 header lines + 6890 SMPL vertices)
        ply_name = '{}_{}_{}.ply'.format(info[0], info[2], tracking_id)
        human_mesh_file = os.path.join(
            dataset_path, 'labels/3d/pose', info[0], ply_name)
        if not os.path.exists(human_mesh_file):
            continue
        human_vertex = _read_ply_points(human_mesh_file, 9, 6899)

        # segmented pedestrian point cloud (7 header lines, trailing blank)
        human_path = os.path.join(
            dataset_path, 'labels/3d/segment', info[0], ply_name)
        if not os.path.exists(human_path):
            continue
        point_clouds = _read_ply_points(human_path, 7, -1)
        if point_clouds.size == 0:
            continue

        # timestamp comes from the filename's third field
        timestamp = float(info[2])

        # store the sample (keeps every accumulator index-aligned)
        imgnames_.append(img_name)
        centers_.append(center)
        scales_.append(scale)
        poses_.append(pose)
        shapes_.append(shape)
        trans_.append(trans)
        human_vertex_.append(human_vertex)
        point_clouds_.append(point_clouds)
        timestamps_.append(timestamp)
        tracking_id_.append(tracking_id)
        mesh_path_.append(human_mesh_file)
        points_path_.append(human_path)
        top_x_.append(top_x)
        top_y_.append(top_y)
        down_x_.append(down_x)
        down_y_.append(down_y)

    # store the data struct
    extra_path = os.path.join(out_path, 'extras_py3')
    os.makedirs(extra_path, exist_ok=True)
    out_file = os.path.join(extra_path, 'pedx_%s.npz' % mode)
    if os.path.exists(out_file):
        os.remove(out_file)

    # all accumulators are appended together, so one check covers them all
    assert len(imgnames_) != 0, 'no valid %s samples found' % mode

    print(out_file)
    np.savez(out_file, imgname=imgnames_,
             center=centers_,
             scale=scales_,
             pose=poses_,
             shape=shapes_,
             trans=trans_,
             human_vertex=human_vertex_,
             point_clouds=point_clouds_,
             timestamp=timestamps_,
             tracking_id=tracking_id_,
             top_x=top_x_,
             top_y=top_y_,
             down_x=down_x_,
             down_y=down_y_,
             points_path=points_path_,
             mesh_path=mesh_path_)
