import csv
from ast import parse
import functools
from plyfile import PlyData, PlyElement
from typing import List
import argparse
import numpy as np
import json
import os
import re
import sys
import h5py
from PIL import Image

sys.path.append(os.path.dirname(os.path.dirname(
    os.path.dirname(os.path.abspath(__file__)))))
from tools import multiprocess

ROOT_PATH = '/home/ljl/hdd'
MAX_PROCESS_COUNT = 64

# img_filenames = []


def read_npy(filename):
    """Read a point cloud stored as a .npy file.

    The previous implementation was a stub that always returned 0 and
    never touched the file; load the saved array instead.

    Args:
        filename: path to a numpy .npy file.

    Returns:
        The array stored in the file.
    """
    points = np.load(filename)
    return points


def read_txt(filename):
    """Read XYZ coordinates (the first three space-separated columns) from a text file."""
    xyz_cols = (0, 1, 2)
    return np.loadtxt(filename, dtype="float", delimiter=' ', usecols=xyz_cols)


def read_ply(filename):
    """Read an XYZ point cloud from a PLY file.

    Each vertex record is expected to carry four fields (x, y, z and one
    extra, e.g. intensity); only the coordinates are kept.
    """
    vertex_data = PlyData.read(filename)['vertex'].data
    xyz = [(px, py, pz) for px, py, pz, _ in vertex_data]
    return np.array(xyz)


def save_ply(filename, points):
    """Write an (N, 3) array of XYZ coordinates to a binary PLY file."""
    xyz_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
    rows = [tuple(row[:3]) for row in points]
    vertex = np.array(rows, dtype=xyz_dtype)
    element = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
    PlyData([element], text=False).write(filename)


# def get_index(filename):
#     basename = os.path.basename(filename)
#     return int(os.path.splitext(basename)[0])


def get_index(filename):
    """Parse a frame index from a filename whose first six characters encode it.

    Only the first six characters of the basename are considered, so a
    name like '000123_extra.ply' still yields 123.
    """
    stem = os.path.basename(filename)[:6]
    return int(os.path.splitext(stem)[0])


def get_sorted_filenames_by_index(dirname, isabs=True):
    """List a directory's files sorted by the numeric index in each name.

    Args:
        dirname: directory to scan; an empty list is returned if it does
            not exist.
        isabs: when True (default), join dirname onto each returned name.

    Returns:
        Filenames sorted by get_index(), absolute if isabs is True.
    """
    if not os.path.exists(dirname):
        return []
    # The original called os.listdir twice and discarded the first result.
    filenames = sorted(os.listdir(dirname), key=get_index)
    if isabs:
        filenames = [os.path.join(dirname, filename) for filename in filenames]
    return filenames


def parse_json(json_filename):
    """Load SMPL parameters (beta, pose, trans) from a JSON file.

    Returns:
        Tuple of float32 arrays: (beta, pose, trans).
    """
    with open(json_filename) as f:
        content = json.load(f)

    def as_f32(key):
        return np.array(content[key], dtype=np.float32)

    return as_f32('beta'), as_f32('pose'), as_f32('trans')


def fix_points_num(points: np.ndarray, num_points: int) -> np.ndarray:
    """Resample a point cloud to exactly num_points points.

    Rows containing NaN are dropped first. Smaller clouds are padded by
    repeating points; larger (or equal) clouds are randomly subsampled
    with replacement (unchanged from the original behavior).

    Args:
        points: (N, C) array of points.
        num_points: desired number of output points.

    Returns:
        (num_points, C) array.
    """
    points = points[~np.isnan(points).any(axis=-1)]

    origin_num_points = points.shape[0]
    # Guard: an empty / all-NaN cloud previously crashed with a division
    # by zero below; return a zero cloud of the requested size instead.
    if origin_num_points == 0:
        return np.zeros((num_points, points.shape[1]), dtype=points.dtype)

    if origin_num_points < num_points:
        num_whole_repeat = num_points // origin_num_points
        res = points.repeat(num_whole_repeat, axis=0)
        num_remain = num_points % origin_num_points
        res = np.vstack((res, res[:num_remain]))
    else:
        # origin_num_points >= num_points: random subsample (with replacement).
        res = points[np.random.choice(origin_num_points, num_points)]
    return res


def read_image(image_filename):
    """Load an image file and return its pixel data as a numpy array.

    Uses a context manager so PIL's lazily-opened file handle is closed
    promptly instead of lingering until garbage collection.
    """
    with Image.open(image_filename) as img:
        return np.array(img)


def foo(id, args):
    """Load one sequence's point clouds and build per-frame or windowed arrays.

    Reads the segment files under ROOT_PATH/args.dataset/<id>, resamples
    every cloud to args.npoints points, and pairs them with zero-filled
    SMPL parameters (see the comment below: real pose files are skipped
    for kitti/waymo visualization data).

    Args:
        id: sequence id; converted to str (note: shadows the builtin `id`).
        args: parsed CLI namespace; uses args.dataset, args.npoints and
            args.seqlen. seqlen == 0 keeps flat per-frame arrays;
            otherwise frames are grouped into windows of length seqlen.

    Returns:
        Tuple of stacked arrays:
        (poses, betas, trans, point_clouds, points_nums, depths).
    """
    id = str(id)
    # cur_img_filenames = get_sorted_filenames_by_index(
    #     os.path.join(ROOT_PATH, 'images', id))

    # no need to read real file, because need set zero just for kitti and waymo vis data
    # pose_filenames = get_sorted_filenames_by_index(
    #     os.path.join(ROOT_PATH, 'labels', '3d', 'pose', id))
    # json_filenames = list(filter(lambda x: x.endswith('json'), pose_filenames))
    # ply_filenames = list(filter(lambda x: x.endswith('ply'), pose_filenames))

    # cur_betas, cur_poses, cur_trans = multiprocess.multi_func(
    #     parse_json, MAX_PROCESS_COUNT, len(json_filenames), 'Load json files',
    #     True, json_filenames)

    # cur_vertices = multiprocess.multi_func(
    #     read_ply, MAX_PROCESS_COUNT, len(ply_filenames), 'Load vertices files',
    #     True, ply_filenames)

    # depth_filenames = get_sorted_filenames_by_index(
    #     os.path.join(ROOT_PATH, 'labels', '3d', 'depth', id))
    # cur_depths = multiprocess.multi_func(read_image, MAX_PROCESS_COUNT, len(
    #     depth_filenames), 'Load depth files', True, depth_filenames)

    # segment_filenames = get_sorted_filenames_by_index(
    #     os.path.join(ROOT_PATH, 'labels', '3d', 'segment', id))
    #
    # cur_point_clouds = multiprocess.multi_func(
    #     read_ply, MAX_PROCESS_COUNT, len(segment_filenames),
    #     'Load segment files', True, segment_filenames)

    # TODO: this for read kitti txt
    # NOTE(review): files are read with read_ply below despite the txt TODO
    # above — confirm that the segment files really are PLY.

    segment_filenames = get_sorted_filenames_by_index(
        os.path.join(ROOT_PATH, args.dataset, id))

    # Parallel load of all segment point clouds.
    cur_point_clouds = multiprocess.multi_func(
        read_ply, MAX_PROCESS_COUNT, len(segment_filenames),
        'Load segment files', True, segment_filenames)

    # Record the true point count per frame (capped at npoints), then pad /
    # subsample every cloud to exactly npoints rows.
    cur_points_nums = [min(args.npoints, points.shape[0])
                       for points in cur_point_clouds]
    cur_point_clouds = [fix_points_num(
        points, args.npoints) for points in cur_point_clouds]

    pc_len = len(cur_point_clouds)

    cur_betas = []
    cur_poses = []
    cur_trans = []
    cur_depths = []

    # train_poses = np.zeros((0, args.seqlen, 72))
    # train_betas = np.zeros((0, args.seqlen, 10))
    # train_trans = np.zeros((0, args.seqlen, 3))

    # SMPL parameters and depths are all zero placeholders (see comment above).
    for i in range(pc_len):
        cur_betas.extend([np.zeros(10)])
        cur_poses.extend([np.zeros(72)])
        cur_trans.extend([np.zeros(3)])
        cur_depths.extend([np.zeros(4)])  # no use

    poses = []
    betas = []
    trans = []
    # vertices = []
    points_nums = []
    point_clouds = []
    depths = []

    if args.seqlen == 0:
        # Flat per-frame output: copy everything through unchanged.
        # img_filenames.extend(cur_img_filenames)
        betas.extend(cur_betas)
        poses.extend(cur_poses)
        trans.extend(cur_trans)
        # vertices.extend(cur_vertices)
        point_clouds.extend(cur_point_clouds)
        points_nums.extend(cur_points_nums)
        depths.extend(cur_depths)
    else:
        n = len(cur_betas)
        # Pad directly to a multiple of seqlen by repeating the last frame.
        while n % args.seqlen != 0:
            # cur_img_filenames.append(cur_img_filenames[-1])
            cur_betas.append(cur_betas[-1])
            cur_poses.append(cur_poses[-1])
            cur_trans.append(cur_trans[-1])
            # cur_vertices.append(cur_vertices[-1])
            cur_point_clouds.append(cur_point_clouds[-1])
            cur_points_nums.append(cur_points_nums[-1])
            cur_depths.append(cur_depths[-1])
            n += 1
        times = n // args.seqlen
        for i in range(times):
            # Window i covers frames [lb, ub).
            lb = i * args.seqlen
            ub = lb + args.seqlen
            # img_filenames.append(cur_img_filenames[lb:ub])
            betas.append(np.stack(cur_betas[lb:ub]))
            poses.append(np.stack(cur_poses[lb:ub]))
            trans.append(np.stack(cur_trans[lb:ub]))
            # vertices.append(np.stack(cur_vertices[lb:ub]))
            point_clouds.append(np.stack(cur_point_clouds[lb:ub]))
            points_nums.append(np.stack(cur_points_nums[lb:ub]))
            depths.append(np.stack(cur_depths[lb:ub]))

    # return poses, betas, trans, vertices, point_clouds, points_nums
    return np.stack(poses), np.stack(betas), np.stack(trans), np.stack(point_clouds), np.stack(points_nums), np.stack(depths)


def test(args):
    """Placeholder handler for the 'test' subcommand; intentionally a no-op."""
    pass


def get_sorted_ids(s):
    """Parse an id specification into a sorted list of ints.

    Accepts either an inclusive range 'start-end' (e.g. '3-5') or a
    comma-separated list (e.g. '7,2,9'). Ids must start with 1-9 (no
    leading zeros).

    Args:
        s: the id specification string.

    Returns:
        Sorted list of integer ids.

    Raises:
        ValueError: if s matches neither format (previously this fell
            through and raised UnboundLocalError on `indexes`).
    """
    # Raw strings avoid the invalid-escape DeprecationWarning for '\d'.
    if re.match(r'^([1-9]\d*)-([1-9]\d*)$', s):
        start_index, end_index = s.split('-')
        indexes = list(range(int(start_index), int(end_index) + 1))
    elif re.match(r'^(([1-9]\d*),)*([1-9]\d*)$', s):
        indexes = [int(x) for x in s.split(',')]
    else:
        raise ValueError('invalid id specification: {}'.format(s))
    return sorted(indexes)


def dump(args):
    """Build the 'whole' HDF5 dataset for the requested sequence ids.

    For each id, foo() loads the point clouds (with zeroed SMPL
    parameters); the per-id arrays are concatenated and written to
    extras_path/lidarcap_<dataset>_[seqN_]whole.hdf5.

    NOTE(review): relies on the module-level global `extras_path`, which
    is only defined when this file runs as a script — calling dump()
    after a plain import would raise NameError. Confirm intended usage.

    NOTE(review): with seqlen == 0 the arrays returned by foo() are 2-D
    while the whole_* buffers below are 3-D, so np.concatenate would
    fail; seqlen > 0 appears to be required here. Confirm.

    Args:
        args: parsed CLI namespace; uses seqlen, npoints, dataset, ids.
    """

    seq_str = '' if args.seqlen == 0 else 'seq{}_'.format(args.seqlen)
    ids = get_sorted_ids(args.ids)
    first = True  # NOTE(review): assigned but never used below.

    # used to add rotation for smpl
    # Train/test buffers are allocated but the split writes are currently
    # commented out; only the 'whole' file is produced.
    train_poses = np.zeros((0, args.seqlen, 72))
    train_betas = np.zeros((0, args.seqlen, 10))
    train_trans = np.zeros((0, args.seqlen, 3))
    # train_vertices = np.zeros((0, args.seqlen, 6890, 3))
    # allocate memory for point cloud: batch_size, len, points, channel
    train_point_clouds = np.zeros((0, args.seqlen, args.npoints, 3))
    train_points_nums = np.zeros((0, args.seqlen))
    train_depths = []

    test_poses = np.zeros((0, args.seqlen, 72))
    test_betas = np.zeros((0, args.seqlen, 10))
    test_trans = np.zeros((0, args.seqlen, 3))
    # test_vertices = np.zeros((0, args.seqlen, 6890, 3))
    test_point_clouds = np.zeros((0, args.seqlen, args.npoints, 3))
    test_points_nums = np.zeros((0, args.seqlen))
    test_depths = []

    whole_poses = np.zeros((0, args.seqlen, 72))
    whole_betas = np.zeros((0, args.seqlen, 10))
    whole_trans = np.zeros((0, args.seqlen, 3))
    # whole_vertices = np.zeros((0, args.seqlen, 6890, 3))
    # allocate memory for point cloud: batch_size, len, points, channel
    whole_point_clouds = np.zeros((0, args.seqlen, args.npoints, 3))
    whole_points_nums = np.zeros((0, args.seqlen))
    whole_depths = []

    # Every n_split-th window would go to the test split (split unused below).
    n_split = 9
    for id in ids:
        # poses, betas, trans, vertices, point_clouds, points_nums = foo(
        poses, betas, trans, point_clouds, points_nums, depths = foo(
            id, args)

        n = len(poses)

        train_indexes = np.array(
            [x for x in range(n) if x % n_split != n_split - 1])
        test_indexes = np.array(
            [x for x in range(n) if x % n_split == n_split - 1])
        print(train_indexes)
        print(test_indexes)

        # train_poses = np.concatenate((train_poses, poses[train_indexes]))
        # train_betas = np.concatenate((train_betas, betas[train_indexes]))
        # train_trans = np.concatenate((train_trans, trans[train_indexes]))
        # # train_vertices = np.concatenate(
        # #     (train_vertices, vertices[train_indexes]))
        # train_point_clouds = np.concatenate(
        #     (train_point_clouds, point_clouds[train_indexes]))
        # train_points_nums = np.concatenate(
        #     (train_points_nums, points_nums[train_indexes]))
        #
        # test_poses = np.concatenate((test_poses, poses[test_indexes]))
        # test_betas = np.concatenate((test_betas, betas[test_indexes]))
        # test_trans = np.concatenate((test_trans, trans[test_indexes]))
        # # test_vertices = np.concatenate(
        # #     (test_vertices, vertices[test_indexes]))
        # test_point_clouds = np.concatenate(
        #     (test_point_clouds, point_clouds[test_indexes]))
        # test_points_nums = np.concatenate(
        #     (test_points_nums, points_nums[test_indexes]))

        whole_poses = np.concatenate((whole_poses, np.stack(poses)))
        whole_betas = np.concatenate((whole_betas, np.stack(betas)))
        whole_trans = np.concatenate((whole_trans, np.stack(trans)))
        # whole_vertices = np.concatenate(
        #     (whole_vertices, np.stack(vertices)))
        whole_point_clouds = np.concatenate(
            (whole_point_clouds, np.stack(point_clouds)))
        whole_points_nums = np.concatenate(
            (whole_points_nums, np.stack(points_nums)))

    # train_filename = 'lidarcap_' + seq_str + 'train.hdf5'
    # with h5py.File(os.path.join(extras_path, train_filename), 'w') as f:
    #     f.create_dataset('pose', data=train_poses)
    #     f.create_dataset('shape', data=train_betas)
    #     f.create_dataset('trans', data=train_trans)
    #     # f.create_dataset('human_vertex', data=train_vertices)
    #     f.create_dataset('point_clouds', data=train_point_clouds)
    #     f.create_dataset('points_num', data=train_points_nums)
    #
    # test_filename = 'lidarcap_' + seq_str + 'test.hdf5'
    # with h5py.File(os.path.join(extras_path, test_filename), 'w') as f:
    #     f.create_dataset('pose', data=test_poses)
    #     f.create_dataset('shape', data=test_betas)
    #     f.create_dataset('trans', data=test_trans)
    #     # f.create_dataset('human_vertex', data=test_vertices)
    #     f.create_dataset('point_clouds', data=test_point_clouds)
    #     f.create_dataset('points_num', data=test_points_nums)

    whole_filename = 'lidarcap_' + args.dataset + "_" + seq_str + 'whole.hdf5'
    with h5py.File(os.path.join(extras_path, whole_filename), 'w') as f:
        f.create_dataset('pose', data=whole_poses)
        f.create_dataset('shape', data=whole_betas)
        f.create_dataset('trans', data=whole_trans)
        # f.create_dataset('human_vertex', data=whole_vertices)
        f.create_dataset('point_clouds', data=whole_point_clouds)
        f.create_dataset('points_num', data=whole_points_nums)


# Outputs files in the same format as /home/scsc02/jingyi/project/3D_human_ljl/datasets/preprocess/pedx.py
if __name__ == '__main__':
    # Output directory for the generated hdf5 files; module-level so that
    # dump() can read it as a global.
    extras_path = os.path.join(ROOT_PATH, 'lidarcap', 'extras')
    os.makedirs(extras_path, exist_ok=True)

    parser = argparse.ArgumentParser()
    subparser = parser.add_subparsers()

    # 'dump' subcommand: build the hdf5 dataset.
    parser_dump = subparser.add_parser('dump')
    parser_dump.add_argument('--seqlen', type=int, default=0)
    parser_dump.add_argument('--npoints', type=int, default=512)
    parser_dump.add_argument('--dataset', type=str, required=True)
    parser_dump.add_argument('--ids', type=str, required=True)
    parser_dump.add_argument('--append', action='store_true',
                             help='whether rewrite the data file, append new data in default')
    parser_dump.set_defaults(func=dump)

    # 'test' subcommand: no-op placeholder.
    parser_test = subparser.add_parser('test')
    parser_test.set_defaults(func=test)

    args = parser.parse_args()
    # Running without a subcommand used to crash with AttributeError on
    # args.func; show usage and exit instead.
    if not hasattr(args, 'func'):
        parser.print_help()
        sys.exit(1)
    args.func(args)
