import numpy as np
import cv2
import json
import os
import logging
import pandas as pd
import easydict as edict

# Joint indices selected as the "body" subset when building a box from the
# 16 pose keypoints (presumably the AlphaPose/MPII 16-joint layout — TODO confirm).
BODY_JOINT_INDEX = [2, 3, 8, 9, 13, 12]
# Global output switches consumed by dump_mot_file2().
config = edict.EasyDict()
config.DUMP_TO_HDF = True  # dump parsed DataFrame to hdf5 + joint-box det file
config.DUMP_OUT_BOX = True  # dump the full-skeleton outer-box det file


def convert_bbox_to_tlbr(bbox):
    """Convert a box from [x, y, w, h] to corner form [x1, y1, x2, y2]."""
    x, y, w, h = bbox[0], bbox[1], bbox[2], bbox[3]
    return np.array([x, y, x + w, y + h]).flatten()


def iou(box0, box1):
    """Intersection-over-union of two boxes, each given as [x, y, w, h].

    Returns 0 when the union area is zero (degenerate boxes).
    """
    # Convert both boxes to corner form (x1, y1, x2, y2) inline.
    ax1, ay1 = box0[0], box0[1]
    ax2, ay2 = box0[0] + box0[2], box0[1] + box0[3]
    bx1, by1 = box1[0], box1[1]
    bx2, by2 = box1[0] + box1[2], box1[1] + box1[3]
    # Clamp the overlap extents at zero so disjoint boxes score 0.
    inter_w = np.maximum(0., np.minimum(ax2, bx2) - np.maximum(ax1, bx1))
    inter_h = np.maximum(0., np.minimum(ay2, by2) - np.maximum(ay1, by1))
    intersection = inter_w * inter_h
    union = ((ax2 - ax1) * (ay2 - ay1)
             + (bx2 - bx1) * (by2 - by1) - intersection)
    if union == 0:
        return 0
    return intersection / union


class Person(object):
    """Placeholder type for a tracked person; no behavior implemented yet."""
    pass


# class Analysis():
#     def __init__(self, joint_list):
#         self.joint_list = joint_list

def estimate_box_constraintion(i, o):
    """Relate an inner (joint-subset) box to an outer (full skeleton) box.

    :param i: inner box as a numpy array in [x, y, w, h] format.
    :param o: outer box as a numpy array in [x, y, w, h] format.
    :return: (width_ratio, height_ratio, v_offset_ratio) where the vertical
        offset is the difference of the box centers' y coordinates,
        normalized by the inner box height.
    """
    width_ratio = i[2] / o[2]
    height_ratio = i[3] / o[3]
    inner_center_y = i[1] + i[3] / 2.
    outer_center_y = o[1] + o[3] / 2.
    v_offset_ratio = (inner_center_y - outer_center_y) / i[3]
    return width_ratio, height_ratio, v_offset_ratio


class Test(object):
    """Wraps AlphaPose JSON detections as a flat pandas DataFrame and links
    skeletons to MOT ground-truth tracks.

    DataFrame columns: fid (frame id), box ([x, y, w, h] hull of all
    keypoints), iid (identity index within the frame), tid (track id; 0
    until attach_skeleton assigns one), then x0..x15 / y0..y15 joint
    coordinates.
    """

    def __init__(self, json_path, path_gt=None):
        # NOTE(review): path_gt is accepted but never used here — call
        # load_gt() explicitly.  The json file handle is never closed.
        self.frames = json.load(open(json_path, 'r'))
        self.df_names = ['fid', 'box', 'iid', 'tid']
        self.df_names.extend(Test.get_joint_names(16))
        self.df = pd.DataFrame(None, columns=self.df_names)
        self.gt = None

    def load_gt(self, path_gt):
        """Load a MOT ground-truth csv (fid, tid, x, y, w, h, score, ...)."""
        self.gt = pd.read_csv(path_gt, header=None)
        self.gt.columns = ['fid', 'tid', 'x', 'y', 'w', 'h', 'score', '?1', '?2', '?3']

    def load_df(self, df_path):
        """Restore a previously dumped DataFrame from an HDF5 store (key 'df')."""
        self.df = pd.read_hdf(df_path, 'df')

    def to_df(self, df_path):
        """Persist the DataFrame to an HDF5 store under key 'df'."""
        self.df.to_hdf(df_path, 'df')

    @staticmethod
    def get_joint_names(l):
        """Return interleaved column names ['x0', 'y0', ..., 'x<l-1>', 'y<l-1>']."""
        return [j + str(i) for i in range(l) for j in ['x', 'y']]

    def __box_scale(self):
        # Unimplemented placeholder.
        pass

    def get_identities(self, img_number):
        """Append all identities of frame ``img_number`` to self.df.

        Expects the "forvis" json layout: a dict keyed by image filename
        ('%06d.jpg'), each value a list of identities with a 'keypoints' field.
        """
        k = '%06d.jpg' % img_number
        if k not in self.frames.keys():
            logging.error('k not in self.frames.keys():')
            return
        identities = self.frames[k]
        boxes = []
        df_tmp = []
        for i, identity in enumerate(identities):
            keypoints = identity['keypoints']
            # Rows of (x, y, score); the box is the tight [x, y, w, h] hull
            # over all keypoints.
            keypoints = np.array(keypoints).reshape([-1, 3])
            min_x, min_y = np.min(keypoints[:, [0, 1]], axis=0)
            max_x, max_y = np.max(keypoints[:, [0, 1]], axis=0)
            box = [min_x, min_y, max_x - min_x, max_y - min_y]
            boxes.append(box)
            # tid starts at 0; attach_skeleton() fills it in later.
            data = pd.Series([img_number, box, i, 0], ['fid', 'box', 'iid', 'tid'])
            series_keypoints = pd.Series(keypoints[:, [0, 1]].flatten(), Test.get_joint_names(16))
            # NOTE(review): Series.append / DataFrame.append were removed in
            # pandas >= 2.0; pd.concat is the modern replacement.
            data = data.append(series_keypoints)
            # todo also merge the joints into the data series.
            # self.df.append(data)
            df_tmp.append(data)
        df_tmp = pd.DataFrame(df_tmp)
        self.df = self.df.append(df_tmp, ignore_index=True)

    def parse_xml(self):
        """Flatten the plain AlphaPose result list (one entry per detected
        person, 'image_id' keyed) into self.df, sorted by frame id."""
        for frame in self.frames:
            """
            {"image_id": "000328.jpg", "category_id": 1, "keypoints": [1178, 972, 0.867691159248, 1178, 837, 0.715989768505, 1157, 701, 0.554083526134, 1119, 697, 0.564945936203, 1112, 823, 0.730980694294, 1084, 948, 0.847509026527, 1134, 690, 0.48806476593, 1123, 522, 0.493920922279, 1128, 490, 0.665843605995, 1130, 408, 0.597517967224, 1210, 607, 0.763488173485, 1173, 628, 0.855022788048, 1157, 527, 0.872517228127, 1089, 522, 0.843122005463, 1112, 601, 0.446099877357, 1173, 592, 0.280099362135], "score": 2.554182176664975}
            """
            name = frame["image_id"]  # type:str
            keypoints = frame["keypoints"]
            score = frame["score"]
            fid = int(name.split('.')[0])
            keypoints = np.array(keypoints).reshape([-1, 3])
            min_x, min_y = np.min(keypoints[:, [0, 1]], axis=0)
            max_x, max_y = np.max(keypoints[:, [0, 1]], axis=0)
            pose_box = np.array([min_x, min_y, max_x - min_x, max_y - min_y])
            series_keypoints = pd.Series(keypoints[:, [0, 1]].flatten(), Test.get_joint_names(16))
            data = pd.Series([fid, pose_box, 0, 0], ['fid', 'box', 'iid', 'tid'])
            data = data.append(series_keypoints)
            self.df = self.df.append(data, ignore_index=True)
            # if analysis is not None:
            #     analysis.handle(data)
        self.df = self.df.sort_values(by='fid')
        self.df = self.df.reset_index(drop=True)

    # def iter_pose_xml(self):
    #     for fid in range(1, len(self.frames)):
    #         self.get_identities(fid)
    # yield

    def write_skeleton_box(self, file_path):
        """Dump the current DataFrame to csv."""
        self.df.to_csv(file_path)

    def attach_skeleton(self):
        """Assign each skeleton row the gt track id with the best box IoU.

        Requires load_gt() to have been called first.
        """
        # for fid, box, iid in zip(self.df.fid, self.df.box, self.df.iid):
        if self.gt is None:
            logging.error('gt file hasn\'t settle. ')
            return
        for i in range(len(self.df)):
            fid, box, iid = self.df.fid[i], self.df.box[i], self.df.iid[i]
            gt_frames = self.gt[self.gt.fid == fid]
            tids = gt_frames.tid.tolist()
            # NOTE(review): as_matrix() was removed in pandas >= 1.0; .values
            # is the modern equivalent.
            gt_boxes = gt_frames.loc[:, ['x', 'y', 'w', 'h']].as_matrix()
            iou_scores = [iou(box, gt_box) for gt_box in gt_boxes]
            tid = tids[np.argmax(iou_scores)]
            self.df.loc[i, 'tid'] = tid
        # todo the same gt target may get attached to several poses; the
        # proper fix is a bipartite assignment within each frame.

    def get_skeleton_data(self):
        """Return the assembled DataFrame."""
        return self.df

    def get_vec_material(self, joint_index, tid):
        """Return (positions, frame ids) of one joint for one track.

        :return: ((N, 2) position array, N frame-id array), sorted by fid;
            None when the track id does not exist.
        """
        df = self.df[self.df.tid == tid]
        df = df.sort_values(by='fid')
        if len(df) == 0:
            logging.warning('no this target:' + str(tid))
            return
        pos = df.loc[:, ['x' + str(joint_index), 'y' + str(joint_index)]]
        fid = df.loc[:, 'fid']
        return pos.values, fid.values

    def dump_mot_file(self, mot_file, points_index):
        """Write a MOT-format det file where each row's box tightly bounds
        only the joints listed in ``points_index``.

        NOTE(review): output_file is never closed explicitly.
        """
        output_file = open(mot_file, 'w')
        for i in range(len(self.df)):
            fid = self.df.fid[i]
            points = np.zeros((len(points_index), 2))
            for j, pi in enumerate(points_index):
                points[j, 0] = self.df.loc[i, 'x%d' % pi]
                points[j, 1] = self.df.loc[i, 'y%d' % pi]
            min_x, min_y = np.min(points[:, [0, 1]], axis=0)
            max_x, max_y = np.max(points[:, [0, 1]], axis=0)
            box = [min_x, min_y, max_x - min_x, max_y - min_y]
            # NOTE(review): out_box is unused except by the commented code below.
            out_box = self.df.box[i]
            print('%d, %d, %d, %d, %d, %d, 2.0, -1, -1, -1'
                  % (fid, i, box[0], box[1], box[2], box[3]), file=output_file)
            # if out_box_file_path is not None:
            #     print('%d, %d, %d, %d, %d, %d' % (fid, i, out_box[0], out_box[1], out_box[2], out_box[3])
            #           , file=out_box_file)

    def dump_mot_outter_box(self, mot_file):
        """Write a MOT-format det file using the full-skeleton outer boxes.

        NOTE(review): output_file is never closed explicitly.
        """
        output_file = open(mot_file, 'w')
        for i in range(len(self.df)):
            fid = self.df.fid[i]
            box = self.df.loc[i, 'box']
            print('%d, -1, %d, %d, %d, %d, 2.0, -1, -1, -1'
                  % (fid, box[0], box[1], box[2], box[3]), file=output_file)

    def analysis_box_constrain(self, mot_file, points_index):
        """Estimate per-row (width-ratio, height-ratio, vertical-offset)
        constraints between the joint-subset box and the full skeleton box,
        then RANSAC-pick the lowest-variance sample average.

        :param mot_file: path where the per-row (wr, hr, v_offset) table is saved.
        :param points_index: joint indices forming the inner box.
        """
        # output_file = open(mot_file, 'w')
        result = np.zeros((len(self.df), 3))
        for i in range(len(self.df)):
            points = np.zeros((len(points_index), 2))
            out_box = self.df.loc[i, 'box']
            for j, pi in enumerate(points_index):
                points[j, 0] = self.df.loc[i, 'x%d' % pi]
                points[j, 1] = self.df.loc[i, 'y%d' % pi]
            min_x, min_y = np.min(points[:, [0, 1]], axis=0)
            max_x, max_y = np.max(points[:, [0, 1]], axis=0)
            box = np.array([min_x, min_y, max_x - min_x, max_y - min_y])
            wr, hr, v_offset = estimate_box_constraintion(box, out_box)
            result[i, 0] = wr
            result[i, 1] = hr
            result[i, 2] = v_offset
        # print(result)
        np.savetxt(mot_file, result, '%04f')
        var_min = np.ones(3) * 99999;
        # ransac pick: draw random 50-row samples and keep the average of the
        # sample whose first-column variance is smallest.
        avgs = None
        for i in range(100):
            sample = np.random.randint(0, len(self.df), 50)
            sample = result[sample, :]
            vars = np.var(sample, axis=0)

            if vars[0] < var_min[0]:
                var_min[0] = vars[0]
                avgs = np.average(sample, axis=0)
                # print('param 0 %f' % avgs[0])
            # if vars[1] < var_min[1]:
            #     var_min[1] = vars[1]
            # print('param 0 %f' % avgs[0])
        print('wr hr v offset param average :')
        print(avgs)
        # NOTE(review): param_file_path is built but never used; the result is
        # always written to the fixed path below.
        param_file_path = './output/point__box_%d_%d_%d_%d_param.txt' % (
        points_index[0], points_index[1], points_index[2], points_index[3])
        np.savetxt('./output/point_box_param.txt', avgs, '%04f')
        print(var_min)

def attach_skeleton():
    """
    Find the skeleton matching each ground-truth target and dump it to csv.
    :return: the assembled skeleton DataFrame.
    """
    json_path = 'alphapose_tf_results/result_mot16_02/POSE/alpha-pose-results-forvis.json'
    dest_path = './output/test_joints_stability.csv'
    test = Test(json_path)
    # Process frames 1..99 (num_frame was 100, range excluded the upper bound).
    for frame_id in range(1, 100):
        test.get_identities(frame_id)
    test.load_gt('./mot16Label/train/MOT16-02/gt/gt.txt')
    test.attach_skeleton()
    test.write_skeleton_box(dest_path)
    return test.get_skeleton_data()


# def dump_mot_file():
#     seq_name = 'MOT16-02'
#     json_path = 'alphapose_tf_results/result_mot16_02/POSE/alpha-pose-results-forvis.json'
#     my_mot_det_file = './output/mymot_joints_det_{}.txt'.format(seq_name)
#     point_inds = [2, 3, 13, 12]
#     test = Test(json_path)
#     test.iter_pose_xml()
#     test.dump_mot_file(my_mot_det_file, point_inds)


def dump_mot_file2(seq_name='PETS09-S2L1', set_name='train'):
    """
    step 1: parse one sequence's AlphaPose json and dump MOT-format det files.

    :param seq_name: MOT15 sequence name.
    :param set_name: dataset split, 'train' or 'test'.
    :return: None; outputs are controlled by config.DUMP_TO_HDF and
        config.DUMP_OUT_BOX.
    """
    # json_path = 'alphapose_tf_results/result_mot16_02/POSE/alpha-pose-results.json'
    json_path = './output/mot15_pose/' + set_name + '/' + seq_name + '/POSE/alpha-pose-results.json'
    # json_path = './output/mot15_pose_acc/' + set_name + '/' + seq_name + '/POSE/alpha-pose-results.json'
    if not os.path.exists(json_path):
        # Fixed log message: 'miss file' previously ran straight into the path.
        logging.error('missing file: ' + json_path)
        return

    my_mot_det_file = './output/mymot_joints_det_{}.txt'.format(seq_name)
    # point_inds = [2, 3, 13, 12]
    point_inds = BODY_JOINT_INDEX
    test = Test(json_path)
    test.parse_xml()
    if config.DUMP_TO_HDF:
        df_path = './output/mot15_pose/{}/{}/det_pose.hdf5'.format(set_name, seq_name)
        test.to_df(df_path)
        test.dump_mot_file(my_mot_det_file, point_inds)  # switch here between box or joints output
    if config.DUMP_OUT_BOX:
        test.dump_mot_outter_box('./output/mot15_pose_det/{}/{}.txt'.format(set_name, seq_name))




def batch_dump_mot():
    """Run dump_mot_file2 over every sequence of the selected MOT15 split."""
    set_name = 'test'
    # Train split list is the default; it is overridden below for 'test'.
    seq_list = 'ADL-Rundle-6   ADL-Rundle-8   ETH-Bahnhof    ETH-Pedcross2  ETH-Sunnyday   KITTI-13       KITTI-17 ' \
               '   PETS09-S2L1    TUD-Campus     TUD-Stadtmitte Venice-2 '
    if set_name == 'test':
        seq_list = 'ADL-Rundle-1  AVG-TownCentre  ETH-Jelmoli      KITTI-16  PETS09-S2L2   Venice-1 ADL-Rundle-3  ' \
                   'ETH-Crossing    ETH-Linthescher  KITTI-19  TUD-Crossing '

    for name in seq_list.split():
        logging.info('handling ' + name + '...')
        dump_mot_file2(name.strip(), set_name)


def analysis_box_constrain():
    """
    step 0: fit the skeleton-box vs. detection-box ratio constraints.
    :return:
    """
    # seq_name = 'MOT16-02'
    # json_path = 'alphapose_tf_results/result_mot16_02/POSE/alpha-pose-results.json'
    seq_name = 'PETS09-S2L1'
    out_path = './output/constraint_box_{}_2_3_12_13.txt'.format(seq_name)
    pose_json = 'output/mot15_pose/train/{}/POSE/alpha-pose-results.json'.format(seq_name)
    runner = Test(pose_json)
    runner.parse_xml()
    runner.analysis_box_constrain(out_path, BODY_JOINT_INDEX)


# todo move
def test_velocity_visualize():
    """Plot per-joint displacement speeds for one track and rank the joints
    by velocity variance (used as a proxy for joint stability)."""
    import matplotlib.pyplot as plt
    import matplotlib
    # matplotlib.use('Qt5Agg')
    # seq_name = 'MOT16-02'
    # json_path = 'alphapose_tf_results/result_mot16_02/POSE/alpha-pose-results.json'
    # my_mot_det_file = './output/mymot_joints_det_{}.txt'.format(seq_name)

    seq_nqme = 'TUD-Stadtmitte'
    json_path = './output/mot15_pose/train/{}/POSE/alpha-pose-results.json'.format(seq_nqme)
    test = Test(json_path)

    # True: rebuild from json + gt and cache to hdf5; False: reload the cache.
    if True:
        # test.load_gt('./mot16Label/train/MOT16-02/gt/gt.txt')
        test.load_gt('MOT15/train/{}/gt/gt.txt'.format(seq_nqme))
        test.parse_xml()
        test.attach_skeleton()
        df = test.get_skeleton_data()
        df.to_hdf('./output/foo.h5', 'df')
    else:
        test.load_df('./output/foo.h5')
    tid = 5
    color = ['r', 'g', 'b']
    # m_var = pd.DataFrame(None, columns=['tid', 'joint_id', 'var'])
    m_var = pd.DataFrame(np.zeros((1, 15)), columns=['j' + str(i) for i in range(15)])
    # output_file = open('./output/joint_stability.txt', 'w')

    df = test.df[test.df.tid == tid]

    if len(df) == 0:
        logging.error('no this target:' + str(tid))
    assert len(df) > 0
    # Drop repeated detections of the same frame so fid gaps are non-zero.
    df = df.drop_duplicates('fid', 'first', False)

    for joint_index in range(15):
        # pos, fid = test.get_vec_material(joint_index, tid)

        pos = df.loc[:, ['x' + str(joint_index), 'y' + str(joint_index)]].values
        fid = df.loc[:, 'fid'].values
        assert len(fid) == len(pos)
        print(fid)
        vec = pos2vec(fid, pos)
        # todo normalize.
        plt.plot(np.ones(len(vec)) * joint_index, vec, color[joint_index % 3] + 'o')
        var = np.var(vec)
        m_var.loc[0, 'j' + str(joint_index)] = var
        # print("%d, %d, %d" %( tid, joint_index, var), file=output_file)
    m_var.to_csv('./output/joint_stability_{}.txt'.format(seq_nqme))
    # NOTE(review): as_matrix() was removed in pandas >= 1.0; .values is the
    # modern equivalent.
    values = m_var.as_matrix()
    print(values)
    print('tid %d , joint : rank' % tid)
    print(np.argsort(values[0]))
    # plt.axis([-1, 15, 0, 30])
    # plt.show()


def pos2vec(l_fid, l_pos):
    """Per-frame displacement speed of one joint track.

    :param l_fid: 1-D array of frame ids.
    :param l_pos: (N, 2) array of joint positions for those frames.
    :return: array of N-1 speeds: each consecutive displacement norm divided
        by the corresponding frame-id gap.
    """
    assert len(l_fid) == len(l_pos)
    assert l_pos.shape[1] == 2
    # Consecutive differences replace the explicit slice subtraction.
    displacement = np.diff(l_pos, axis=0)
    step = np.linalg.norm(displacement, axis=1).flatten()
    gaps = np.diff(l_fid)
    if 0 in gaps:
        # A zero gap means duplicate frame ids; the division below will blow up.
        logging.error("0 in fid diff")
        # logging.info(l_fid)
        logging.info(gaps)
    return step / gaps


def main():
    # NOTE(review): incomplete driver — tracklets are extracted but never used.
    df = attach_skeleton()
    tids = df.tid
    # NOTE(review): tids.tolist() contains duplicate ids, so each track's
    # tracklet is recomputed once per row; tids.unique() was probably intended.
    for tid in tids.tolist():
        tracklet = df[tids == tid]


def oneshot():
    """One-off experiment: load one MOT15 gt file and extract the first
    target's frame ids and positions.

    Bug fixed: ``data.fid.values`` is an ndarray property, not a method —
    the original ``data.fid.values()`` raised TypeError at runtime.
    """
    PATH_GT_FILE = 'MOT15/train/ADL-Rundle-6/gt/gt.txt'
    # gt = np.loadtxt(PATH_GT_FILE)
    gt = pd.read_csv(PATH_GT_FILE, header=None)
    gt.columns = ['fid', 'tid', 'x', 'y', 'w', 'h', '?', '?', '?', '?']
    tids = gt['tid'].unique()
    # for tid in tids:
    if True:
        tid = tids[0]
        data = gt[gt.tid == tid]
        fid = data.fid.values  # fixed: .values is a property, not callable
        # NOTE(review): as_matrix() was removed in pandas >= 1.0; kept here
        # for consistency with the rest of the file — .values is the modern form.
        pos = data[['x', 'y']].as_matrix()

        # for fid in data.fid:
        #     data_frame = data[data.fid == fid]
        #     box = data_frame.iloc[0, 2:2+4].tolist()
        #     print(box)


def visualize_iou_comparison():
    # NOTE(review): unimplemented stub — both paths are empty placeholders.
    json_path = ''
    pd_path = ''


if __name__ == '__main__':
    # oneshot()
    # attach_skeleton()
    # main()
    # dump_mot_file2('TUD-Stadtmitte')        # 'ETH-Bahnhof'

    # check which joint is the most stable
    # test_velocity_visualize()

    # check the skeleton-box / detection-box ratio constraints
    # analysis_box_constrain()

    batch_dump_mot()
