#!/usr/bin/env python
# coding=utf-8
import logging
import os

import cv2
import easydict
import matplotlib.pyplot as plt
import numpy as np

from application_util import visualization
from deep_sort.detection import Detection

# Module-level configuration.
# NOTE(review): neither flag is referenced in this chunk — presumably read
# by other modules; confirm before removing.
config = easydict.EasyDict()
config.REQUIRE_ESTIMATE = False  # assumed toggle for joint-box -> full-box estimation — TODO confirm
STR_DATA_TYPE = 'train'  # MOT15 split name used when building paths elsewhere — TODO confirm


class BoxEstimator:
    """Estimate a full pedestrian box from a (smaller) pose/joint box.

    The parameter file is a whitespace-separated text file of three
    numbers: width scale, height scale, and a vertical offset ratio.
    """

    def __init__(self, param_file):
        """Load (scale_w, scale_h, offset) from ``param_file``."""
        param = np.loadtxt(param_file)
        self.scale_w = param[0]
        self.scale_h = param[1]
        self.offset = param[2]

    def estimate_box2(self, tlwh_joint_box, scale_w, scale_h, v_offset_ratio):
        """Expand a tlwh joint box into an estimated full box.

        :param tlwh_joint_box: array-like (left, top, width, height).
        :param scale_w: ratio joint-box width / full-box width.
        :param scale_h: ratio joint-box height / full-box height.
        :param v_offset_ratio: upward shift of the box center, as a
            fraction of the joint-box height.
        :return: float64 ndarray tlwh box with non-negative left/top.
        """
        # BUG FIX: work in float64 — the original used np.zeros_like on the
        # input, so an integer box silently truncated every coordinate.
        ib = np.asarray(tlwh_joint_box, dtype=np.float64)
        dw, dh = ib[2], ib[3]
        new_box = np.zeros_like(ib)
        new_box[2] = dw / scale_w
        new_box[3] = dh / scale_h
        # center point of the input joint box
        ip = ib[[0, 1]] + ib[[2, 3]] / 2.
        # shift the vertical center up by v_offset_ratio * joint height
        op_v = ip[1] - v_offset_ratio * dh
        new_box[0] = ip[0] - new_box[2] / 2.
        new_box[1] = op_v - new_box[3] / 2.
        # clamp top-left into the image (no negative coordinates)
        new_box[0] = max(new_box[0], 0)
        new_box[1] = max(new_box[1], 0)
        return new_box

    def estimate(self, box):
        """Apply :meth:`estimate_box2` with the loaded file parameters."""
        return self.estimate_box2(box, self.scale_w, self.scale_h, self.offset)

def __remove_axis():
    """Stretch the subplot area to the full figure so rendered frames have
    no surrounding border.

    https://stackoverflow.com/questions/8218608/scipy-savefig-without-frames-axes-only-content
    """
    fig = plt.figure()
    for side, position in (('bottom', 0), ('top', 1), ('right', 1), ('left', 0)):
        fig.subplots_adjust(**{side: position})

def visualize_det(det_path_list, data_name):
    """Overlay detection boxes from one or more MOT detection files on the
    sequence images and page through them with matplotlib.

    :param det_path_list: MOT-format detection file paths; each file gets
        its own color (blue, green, red, ... in the order below).
    :param data_name: sequence name used to build the image path.
    """
    det_data = [np.loadtxt(path, delimiter=',') for path in det_path_list]
    # parent_dir = '/Users/gerrie/unixShare/alphapose_tf/mot15_train/'
    # img_file_path = parent_dir + '/{}/RENDER/{:06d}.jpg'
    parent_dir = '/Users/gerrie/trainingData/MOT/2DMOT2015/train/'
    img_file_path = parent_dir + '/{}/img1/{:06d}.jpg'
    colors = [[1.0, 0, 0],
              [0, 1.0, 0],
              [0, 0, 1.0],
              [1, 1, 0],
              [1, 0, 1],
              [0, 1, 1]]
    # kept for the (commented) joint-box -> full-box estimation path below
    estimator = BoxEstimator('./output/point_box_param.txt')

    __remove_axis()
    for fid in range(100, 800):
        file_path = img_file_path.format(data_name, fid)
        print(file_path)
        frame = cv2.imread(file_path)
        if frame is None:
            # robustness: skip missing frames instead of crashing on .copy()
            logging.warning('image {} not found'.format(file_path))
            continue
        frame_canvas = frame.copy()
        for det_ret_index, dets in enumerate(det_data):  # dets: all detections
            current_detections = dets[dets[:, 0] == fid].astype(np.int32)
            det_rects = current_detections[:, 2:6]
            det_ids = current_detections[:, 1]
            for rect, det_id in zip(det_rects, det_ids):
                # if det_ret_index == 1:
                #     rect = estimator.estimate(np.array(rect))
                cv2.rectangle(frame_canvas, (rect[0], rect[1]),
                              (rect[0] + rect[2], rect[1] + rect[3]),
                              color=[v * 255 for v in colors[det_ret_index]],
                              thickness=2)
                # if det_id > 0:
                #     cv2.putText(frame_canvas, str(det_id), (rect[0], rect[1]),
                #                 cv2.FONT_HERSHEY_COMPLEX, 0.8, [0, 0, 0], 2)

        plt.imshow(frame_canvas[:, :, [2, 1, 0]])  # BGR -> RGB
        # BUG FIX: the original tested an undefined name `key` and raised a
        # NameError. waitforbuttonpress returns False for a mouse click, so
        # a click now exits the loop; any key advances to the next frame.
        key_pressed = plt.waitforbuttonpress()
        if not key_pressed:
            break


def matplotlib_setup():
    """Hide both axes of the current matplotlib figure."""
    current = plt.gca()
    # hide the y axis
    current.axes.get_yaxis().set_visible(False)
    # hide the x axis
    current.axes.get_xaxis().set_visible(False)
    plt.axis('off')

def main():
    """Visualize faster-rcnn detections for one hard-coded MOT15 sequence."""
    # Other candidate sequences (uncomment to switch):
    # data_name = 'TUD-Campus'
    # data_name = 'Venice-2'
    # data_name = 'ETH-Bahnhof'
    # Good result display below.
    # data_name = 'ETH-Pedcross2'
    data_name = 'PETS09-S2L1'
    # data_name = 'TUD-Stadtmitte'
    # data_name = 'ETH-Sunnyday'

    frcnn_det_path = '/Users/gerrie/source/cv/mot/sort/data/{}/det.txt'.format(data_name)
    gt_det_path = './MOT15/train/{}/gt/gt.txt'.format(data_name)
    # pose_det_path = './output/mot15_pose_det/{}.txt'.format(data_name)    # the pose out box
    pose_det_path = './output/mot15_pose_det/train/{}.txt'.format(data_name)    # the pose out box

    # Bail out early if any of the input files is missing.
    for candidate in (frcnn_det_path, gt_det_path, pose_det_path):
        if not os.path.exists(candidate):
            logging.error('path {} not exist'.format(candidate))
            return

    det_path_list = [frcnn_det_path]  # b
    # det_path_list.append(pose_det_path)  # g
    # det_path_list.append(gt_det_path)  # r
    matplotlib_setup()
    visualize_det(det_path_list, data_name)

def visualize_gt_joint_box():
    """Overlay ground-truth boxes and the tuned pose output boxes."""
    sequence = 'PETS09-S2L1'
    gt_path = './MOT15/train/{}/gt/gt.txt'.format(sequence)
    pose_path = './output/tuning/{}'.format(sequence)    # the pose out box

    matplotlib_setup()
    visualize_det([gt_path, pose_path], sequence)




def gather_sequence_info(sequence_dir, detection_file):
    """Gather sequence information, such as image filenames, detections,
    groundtruth (if available).

    Parameters
    ----------
    sequence_dir : str
        Path to the MOTChallenge sequence directory.
    detection_file : str
        Path to the detection file.

    Returns
    -------
    Dict
        A dictionary of the following sequence information:

        * sequence_name: Name of the sequence
        * image_filenames: A dictionary that maps frame indices to image
          filenames.
        * detections: A numpy array of detections in MOTChallenge format.
        * groundtruth: A numpy array of ground truth in MOTChallenge format.
        * image_size: Image size (height, width).
        * min_frame_idx: Index of the first frame.
        * max_frame_idx: Index of the last frame.
        * feature_dim: Number of feature columns beyond the 10 standard
          MOTChallenge columns (0 when there are no detections).
        * update_ms: Frame interval in milliseconds from seqinfo.ini, or
          None when that file is absent.

    """
    image_dir = os.path.join(sequence_dir, "img1")
    image_filenames = {
        int(os.path.splitext(f)[0]): os.path.join(image_dir, f)
        for f in os.listdir(image_dir)}
    groundtruth_file = os.path.join(sequence_dir, "gt/gt.txt")

    detections = None
    if detection_file is not None:
        # BUG FIX: np.loadtxt returns a 1-D array for a single-row file,
        # which breaks the column indexing and .shape[1] below; force 2-D.
        detections = np.atleast_2d(np.loadtxt(detection_file, delimiter=','))
    groundtruth = None
    if os.path.exists(groundtruth_file):
        groundtruth = np.atleast_2d(
            np.loadtxt(groundtruth_file, delimiter=','))

    # Image size (height, width) is taken from an arbitrary frame on disk.
    if len(image_filenames) > 0:
        image = cv2.imread(next(iter(image_filenames.values())),
                           cv2.IMREAD_GRAYSCALE)
        image_size = image.shape
    else:
        image_size = None

    if len(image_filenames) > 0:
        min_frame_idx = min(image_filenames.keys())
        max_frame_idx = max(image_filenames.keys())
    else:
        # No images on disk: fall back to the frame-id column of detections.
        min_frame_idx = int(detections[:, 0].min())
        max_frame_idx = int(detections[:, 0].max())

    info_filename = os.path.join(sequence_dir, "seqinfo.ini")
    if os.path.exists(info_filename):
        with open(info_filename, "r") as f:
            # crude key=value parse, skipping the section header line
            line_splits = [l.split('=') for l in f.read().splitlines()[1:]]
            info_dict = dict(
                s for s in line_splits if isinstance(s, list) and len(s) == 2)

        update_ms = 1000 / int(info_dict["frameRate"])
    else:
        update_ms = None

    feature_dim = detections.shape[1] - 10 if detections is not None else 0
    seq_info = {
        "sequence_name": os.path.basename(sequence_dir),
        "image_filenames": image_filenames,
        "detections": detections,
        "groundtruth": groundtruth,
        "image_size": image_size,
        "min_frame_idx": min_frame_idx,
        "max_frame_idx": max_frame_idx,
        "feature_dim": feature_dim,
        "update_ms": update_ms
    }
    return seq_info


def create_detections(detection_mat, frame_idx, min_height=0):
    """Create detections for given frame index from the raw detection matrix.

    Parameters
    ----------
    detection_mat : ndarray
        Matrix of detections. The first 10 columns of the detection matrix are
        in the standard MOTChallenge detection format. In the remaining columns
        store the feature vector associated with each detection.
    frame_idx : int
        The frame index.
    min_height : Optional[int]
        A minimum detection bounding box height. Detections that are smaller
        than this value are disregarded.

    Returns
    -------
    List[tracker.Detection]
        Returns detection responses at given frame index.

    """
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int yields the same platform-default integer dtype.
    frame_indices = detection_mat[:, 0].astype(int)
    mask = frame_indices == frame_idx

    detection_list = []
    for row in detection_mat[mask]:
        # MOT layout: cols 2:6 = tlwh bbox, col 6 = confidence, 10: = feature
        bbox, confidence, feature = row[2:6], row[6], row[10:]
        if bbox[3] < min_height:
            continue
        detection_list.append(Detection(bbox, confidence, feature))
    return detection_list


class Track2:
    """One tracker-output row: frame id, track id, and a tlwh box."""

    def __init__(self, row):
        self.fid = row[0]        # frame index
        self.track_id = row[1]   # track identity
        self.ltwh = row[2:6]     # left, top, width, height

class FrameTracks:
    """All tracker rows that belong to a single frame."""

    def __init__(self, rows):
        # keep the raw matrix around for callers that want it untouched
        self.raw_frame_tracks = rows
        self.tracks = [Track2(r) for r in rows]


# class MyVisualization(visualization.Visualization):
#
#     def _update_fun(self, frame_callback):
#         if self.frame_idx > self.last_idx:
#             return False  # Terminate
#         frame_callback(self, self.frame_idx)
#         self.frame_idx += 1
#         return True

class Config:
    # Delay (ms) passed to cv2.waitKey between frames when playback is
    # not paused (see visualize_util).
    I_WAIT_KEY_GAP_MS = 5


def visualize_util(is_paused=True):
    """Render tracking results the way the deep-sort demo video does.

    Loads a MOT15 sequence, draws the raw detections plus the tracked
    boxes for every frame, and displays them in an OpenCV window.

    Keys: ESC or 'q' quits, SPACE toggles pause, 's' steps one frame,
    'i' saves the current frame to ./output/visualize_util/.

    :param is_paused: start paused (wait indefinitely on each frame).
    """
    data_set = 'train'
    data_name = 'PETS09-S2L1'

    gt_det_path = './MOT15/train/{}/gt/gt.txt'.format(data_name)
    pose_tck_path = './output/sort_joint/{}/{}.txt'.format(data_set, data_name)
    frcnn_sort_path = './output/origin/{}/{}.txt'.format(data_set, data_name)
    seq_dir = os.path.join('./MOT15', data_set, data_name)
    detection_file = gt_det_path
    # detection_file = frcnn_sort_path
    track_file = pose_tck_path
    # track_file = frcnn_sort_path

    seq_info = gather_sequence_info(seq_dir, detection_file)
    vis = visualization.Visualization(seq_info, update_ms=5)
    viewer = vis.viewer

    max_frame_idx = seq_info['max_frame_idx']
    # np.loadtxt collapses a single-row file to 1-D; keep the matrix 2-D.
    raw_tracks = np.atleast_2d(
        np.loadtxt(track_file, delimiter=',', dtype=np.int32))
    # BUG FIX: the original allocated max_frame_idx slots and looped
    # range(max_frame_idx), so the last 1-indexed frame was never grouped
    # nor shown; size and iterate with max_frame_idx + 1. The frame-id
    # column is also hoisted out of the loop (it is invariant).
    f_ids = raw_tracks[:, 0]
    frame_tracks = [FrameTracks(raw_tracks[f_ids == i])
                    for i in range(max_frame_idx + 1)]

    for frame_idx in range(1, max_frame_idx + 1):
        print('frame: {}'.format(frame_idx))
        detections = create_detections(seq_info["detections"], frame_idx)
        image = cv2.imread(seq_info["image_filenames"][frame_idx],
                           cv2.IMREAD_COLOR)
        vis.set_image(image.copy())
        vis.draw_detections(detections)
        for track in frame_tracks[frame_idx].tracks:
            x, y, w, h = track.ltwh
            viewer.color = visualization.create_unique_color_uchar(track.track_id)
            viewer.rectangle(x, y, w, h, label=str(track.track_id))
        cv2.imshow('img', viewer.image)

        wait_ms = 0 if is_paused else Config.I_WAIT_KEY_GAP_MS
        key = cv2.waitKey(wait_ms)
        if key & 255 == 27 or key == ord('q'):  # ESC
            print("terminating")
            break
        elif key & 255 == 32:  # ' '
            print("toggeling pause: " + str(not is_paused))
            is_paused = not is_paused
        elif key & 255 == 115:  # 's'
            print("stepping")
        elif key & 255 == ord('i'):
            cv2.imwrite('./output/visualize_util/{}.jpg'.format(frame_idx),
                        viewer.image)


if __name__ == '__main__':
    # Entry-point selector: uncomment the routine to run.
    # main()
    # visualize_gt_joint_box()
    visualize_util()