import os
import cv2
import time
import argparse
import torch
import warnings
import numpy as np
import sys
import gc

sys.path.append(os.path.join(os.path.dirname(__file__), 'thirdparty/fast-reid'))


from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results
from utils.judge_direction import direction
from utils.process_cut import VideoCapture


class VideoTracker(object):
    """Pedestrian tracking over a video file or a camera stream.

    Combines a detector (YOLOv3 / mmdet, selected via cfg) with DeepSORT
    tracking and a direction-judging module that counts people crossing a
    configured region. Intended to be used as a context manager:

        with VideoTracker(cfg, args, video_path) as trk:
            trk.run()
    """

    def __init__(self, cfg, args, video_path):
        """
        Args:
            cfg: merged config node (detector + deepsort + optional fastreid).
            args: parsed command-line namespace (see parse_args()).
            video_path: path to the input video; ignored when args.cam != '-1'.
        """
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!", UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != '-1':
            # Camera source: device index '0' is opened as an int id, any
            # other value (e.g. a stream URL) is passed through as a string.
            print("Using webcam " + str(args.cam))
            if args.cam == '0':
                self.vdo = cv2.VideoCapture(int(args.cam))
            else:
                self.vdo = cv2.VideoCapture(args.cam)
        else:
            # Video-file source: the capture is opened later in __enter__.
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names

        self.direction = direction()
        self.total_counter = 0
        self.up_count = 0
        self.down_count = 0
        self.flag = False
        # Boundary hyper-parameter: selects between the line-crossing and the
        # region-box counting logic inside the direction module (default 0.05).
        self.lr = 0.05

    # Executed automatically on `with VideoTracker(...)`.
    def __enter__(self):
        """Open the source, record the frame size and prepare output writers."""
        if self.args.cam != '-1':
            # Camera: probe one frame to learn the resolution.
            ret, frame = self.vdo.read()
            assert ret, "Error: Camera error"
            # BUGFIX: frame.shape is (height, width, channels) — the original
            # assigned shape[0] to im_width and shape[1] to im_height, which
            # inverted the saved-video size and the region coordinates for
            # any non-square camera input.
            self.im_height = frame.shape[0]
            self.im_width = frame.shape[1]
        else:
            # Video file.
            assert os.path.isfile(self.video_path), "Path error"
            self.vdo.open(self.video_path)
            self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
            print('width:{} height:{}'.format(str(self.im_width), str(self.im_height)))
            assert self.vdo.isOpened()

        # Counting region: a centered box spanning 40%-70% of the frame in
        # both dimensions, given as (x, y) corner points clockwise from the
        # top-left. Other presets (right half, left strip, bottom half) were
        # previously kept here as commented-out alternatives.
        self.point_list = [(int(0.4 * self.im_width), int(0.4 * self.im_height)),
                           (int(0.7 * self.im_width), int(0.4 * self.im_height)),
                           (int(0.7 * self.im_width), int(0.7 * self.im_height)),
                           (int(0.4 * self.im_width), int(0.7 * self.im_height))]

        # Output paths for the annotated video and the MOT-format results.
        if self.args.save_path:
            os.makedirs(self.args.save_path, exist_ok=True)

            self.save_video_path = os.path.join(self.args.save_path, "results.avi")
            self.save_results_path = os.path.join(self.args.save_path, "results.txt")

            # Create the annotated-video writer (MJPG @ 20 fps).
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 20, (self.im_width, self.im_height))

            self.logger.info("Save results to {}".format(self.args.save_path))

        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Release capture / writer / windows; report but do not swallow errors."""
        # BUGFIX: the original leaked the capture handle, the video writer
        # and any OpenCV window on exit.
        if self.vdo is not None and self.vdo.isOpened():
            self.vdo.release()
        writer = getattr(self, "writer", None)
        if writer is not None:
            writer.release()
        if self.args.display:
            cv2.destroyAllWindows()
        # Returning None lets any exception propagate, as before.
        if exc_type:
            print(exc_type, exc_value, exc_traceback)

    def write(self, stack, cam, top: int) -> None:
        """Read one frame from the source and append it to *stack*.

        When the stack reaches *top* frames, every other buffered frame is
        dropped and the garbage collector is invoked to bound memory use.
        *cam* is accepted for interface compatibility but is not used here.
        """
        ret, frame = self.vdo.read()
        assert ret, "Error: Camera error"
        # (The original had a redundant `if ret:` after the assert.)
        stack.append(frame)
        # Periodically thin the buffer and force a collection so the frame
        # stack cannot grow without bound.
        if len(stack) >= top:
            del stack[::2]
            gc.collect()

    def run(self):
        """Main loop: detect, track, count crossings, draw and log per frame."""
        results = []
        idx_frame = 0
        # Pre-process the region box: decides whether the line-crossing or
        # the box-region counting logic is used downstream.
        line_list = self.direction.process_frame(self.point_list, self.im_width, self.im_height, self.lr)
        # grab() advances to the next frame without decoding it, which makes
        # frame skipping cheap.
        while self.vdo.grab():
            idx_frame += 1
            # Frame-interval skipping: only process every Nth frame.
            if idx_frame % self.args.frame_interval:
                continue

            start = time.time()
            # retrieve() decodes and returns the frame grabbed above; the
            # boolean status is ignored as in the original.
            _, ori_im = self.vdo.retrieve()
            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)

            # Detection: candidate boxes, confidences and class ids.
            bbox_xywh, cls_conf, cls_ids = self.detector(im)

            # Keep only the person class (class id 0).
            mask = cls_ids == 0

            bbox_xywh = bbox_xywh[mask]
            # bbox dilation just in case bbox too small, delete this line if
            # using a better pedestrian detector.
            bbox_xywh[:, 3:] *= 1.2
            cls_conf = cls_conf[mask]

            # Tracking: DeepSORT associates detections across frames.
            outputs = self.deepsort.update(bbox_xywh, cls_conf, im)

            stay_count = 0  # number of people currently inside the region

            # 1. Draw the counting region.
            ori_im = self.direction.draw_detection(ori_im, self.point_list)
            # 2. Draw tracked boxes for visualization (ids come from DeepSORT).
            if len(outputs) > 0:
                bbox_tlwh = []
                bbox_xyxy = outputs[:, :4]
                identities = outputs[:, -1]
                ori_im = draw_boxes(ori_im, bbox_xyxy, identities)
                # Update direction / crossing statistics.
                ori_im, self.total_counter, self.up_count, self.down_count, stay_count, self.flag = \
                    self.direction.line_direction(ori_im,
                                                  line_list,
                                                  self.point_list,
                                                  bbox_xyxy,
                                                  identities)

                for bb_xyxy in bbox_xyxy:
                    bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))

                # Record (frame index, box positions, identities).
                results.append((idx_frame - 1, bbox_tlwh, identities))

            # 3. Draw the counting line and the statistics overlay.
            ori_im = self.direction.draw_line(ori_im,
                                              line_list,
                                              self.total_counter,
                                              self.up_count, self.down_count, stay_count,
                                              self.flag)

            end = time.time()

            if self.args.display:
                cv2.imshow("test", ori_im)
                cv2.waitKey(1)

            if self.args.save_path:
                self.writer.write(ori_im)
                # BUGFIX: moved under the save_path guard — the original
                # called write_results unconditionally and crashed with
                # AttributeError when --save_path was empty, since
                # save_results_path is only created in __enter__ when saving.
                write_results(self.save_results_path, results, 'mot')

            # Guard against a zero elapsed time on very fast iterations
            # (the original could divide by zero when computing fps).
            elapsed = max(end - start, 1e-6)
            self.logger.info("time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}" \
                             .format(end - start, 1 / elapsed, bbox_xywh.shape[0], len(outputs)))

#  所有命令行的传参
def parse_args():
    """Build and parse the command-line interface for the tracker.

    Returns:
        argparse.Namespace with the video path, config-file locations,
        feature toggles, display geometry and runtime options.
    """
    cli = argparse.ArgumentParser()
    # Required input video (ignored when a camera is selected via --camera).
    cli.add_argument("VIDEO_PATH", type=str)
    # Config-file locations.
    cli.add_argument("--config_mmdetection", type=str, default="./configs/mmdet.yaml")
    cli.add_argument("--config_detection", type=str, default="./configs/yolov3.yaml")
    cli.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
    cli.add_argument("--config_fastreid", type=str, default="./configs/fastreid.yaml")
    # Feature toggles.
    cli.add_argument("--fastreid", action="store_true")
    cli.add_argument("--mmdet", action="store_true")
    cli.add_argument("--display", action="store_true")
    # Runtime / output options.
    cli.add_argument("--frame_interval", type=int, default=1)
    cli.add_argument("--display_width", type=int, default=800)
    cli.add_argument("--display_height", type=int, default=600)
    cli.add_argument("--save_path", type=str, default="./output/test/")
    cli.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
    cli.add_argument("--camera", action="store", dest="cam", type=str, default="-1")
    return cli.parse_args()


def _build_config(args):
    """Assemble the merged config node from the parsed CLI flags.

    Merge order matches the script's original behavior: detector config
    first (mmdet or yolov3), then deepsort, then optionally fastreid.
    """
    cfg = get_config()
    if args.mmdet:
        cfg.merge_from_file(args.config_mmdetection)
    else:
        cfg.merge_from_file(args.config_detection)
    cfg.USE_MMDET = args.mmdet
    cfg.merge_from_file(args.config_deepsort)
    if args.fastreid:
        cfg.merge_from_file(args.config_fastreid)
    cfg.USE_FASTREID = args.fastreid
    return cfg


if __name__ == "__main__":
    cli_args = parse_args()
    tracker_cfg = _build_config(cli_args)
    with VideoTracker(tracker_cfg, cli_args, video_path=cli_args.VIDEO_PATH) as tracker:
        tracker.run()
