from pathlib import Path
import shutil
import copy

import cv2
import tqdm
import numpy as np

from app.track import Sort
from app.merge import merge_cross_scales, merge_overlaps

# Palette of visually distinct color tuples (cv2 draws in BGR channel order).
# Used to tell apart per-scale detections (COLORS[idx] per input size) and the
# final merged/tracked boxes (COLORS[2]).
COLORS = [
    (255, 0, 0),
    (0, 255, 0),
    (0, 0, 255),
    (255, 255, 0),
    (255, 0, 255),
    (0, 255, 255),
    (0, 0, 0),
    (127, 127, 127),
]


class DataSource:
    """Unified input source wrapping either a video file or image file(s).

    After construction, ``mode`` is ``"video"`` or ``"image"`` and ``source``
    is the video path (str) or the list of image paths (list of str).
    """

    def __init__(self, input_path) -> None:
        """Classify ``input_path`` as a video, a single image, or a directory of images.

        Args:
            input_path: path to a directory of images, a single image
                (.png/.jpg), or a video file (.mp4/.avi).

        Raises:
            ValueError: if the path does not exist or has an unsupported suffix.
        """
        self.input_path = Path(input_path)

        if self.input_path.is_dir():
            # Directory: collect contained images. Sorted so the processing
            # order is deterministic (glob order is filesystem-dependent).
            self.data_mode = "image"
            self.file_list = sorted(
                str(path)
                for pattern in ("*.png", "*.jpg")
                for path in self.input_path.glob(pattern)
            )
        elif self.input_path.is_file():
            # Single file: decide between video and image by suffix.
            if self.input_path.suffix in [".mp4", ".avi"]:
                self.data_mode = "video"
                self.video_path = str(self.input_path)
            elif self.input_path.suffix in [".png", ".jpg"]:
                self.data_mode = "image"
                self.file_list = [str(self.input_path)]
            else:
                # ValueError subclasses Exception, so old `except Exception`
                # callers keep working.
                raise ValueError(f"Unsupported file type {self.input_path.suffix}")
        else:
            raise ValueError(f"Wrong input path: {self.input_path}")

    @property
    def mode(self):
        """Return the input kind: "video" or "image"."""
        return self.data_mode

    @property
    def source(self):
        """Return the video path (str) in video mode, else the image path list."""
        if self.data_mode == "video":
            return self.video_path
        elif self.data_mode == "image":
            return self.file_list


def scale_det_results(results, image_org, image_input):
    """Rescale detection boxes from input-image coords to original-image coords.

    Columns 3..6 of each row (box corners) are multiplied in place by the
    ratio between the two image sizes. ``None`` passes straight through.

    Args:
        results: array-like of detections with box coords in columns 3..6, or None.
        image_org: the original frame (H, W, C array).
        image_input: the resized frame that was fed to the model.

    Returns:
        The same ``results`` object, mutated in place (or None).
    """
    if results is None:
        return None
    org_h, org_w = image_org.shape[:2]
    in_h, in_w = image_input.shape[:2]
    ratio_w = org_w / in_w
    ratio_h = org_h / in_h
    # x1, y1, x2, y2 live in columns 3..6.
    for col, factor in ((3, ratio_w), (4, ratio_h), (5, ratio_w), (6, ratio_h)):
        results[:, col] *= factor
    return results


def plot_results(image, results, color=(255, 0, 0), scale=0.5, verbose=False):
    """Draw detection boxes on ``image`` and return it.

    Box corners come from columns 3..6 of each row and are multiplied by
    ``scale`` before drawing. With ``verbose`` set, the row index is drawn at
    the top-left corner. ``None`` results leave the image untouched.
    """
    if results is None:
        return image
    for row_idx, obj in enumerate(results):
        top_left = (int(obj[3] * scale), int(obj[4] * scale))
        bottom_right = (int(obj[5] * scale), int(obj[6] * scale))
        cv2.rectangle(image, top_left, bottom_right, color, 2)
        if verbose:
            cv2.putText(image, f"{row_idx}", top_left, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
    return image


def plot_results_v2(image, results, color=(255, 0, 0), scale=0.5):
    """Draw tracker-style boxes on ``image`` and return it.

    Unlike ``plot_results``, box corners come from columns 0..3 of each row
    (the layout produced by the tracker). ``None`` results leave the image
    untouched.
    """
    if results is None:
        return image
    for obj in results:
        top_left = (int(obj[0] * scale), int(obj[1] * scale))
        bottom_right = (int(obj[2] * scale), int(obj[3] * scale))
        cv2.rectangle(image, top_left, bottom_right, color, 2)
    return image


def idx2time(idx, frame_rate):
    """Convert a frame index into a "<m>m<s>s" string for output filenames."""
    total_seconds = int(idx / frame_rate)
    minutes, seconds = divmod(total_seconds, 60)
    return f"{minutes}m{seconds}s"


def time2idx(time_str, frame_rate):
    """Parse a "<min>:<sec>-<min>:<sec>" range into (start_frame, stop_frame).

    Used to translate a user-supplied clip period into frame indices.
    """

    def _to_frame(stamp):
        """Convert a single "<min>:<sec>" stamp into a frame index."""
        minutes, seconds = (int(part) for part in stamp.split(":"))
        return int((minutes * 60 + seconds) * frame_rate)

    start_str, stop_str = time_str.strip().split("-")
    return _to_frame(start_str), _to_frame(stop_str)


def remove_little_targets(targets, pixel):
    """Drop targets whose box is no larger than ``pixel`` in both dimensions.

    A row is kept when either its width or its height (computed from its
    first four values, x1/y1/x2/y2) exceeds ``pixel``.

    Args:
        targets: iterable of box-like rows, or None.
        pixel: size threshold in pixels.

    Returns:
        A list of the surviving rows, or None when ``targets`` is None
        (mirrors the None tolerance of the other helpers; fixes a crash when
        the upstream merge step produces no detections).
    """
    if targets is None:
        return None
    return [
        obj
        for obj in targets
        if abs(obj[0] - obj[2]) > pixel or abs(obj[1] - obj[3]) > pixel
    ]


def run_process_video(data_source: DataSource, model, decoder, cfg, signals=None):
    """Run detection + tracking on a video input and save the results.

    Args:
        data_source: DataSource in "video" mode; its ``source`` is the video path.
        model: inference session exposing ``run(None, {"input": ...})``.
        decoder: post-processor exposing ``run(outputs)`` whose first element
            is the detection array for the frame.
        cfg: option dict (save_mode, scale_g, interval, sizes, output, period,
            max_age, min_hit, iou_thres, min_pixel, show).
        signals: optional object with a ``percent`` signal for UI progress.
    """
    video_path = data_source.source

    # Open the video and query its properties by name instead of the opaque
    # numeric indices (7 / 5 / 3 / 4) used previously.
    vc = cv2.VideoCapture(video_path)
    num_frame = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_rate_org = vc.get(cv2.CAP_PROP_FPS)

    save_mode = cfg["save_mode"]
    scale_g = cfg["scale_g"]
    interval = cfg["interval"]
    idx_start, idx_stop = 0, num_frame

    # Clip mode: a "period" forces video output and restricts the frame range.
    if "period" in cfg and cfg["period"] is not None:
        save_mode = "video"
        cut_period = cfg["period"]
        print(f"片段模式，截取范围：{cut_period}")

    vw = None  # created only when saving a video; released at the end
    if save_mode == "video":
        # Configure the output video writer.
        output_video_path = cfg["output"] / Path(video_path).name
        image_w = vc.get(cv2.CAP_PROP_FRAME_WIDTH)
        image_h = vc.get(cv2.CAP_PROP_FRAME_HEIGHT)
        video_size = (int(scale_g * image_w), int(scale_g * image_h))
        fourcc = cv2.VideoWriter.fourcc("m", "p", "4", "v")
        vw = cv2.VideoWriter(str(output_video_path), fourcc, frame_rate_org / interval, video_size)

        if "period" in cfg and cfg["period"] is not None:
            idx_start, idx_stop = time2idx(cfg["period"], frame_rate_org)
            assert idx_start < idx_stop, "起始点大于结束点"
            assert idx_start >= 0, "起始点小于0"
            assert idx_stop < num_frame, "结束点超过视频时长"

    elif save_mode == "image":
        # Recreate <output>/<video stem>/{origin,labeled} from scratch.
        output_root_dir = cfg["output"] / Path(video_path).stem
        shutil.rmtree(output_root_dir, ignore_errors=True)
        output_root_dir.mkdir(parents=True)
        output_origin_dir = output_root_dir / "origin"
        output_labeled_dir = output_root_dir / "labeled"
        output_origin_dir.mkdir()
        output_labeled_dir.mkdir()
    else:
        raise Exception(f"Unsupported save format {save_mode}")

    # Multi-object tracker.
    mot_tracker = Sort(
        max_age=cfg["max_age"], min_hits=cfg["min_hit"], iou_threshold=cfg["iou_thres"]
    )

    # Iterate over the selected frame range.
    num_to_process = idx_stop - idx_start
    vc.set(cv2.CAP_PROP_POS_FRAMES, idx_start)
    for idx_frame in tqdm.tqdm(range(num_to_process)):
        ok, frame = vc.read()
        if not ok:
            # Fix: a failed read (short/corrupt video) previously crashed on
            # cv2.resize(None, ...). Stop cleanly instead.
            break
        if idx_frame % interval == 0:
            frame_org = cv2.resize(frame, (0, 0), fx=scale_g, fy=scale_g)
            frame_plot = frame_org.copy()
            frame_final = frame_org.copy()

            # Raw detections, one pass per configured input size.
            total_results = []
            for idx, size in enumerate(cfg["sizes"]):
                image = cv2.resize(frame, size)
                inputs = np.expand_dims(image.transpose(2, 0, 1).astype(np.float32), axis=0)
                # Model inference.
                outputs = model.run(None, {"input": inputs})
                # Decode raw outputs into detections.
                results = decoder.run(outputs)[0]
                # Rescale boxes back to original-frame coordinates.
                results = scale_det_results(results, frame, image)

                total_results.append(results)
                frame_plot = plot_results(frame_plot, results, COLORS[idx], scale_g)

            # Post-processing: merge the per-scale results, then overlaps.
            merged_results1 = merge_cross_scales(total_results)
            merged_results2 = merge_overlaps(merged_results1)

            if merged_results2 is not None:
                # Feed boxes (columns 3 onward) to the tracker.
                trackers = mot_tracker.update(copy.deepcopy(merged_results2[:, 3:]))
                # Drop targets that are too small.
                targets_plot = remove_little_targets(trackers, cfg["min_pixel"])

                frame_final = plot_results_v2(frame_final, targets_plot, COLORS[2], scale_g)

                # Save image pairs only when something was actually tracked.
                if save_mode == "image":
                    if targets_plot is not None and len(targets_plot) > 0:
                        cv2.imwrite(
                            str(output_origin_dir / f"{idx2time(idx_frame, frame_rate_org)}.jpg"),
                            frame_org,
                        )
                        cv2.imwrite(
                            str(output_labeled_dir / f"{idx2time(idx_frame, frame_rate_org)}.jpg"),
                            frame_final,
                        )
            # Update the UI progress bar.
            if signals is not None:
                # Fix: divide by the number of frames actually processed, not
                # the full video length (wrong percentage in clip mode).
                signals.percent.emit(int((idx_frame + 1) / num_to_process * 100))
            # Append to the output video.
            if save_mode == "video":
                vw.write(frame_final)
            # Live preview; 'q' quits early.
            if cfg["show"]:
                cv2.imshow("post", frame_final)
                if ord("q") == cv2.waitKey(1):
                    break
    if signals is not None:
        signals.percent.emit(100)
    # Fix: release capture/writer handles (previously leaked; an unreleased
    # VideoWriter can leave the output file unfinalized).
    if vw is not None:
        vw.release()
    vc.release()
    cv2.destroyAllWindows()
    return


def run_process_image(data_source: DataSource, model, decoder, cfg, signals=None):
    """Run detection on image input(s) and save annotated copies.

    Args:
        data_source: DataSource in "image" mode; its ``source`` is a list of
            image paths.
        model: inference session exposing ``run(None, {"input": ...})``.
        decoder: post-processor exposing ``run(outputs)`` whose first element
            is the detection array for the image.
        cfg: option dict (scale_g, sizes, output, min_pixel, show).
        signals: optional object with a ``percent`` signal for UI progress.
    """
    # Recreate <output>/images from scratch.
    output_path = cfg["output"] / "images"
    shutil.rmtree(output_path, ignore_errors=True)
    output_path.mkdir(parents=True)

    scale_g = cfg["scale_g"]
    image_paths = data_source.source
    num_images = len(image_paths)

    # total= lets tqdm render a real progress bar for the enumerate iterator.
    for idx_frame, image_path in tqdm.tqdm(enumerate(image_paths), total=num_images):
        frame = cv2.imread(image_path)
        if frame is None:
            # Fix: an unreadable/corrupt image previously crashed cv2.resize.
            continue
        frame_plot = cv2.resize(frame, (0, 0), fx=scale_g, fy=scale_g)

        # Raw detections, one pass per configured input size.
        total_results = []
        for idx, size in enumerate(cfg["sizes"]):
            image = cv2.resize(frame, size)
            inputs = np.expand_dims(image.transpose(2, 0, 1).astype(np.float32), axis=0)
            outputs = model.run(None, {"input": inputs})
            results = decoder.run(outputs)[0]
            results = scale_det_results(results, frame, image)

            total_results.append(results)

        # Post-processing: merge the per-scale results, then overlaps.
        merged_results1 = merge_cross_scales(total_results)
        merged_results2 = merge_overlaps(merged_results1)
        # Fix: merge_overlaps can yield None (the video path guards this);
        # filtering None previously raised TypeError.
        if merged_results2 is not None:
            # NOTE(review): remove_little_targets tests columns 0..3, but these
            # detection rows keep their box in columns 3..6 (see
            # scale_det_results / plot_results) — confirm the intended layout.
            merged_results2 = remove_little_targets(merged_results2, cfg["min_pixel"])

        # plot_results tolerates None and draws nothing.
        frame_plot = plot_results(frame_plot, merged_results2, COLORS[2], scale_g)

        image_save_path = output_path / Path(image_path).name
        cv2.imwrite(str(image_save_path), frame_plot)

        # Update the UI progress bar.
        if signals is not None:
            signals.percent.emit(int((idx_frame + 1) / num_images * 100))
        # Live preview.
        if cfg["show"]:
            cv2.imshow("det", frame_plot)
            cv2.waitKey(10)

    if signals is not None:
        signals.percent.emit(100)
    cv2.destroyAllWindows()
    return
