import io
import os
import time
import platform
import cv2
from PIL import Image
from moviepy import VideoFileClip
from utils import path
from ultralytics import YOLO
import numpy as np
class Detection():
    """Fire/smoke detector built around a YOLO model.

    Supports single images, whole video files (annotated frames re-muxed
    with the original audio track via moviepy), and frame-by-frame
    streaming for camera feeds or video playback in a frontend.
    """

    def __init__(self):
        self.init_detection_resources()
        # Flags polled inside the generator loops; a caller (e.g. another
        # thread handling a "stop" request) flips them to abort the
        # frame stream / full-video generation mid-run.
        self.continue_output = True
        self.continue_complete_output = True
        self.id = 0

    def init_detection_resources(self, model_path=path(['static', 'model', 'best.pt'])):
        """Load the YOLO detector and run one dummy inference to warm it up.

        :param model_path: path of the ``.pt`` weights file
        """
        self.detector = YOLO(model_path, task="detect")
        self.detector(np.zeros((48, 48, 3)))  # warm up the model

    # ------------------------------------------------------------------
    # Private helpers shared by the image/video methods
    # ------------------------------------------------------------------
    @staticmethod
    def _get_os_type():
        """Return a coarse OS label; used to pick a writable codec on Linux."""
        system = platform.system()
        if system == "Windows":
            return "Windows"
        elif system == "Linux":
            return "Linux"
        else:
            return "其他系统"

    @staticmethod
    def _get_filename(path):
        """Return the last component of *path*, accepting '/' or '\\' separators."""
        last_separator_index = path.rfind('/')
        if last_separator_index == -1:
            last_separator_index = path.rfind('\\')
        if last_separator_index != -1:
            filename = path[last_separator_index + 1:]
            print(filename)
            return filename
        else:
            print("未找到有效的路径分隔符，可能提供的就是文件名")
            return path

    @staticmethod
    def _split_confidences(results):
        """Group formatted confidence strings by class id (0 -> fire, 1 -> smoke).

        :param results: one Ultralytics ``Results`` object
        :return: ``{'fire': [...], 'smoke': [...]}`` of percentage strings
        """
        boxes = results.boxes.xyxy.tolist()
        print(f"boxes:{boxes}")
        detection_classes = results.boxes.cls.int().tolist()
        confidences = results.boxes.conf.tolist()
        confidence_scores = [f'{score * 100:.2f}%' for score in confidences]
        cls_conf = {'fire': [], 'smoke': []}
        for cls, conf in zip(detection_classes, confidence_scores):
            if cls == 0:
                cls_conf['fire'].append(conf)
            elif cls == 1:
                cls_conf['smoke'].append(conf)
        return cls_conf

    @staticmethod
    def _to_jpeg_bytes(annotated_img):
        """Encode a BGR ndarray as JPEG bytes via an in-memory stream.

        :param annotated_img: annotated frame from ``Results.plot()`` (BGR)
        :return: JPEG-encoded binary data ready to send to a frontend
        """
        pil_img = Image.fromarray(np.uint8(annotated_img[..., ::-1]))  # BGR -> RGB
        output_stream = io.BytesIO()
        pil_img.save(output_stream, format='JPEG')
        output_stream.seek(0)
        return output_stream.read()

    def handle_image_input(self, image_path, save_path):
        """Detect objects on a single image and save the annotated copy.

        :param image_path: input image path
        :param save_path: where the annotated image is written
        :return: annotated image as a BGR ``numpy.ndarray``
        """
        self.source_path = image_path

        detection_results = self.detector(image_path)[0]

        # Parse and log the detection results (boxes / classes / confidences).
        boxes = detection_results.boxes.xyxy.tolist()
        print(f"boxes:{boxes}")
        detection_classes = detection_results.boxes.cls.int().tolist()
        confidences = detection_results.boxes.conf.tolist()
        confidence_scores = [f'{score * 100:.2f}%' for score in confidences]
        print(detection_classes, confidence_scores)

        annotated_img = detection_results.plot()
        cv2.imwrite(save_path, annotated_img)
        return annotated_img

    def process_and_save_video(self, video_path, save_path):
        """Run detection on every frame of a video and save the result.

        Generator: yields progress in ``(0, 1]`` every 100 frames and on the
        final frame. Annotated frames are first written to a temp file, then
        re-muxed with the original audio track into ``save_path``.

        :param video_path: input video file path
        :param save_path: path for the processed output video
        """
        self.detector(np.zeros((48, 48, 3)))  # warm up the model

        cap = cv2.VideoCapture(video_path)
        clip = VideoFileClip(video_path)
        if not cap.isOpened():
            print('无法打开视频文件')
            return
        audio = clip.audio  # may be None for silent videos

        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Reuse the source codec; OpenCV cannot encode h264 on Linux,
        # so fall back to mp4v there.
        fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
        if self._get_os_type() == "Linux":
            fourcc_str = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])
            if fourcc_str == "h264":
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')

        # Fix: create the temp dir if missing (process_video_frame already did).
        temp_dir = os.path.join(".", "temp_files")
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        temp_path = os.path.join(temp_dir, self._get_filename(video_path))
        print(temp_path)
        out = cv2.VideoWriter(temp_path, fourcc, fps, (width, height))

        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        current_frame = 0
        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                current_frame += 1
                print(f"正在处理第 {current_frame} 帧，共 {total_frames} 帧")

                results = self.detector(frame)[0]
                annotated_frame = results.plot()
                if not self.continue_complete_output:
                    # Abort requested; re-arm the flag for the next run.
                    self.continue_complete_output = True
                    print("即将中止生成完整视频")
                    break
                out.write(annotated_frame)
                if current_frame % 100 == 0 or current_frame == total_frames:
                    yield current_frame / total_frames

            # The writer must be released before moviepy reads the temp file.
            cap.release()
            out.release()
            new_clip = VideoFileClip(temp_path)
            if audio is not None:  # fix: silent videos have no audio track
                new_clip = new_clip.with_audio(audio)
            new_clip.write_videofile(save_path)
            if not os.access(os.path.dirname(save_path), os.W_OK):
                print(f"没有权限写入 {save_path}")
            else:
                print(f'视频已保存至: {save_path}')
            new_clip.close()
        finally:
            # Fix: release everything and drop the temp file even on error.
            cap.release()
            out.release()
            clip.close()
            if audio is not None:
                audio.close()
            if os.path.exists(temp_path):
                os.remove(temp_path)

    def process_video_frame(self, video_path, save_path, out_put=True):
        """Stream annotated frames of a video as JPEG bytes.

        Generator: for every frame yields a dict with the JPEG-encoded
        annotated frame, per-class confidences, fps, progress (``-1`` except
        every 100th / final frame), the source filename and the timestamp.
        When ``out_put`` is True, the full annotated video is also written
        to ``save_path`` (with the original audio) once the last frame is
        reached.

        :param video_path: input video file path
        :param save_path: path for the processed output video
        :param out_put: whether to also save the complete annotated video
        """
        self.continue_output = True

        self.detector(np.zeros((48, 48, 3)))  # warm up the model

        cap = cv2.VideoCapture(video_path)
        clip = VideoFileClip(video_path)
        if not cap.isOpened():
            print('无法打开视频文件')
            return
        audio = clip.audio  # may be None for silent videos

        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Reuse the source codec; OpenCV cannot encode h264 on Linux,
        # so fall back to mp4v there.
        fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
        if self._get_os_type() == "Linux":
            fourcc_str = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])
            if fourcc_str == "h264":
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        temp_path = os.path.join(".", "temp_files", self._get_filename(video_path))
        if not os.path.exists(os.path.join(".", "temp_files")):
            os.mkdir(os.path.join(".", "temp_files"))
        print(temp_path)
        if not os.path.exists(os.path.dirname(save_path)):
            print("新建路径{}".format(save_path))
            os.makedirs(os.path.dirname(save_path))

        out = None
        if out_put:
            out = cv2.VideoWriter(temp_path, fourcc, fps, (width, height))
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        current_frame = 0
        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                current_frame += 1
                print(f"正在处理第 {current_frame} 帧，共 {total_frames} 帧")

                results = self.detector(frame)[0]
                cls_conf = self._split_confidences(results)
                annotated_img = results.plot()

                if out_put and out:
                    out.write(annotated_img)

                # Progress is only reported every 100 frames / on the last one.
                progress = -1
                if current_frame % 100 == 0 or current_frame == total_frames:
                    progress = current_frame / total_frames

                processed_data = self._to_jpeg_bytes(annotated_img)

                if not self.continue_output:
                    # Abort requested via video_output_stop(); re-arm the flag.
                    self.continue_output = True
                    print("即将中止输出视频帧")
                    break

                data = {"frame": processed_data, "cls_conf": cls_conf, "fps": fps,
                        "progress": progress,
                        "filename": self._get_filename(video_path),
                        "time": current_frame / fps}
                if data['progress'] == 1:
                    # Last frame reached: finalize the saved video before
                    # yielding. NOTE(review): relies on CAP_PROP_FRAME_COUNT
                    # being exact; if fewer frames decode, this never runs.
                    if out_put:
                        out.release()
                        new_clip = VideoFileClip(temp_path)
                        if audio:
                            new_clip = new_clip.with_audio(audio)
                        new_clip.write_videofile(save_path)
                        new_clip.close()
                        if not os.access(os.path.dirname(save_path), os.W_OK):
                            print(f"没有权限写入 {save_path}")
                        else:
                            print(f'视频已保存至: {save_path}')
                yield data
        except Exception as e:
            # Best-effort stream: log and fall through to cleanup so the
            # consumer is not left with dangling resources.
            print(e)
        finally:
            clip.close()
            cap.release()
            if audio:
                audio.close()
            if out_put and out:
                out.release()
                # Fix: the writer may have failed before creating the file.
                if os.path.exists(temp_path):
                    os.remove(temp_path)

    def process_camera_frame(self, frame_data):
        """Detect objects on one camera frame given as encoded image bytes.

        :param frame_data: binary image data (e.g. one JPEG frame)
        :return: dict with the annotated JPEG bytes and per-class confidences
        """
        img = Image.open(io.BytesIO(frame_data))
        detection_results = self.detector(img)[0]
        cls_conf = self._split_confidences(detection_results)
        annotated_img = detection_results.plot()
        processed_data = self._to_jpeg_bytes(annotated_img)
        return {"frame": processed_data, "cls_conf": cls_conf}

    def copy_video(self, input_video_path, output_video_path):
        """Copy a video frame-by-frame (re-encoded as XVID; audio is dropped).

        :param input_video_path: source video path
        :param output_video_path: destination video path
        """
        cap = cv2.VideoCapture(input_video_path)
        if not cap.isOpened():
            print("无法打开输入视频文件")
            return

        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                out.write(frame)
        finally:
            # Fix: release both handles even if reading/writing raises.
            cap.release()
            out.release()
        print(f"视频已成功保存到 {output_video_path}")

    def test_moiepy(self, input_video_path, output_video_path):
        """Sanity-check moviepy: rewrite a video with its own audio track."""
        new_clip = VideoFileClip(input_video_path)
        audio = new_clip.audio
        new_clip = new_clip.with_audio(audio)
        new_clip.write_videofile(output_video_path)
        new_clip.close()  # fix: release file handles when done

    def video_output_stop(self):
        """Ask the frame-streaming generator to stop after the current frame."""
        self.continue_output = False
        print("detect.video_output_stop()")

if __name__ == '__main__':
    # Manual smoke test: re-mux a sample video through moviepy.
    detection = Detection()
    source_video = "./uploads/1134233650.mp4"
    target_video = "./processed/1134233650.mp4"
    detection.test_moiepy(source_video, target_video)