import asyncio
import base64
import datetime
import os
import threading
import time
from collections import deque
import cv2
import django
import numpy as np
import yaml
from django.apps import apps

from ImageModule.views import VideoViewSet
from dataQuery.views import contraband_add
# from dataQuery.views import contraband_add


#


from yolov5.apply2 import Detect, draw_china
from yolov5.utils.plots import colors


class CacheItem:
    """A cached value tagged with its insertion wall-clock time."""

    def __init__(self, data):
        self.data = data
        self.time = time.time()  # creation timestamp, used for expiry checks


class FrameCache:
    """Thread-safe bounded cache of frames with background expiry.

    Frames older than ``timeout`` seconds are dropped by a daemon cleaner
    thread; when the cache is full, the oldest frame is discarded
    automatically by the bounded deque.
    """

    def __init__(self, max_size=10, timeout=60):
        self.max_size = max_size
        self.timeout = timeout  # expiry time in seconds
        self.cache = deque(maxlen=max_size)
        self.lock = threading.Lock()
        # daemon=True so the cleaner loop cannot keep the interpreter alive
        # at shutdown (the original non-daemon thread blocked process exit).
        self.p = threading.Thread(target=self.clear_expired_frames, daemon=True)
        self.p.start()

    def add_frame(self, frame):
        """Append a frame; deque(maxlen=...) evicts the oldest when full."""
        with self.lock:
            # No manual popleft needed: a bounded deque discards the oldest
            # element on append once it reaches maxlen.
            self.cache.append(CacheItem(frame))

    def get(self):
        """Return the newest CacheItem, or None if the cache is empty."""
        # Check-and-read under one lock acquisition to avoid the race where
        # the cleaner empties the deque between the check and the index.
        with self.lock:
            return self.cache[-1] if self.cache else None

    def get_frame(self):
        """Return the oldest frame's data, or None if the cache is empty."""
        with self.lock:
            return self.cache[0].data if self.cache else None

    def popAll(self):
        """Drain the cache and return all CacheItems, oldest first."""
        with self.lock:
            results = list(self.cache)
            self.cache.clear()
            return results

    def clear_expired_frames(self):
        """Background loop that drops frames older than ``timeout`` seconds.

        Bug fix: the original compared ``item.time - time.time()`` against
        the timeout; that difference is always negative, so no frame was
        ever expired. The age must be ``now - item.time``.
        """
        while True:
            with self.lock:
                now = time.time()
                # Items are appended in time order, so expired items are
                # always at the left end of the deque.
                while self.cache and now - self.cache[0].time > self.timeout:
                    self.cache.popleft()
            time.sleep(1)  # poll once per second


def compare_frame(frame1, frame2):
    """Return the mean absolute grayscale difference between two frames.

    Small values indicate near-identical frames; the caller uses this to
    skip re-running detection on static scenes.
    """
    # Match sizes so absdiff can operate element-wise.
    if frame1.shape != frame2.shape:
        h, w = frame2.shape[:2]
        frame1 = cv2.resize(frame1, (w, h))

    # Debug snapshots of the two frames being compared.
    cv2.imwrite('./images/frame1.jpg', frame1)
    cv2.imwrite('./images/frame2.jpg', frame2)

    # Compare in grayscale so the difference is a single channel.
    gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    return np.average(cv2.absdiff(gray1, gray2))


def vertical_concat(img1, img2):
    """Stack ``img2`` below ``img1`` along the row axis and return the result."""
    # Equivalent to cv2.vconcat for same-width, same-dtype images.
    return np.vstack((img1, img2))


def vertical_apart(image):
    """Split an image into (top, bottom) halves along the row axis.

    For an odd number of rows, the bottom half keeps the extra row.
    """
    mid = image.shape[0] // 2
    return image[:mid, :], image[mid:, :]


class InitModule:
    """Lazy holder for the YOLOv5 ``Detect`` model.

    Construction is deferred to first use so importing this module does
    not pay the model-loading cost.
    """

    def __init__(self):
        # Populated on the first get_detect() call.
        self.detect = None

    def get_detect(self):
        """Return the shared Detect instance, creating it on first call."""
        if self.detect is not None:
            return self.detect
        self.detect = Detect()
        return self.detect


# Module-level singletons shared between the detection thread and the views.
init = InitModule()  # lazy holder for the Detect model
detect_frequency = 5  # run full detection once every N frames; others are passed through
detect_result = FrameCache()  # annotated frames consumed by get_result()
videoSet = VideoViewSet()  # used to upload finished video segments


def start_detect():
    """Continuous detection loop over the configured video source.

    Reads frames from ``config['video_source']``, runs YOLOv5 detection on
    the top/bottom halves of each frame, records annotated video in
    ~30-minute segments (uploaded via ``videoSet.createByrealtime`` and then
    deleted locally), persists newly seen labels via ``contraband_add``, and
    publishes annotated frames into the module-level ``detect_result`` cache.
    Runs forever; intended to be launched on a background thread.
    """
    image_size = (1080, 960)
    detect = init.get_detect()

    # cap = cv2.VideoCapture('D:\\data\\机场物流项目\\过机视频3.mp4')
    # cap = cv2.VideoCapture(
    #     'D:\\BaiduNetdiskDownload\\20240115采集视频（违禁品）\\采集视频（违禁品）\\6号采集视频——13号机.mp4')

    config = get_yaml_data('./config/config.yaml')
    cap = cv2.VideoCapture(config['video_source'])
    now_time = datetime.datetime.now()
    # Timestamp used in the output video filename.
    formatted_time = now_time.strftime("%Y%m%d_%H%M%S")
    filename = '过机视频_' + str(formatted_time) + '.mp4'
    if not os.path.exists(config['video_path']):
        os.makedirs(config['video_path'])

    if not os.path.exists(config['image_path']):
        os.makedirs(config['image_path'])
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # NOTE(review): cv2.VideoWriter expects (width, height); image_size is
    # later reassigned from frame.shape[:2], which is (height, width) —
    # confirm the written video dimensions are intended.
    out = cv2.VideoWriter(os.path.join(config['video_path'], filename), fourcc, 10.0,
                          image_size)
    last_time = time.time()  # NOTE(review): assigned but never read afterwards
    video_time = time.time()  # start time of the current video segment
    last_frame = None  # previous raw frame, for change detection
    last_label_dicts = None  # labels from the previous full detection
    last_det = None  # detection boxes from the previous full detection
    isFirst = True  # True until the first full detection has completed
    count = 0  # frames seen since the last full detection
    while True:
        time.sleep(0.05)
        ret, frame = cap.read()
        count += 1
        if not ret:
            continue
        image_size = frame.shape[:2]

        # Between full detections, pass raw frames straight to the cache.
        if not isFirst and count < detect_frequency:
            # for *xyxy, conf, cls in reversed(last_det):
            #
            #     c = int(cls)  # integer class
            #     label = f'{detect.cls_names[c]} {conf * 100:.2f}%'
            #     frame = draw_china(frame, xyxy, 3, detect.font, label, color=colors(c, True))
            detect_result.add_frame(frame)
            continue

        # If the scene barely changed, redraw the previous detections on the
        # new frame instead of running the model again.
        if not isFirst and compare_frame(last_frame, frame) < 0.1:
            last_frame = frame
            result_frame = frame.copy()
            for *xyxy, conf, cls in reversed(last_det):
                c = int(cls)  # integer class
                label = f'{detect.cls_names[c]} {conf * 100:.2f}%'

                result_frame = draw_china(result_frame, xyxy, 3, detect.font, label, color=colors(c, True))
            detect_result.add_frame(result_frame)

            continue
        last_frame = frame
        # Run detection on the top and bottom halves separately, then re-join
        # the annotated halves.
        frame_up, frame_down = vertical_apart(frame)
        result_frame_up, label_dicts_up, det_up = detect.detect(frame_up)
        result_frame_down, label_dicts_down, det_down = detect.detect(frame_down)
        result_frame = vertical_concat(result_frame_up, result_frame_down)
        # Merge bottom-half labels into the top-half list, skipping any label
        # name already present (deduplicated by label name only).
        label_dicts = label_dicts_up
        det = list(det_up)
        for i in range(0, len(label_dicts_down)):
            label = label_dicts_down[i]
            flag = True
            for label2 in label_dicts_up:
                if label['label'] == label2['label']:
                    flag = False
                    break
            if flag:
                label_dicts.append(label)
                det.append(det_down[i])
        if last_label_dicts is None:
            last_label_dicts = label_dicts
            isFirst = True
        if last_det is None:
            last_det = det

        # NOTE(review): assigning .dtype reinterprets the underlying buffer
        # rather than converting values (unlike astype) — confirm intended.
        result_frame.dtype = np.uint8
        out.write(cv2.resize(result_frame, image_size))
        # Roll the recording over every 30 minutes: upload the finished
        # segment, delete the local copy, and open a new writer.
        if time.time() - video_time > 60 * 30:
            out.release()

            response = videoSet.createByrealtime(filename, os.path.join(config['video_path'], filename),
                                                 now_time)

            print(response)
            os.remove(os.path.join(config['video_path'], filename))
            video_time = time.time()
            now_time = datetime.datetime.now()
            # Timestamp for the new segment's filename.
            formatted_time = now_time.strftime("%Y%m%d_%H%M%S")
            filename = '过机视频_' + str(formatted_time) + '.mp4'
            out = cv2.VideoWriter(os.path.join(config['video_path'], filename), fourcc, 10.0,
                                  image_size)

        # Persist each detected label that was not already reported by the
        # previous detection (compared positionally by index).
        length = len(last_label_dicts)
        for i in range(0, len(label_dicts)):
            label_dict = label_dicts[i]
            if not isFirst:
                if length > i:
                    last_label_dict = last_label_dicts[i]

                    # NOTE(review): when the first disjunct is false,
                    # det[i][0] - last_det[i][0] is negative and therefore
                    # always <= 10, so matching labels are always skipped —
                    # confirm whether abs() was intended here.
                    if (last_label_dict['label'] == label_dict['label'] and
                            (det[i][0] >= last_det[i][0] or det[i][0] - last_det[i][0] <= 10)):
                        # print('continue')
                        continue

            now_time = datetime.datetime.now()
            time_label = now_time.strftime("%Y%m%d_%H%M%S")
            original_file = os.path.join(config['image_path'],
                                         str(label_dict['label']) + '_' + time_label + '_original.jpg')
            detect_file = os.path.join(config['image_path'],
                                       str(label_dict['label']) + '_' + time_label + '_detect.jpg')

            # Save the raw and annotated frames, then record the detection.
            cv2.imwrite(original_file, frame)
            cv2.imwrite(detect_file, result_frame)
            unix_timestamp = int(time.mktime(now_time.timetuple()))
            contraband_add(unix_timestamp, config['place'], label_dict['label'], original_file, detect_file)
            # label_dict['crop'] = image_to_base64(cv2.imencode('.jpeg', label_dict['crop'])[1])

        last_label_dicts = label_dicts
        last_det = det
        isFirst = False
        detect_result.add_frame(result_frame)
        last_time = time.time()
        count = 0


def get_result():
    """Return the module-level FrameCache holding annotated frames."""
    return detect_result


def process_image(image):
    """Run upload detection on an image.

    Returns a tuple of (base64-encoded JPEG of the annotated image, label).
    """
    detector = init.get_detect()
    annotated, label = detector.detect_upload(image)
    # Encode to JPEG bytes, then to a base64 string for the HTTP response.
    jpeg_bytes = cv2.imencode('.jpeg', annotated)[1]
    return image_to_base64(jpeg_bytes), label


def image_to_base64(image):
    """Encode raw image bytes (or any bytes-like buffer) as a base64 string."""
    return base64.b64encode(image).decode('utf-8')


def get_yaml_data(yaml_file):
    """Parse a UTF-8 YAML file and return the resulting data structure."""
    with open(yaml_file, encoding='utf-8') as fh:
        # FullLoader avoids the YAMLLoadWarning of the legacy default loader.
        return yaml.load(fh.read(), Loader=yaml.FullLoader)


# NOTE(review): the detection loop is started as a side effect of importing
# this module; consider moving it behind an explicit start() call. The
# thread is non-daemon, so it will keep the process alive at shutdown.
p = threading.Thread(target=start_detect)
p.start()

if __name__ == '__main__':
    # Ad-hoc manual check: stack the same image on top of itself.
    # (Result is discarded; presumably left over from debugging.)
    f1 = cv2.imread("1.png")
    f2 = cv2.imread("1.png")
    vertical_concat(f1, f2)
