import functools
import multiprocessing as mp
import os
import time
from datetime import datetime

import cv2
import numpy as np
import pybgs as bgs
import serial


def timeit(method):
    """Decorator: measure and print a function's execution time in milliseconds."""

    @functools.wraps(method)  # preserve the wrapped function's name/docstring
    def timed(*args, **kw):
        start_time = datetime.now()
        result = method(*args, **kw)
        end_time = datetime.now()
        # Convert to milliseconds.
        elapsed_time_ms = (end_time - start_time).total_seconds() * 1000
        print(f"{method.__name__} 耗时: {elapsed_time_ms:.0f} 毫秒")
        return result

    return timed


def initialize_video(source):
    """Open a video stream for the given source (file path, device index, or URL)."""
    capture = cv2.VideoCapture(source)
    return capture


def initialize_frame(camera=None, init_count=50):
    """Build a reference frame by averaging the first frames of a stream.

    Args:
        camera: object with a ``read()`` method returning ``(ret, frame)``,
            e.g. a ``cv2.VideoCapture``.
        init_count: maximum number of frames to average.

    Returns:
        The averaged frame as uint8, or None if no frame could be read.
    """
    sum_frame = None
    frames_read = 0
    for _ in range(init_count):
        ret, frame = camera.read()
        if not ret:
            print("Failed to read frame from the camera.")
            break
        # Accumulate in float32 to avoid uint8 overflow while summing.
        if sum_frame is None:
            sum_frame = np.float32(frame)
        else:
            sum_frame += frame
        frames_read += 1

    if sum_frame is None:
        print("No frames were read from the camera.")
        return None
    # Fixed: divide by the number of frames actually read — the original
    # divided by init_count even when the stream ended early, which darkened
    # the averaged reference frame.
    return (sum_frame / frames_read).astype(np.uint8)


def initialize_frame_mp(q=None, init_count=50):
    """Average the first ``init_count`` frames pulled from a frame queue.

    Args:
        q: queue-like object whose ``get()`` returns a frame (blocks until
            one is available).
        init_count: number of frames to average.

    Returns:
        The mean frame as uint8, or None when no frame was accumulated.
    """
    accumulator = None
    for _ in range(init_count):
        frame = q.get()
        # Sum in float32 so repeated uint8 additions cannot overflow.
        if accumulator is None:
            accumulator = np.float32(frame)
        else:
            accumulator += frame

    if accumulator is None:
        print("No frames were read from the camera.")
        return None
    mean_frame = (accumulator / init_count).astype(np.uint8)
    return mean_frame


def initialize_detectors():
    """Create the background subtractor (ViBe) and the SIFT feature extractor."""
    background_subtractor = bgs.ViBe()
    # SIFT tuned for dense matching: many features, slightly relaxed contrast
    # threshold, wider edge threshold.
    feature_extractor = cv2.SIFT_create(
        nfeatures=5000,
        nOctaveLayers=4,
        contrastThreshold=0.03,
        edgeThreshold=15,
        sigma=1.7,
    )
    return background_subtractor, feature_extractor


def initialize_features(frame, feature_extractor):
    """Detect keypoints and compute descriptors on the given frame."""
    kps, descs = feature_extractor.detectAndCompute(frame, None)
    return kps, descs


def get_video_properties(camera):
    """Return (width, height, fps, frame_count) of an opened capture, as ints."""
    props = (
        cv2.CAP_PROP_FRAME_WIDTH,
        cv2.CAP_PROP_FRAME_HEIGHT,
        cv2.CAP_PROP_FPS,
        cv2.CAP_PROP_FRAME_COUNT,
    )
    width, height, fps, frame_count = (int(camera.get(p)) for p in props)
    return width, height, fps, frame_count


def get_video_properties_mp(q):
    """Infer frame size from one queued frame.

    fps and frame count are unknown for a live queue, so -1 is returned for
    both (consumes one frame from the queue).
    """
    sample = q.get()
    height, width, _ = sample.shape
    return width, height, -1, -1


def match_features(descriptors_initial, descriptors_frame, matcher):
    """FLANN-match two descriptor sets, keeping only good matches.

    Index parameters are chosen from the detector's class name (ORB uses an
    LSH index for binary descriptors, SIFT a KD-tree for float descriptors).
    Matches are filtered with Lowe's ratio test.

    Returns the list of good matches, or None when matching fails for any
    reason (best-effort contract relied on by callers).
    """

    def _flann_match(desc_a, desc_b, detection):
        detector_kind = type(detection).__name__
        if detector_kind == "ORB":
            # LSH index for binary descriptors.
            index_params = dict(algorithm=6,  # FLANN_INDEX_LSH
                                table_number=6,
                                key_size=12,
                                multi_probe_level=1)
            search_params = dict(checks=50)
        elif detector_kind == "SIFT":
            # KD-tree index for float descriptors.
            index_params = dict(algorithm=1, trees=5)  # FLANN_INDEX_KDTREE
            search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)
        knn_pairs = flann.knnMatch(desc_a, desc_b, k=2)
        # Lowe's ratio test: keep a match only when it clearly beats its runner-up.
        return [m for m, n in knn_pairs if m.distance < 0.75 * n.distance]

    try:
        return _flann_match(descriptors_initial, descriptors_frame, matcher)
    except Exception:
        # Matching can fail (too few descriptors, unsupported detector, ...);
        # signal failure with None rather than raising.
        return None


def detect_deviation(descriptors_initial, descriptors_frame, feature_extractor):
    """Classify camera deviation by how well current descriptors match the initial ones.

    Args:
        descriptors_initial: descriptors of the reference frame.
        descriptors_frame: descriptors computed on the current frame.
        feature_extractor: detector instance (ORB or SIFT) used to pick FLANN params.

    Returns:
        "tremble"    — very few matches (global shift / camera shake),
        "blocked"    — partial match loss (likely occlusion),
        "true_alarm" — descriptors still match well.

    Raises:
        ValueError: if the detector type is neither ORB nor SIFT (the
        original code raised an obscure NameError here).
    """

    def _match(descriptors1, descriptors2, detection):
        # Pick FLANN parameters appropriate for the descriptor type.
        if type(detection).__name__ == "ORB":
            FLANN_INDEX_LSH = 6
            index_params = dict(algorithm=FLANN_INDEX_LSH,
                                table_number=6,  # 12
                                key_size=12,  # 20
                                multi_probe_level=1)  # 2
            search_params = dict(checks=50)
        elif type(detection).__name__ == "SIFT":
            FLANN_INDEX_KDTREE = 1
            index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            search_params = dict(checks=50)
        else:
            raise ValueError(f"Unsupported detector: {type(detection).__name__}")

        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(descriptors1, descriptors2, k=2)
        # Lowe's ratio test.
        return [m for m, n in matches if m.distance < 0.75 * n.distance]

    matches = _match(descriptors_initial, descriptors_frame, feature_extractor)
    total = len(descriptors_frame)
    if total == 0:
        # Matches the original fall-through when there are no descriptors.
        return "true_alarm"

    # Fixed: the original used strict comparisons on both sides, so a match
    # count exactly at 0.15*total or 0.7*total fell through every branch and
    # was silently reported as "true_alarm".
    match_ratio = len(matches) / total
    if match_ratio < 0.15:
        return "tremble"
    elif match_ratio < 0.7:
        return "blocked"
    else:
        return "true_alarm"


@timeit
def false_alarm_elimination(frame, frame_initial, feature_extractor, keypoints_initial, descriptors_initial):
    """False-alarm rejection: classify how the current frame deviates from the reference.

    Recomputes descriptors at the reference keypoint locations on the current
    frame and compares them against the reference descriptors.

    Returns the deviation label from detect_deviation()
    ("tremble" / "blocked" / "true_alarm").
    """
    _, current_descriptors = feature_extractor.compute(frame, keypoints_initial)
    # Compare descriptor sets to decide global / partial / no deviation.
    deviation_type = detect_deviation(descriptors_initial, current_descriptors, feature_extractor)
    return deviation_type


# def handle_global_deviation(frame):
#     """处理全局偏差"""
#     # 这里可以包含去抖动、光影变化处理等
#     pass
#
#
# def handle_partial_deviation(frame):
#     """处理部分偏差，如遮挡"""
#     # 使用YOLO进行对象识别，判断是否为遮挡
#     pass
#
#
# def trigger_alarm(frame):
#     """触发报警"""
#     pass


# def detremble_frame(frame, frame_initial, feature_extractor, keypoints_initial, descriptors_initial):
#     keypoints_frame, descriptors_frame = feature_extractor.detectAndCompute(frame, None)
#     matches = match_features(descriptors_initial, descriptors_frame, feature_extractor)
#
#     # 提取位置信息
#     init_pts = np.float32([keypoints_initial[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
#     frame_pts = np.float32([keypoints_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
#
#     # 计算单应性矩阵
#     H, mask = cv2.findHomography(init_pts, frame_pts, cv2.RANSAC, 5.0)
#
#     # 使用单应性矩阵进行透视变换
#     height, width, _ = frame_initial.shape
#     transformed_image = cv2.warpPerspective(frame, H, (width, height))
#     return transformed_image

def detremble_preframe(frame, pre_frame, feature_extractor, keypoints_pre, descriptors_pre):
    """Stabilize `frame` against the previous frame via a feature-based homography.

    Returns:
        (warped_frame, keypoints, descriptors) for the current frame; when
        feature matching fails, the frame is returned unwarped.
    """
    kps_cur, descs_cur = feature_extractor.detectAndCompute(frame, None)
    good = match_features(descriptors_pre, descs_cur, feature_extractor)
    if good is None:
        return frame, kps_cur, descs_cur

    # Matched point coordinates in the previous and current frames.
    src_pts = np.float32([keypoints_pre[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kps_cur[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # Homography estimated with RANSAC (5 px reprojection tolerance).
    homography, _mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    # Warp the current frame to the previous frame's size.
    rows, cols, _ = pre_frame.shape
    stabilized = cv2.warpPerspective(frame, homography, (cols, rows))
    return stabilized, kps_cur, descs_cur


@timeit
def process_frame(frame, rectangle, threshold, background_subtractor):
    """Run background subtraction on the monitored rectangle and annotate the frame.

    Args:
        frame: BGR frame (annotated in place).
        rectangle: (x, y, width, height) of the monitored region.
        threshold: changed-area value above which the warning text turns red.
        background_subtractor: object with an ``apply(frame)`` foreground-mask method.

    Returns:
        (roi_mask, annotated_frame, changed_area)
    """
    rx, ry, rw, rh = rectangle
    # Draw the monitored region on the frame.
    cv2.rectangle(frame, (rx, ry), (rx + rw, ry + rh), (0, 255, 0), 2)

    # Foreground mask, restricted to the monitored region.
    fg_mask = background_subtractor.apply(frame)
    roi = fg_mask[ry:ry + rh, rx:rx + rw]
    mask = cv2.threshold(roi, 25, 255, cv2.THRESH_BINARY)[1]
    # Morphological cleanup: erode away speckles, then dilate blobs back.
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    contours, _hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    area = sum(cv2.contourArea(c) for c in contours)

    # Red when the change exceeds the alarm threshold, green otherwise.
    warn_color = (0, 0, 255) if area > threshold else (55, 255, 155)
    cv2.putText(frame, 'warn index  %s' % area, (rx, ry), cv2.FONT_HERSHEY_SIMPLEX, 2,
                warn_color,
                2)
    return mask, frame, area


# @timeit
# def main_video_processing(source):
#     camera = initialize_video(source)
#     background_subtractor, feature_extractor = initialize_detectors()
#
#     # 读取前50帧进行初始化
#     frame_initial = initialize_frame(camera, init_count=50)
#     width, height, fps, counterNum = get_video_properties(camera)
#     rectangle = (450, 260, 800, 500)  # 监测区域坐标和大小
#     threshold = 8000  # 变化报警阈值 100 for suidao.mp4
#
#     # 初始化特征点和描述符变量
#     keypoints_initial, descriptors_initial = initialize_features(frame_initial, feature_extractor)
#     # print(len(keypoints_initial), len(descriptors_initial))
#
#     counter = 0
#
#     detremble_frame_num = 5
#     detremble = False
#     detremble_counter = 0
#     keypoints_pre, descriptors_pre = None, None
#     pre_frame = None
#
#     blocked = False
#
#     while True:
#         ret, frame = camera.read()
#         if not ret:
#             break
#
#         if keypoints_pre is None:
#             keypoints_pre, descriptors_pre = initialize_features(frame, feature_extractor)
#             pre_frame = frame.copy()
#
#         if detremble_counter == detremble_frame_num:
#             background_subtractor, feature_extractor = initialize_detectors()
#             detremble = False
#             detremble_counter = 0
#             keypoints_pre, descriptors_pre = None, None
#             pre_frame = None
#
#         if detremble:
#             frame, keypoints_pre, descriptors_pre = detremble_preframe(frame, pre_frame, feature_extractor,
#                                                                        keypoints_pre, descriptors_pre)
#             pre_frame = frame.copy()
#             detremble_counter += 1
#
#         pre_frame_roi, frame_show, change_area = process_frame(frame.copy(), rectangle, threshold,
#                                                                background_subtractor)
#
#         if change_area > threshold:
#             deviation_type = false_alarm_elimination(frame, frame_initial, feature_extractor, keypoints_initial,
#                                                      descriptors_initial)
#             cv2.putText(frame_show, deviation_type, (rectangle[0], rectangle[1] + rectangle[3]),
#                         cv2.FONT_HERSHEY_SIMPLEX, 2,
#                         (0, 0, 255), 2)
#             print(deviation_type)
#             if deviation_type == "global":
#                 # 全局偏差处理  tremble 去抖动
#                 detremble = True
#                 # handle_global_deviation(frame)
#             elif deviation_type == "partial":
#                 # 部分偏差处理，可能涉及使用YOLO检测遮挡  blocked
#                 # frame_show=handle_partial_deviation(frame)
#                 background_subtractor, feature_extractor = initialize_detectors()
#
#             else:
#                 # 无偏差，报警
#                 trigger_alarm(frame)
#
#         cv2.imshow("roi", pre_frame_roi)
#         cv2.imshow("frame_show", frame_show)
#         # cv2.imshow("frame", frame)
#         cv2.waitKey(1)
#         counter += 1
#
#     camera.release()
#     cv2.destroyAllWindows()


def image_put(q, file_path):
    """Producer process: read frames from the source and keep the queue fresh.

    Keeps at most one pending frame in the queue so the consumer always
    processes a recent frame rather than a backlog.
    """
    cap = cv2.VideoCapture(file_path)
    if cap.isOpened():
        print('opened')

    while True:
        ret, frame = cap.read()
        if not ret:
            # Fixed: the original pushed cap.read()[1] unconditionally, which
            # enqueued None on a failed read and crashed consumers touching
            # frame attributes. Back off briefly and retry instead.
            time.sleep(0.01)
            continue
        q.put(frame)
        # Drop the stale frame when the consumer lags behind.
        q.get() if q.qsize() > 1 else time.sleep(0.01)


# def _image_get(q, window_name):
#     # cv2.namedWindow(window_name, flags=cv2.WINDOW_FREERATIO)
#     while True:
#         frame = q.get()
#
#         cv2.imshow(window_name, frame)
#         cv2.waitKey(1)


def image_get(q, window_name,save_path):
    """Consumer process: pull frames from the queue, run change detection,
    classify alarms (true alarm / blocked / tremble) and write annotated
    frames plus ROI masks to ``save_path``.

    Args:
        q: multiprocessing queue of BGR frames fed by ``image_put``.
        window_name: unused here (the display code below is commented out);
            kept so the process signature matches the call site.
        save_path: directory where output images are written.
    """
    # camera = initialize_video(source)
    background_subtractor, feature_extractor = initialize_detectors()
    # Serial-port configuration for the HC12 radio module (sending is disabled below).
    serialPort = 'COM3'  # Windows COM port, e.g. 'COM3'; on Linux/Mac likely '/dev/ttyUSB0' or similar
    baudRate = 9600  # HC12 module baud rate, default 9600
    dataToSend_alarm = "1"  # payload sent on a confirmed alarm
    dataToSend_blocked = "2"  # payload sent when the camera is blocked
    dataToSend_tremble = "3"  # payload sent on camera tremble

    # Initialize the serial connection (disabled)
    # ser = serial.Serial(serialPort, baudRate, timeout=1)


    # Average the first 50 frames to build the reference (background) frame.
    frame_initial = initialize_frame_mp(q, init_count=50)
    width, height, fps, counterNum = get_video_properties_mp(q)
    # Monitored rectangle: centered, 66% of the frame in each dimension.
    rect_width = int(width * 0.66)
    rect_height = int(height * 0.66)
    rect_x = int((width - rect_width) / 2)
    rect_y = int((height - rect_height) / 2)
    rectangle = (rect_x, rect_y, rect_width, rect_height)  # monitored region (x, y, w, h); was (450, 260, 800, 500) for suidao.mp4
    threshold = int((rect_width) * (rect_height) / 500)  # change-alarm threshold; was 100 for suidao.mp4
    # rectangle = (450, 260, 800, 500)
    # threshold = 100

    # Keypoints/descriptors of the reference frame, used for false-alarm rejection.
    keypoints_initial, descriptors_initial = initialize_features(frame_initial, feature_extractor)
    # print(len(keypoints_initial), len(descriptors_initial))

    counter = 0

    # De-tremble state: after this many stabilized frames, re-init the detectors.
    detremble_frame_num = 5
    detremble = False
    detremble_counter = 0
    keypoints_pre, descriptors_pre = None, None
    pre_frame = None

    # Blocked-camera state.
    blocked = False
    blocked_frame_num = 3
    blocked_counter = 0

    # Alarm state: after this many alarmed frames, re-init the detectors.
    alarmed = False
    alarm_counter = 0
    alarm_frame_nume = 3

    while True:
        frame = q.get()

        # Lazily capture previous-frame features (needed for de-trembling).
        if keypoints_pre is None:
            keypoints_pre, descriptors_pre = initialize_features(frame, feature_extractor)
            pre_frame = frame.copy()



        pre_frame_roi, frame_show, change_area = process_frame(frame.copy(), rectangle, threshold,
                                                               background_subtractor)



        if change_area > threshold:
            # Significant change detected: classify it before deciding to alarm.
            deviation_type = false_alarm_elimination(frame, frame_initial, feature_extractor, keypoints_initial,
                                                     descriptors_initial)

            if deviation_type == "tremble":
                # Global deviation: camera tremble, enable de-trembling.
                detremble = True
                # handle_global_deviation(frame)
                cv2.putText(frame_show, "false_alarm_tremble", (rectangle[0], rectangle[1] + rectangle[3]),
                            cv2.FONT_HERSHEY_SIMPLEX, 2,
                            (55, 255, 155), 2)
                print(deviation_type)

            elif deviation_type == "blocked":
                # Partial deviation: likely an occlusion (YOLO-based detection could go here).
                # frame_show=handle_partial_deviation(frame)
                blocked = True
                cv2.putText(frame_show, "true_alarm_blocked", (rectangle[0], rectangle[1] + rectangle[3]),
                            cv2.FONT_HERSHEY_SIMPLEX, 2,
                            (0, 0, 255), 2)
                print(deviation_type)
                # Send the "blocked" code over serial (disabled)
                # print(f"Sending '{dataToSend_blocked}' to HC12...")
                # ser.write(dataToSend_blocked.encode())
                # time.sleep(1)

            else:
                # No deviation: genuine alarm.
                # trigger_alarm(frame)
                alarmed = True



        if alarm_counter == alarm_frame_nume :
            background_subtractor, feature_extractor = initialize_detectors()
            alarmed = False
            alarm_counter = 0

        if alarmed and not blocked and not detremble:
            cv2.putText(frame_show, "true_alarm", (rectangle[0], rectangle[1] + rectangle[3]),
                        cv2.FONT_HERSHEY_SIMPLEX, 2,
                        (0, 0, 255), 2)
            print(deviation_type)

            # # Send the alarm code over serial (disabled)
            # print(f"Sending '{dataToSend_alarm}' to HC12...")
            # ser.write(dataToSend_alarm.encode())
            # time.sleep(1)

            alarm_counter += 1
            alarmed = False

        if blocked_counter == blocked_frame_num:
            blocked = False
            blocked_counter = 0

        if blocked:



            # Blocked: reset the detectors so the background model re-learns.
            background_subtractor, feature_extractor = initialize_detectors()
            alarmed = False
            blocked = False
            blocked_counter += 1

        if detremble_counter == detremble_frame_num:
            # De-trembling window finished: reset detectors and previous-frame state.
            background_subtractor, feature_extractor = initialize_detectors()
            detremble = False
            detremble_counter = 0
            keypoints_pre, descriptors_pre = None, None
            pre_frame = None

        if detremble:


            # Warp the frame onto the previous frame to cancel camera shake.
            frame, keypoints_pre, descriptors_pre = detremble_preframe(frame, pre_frame, feature_extractor,
                                                                       keypoints_pre, descriptors_pre)
            pre_frame = frame.copy()
            detremble_counter += 1



        # cv2.imshow("roi", pre_frame_roi)
        # cv2.imshow("frame_show", frame_show)
        # cv2.imshow("frame", frame)


        # cv2.waitKey(1)
        # Wait for keyboard input
        # key = cv2.waitKey(1)
        # Break and close on space or ESC
        # if key == ord(' ') or key == 27:
        #     break
        cv2.imwrite(save_path + f"/bgroi_{counter}.jpg", pre_frame_roi)
        cv2.imwrite(save_path + f"/frame_{counter}.jpg", frame_show)
        counter += 1

    # Close the serial connection (disabled)
    # ser.close()
    cv2.destroyAllWindows()


def run_multiprocess(file_path, save_path):
    """Spawn the producer (frame reader) and consumer (detector) processes.

    Args:
        file_path: video source for the producer (file path or RTSP URL);
            it is also passed to the consumer as its (unused) window name.
        save_path: output directory handed to the consumer.
    """
    mp.set_start_method(method='spawn')  # must be set before creating processes
    # maxsize=2 keeps the queue fresh: the producer drops stale frames itself.
    queue = mp.Queue(maxsize=2)
    processes = [mp.Process(target=image_put, args=(queue, file_path)),
                 mp.Process(target=image_get, args=(queue, file_path, save_path))]

    # Plain loops instead of list comprehensions used only for side effects.
    for process in processes:
        process.start()
    for process in processes:
        process.join()


if __name__ == "__main__":
    file = "data/suidao_stable.mp4"
    fname = "suidao"
    user, pwd, ip, channel = "admin", "buaa0707", "192.168.1.103", 1  # 主码流
    # user, pwd, ip, channel = "admin", "buaa0707", "192.168.1.103", 2  # 子码流 分辨率低
    video_stream_path = "rtsp://%s:%s@%s//Streaming/Channels/%d" % (
        user, pwd, ip, channel)  # HIKIVISION new version 2017

    timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    save_path = os.path.join("data/result/mp", timestamp)
    os.makedirs(save_path, exist_ok=True)

    # main_process(file, fname)

    # main_video_processing(video_stream_path) # 单进程

    run_multiprocess(video_stream_path,save_path)  # 多进程
