# Copyright (c) Tencent Inc. All rights reserved.
# This file is modified from mmyolo/demo/video_demo.py
import cv2
import torch
from mmengine.dataset import Compose
# from mmdet.apis import init_detector
import keyboard
import subprocess
import threading
import queue
import time
from utils import visualize, init_detector
from audio import AudioTransfer
# Default detection prompt. `input_text` is shared mutable state: the
# get_input() thread overwrites it with the latest ASR transcript, and the
# video_stream() thread reads it to re-parameterize the detector.
default_text_setting = 'person'
input_text = default_text_setting
from sockdog import tcp_client,tcp_client_receive
def camera_in(video_queue, stop_event):
    """Capture frames from a GStreamer UDP/H.264 stream and push them to video_queue.

    Runs until `stop_event` is set. Also saves one frame per second to
    /root/qwen2/frame.jpg for an external consumer, and re-initializes the
    capture after `max_error` consecutive read failures.

    Args:
        video_queue: queue.Queue receiving BGR ndarray frames.
        stop_event: threading.Event signalling shutdown.
    """
    gstreamer_str = "udpsrc address=0.0.0.0 port=5000 ! application/x-rtp, media=video, encoding-name=H264 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! video/x-raw,width=1280,height=720,format=BGR ! appsink "
    cap = cv2.VideoCapture(gstreamer_str, cv2.CAP_GSTREAMER)
    frame_count = 0
    error_count = 0  # consecutive read failures
    max_error = 5    # failures tolerated before re-initializing the capture
    last_saved_time = time.time()
    while not stop_event.is_set():
        ret, frame = cap.read()
        if ret:
            try:
                video_queue.put(frame, timeout=2)
            except queue.Full:
                # BUG FIX: an unhandled queue.Full previously killed this
                # thread when the consumer lagged; drop the frame instead.
                print("video_queue full, dropping frame")
                continue
            current_time = time.time()
            # Save one frame per second for the external (qwen2) consumer.
            if current_time - last_saved_time >= 1:
                image_path = "/root/qwen2/frame.jpg"
                cv2.imwrite(image_path, frame)
                print(f"Frame saved at {image_path} at time: {current_time}")
                last_saved_time = current_time

            frame_count += 1
            error_count = 0  # reset on any successful read
        else:
            error_count += 1
            print("Failed to capture frame, retrying...")
            time.sleep(1)
            if error_count >= max_error:
                print("Reinitializing camera capture...")
                cap.release()
                time.sleep(5)
                # BUG FIX: the original reopened with the undefined name `url`
                # (its definition was commented out) -> NameError. Reopen the
                # same GStreamer pipeline instead.
                cap = cv2.VideoCapture(gstreamer_str, cv2.CAP_GSTREAMER)
                if not cap.isOpened():
                    print("Failed to reinitialize camera.")
                    break
                error_count = 0

    cap.release()
    print("Camera feed stopped.")
def build_ffmpeg_command(video_width, video_height, fps, rtmp):
    """Build the ffmpeg argv: raw BGR frames on stdin -> scaled H.264 FLV to `rtmp`."""
    size_str = f"{int(video_width)}x{int(video_height)}"
    return ['ffmpeg',
            '-y', '-an',
            '-f', 'rawvideo',
            '-vcodec', 'rawvideo',
            '-pix_fmt', 'bgr24',
            # BUG FIX: '-s' was hard-coded to '1280x720' and '-r' to '14',
            # silently ignoring the video_width/video_height/fps arguments.
            '-s', size_str,
            '-r', str(int(fps)),
            '-i', '-',
            '-vf', 'scale=640:360',
            '-c:v', 'libx264',
            '-pix_fmt', 'yuv420p',
            '-preset', 'ultrafast',
            '-f', 'flv',
            rtmp]


def create_pipe(video_width, video_height, fps, rtmp):
    """Start an ffmpeg push-stream process.

    Args:
        video_width: input frame width in pixels.
        video_height: input frame height in pixels.
        fps: input frame rate.
        rtmp: destination RTMP URL.

    Returns:
        subprocess.Popen with an open stdin pipe; write raw BGR bytes to it.
    """
    command = build_ffmpeg_command(video_width, video_height, fps, rtmp)
    print(" ".join(command))
    pipe = subprocess.Popen(command, shell=False, stdin=subprocess.PIPE)
    return pipe

# def get_input():
#     global input_text
#     while True:
#         audio_transfer = AudioTransfer()
#         new_input = audio_transfer.predict('/root/paraformer/example/asr_example.wav')
#         print(new_input)
#         if new_input:
#             input_text = new_input
#         time.sleep(1)

    # while True:
    #     # new_input = input("请输入文本: ")
    #     new_input = 'the light'
    #     if new_input != input_text:
    #         input_text = new_input
def get_input():
    """Read the latest ASR transcript from input.bin into the global `input_text`.

    Best-effort: a missing file or any read/decode error is logged and the
    previous value of `input_text` is kept. Runs once; started on its own
    thread by main().
    """
    global input_text
    file_path = '/root/paraformer/example/input.bin'

    try:
        # Binary read, then decode to a UTF-8 string.
        with open(file_path, 'rb') as file:
            binary_data = file.read()
            # BUG FIX: strip surrounding whitespace so a trailing newline or a
            # whitespace-only file does not overwrite the detection prompt.
            new_input = binary_data.decode('utf-8').strip()
            print("文件内容成功读取并转换为字符串。")

            # Publish to the shared prompt only when there is real content.
            if new_input:
                input_text = new_input
            time.sleep(1)
    except FileNotFoundError:
        print(f"文件 {file_path} 不存在。")
    except Exception as e:
        print(f"读取文件时发生错误: {e}")
def inference_detector(model, image, texts, test_pipeline, score_thr=0.3):
    data_info = dict(img_id=0, img=image, texts=texts)
    data_info = test_pipeline(data_info)
    data_batch = dict(inputs=data_info['inputs'].unsqueeze(0),
                      data_samples=[data_info['data_samples']])

    with torch.no_grad():
        output = model.test_step(data_batch)[0]
        pred_instances = output.pred_instances
        pred_instances = pred_instances[pred_instances.scores.float() >
                                        score_thr]
    return pred_instances



def video_stream(video_queue, stop_event, texts,
                 model, test_pipeline, score_thr,
                output_queue
                 ):
    """Detection worker: pull frames from video_queue, run inference, and
    push [result, texts, frame] onto output_queue.

    Keeps draining video_queue after `stop_event` is set. Watches the global
    `input_text` prompt and re-parameterizes the model when it changes.
    """
    print("Starting video_stream thread...")
    global input_text
    last_start_time = 1.0
    applied_text = None  # last prompt the model was reparameterized with

    frame_ind = -1
    while not stop_event.is_set() or not video_queue.empty():
        try:
            frame = video_queue.get(timeout=0.5)  # timeout keeps CPU use low
        except queue.Empty:
            if not stop_event.is_set():
                print("Waiting for new frames...")
            continue

        frame_ind += 1

        # BUG FIX: the original rebuilt `texts` and called the (expensive)
        # model.reparameterize on EVERY frame once input_text differed from
        # the default. Only do it when the prompt actually changes.
        if input_text != default_text_setting and input_text != applied_text:
            texts = [[t.strip()] for t in input_text.split(',')] + [[' ']]
            model.reparameterize(texts)
            applied_text = input_text

        start_time = time.time()
        result = inference_detector(model,
                                    frame,
                                    texts,
                                    test_pipeline,
                                    score_thr=score_thr)
        process_time = time.time() - start_time
        if process_time > 0.05:
            print("det-process: ", process_time)

        interval_time = start_time - last_start_time
        if interval_time > 0.1:
            print("det-interval: ", interval_time)
        last_start_time = start_time

        output_queue.put([result, texts, frame])


# 数据可视化和保存线程
def data_visualization(output_queue, stop_event, pipe, dogmessage_queue):
    """Visualization worker: annotate detections and stream frames to ffmpeg.

    Pulls [result, texts, frame] from output_queue, draws the detections via
    visualize() (which also feeds dogmessage_queue), and writes the raw BGR
    bytes to the ffmpeg push-stream pipe. Drains the queue after stop_event.
    """
    last_start_time = 0
    while not stop_event.is_set() or not output_queue.empty():
        try:
            result, texts, frame = output_queue.get(timeout=1)
            start_time = time.time()
            annot_frame, center_coords = visualize(result, texts, frame, dogmessage_queue)
            # BUG FIX: ndarray.tostring() was deprecated and removed in
            # NumPy >= 1.23; tobytes() is the drop-in replacement.
            pipe.stdin.write(annot_frame.tobytes())
            process_time = time.time() - start_time
            if process_time > 0.03:
                print("pipe: ", process_time)

            interval_time = start_time - last_start_time
            if interval_time > 0.1:
                print("pipe interval: ", interval_time)
            last_start_time = start_time

        except queue.Empty:
            continue

def main():
    """Wire up capture, detection, visualization, and robot-comms threads and
    run until interrupted with Ctrl+C."""
    config = "/root/yolo-world/configs/pretrain/yolo_world_v2_xl_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py"
    ck = "/root/yolo-world/yolo_world_v2_xl_obj365v1_goldg_cc3mlite_pretrain-5daf1395.pth"
    score_thr = 0.25
    rtmp = 'rtmp://192.168.171.20:1935/live/demo'

    # Start the ffmpeg push-stream process (1280x720 BGR in, RTMP out).
    pipe = create_pipe(1280, 720, 14, rtmp=rtmp)

    # Initialize the detector and its test-time preprocessing pipeline.
    model = init_detector(config, ck, device='cuda:0')
    print("success init model")  # BUG FIX: message typo ("sucess")
    # Frames come in as ndarrays, not file paths.
    model.cfg.test_dataloader.dataset.pipeline[
        0].type = 'mmdet.LoadImageFromNDArray'
    test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
    texts = [[t.strip()] for t in input_text.split(',')] + [[' ']]
    print("texts: ", texts)
    model.reparameterize(texts)

    # Shared queues and shutdown flag.
    video_queue = queue.Queue(maxsize=200)
    output_queue = queue.Queue(maxsize=200)
    dogmessage_queue = queue.Queue(maxsize=200)
    stop_event = threading.Event()

    # Prompt listener (reads the ASR transcript file).
    input_thread = threading.Thread(target=get_input)
    input_thread.start()

    # Camera capture thread.
    video_thread = threading.Thread(target=camera_in,
                                    args=(video_queue, stop_event))
    video_thread.start()

    # Detection thread.
    algorithm_thread = threading.Thread(target=video_stream, args=(
        video_queue, stop_event, texts, model, test_pipeline, score_thr,
        output_queue
    ))
    algorithm_thread.start()

    # Visualization / push-stream thread.
    visualization_thread = threading.Thread(target=data_visualization,
                                            args=(output_queue, stop_event, pipe, dogmessage_queue))
    visualization_thread.start()

    # Robot-dog communication threads.
    sockdog_thread = threading.Thread(target=tcp_client, args=(dogmessage_queue,))
    sockdog_thread.start()

    sockdogrec_thread = threading.Thread(target=tcp_client_receive)
    sockdogrec_thread.start()
    try:
        # Block the main thread until Ctrl+C.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        # Signal every worker to shut down.
        stop_event.set()

    # Wait for the workers to drain their queues and exit.
    input_thread.join()
    video_thread.join()
    algorithm_thread.join()
    visualization_thread.join()
    sockdog_thread.join()
    sockdogrec_thread.join()


# Script entry point.
if __name__ == '__main__':
    main()
