import cv2
import os
from ultralytics import YOLO
import subprocess
from flask import Flask, request, jsonify, send_file
import gc
from multiprocessing import Pipe, Manager, shared_memory
from threading import Thread, Event, Lock
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import time
import datetime
import io
import configparser
import base64
import requests
import struct
from pynvml import *

app = Flask("real time video detection")

# config.ini is expected to live next to this source file
current_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(current_dir, 'config.ini')

config = configparser.ConfigParser()
config.read(config_path)
# .strip("'") removes the literal single quotes the ini values are wrapped in
upload_dir = config['other_folder']['upload_dir'].strip("'")
font_path = config['font']['font_path'].strip("'")
font_size = int(config['font']['font_size'].strip("'"))
# Font used by AImodel() to draw detection labels.
font = ImageFont.truetype(font_path, font_size)

# Default detection class names, read from the [model1] section.
classes_list = config['model1']['classes_list'].split(',')
classes_list = [class_name.strip() for class_name in classes_list]

# Lazily-created worker used by the /img_detection route (single-image mode).
imageReadAndInfer = None

# Messages exchanged over the reader/inference Pipe.
msg_nomoreframe = "no more frame"
msg_error_openfail = "error to open video source"
msg_startread = "start read"
msg_needframe = "need frame"
conn_timeout = 100  # NOTE(review): defined but never used in this file

# Registries of running tasks, keyed by output URL.
ReadAndInfer_list={}
tcp_list={}

class ReadAndInfer:
    """One detection task: read frames from a video source, run YOLO on them
    and publish annotated results.

    mode=0: RTMP mode — annotated frames are piped into ffmpeg and restreamed
            to ``output_url``.
    mode=1: HTTP mode — a binary payload (JPEG + detection records) is POSTed
            to ``output_url + "/result"`` (errors go to ``/error``).

    The reader and inference run as two threads sharing one frame slot in a
    ``multiprocessing.shared_memory`` segment, synchronized by ``self.lock``.
    """

    def __init__(self, input_url, output_url, current_model_name, mode):
        # Shared-memory frame buffer bookkeeping (filled in by start()).
        self.shm_name = None
        self._shm = None  # keep a reference so the segment is not reclaimed early
        self.lock = None
        self.shape = None
        self.dtype = None
        self.nbytes = None
        self.parent_conn = None
        self.child_conn = None

        self.mode = mode
        self.ffmpeg = None
        # Fix: output_url may be None for the single-image worker; the original
        # unconditionally concatenated and raised TypeError.
        if output_url is not None:
            self.output_http_result_url = output_url + "/result"
            self.output_http_error_url = output_url + "/error"
        else:
            self.output_http_result_url = None
            self.output_http_error_url = None

        self.input_url = input_url
        self.output_url = output_url
        self.current_model_name = current_model_name

        self.Yolo_model = None
        self.running = True
        self.p_readFrame = None
        self.p_inferFrame = None
        self.rules = None
        self.conf = None
        self.class_mapping = {}  # class name -> index used in the binary HTTP payload

    def start(self, classes):
        """Load the model, allocate shared memory, and launch the reader and
        inference threads.

        classes -- list of class names the client wants detected; also defines
        the index mapping used by the HTTP-mode binary payload.
        Raises RuntimeError if the YOLO model cannot be built.
        """
        self.running = True
        # Map class name -> index (order given by the client's class list).
        self.class_mapping = {class_name: idx for idx, class_name in enumerate(classes)}
        print("类名映射", self.class_mapping, classes)
        self.rules, self.conf = parse_rules(config, self.current_model_name, classes)
        try:
            self.Yolo_model = get_model(config, self.current_model_name)
        except Exception as e:
            print(f"An error occurred during YOLO build up: {e}")
            # Fix: raising a plain string is a TypeError in Python 3.
            raise RuntimeError("Error, No YOLOmodel") from e
        self.parent_conn, self.child_conn = Pipe()

        # Allocate with a worst-case 4K resolution; the real size is discovered
        # in the reader thread and communicated over the pipe.
        width = 3840
        height = 2160
        self.shape = (height, width, 3)
        self.dtype = np.uint8
        self.nbytes = height * width * 3

        shm = shared_memory.SharedMemory(create=True, size=self.nbytes)
        self._shm = shm  # hold the handle so the segment stays alive
        self.shm_name = shm.name
        self.lock = Lock()

        # Reader thread: pulls frames from the source into shared memory.
        self.p_readFrame = Thread(target=self.process_readFrame)
        self.p_readFrame.start()

        # Inference thread: consumes frames and publishes results.
        self.p_inferFrame = Thread(target=self.process_infer)
        self.p_inferFrame.start()

    def start_img_yolo(self):
        """Load the YOLO model only (single-image mode, no threads)."""
        try:
            self.Yolo_model = get_model(config, self.current_model_name)
        except Exception as e:
            print(f"An error occurred during YOLO build up: {e}")
            # Fix: raising a plain string is a TypeError in Python 3.
            raise RuntimeError("Error, No YOLOmodel") from e

    def img_infer(self, img_data, classes):
        """Run detection on one encoded image.

        img_data -- raw encoded image bytes (e.g. decoded base64).
        classes  -- class names to detect (selects rules from the config).
        Returns a dict with the annotated image as base64 JPEG under "img"
        and the detection boxes under "data". Also saves the annotated JPEG
        into upload_dir with a timestamped name.
        """
        self.rules, self.conf = parse_rules(config, self.current_model_name, classes)
        img = Image.open(io.BytesIO(img_data))
        # Robustness: normalize to 3-channel RGB so RGBA/grayscale inputs do
        # not break the RGB->BGR conversion below.
        if img.mode != 'RGB':
            img = img.convert('RGB')
        img_array = np.array(img)
        img_np = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
        device_id = get_gpu_id()
        output_img, resultList = AImodel(img_np, self.Yolo_model, device_id, self.rules, self.conf)
        filename = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg"
        filepath = os.path.join(upload_dir, filename)
        output_img.save(filepath, format='JPEG')
        img_io = io.BytesIO()
        output_img.save(img_io, 'JPEG')
        image_binary = img_io.getvalue()
        image_base64 = base64.b64encode(image_binary).decode('utf-8')
        data = {
            "img": image_base64,  # annotated image as a base64 JPEG
            "data": [result.to_dict() for result in resultList]
        }
        return data

    def start_ffmpeg(self, rtmp_input, rtmp_output, retries=5):
        """Spawn an ffmpeg process that accepts raw BGR frames on stdin and
        publishes them as FLV/H.264 to rtmp_output.

        Returns the Popen object, or None if the source size could not be
        probed or ffmpeg could not be started after ``retries`` attempts.
        """
        print("进入start_ffmpeg程序")
        flag, input_width, input_height = self.get_rtmp_video_width_height(rtmp_input)
        if flag is False:
            return None
        print('input_width = ', input_width)
        print('input_height = ', input_height)

        command = ['ffmpeg',
                   '-y',
                   '-an',
                   '-f', 'rawvideo',
                   '-pix_fmt', 'bgr24',
                   # Fix: ffmpeg's -s option expects "WxH"; the original used
                   # "W:H", which av_parse_video_size rejects.
                   '-s', "{}x{}".format(input_width, input_height),
                   '-re',
                   '-i', '-',
                   '-r', str(14),  # output frame rate
                   '-c:v', 'libx264',
                   '-pix_fmt', 'yuv420p',
                   '-preset', 'ultrafast',
                   '-b:v', str(2000) + 'k',  # target bitrate
                   '-f', 'flv',
                   '-tune', 'zerolatency',
                   rtmp_output,
                   # NOTE(review): options after the output URL apply to no
                   # stream; kept for byte-compatibility of the command line.
                   '-threads', '8']

        for attempt in range(retries):
            try:
                ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE)
                return ffmpeg
            except BrokenPipeError as e:
                print(f"Attempt {attempt + 1} failed with BrokenPipeError: {e}")
                time.sleep(2)  # Wait before retrying
        print("Failed to start ffmpeg after several attempts")
        return None

    def process_readFrame(self):
        """Reader thread: open the source, then keep the newest frame in the
        shared-memory slot. Reconnects automatically on repeated failures."""
        parent_conn = self.parent_conn

        # Probe the real video size first (needed in both RTMP and HTTP mode).
        print("开始在子线程中获取视频尺寸...")
        flag, width, height = self.get_rtmp_video_width_height(self.input_url)
        if not flag:
            print("获取视频尺寸失败，任务被中断")
            parent_conn.send(msg_error_openfail)
            return
        print(f"成功获取视频尺寸 {width}x{height}")

        actual_shape = (height, width, 3)
        print(f"读帧进程使用actual_shape: {actual_shape}")

        existing_shm = shared_memory.SharedMemory(name=self.shm_name)
        frame_buffer = np.ndarray(actual_shape, dtype=self.dtype, buffer=existing_shm.buf)
        print(f"读帧进程frame_buffer shape: {frame_buffer.shape}")

        cap = None
        index = 0
        lastindex = 0
        consecutive_failures = 0
        max_failures_before_reconnect = 10  # reconnect after 10 consecutive read failures

        # Open the capture; retry until it yields a first frame or we stop.
        while self.running:
            cap = cv2.VideoCapture(self.input_url)
            if cap.isOpened():
                ret, test_frame = cap.read()
                if ret and test_frame is not None:
                    print("成功打开视频流并读取到帧，开始处理...")
                    # Send the actual size first, then the start signal.
                    parent_conn.send(('video_size', width, height))
                    parent_conn.send(msg_startread)
                    break
                else:
                    print("视频流已打开但无法读取帧，1秒后重试...")
                    cap.release()
                    time.sleep(1)
            else:
                print("无法打开视频流，1秒后重试...")
                if cap is not None:
                    cap.release()
                time.sleep(1)

        # Main read loop.
        while self.running:
            ret, frame = cap.read()
            index += 1

            if not ret or frame is None:
                consecutive_failures += 1
                print(f"读取帧失败 ({consecutive_failures}次)，等待视频流恢复...")

                # Too many consecutive failures: tear down and reconnect.
                if consecutive_failures >= max_failures_before_reconnect:
                    print("连续失败次数过多，尝试重新连接视频流...")
                    cap.release()
                    time.sleep(2)

                    print("重新获取视频尺寸...")
                    flag, width, height = self.get_rtmp_video_width_height(self.input_url)
                    if not flag:
                        print("任务被中断")
                        return
                    print(f"重新获取视频尺寸成功 {width}x{height}")

                    reconnect_success = False
                    reconnect_attempt = 0
                    last_log_time = time.time()

                    while self.running and not reconnect_success:
                        reconnect_attempt += 1
                        cap = None

                        try:
                            cap = cv2.VideoCapture(self.input_url)
                            if cap.isOpened():
                                test_ret, test_frame = cap.read()
                                if test_ret and test_frame is not None:
                                    print(f"重新连接成功（尝试{reconnect_attempt}次），继续处理...")
                                    consecutive_failures = 0
                                    reconnect_success = True
                                    # Keep this cap open and continue with it.
                                else:
                                    cap.release()
                                    cap = None
                                    # Log only every 20s to avoid flooding.
                                    current_time = time.time()
                                    if current_time - last_log_time >= 20:
                                        print(f"等待视频流恢复中...（已尝试{reconnect_attempt}次）")
                                        last_log_time = current_time
                                    time.sleep(2)
                            else:
                                if cap is not None:
                                    cap.release()
                                    cap = None
                                # Log only every 20s to avoid flooding.
                                current_time = time.time()
                                if current_time - last_log_time >= 20:
                                    print(f"等待视频流可用...（已尝试{reconnect_attempt}次）")
                                    last_log_time = current_time
                                time.sleep(2)
                        except Exception as e:
                            # Make sure the capture is released on any error.
                            if cap is not None:
                                try:
                                    cap.release()
                                except Exception:
                                    pass
                                cap = None
                            # Log only every 20s to avoid flooding.
                            current_time = time.time()
                            if current_time - last_log_time >= 20:
                                print(f"重连异常: {e}，继续重试...")
                                last_log_time = current_time
                            time.sleep(2)
                else:
                    time.sleep(1)  # short pause, then retry the read
                continue

            # Frame read OK.
            consecutive_failures = 0

            if index - lastindex > 1:
                # A message from the inference side (e.g. "no more frame")
                # means we should stop.
                if parent_conn.poll():
                    print("parent_conn.poll() failed, readFrame stop.")
                    break
                else:
                    lastindex = index
                    with self.lock:
                        frame_buffer[:] = frame[:]
                    print(f"readframe: send frame, {index}, {datetime.datetime.now()}, to pipe{self.output_url}")

        print("process_readFrame end")
        if cap is not None:
            cap.release()

    def process_infer(self):
        """Inference thread: consume frames from shared memory, run YOLO, and
        publish results (ffmpeg restream in mode 0, HTTP POST in mode 1)."""
        device_id = get_gpu_id()
        existing_shm = shared_memory.SharedMemory(name=self.shm_name)
        child_conn = self.child_conn

        # Wait for the reader to report the actual video size.
        actual_shape = None
        while True:
            # Fix: poll with a timeout instead of spinning at 100% CPU.
            if child_conn.poll(0.1):
                data = child_conn.recv()
                if data == msg_error_openfail:
                    # Fix: was exit(); a plain return ends the thread cleanly.
                    return
                elif isinstance(data, tuple) and data[0] == 'video_size':
                    _, width, height = data
                    actual_shape = (height, width, 3)
                    print(f"推理进程接收到实际视频尺寸: {width}x{height}")
                elif data == msg_startread:
                    break

        # Fall back to the default shape if no size message arrived.
        if actual_shape is None:
            print("警告：未接收到视频尺寸，使用默认shape")
            actual_shape = self.shape

        frame_buffer = np.ndarray(actual_shape, dtype=self.dtype, buffer=existing_shm.buf)
        print(f"推理进程frame_buffer shape: {frame_buffer.shape}")

        # RTMP mode: start ffmpeg in this thread.
        if self.mode == 0:
            print("正在子线程中启动ffmpeg...")
            # Retry until ffmpeg starts or the task is stopped.
            last_log_time = time.time()
            attempt = 0
            while self.running:
                attempt += 1
                self.ffmpeg = self.start_ffmpeg(self.input_url, self.output_url)
                if self.ffmpeg is not None:
                    print("ffmpeg启动成功")
                    break
                # Log only every 20s to avoid flooding.
                current_time = time.time()
                if current_time - last_log_time >= 20:
                    print(f"等待ffmpeg启动中...（已尝试{attempt}次）")
                    last_log_time = current_time
                time.sleep(2)

            # Left the loop because self.running became False.
            if self.ffmpeg is None:
                print("任务被停止，ffmpeg未能启动")
                return

        ffmpeg_consecutive_failures = 0  # consecutive ffmpeg stdin write failures
        max_ffmpeg_failures = 5          # restart ffmpeg after this many

        try:
            while self.running:
                with self.lock:  # exclude the reader while we copy the slot
                    frame = frame_buffer.copy()
                    # An all-zero buffer means no new frame (already consumed).
                    if np.sum(frame) == 0:
                        frame = None
                    else:
                        # Zero the slot so the same frame is not processed twice.
                        frame_buffer[:] = 0
                if frame is None:
                    # Fix: brief pause instead of a hot spin while waiting.
                    time.sleep(0.005)
                    continue

                pil_image, resultList = AImodel(frame, self.Yolo_model, device_id, self.rules, self.conf)
                img_array = np.array(pil_image)
                bgr_array = img_array[:, :, ::-1]  # RGB -> BGR for ffmpeg rawvideo
                bgr_img = Image.fromarray(bgr_array)
                if self.mode == 0:
                    try:
                        self.ffmpeg.stdin.write(bgr_img.tobytes())
                        ffmpeg_consecutive_failures = 0  # reset on success
                    except Exception as e:
                        ffmpeg_consecutive_failures += 1
                        print(f"ffmpeg写入失败 ({ffmpeg_consecutive_failures}次): {e}")

                        # Too many consecutive failures: restart ffmpeg.
                        if ffmpeg_consecutive_failures >= max_ffmpeg_failures:
                            print("ffmpeg连续失败次数过多，尝试重启ffmpeg...")
                            try:
                                if self.ffmpeg is not None:
                                    self.ffmpeg.stdin.close()
                                    self.ffmpeg.terminate()
                                    self.ffmpeg.wait(timeout=2)
                            except Exception as cleanup_e:
                                print(f"清理旧ffmpeg进程时出错: {cleanup_e}")
                                try:
                                    self.ffmpeg.kill()
                                except Exception:
                                    pass

                            # Retry until ffmpeg restarts or the task stops.
                            print("正在重新启动ffmpeg...")
                            restart_last_log_time = time.time()
                            restart_attempt = 0
                            while self.running:
                                restart_attempt += 1
                                self.ffmpeg = self.start_ffmpeg(self.input_url, self.output_url)
                                if self.ffmpeg is not None:
                                    print("ffmpeg重启成功")
                                    ffmpeg_consecutive_failures = 0
                                    break
                                # Log only every 20s to avoid flooding.
                                restart_current_time = time.time()
                                if restart_current_time - restart_last_log_time >= 20:
                                    print(f"等待ffmpeg重启中...（已尝试{restart_attempt}次）")
                                    restart_last_log_time = restart_current_time
                                time.sleep(2)
                else:
                    # HTTP mode: POST a little-endian binary payload:
                    # [status:1][jpeg_len:4][jpeg][count:4] then per box
                    # [class_idx:4][score:f4][x:4][y:4][w:4][h:4].
                    try:
                        byte_data = bytearray()
                        byte_data.append(0)  # success flag

                        success, jpg_buf = cv2.imencode('.jpg', frame)
                        original_image_bytes = jpg_buf.tobytes()

                        image_length = len(original_image_bytes).to_bytes(4, 'little')
                        byte_data.extend(image_length)
                        byte_data.extend(original_image_bytes)
                        result_count = len(resultList).to_bytes(4, 'little')
                        byte_data.extend(result_count)
                        for result in resultList:
                            # Send the mapped index, not the class name.
                            class_index = self.class_mapping.get(result.class_, 0)
                            byte_data.extend(class_index.to_bytes(4, 'little'))
                            byte_data.extend(struct.pack('<f', result.score_))
                            byte_data.extend(int(result.x).to_bytes(4, 'little'))
                            byte_data.extend(int(result.y).to_bytes(4, 'little'))
                            byte_data.extend(int(result.w).to_bytes(4, 'little'))
                            byte_data.extend(int(result.h).to_bytes(4, 'little'))
                        response = requests.post(self.output_http_result_url, data=byte_data)
                        if response.status_code != 200:
                            print("Failed to send result")
                    except Exception as e:
                        # Report the failure to the error endpoint:
                        # [status=1:1][msg_len:4][msg].
                        error_msg = f"Detection error: {str(e)}"
                        error_data = bytearray()
                        error_data.append(1)  # failure flag
                        error_length = len(error_msg).to_bytes(4, 'little')
                        error_data.extend(error_length)
                        error_data.extend(error_msg.encode('utf-8'))
                        try:
                            error_response = requests.post(self.output_http_error_url, data=error_data)
                            if error_response.status_code != 200:
                                print(f"Failed to send error: {error_response.status_code}")
                        except Exception as error_e:
                            print(f"Failed to send error to error_url: {error_e}")
                        print(f"Error during HTTP mode processing: {e}")

        except Exception as e:
            print(f"An error occurred during processing: {e}")
            # Fix: raising a plain string is a TypeError in Python 3.
            raise RuntimeError("Error, during process_infer_frame") from e
        try:
            child_conn.send(msg_nomoreframe)
        except Exception as e:
            print(f"parent_conn already closed: {e}")

    def get_rtmp_video_width_height(self, video_url):
        """Probe the source until a frame can be read; record its shape/dtype.

        Returns (flag, width, height). flag is False only when self.running
        was cleared before a frame could be read (task interrupted), in which
        case default 1280x720 metadata is recorded.
        """
        print("初始化视频流连接...")
        width = 1280
        height = 720
        flag = True

        while self.running:
            cap = cv2.VideoCapture(video_url)
            if cap.isOpened():
                width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                ret, frame = cap.read()  # read one frame to learn shape/dtype
                if ret and frame is not None:
                    self.shape = frame.shape
                    self.dtype = frame.dtype
                    self.nbytes = frame.nbytes
                    cap.release()
                    print(f"成功连接视频流，分辨率: {width}x{height}")
                    return flag, width, height
                else:
                    print(f"视频流已打开但无法读取帧，等待重试...")
                    cap.release()
            else:
                print(f"无法打开视频流，等待重试...")
                cap.release()

            time.sleep(1)

        # self.running went False: report interruption with default metadata.
        print("视频流连接已被中断")
        flag = False
        self.shape = (height, width, 3)
        self.dtype = np.uint8
        self.nbytes = height * width * 3
        return flag, width, height

    def cleanup(self):
        """Stop the threads and release shared memory, ffmpeg, pipes and the
        model. Safe to call more than once."""
        self.running = False
        if self.shm_name is not None:
            try:
                # Prefer the handle we created; fall back to attach-by-name.
                existing_shm = self._shm if self._shm is not None else shared_memory.SharedMemory(name=self.shm_name)
                existing_shm.close()
                existing_shm.unlink()
            except Exception as e:
                print(f"shm into error: {e} , when cleanup")
            self._shm = None
            self.shm_name = None
        if self.p_inferFrame is not None:
            self.p_inferFrame.join()
        if self.p_readFrame is not None:
            self.p_readFrame.join()

        if self.ffmpeg is not None:
            try:
                self.ffmpeg.stdin.close()
                self.ffmpeg.terminate()
                self.ffmpeg.kill()
                self.ffmpeg.wait()
            except Exception as e:
                # Fix: keep tearing down even if the process already died;
                # previously an error here aborted the rest of cleanup.
                print(f"ffmpeg cleanup error: {e}")
            finally:
                print("ffmpeg The end")
                self.ffmpeg = None

        if self.lock is not None:
            if self.lock.locked():
                self.lock.release()
            self.lock = None

        if self.parent_conn is not None:
            self.parent_conn.close()
            self.parent_conn = None

        if self.child_conn is not None:
            self.child_conn.close()
            self.child_conn = None

        if self.Yolo_model is not None:
            del self.Yolo_model
            self.Yolo_model = None

class DetectResult:
    """A single detection: box center (x, y), size (w, h), confidence score
    and matched rule class name."""

    def __init__(self, x, y, w, h, score_, class_):
        self.x, self.y = x, y
        self.w, self.h = w, h
        self.score_ = score_
        self.class_ = class_

    def to_dict(self):
        """Serialize to the JSON shape used in API responses."""
        return {
            "x": self.x,
            "y": self.y,
            "w": self.w,
            "h": self.h,
            "score": self.score_,
            "class": self.class_,
        }

def get_gpu_id():
    """Pick the GPU with the most free memory via NVML.

    Returns the device index, or -1 when no device is found.
    NOTE(review): `gpu_name.decode('utf-8')` assumes the NVML binding returns
    bytes; newer pynvml versions return str — confirm against the installed
    version.
    """
    nvmlInit()
    device_count = nvmlDeviceGetCount()
    print(f"Found {device_count} GPUs")

    selected_gpu_index = -1
    max_free_memory = 0

    for idx in range(device_count):
        handle = nvmlDeviceGetHandleByIndex(idx)
        gpu_name = nvmlDeviceGetName(handle)
        free_memory = nvmlDeviceGetMemoryInfo(handle).free / (1024 ** 2)  # MB
        print(f"GPU {idx}: {gpu_name.decode('utf-8')}, Free Memory: {free_memory:.2f} MB")
        if free_memory > max_free_memory:
            max_free_memory, selected_gpu_index = free_memory, idx
    nvmlShutdown()
    return selected_gpu_index

def get_model(config, model_name):
    """Build a YOLO model from the weights path in config section [model_name]."""
    section = config[model_name]
    weights_path = os.path.join(section['base_dir'].strip("'"), section['model_pt'].strip("'"))
    return YOLO(weights_path)

def parse_rules(config, model_name, rule_name_classes):
    """Build drawing/filter rules for the requested class names.

    Rule names are the second "_"-separated token of every "rule_*" key in
    the [model_name] config section (e.g. rule_car_class_ids -> "car").
    Only rules whose name appears in rule_name_classes are returned.
    Returns (rules, conf) where conf is the global confidence threshold.
    """
    conf = float(config[model_name]['conf'].strip("'"))
    rule_name_set = {key.split("_")[1] for key in config[model_name] if key.startswith("rule_")}

    # Warn about requested classes that have no rule definition in the config.
    unmatched_classes = set(rule_name_classes) - rule_name_set
    if unmatched_classes:
        print(f"警告: 以下类别在配置文件中没有对应的规则定义: {', '.join(unmatched_classes)}")
        print(f"可用的类别为: {', '.join(rule_name_set)}")

    rules = []
    section = config[model_name]
    for rule_name in rule_name_set:
        print(rule_name)
        if rule_name not in rule_name_classes:
            continue
        rules.append({
            'class_ids': [float(v) for v in section[f"rule_{rule_name}_class_ids"].split(',')],
            'confidence_threshold': float(section[f"rule_{rule_name}_confidence_threshold"]),
            'color': section[f"rule_{rule_name}_color"],
            'label': section[f"rule_{rule_name}_label"],
            'class': rule_name,
        })
    return rules, conf

def AImodel(frame, model, device_id, rules, conf):
    """Run YOLO on one BGR frame, annotate matching detections, and return
    (annotated PIL image in RGB, list of DetectResult).

    A box is kept when its class id is in a rule's class_ids and its
    confidence exceeds that rule's threshold; a box matching several rules
    is drawn and reported once per rule, as before.
    """
    start_time = time.time()
    if frame is None:
        raise ValueError("AI Input image is None.")
    height, width, _ = frame.shape

    results = model.predict(frame, conf=conf, imgsz=min(height, width), device=device_id)
    pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(pil_image)

    resultList = []
    boxes = results[0].boxes
    for i in range(boxes.xywh.shape[0]):
        x, y, w, h = boxes.xywh[i]
        class_name, confidence = boxes.cls[i], boxes.conf[i]
        # Box corners from the (center, size) representation.
        rectangle = [int(x.item() - w.item() / 2), int(y.item() - h.item() / 2),
                     int(x.item() + w.item() / 2), int(y.item() + h.item() / 2)]
        for rule in rules:
            if class_name.item() not in rule['class_ids']:
                continue
            if not (confidence > rule['confidence_threshold']):
                continue
            print(f"绘制矩形框和标签: {rule['label']}, {class_name.item()}")
            # Draw the box and its label (label sits above-left of the box).
            draw.rectangle(rectangle, outline=rule['color'], width=2)
            draw.text(
                (int(x.item() - w.item() / 2 - 25), int(y.item() - h.item() / 2) - 25),
                f'{rule["label"]}: {confidence:.2f}',
                fill=rule['color'],
                font=font
            )
            resultList.append(DetectResult(
                x.item(), y.item(), w.item(), h.item(), confidence.item(), rule['class']
            ))

    print("AI_model_time:", time.time() - start_time)
    return pil_image, resultList


@app.route('/', methods=['POST'])
def home_page():
    """Service banner; doubles as a liveness probe."""
    return 'real time video detection'

@app.route('/rtmp_detection', methods=['POST'])
def process_rtmp_detection():
    """Start (or restart) an RTMP detection task keyed by its output URL.

    JSON body: input (source URL), output (RTMP publish URL), classes
    (optional list or comma-separated string; defaults to the config list).
    Returns {'code': 0} on success, {'code': -2} with HTTP 500 on failure.
    """
    data = request.get_json()
    input_rtmp_url = data.get('input')
    output_rtmp_url = data.get('output')
    input_classes = data.get('classes')
    if output_rtmp_url in ReadAndInfer_list:
        if ReadAndInfer_list[output_rtmp_url].running:
            print(output_rtmp_url, " already running, stopping it and restarting, please wait.")
            # Gracefully stop the running task before replacing it.
            ReadAndInfer_list[output_rtmp_url].running = False
            time.sleep(1)
            ReadAndInfer_list[output_rtmp_url].cleanup()
            del ReadAndInfer_list[output_rtmp_url]
            gc.collect()
            print('- - - 旧任务已完美结束！')
        else:
            del ReadAndInfer_list[output_rtmp_url]
    # Fix: identity comparison with None (was `== None`).
    if input_classes is None:
        classes = classes_list
    elif isinstance(input_classes, str):
        # Comma-separated string -> list of class names.
        classes = [c.strip() for c in input_classes.split(',')]
    else:
        classes = input_classes
    processor = ReadAndInfer(input_rtmp_url, output_rtmp_url, "model1", 0)
    # Register before starting so a stop request can already find the task.
    ReadAndInfer_list[output_rtmp_url] = processor
    try:
        processor.start(classes)
    except Exception as e:
        # Fix: the caught exception was silently discarded; log it.
        print(f"Failed to start task: {e}")
        processor.cleanup()
        del ReadAndInfer_list[output_rtmp_url]
        return jsonify({
            'code': -2,
            'message': "Error when start"
        }), 500
    return jsonify({
        'code': 0,
        'message': "Success!"
    }), 200

@app.route('/realtimevideo_start', methods=['POST'])
def process_realtimevideo_start():
    """Start (or restart) an RTMP detection task (same behavior as
    /rtmp_detection, kept for API compatibility).

    JSON body: input (source URL), output (RTMP publish URL), classes
    (optional list or comma-separated string; defaults to the config list).
    """
    data = request.get_json()
    input_rtmp_url = data.get('input')
    output_rtmp_url = data.get('output')
    input_classes = data.get('classes')
    # Fix: identity comparison with None (was `== None`).
    if input_classes is None:
        classes = classes_list
    elif isinstance(input_classes, str):
        # Comma-separated string -> list of class names.
        classes = [c.strip() for c in input_classes.split(',')]
    else:
        classes = input_classes
    if output_rtmp_url in ReadAndInfer_list:
        if ReadAndInfer_list[output_rtmp_url].running:
            print(output_rtmp_url, " already running, stopping it and restarting, please wait.")
            # Gracefully stop the running task before replacing it.
            ReadAndInfer_list[output_rtmp_url].running = False
            time.sleep(1)
            ReadAndInfer_list[output_rtmp_url].cleanup()
            del ReadAndInfer_list[output_rtmp_url]
            gc.collect()
            print('- - - 旧任务已完美结束！')
        else:
            del ReadAndInfer_list[output_rtmp_url]
    processor = ReadAndInfer(input_rtmp_url, output_rtmp_url, "model1", 0)
    # Register before starting so a stop request can already find the task.
    ReadAndInfer_list[output_rtmp_url] = processor
    try:
        processor.start(classes)
    except Exception as e:
        # Fix: the caught exception was silently discarded; log it.
        print(f"Failed to start task: {e}")
        processor.cleanup()
        del ReadAndInfer_list[output_rtmp_url]
        return jsonify({
            'code': -2,
            'message': "Error when start"
        }), 500
    return jsonify({
        'code': 0,
        'message': "Success!"
    }), 200

@app.route('/realtimevideo_stop', methods=['POST'])
def stop_realtimevideo():
    """Stop the RTMP task whose output URL is given in the JSON body."""
    data = request.get_json()
    output_rtmp_url = data.get('output')
    if output_rtmp_url not in ReadAndInfer_list:
        return jsonify({
            'code': 0,
            'message': "Already closed"
        }), 200
    task = ReadAndInfer_list[output_rtmp_url]
    task.running = False
    time.sleep(1)
    task.cleanup()
    del ReadAndInfer_list[output_rtmp_url]
    gc.collect()
    print('- - - 所有进程都结束！')
    return jsonify({'code': 0})
    

# "TCP mode": detection results are pushed over HTTP to ip:port instead of restreamed via RTMP
@app.route('/realtimevideo_direct_start', methods=['POST'])
def process_realtimevideo_direct_start():
    """Start (or restart) an HTTP-push detection task ("tcp mode").

    JSON body: input (source URL), ip and port (result receiver), classes
    (optional list or comma-separated string; defaults to the config list).
    Results are POSTed to http://ip:port/result, errors to /error.
    """
    data = request.get_json()
    input_http_url = data.get('input')
    output_ip = data.get('ip')
    output_port = data.get('port')
    output_http_url = 'http://' + str(output_ip) + ':' + str(output_port)
    input_classes = data.get('classes')
    # Fix: identity comparison with None (was `== None`).
    if input_classes is None:
        classes = classes_list
    elif isinstance(input_classes, str):
        # Comma-separated string -> list of class names.
        classes = [c.strip() for c in input_classes.split(',')]
    else:
        classes = input_classes
    if output_http_url in tcp_list:
        if tcp_list[output_http_url].running:
            print(output_http_url, " already running, stopping it and restarting, please wait.")
            # Gracefully stop the running task before replacing it.
            tcp_list[output_http_url].running = False
            time.sleep(1)
            tcp_list[output_http_url].cleanup()
            del tcp_list[output_http_url]
            gc.collect()
            print('- - - 旧任务已完美结束！')
        else:
            del tcp_list[output_http_url]
    processor = ReadAndInfer(input_http_url, output_http_url, "model1", 1)  # mode 1: HTTP push of frames/results
    # Register before starting so a stop request can already find the task.
    tcp_list[output_http_url] = processor

    try:
        processor.start(classes)
    except Exception as e:
        # Fix: the try/except here was commented out, so a start failure
        # left a stale registry entry and an unhandled 500; clean up and
        # report like the RTMP routes do.
        print(f"Failed to start task: {e}")
        processor.cleanup()
        del tcp_list[output_http_url]
        return jsonify({
            'code': -2,
            'message': "Error when start"
        }), 500
    return jsonify({
        'code': 0,
        'message': "Success!"
    }), 200

@app.route('/realtimevideo_direct_stop', methods=['POST'])
def stop_realtimevideo_direct():
    """Stop the HTTP-push task addressed by ip/port in the JSON body."""
    data = request.get_json()
    output_ip = data.get('ip')
    output_port = data.get('port')
    output_http_url = 'http://' + str(output_ip) + ':' + str(output_port)
    if output_http_url not in tcp_list:
        return jsonify({
            'code': 0,
            'message': "Already closed"
        }), 200
    task = tcp_list[output_http_url]
    task.running = False
    time.sleep(1)
    task.cleanup()
    del tcp_list[output_http_url]
    gc.collect()
    print('- - - 所有进程都结束！')
    return jsonify({'code': 0})

@app.route('/img_detection', methods=['POST'])
def process_img_detection():
    """Run detection on a single base64-encoded image.

    JSON body: data (base64 image), classes (optional list or comma-separated
    string; defaults to the config list). Returns the annotated image and
    detection boxes as JSON.
    """
    global imageReadAndInfer
    # Fix: the eager initialization in __main__ is commented out, so
    # imageReadAndInfer was always None and this route always crashed.
    # Create the single-image worker lazily on first use.
    if imageReadAndInfer is None:
        imageReadAndInfer = ReadAndInfer("", "", "model1", 1)
        imageReadAndInfer.start_img_yolo()
    data = request.get_json()
    imageBase64 = data.get('data')
    img_data = base64.b64decode(imageBase64)
    input_classes = data.get('classes')
    # Fix: identity comparison with None (was `== None`).
    if input_classes is None:
        classes = classes_list
    elif isinstance(input_classes, str):
        # Comma-separated string -> list of class names.
        classes = [c.strip() for c in input_classes.split(',')]
    else:
        classes = input_classes
    return jsonify(imageReadAndInfer.img_infer(img_data, classes))

@app.route('/get_realtimevideo_list', methods=['GET'])
def give_realtimevideo_running_info():
    """List the output URLs of all registered RTMP detection tasks."""
    return jsonify({"output_url": list(ReadAndInfer_list.keys())})

@app.route('/get_gpu_memory', methods=['GET'])
def give_gpu_info():
    """Report each GPU's free memory in MB as a JSON list of strings.

    NOTE(review): `gpu_name.decode('utf-8')` assumes the NVML binding returns
    bytes; newer pynvml versions return str — confirm against the installed
    version.
    """
    gpulist = []
    nvmlInit()
    device_count = nvmlDeviceGetCount()
    for i in range(device_count):
        handle = nvmlDeviceGetHandleByIndex(i)
        gpu_name = nvmlDeviceGetName(handle)
        memory_info = nvmlDeviceGetMemoryInfo(handle)
        free_memory = memory_info.free / (1024 ** 2)  # bytes -> MB
        # Fix: lists have no .add(); the original raised AttributeError here.
        gpulist.append(f"GPU {i}: {gpu_name.decode('utf-8')}, Free Memory: {free_memory:.2f} MB")
    nvmlShutdown()
    output_gpu_list = {"gpus": gpulist}
    return jsonify(output_gpu_list)

@app.route('/get_classes_list', methods=['GET'])
def give_classes_info():
    """Return the default class list loaded from the config file."""
    return jsonify({"classes": classes_list})
    
#@app.route('/img_detection', methods=['POST'])
#def process_img_detection():
#    if 'file' not in request.files:
#        print(request)
#        print(request.files)
#        return "No file", 400
#    file = request.files['file']
#    print(file)
#    if file.filename == '':
#        return "No selected file", 400
#    if file:
#        img_bytes = file.read()
#        img = Image.open(io.BytesIO(img_bytes))
#        img_array = np.array(img)
#        img_np = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
#        output_img = AImodel(img_np, 0)
#        filename = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg"
#        filepath = os.path.join(upload_dir, filename)
#        output_img.save(filepath, format='JPEG')
#        img_io = io.BytesIO()
#        output_img.save(img_io, 'JPEG')
#        img_io.seek(0)  # 移动指针到起始位置
#        return send_file(img_io, mimetype='image/jpeg')

# Serve image files saved in the upload directory (disabled)

# @app.route('/get_image/<filename>', methods=['GET'])
# def get_image(filename):
#     return send_from_directory(upload_dir, filename)

if __name__ == '__main__':
    # Eager setup of the single-image worker is disabled; /img_detection
    # expects imageReadAndInfer to be initialized (it currently stays None).
    # imageReadAndInfer = ReadAndInfer(None, None, "model1",1)
    # imageReadAndInfer.start_img_yolo()
    app.run(host='0.0.0.0', port=8000)
