import base64

import numpy as np
import time
from flask import Flask, Response, render_template, jsonify,request
import cv2
from ultralytics import YOLO
from PIL import Image
import io
import torch
from flask_cors import CORS


app = Flask(__name__)
CORS(app)  # allow cross-origin requests from all origins

# Select the inference device: CUDA GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Shared state: written by the stream/processing functions below and read
# back by the JSON GET endpoints. Not thread-safe -- assumes the Flask dev
# server usage seen in __main__ (TODO confirm for production deployments).
detected_objects = {}    # per-class counts from the latest webcam frame
detected_objects_a = {}  # per-class counts from the latest uploaded image
fps = 0.00               # latest stream FPS (later replaced by a "%.2f" string)
img_str = ''             # base64 JPEG of the latest annotated upload

def gen_frames():
    """Yield MJPEG multipart frames from the default webcam with YOLO
    detections drawn on each frame.

    Side effects: refreshes the module-level ``detected_objects`` mapping
    (class name -> count, recomputed per frame) and the ``fps`` string
    (updated roughly once per second), both read by the JSON endpoints.

    Yields:
        bytes: one ``--frame`` multipart chunk per encoded JPEG frame.
    """
    global detected_objects, fps

    model_path = r"C:\Users\64531\Desktop\yolov11\weights\best.pt"
    model = YOLO(model_path)
    model.to(device)  # run inference on the GPU when one is available

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        # Original called exit(), which would kill the whole server process
        # from inside a request handler; just end this stream instead.
        print("Error: Could not open video.")
        return

    frame_count = 0
    start_time = time.time()

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # BGR frame -> normalized NCHW float tensor on the target device.
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            tensor = (torch.from_numpy(rgb)
                      .permute(2, 0, 1)
                      .float()
                      .div(255.0)
                      .unsqueeze(0)
                      .to(device))

            # Time just the forward pass for diagnostics.
            start_pred_time = time.time()
            results = model(tensor)
            pred_time = time.time() - start_pred_time
            print(f"Prediction time: {pred_time:.4f} seconds")

            # Tally detections per class name for /get_detected_objects.
            detections = results[0].boxes.data.cpu().numpy()
            class_names = model.names
            counts = {}
            for detection in detections:
                class_id = int(detection[5])   # class id is column 5
                class_name = class_names[class_id]
                counts[class_name] = counts.get(class_name, 0) + 1
            # Assign in one step so readers never see a half-built dict.
            detected_objects = counts
            print("------------Detected objects:", detected_objects)

            annotated_frame = results[0].plot()  # draw boxes on the frame

            ok, buffer = cv2.imencode('.jpg', annotated_frame)
            if not ok:
                continue  # skip frames that fail to encode rather than send junk
            jpeg = buffer.tobytes()

            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg + b'\r\n')

            frame_count += 1
            end_time = time.time()
            if (end_time - start_time) > 1:
                # Keep fps consistently a formatted string: the original set a
                # float and immediately overwrote it with this same string.
                fps = "{:.2f}".format(frame_count / (end_time - start_time))
                print(f"FPS: {fps}")
                frame_count = 0
                start_time = end_time
    finally:
        # Release the camera even when the client disconnects mid-stream
        # (the original leaked the capture handle in that case).
        cap.release()


def picture_process(image_file):
    """Run YOLO detection on a single uploaded image.

    Parameters:
        image_file: file-like object (e.g. io.BytesIO) containing the image.

    Returns:
        str: base64-encoded JPEG of the image with detections drawn.

    Raises:
        RuntimeError: if the model yields no result for the image.

    Side effects: refreshes the module-level ``detected_objects_a`` mapping
    and ``img_str`` string, both read by the JSON endpoints.
    """
    global detected_objects_a, img_str

    model_path = r"C:\Users\64531\Desktop\yolov11\weights\best.pt"
    model = YOLO(model_path)
    # The original left the model on CPU while moving the input tensor to
    # `device`, which fails on CUDA hosts; keep both on the same device.
    model.to(device)

    image = Image.open(image_file)
    if image.mode != 'RGB':
        image = image.convert('RGB')

    # PIL (RGB) -> OpenCV (BGR) numpy array.
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Pad the bottom with white so the height matches the model input.
    # Clamp at zero: the original computed a negative border for images
    # taller than the target and crashed inside OpenCV.
    target_size = (800, 608)  # (width, height) expected by the model
    original_height = frame.shape[0]
    padding_height = max(target_size[1] - original_height, 0)
    frame_resized = cv2.copyMakeBorder(
        frame, 0, padding_height, 0, 0,
        cv2.BORDER_CONSTANT, value=[255, 255, 255])

    # Normalized NCHW float tensor on the inference device.
    tensor = (torch.from_numpy(frame_resized)
              .permute(2, 0, 1)
              .float()
              .div(255.0)
              .unsqueeze(0)
              .to(device))

    results = model(tensor, stream=True)
    try:
        result = next(results)
    except StopIteration:
        # The original swallowed this and then hit a NameError on
        # `annotated_frame`; fail explicitly instead.
        raise RuntimeError("Generator did not yield any results.")

    # Tally detections per class name for /get_detected_objects_a.
    detections = result.boxes.data.cpu().numpy()
    class_names = model.names
    counts = {}
    for detection in detections:
        class_id = int(detection[5])   # class id is column 5
        class_name = class_names[class_id]
        counts[class_name] = counts.get(class_name, 0) + 1
    detected_objects_a = counts
    print("------------Detected objects:", detected_objects_a)

    annotated_frame = result.plot()  # draw predictions on the padded frame

    _, buffer = cv2.imencode('.jpg', annotated_frame)
    frame_bytes = buffer.tobytes()

    img_str = base64.b64encode(frame_bytes).decode('utf-8')
    print(img_str)
    return img_str
    # (Removed the unreachable `yield` that followed the return in the
    # original; it silently turned nothing into generator semantics.)





@app.route('/')
def index():
    """Serve the single-page frontend."""
    template_name = 'index.html'
    return render_template(template_name)

@app.route('/picture_feed', methods=['POST'])
def picture_feed():
    """Accept an uploaded image, normalize it to in-memory JPEG, and hand it
    to picture_process(), returning its output as the response body."""
    upload = request.files.get('file')
    if upload is None:
        return jsonify({'error': 'No file part'}), 400
    if upload.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    try:
        # Normalize the upload: force RGB and re-encode as JPEG in memory
        # so downstream processing always sees a consistent format.
        image = Image.open(upload.stream)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        jpeg_buffer = io.BytesIO()
        image.save(jpeg_buffer, 'JPEG')
        jpeg_buffer.seek(0)
        return Response(picture_process(jpeg_buffer),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
    except Exception as e:
        return jsonify({'error': f'Error processing image: {str(e)}'}), 500


@app.route('/video_feed')
def video_feed():
    """Stream annotated webcam frames as an MJPEG multipart response."""
    frame_stream = gen_frames()
    return Response(frame_stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/get_detected_objects', methods=['GET'])
def get_detected_objects():
    """Return the latest per-class detection counts from the webcam stream."""
    current_counts = detected_objects
    return jsonify(current_counts)

@app.route('/get_detected_objects_a', methods=['GET'])
def get_detected_objects_a():
    """Return the latest per-class detection counts from the uploaded image."""
    current_counts = detected_objects_a
    return jsonify(current_counts)

@app.route('/get_detected_imgStr', methods=['GET'])
def get_detected_imgStr():
    """Return the latest annotated upload as a base64 JPEG string."""
    encoded = img_str
    return jsonify(encoded)

@app.route('/get_detected_FPS', methods=['GET'])
def get_detected_FPS():
    """Return the most recent stream FPS value (0.00 until first measurement)."""
    current_fps = fps
    return jsonify(current_fps)

@app.route('/video_feed_getLabel')
def video_feed_getLabel():
    """Alias of /video_feed: streams the same annotated MJPEG frames."""
    frame_stream = gen_frames()
    return Response(frame_stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/MP4_feed')
def MP4_feed(aaa=None):
    """Placeholder endpoint for streaming detections over an MP4 source.

    The original handler declared a required ``aaa`` parameter that Flask
    could never supply for this parameter-less route (every request raised
    a TypeError) and then called ``mp4_feed``, which is not defined anywhere
    in this module (NameError). Keep the route registered but respond with
    an explicit 501 until the MP4 pipeline is implemented.
    """
    # TODO: implement mp4_feed(source) and restore:
    #   return Response(mp4_feed(aaa),
    #                   mimetype='multipart/x-mixed-replace; boundary=frame')
    return jsonify({'error': 'MP4 streaming is not implemented'}), 501

if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader and
    # host='0.0.0.0' binds every interface -- fine for local development,
    # unsafe for production exposure. app.run() blocks, so the function
    # defined after this guard only executes once the server stops.
    app.run(host='0.0.0.0', debug=True)


def generate_12_digit_timestamp():
    """Return the current epoch time in milliseconds, zero-padded to at
    least 12 digits.

    NOTE(review): for present-day dates the millisecond epoch already has
    13 digits, so the result is longer than the name suggests -- the
    zero-padding only matters for timestamps before ~2001.
    """
    # Milliseconds since the epoch, truncated to an integer.
    millis = int(time.time() * 1000)
    return format(millis, "012d")