import concurrent.futures
import json
import os
import sys
import time

import torch
from flask import Flask, render_template,request, jsonify, Response
from flask_compress import Compress
from flask_cors import CORS
from ultralytics import YOLO

from util import *

app = Flask(__name__)
CORS(app)
Compress(app)

# Resolve model paths relative to this file so the app works from any CWD.
base_path = os.path.dirname(os.path.abspath(__file__))
detect_model = YOLO(os.path.join(base_path, "models", "best.pt"))
#seg_model = YOLO(os.path.join(base_path, "models", "best-seg.pt"))
models_list = [{"model": detect_model, "name": "detect"}]
# Prefer GPU when available; every model is moved onto the same device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Plain loop instead of the previous list comprehension: the comprehension
# was executed only for its side effects and built a throwaway list.
for model_entry in models_list:
    model_entry["model"].to(device)

image_size_limit = 15   # Upload size limit in MB (enforced in /predict).

@app.route('/')
@app.route('/index.html')
def index():
    """Serve the landing page, injecting the app title into the template."""
    page = render_template("index.html", my_title="yolo11-flask")
    return page

@app.route('/predict', methods=['POST'])
def predict():
    """Run every configured model on a base64-encoded image.

    Expects a JSON body: {"image": <base64 string>, "conf": <0-100 confidence>}.
    Streams back a JSON array with one result object per model.

    Returns:
        200 with a JSON array of per-model results on success,
        413 if the estimated decoded image exceeds ``image_size_limit`` MB,
        412 for unsupported methods (unreachable given the route config),
        500 on any unexpected error.
    """
    ip = str(request.remote_addr)
    print("IP:" + ip)
    try:
        if request.method == 'POST':
            data = request.get_json()
            image = data['image']
            # Estimate the decoded payload size in MB: base64 inflates data
            # by 4/3, so decoded bytes ~= encoded length * 3/4.
            # Bug fix: the divisor was 1048567; one MiB is 1048576 (1024*1024).
            image_size = sys.getsizeof(image) * 3 / 4 / 1048576
            if image_size > image_size_limit:
                return jsonify({"error": f"图片太大，请选择小于{image_size_limit}MB的图片"}), 413
            img_ndarray = base64_to_cv2(image)
            # NOTE(review): assumes the "images/" directory already exists
            # next to the CWD — cv2.imwrite fails silently otherwise; verify.
            cv2.imwrite(f"images/{ip}_{int(time.time())}.jpg", img_ndarray)
            with concurrent.futures.ThreadPoolExecutor(max_workers=len(models_list)) as executor:
                start = time.time()
                # Submit one inference task per model; each runs in its own thread.
                futures = [executor.submit(thread_safe_predict, model_dict, img_ndarray, float(data['conf']) / 100) for
                           model_dict in models_list]
                # Block until every model has produced its result.
                results = [future.result() for future in futures]
                print(f"总耗时:{time.time() - start}")
            def generate():
                # Stream the results as a JSON array without building one big string.
                yield '['  # start of the JSON array
                for i, item in enumerate(results):
                    yield json.dumps(item)
                    if i < len(results) - 1:
                        yield ','  # separator between elements
                yield ']'  # end of the JSON array
            return Response(generate(), content_type='application/json')
        else:
            return jsonify({"error": "请求方法不支持"}), 412
    except Exception as e:
        print(e)
        return jsonify({"error": "服务器内部错误"}), 500

def thread_safe_predict(model_dict, image, conf):
    """Run one model on one image and return a serializable result dict.

    Args:
        model_dict: {"model": YOLO model, "name": model label}.
        image: BGR image as a numpy ndarray.
        conf: confidence threshold in [0, 1].

    Returns:
        For the "detect" model: dict with the rendered base64 image plus
        ripeness counts. For other models: dict with name and image only.
        Implicitly returns None if the model yields no results.
    """
    yolo_model = model_dict['model']
    name = model_dict['name']
    print(f"模型{name}开始进行推理")
    print(f"图片维度{image.shape}")
    # Downscale before inference to bound memory/latency.
    shrunk = compress_image(image, 1280, 1280, quality=80)
    predictions = yolo_model.predict(shrunk, conf=conf, imgsz=640)
    for result in predictions:
        # Segmentation/pose overlays are drawn without boxes; detection keeps them.
        if name in ['segment', 'pose']:
            rendered = result.plot(boxes=False)
        else:
            rendered = result.plot()
        # Flip BGR -> RGB, then encode back to base64 for the client.
        rendered = rendered[:, :, ::-1]
        img = np_to_base64(rendered)
        print(f"模型{name}推理完成")

        if name != 'detect':
            return {"name": name, "image": img}

        # Tally ripeness classes in a single pass over the boxes.
        # NOTE(review): the original inline comments disagreed with the code
        # (semi_mature counts class id 2, mature counts class id 1) — verify
        # this mapping against the training label map.
        tallies = {0: 0, 1: 0, 2: 0}
        for box in result.boxes:
            cls_id = int(box.cls)
            if cls_id in tallies:
                tallies[cls_id] += 1
        return {
            "name": name,
            "image": img,
            "immature": tallies[0],
            "semi_mature": tallies[2],
            "mature": tallies[1]
        }

@app.route('/test', methods=['GET'])
def test():
    """Liveness probe: always responds with a plain 'ok' body."""
    status = 'ok'
    return status

if __name__ == '__main__':
    # SSL setup (currently disabled). HTTPS listens on port 443 by default;
    # to enable it, point the context tuple at the absolute paths of the
    # certificate file and its private key, then pass ssl_context=context.
    # context = (r'/root/ssl/strarry.top.pem', r'/root/ssl/strarry.top.key')
    app.run(host="0.0.0.0", port=8081, threaded=True)
