from collections import defaultdict
from flask import Flask, request, jsonify
import base64
import numpy as np
import torch
import requests
from PIL import Image  
import io
from models.common import DetectMultiBackend
from utils.general import (
    Profile,
    check_img_size,
    increment_path,
    non_max_suppression,
)
from utils.augmentations import (
    letterbox,
)
from utils.torch_utils import select_device
# Flask application instance; the API routes below are registered against it.
app = Flask(__name__)

class GenericClassifier:
    """Wheat-disease classifier backed by an ensemble of five YOLOv5 detectors.

    Each of the five fold models (`kf-1-best.pt` ... `kf-5-best.pt`) casts one
    vote per image; :meth:`process` returns the majority-vote class. Class 0
    means "no confident detection"; classes 1-6 are the detector labels
    shifted by one.
    """

    def __init__(self):
        """Load the five ensemble weights onto the best available device.

        Sets ``self.status`` / ``self.status_msg`` so the health endpoint can
        report whether model loading succeeded, instead of crashing the app.
        """
        self.device = self.try_gpu()
        self.status = "initializing"
        self.status_msg = "initializing..."
        try:
            # One DetectMultiBackend per fold of the k-fold ensemble.
            self.models = [
                DetectMultiBackend(f'./kf-{k}-best.pt', device=self.device)
                for k in range(1, 6)
            ]
            self.status = "ready"
            self.status_msg = "normal"
        except Exception as e:
            # Keep the service up; /healthy exposes the failure reason.
            self.status = "abnormal"
            self.status_msg = str(e)

    def get_model(self,
        weights="yolov5s.pt",  # model path or triton URL
        data="data/coco128.yaml",  # dataset.yaml path
        device="",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
    ):
        """Load a single YOLOv5 model and warm it up.

        :return: a warmed-up ``DetectMultiBackend`` instance.
        """
        device = select_device(device)
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt = model.stride, model.pt
        imgsz = check_img_size((640, 640), s=stride)
        # Fixed: original wrote `1 if pt or model.triton else 1`, which is
        # always 1 regardless of the backend — warm up with batch size 1.
        model.warmup(imgsz=(1, 3, *imgsz))
        return model

    def try_gpu(self, i=0):
        """Return device ``cuda:i`` if that GPU exists, otherwise the CPU."""
        if torch.cuda.device_count() > i:
            return torch.device(f'cuda:{i}')
        return torch.device('cpu')

    def load_image(self, image_data, image_type):
        """Decode an image into an RGB numpy array of shape (H, W, 3).

        :param image_data: base64 string (image_type 0) or URL (image_type 1)
        :param image_type: 0 = base64 payload, 1 = downloadable URL
        :raises ValueError: on an unsupported ``image_type``
        """
        if image_type == 0:  # base64 payload
            raw = base64.b64decode(image_data)
        elif image_type == 1:  # remote URL
            # NOTE(review): verify=False disables TLS certificate checks —
            # kept for compatibility with the existing deployment, but worth
            # revisiting. Timeout added so a stalled download cannot hang the
            # request handler forever.
            img_response = requests.get(image_data, verify=False, timeout=30)
            raw = img_response.content
        else:
            raise ValueError("Invalid image_type. Supported: 0 (Base64), 1 (URL).")

        # PIL raises on undecodable data; convert to RGB for the detectors.
        image = Image.open(io.BytesIO(raw)).convert("RGB")
        return np.array(image)

    def preprocess(self, image):
        """Apply ``self.transform`` and move the batch tensor to the device.

        NOTE(review): ``self.transform`` is never assigned anywhere in this
        class, so calling this raises AttributeError. Kept unchanged for
        interface compatibility — confirm whether it is still needed.
        """
        return self.transform(image).unsqueeze(0).to(self.device)

    def predict(self, input_tensor):
        """Run a forward pass and return softmax class probabilities.

        NOTE(review): ``self.model`` is never assigned (only ``self.models``),
        so calling this raises AttributeError. Kept unchanged for interface
        compatibility — confirm whether it is still needed.
        """
        with torch.no_grad():
            outputs = self.model(input_tensor)
            probabilities = torch.nn.functional.softmax(outputs[0], dim=0)
            return probabilities.cpu().numpy()

    def process(self, image_data='', image_type='', source=''):
        """Classify one image with the ensemble and majority-vote the result.

        :param image_data: base64 string or URL (see :meth:`load_image`)
        :param image_type: 0 = base64, 1 = URL
        :param source: unused; kept for backward compatibility with callers
        :return: ``(class_id, probability)`` — probability is a hard-coded
            1.0 because the majority vote carries no calibrated score.
        """
        # Label ids -> human-readable disease names (kept for reference; the
        # mapping itself is not consumed by the code below).
        stride, names, pt = 32, {0: '条锈病', 1: '白粉病', 2: '赤霉病', 3: '根腐病', 4: '黑穗病', 5: '纹枯病'}, True
        imgsz = check_img_size((640, 640), s=stride)  # check image size

        im0 = self.load_image(image_data, image_type)

        device = select_device("")
        dt = (Profile(device=device), Profile(device=device), Profile(device=device))
        # Per-class confidence floor; detections below it are discarded.
        # Hoisted out of the detection loop (original rebuilt it per row).
        class_min_conf = {0: 0.15, 1: 0.13, 2: 0.15, 3: 0.2, 4: 0.4, 5: 0.12}
        votes = defaultdict(int)  # ensemble vote count per final class id
        for model in self.models:
            with dt[0]:  # preprocessing
                im = letterbox(im0, imgsz, stride=stride, auto=pt)[0]  # padded resize
                im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
                im = np.ascontiguousarray(im)  # contiguous array
                im = torch.from_numpy(im).to(model.device)
                im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                if len(im.shape) == 3:
                    im = im[None]  # expand for batch dim
                if model.xml and im.shape[0] > 1:
                    ims = torch.chunk(im, im.shape[0], 0)
            with dt[1]:  # inference
                # Fixed: original computed `increment_path(Path(path).stem,
                # mkdir=True) if False else False`, referencing undefined
                # names `Path` and `path` behind an always-false condition.
                visualize = False
                if model.xml and im.shape[0] > 1:
                    pred = None
                    for image in ims:
                        chunk_pred = model(image, augment=False, visualize=visualize).unsqueeze(0)
                        pred = chunk_pred if pred is None else torch.cat((pred, chunk_pred), dim=0)
                    pred = [pred, None]
                else:
                    pred = model(im, augment=False, visualize=visualize)
            with dt[2]:  # NMS
                pred = non_max_suppression(pred, 0.1, 0.45, None, False, max_det=1000)

            # Collect (class, confidence) pairs above the per-class floor.
            one_pred = []
            for det in pred:  # per image
                for row in det:
                    row = row.tolist()
                    cls_id, conf = int(row[5]), row[4]
                    if conf >= class_min_conf[cls_id]:
                        one_pred.append((cls_id, conf))
            if not one_pred:
                pre_cls = 0  # no confident detection -> "healthy" class 0
            else:
                # Sum confidences per class; the largest total wins this
                # model's vote (+1 because class 0 is reserved for "none").
                conf_sum = defaultdict(int)
                for cls_id, conf in one_pred:
                    conf_sum[cls_id] += conf
                pre_cls = max(conf_sum.items(), key=lambda kv: kv[1])[0] + 1
            votes[pre_cls] += 1
        # Majority vote across the five models (first-seen class wins ties).
        final_cls = max(votes.items(), key=lambda kv: kv[1])[0]
        return final_cls, 1.0

# Singleton pattern: guarantees a single classifier instance per process
class ClassifierSingleton:
    """Process-wide holder for the single shared GenericClassifier."""

    _instance = None  # created lazily on first access

    @staticmethod
    def get_instance():
        """Return the shared classifier, constructing it on first call."""
        inst = ClassifierSingleton._instance
        if inst is None:
            inst = ClassifierSingleton._instance = GenericClassifier()
        return inst

# API route: version info
@app.route('/api/algo/instance/version', methods=['GET'])
def query_version_info():
    """Return the algorithm service version.

    The payload is a constant, so the original try/except — wrapped around a
    dict literal that can never raise — was dead code and has been removed.
    (This also drops the tab-indented lines that clashed with the file's
    4-space indentation.)
    """
    return jsonify({
        "code": "0",
        "msg": "success",
        "data": {
            "version": "V0.2.0-20250220"
        }
    })

# API route: health check
@app.route('/api/algo/instance/healthy', methods=['GET'])
def get_health_status():
    """Report the classifier's load status for health probes."""
    clf = ClassifierSingleton.get_instance()
    return jsonify({
        "code": "0",
        "msg": "success",
        "data": {
            "status": clf.status,
            "msg": clf.status_msg,
        },
    })

# API route: inference
@app.route('/api/algo/task/picture/analysis', methods=['POST'])
def picture_analysis():
    """Run disease classification on one picture.

    Expects JSON ``{"pictureType": 0|1, "pictureData": <base64 or URL>}`` and
    returns the class id and probability under ``data.expand``.

    Fixed: the original raised ValueError straight out of the handler, so
    clients got an HTML 500 page instead of the documented ``{"code": "1"}``
    JSON error (the commented-out variant at the bottom of the original file
    showed the intended behavior). Errors now return JSON with HTTP 400.
    """
    try:
        data = request.get_json()
        image_type = data.get('pictureType')
        image_data = data.get('pictureData')

        if image_type not in [0, 1]:
            raise ValueError("Invalid imageType. Supported: 0 (Base64), 1 (URL).")
        if not image_data:
            raise ValueError("Missing image data.")

        result, prob = ClassifierSingleton.get_instance().process(image_data, image_type)

        return jsonify({
            "code": "0",
            "msg": "success",
            "data": {
                'expand': {
                    "class": int(result),
                    "prob": float(prob)
                }
            }
        })
    except Exception as e:
        return jsonify({"code": "1", "msg": str(e)}), 400

# Main entry point
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which permits arbitrary code execution if this host:port is reachable —
    # it must be disabled for production. Binding port 80 on 0.0.0.0 also
    # requires elevated privileges on most systems. Behavior kept unchanged;
    # the dead commented-out evaluation scaffolding was removed.
    app.run(host='0.0.0.0', port=80, debug=True)
