import os, stat
from ..model.infer import Lot, InferPanelImage, InferPanel, Failure, Material
from ..utils.log import logger
import json
from ..utils.database import InferSessionLocal
import os
import shutil
import time
from pathlib import Path
import pandas as pd
import numpy as np
from .. import async_msg
from ..config import lot_dir, export_dir, csv_dir
import csv
from sqlalchemy import and_
import math
import cv2
import glob
from ..utils.lot import panel_filter, failure_filter
from ..job.panel_job import PanelJob

# Absolute path to the package root (two directory levels above this file).
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


class LotJob(PanelJob):
    """Manually triggered lot-inference job.

    Thin wrapper over PanelJob.detect_lot that broadcasts a start and an
    end notification through async_msg.
    """

    def __init__(self):
        super().__init__()

    def detect_lot(self, lot_path, material_code):
        """Run inference on one lot directory, announcing start and finish."""
        lot_name = os.path.basename(lot_path)

        async_msg('Lot %s 开始推理' % lot_name)

        super().detect_lot(lot_path, material_code)

        async_msg('Lot %s 结束推理' % lot_name)


def define_error(error_code):
    """Map an AOI error code from the review CSV to a judgment value.

    Returns 1 (OK) when the code is missing (NaN/None) or explicitly "OK",
    otherwise 2 (NG).
    """
    # BUGFIX: `error_code is np.nan` only matches the np.nan singleton.
    # Values read out of a pandas DataFrame are fresh float objects, so the
    # identity test fails for them; pd.isna() detects every NaN variant.
    if pd.isna(error_code) or error_code == "OK":
        return 1
    return 2
        

def update_lot_from_csv(lots, csv_files):
    """Write manual re-inspection results from uploaded CSVs back to panels.

    Args:
        lots: iterable of dicts, each with a "lot_code" key.
        csv_files: uploaded file objects (with .filename / .file attributes),
            each a GBK-encoded CSV containing at least the columns
            "不良Code", "作业者ID", "开始日期和时间" and "Chip ID".

    Returns:
        (True, updated_chip_ids) on success, (False, None) on failure.
        NOTE(review): the bad-suffix early return yields a dict instead of
        this tuple — confirm callers handle both shapes.
    """
    t1 = time.time()

    # chip_id -> {"update_time", "update_user"} for rows already applied.
    panel_done = {}
    # chip ids for which a duplicate warning has already been broadcast.
    ws_done = []

    panel_updata = []

    try:
        with InferSessionLocal() as session:
            lot_codes = []
            for lot_item in lots:
                lot_codes.append(lot_item["lot_code"])

            # Newest panels first, so the first DB match is the latest record.
            panel_images = session.query(InferPanel).filter(InferPanel.lot_code.in_(lot_codes)).order_by(InferPanel.start_time.desc())

            all_csv_data = []

            # Read every uploaded CSV into a single DataFrame.
            for csv_file in csv_files:
                suffix = Path(csv_file.filename).suffix[1:]

                # Reject any upload that is not a .csv file.
                if suffix != "csv":
                    return {"msg": "请上传格式为 csv 文件", "code": 1}

                df = pd.read_csv(csv_file.file, encoding='gbk')

                all_csv_data.append(df)

            df = pd.concat(all_csv_data, ignore_index = False)

            t2 = time.time()
            logger.info(f"读取 csv 时间: {t2-t1}")

            # Newest rows first, mirroring how the CSV is appended to.
            df['开始日期和时间'] = pd.to_datetime(df['开始日期和时间'])
            df.sort_values(["开始日期和时间"], inplace=True, ascending=False)

            for _, row in df.iterrows():
                aoi_result = row["不良Code"]
                update_user = row["作业者ID"]
                update_time = row["开始日期和时间"]
                chip_id = row["Chip ID"]

                # Whether this row should be written to the database.
                if_use = False

                if chip_id not in panel_done:
                    # First (i.e. newest) row for this chip wins by default.
                    panel_done[chip_id] = {"update_time": update_time, "update_user": update_user}
                    if_use = True
                else:
                    # On a timestamp tie, prefer a human operator over SYSTEM.
                    if panel_done[chip_id]["update_time"] == update_time:
                        if panel_done[chip_id]["update_user"] == "SYSTEM" and update_user != "SYSTEM":
                            # Overwrite the previously recorded row.
                            panel_done[chip_id] = {"update_time": update_time, "update_user": update_user}
                            if_use = True

                if if_use:
                    # Always resolve against the newest matching DB records.
                    all_panel_image = panel_images.filter(InferPanel.panel_id==chip_id).all()

                    panel_num = len(all_panel_image)
                    if panel_num > 1:
                        if chip_id not in ws_done:
                            ws_done.append(chip_id)
                            # Warn about duplicates exactly once per chip.
                            msg = '物料 %s 有重复,请人工再确认一遍' % chip_id
                            async_msg(msg, "csv", "error")

                    # Update only panels that exist in the database.
                    if panel_num>0:
                        panel_updata.append(chip_id)
                        panel_image = all_panel_image[0]
                        panel_image.judgment = define_error(aoi_result)

            t3 = time.time()

            msg = f"人工复检结果写入完成: {t3-t2}"
            async_msg(msg, "csv")

            logger.info(f"人工复检结果写入完成: {t3-t2}")
            session.commit()

            return True, panel_updata
    except Exception as e:
        # BUGFIX: the original swallowed the exception silently; keep the
        # user-facing message but log the real cause for diagnosis.
        logger.exception(e)

        msg = 'csv 文件内容不符合要求,请人工再确认一遍'
        async_msg(msg, "csv", "error")

        return False, None
    

# 导出没有 lotname 的图
def panel_export_task_no_lotname(panel_codes, export_type):
    # 每次导出之前清空文件夹
    if os.path.exists(export_dir):
        shutil.rmtree(export_dir)

    os.makedirs(export_dir)

    with InferSessionLocal() as session:

        for panel_code in panel_codes:
            panel = session.query(InferPanel).filter(InferPanel.panel_code == panel_code).first()

            # /sda2T/data/jd/lot/192.168.3.7_2025021817461553/20250218/175634_NJ4DC452JH2_NG/White(WHITE).bmp
            panel_file_path = panel.file_path
            # /sda2T/data/jd/lot/192.168.3.7_2025021817461553/20250218/175634_NJ4DC452JH2_NG
            panel_folder = os.path.dirname(panel_file_path)

            lot = session.query(Lot).filter(Lot.lot_code == panel.lot_code).first()

            failure_images = session.query(InferPanelImage).filter(InferPanelImage.panel_code == panel.panel_code)

            lot_dir_path = lot.dir_path
            lot_name = os.path.basename(lot_dir_path)

            # export_dir: /data/jd/export
            if "G163F_log" in lot_dir_path:
                panel_lot_dir = lot_dir_path
                export_path = lot_dir_path.replace(lot_dir, export_dir)
                export_panel_path = panel_folder.replace(os.path.dirname(panel_lot_dir), export_dir)

            else:
                # 导出流式推理结果
                # 判断流式目录
                if "/data/panel" in panel_file_path:
                    panel_lot_dir = os.path.dirname(os.path.dirname(panel_file_path))
                    panel_export_dir = panel_lot_dir
                    export_panel_path = panel_folder.replace(os.path.dirname(panel_lot_dir), export_dir)

                # 自动拷贝任务
                else:
                    # panel 所在的 lot 目录
                    # /mnt/md126/jd/lot/192.168.110.4_2025021416321702/20250221/165303_NJ4DC515TE2_NG/Black.bmp
                    # /mnt/md126/jd/lot/192.168.110.4_2025021416321702
                    panel_lot_dir = os.path.dirname(os.path.dirname(os.path.dirname(panel_file_path)))
                    # 192.168.110.4_2025021416321702
                    scp_job_name = os.path.basename(panel_lot_dir)

                    # /mnt/md126/jd/lot/20241230-71
                    panel_export_dir = panel_lot_dir.replace(scp_job_name, lot_name)

                    # panel_folder: /mnt/md126/jd/lot/192.168.110.4_2025021416321702/20250221/165303_NJ4DC515TE2_NG
                    # export_panel_folder: /mnt/md126/jd/lot/20241230/20250221/165303_NJ4DC515TE2_NG
                    export_panel_folder = panel_folder.replace(scp_job_name, lot_name)

                    # export_panel_path: /mnt/md126/jd/lot/20241230/20250221/165303_NJ4DC515TE2_NG
                    export_panel_path = export_panel_folder.replace(os.path.dirname(panel_export_dir), export_dir)
                    
                # /mnt/md126/jd/export/20241230-71
                export_path = panel_export_dir.replace(os.path.dirname(panel_export_dir), export_dir)

            # 导出小图
            # /export_path/lot_name/panel_id/failure_image_name
            # 导出大图
            # /export_path/lot_name/G163F_log
            # /export_path/lot_name/log

            # 导出小图
            if export_type in [0, 2]:
                
                export_img_path = os.path.join(export_path, panel.panel_id)

                os.makedirs(export_img_path, exist_ok=True)

                for failure_image in failure_images:
                    file_path = failure_image.file_path
                    if os.path.exists(file_path):
                        shutil.copy(file_path, export_img_path)

            # 导出大图
            if export_type in [1, 2]:
                
                # 导出上传的log, csv
                if lot_dir_path:
                    if not os.path.exists(os.path.join(export_path, "Log")):
                        shutil.copytree(os.path.join(lot_dir_path, "Log"), os.path.join(export_path, "Log"))

                    csv_fname = None
                    for fname in glob.glob(f"{lot_dir_path}/*.csv"):
                        csv_fname = fname
                    if csv_fname:
                        shutil.copy(csv_fname, export_path)
                # 导出自动拷贝的log
                else:
                    # /mnt/md126/jd/export/20241230-71
                    export_log_path = os.path.join(export_path, "log")

                    # /mnt/md126/jd/lot/192.168.110.4_2025021416321702/log
                    # /mnt/md126/jd/export/20241230-71/log
                    if os.path.exists(os.path.join(panel_lot_dir, "log")):
                        shutil.copytree(os.path.join(panel_lot_dir, "log"), export_log_path, dirs_exist_ok=True)
                   
                # 导出lot
                if os.path.exists(panel_folder):
                    shutil.copytree(panel_folder, export_panel_path, dirs_exist_ok=True)


def panel_export_task(panel_codes, export_type):
    """Export panel images and related logs/CSVs into export_dir.

    Args:
        panel_codes: panel_code values of the panels to export.
        export_type: 0 = failure crop images only, 1 = full panel
            folders + logs only, 2 = both.

    Side effect: export_dir is wiped and rebuilt on every call.
    """
    # Clear the export directory before every export run.
    if os.path.exists(export_dir):
        shutil.rmtree(export_dir)

    os.makedirs(export_dir)

    with InferSessionLocal() as session:

        for panel_code in panel_codes:
            panel = session.query(InferPanel).filter(InferPanel.panel_code == panel_code).first()

            # e.g. /sda2T/data/jd/lot/1740728155640/192.168.3.7_2025021817461553/20250218/175634_NJ4DC452JH2_NG/White(WHITE).bmp
            panel_file_path = panel.file_path

            # OK panels carry no stored image; nothing to export.
            if not panel_file_path:
                continue

            # Directory holding this panel's full-size images.
            panel_folder = os.path.dirname(panel_file_path)

            lot = session.query(Lot).filter(Lot.lot_code == panel.lot_code).first()

            failure_images = session.query(InferPanelImage).filter(InferPanelImage.panel_code == panel.panel_code)

            lot_dir_path = lot.dir_path
            # Use the original lot name (basename of dir_path); lot.lot_name
            # may have been renamed after upload.
            lot_name = os.path.basename(lot.dir_path)

            # Manually uploaded lot directory.
            if "G163F_log" in lot_dir_path:
                panel_lot_dir = lot_dir_path
                export_path = lot_dir_path.replace(lot_dir, export_dir)
                export_panel_path = panel_folder.replace(os.path.dirname(panel_lot_dir), export_dir)

            else:
                # Streaming-inference results live under /data/panel.
                if "/data/panel" in panel_file_path:
                    panel_lot_dir = os.path.dirname(os.path.dirname(panel_file_path))
                    panel_export_dir = panel_lot_dir
                    export_panel_path = panel_folder.replace(os.path.dirname(panel_lot_dir), export_dir)

                # Auto-copied (scp) lots.
                else:
                    # Lot directory containing the panel, e.g.
                    # /mnt/md126/jd/lot/lotname/192.168.110.4_2025021416321702
                    panel_lot_dir = os.path.dirname(os.path.dirname(os.path.dirname(panel_file_path)))

                    # scp job folder name, e.g. 192.168.110.4_2025021416321702
                    scp_job_name = os.path.basename(panel_lot_dir)

                    # /mnt/md126/jd/lot/lotname
                    panel_export_dir = os.path.dirname(panel_lot_dir)

                    # Drop the scp job folder level from the panel path:
                    # .../lot/lotname/<scp_job>/20250221/... -> .../lot/lotname/20250221/...
                    export_panel_folder = panel_folder.replace(os.path.join(lot_name, scp_job_name), lot_name)

                    # Map into export_dir: /mnt/md126/jd/export/lotname/...
                    export_panel_path = export_panel_folder.replace(os.path.dirname(panel_export_dir), export_dir)

                # /mnt/md126/jd/export/lotname
                export_path = panel_export_dir.replace(os.path.dirname(panel_export_dir), export_dir)

            # Crop images -> /export_path/panel_id/<failure image>
            # Full images -> /export_path/G163F_log or /export_path/log

            # Failure crop images.
            if export_type in [0, 2]:

                export_img_path = os.path.join(export_path, panel.panel_id)

                os.makedirs(export_img_path, exist_ok=True)

                for failure_image in failure_images:
                    file_path = failure_image.file_path
                    if os.path.exists(file_path):
                        shutil.copy(file_path, export_img_path)

            # Full panel folders plus logs/CSVs.
            if export_type in [1, 2]:
                # Copy the panel's full-size image folder.
                if os.path.exists(panel_folder):
                    shutil.copytree(panel_folder, export_panel_path, dirs_exist_ok=True)

                # Manually uploaded lot: copy its Log folder and CSV file.
                if "G163F_log" in lot_dir_path:
                    if not os.path.exists(os.path.join(export_path, "Log")):
                        shutil.copytree(os.path.join(lot_dir_path, "Log"), os.path.join(export_path, "Log"))

                    csv_fname = None
                    for fname in glob.glob(f"{lot_dir_path}/*.csv"):
                        csv_fname = fname
                    if csv_fname:
                        shutil.copy(csv_fname, export_path)
                # Auto-copied lot: merge its log directory date folder by date folder.
                else:

                    # /mnt/md126/jd/export/20241230-71/log
                    des_log_path = os.path.join(export_path, "log")
                    src_log_path = os.path.join(panel_lot_dir, "log")

                    # Unfinished or failed lots may have no log directory at all.
                    if os.path.exists(src_log_path):

                        for date_str in os.listdir(src_log_path):
                            # /mnt/md126/jd/lot/192.168.110.4_2025021416321702/log/20250306
                            src_date_log_path = os.path.join(src_log_path, date_str)
                            # /mnt/md126/jd/export/20241230-71/log/20250306
                            des_date_log_path = os.path.join(des_log_path, date_str)

                            # BUGFIX: the original re-copied the whole log tree
                            # (src_log_path -> des_log_path) on every iteration
                            # and re-checked src_log_path instead of the date
                            # folder, defeating the per-date mtime comparison.
                            if os.path.isdir(src_date_log_path):
                                # Copy a date folder when absent, or when the
                                # source is newer than the exported copy.
                                if (not os.path.exists(des_date_log_path)) or \
                                        os.path.getmtime(src_date_log_path) > os.path.getmtime(des_date_log_path):
                                    shutil.copytree(src_date_log_path, des_date_log_path, dirs_exist_ok=True)
                            else:
                                # Plain files directly under log/ are copied as-is.
                                os.makedirs(des_log_path, exist_ok=True)
                                shutil.copy2(src_date_log_path, des_date_log_path)


def export_csv_task(lot_names, lot_code, panel_ids, failure_ids, judgments, pjudgment, end_time, start_time, ai_result, aoi_start_time, aoi_end_time, aoi_result):
    """Build a review-result CSV for the panels matching the given filters.

    All arguments are passed straight through to panel_filter; ai_result and
    aoi_result here are filter values, not per-panel results.

    Side effect: writes csv_dir/<timestamp>.csv with world-writable permissions
    (no file is written when no panel matches).
    """
    with InferSessionLocal() as session:
        panel_images = panel_filter(session, lot_names=lot_names, lot_code=lot_code, panel_ids=panel_ids, failure_ids=failure_ids, judgments=judgments, pjudgment=pjudgment, end_time=end_time, start_time=start_time, ai_result=ai_result, aoi_start_time=aoi_start_time, aoi_end_time=aoi_end_time, aoi_result=aoi_result)

        # PERF: the failure catalogue is loop-invariant — fetch it once
        # instead of re-querying it for every panel.
        failures = session.query(Failure).filter(and_(Failure.del_flag=="0", Failure.failure_id!=0)).all()

        predict_data = []
        for panel_image in panel_images.order_by(InferPanel.start_time.desc()).all():
            lot = session.query(Lot).filter(Lot.lot_code == panel_image.lot_code).first()

            # De-duplicated defects detected on this panel.
            if panel_image.detection_info:
                detection_info = json.loads(panel_image.detection_info)
            else:
                detection_info = ""

            item = {}
            item["PanelID"] = panel_image.panel_id
            item["Lot名称"] = lot.lot_name
            item["机种"] = "G163A"
            # Distinct local names so the ai_result/aoi_result filter
            # parameters are not shadowed.
            panel_ai = panel_image.ai_result
            panel_aoi = panel_image.aoi_result
            judgment = panel_image.judgment

            if panel_ai == 1:
                item["AI判定结果"] = "OK"
            elif panel_ai == 2:
                item["AI判定结果"] = "NG"
            else:
                item["AI判定结果"] = "Gray"

            if panel_aoi == 1:
                item["AOI判定结果"] = "OK"
            elif panel_aoi == 2:
                item["AOI判定结果"] = "NG"

            if judgment == 1:
                item["人工复判结果"] = "OK"
            elif judgment == 2:
                item["人工复判结果"] = "NG"
            else:
                item["人工复判结果"] = "未复判"

            # Overall verdict: AI result vs manual re-judgment.
            if panel_ai == judgment:
                item["综合判定结果"] = "判定一致"
            else:
                if judgment == 0:
                    item["综合判定结果"] = "未复判"
                elif judgment == 2:
                    if panel_ai == 1:
                        item["综合判定结果"] = "判定不一致"
                    elif panel_ai == 3:
                        # Gray (3) plus a manual NG counts as agreement.
                        item["综合判定结果"] = "判定一致"
                else:
                    item["综合判定结果"] = "判定不一致"

            item["开始时间"] = lot.start_time.strftime('%Y-%m-%d %H:%M:%S')
            item["结束时间"] = lot.end_time.strftime('%Y-%m-%d %H:%M:%S') if lot.end_time else ""

            item["总数"] = 0

            # Per-failure-type counts from the de-duplicated detections.
            for failure in failures:
                failure_type = failure.failure_type
                failure_id = failure.failure_id

                item[failure_type] = 0

                for detect_item in detection_info:
                    if detect_item["failure_id"] == failure_id:
                        item[failure_type] += 1

                item["总数"] += item[failure_type]

            predict_data.append(item)

        if predict_data:
            header = predict_data[0].keys()

            timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
            export_csv_path = os.path.join(csv_dir, f"{timestamp}.csv")
            with open(export_csv_path, 'w', newline='', encoding='utf-8') as f:
                # Column order fixed by the first row's insertion order.
                writer = csv.DictWriter(f, fieldnames=header)
                writer.writeheader()
                writer.writerows(predict_data)
            # Make the export readable/writable by any consumer.
            os.chmod(export_csv_path, stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)


def failure_image_task(page_index, page_size, lot_name, lot_code, panel_id, panel_code, failure_ids, judgments, end_time, start_time):
    """Return one page of failure images plus one item per distinct panel.

    Args mirror failure_filter's keyword filters; page_index is 1-based.

    Returns:
        dict with keys all_failure_images, all_panel_images, total_page,
        total_num.
    """
    offset_data = page_size * (page_index-1)

    with InferSessionLocal() as session:
        failure_images = failure_filter(session, lot_name=lot_name, lot_code=lot_code, panel_id=panel_id, panel_code=panel_code, failure_ids=failure_ids, judgments=judgments, end_time=end_time, start_time=start_time)

        failure_image_paginate = failure_images.offset(offset_data).limit(page_size).all()

        all_failure_images = []
        all_panel_images = []
        all_panel_codes = []
        for image in failure_image_paginate:
            item = {}
            panel = session.query(InferPanel).filter_by(panel_code=image.panel_code).first()
            item["panel_id"] = panel.panel_id
            item["file_name"] = image.image_name
            item["file_path"] = image.thumb_file_path
            item["detection_info"] = json.loads(image.detection_info) if image.detection_info else ""
            item["image_code"] = image.panel_image_code

            lot = session.query(Lot).filter(Lot.lot_code == image.lot_code).first()

            item["lot_name"] = lot.lot_name if lot else ""

            all_failure_images.append(item)

            # Keep one representative item per distinct panel.
            if image.panel_code not in all_panel_codes:
                all_panel_codes.append(image.panel_code)

                all_panel_images.append(item)

        # PERF: count with SQL instead of materialising the full result set
        # twice (the original called .all() two separate times).
        total_num = failure_images.count()
        total_page = math.ceil(total_num / page_size)

    res = {"all_failure_images": all_failure_images, "all_panel_images": all_panel_images, "total_page": total_page, "total_num": total_num}

    return res


def panel_pareto_task(start_time, end_time, material_names):
    """Aggregate per-failure counts for every panel matching the filters.

    Returns:
        list of dicts, one per panel that has detections, mapping each
        failure_name to its count plus a "failure_num" grand total.
    """
    with InferSessionLocal() as session:
        # (Removed a dead `session.query(InferPanel)` that was immediately
        # overwritten by this filter call.)
        panel_images = panel_filter(session, start_time_contain=start_time, end_time_contain=end_time, material_names=material_names)

        # PERF: the failure catalogue does not change per panel — load it once.
        failures = session.query(Failure).all()

        predict_data = []
        for panel_image in panel_images.all():
            detection_info = panel_image.detection_info

            # Panels without detections contribute nothing to the pareto.
            if not detection_info:
                continue

            # De-duplicated defect list for this panel.
            detection_info = json.loads(detection_info)

            item = {}
            item["failure_num"] = 0

            for failure in failures:
                failure_name = failure.failure_name
                failure_id = failure.failure_id

                item[failure_name] = 0

                for detect_item in detection_info:
                    if detect_item["failure_id"] == failure_id:
                        item[failure_name] += 1

                item["failure_num"] += item[failure_name]

            predict_data.append(item)

        return predict_data


def panel_box_heatmap_task(material_name, failure_ids, start_time, end_time):
    """Collect defect boxes for a material plus the max stacking count.

    Args:
        material_name: Material.material_name to look up.
        failure_ids: optional list of failure ids; when given, only boxes of
            those types (on panels carrying them) are accumulated.
        start_time / end_time: optional window; only completed lots fully
            inside it are considered.

    Returns:
        {"max_value": <max overlap count over all boxes>, "all_box": [boxes]}
    """
    scale = 1
    t1 = time.time()
    with InferSessionLocal() as session:
        material = session.query(Material).filter(Material.material_name==material_name).first()
        material_code = material.material_code

        background_image = material.background_image

        # The background image is read only to learn the panel dimensions.
        cv_image = cv2.imread(background_image)
        cv_shape = cv_image.shape

        width = int(cv_shape[1]*scale)
        height = int(cv_shape[0]*scale)

        lots = session.query(Lot.lot_code).filter(Lot.material_code.like("%" + material_code + "%"))

        if start_time:
            # Only completed lots fully inside the window.
            lots = lots.filter(Lot.start_time >= start_time).filter(Lot.end_time <= end_time, Lot.end_time.isnot(None))

        lot_codes = [row[0] for row in lots.all()]
        panel_images = session.query(InferPanel).filter(InferPanel.lot_code.in_(lot_codes)).order_by(InferPanel.start_time.desc()).all()

        # heatmap is indexed [x, y]; only its maximum value is returned.
        heatmap = np.zeros((width, height))
        all_box = []

        if failure_ids:
            for panel_image in panel_images:
                panel_code = panel_image.panel_code
                failure_image_ids = session.query(InferPanelImage.failure_id).filter_by(panel_code=panel_code).all()
                failure_image_ids = [failure_image_id[0] for failure_image_id in failure_image_ids]
                failure_images = list(set(failure_image_ids).intersection(failure_ids))

                if failure_images:
                    detection_info = json.loads(panel_image.detection_info) if panel_image.detection_info else []

                    for box_item in detection_info:
                        failure_id = box_item["failure_id"]
                        # Only accumulate the requested failure types.
                        if failure_id in failure_ids:
                            box = box_item["box"]
                            # BUGFIX: clamp to 0 so a coordinate of 0 does not
                            # become -1 and silently yield an empty slice.
                            x0 = max(int(box[0]*scale) - 1, 0)
                            y0 = max(int(box[1]*scale) - 1, 0)
                            x1 = max(int(box[2]*scale) - 1, 0)
                            y1 = max(int(box[3]*scale) - 1, 0)

                            heatmap[x0:x1, y0:y1] += 1

                            all_box.append(box)

        else:
            # No failure filter: accumulate every detection box.
            for panel_image in panel_images:
                detection_info = json.loads(panel_image.detection_info) if panel_image.detection_info else []

                for box_item in detection_info:
                    box = box_item["box"]
                    x0 = max(int(box[0]*scale) - 1, 0)
                    y0 = max(int(box[1]*scale) - 1, 0)
                    x1 = max(int(box[2]*scale) - 1, 0)
                    y1 = max(int(box[3]*scale) - 1, 0)
                    heatmap[x0:x1, y0:y1] += 1

                    all_box.append(box)

        max_value = np.max(heatmap)

        t2 = time.time()

        print(f"热力图总耗时：{t2-t1}")

        return {"max_value": max_value, "all_box": all_box}


# 按照指定比例拆分原图为多个子图,获取子图的堆叠最大值
def panel_pixel_heatmap_task(material_name, failure_ids, start_time, end_time):
    """Split the panel into scale x scale tiles and report per-tile max stacks.

    Args:
        material_name: Material.material_name to look up.
        failure_ids: optional list of failure ids to restrict accumulation to.
        start_time / end_time: optional window; only completed lots fully
            inside it are considered.

    Returns:
        {"max_value": <global max>, "heatmap": str of [i, j, tile_max,
        tile_values] for non-empty tiles, "shape": tile-grid shape}
    """
    scale = 15
    t1 = time.time()
    with InferSessionLocal() as session:

        material = session.query(Material).filter(Material.material_name==material_name).first()
        material_code = material.material_code

        background_image = material.background_image

        # The background image provides the panel dimensions.
        cv_image = cv2.imread(background_image)
        height, width, _ = cv_image.shape

        # Trim the map to a whole number of scale-sized tiles.
        width_num = int(width/scale)
        height_num = int(height/scale)

        width = width_num*scale
        height = height_num*scale

        lots = session.query(Lot.lot_code).filter(Lot.material_code.like("%" + material_code + "%"))

        if start_time:
            # Only completed lots fully inside the window.
            lots = lots.filter(Lot.start_time >= start_time).filter(Lot.end_time <= end_time, Lot.end_time.isnot(None))

        lot_codes = [row[0] for row in lots.all()]
        panel_images = session.query(InferPanel).filter(InferPanel.lot_code.in_(lot_codes)).order_by(InferPanel.start_time.desc()).all()

        # Accumulated per-pixel overlap counts, indexed [x, y].
        origin_heatmap = np.zeros((width, height))

        if failure_ids:
            for panel_image in panel_images:
                panel_code = panel_image.panel_code

                failure_image_ids = session.query(InferPanelImage.failure_id).filter_by(panel_code=panel_code).all()
                failure_image_ids = [failure_image_id[0] for failure_image_id in failure_image_ids]
                failure_images = list(set(failure_image_ids).intersection(failure_ids))

                if failure_images:
                    detection_info = json.loads(panel_image.detection_info) if panel_image.detection_info else []

                    for box_item in detection_info:
                        failure_id = box_item["failure_id"]
                        # Only accumulate the requested failure types.
                        if failure_id in failure_ids:
                            box = box_item["box"]
                            x0 = box[0]
                            y0 = box[1]
                            x1 = box[2]
                            y1 = box[3]
                            # Skip boxes starting beyond the trimmed panel.
                            if x0 > width or y0 > height:
                                continue
                            # Clamp boxes that run off the edge.
                            if x1 > width:
                                x1 = width
                            if y1 > height:
                                y1 = height

                            # BUGFIX: clamp to 0 so a coordinate of 0 does not
                            # become -1 and silently yield an empty slice.
                            origin_heatmap[max(x0-1, 0):max(x1-1, 0), max(y0-1, 0):max(y1-1, 0)] += 1

        else:
            # No failure filter: accumulate every detection box.
            for panel_image in panel_images:
                detection_info = json.loads(panel_image.detection_info) if panel_image.detection_info else []

                for box_item in detection_info:
                    box = box_item["box"]
                    x0 = box[0]
                    y0 = box[1]
                    x1 = box[2]
                    y1 = box[3]
                    # Skip boxes starting beyond the trimmed panel.
                    if x0 > width or y0 > height:
                        continue
                    # Clamp boxes that run off the edge.
                    if x1 > width:
                        x1 = width
                    if y1 > height:
                        y1 = height

                    origin_heatmap[max(x0-1, 0):max(x1-1, 0), max(y0-1, 0):max(y1-1, 0)] += 1

        # Tile the map into width_num x height_num blocks: vh_heatmap keeps
        # each block's maximum, vh_vh keeps the raw block values.
        vh_heatmap = np.zeros((width_num, height_num))
        vh_vh = []

        for i, v_h in enumerate(np.vsplit(origin_heatmap, width_num)):
            vh_vh.append([])
            for j, vh_h in enumerate(np.hsplit(v_h, height_num)):
                vh_heatmap[i, j] = np.max(vh_h)
                vh_vh[i].append(vh_h.tolist())

        shape = vh_heatmap.shape
        max_value = np.max(vh_heatmap)

        # Only report tiles that contain at least one defect.
        indices = np.where(vh_heatmap > 0)

        res_heatmap = [[i, j, int(vh_heatmap[i, j]), vh_vh[i][j]] for i, j in zip(indices[0], indices[1])]

        t2 = time.time()

        print(f"热力图总耗时: {t2-t1}")

        return {"max_value": max_value, "heatmap": str(res_heatmap), "shape": shape}
    

# Shared LotJob instance created at import time; presumably imported by the
# API layer to trigger manual lot inference — confirm against callers.
lot_job = LotJob()