# --- Bootstrap: make this module importable both as part of the "src"
# --- package and when executed directly as a script.
import sys
from pathlib import Path

# `top` is the project root, two directory levels above this file.
file = Path(__file__).resolve()
parent, top = file.parent, file.parents[2]

# Put the project root on sys.path so the package-relative imports below work.
sys.path.append(str(top))
try:
    # Drop this script's own directory to avoid shadowing package modules.
    sys.path.remove(str(parent))
except ValueError: # Already removed
    pass

# Declare the package explicitly so relative imports resolve when the file
# is run directly (python .../src/job/<this file>.py).
__package__ = 'src.job'

import paramiko
import os
from datetime import datetime, timedelta
from datetime import time as dtime
import time
import sys
from stat import S_ISDIR
from ..config import Config, lot_dir, material_dir, scp_ip

from ..utils.log import logger, json_logger
from queue import Queue
import shutil
from ..model.infer import ScpJob, AOI, Lot, Material
from ..utils.database import InferSessionLocal

import schedule
import threading
from .infer_job import infer_job
from ..utils.lot import panel_ok
from .. import async_msg
from sqlalchemy import not_


class ScpAOIs(object):
    """Copy lot data concurrently from multiple AOI servers over SFTP."""

    def __init__(self, today=True):
        # Whether we are copying today's data.
        self.today = today
        self.lot_dir = lot_dir

        # Global stop flag for the consumer loop in start().
        self.stop = threading.Event()

        # Queue of pending copy jobs (job_code values).
        self.queue = Queue()

        # Remote directories that were still being written when first seen.
        self.unfinish_dir = []

        # Per-AOI connection/state bookkeeping, keyed by aoi_code.
        self.aois = {}

        # Project root: two directory levels above this file; the scp
        # settings are read from <root>/config/server.ini.
        self.root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.get_scp_config()

        # Populate self.aois from the AOI table.
        self.init_aoi()

    def init_aoi(self):
        """Register a default (idle) state entry in self.aois for every AOI
        machine found in the database."""
        with InferSessionLocal() as session:
            # "exception" is flipped to True when a copy fails, which stops
            # further copying from that machine.
            for record in session.query(AOI).all():
                self.aois[record.aoi_code] = {
                    "lot_name": "",
                    "material_code": "",
                    "aoi_name": record.aoi_name,
                    "exception": False,
                }

    def get_scp_config(self):
        """(Re)load lot copy settings from config/server.ini.

        Sets scp_type (copy mode), scp_interval (minutes per copy window)
        and infer_type (inference mode) on the instance.
        """
        cfg = Config()
        cfg.read(os.path.join(self.root_path, 'config', 'server.ini'))
        for option in ('scp_type', 'scp_interval', 'infer_type'):
            setattr(self, option, cfg.getint('lot', option))


    def connect(self, aoi):
        """Open an SSH + SFTP session to one AOI server and stash the
        handles (client, scp, stop event) in self.aois[aoi.aoi_code].

        On any failure an error notification is sent and no handles are
        stored.
        """
        ssh = paramiko.SSHClient()
        # Trust unknown hosts automatically.
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        try:
            ssh.connect(aoi.hostname, aoi.port, aoi.username, aoi.password, timeout=5)
            sftp = ssh.open_sftp()
            # Bound every SFTP operation so a dead link cannot hang us.
            sftp.get_channel().settimeout(5)
            entry = self.aois[aoi.aoi_code]
            entry["client"] = ssh
            entry["scp"] = sftp
            entry["stop"] = threading.Event()

        except Exception:
            async_msg(f'{aoi.aoi_name} 连接失败', level="error")
        

    def connect_aoi(self, aoi_code):
        """Look up the AOI record by code and open a connection to it."""
        with InferSessionLocal() as session:
            record = session.query(AOI).filter_by(aoi_code=aoi_code).first()
            self.connect(record)


    def disconnect_aoi(self, aoi_code):
        """Close the SFTP session and SSH client of one AOI, if connected.

        The entry stays in self.aois (only the handles are closed) so the
        machine can be reconnected later. Close errors are logged and
        swallowed.
        """
        item = self.aois[aoi_code]
        scp = item.get("scp")
        if scp:
            client = item["client"]
            try:
                # Close the SFTP channel first, then the SSH transport.
                scp.close()
                client.close()
                # del self.aois[aoi_code]
            except Exception as e:
                # BUG FIX: the original passed str(e) as a %-format argument
                # to a message with no placeholder ("not all arguments
                # converted"); format the exception into the message instead.
                logger.error(f"断开远程拷贝出错: {e}")

    def reconnect_aoi(self, aoi_code):
        """Tear down and re-establish the connection to one AOI server."""
        self.disconnect_aoi(aoi_code)
        self.connect_aoi(aoi_code)
    

    def get_aoi_status(self):
        """Return a status snapshot: one dict per AOI machine.

        When every machine is idle, the scp configuration is re-read so
        config edits take effect before the next run.
        """
        snapshot = [
            {
                "aoi_code": code,
                "aoi_name": info["aoi_name"],
                "lot_name": info["lot_name"],
                "material_code": info["material_code"],
                "status": info.get("status", False),
            }
            for code, info in self.aois.items()
        ]

        # Reload config only while no copy task is running.
        if not any(entry["status"] for entry in snapshot):
            self.get_scp_config()

        return snapshot

    def check_connection(self, client):
        """Probe an SSH client with a trivial command; True if it responds.

        The timeout is required — without it a dead connection blocks
        forever.
        """
        try:
            client.exec_command("echo 'Connection check'", timeout=5)
        except Exception:
            return False
        return True
        

    # 用于拷贝未完成的生成的文件夹
    def scp_unfinish_dir(self, all_lot_size, scp, aoi_code):
        for dirname, remote_path, des_path in self.unfinish_dir:
            is_finish = False
            begin_time = time.time()
            while not is_finish:
                for item in scp.listdir(remote_path):
                    if dirname in item:
                        remote_hms_path = os.path.join(remote_path, item).replace("/", "\\")

                        if "_NG" in item:
                            des_hms_path =  os.path.join(des_path, item)
                            os.makedirs(des_hms_path, exist_ok=True)
                            all_lot_size = self.scp_remote_file(remote_hms_path, des_hms_path, all_lot_size, scp, aoi_code)
                            is_finish = True
                            logger.info(f"{item} 重新拷贝成功")
                            break
                        # 如果是 OK 只拷贝目录结构
                        elif "_OK" in item:
                            self.scp_remote_dir(remote_hms_path, des_hms_path, scp)
                            is_finish = True
                            logger.info(f"{item} 只拷贝文件夹")
                            break
                        # 如果生成的是 142750_NJ5D3254GF3_Xsyc 不拷贝
                        elif dirname + "_Xsyc" in item:
                            self.scp_remote_dir(remote_hms_path, des_hms_path, scp)
                            is_finish = True
                            logger.info(f"{item} 也拷贝, 但不推理")
                            break
                        # 如果一直没有生成 142750_NJ5D3254GF3
                        else:
                            used_time = time.time() - begin_time
                            # 如果超时 5 分钟, 退出循环
                            if used_time > 300:
                                is_finish = True
                                logger.info(f"{item} 重新拷贝失败")
                                break
                        # 如果 AOI 还没有生成完成, 则继续循环
                        
        return all_lot_size


    def scp_remote_dir(self, remote_path, des_path, scp):
        for item in scp.listdir(remote_path):
            item_path = os.path.join(remote_path, item).replace("/", "\\")
            r_file = scp.lstat(item_path)
            # 判断是不是一个文件夹, 如果是文件夹创建文件夹, 如果是文件不拷贝
            if S_ISDIR(r_file.st_mode):
                current_des_path = os.path.join(des_path, item)
                os.makedirs(current_des_path)
                self.scp_remote_dir(item_path, current_des_path, scp)

            
    def scp_remote_file(self, remote_path, des_path, all_lot_size, scp, aoi_code):
        for item in scp.listdir(remote_path):
            # if self.aois[aoi_code]["stop"].is_set():
            #     aoi_name = self.aois[aoi_code]["aoi_name"]
            #     logger.info(f"AOI {aoi_name} 手动停止拷贝任务, 取消拷贝文件")
            #     return  
            item_path = os.path.join(remote_path, item).replace("/", "\\")
            r_file = scp.lstat(item_path)
            # 判断是不是一个文件夹
            if S_ISDIR(r_file.st_mode):
                current_des_path = os.path.join(des_path, item)
                os.makedirs(current_des_path)
                all_lot_size = self.scp_remote_file(item_path, current_des_path, all_lot_size, scp, aoi_code)
            else:
                des_file_path = os.path.join(des_path, item)
                scp.get(item_path, des_file_path)

                fsize = os.path.getsize(des_file_path)
                fsize = fsize/float(1024*1024)
                logger.info(f"{item}{' '*(50-len(item))}{round(fsize)}MB")
                all_lot_size += fsize  

        return all_lot_size    
        

    # Queue the next copy time-window for one AOI server.
    def add_aoi_datetime(self, aoi_code, lot_code, material_code, is_stop=False):
        """Create (or reuse) the ScpJob covering the next time window of
        this AOI and push its job_code onto the copy queue.

        The window starts at the AOI's recorded start_datetime and ends
        either one scp_interval later or — when is_stop is set — at the
        current minute. start_datetime is then advanced to the window end
        so consecutive calls produce contiguous windows.
        """
        state = self.aois[aoi_code]
        start_datetime = state["start_datetime"]
        aoi_name = state["aoi_name"]

        if is_stop:
            # Manual stop: the window ends right now, minute precision.
            end_datetime = datetime.now().replace(second=0, microsecond=0)
        else:
            end_datetime = start_datetime + timedelta(minutes=self.scp_interval)

        logger.info(f"AOI {aoi_name} 添加拷贝任务的时间范围 {start_datetime} ~ {end_datetime}")
        state["start_datetime"] = end_datetime

        # Persist the window, then enqueue it.
        with InferSessionLocal() as session:
            scp_job = session.query(ScpJob).filter_by(
                aoi_code=aoi_code,
                lot_code=lot_code,
                start_datetime=start_datetime,
                end_datetime=end_datetime,
            ).first()

            if scp_job:
                # Already recorded previously; do not duplicate it.
                logger.info(f"AOI {aoi_name} 的 Scp Job {start_datetime} ~ {end_datetime} 已存在数据库中")
            else:
                job_kwargs = dict(
                    start_datetime=start_datetime,
                    end_datetime=end_datetime,
                    aoi_code=aoi_code,
                    lot_code=lot_code,
                    material_code=material_code,
                )
                if is_stop:
                    # '1' marks the final window of a stopped lot.
                    job_kwargs["last_flag"] = '1'
                scp_job = ScpJob(**job_kwargs)
                session.add(scp_job)
                session.commit()

            self.queue.put(scp_job.job_code)


    def scp_lot(self, job_code):
        """Run one copy job end to end.

        Loads the ScpJob by job_code, copies every remote panel whose
        timestamp falls inside the job's [start_datetime, end_datetime)
        window into the local lot directory, then updates job/lot status
        and, when data was copied, queues the job for inference.

        ScpJob status values written here: '2' copying, '7' remote path
        missing, '0' no data in window, '4' copy finished.
        """

        t1 = time.time()

        # src example: G:\\1\\lotname\\235833_NJ4DC327NK2_NG
        # des example: /mnt/md126/jd/lot/lotname/<timestamp>/235833_NJ4DC327NK2_NG
        self.unfinish_dir = []
        
        with InferSessionLocal() as session:

            scp_job = session.query(ScpJob).filter_by(job_code=job_code).first()

            if not scp_job:
                logger.info(f"取到拷贝任务的 job_code:{job_code} 在数据库中不存在")
                return

            aoi_code = scp_job.aoi_code
            start_datetime = scp_job.start_datetime
            end_datetime = scp_job.end_datetime

            scp = self.aois[aoi_code]["scp"]
            aoi_name = self.aois[aoi_code]["aoi_name"]
            
            logger.info(f"AOI {aoi_name} 的 Scp Job {start_datetime} ~ {end_datetime} 开始拷贝")

            lot = session.query(Lot).filter_by(lot_code=scp_job.lot_code).first()

            lot_dir_path = lot.dir_path

            # Window identifier, e.g. 202501010800_202501010830.
            scp_timestamp = start_datetime.strftime("%Y%m%d%H%M") + '_' + end_datetime.strftime("%Y%m%d%H%M")
            # '2' = copy in progress.
            scp_job.status = '2'
            session.commit()
            
            all_lot_size = 0

            # Remote lot root on the AOI machine, e.g. G:\1\
            aoi = session.query(AOI).filter_by(aoi_code=aoi_code).first()
            aoi_lotdir = aoi.lotdir

            try:
                # Destination: <lot dir>/<window timestamp>
                d_lot_dir = os.path.join(lot_dir_path, scp_timestamp)

                # A folder with this name may remain from an earlier
                # attempt; remove it so the copy starts clean.
                if os.path.exists(d_lot_dir):
                    shutil.rmtree(d_lot_dir)
                os.makedirs(d_lot_dir)

                for panel_name in scp.listdir(aoi_lotdir):

                    # if self.aois[aoi_code]["stop"].is_set():
                    #     logger.info(f"AOI {aoi_name} 手动停止拷贝任务, 取消拷贝 Panel")
                    #     scp_job.status = '3'
                    #     lot.status = '3'
                    #     lot.end_time = datetime.utcnow() + timedelta(hours=8)
                    #     session.commit()
                    #     return  

                    # The third underscore-separated field of the panel
                    # folder name carries a MMDDHHMMSS timestamp —
                    # TODO confirm against actual AOI folder naming.
                    panel_info = panel_name.split("_")
                    date_str = panel_info[2]

                    # The remote name carries no year; assume the current one.
                    current_year = datetime.now().year
                    datetime_str = f"{current_year}{date_str[:2]}{date_str[2:4]}{date_str[4:6]}{date_str[6:8]}{date_str[8:10]}"

                    panel_time= datetime.strptime(datetime_str, "%Y%m%d%H%M%S")

                    # Only copy panels inside this job's half-open window.
                    if panel_time < end_datetime and panel_time >= start_datetime:
                        # Source panel folder on the AOI (Windows paths).
                        s_aoi_dir = os.path.join(aoi_lotdir, panel_name).replace("/", "\\")
                        d_panel_dir = os.path.join(d_lot_dir, panel_name)

                        os.makedirs(d_panel_dir, exist_ok=True)

                        logger.info(f"当前拷贝任务的时间范围: {start_datetime} ~ {end_datetime}, src 目录: {s_aoi_dir}, des 目录: {d_panel_dir}")

                        # OK panels: mirror the directory structure only,
                        # no images are downloaded.
                        if "OK_" in panel_name:
                            self.scp_remote_dir(s_aoi_dir, d_panel_dir, scp)
                        else:
                            all_lot_size = self.scp_remote_file(s_aoi_dir, d_panel_dir, all_lot_size, scp, aoi_code)

                        
                # logger.info(f"未拷贝的数据: {self.unfinish_dir}") 
                # # 拷贝未完成的 lot
                # all_lot_size = self.scp_unfinish_dir(all_lot_size, scp, aoi_code)

            except FileNotFoundError as e:
                msg = f"远程拷贝 lot 文件不存在: {e}"
                async_msg(msg, level="error")

                # Mark this machine as failed so further copying stops.
                self.aois[aoi_code]["exception"] = True

                # '7' = remote folder missing, nothing copied.
                scp_job.status = '7'
                session.commit()

                return
                
            
            # If the window produced no image data, skip the success logging.
            # Final ScpJob states elsewhere: '0' no data, '8' inference done.
            if all_lot_size == 0:
                scp_job.status = '0'
                logger.info(f"AOI {aoi_name} 的 Scp Job {start_datetime} ~ {end_datetime} 无数据")

                if len(os.listdir(d_lot_dir)) == 0:  # window folder is empty
                    # Remove the empty window folder.
                    try:
                        os.rmdir(d_lot_dir)
                        logger.info(f"空文件夹 {d_lot_dir} 删除成功")
                    except OSError as e:
                        logger.error(f"文件夹 {d_lot_dir} 不为空, Error: {e.strerror}")
                else:
                    # Copy finished with OK-only panels: record them in the DB.
                    panel_ok(d_lot_dir, scp_job.lot_code, scp_job.material_code, aoi_code, session)
            else:
                scp_job.status = '4'
                msg = f'AOI {aoi_name} 的 Scp Job {start_datetime} ~ {end_datetime} 复制已完成'
                async_msg(msg)

                t2 = time.time()
                logger.info(f"远程拷贝的 lot 大小: {round(all_lot_size)} MB")
                logger.info(f"远程拷贝花费时间: {round((t2-t1))} s")
                logger.info(f"远程拷贝速度: {round((all_lot_size)/(t2-t1))} MB/s")

                # Hand the finished window to the inference pipeline.
                infer_job.add_lot_dir(job_code)
                msg = f'AOI {aoi_name} 的 Scp Job {start_datetime} ~ {end_datetime} 添加到推理队列'
                logger.info(msg)

            session.commit()
            
            # infer_type: 0 = AOI NG detection, 1 = full-image detection.
            # if self.infer_type == 1:

            # Last window of a stopped lot: drop the remote connection.
            if self.aois[aoi_code]["stop"].is_set() and scp_job.last_flag == '1':
                logger.info("断开 aoi 连接")
                self.disconnect_aoi(aoi_code)


    # Started together with the server: consume the scp job queue forever.
    def start(self):
        """Consumer loop: pull job_codes off self.queue and run scp_lot for
        each, until self.stop is set.

        On a copy failure the AOI connection is re-established (retries
        every 5 s, capped by the reconnect_num check below); on success the
        failed job is re-queued, otherwise it is marked failed ('7').
        """
        logger.info("启动远程拷贝任务")
        
        while not self.stop.is_set():
            try:
                # Block up to 10 s for the next job so the stop flag is
                # re-checked periodically.
                job_code = self.queue.get(timeout=10)
            except Exception as e:
                continue

            logger.info(f"取到拷贝任务的 job_code:{job_code}")

            with InferSessionLocal() as session:

                try:
                    scp_job = session.query(ScpJob).filter_by(job_code=job_code).first()

                    if scp_job:
                        aoi_code = scp_job.aoi_code
                        self.scp_lot(job_code)
                except Exception as e:
                    logger.error(f"远程拷贝连接断开: {e}, 失败的 job_code: {job_code}")
            
                    msg = f"远程拷贝连接断开: {e}"
                    async_msg(msg, level="error")

                    # Connection lost: reconnect every 5 s until the
                    # attempt cap below is hit.
                    reconnect_num = 0
                    scp_job = session.query(ScpJob).filter_by(job_code=job_code).first()

                    # The job row may have been deleted in the meantime.
                    if not scp_job:
                        logger.info(f"取到拷贝任务的 job_code:{job_code} 在数据库中不存在")
                        continue

                    aoi_code = scp_job.aoi_code
                    aoi_name = self.aois[aoi_code]["aoi_name"]
                    while True:
                        logger.info(f"AOI {aoi_name} 开始重连")
                        self.reconnect_aoi(aoi_code)

                        is_connect = self.check_connection(self.aois[aoi_code].get("client"))
                        
                        if is_connect or reconnect_num >= 5:
                            break
                        
                        reconnect_num += 1
                        time.sleep(5)

                    if is_connect:
                        msg = f"AOI {aoi_name} 重连成功"
                        async_msg(msg)

                        # '1' = ready again; re-queue the failed job.
                        scp_job.status = '1'
                        self.queue.put(job_code)    
                    else:
                        msg = f"AOI {aoi_name} 重连失败"
                        async_msg(msg)

                        # '7' = copy failed.
                        scp_job.status = '7'
                    session.commit()


                # self.queue.put(job_code)  
            
            # Fire any due scheduled copy-window jobs (add_aoi_datetime).
            schedule.run_pending()
            time.sleep(1)

    def start_aoi(self, aoi_code, material_code, lot_name, start_datetime=None):
        """Begin copying a lot from one AOI machine.

        Resets the machine's state entry, (re)connects with retries, creates
        or reuses the Lot row, then installs an add_aoi_datetime schedule
        every scp_interval minutes. When start_datetime is given, historical
        windows from that time up to (now - 2 min) are queued first.

        Args:
            aoi_code: machine to copy from.
            material_code: material being produced; appended to the Lot row.
            lot_name: lot to copy; an existing Lot with this name is reused.
            start_datetime: optional manual start; None means "now - 2 min".
        """

        with InferSessionLocal() as session:
            aoi = session.query(AOI).filter_by(aoi_code=aoi_code).first()
            aoi_name = aoi.aoi_name

            logger.info(f"启动远程 AOI 服务器 {aoi_name}")
            
            # Every start resets this machine's state and reconnects.
            self.aois[aoi_code] = {"lot_name": "", "material_code": "", "aoi_name": aoi_name, "end_datetime": "", "job": "", "stop": threading.Event(), "exception": False, "status": True}

            self.reconnect_aoi(aoi_code)
            # Verify the connection actually works.
            is_connect = self.check_connection(self.aois[aoi_code].get("client"))

            # Connection is down: retry.
            if not is_connect:
                # Retry every 3 s until the attempt cap below is hit.
                reconnect_num = 0
                while True:
                    logger.info(f"AOI {aoi_name} 开始重连")
                    self.reconnect_aoi(aoi_code)

                    is_connect = self.check_connection(self.aois[aoi_code].get("client"))
                    
                    if is_connect or reconnect_num >= 5:
                        break
                    
                    reconnect_num += 1
                    time.sleep(3)

            if not is_connect:
                msg = f"AOI {aoi_name}重连失败"
                async_msg(msg, level="error")

                return

            # Connected: record what this machine is copying.
            self.aois[aoi_code]["lot_name"] = lot_name
            self.aois[aoi_code]["material_code"] = material_code


            # Drop this machine's stale scp jobs before starting, keeping
            # copying ('2'), finished ('4') and inferring ('6') ones.
            session.query(ScpJob).filter_by(aoi_code=aoi_code).filter(not_(ScpJob.status.in_(['2', '4', '6']))).delete()

            session.commit()
            # Create (or reuse) the Lot for this lot_name.

            # Reuse an existing lot with the same name if one exists.
            lot = session.query(Lot).filter(Lot.lot_name==lot_name, Lot.aoi_code!=None).first()
            if not lot:
                # The lot directory must be unique even when lot names
                # collide, so it is keyed by a timestamp.
                lot_timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
                lot = Lot(lot_name=lot_name, material_code=material_code, aoi_code=aoi_code, dir_path=os.path.join(self.lot_dir, lot_timestamp))
                session.add(lot)
            else:
                # Append this material / machine to the existing lot row.
                material_codes = lot.material_code.split(", ")
                aoi_codes = lot.aoi_code.split(", ")
                if material_code not in material_codes:
                    lot.material_code = lot.material_code + ", " + material_code
                if aoi_code not in aoi_codes:
                    lot.aoi_code = lot.aoi_code + ", " + aoi_code
                lot.status = '0'
            session.commit()

            lot_code = lot.lot_code

            # # Local testing only:
            # if scp_ip=="192.168.2":
            #     start_datetime = datetime(2025, 4, 26, 6, 0, 0)
            #     # start_datetime = start_datetime

            if start_datetime:
                logger.info(f"AOI {aoi_name} 手动输入开始时间: {start_datetime}")
                # Backfill: copy everything between start_datetime and now
                # before switching to the periodic schedule.
                current_datetime = datetime.utcnow() + timedelta(hours=8)

                # If the requested start is less than 2 min old, wait so
                # the AOI has finished writing that window's data.
                if (current_datetime - start_datetime).total_seconds() / 60 < 2:
                    time.sleep(120)
             
                end_datetime = start_datetime + timedelta(minutes=self.scp_interval)

                msg = f'Lot {lot_name} 开始拷贝任务, 拷贝过去时间点的数据, 远程拷贝立即开始'
                async_msg(msg, status="start")

                self.aois[aoi_code]["start_datetime"] = start_datetime

                # Queue one job per historical window up to (now - 2 min).
                while end_datetime + timedelta(minutes=2) < current_datetime:
                    if self.aois[aoi_code]["stop"].is_set():
                        logger.info(f"AOI {aoi_name} 手动停止拷贝任务, 取消添加历史任务")
                        break
                    self.add_aoi_datetime(aoi_code, lot_code, material_code)
                    current_datetime = datetime.utcnow() + timedelta(hours=8)
                    end_datetime = end_datetime + timedelta(minutes=self.scp_interval)

                    time.sleep(0.01)

                # Wait until the next window is at least 2 min in the past.
                sleep_seconds = (end_datetime + timedelta(minutes=2)-current_datetime).seconds
                logger.info(f"AOI {aoi_name} 拷贝等待 {sleep_seconds}s")

                # Sleep in 1 s steps so a manual stop is noticed promptly.
                i = 0
                while i <= sleep_seconds:
                    if not self.aois[aoi_code]["stop"].is_set():
                        time.sleep(1)
                        i += 1
                    else:
                        logger.info(f"AOI {aoi_name} 手动停止拷贝任务, 取消拷贝等待")
                        self.aois[aoi_code]["status"] = False
                        return


                if self.aois[aoi_code]["stop"].is_set():
                    logger.info(f"AOI {aoi_name} 手动停止拷贝任务, 取消添加定时任务")
                    # Stopped before the periodic schedule was installed.
                    self.aois[aoi_code]["status"] = False
                    return

                # Periodic job; every window ends 2 min behind "now".
                job = schedule.every(self.scp_interval).minutes.do(self.add_aoi_datetime, aoi_code, lot_code, material_code)
                logger.info(f"AOI {aoi_name} 添加定时任务, 并立即执行一次定时任务")

                # Run the first window immediately.
                self.add_aoi_datetime(aoi_code, lot_code, material_code)
                self.aois[aoi_code]["job"] = job

            else:
                # Start 2 min in the past so in-flight data is complete.
                start_datetime = datetime.now().replace(second=0, microsecond=0) - timedelta(minutes=2) 

                self.aois[aoi_code]["start_datetime"] = start_datetime

                msg = f'Lot {lot_name} 开始拷贝任务, 拷贝当前时间点的数据, 远程拷贝任务等待中'
                async_msg(msg, status="start")

                if self.aois[aoi_code]["stop"].is_set():
                    logger.info(f"AOI {aoi_name} 手动停止拷贝任务, 取消添加定时任务")
                    self.aois[aoi_code]["status"] = False
                    return

                # Queue a new window every scp_interval minutes.
                job = schedule.every(self.scp_interval).minutes.do(self.add_aoi_datetime, aoi_code, lot_code, material_code)
                logger.info(f"AOI {aoi_name} 添加定时任务")
                self.aois[aoi_code]["job"] = job

            logger.info(f"Start aoi self.aois: {self.aois}")


    def stop_aoi(self, aoi_code, material_code, lot_name):
        """Stop the copy loop for one AOI machine: signal its stop event,
        mark its pending ScpJobs as stopped, flag all as the last window,
        cancel the scheduled job and drain the copy queue.

        Args:
            aoi_code: machine to stop.
            material_code: unused here; kept for interface symmetry with
                start_aoi.
            lot_name: lot whose status is updated when the copy errored.
        """

        if self.aois[aoi_code].get("stop"):
            self.aois[aoi_code]["stop"].set()

        aoi_name = self.aois[aoi_code]["aoi_name"]

        msg = f"停止远程 AOI 服务器 {aoi_name}"
        async_msg(msg, status="stop")
        logger.info(msg)

        # Cancel pending copy work and clean up job rows.
        with InferSessionLocal() as session:

            lot = session.query(Lot).filter_by(lot_name=lot_name).first()

            # If the copy raised an exception, mark the lot as failed.
            if self.aois[aoi_code]["exception"]:
                lot.status = '2'  # error
                lot.end_time = datetime.utcnow() + timedelta(hours=8)
        
            # Mark every job that is not copying ('2'), finished ('4') or
            # inferring ('6') as stopped, and flag all as the last window.
            scp_jobs_item = session.query(ScpJob).filter_by(aoi_code=aoi_code).all()

            for scp_job in scp_jobs_item:
                if scp_job.status not in ['2', '4', '6']:
                    # BUG FIX: status is a string code everywhere else in
                    # this module ('0'..'8'); the original assigned int 3.
                    scp_job.status = '3'
                scp_job.last_flag = '1'

            # Drop queued job codes so the consumer stops picking them up.
            self.queue.queue.clear()
               
            session.commit()

        # BUG FIX: use .get() — entries created by init_aoi() carry no
        # "job" key, so stopping before any start_aoi() raised KeyError.
        if self.aois[aoi_code].get("job"):
            logger.info(f"AOI {aoi_name} 取消定时任务")
            schedule.cancel_job(self.aois[aoi_code]["job"])
            self.aois[aoi_code]["status"] = False

        # Clear again in case the scheduler enqueued another window while
        # the session was committing.
        self.queue.queue.clear()


if __name__ == "__main__":

    # NOTE(review): only constructs the service (reads config, loads AOI
    # rows from the DB); start() is never called here — presumably a
    # construction-only smoke test. Confirm before relying on this entry.
    scp_lot = ScpAOIs()

    
