import os
import random

from ..model.master import Lot, PanelImage, FailureImage, Failure, Material
import json
from .database import SessionLocal
from PIL import Image
import os
import time
import numpy as np
from ..config import Config, csv_dir
from ..utils.lot import get_material_lot, failure_filter, panel_filter
from sqlalchemy import and_
import math
import cv2
import csv
from .. import async_msg
from .log import get_logger

root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

logger = get_logger(__name__) 


def image_thumb(image_path, output_path, scale=6):
    """Write a thumbnail of *image_path* to *output_path*, shrunk by *scale*.

    Uses PIL's thumbnail(), which preserves aspect ratio and never enlarges.
    """
    img = Image.open(image_path)
    orig_w, orig_h = img.size
    target_size = (int(orig_w / scale), int(orig_h / scale))
    img.thumbnail(target_size)
    img.save(output_path)

def get_infer_type():
    """Read the integer 'infer_type' option from the [lot] section of
    config/server.ini under the project root."""
    cfg = Config()
    cfg.read(os.path.join(root_path, 'config', 'server.ini'))
    return cfg.getint('lot', 'infer_type')
    

def define_error(error_code):
    """Map an AOI error code to a judgment class.

    Returns:
        1 -- missing value (NaN) or the literal "OK"
        2 -- known "gray" codes (the P3xx/P4xx/P5xx list below)
        3 -- anything else (NG)
    """
    # BUG FIX: `error_code is np.nan` is an identity check that only matches
    # the exact np.nan singleton; any other NaN float (e.g. produced by
    # pandas arithmetic or float("nan")) fell through to 3. Use a
    # value-based NaN test instead.
    if (isinstance(error_code, float) and math.isnan(error_code)) or error_code == "OK":
        return 1

    gray_codes = {"P301", "P302", "P303", "P304", "P305",
                  "P401", "P402", "P403", "P404",
                  "P501", "P502", "P503", "P504"}
    if error_code in gray_codes:
        return 2

    return 3


def failure_image_task(page_index, page_size, lot_name, lot_code, panel_id, panel_code, failure_ids, judgments, end_time, start_time, server_hosts):
    """Paginate failure images matching the given filters.

    Args:
        page_index: 1-based page number.
        page_size: items per page.
        (remaining args are passed straight to failure_filter)

    Returns:
        dict with:
            all_failure_images -- one item per failure image on this page
            all_panel_images   -- first item seen per distinct panel_code
            total_page / total_num -- pagination metadata over the full set
    """
    offset_data = page_size * (page_index - 1)

    with SessionLocal() as session:
        failure_images = failure_filter(
            session, lot_name=lot_name, lot_code=lot_code, panel_id=panel_id,
            panel_code=panel_code, failure_ids=failure_ids, judgments=judgments,
            end_time=end_time, start_time=start_time, server_hosts=server_hosts)

        failure_image_paginate = failure_images.offset(offset_data).limit(page_size)

        all_failure_images = []
        all_panel_images = []
        seen_panel_codes = set()
        # lot_code -> lot_name cache: avoids one Lot query per image when
        # many images share a lot.
        lot_name_cache = {}

        for image in failure_image_paginate:
            if image.lot_code not in lot_name_cache:
                lot = session.query(Lot).filter(Lot.lot_code == image.lot_code).first()
                lot_name_cache[image.lot_code] = lot.lot_name

            item = {
                "panel_id": image.panel_id,
                "file_name": image.file_name,
                "file_path": image.file_path,
                "failure_id": image.failure_id,
                "image_code": image.image_code,
                "server_host": image.server_host,
                "lot_name": lot_name_cache[image.lot_code],
            }

            all_failure_images.append(item)

            # Keep only the first image per panel for the panel-level list.
            if image.panel_code not in seen_panel_codes:
                seen_panel_codes.add(image.panel_code)
                all_panel_images.append(item)

        # PERF FIX: the original called failure_images.all() twice, loading
        # the entire result set twice just to compute the count. COUNT in SQL.
        total_num = failure_images.count()
        total_page = math.ceil(total_num / page_size)

    return {"all_failure_images": all_failure_images,
            "all_panel_images": all_panel_images,
            "total_page": total_page,
            "total_num": total_num}


def panel_pareto_task(start_time, end_time, material_names, server_hosts):
    """Build per-panel defect counts (pareto data) for matching panels.

    Each returned item maps every active failure name to its detection count
    on that panel, plus "failure_num" holding the panel total. Panels with
    no detection_info are skipped.
    """
    with SessionLocal() as session:

        panel_images = panel_filter(
            session, start_time_contain=start_time, end_time_contain=end_time,
            material_names=material_names, server_hosts=server_hosts)

        # PERF FIX: the active failure list is loop-invariant; the original
        # re-queried the Failure table once per panel image.
        failures = session.query(Failure).filter(
            and_(Failure.del_flag == "0", Failure.failure_id != 0)).all()

        predict_data = []
        for panel_image in panel_images.all():
            if not panel_image.detection_info:
                continue
            # De-duplicated defects on the panel.
            detection_info = json.loads(panel_image.detection_info)

            # Count detections per failure_id in a single pass instead of
            # rescanning detection_info for every failure type.
            counts = {}
            for detect_item in detection_info:
                fid = detect_item["failure_id"]
                counts[fid] = counts.get(fid, 0) + 1

            item = {"failure_num": 0}
            for failure in failures:
                num = counts.get(failure.failure_id, 0)
                item[failure.failure_name] = num
                item["failure_num"] += num

            predict_data.append(item)

        return predict_data


def panel_box_heatmap_task(material_name, failure_ids, start_time, end_time):
    """Stack detection boxes from a material's panel images into a heatmap.

    Args:
        material_name: material whose background image defines the canvas size.
        failure_ids: optional list; when given, only boxes of these failure
            types (on panels that actually have matching failure images) count.
        start_time / end_time: optional lot time-window filter.

    Returns:
        {"max_value": peak overlap count across the heatmap,
         "all_box": the raw [x0, y0, x1, y1] boxes that were stacked}
    """
    scale = 1  # boxes are used at native background-image resolution
    t1 = time.time()
    with SessionLocal() as session:
        material = session.query(Material).filter(Material.material_name==material_name).first()

        background_image = material.background_image

        # Read the background image file from the local filesystem.
        cv_image = cv2.imread(background_image)
        cv_shape = cv_image.shape

        # cv_shape is (rows, cols, channels): width = cols, height = rows.
        width = int(cv_shape[1]*scale)
        height = int(cv_shape[0]*scale)

        # Resize the image (result is currently unused -- kept as-is).
        resized_image = cv2.resize(cv_image, (width, height))

        lots = session.query(Lot.lot_code).filter(Lot.del_flag == '0')

        lots = get_material_lot(material_name, lots)
            
        if start_time:
            
            
            # Only finished lots fully inside the [start_time, end_time] window.
            lots = lots.filter(Lot.start_time >= start_time).filter(Lot.end_time <= end_time, Lot.end_time.isnot(None))

        panel_images = session.query(PanelImage)

        lot_codes = [row[0] for row in lots.all()]
        panel_images = panel_images.filter(PanelImage.lot_code.in_(lot_codes)).order_by(PanelImage.aoi_time.desc()).all()

        # NOTE(review): allocated as (width, height) and indexed [x, y] --
        # transposed relative to OpenCV's (rows, cols) convention. Only the
        # max is consumed so orientation never shows, but boxes with
        # x1 > width / y1 > height silently clip via numpy slicing; confirm
        # that is intended.
        heatmap = np.zeros((width, height))
        all_box = []

        if failure_ids:
            for panel_image in panel_images:
                panel_code = panel_image.panel_code
                failure_image_ids = session.query(FailureImage.failure_id).filter_by(panel_code=panel_code).all()
                failure_image_ids = [failure_image_id[0] for failure_image_id in failure_image_ids]
                failure_images = list(set(failure_image_ids).intersection(failure_ids))
               
                
                if failure_images:
                    item = {}
                    detection_info = json.loads(panel_image.detection_info) if panel_image.detection_info else []

                    for box_item in detection_info:
                        failure_id = box_item["failure_id"]
                        # When failure types are filtered, only stack boxes of
                        # the requested failure types.
                        if failure_id in failure_ids:
                            box = box_item["box"]
                            # -1 offsets suggest 1-based box coords -- TODO confirm.
                            x0 = int(box[0]*scale) - 1 
                            y0 = int(box[1]*scale) - 1
                            x1 = int(box[2]*scale) - 1
                            y1 = int(box[3]*scale) - 1

                            heatmap[x0:x1, y0:y1] += 1

                            all_box.append(box)
                    
        else:
            for panel_image in panel_images:
                item = {}
                detection_info = json.loads(panel_image.detection_info) if panel_image.detection_info else []

                for box_item in detection_info:
                    box = box_item["box"]
                    # -1 offsets suggest 1-based box coords -- TODO confirm.
                    x0 = int(box[0]*scale) - 1 
                    y0 = int(box[1]*scale) - 1
                    x1 = int(box[2]*scale) - 1
                    y1 = int(box[3]*scale) - 1
                    heatmap[x0:x1, y0:y1] += 1

                    all_box.append(box)

        max_value = np.max(heatmap)


        # Only return pixel coordinates that contain defects (kept for reference):
        # indices = np.where(heatmap > 0) 
        # all_heatmap = [[i+1, j+1, int(heatmap[i, j])] for i, j in zip(indices[0], indices[1])]   
        # with open("heatmap.txt", "w") as f:
        #     f.write(str(all_heatmap))

        t2 = time.time()
        # logger.info(f"热力图总耗时：{t2-t1}")

        return {"max_value": max_value, "all_box": all_box}


# Split the original image into tiles at a fixed ratio and take each tile's stacked maximum.
def panel_pixel_heatmap_task(material_name, failure_ids, start_time, end_time, server_hosts):
    """Split the panel-defect heatmap into scale x scale tiles and report each
    tile's stacked maximum.

    Args:
        material_name: material whose background image defines the canvas.
        failure_ids: optional failure-type filter.
        start_time / end_time: optional lot time-window filter.
        server_hosts: optional host filter for materials / panel images.

    Returns:
        {"max_value": ..., "heatmap": str([...]), "shape": (w_tiles, h_tiles)}
        on success; {"code": 1, "msg": ...} when the background image cannot
        be fetched; None when server_hosts filters out every material.
    """

    # Tile edge length in pixels: the heatmap is reported per scale x scale tile.
    scale = 15
    t1 = time.time()
    with SessionLocal() as session:

        materials = session.query(Material).filter(Material.material_name==material_name).all()

        material_codes = []
        material_server_hosts = []
        material0 = None
        for material in materials:
            material_codes.append(material.material_code)
            material_server_hosts.append(material.server_host)
            
        if server_hosts:
            intersection = list(set(material_server_hosts) & set(server_hosts))
            if intersection:
                material0 = session.query(Material).filter_by(material_name=material_name, server_host=intersection[0]).first()
            else:
                # NOTE(review): returns None here but a dict elsewhere --
                # callers must handle both shapes.
                return
        else:
            # NOTE(review): raises IndexError when no material matches the name.
            material0 = materials[0]

        background_image = material0.background_image
        server_host = material0.server_host

        image_url = f"http://{server_host}:8869{background_image}"

        # Fetch the remote background image over HTTP (cv2.VideoCapture can
        # read still images from a URL).
        cap = cv2.VideoCapture(image_url)
        _, cv_image = cap.read()
        cap.release()

        if cv_image is None:
            return {"code": 1, "msg": "背景图不存在"}
        height, width, _ = cv_image.shape

        # Whole-tile counts per axis; the canvas is then trimmed to an exact
        # multiple of `scale` so vsplit/hsplit below divide evenly.
        width_num = int(width/scale)
        height_num = int(height/scale)

        width = width_num*scale
        height = height_num*scale

        lots = session.query(Lot).filter(Lot.del_flag == '0')
     
        lots = get_material_lot(material_name, lots)

        if start_time:
            
            
            # Only finished lots fully inside the [start_time, end_time] window.
            lots = lots.filter(Lot.start_time >= start_time).filter(Lot.end_time <= end_time, Lot.end_time.isnot(None))

        panel_images = session.query(PanelImage).filter(PanelImage.material_code.in_(material_codes))

        if server_hosts:
            panel_images = panel_images.filter(PanelImage.server_host.in_(server_hosts))

        lot_codes = [row.lot_code for row in lots.all()]

        panel_images = panel_images.filter(PanelImage.lot_code.in_(lot_codes)).order_by(PanelImage.aoi_time.desc()).all()

        # NOTE(review): allocated as (width, height) and indexed [x, y] --
        # transposed relative to OpenCV's (rows, cols); the tiling below
        # therefore walks x with vsplit and y with hsplit.
        origin_heatmap = np.zeros((width, height))


        if failure_ids:
            for panel_image in panel_images:
                panel_code = panel_image.panel_code
                
                failure_image_ids = session.query(FailureImage.failure_id).filter_by(panel_code=panel_code).all()
                failure_image_ids = [failure_image_id[0] for failure_image_id in failure_image_ids]
                failure_images = list(set(failure_image_ids).intersection(failure_ids))
               
                if failure_images:
                    detection_info = json.loads(panel_image.detection_info) if panel_image.detection_info else []

                    for box_item in detection_info:
                        failure_id = box_item["failure_id"]
                        # When failure types are filtered, only count boxes of
                        # the requested failure types.
                        if failure_id in failure_ids:
                            box = box_item["box"]
                            x0 = box[0]
                            y0 = box[1]
                            x1 = box[2]
                            y1 = box[3]
                            # Skip boxes that start beyond the trimmed canvas.
                            if x0 > width or y0 > height:
                                continue
                            # Clamp boxes that extend past the canvas edge.
                            if x1 > width:
                                x1 = width
                            if y1 > height:
                                y1 = height

                            origin_heatmap[x0-1:x1-1, y0-1:y1-1] += 1
        else:
            for panel_image in panel_images:
                detection_info = json.loads(panel_image.detection_info) if panel_image.detection_info else []

                for box_item in detection_info:
                    box = box_item["box"]
                    x0 = box[0]
                    y0 = box[1]
                    x1 = box[2]
                    y1 = box[3]
                    # Skip boxes that start beyond the trimmed canvas.
                    if x0 > width or y0 > height:
                        continue
                    # Clamp boxes that extend past the canvas edge.
                    if x1 > width:
                        x1 = width
                    if y1 > height:
                        y1 = height

                    origin_heatmap[x0-1:x1-1, y0-1:y1-1] += 1

        # Per-tile maxima (vh_heatmap) plus the raw tile contents (vh_vh).
        vh_heatmap = np.zeros((width_num, height_num))
        vh_vh = []

        i = 0
        for v_h in np.vsplit(origin_heatmap, width_num):
            vh_vh.append([])
            j = 0
            for vh_h in np.hsplit(v_h, height_num):
                vh_heatmap[i, j] = np.max(vh_h)
                vh_vh[i].append(vh_h.tolist())
                j += 1
            i += 1

        shape = vh_heatmap.shape
        max_value = np.max(vh_heatmap)

        # Only return tiles that contain at least one defect.
        # indices = np.where(heatmap > 10) 
        indices = np.where(vh_heatmap > 0) 

        res_heatmap = [[int(i), int(j), int(vh_heatmap[i, j]), vh_vh[i][j]] for i, j in zip(indices[0], indices[1])]   

        t2 = time.time()

        # logger.info(f"热力图总耗时: {t2-t1}")

        return {"max_value": max_value, "heatmap": str(res_heatmap), "shape": shape}

def get_random_failure_id(data_list, exclude_ids):
    """Pick a random failure_id from *data_list*, skipping excluded ids.

    Entries that are not dicts, or whose failure_id is in *exclude_ids*,
    are ignored. Returns None when nothing remains to choose from.
    """
    if not data_list:
        return None

    candidates = []
    for entry in data_list:
        if not isinstance(entry, dict):
            continue
        if entry.get("failure_id") in exclude_ids:
            continue
        candidates.append(entry)

    if not candidates:
        return None
    return random.choice(candidates).get("failure_id")

def get_failure_name(session, failure_id):
    """Return the active failure's name for *failure_id*, or "" when the id
    is falsy or no matching (non-deleted, non-zero) failure exists."""
    if not failure_id:
        return ""

    row = (
        session.query(Failure)
        .filter(
            and_(
                Failure.failure_id == failure_id,
                Failure.del_flag == "0",
                Failure.failure_id != 0,
            )
        )
        .first()
    )

    if row is None:
        return ""
    return row.failure_name


def export_csv_task(lot_name, lot_code, panel_ids, failure_ids, judgments, pjudgment, end_time, start_time, ai_result, aoi_start_time, aoi_end_time, aoi_result, server_hosts, task_id=None):
    """Export per-panel judgment data as CSV rows.

    Builds one dict per matching panel image (AI / AOI / manual judgments
    plus a per-failure-type defect count) and, when *task_id* is given,
    writes them atomically to ``csv_dir/<task_id>.csv`` and notifies the
    frontend via async_msg.

    Returns:
        list[dict]: the row dicts, in aoi_time-descending panel order.
    """
    logger.info(f"export_csv_task 开始执行导出")
    with SessionLocal() as session:
        t1 = time.time()
        panel_images = panel_filter(session, lot_name=lot_name, lot_code=lot_code, panel_ids=panel_ids, failure_ids=failure_ids, judgments=judgments, pjudgment=pjudgment, end_time=end_time, start_time=start_time, ai_result=ai_result, aoi_start_time=aoi_start_time, aoi_end_time=aoi_end_time, aoi_result=aoi_result, server_hosts=server_hosts)

        t2 = time.time()
        logger.info(f"export_csv_task 查询数据库时间：{t2 - t1}")

        predict_data = []
        # Failure types excluded when picking the representative AI defect.
        exclude_ids = [18, 112, 113, 114]

        # PERF: the active failure list is loop-invariant and each lot is
        # shared by many panels -- query once / cache instead of per panel.
        failures = session.query(Failure).filter(and_(Failure.del_flag == "0", Failure.failure_id != 0)).all()
        lot_cache = {}

        t3 = time.time()
        for panel_image in panel_images.order_by(PanelImage.aoi_time.desc()).all():
            logger.info(f"export_csv_task 处理当前 panel: {panel_image.panel_id} 开始")
            t11 = time.time()

            if panel_image.lot_code not in lot_cache:
                lot_cache[panel_image.lot_code] = session.query(Lot).filter(Lot.lot_code == panel_image.lot_code).first()
            lot = lot_cache[panel_image.lot_code]

            # BUG FIX: the row dict must be created per panel. Previously a
            # single dict was created before the loop and appended repeatedly,
            # so every row of predict_data aliased the same object -- the CSV
            # contained N copies of the last panel, and "AI判定缺陷" leaked
            # between rows. Reset the representative defect per row.
            item = {"AI判定缺陷": ""}

            if panel_image.detection_info:
                # De-duplicated defects on the panel.
                detection_info = json.loads(panel_image.detection_info)
                if detection_info:
                    query_failure_id = get_random_failure_id(detection_info, exclude_ids)
                    if query_failure_id:
                        # Resolve the failure name for the chosen defect.
                        item["AI判定缺陷"] = get_failure_name(session, query_failure_id)
                        logger.info('debug-CSV' + item["AI判定缺陷"])
            else:
                detection_info = []

            item["人工复判缺陷"] = panel_image.failure_code
            item["PanelID"] = panel_image.panel_id
            item["Lot名称"] = lot.lot_name
            item["机种"] = "G163A"

            # Loop-local names so the function's ai_result / aoi_result
            # filter parameters are not shadowed.
            panel_ai = panel_image.ai_result
            panel_aoi = panel_image.aoi_result
            judgment = panel_image.judgment

            if panel_ai == 1:
                item["AI判定结果"] = "OK"
            elif panel_ai == 2:
                item["AI判定结果"] = "NG"
            else:
                item["AI判定结果"] = "Gray"

            if panel_aoi == 1:
                item["AOI判定结果"] = "OK"
            elif panel_aoi == 2:
                item["AOI判定结果"] = "NG"
            else:
                # Keep the key present so every row has the same columns
                # (csv.DictWriter derives the header from the first row).
                item["AOI判定结果"] = ""

            if judgment == 1:
                item["人工复判结果"] = "OK"
            elif judgment == 2:
                item["人工复判结果"] = "NG"
            else:
                item["人工复判结果"] = "未复判"

            if panel_ai == judgment:
                item["综合判定结果"] = "判定一致"
            elif judgment == 0:
                item["综合判定结果"] = "未复判"
            elif judgment == 2:
                # Manual NG: AI "Gray" (3) counts as agreement, AI "OK" does not.
                if panel_ai == 3:
                    item["综合判定结果"] = "判定一致"
                else:
                    item["综合判定结果"] = "判定不一致"
            else:
                item["综合判定结果"] = "判定不一致"

            item["开始时间"] = lot.start_time.strftime('%Y-%m-%d %H:%M:%S')
            item["结束时间"] = lot.end_time.strftime('%Y-%m-%d %H:%M:%S') if lot.end_time else ""

            item["总数"] = 0
            logger.info(f"export_csv_task 处理当前 panel: {panel_image.panel_id} 的 failure 开始")

            for failure in failures:
                failure_type = failure.failure_type
                failure_id = failure.failure_id

                count = 0
                for detect_item in detection_info:
                    if detect_item["failure_id"] == failure_id:
                        count += 1

                item[failure_type] = count
                item["总数"] += count

            logger.info(f"export_csv_task 处理当前 panel: {panel_image.panel_id} 的 failure 结束")

            predict_data.append(item)

            t22 = time.time()
            logger.info(f"PanelID: {panel_image.panel_id} 数据处理时间：{t22-t11}")

        t4 = time.time()
        logger.info(f"export_csv_task 处理数据时间: {t4-t3}")

        if task_id:
            csv_path = os.path.join(csv_dir, f"{task_id}.csv")
            tmp_path = os.path.join(csv_dir, f"{task_id}.tmp")
            # utf-8-sig so Excel detects the encoding (avoids mojibake).
            with open(tmp_path, mode='w', newline='', encoding='utf-8-sig') as f:
                # Column names come from the first row's keys; guard the
                # empty case (previously predict_data[0] raised IndexError
                # when no panels matched).
                header = list(predict_data[0].keys()) if predict_data else []
                writer = csv.DictWriter(f, fieldnames=header)
                writer.writeheader()
                writer.writerows(predict_data)

            # Atomic rename: the final file either doesn't exist or is complete.
            os.replace(tmp_path, csv_path)

            msg = {"code": 0, "msg": "CSV生成完成", "task_id": task_id}
            async_msg(msg)

            logger.info(f"✅ 文件生成完成: {csv_path}")

        t5 = time.time()
        logger.info(f"export_csv_task 保存到csv文件时间: {t5-t4}")

        return predict_data