import concurrent.futures
import time
import os

from config.ServerApp import app
from service.ImageClassfyService import image_classfy, image_classify_route
from service.OcrHandlerService import ocr_extract
from utils.FileUtils import download_file
from config.ServerCache import LocalCache
from config.LoadConfig import get_config
from config.Common import chunk_list_into_chunks
import json as jsonObj
from datetime import datetime
from utils.HttpRequest import do_request,do_query_request
import json
from utils.PathUtil import  build_save_file_dir

# Job scheduling configuration section (from the application config file).
__job_config = get_config('jobConf')

# Scene / server identifier: used as the cache namespace and as the
# "scene" filter when querying tasks from the task service.
__process_type = get_config('system').get("server_name","ai-img-classify-server")

# Local cache tracking in-flight task ids (prevents duplicate submission).
__process_cache = LocalCache()

# Number of images processed per chunk within one batch.
__batch_process_img_count = 1000 if 'batch_process_img_count' not in __job_config else int(__job_config['batch_process_img_count'])
# Maximum number of tasks this server processes concurrently.
__max_process_task_count = 5 if 'max_process_task_count' not in __job_config else int(__job_config['max_process_task_count'])
# Thread-pool size for the shared job executor below.
max_workers = 10 if 'max_workers' not in __job_config else int(__job_config['max_workers'])

job_pool = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)


def __create_batch_result_dir(batch_id):
    """
    Create (if needed) and return the result directory for the given batch_id.

    Layout: <data_path>/result/<YYYY-MM-DD>/<batch_id>
    e.g. /home/admin/datas/classfy/result/2025-01-01/batch-20250927-6-1000

    NOTE: the previous version first computed a path from the "save_info" /
    "system" config sections and then unconditionally overwrote it with the
    path below — that dead code has been removed.
    """
    # e.g. /home/admin/datas  (base data dir for this server)
    data_path = build_save_file_dir("datas")
    result_dir = f"{data_path}/result"

    # Partition results by day (YYYY-MM-DD) to keep directories manageable.
    today_str = datetime.now().strftime("%Y-%m-%d")
    batch_dir = f"{result_dir}/{today_str}/{batch_id}"

    # exist_ok avoids a check-then-create race when several workers write
    # results for the same batch concurrently.
    os.makedirs(batch_dir, exist_ok=True)
    return batch_dir

def __write_result_to_file(batch_id, result_data, is_success=True):
    """
    Append one processing result as a JSON line to the batch's result file.

    Args:
        batch_id: batch identifier (taskId of the task record).
        result_data: dict describing the result; a 'timestamp' key is added
            in place before writing.
        is_success: selects success_results.txt vs failed_results.txt.

    Returns:
        The full path of the file that was written.
    """
    batch_dir = __create_batch_result_dir(batch_id)

    # Success and failure results go to separate files; these names must
    # match the paths that __process_task records on the task row.
    filename = "success_results.txt" if is_success else "failed_results.txt"
    # BUG FIX: the path previously hard-coded "(unknown)" instead of using
    # the computed filename, so results never reached the recorded files.
    file_path = f"{batch_dir}/{filename}"

    # Append mode so results from concurrent workers accumulate.
    with open(file_path, 'a', encoding='utf-8') as f:
        # Stamp each record with the write time.
        result_data['timestamp'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # One JSON object per line (JSONL); keep non-ASCII readable.
        f.write(json.dumps(result_data, ensure_ascii=False) + '\n')

    return file_path




async def process_img_classfy():
    """Async entry point: run the classification poll on the shared thread pool."""
    import asyncio
    # get_running_loop() is the correct call inside a coroutine; calling
    # get_event_loop() for this purpose is deprecated since Python 3.10.
    await asyncio.get_running_loop().run_in_executor(job_pool, process_img_classfy_execute)


def img_classify():
    """Manual trigger for image classification; currently a no-op placeholder."""
    try:
        # Placeholder — the real trigger logic has not been implemented yet.
        return None
    except Exception as e:
        app.ctx.logger.error(f"图片分类执行失败：{str(e)}")


def process_img_classfy_execute():
    """Poll for INIT tasks and fan them out to the worker pool.

    Skips this round when the local processing queue is already at capacity;
    otherwise, for each pending task: claim it by flipping its status to
    PROCESSING, cache its id to block duplicate submission, then submit it
    to the shared thread pool.
    """
    try:
        app.ctx.logger.info(f"开始执行图片分类处理任务！")
        # Respect the per-server concurrency cap before fetching any work.
        cache_size = __process_cache.size(__process_type)
        app.ctx.logger.info(f"当前处理队列数量：{cache_size}")
        if cache_size >= __max_process_task_count:
            app.ctx.logger.info(f"处理队列已满，等待中...........")
            return

        pending = __get_task_record_list()
        if not pending:
            app.ctx.logger.info(f"暂无需要处理的任务...........")
            return

        submit_count = 0
        for record in pending:
            task_id = record.get("id")
            cache_key = __process_type + "：" + str(task_id)

            # Already queued locally — never submit the same task twice.
            if __process_cache.get(cache_key) is not None:
                app.ctx.logger.warning(f"任务 {task_id} 已在处理中，跳过重复提交")
                continue

            # Claim the task server-side first; skip it if the claim fails.
            claimed, _ = do_request("POST", "/task/update", json={"status": "PROCESSING", "id": task_id})
            if not claimed:
                app.ctx.logger.warning(f"任务 {task_id} 状态更新失败，跳过处理")
                continue

            # Cache immediately after claiming so the next poll skips it.
            __process_cache.set(cache_key, task_id)
            job_pool.submit(__process_task, record)
            submit_count += 1

        app.ctx.logger.info(f"本轮提交批次任务数量: {submit_count}")
    except Exception as e:
        app.ctx.logger.error(f"图片分类执行失败：{str(e)}")


def __get_task_record_list():
    """
    Query tasks in INIT status for this server's scene.

    Returns:
        The task record list from the query endpoint (may be None or empty).
    """
    # The success flag is deliberately ignored: callers treat a falsy
    # result exactly like "no tasks available".
    _, data = do_query_request(
        "POST",
        "/task/query",
        json={"status": "INIT", "scene": __process_type, "limit": __max_process_task_count},
    )
    return data


def __process_task(task_record: dict):
    """Process one classification task end-to-end.

    Parses taskParam, processes the image list chunk by chunk, accumulates
    success/failure counts, then records the result file paths and final
    status (FINISH/ERROR) on the task row with a single update.
    """
    # Initialized before the try so the except handler and its log line can
    # always reference batch_id, even when parsing fails early (previously
    # a NameError here could mask the real exception).
    batch_id = task_record.get("taskId", "")
    try:
        task_start_time = time.time()
        task_param = task_record.get("taskParam", "")

        # Defensive re-set: the scheduler normally flips the status before
        # submitting, but direct callers may not have.
        do_request("POST", "/task/update", json={"status":"PROCESSING","id":task_record["id"]})

        # Parse the task parameters and extract the image list to process.
        task_param_json_obj = jsonObj.loads(task_param)
        image_list = task_param_json_obj.get("imageList", [])
        total_images = len(image_list)
        app.ctx.logger.info(f"处理批次：{batch_id}，共计：{total_images}条记录需处理！**")

        # Defensive re-set of the in-flight cache slot (released in finally).
        __process_cache.set(__process_type+"："+ str(task_record['id']), task_record['id'])

        # Split into fixed-size chunks so progress can be reported per chunk.
        chunk_data_list = chunk_list_into_chunks(image_list, __batch_process_img_count)

        processed_count = 0
        total_success = 0  # successes across the whole task (not per chunk)
        total_failure = 0  # failures across the whole task (not per chunk)
        for chunk_data in chunk_data_list:
            start_index = processed_count + 1  # 1-based index of this chunk's first image

            chunk_success, chunk_failed = __process_chunks(chunk_data, task_record, start_index, total_images)
            total_success += chunk_success
            total_failure += chunk_failed

            # Report overall task progress after each chunk.
            processed_count += len(chunk_data)
            elapsed_time = time.time() - task_start_time
            progress = (processed_count / total_images * 100) if total_images > 0 else 0
            app.ctx.logger.info(f"****任务进度: {processed_count}/{total_images} ({progress:.1f}%)，已耗时: {elapsed_time:.1f}秒****")

        # Whole-task timing summary.
        total_task_time = time.time() - task_start_time
        avg_time_per_image = total_task_time / total_images if total_images > 0 else 0
        app.ctx.logger.info(f"****任务结束，任务id:{batch_id},耗时：{total_task_time:.3f}秒,平均每张：{avg_time_per_image:.3f}秒")

        # Record the result file paths (must match __write_result_to_file).
        result_dir = __create_batch_result_dir(batch_id)
        success_file_path = f"{result_dir}/success_results.txt"
        fail_file_path = f"{result_dir}/failed_results.txt"
        update_data = {
           "status":"FINISH","id":task_record["id"],"taskEndTime":'now'
        }
        if total_failure == 0:
            # Everything succeeded — only the success file is recorded.
            update_data["taskSuccessResult"] = success_file_path
            app.ctx.logger.info(f"批次:【{batch_id}】 全部成功")
        elif total_success == 0:
            # Everything failed — only the failure file is recorded.
            update_data["taskFailResult"] = fail_file_path
            app.ctx.logger.warning(f"批次:【{batch_id}】全部失败")
        else:
            # Mixed outcome — record both files.
            update_data["taskSuccessResult"] = success_file_path
            update_data["taskFailResult"] = fail_file_path
            app.ctx.logger.warning(f"批次:【{batch_id}】 部分成功 ,成功：{total_success} || 失败 {total_failure}")

        # Single final database update for the whole task.
        do_request("POST", "/task/update", json=update_data)

    except Exception as e:
        fail_file_path = f"{__create_batch_result_dir(batch_id)}/failed_results.txt"
        # BUG FIX: __write_result_to_file mutates its argument (adds a
        # 'timestamp' key), so it must receive a dict — the previous code
        # passed str(e), which raised TypeError inside this handler.
        __write_result_to_file(
            batch_id,
            {"batch_id": batch_id, "error_message": str(e), "status": "FAIL"},
            is_success=False,
        )
        do_request("POST", "/task/update", json={"status":"ERROR","id":task_record["id"],"taskFailResult":fail_file_path,"taskEndTime":'now'})
        app.ctx.logger.error(f"任务批次：【{batch_id}】处理中断！，异常为：{str(e)}")
    finally:
        # Always release the in-flight cache slot so the scheduler can refill.
        __process_cache.remove(__process_type+"："+ str(task_record['id']))
        app.ctx.logger.info(f"任务批次：{task_record['taskId']}数据处理完毕！")



def __process_chunks(chunk_data: list, task_record: dict, start_index: int = 1, total_images: int = 0):
    """Run one chunk of a batch through the worker pool and count outcomes.

    Args:
        chunk_data: list of {"imageUrl": ..., "uniqueId": ...} entries.
        task_record: full task row; its 'taskId' is used as the batch id and
            its optional 'ext_params' (JSON string) is forwarded to workers.
        start_index: 1-based position of this chunk's first image in the batch.
        total_images: total image count of the whole batch (for progress logs).

    Returns:
        (chunk_success, chunk_failed) counts for this chunk only.
    """
    from config.JobConfig import JobManager

    # Per-chunk timing starts here.
    batch_start = time.time()
    chunk_size = len(chunk_data)
    batch_id = task_record.get('taskId', 'unknown')
    app.ctx.logger.info(f"chunk处理[{batch_id}]START--范围[{start_index}~{start_index+chunk_size-1}],数量：{chunk_size}]")

    # Optional extension parameters stored as a JSON string on the record;
    # an absent or empty string means "no extras".
    ext_params_json = task_record.get("ext_params", None)
    ext_params = jsonObj.loads(ext_params_json) if ext_params_json else {}

    # Fan out every image in the chunk to the shared job pool.
    futures = [
        JobManager.pool.submit_task(__process_img, img_info, task_record, ext_params, start_index + offset, total_images)
        for offset, img_info in enumerate(chunk_data)
    ]

    chunk_success = 0
    chunk_failed = 0
    completed_count = 0
    for future in concurrent.futures.as_completed(futures):
        # Each worker returns True on success, False on failure.
        if future.result():
            chunk_success += 1
        else:
            chunk_failed += 1

        completed_count += 1
        # Log progress every 10 completions and once at the end of the chunk.
        if completed_count % 10 == 0 or completed_count == chunk_size:
            progress = (completed_count / chunk_size) * 100
            elapsed = time.time() - batch_start
            app.ctx.logger.info(f"[chunk处理] 批次:[{batch_id}] 进度: {completed_count}/{chunk_size} ({progress:.1f}%)，已耗时: {elapsed:.1f}秒")

    # Per-chunk timing summary.
    batch_time = time.time() - batch_start
    avg_time = batch_time / chunk_size if chunk_size > 0 else 0
    app.ctx.logger.info(f"chunk[{batch_id}]END--范围[{start_index}~{start_index+chunk_size-1}],总耗时: {batch_time:.3f}秒，平均每张: {avg_time:.3f}秒")
    return chunk_success, chunk_failed



def __process_img(img_info: dict, task_record: dict, ext_params: dict, current_index: int = 0, total_count: int = 0):
    """Process a single image: download, classify (or OCR-only), persist the result.

    Args:
        img_info: {'imageUrl': ..., 'uniqueId': ...} for the image to process.
        task_record: the full task row; 'taskId' is the batch id and
            'taskParam' may carry a 'processType' of "ocr_only".
        ext_params: extension parameters forwarded to download/classify.
        current_index / total_count: position info (currently unused here,
            kept for interface compatibility with the submitter).

    Returns:
        True when the image was processed and its result written, else False.
    """
    img_url: str = ''
    img_path = None
    handler_success = False  # set True only after the result file is written
    # Initialized before the try so the except handler can always reference
    # them, even when extraction from img_info/task_record fails early
    # (previously batch_id was assigned inside the try, risking NameError).
    batch_id = task_record.get("taskId", "")
    unique_id = img_info.get("uniqueId", "")
    try:
        img_url = img_info.get("imageUrl")
        # Decide whether this task is an OCR-only pipeline from taskParam.
        task_param_str = task_record.get("taskParam", "")
        process_type = ""
        if task_param_str:
            try:
                tjson = jsonObj.loads(task_param_str)
                process_type = tjson.get("processType", "")
            except Exception:
                # Malformed taskParam — fall back to the default pipeline.
                process_type = ""

        img_path = download_file(img_url, "url", ext_params)
        start_time = time.time()
        if process_type == "ocr_only":
            # OCR-only: skip classification and return the extracted text
            # in the same response shape as the classifier.
            ocr_text = ocr_extract(img_path)
            result = {
                "extInfo": "",
                "msg": "",
                "code": 200,
                "data": {
                    "imageUrl": img_url,
                    "uniqueId": unique_id,
                    "ocrResult": ocr_text
                }
            }
        else:
            result = image_classify_route(img_path, img_url, unique_id, kwargs=ext_params)
        execution_time = time.time() - start_time  # per-image processing time

        # Build and persist the success record.
        success_result = {
                "batch_id": batch_id,
                "unique_id": unique_id,
                "image_url": img_url,
                "classfy_result": result.get("data", {}),
                "execution_time": execution_time,
                "status": "SUCCESS"
            }
        __write_result_to_file(batch_id, success_result, is_success=True)
        app.ctx.logger.info(f"处理数据：{img_url}成功！结果已保存到文件")
        handler_success = True
    except Exception as e:
        app.ctx.logger.exception(f"处理数据：{img_url}失败！{str(e)}")
        failure_result = {
                "batch_id": batch_id,
                "unique_id": img_info.get('uniqueId', ''),
                "image_url": img_url,
                "error_message": str(e),
                "status": "FAIL"
        }
        # Persist the failure record.
        __write_result_to_file(batch_id, failure_result, is_success=False)
    finally:
        # Best-effort cleanup of the downloaded temp file; it may never have
        # been created (download failed) or already be gone — an unguarded
        # os.remove here could previously raise out of the finally block.
        if img_path:
            try:
                os.remove(img_path)
            except OSError:
                pass
    return handler_success
