import datetime
import json
import re
import traceback
import torch
import torch.nn.functional as F
from openai import OpenAI

from config.LoadConfig import get_config
from config.ServerApp import app
from utils.Singleton import Singleton
# from utils.SqliteExecutor import sqlite_execute_query
from typing import List, Optional

# Shared "system" configuration section; read once at import time and used as a
# fallback source for VL-model, embedding and threshold settings below.
_system_conf = get_config("system")
# USE_DB_CATEGORIES = str(_system_conf.get("use_db_categories", "false")).lower() == "true"
# SUB_SIMILARITY_THRESHOLD = float(_system_conf.get("sub_reclassify_score", "0.60"))
@Singleton
class ModelClient:
    """
    Singleton client for the OpenAI-compatible VL (vision-language) chat
    endpoint and the embeddings endpoint.

    Configuration is read preferentially from the "vl_models" /
    "embadding_models" sections of common.conf, falling back to keys in the
    "system" section.
    """
    # Shared OpenAI chat client; created in __init__.
    client = None

    def __init__(self):
        # @Singleton hands back the same instance; skip re-initialization.
        if hasattr(self, 'initialized'):
            return
        self.initialized = True
        # Prefer the VL model settings from common.conf ("vl_models" section).
        __vl_models_conf = get_config("vl_models")
        self.vllm_server_url = __vl_models_conf.get("vl_server_url") or _system_conf.get("vl_openai_server", "")
        self.api_key = __vl_models_conf.get("vl_api_key") or _system_conf.get("vl_openai_api_key", "EMPTY")
        self.model_name = __vl_models_conf.get("vl_model_name") or _system_conf.get("vl_openai_model_name", "")
        if not self.vllm_server_url or not self.api_key or not self.model_name:
            raise Exception("vl_openai_server or vl_openai_api_key or vl_openai_model_name is empty")
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=f'{self.vllm_server_url}/v1'
        )
        # Expected response format of the VL model: "json", "yaml" or "auto".
        self.format = _system_conf.get("vl_openai_format", "yaml")
        # Default classification prompt (Chinese); overridable via config.
        self.prompt = _system_conf.get("vl_openai_prompt", """你是专业的图片分类分析师，请分析图片包含的内容并提取OCR文字，分类时必须同时考虑图片视觉内容和文字内容的一致性，确保分类结果合理。

## 关键分类规则
1. **OCR与视觉结合**: 优先考虑OCR文字内容，但也要结合视觉特征
2. **彩票类**: 包含"开奖"、"彩票"、"六合彩"、"双色球"、"大乐透"、"期"等关键词 → 彩票类
3. **人物类**: 图片以人物为主题时优先分类到人物类，必须详细描述人数、性别、年龄、服装、表情、姿态。子分类包括：自拍、全身照、多人合影、身体局部特写、敏感部位、证件照、裸照、擦边照、其他人物照片
4. **证件类**: 包含"身份证"、"护照"、"驾驶证"、"营业执照"、"银行卡"、"信用卡"等关键词 → 相应证件类
5. **交易记录类**: 包含转账金额、支付界面、银行信息、订单详情等 → 交易记录截图
6. **置信度原则**: 当OCR和视觉信息冲突时，选择置信度更高的结果.


## 分类列表
- **涉赌**: 彩票类、六合彩、棋牌赌博现场、线下赌博机、赌博游戏、体育竞猜类、线上真人荷官、盈亏统计单、赌博结算界面
- **涉黄**: 涉黄场所实拍、性暴露图片、擦边照、裸照、性行为过程、线上招嫖广告图片、性交易聊天截图、酒吧夜店商K
- **人物**: 自拍、全身照、多人合影、身体局部特写、敏感部位、证件照、裸照、擦边照、其他人物照片
- **卡证**: 居民身份证、护照、驾驶证、工作证、学生证、营业执照、其他证件
- **银行卡**: 信用卡、银行卡、其他金融卡
- **交易记录截图**: 支付宝/微信转账截图、银行转账截图、购物订单截图、账单截图、其他交易凭证
- **聊天记录截图**: 微信聊天截图、QQ聊天记录、短信截图、其他通讯工具截图
  - 特征：包含对话界面、消息气泡、时间戳、联系人信息等聊天界面元素
  - 区分：与交易记录截图不同，聊天记录主要是对话内容，不是转账或支付界面
- **二维条形码**: 付款码、收钱码、加好友码、小程序/公众号码、网页链接码、条形码、Wi-Fi连接码、健康码、行程码、其他二维码
- **单据**: 发票、收据、合同/协议、快递单、其他单据
- **文件**: 合同、报告、表格、证书、其他文档
- **动物**: 宠物、家畜、野生动物、昆虫、其他动物
- **植物**: 花卉、树木、农作物、盆栽、其他植物
- **食物**: 餐饮场景（餐桌、餐厅环境）、成品菜肴、主食小吃、生鲜食材、饮品甜品、包装食品、其他食品
- **风景**: 天空、海洋、山川、河流、城市、旅游经典、地标与建筑景观、微观自然、其他风景图片
- **卡通**: 动漫人物、动物、怪兽、科幻机甲、表情包、品牌吉祥物、萌系卡通、儿童插画、其他卡通图片
- **游戏**: 网络游戏截图、棋牌类游戏界面、角色信息展示、游戏结算界面、屏幕拍摄、游戏壁纸
- **建筑**: 住宅、商场、古迹、桥梁、寺庙、地标建筑、其他建筑
- **物品**: 电子数码产品、服装鞋帽、家具、家电、人民币、汽车、飞机、轮船、文具与办公用品、工具与五金、体育用品、美妆用品、玩具与模型、其他物品
- **海关**: 集装箱、报关单、禁限物品、海关作业现场、其他海关监管图像
- **涉毒**: 吸毒现场、吸毒工具、毒品展示、毒品制作过程
- **个人极端**: 割腕、虐待、殴打场景
- **刀枪**: 厨房刀具、农用刀具、管制刀具、手枪、步枪/冲锋枪、猎枪、霰弹枪、自制枪械、军事与历史题材、攻击与威胁场景
- **血腥暴力**: 伤口特写、严重血腥与残害、暴力场景、影视特效、割腕
- **其他**: 其他

## 输出要求
- contentSummary: 30字内描述图片主要内容，描述主要场景、活动或情感，说明关键特征和重要细节
- ocrResult: 完整提取所有文字内容
- classify: 提供3-5个分类结果，每个包含subClassifyName、classifyDesc、score
- **人物类特殊要求**: 人物类必须使用具体子分类名称（自拍、全身照、多人合影、身体局部特写、敏感部位、证件照、裸照、擦边照、其他人物照片），不要使用"人物"作为subClassifyName


## 输出格式
```json
{
  "classify": [
    {
      "subClassifyName": "具体分类名称",
      "classifyDesc": "分类依据",
      "score": "0.95"
    }
  ],
  "contentSummary": "30字内描述",
  "ocrResult": "完整文字内容"
}
```"""
)


        # Prefer the embeddings settings from common.conf ("embadding_models"
        # section, sic), falling back to the "system" section.
        __embeddings_conf = get_config("embadding_models")
        self.embeddings_api_key = __embeddings_conf.get("embedding_api_key") or _system_conf.get("embeddings_api_key", 'EMPTY')
        self.embeddings_vllm_server_url = __embeddings_conf.get("embedding_server_url") or _system_conf.get("embeddings_vllm_server_url", '')
        self.embeddings_model_name = __embeddings_conf.get("embedding_model_name") or _system_conf.get("embeddings_model_name", 'GanymedeNil/text2vec-large-chinese')
        self.embeddings_client = OpenAI(
            api_key=self.embeddings_api_key,
            base_url=f'{self.embeddings_vllm_server_url}/v1'
        )

    def model_execute(self, image_path: str, prompt: str = None):
        """
        Send one image to the VL chat endpoint and return the reply text.

        :param image_path: image URL (or data URI) forwarded as-is in the
            ``image_url`` content part
        :param prompt: optional system-prompt override; defaults to self.prompt
        :return: the model's message content string, or None if the call failed
        """
        start_time = datetime.datetime.now()
        try:
            # Bug fix: use self.* instead of the module-level `model_client`
            # global — the old code silently depended on module import order.
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=[
                    {
                        "role": "system",
                        "content": prompt if prompt else self.prompt
                    },
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "image_url",
                                "image_url": {"url": image_path}
                            }
                        ]
                    }
                ]
            )
            end_time = datetime.datetime.now()
            app.ctx.logger.info(f"vl_model Time taken: {end_time - start_time},url:{image_path} ,resp:{response.choices[0].message.content}")

            return response.choices[0].message.content
        except Exception as e:
            app.ctx.logger.error(f"vl_model error :{str(e)} \n{traceback.format_exc()}")
            return None


# Module-level shared instance; @Singleton guarantees every construction site
# (including this one) yields the same object.
model_client = ModelClient()


def classfy_by_model(img_url: str):
    '''
    Classify an image through the VL model.

    :param img_url: image URL handed to the VL model
    :return: formatted result dict from __resp_format, or None when the model
        produced no response or formatting failed
    '''
    start_time = datetime.datetime.now()
    resp = ""
    try:
        resp = model_client.model_execute(img_url)
        if resp:
            return __resp_format(resp)
        # model_execute already logged its own error; nothing to format.
        return None
    except Exception as e:
        # Consistency fix: log via the app logger like the rest of the module,
        # instead of print().
        app.ctx.logger.error(f"model_execute error:{e}")
        return None
    finally:
        end_time = datetime.datetime.now()
        app.ctx.logger.info(f"vl_model Time taken: {end_time - start_time},url:{img_url} ,resp:{resp}")

def __resp_format(content:str):
    """
    Parse the VL model's raw response and map each predicted classification to
    an (L1, L2) category pair.

    Per classify entry the pipeline is:
      1. Priority match: if the predicted sub-category matches a high-priority
         label from classify_cache (exact, substring, or via the description),
         use it directly and skip the vector lookup.
      2. Otherwise build a weighted fusion embedding of contentSummary /
         subClassifyName / classifyDesc and search Milvus over several vector
         fields, fusing per-field similarities into one score per name.
      3. If nothing matched, fall back to ("其他", "其他").

    :param content: raw model output (JSON/YAML text, possibly fenced)
    :return: {"result": [...], "contentSummary": str, "ocrResult": str},
        result sorted by confidence descending
    """
    from utils import DataUtil
    from utils.MilvusClient import MilvusClient
    result = []
    content_summary = ""
    ocr_result = ""
    try:
        # Decode the raw text according to the configured response format.
        if model_client.format == "json":
            format_data = to_json(content)
        elif model_client.format == "yaml":
            format_data = yaml_to_json(content)
        elif model_client.format == "auto":
            format_data = parse_auto(content)
        else:
            # Fallback: try JSON first, then YAML.
            try:
                format_data = to_json(content)
            except Exception:
                format_data = yaml_to_json(content)
        content_summary = format_data.get("contentSummary", "")
        ocr_result = format_data.get("ocrResult", "")
        classify_data = format_data.get("classify", [])
        # Best-effort Milvus client; None disables the vector-lookup branch.
        milvus_client = None
        try:
            milvus_client = MilvusClient()
        except Exception as _:
            milvus_client = None

        for classify in classify_data:
            # classify_name = classify.get("classifyName", "")
            sub_classify_name = classify.get("subClassifyName", "")
            classify_desc = classify.get("classifyDesc", "")
            # third_classify_name = classify.get("thirdClassifyName", sub_classify_name)
            classify_name_list = []

            # Gather every high-priority label from the existing classify_cache.
            priority_categories = []
            for category_group in classify_cache.values():
                if "feature" in category_group:
                    priority_categories.extend(category_group["feature"])

            # Does the predicted classification hit a high-priority category?
            is_priority_category = False
            matched_priority_cat = None

            # Pass 1: exact match of the sub-category name.
            for priority_cat in priority_categories:
                if priority_cat == sub_classify_name:
                    is_priority_category = True
                    matched_priority_cat = priority_cat
                    break

            # Pass 2: substring match (guarded against false hits such as
            # "其他" matching "其他证件").
            if not is_priority_category:
                for priority_cat in priority_categories:
                    # Avoid mismatches like "其他" matching "其他证件".
                    if (sub_classify_name in priority_cat and 
                        not (sub_classify_name == "其他" and priority_cat != "其他")):
                        is_priority_category = True
                        matched_priority_cat = priority_cat
                        break

            # Pass 3: look for a priority label inside the description text.
            if not is_priority_category:
                for priority_cat in priority_categories:
                    if priority_cat in classify_desc:
                        is_priority_category = True
                        matched_priority_cat = priority_cat
                        break

            if is_priority_category:
                # Use the matched priority label directly and resolve its L1
                # group (the "key" stamped by load_cache, else the dict key).
                l1_name = None
                for category_key, category_group in classify_cache.items():
                    if "feature" in category_group and matched_priority_cat in category_group["feature"]:
                        l1_name = category_group.get("key", category_key)
                        break
                if l1_name:
                    classify_name_list = [(l1_name, matched_priority_cat)]
                else:
                    classify_name_list = [(matched_priority_cat, matched_priority_cat)]
                try:
                    app.ctx.logger.info(f"Priority category matched: '{sub_classify_name}' -> '{matched_priority_cat}' (L1: {l1_name}) - skipping vector database query")
                except Exception:
                    pass
            else:
                # Not a priority category: fall back to Milvus vector search.
                try:
                    if milvus_client is not None and sub_classify_name:
                        _milvus_conf = get_config("milvus")
                        _sys_conf = get_config("system")
                        milvus_collection = (_milvus_conf.get("collection") or "categories_zh_v1") if _milvus_conf else "categories_zh_v1"
                        milvus_metric = (_milvus_conf.get("metric") or "COSINE").upper() if _milvus_conf else "COSINE"
                        try:
                            milvus_ef = int((_milvus_conf.get("ef") or 128)) if _milvus_conf else 128
                        except Exception:
                            milvus_ef = 128
                        # Fused query text: contentSummary + subClassifyName +
                        # classifyDesc, each with a configurable weight.
                        vl_fusion_weights_str = _sys_conf.get("vl_fusion_weights", "0.1,0.4,0.5")
                        fusion_weights = [float(w.strip()) for w in vl_fusion_weights_str.split(",")]

                        texts = []
                        weights = []
                        if (content_summary or '').strip():
                            texts.append(content_summary.strip())
                            weights.append(fusion_weights[0])
                        if (sub_classify_name or '').strip():
                            texts.append(sub_classify_name.strip())
                            weights.append(fusion_weights[1])
                        if (classify_desc or '').strip():
                            texts.append(classify_desc.strip())
                            weights.append(fusion_weights[2])
                        if not texts:

                            texts = [sub_classify_name or ""]
                            weights = [1.0]
                        # Embed the query texts (reusing embeddings_client).
                        resp = model_client.embeddings_client.embeddings.create(
                            input=texts,
                            model=model_client.embeddings_model_name
                        )
                        vecs = [d.embedding for d in resp.data]
                        # Weighted fusion, then L2-normalize to suit COSINE.
                        import math
                        w_sum = sum(weights) or 1.0
                        weights = [w / w_sum for w in weights]
                        fused = [0.0] * len(vecs[0])
                        for wi, vi in zip(weights, vecs):
                            for j, x in enumerate(vi):
                                fused[j] += wi * x
                        s = math.sqrt(sum(x * x for x in fused)) or 1.0
                        vec = [x / s for x in fused]

                        # Multi-field fused retrieval: search vector_name /
                        # vector_aliases / vector_desc separately, then merge.
                        mapped_name = None
                        mapped_l1_name = None
                        try:
                            # Per-field weights come from configuration.
                            vl_field_weights_str = _sys_conf.get("vl_field_weights", "0.8,0,0.2")
                            field_weights = [float(w.strip()) for w in vl_field_weights_str.split(",")]
                            weights = {"vector_name": field_weights[0], "vector_aliases": field_weights[1], "vector_desc": field_weights[2]}
                            combined = {}
                            name2l1 = {}
                            for field, w in weights.items():
                                hits = milvus_client.search_topk([vec], collection=milvus_collection, topk=5, metric=milvus_metric, ef=milvus_ef, anns_field=field)[0]
                                for h in hits:
                                    n = (h.get("name") or "").strip()
                                    if not n:
                                        continue
                                    dist = float(h.get("distance") or 0.0)
                                    metric_u = (milvus_metric or "COSINE").upper()
                                    # Convert distance to a similarity score
                                    # according to the active metric.
                                    if metric_u == "COSINE":
                                        sim = 1.0 - dist
                                    elif metric_u in ("L2", "EUCLIDEAN"):
                                        sim = 1.0 / (1.0 + dist)
                                    else:
                                        sim = dist
                                    combined[n] = combined.get(n, 0.0) + w * sim
                                    if not name2l1.get(n):
                                        name2l1[n] = (h.get("l1_name") or "").strip()
                            if combined:
                                mapped_name = max(combined.items(), key=lambda x: x[1])[0]
                                mapped_l1_name = name2l1.get(mapped_name) or ""
                        except Exception:
                            # Fall back to the single vector field 'vector'.
                            hits = milvus_client.search_topk([vec], collection=milvus_collection, topk=1, metric=milvus_metric, ef=milvus_ef, anns_field="vector")[0]
                            if hits:
                                mapped_name = (hits[0].get("name") or "").strip()
                                mapped_l1_name = (hits[0].get("l1_name") or "").strip()

                        if mapped_name:
                            # Return the L1/L2 pair from the vector store.
                            classify_name_list = [(mapped_l1_name or mapped_name, mapped_name)]
                            try:
                                app.ctx.logger.info(f"Milvus mapped(fused): mapped='{mapped_name}'")
                            except Exception:
                                pass
                except Exception as _:
                    # Milvus unavailable: fall through to the local fallback.
                    pass

            # Strict mapping: anything unmatched becomes "其他" (other).
            if not classify_name_list:
                classify_name_list = [("其他","其他")]
            score = classify.get("score", 0.0)
            if classify_name_list:
                for item in classify_name_list:
                    try:
                        l1n, l2n = item
                    except Exception:
                        # Tolerate plain strings instead of (L1, L2) tuples.
                        l1n = item
                        l2n = item
                    result.append(DataUtil.build_classfy_third_obj(
                        classifyName=l1n,
                        subClassifyName=l2n,
                        thirdClassifyName=l2n,
                        confidence=score
                    ))


    except Exception as e:
        app.ctx.logger.exception(f"format error:{e}")
    result = sorted(result, key=lambda x: x['confidence'], reverse=True)
    # Remap to DB categories (if use_db_categories is enabled) so the VL path
    # only emits in-library categories — currently disabled.
    # try:
    #     result = remap_to_db_categories(result)
    # except Exception as e:
    #     app.ctx.logger.warning(f"remap_to_db_categories failed: {e}")
    return {"result": result,"contentSummary": content_summary,"ocrResult":ocr_result}

def yaml_to_json(yaml_data: str):
    """
    Parse a (possibly Markdown-fenced) model response as YAML into plain
    JSON-compatible Python objects.

    If the payload is actually JSON it is parsed directly; PyYAML is only
    imported when a genuine YAML payload arrives, so JSON responses work even
    in environments without PyYAML installed.

    :param yaml_data: raw response text
    :return: parsed dict/list
    """
    # Strip Markdown code fences and surrounding whitespace.
    def _strip_fences(text:str)->str:
        t = (text or "").strip()
        if t.startswith("```"):
            nl = t.find("\n")
            if nl != -1:
                t = t[nl+1:]
        if t.endswith("```"):
            t = t[:-3]
        return t.strip()

    s = _strip_fences(yaml_data)
    # If it is essentially JSON, parse it as JSON directly.
    if s.startswith('{') or s.startswith('['):
        return json.loads(s)
    # Lazy import: only needed on the true-YAML path.
    import yaml
    data = yaml.safe_load(s)
    # Round-trip through JSON to normalize YAML-specific types into plain
    # JSON-compatible structures.
    return json.loads(json.dumps(data, ensure_ascii=False))

def to_json(content_data: str):
    """
    Parse *content_data* as JSON, tolerating Markdown code fences and
    surrounding prose; raises ValueError when no JSON block can be found.
    """
    text = (content_data or "").strip()
    # Peel off a Markdown fence: drop the ```lang opener line, then the
    # trailing ``` marker.
    if text.startswith("```"):
        newline_at = text.find("\n")
        if newline_at != -1:
            text = text[newline_at + 1:]
    if text.endswith("```"):
        text = text[:-3]
    text = text.strip()

    # Whole payload is already a JSON object or array.
    whole_object = text.startswith('{') and text.rstrip().endswith('}')
    whole_array = text.startswith('[') and text.rstrip().endswith(']')
    if whole_object or whole_array:
        return json.loads(text)

    # Otherwise carve out the first '{'/'[' through the last '}'/']'.
    opener = re.search(r"[\[{]", text)
    if opener:
        begin = opener.start()
        finish = max(text.rfind('}'), text.rfind(']'))
        if finish >= begin:
            return json.loads(text[begin:finish + 1])
    raise ValueError("JSON parse failed")


def parse_auto(content: str):
    """
    Best-effort structured parse: JSON first, then YAML; if both fail, the
    YAML error is re-raised for the caller's fallback handling.
    """
    try:
        return to_json(content)
    except Exception:
        pass
    try:
        return yaml_to_json(content)
    except Exception as yaml_err:
        # Log a short prefix of the payload to ease troubleshooting.
        try:
            app.ctx.logger.warning(f"auto-parse failed. head={(content or '')[:200]}")
        except Exception:
            pass
        raise yaml_err







def vl_classify_format(img_url: str, prompt: str , prompt_code: str = 'single_classify',
                       format: str = 'to_json'):
    """
    Run the VL model on *img_url* and decode its reply.

    :param img_url: image URL handed to the model
    :param prompt: explicit prompt text; when empty, looked up by *prompt_code*
    :param prompt_code: prompt registry key used when *prompt* is empty
    :param format: 'to_json' | 'yaml_to_json' | anything else returns raw text
    """

    from config.promptConfig import get_prompt
    # A non-empty explicit prompt wins over the registry lookup.
    effective_prompt = prompt if prompt and len(prompt) > 0 else get_prompt(prompt_code)
    raw_response = model_client.model_execute(img_url, effective_prompt)
    decoders = {'yaml_to_json': yaml_to_json, 'to_json': to_json}
    decoder = decoders.get(format)
    if decoder is None:
        return raw_response
    return decoder(raw_response)



# Hard-coded L1 category -> {"feature": [L2 labels]} map, overridable through
# the system config key 'classify_cache'. Drives priority matching in
# __resp_format and similarity matching in match_classify.
classify_cache = json.loads(_system_conf.get('classify_cache',"""
{
    "卡证":{"feature":["居民身份证","护照","驾驶证","工作证","学生证","营业执照","其他证件"]},
    "银行卡":{"feature":["信用卡","银行卡","其他金融卡"]},
    "交易记录截图":{"feature":["支付宝/微信转账截图","银行转账截图","购物订单截图","账单截图","其他交易凭证"]},
    "聊天记录截图":{"feature":["微信聊天截图","QQ聊天截图","短信截图","其他通讯工具截图"]},
    "二维条形码":{"feature":["付款码","收钱码","加好友码","小程序/公众号码","网页链接码","条形码","Wi-Fi连接码","健康码","行程码","其他二维码"]},
    "单据":{"feature":["发票","收据","合同/协议","快递单","其他单据"]},
    "文件":{"feature":["合同","报告","表格","证书","其他文档"]},
    "人物":{"feature":["自拍","全身照","多人合影","身体局部特写","敏感部位","证件照","裸照","擦边照","其他人物照片"]},
    "动物":{"feature":["宠物","家畜","野生动物","昆虫","其他动物"]},
    "植物":{"feature":["花卉","树木","农作物","盆栽","其他植物"]},
    "食物":{"feature":["餐饮场景","成品菜肴","主食小吃","生鲜食材","饮品甜品","包装食品","其他食品"]},
    "风景":{"feature":["天空","海洋","山川","河流","城市","旅游经典","地标与建筑景观","微观自然","其他风景图片"]},
    "卡通":{"feature":["动漫人物","动物","怪兽","科幻机甲","表情包","品牌吉祥物","萌系卡通","儿童插画","其他卡通图片"]},
    "建筑":{"feature":["住宅","商场","古迹","桥梁","寺庙","地标建筑","其他建筑"]},
    "物品":{"feature":["电子数码产品","服装鞋帽","家具","家电","人民币","汽车","飞机","轮船","文具与办公用品","工具与五金","体育用品","美妆用品","玩具与模型","其他物品"]},
    "游戏":{"feature":["网络游戏截图","棋牌类游戏界面","角色信息展示","游戏结算界面","屏幕拍摄","游戏壁纸"]},
    "海关":{"feature":["集装箱","报关单","禁限物品","海关作业现场","其他海关监管图像"]},
    "涉黄":{"feature":["涉黄场所实拍","性暴露图片","擦边照","裸照","性行为过程","线上招嫖广告图片","性交易聊天截图","酒吧夜店商K"]},
    "涉赌":{"feature":["棋牌赌博现场","线下赌博机","赌博游戏","体育竞猜类","彩票类","线上真人荷官","盈亏统计单","赌博结算界面","六合彩"]},
    "涉毒":{"feature":["吸毒现场","吸毒工具","毒品展示","毒品制作过程"]},
    "个人极端":{"feature":["割腕","虐待","殴打场景"]},
    "刀枪":{"feature":["厨房刀具","农用刀具","管制刀具","手枪","步枪/冲锋枪","猎枪","霰弹枪","自制枪械","军事与历史题材","攻击与威胁场景"]},
    "血腥暴力":{"feature":["伤口特写","严重血腥与残害","暴力场景","影视特效","割腕"]},
    "其他":{"feature":["其他"]}
}
"""))

# In-memory embedding cache: label text -> embedding vector (filled lazily by
# embedding_api / load_cache).
feature_cache = {}

# Minimum cosine similarity for an assist text to attach an L1 category.
max_similarity_assist = float(_system_conf.get("reclassify_score", "0.65"))
def load_cache():
    """
    Warm the embedding cache with every sub-category label in classify_cache,
    stamping each L1 group with its own key on the way.
    """
    # Stamp each top-level group with its own name so later lookups can
    # recover the L1 key from the group dict itself.
    for group_name in classify_cache:
        classify_cache[group_name].update({"key": group_name})
    # Flatten every group's feature list into one batch of labels.
    all_labels = [label
                  for group in classify_cache.values()
                  for label in group["feature"]]
    batch_embedding(all_labels)


def batch_embedding(keys: list, is_search: bool = False) -> dict:
    """
    Embed *keys* in batches and return a {key: vector} mapping.

    :param keys: label texts to embed
    :param is_search: forwarded to embedding_api unchanged
    :return: dict mapping each key to its embedding vector
    """
    # Process in batches of 200 per API request.
    _batch_size = 200
    result = {}
    for start in range(0, len(keys), _batch_size):
        chunk_data = keys[start:start + _batch_size]
        _features = embedding_api(chunk_data, is_search)
        # zip keeps the chunk order aligned with the returned vectors.
        # (The previous version built this dict twice; the duplicate was dead.)
        result.update(dict(zip(chunk_data, _features)))
    return result


def embedding_api(keys: list, is_search: bool = False) -> list:
    """
    Return one embedding vector per key, in input order, backed by
    feature_cache. Only cache misses are sent to the embeddings endpoint and
    the new vectors are cached for future calls.

    :param keys: label texts to embed (duplicates allowed)
    :param is_search: currently unused; kept for interface compatibility
    :return: list of embedding vectors aligned with *keys*
    """
    # Collect the uncached keys in order; duplicates are kept on purpose to
    # match how new_features is consumed sequentially below.
    # (Removed the former no-op `key = key if is_search else key` lines and the
    # unused cached_features list.)
    keys_to_fetch = [key for key in keys if key not in feature_cache]

    new_features = []
    if keys_to_fetch:
        # Fetch embeddings for the cache misses in one API call.
        response = model_client.embeddings_client.embeddings.create(
            input=keys_to_fetch,
            model=model_client.embeddings_model_name
        )
        new_features = [item.embedding for item in response.data]

        # Cache the freshly fetched vectors.
        for i, key in enumerate(keys_to_fetch):
            feature_cache[key] = new_features[i]

    # Reassemble in the original order: previously-cached entries come from
    # the cache; freshly fetched ones are consumed sequentially.
    result_features = []
    new_feature_index = 0
    for key in keys:
        if key in feature_cache and key not in keys_to_fetch:
            result_features.append(feature_cache[key])
        else:
            result_features.append(new_features[new_feature_index])
            new_feature_index += 1

    return result_features


def add_classify_cache(classify_name: str):
    """
    Register a new top-level (L1) category in classify_cache; idempotent.

    Bug fix: the new entry must mirror the existing structure
    {"feature": [...]} — the previous code stored a bare list, which broke
    load_cache() (dict .update on a list) and match_classify()
    (["feature"] lookup on a list).

    TODO: the original docstring promised persistence ("实现持久化"); that is
    still not implemented here.
    """
    if classify_name in classify_cache:
        return
    classify_cache.update({classify_name: {"feature": []}})
    app.ctx.logger.info(f"更新后的一级分类:{','.join(classify_cache.keys())}")


def match_classify(assist_list: List[str]) -> List[str]:
    """
    Return every first-level category whose best feature-label cosine
    similarity against the assist texts exceeds max_similarity_assist.

    :param assist_list: auxiliary texts describing the image/content
    :return: list of matching L1 category names (may be empty)
    """
    classify_list = []
    # Lazily warm the embedding cache on first use.
    if not feature_cache:
        load_cache()

    # First-level category names to score against.
    first_level_categories = list(classify_cache.keys())
    if not first_level_categories:  # No categories to compare with
        app.ctx.logger.warning("No first-level categories found in classify_cache.")
        return classify_list

    # Score each assist text individually plus all of them joined together.
    combined_assist_text_list = assist_list + [", ".join(assist_list)]
    assist_embeddings = embedding_api(combined_assist_text_list, is_search=True)

    # Shape (num_assist, 1, dim) so it broadcasts against (1, num_feat, dim).
    assist_tensors = torch.tensor(assist_embeddings).unsqueeze(1)

    for category in first_level_categories:
        category_embeddings = embedding_api(classify_cache[category]["feature"])
        category_tensors = torch.tensor(category_embeddings).unsqueeze(0)

        # Pairwise cosine similarity across assist texts x feature labels.
        similarities = F.cosine_similarity(assist_tensors, category_tensors, dim=2)
        # Keep the category if any single pair clears the threshold.
        # (Simplified: the old torch.max(..., dim=0) index was never used.)
        if torch.max(similarities).item() > max_similarity_assist:
            classify_list.append(category)
    return classify_list


# def load_categories_from_db_or_fallback():
#     """
#     若 use_db_categories=true 且库里有类目，则用DB覆盖 classify_cache
#     结构: { L1: {"feature": L1同义词[], "children": [L2...], "l2_feature": {L2: 同义词[]} } }
#     """
#     global classify_cache
#     if not USE_DB_CATEGORIES:
#         return
#     try:
#         l1_rows = sqlite_execute_query("SELECT id, name, alias_json FROM category_l1 WHERE status=1", ())
#         if not l1_rows:
#             return
#         l2_rows = sqlite_execute_query("SELECT l1_id, name, alias_json FROM category_l2 WHERE status=1", ())
#         id2l1 = {r["id"]: r["name"] for r in l1_rows}
#         cache = {}
#         for r in l1_rows:
#             name = r["name"]
#             aliases = json.loads(r.get("alias_json") or "[]") if isinstance(r, dict) else json.loads(r["alias_json"] or "[]")
#             cache[name] = {"feature": aliases, "children": [], "l2_feature": {}}
#         for r in l2_rows:
#             l1_name = id2l1.get(r["l1_id"]) if isinstance(r, dict) else id2l1.get(r[0])
#             if not l1_name or l1_name not in cache:
#                 continue
#             l2_name = r["name"] if isinstance(r, dict) else r[1]
#             aliases_raw = r.get("alias_json") if isinstance(r, dict) else r[2]
#             aliases = json.loads(aliases_raw or "[]")
#             cache[l1_name]["children"].append(l2_name)
#             cache[l1_name]["l2_feature"][l2_name] = aliases
#         classify_cache = cache
#         app.ctx.logger.info(f"Loaded categories from DB. L1 count={len(classify_cache)}")
#     except Exception as e:
#         app.ctx.logger.warning(f"load db categories failed: {e}")
#
#
# def match_subclassify(l1_name: str, assist_list: List[str]) -> Optional[str]:
#
#
#     #----------------------------------------------------
#     if not l1_name or l1_name not in classify_cache:
#         return None
#     l2_feat = classify_cache[l1_name].get("l2_feature") or {}
#     if not l2_feat:
#         return None
#
#     combined = assist_list + [", ".join(assist_list)]
#     assist_emb = embedding_api(combined, is_search=True)
#     assist_t = torch.tensor(assist_emb).unsqueeze(1)
#
#     best_name, best_sim = None, -1.0
#     for l2_name, aliases in l2_feat.items():
#         if not aliases:
#             continue
#         l2_emb = embedding_api(aliases)
#         l2_t = torch.tensor(l2_emb).unsqueeze(0)
#         sims = F.cosine_similarity(assist_t, l2_t, dim=2)
#         max_sim = torch.max(sims.view(-1)).item()
#         if max_sim > best_sim:
#             best_sim, best_name = max_sim, l2_name
#
#     return best_name if best_sim >= SUB_SIMILARITY_THRESHOLD else None
#
#
# def remap_to_db_categories(items: List[dict]) -> List[dict]:
#     from utils import DataUtil
#     if not items:
#         return items
#     if not feature_cache:
#         load_categories_from_db_or_fallback()
#         load_cache()
#     remapped = []
#     for it in items:
#         cls = (it.get("classifyName") or "").strip()
#         sub = (it.get("subClassifyName") or "").strip()
#         thr = (it.get("thirdClassifyName") or "").strip()
#         conf = float(it.get("confidence") or 0.0)
#         assist = [t for t in [cls, sub, thr] if t]
#         l1_list = match_classify(assist) if assist else []
#         l1 = l1_list[0] if l1_list else "其他"
#         l2 = match_subclassify(l1, assist) or "其他"
#         remapped.append(DataUtil.build_classfy_third_obj(
#             classifyName=l1, subClassifyName=l2, thirdClassifyName=l2, confidence=conf
#         ))
#     return sorted(remapped, key=lambda x: x.get("confidence", 0.0), reverse=True)