import logging
import cx_Oracle
import numpy as np
from typing import List, Dict, Optional, Tuple
from datetime import datetime
from config import Config
# Request models for the semantic similarity service
from models import (
    QueryRequest,
    BatchQueryRequest
)
# Module-level logger for this service
logger = logging.getLogger(__name__)

class CosmicSimilarityAnalyzer:
    """根据cosmicid查询并分析相似度的服务类"""

    def __init__(self, semantic_service=None):
        """Create the analyzer.

        Args:
            semantic_service: an already-constructed semantic similarity
                service instance, supplied via dependency injection. May be
                None; it is only required by the analysis entry points.
        """
        # Oracle credentials are read straight from the application Config.
        self.db_config = dict(
            user=Config.ORACLE_USER,
            password=Config.ORACLE_PASSWORD,
            dsn=Config.ORACLE_DSN,
        )
        # Injected dependency: reuse the existing semantic service instance.
        self.semantic_service = semantic_service

    def _get_db_connection(self):
        """Open and return a new Oracle connection from the stored config.

        Raises:
            Exception: re-raised after logging when the connection fails.
        """
        cfg = self.db_config
        try:
            return cx_Oracle.connect(
                user=cfg['user'], password=cfg['password'], dsn=cfg['dsn']
            )
        except Exception as e:
            logger.error(f"数据库连接失败: {e}")
            raise

    def query_cosmic_data(self, cosmicid: str) -> List[Dict]:
        """Fetch the active ('新增') COSMIC rows for one cosmicid.

        Joins TB_COSMIC_DATA with tb_cosmic and the workflow attribute table
        to pick up the contract code; LOB columns are truncated to their
        first 4000 characters.

        Returns:
            One dict per row, keyed by lower-cased column names, ordered by
            id descending.

        Raises:
            Exception: re-raised after logging on any query failure.
        """
        sql = """
        SELECT f.id,f.GNROW,c.cosmicid,c.FIRMCODE,r.attrvaluestr AS contract_code, 
            DBMS_LOB.SUBSTR(f.gnyhxq, 4000, 1) AS gnyhxq, 
            DBMS_LOB.SUBSTR(f.gngc, 4000, 1) AS gngc, 
            DBMS_LOB.SUBSTR(f.zgcms, 4000, 1) AS zgcms 
        FROM TB_COSMIC_DATA f 
        JOIN tb_cosmic c ON c.cosmicid = f.cosmicid 
        JOIN tb_upe_instanceattr r ON r.wfinstid = c.pswfinstid AND r.attrcode = 'contractcode' 
        WHERE c.state = '1' and f.cosmicid = :cosmicid  and f.fyd = '新增'
        ORDER BY f.id DESC
        """

        try:
            with self._get_db_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(sql, {'cosmicid': cosmicid})

                # Build a dict per row, keyed by the lower-cased column names.
                columns = [col[0].lower() for col in cursor.description]
                results = [dict(zip(columns, row)) for row in cursor.fetchall()]

                logger.info(f"查询到 {len(results)} 条数据，cosmicid: {cosmicid}")
                return results

        except Exception as e:
            logger.error(f"查询数据失败: {e}")
            raise

    def deduplicate_content(self, data_list: List[Dict], field: str) -> List[str]:
        """Return the distinct, non-blank values of *field* in first-seen order.

        Values that are missing, empty, or whitespace-only are skipped;
        comparison is by exact string equality.
        """
        unique_contents: List[str] = []
        seen = set()

        for record in data_list:
            text = record.get(field, '')
            # Skip blanks and anything already collected.
            if not text or not text.strip() or text in seen:
                continue
            seen.add(text)
            unique_contents.append(text)

        logger.info(f"字段 {field} 去重前: {len(data_list)}, 去重后: {len(unique_contents)}")
        return unique_contents

    def insert_default_ai_result(self, wfinstid: str, cosmicid: str):
        """Write the default 'passed' verdict for a cosmicid into both tables.

        Inserts one row into TB_COSMIC_AI_RESULT (marked 通过) and one
        workflow attribute row (localAI_result = 'Y'); both statements run
        in the same transaction and are committed together.

        Raises:
            Exception: re-raised after logging on any insert/commit failure.
        """
        # Statement 1: default verdict row in TB_COSMIC_AI_RESULT.
        insert_sql_1 = """
        INSERT INTO TB_COSMIC_AI_RESULT 
        (resultid, wfinstid, cosmicid, ruletype, rulename, checkcontent, checkresult, state, createtime) 
        VALUES (SEQ_TB_COSMIC_AI_RESULT.NEXTVAL, :wfinstid, :cosmicid, '历史相似性', '与历史需求比重', '三要素和重复性检查','通过','Y',sysdate)
        """

        # Statement 2: workflow attribute row in tb_upe_instanceattr.
        insert_sql_2 = """
        INSERT INTO tb_upe_instanceattr 
        (wfinstid, ATTRCODE, ATTRNAME, ATTRVALUE, DBTYPE, CREATEUSERID, CREATETIME, ATTRVALUESTR) 
        VALUES (:wfinstid, :attrcode, :attrname, :attrvalue, :dbtype, :createuserid, sysdate, :attrvaluestr)
        """

        ai_result_binds = {'wfinstid': wfinstid, 'cosmicid': cosmicid}
        attr_binds = {
            'wfinstid': wfinstid,
            'attrcode': 'localAI_result',
            'attrname': '语义分析模型稽核结果',
            'attrvalue': 'Y',
            'dbtype': 'string',
            'createuserid': 'ADMIN',
            'attrvaluestr': 'Y',
        }

        try:
            with self._get_db_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(insert_sql_1, ai_result_binds)
                cursor.execute(insert_sql_2, attr_binds)
                conn.commit()
                logger.info(f"成功插入默认AI结果到两个表，cosmicid: {cosmicid}, wfinstid: {wfinstid}")

        except Exception as e:
            logger.error(f"插入默认AI结果失败: {e}")
            raise

    def save_ai_result(self, cosmicid: str, wfinstid: str, step: int, field_type: str,
                       similar_cosmicids: List[str], similarity_scores: List[float],
                       check_contents: List[str], gnrow_numbers: List[str] = None,
                       line_ids: List[str] = None, other_ids: List[str] = None):
        """Persist one failed-check row per similar cosmicid into TB_COSMIC_AI_RESULT.

        Every (similar_cosmicid, score) pair produces one row with the fixed
        rule labels 历史相似性 / 与历史需求比重 and verdict 不通过; the
        field's Chinese display name and the score appear in reson_desc.

        Args:
            cosmicid: the cosmicid under analysis.
            wfinstid: workflow instance id associated with *cosmicid*.
            step: analysis step number (logging only).
            field_type: source field key ('gnyhxq' / 'gngc' / 'zgcms'); other
                keys fall back to the key itself as the display name.
            similar_cosmicids: cosmicids found similar (one row each).
            similarity_scores: similarity percentages, parallel list.
            check_contents: query texts (kept for interface compatibility;
                the insert stores a fixed check-content label instead).
            gnrow_numbers: optional row numbers (kept for interface
                compatibility; not stored).
            line_ids: optional TB_COSMIC_DATA ids, parallel list; "" when absent.
            other_ids: optional matched-row ids, parallel list; "" when absent.

        Raises:
            Exception: re-raised after logging on any insert/commit failure.
        """
        insert_sql = """
        INSERT INTO TB_COSMIC_AI_RESULT 
        (resultid, wfinstid, cosmicid, lineid, ruletype, rulename, checkcontent, checkresult, 
         reson_desc, other_id, state, createtime)
        VALUES (SEQ_TB_COSMIC_AI_RESULT.NEXTVAL, :wfinstid, :cosmicid, :lineid, :ruletype, :rulename, :checkcontent, 
                :checkresult, :reson_desc, :other_id, :state, :createtime)
        """

        # Chinese display names used inside the human-readable reson_desc;
        # hoisted out of the loop (it is invariant per call).
        field_name_mapping = {
            'gnyhxq': '功能用户需求',
            'gngc': '功能过程',
            'zgcms': '子过程描述'
        }
        field_display_name = field_name_mapping.get(field_type, field_type)

        try:
            with self._get_db_connection() as conn:
                cursor = conn.cursor()

                for i, (similar_cosmicid, score) in enumerate(zip(similar_cosmicids, similarity_scores)):
                    # Any recorded similarity match is reported as a failed check.
                    reason_desc = f"与其他需求存在相似性《{field_display_name}》，相似率{score:.2f}%"

                    cursor.execute(insert_sql, {
                        'wfinstid': wfinstid,
                        'cosmicid': cosmicid,
                        'lineid': line_ids[i] if line_ids and i < len(line_ids) else "",
                        'ruletype': "历史相似性",
                        'rulename': "与历史需求比重",
                        'checkcontent': "三要素和重复性检查",
                        'checkresult': "不通过",
                        'reson_desc': reason_desc[:4000],  # column limit per new table layout
                        'other_id': other_ids[i] if other_ids and i < len(other_ids) else "",
                        'state': 'Y',  # active/valid row
                        'createtime': datetime.now()
                    })

                conn.commit()
                logger.info(f"保存 {len(similar_cosmicids)} 条AI结果，cosmicid: {cosmicid}, 步骤: {step}")

        except Exception as e:
            logger.error(f"保存AI结果失败: {e}")
            raise

    def deduplicate_content_with_gnrow(self, data_list: List[Dict], field: str) -> Tuple[List[str], List[str], List[str]]:
        """Deduplicate *field* values while tracking each value's gnrow and row id.

        Returns:
            (unique_contents, gnrow_numbers, line_ids) — three parallel lists
            in first-seen order; gnrow/id are stringified ('' when absent).
        """
        unique_contents: List[str] = []
        gnrow_numbers: List[str] = []
        line_ids: List[str] = []
        seen = set()

        for record in data_list:
            text = record.get(field, '')
            # Skip blanks and anything already collected.
            if not (text and text.strip()) or text in seen:
                continue
            seen.add(text)
            unique_contents.append(text)
            gnrow_numbers.append(str(record.get('gnrow', '')))
            line_ids.append(str(record.get('id', '')))

        logger.info(f"字段 {field} 去重前: {len(data_list)}, 去重后: {len(unique_contents)}")
        return unique_contents, gnrow_numbers, line_ids

    def get_gnrow_by_cosmicid_and_content(self, cosmicid: str, content: str, field: str) -> str:
        """Return the GNROW of the row whose *field* content equals *content*.

        Args:
            cosmicid: cosmicid whose rows are searched.
            content: exact LOB content (first 4000 chars) to match.
            field: LOB column to compare — one of 'gnyhxq', 'gngc', 'zgcms'.

        Returns:
            The gnrow as a string, or "" when there is no match, the lookup
            fails, or *field* is not a known column.
        """
        # `field` is interpolated into the SQL text (bind variables cannot
        # name a column), so restrict it to the known LOB columns to rule
        # out SQL injection through this parameter.
        if field not in ('gnyhxq', 'gngc', 'zgcms'):
            return ""

        sql = f"""
        SELECT f.GNROW 
        FROM TB_COSMIC_DATA f 
        JOIN tb_cosmic c ON c.cosmicid = f.cosmicid 
        WHERE c.state = '1' and f.cosmicid = :cosmicid 
        AND DBMS_LOB.SUBSTR(f.{field}, 4000, 1) = :content
        """

        try:
            with self._get_db_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(sql, {'cosmicid': cosmicid, 'content': content})
                result = cursor.fetchone()
                return str(result[0]) if result else ""
        except Exception as e:
            logger.error(f"查询gnrow失败: {e}")
            return ""

    def get_wfinstid_by_cosmicid(self, cosmicid: str) -> str:
        """Return the pswfinstid of the active ('1') record for *cosmicid*.

        Returns:
            The workflow instance id, or "" when the record is missing or
            the query fails (failures are logged, not raised).
        """
        sql = "SELECT pswfinstid FROM tb_cosmic WHERE cosmicid = :cosmicid AND state = '1'"

        try:
            with self._get_db_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(sql, {'cosmicid': cosmicid})
                row = cursor.fetchone()
                if row:
                    return row[0]
                logger.warning(f"未找到cosmicid {cosmicid} 对应的wfinstid")
                return ""
        except Exception as e:
            logger.error(f"查询wfinstid失败: {e}")
            return ""

    def analyze_cosmic_similarity(self, cosmicid: str, exclude_pswfinstid: str = None) -> Dict:
        """Main entry point: run the full similarity analysis for one cosmicid.

        Flow: load the data; run step one (gnyhxq); if step one has matches,
        run step two (gngc/zgcms). Step-one results are persisted only when
        step two also found matches; in every other branch the default
        "passed" AI result is inserted instead.

        Args:
            cosmicid: the cosmicid to analyze.
            exclude_pswfinstid: optional workflow instance id forwarded to the
                semantic search as an exclusion filter (presumably to skip the
                record's own workflow — confirm against the semantic service).

        Returns:
            The result dict with status 'success' plus per-step details, or
            {'status': 'error', 'message': ...} on failure or missing data.

        Raises:
            ValueError: when no semantic_service was injected at construction.
        """
        if not self.semantic_service:
            raise ValueError("SemanticSimilarityService实例未提供，请通过构造函数传入")

        try:
            logger.info(f"开始分析cosmicid: {cosmicid}")

            # Set up: resolve wfinstid, load the rows, build the result skeleton.
            wfinstid, cosmic_data, result = self._initialize_analysis(cosmicid)
            if not cosmic_data:
                logger.info(f"未找到数据，准备插入默认结果，cosmicid: {cosmicid}, wfinstid: {wfinstid}")
                self.insert_default_ai_result(cosmicid=cosmicid, wfinstid=wfinstid)
                return {'status': 'error', 'message': '未找到有效数据'}

            # Step one: gnyhxq similarity analysis (results NOT saved here).
            step1_data = self._analyze_gnyhxq_similarity(cosmicid, wfinstid, cosmic_data, result,
                                                         exclude_pswfinstid)

            # Step two runs only when step one produced candidates.
            if step1_data:
                logger.info(f"步骤一有结果，准备执行步骤二")
                step2_has_results = self._analyze_step2_similarity(cosmicid, wfinstid, cosmic_data, result,
                                                                   exclude_pswfinstid)
                # Step-one results are persisted only when step two also matched.
                if step2_has_results:
                    # Persist the deferred step-one results.
                    self.save_ai_result(
                        cosmicid=cosmicid,
                        wfinstid=wfinstid,
                        step=1,
                        field_type='gnyhxq',
                        similar_cosmicids=step1_data['final_cosmicids'],
                        similarity_scores=step1_data['final_scores'],
                        check_contents=step1_data['final_contents'],
                        line_ids=step1_data['unique_line_ids'],
                        other_ids=step1_data['unique_other_ids']
                    )
                    result['step1_completed'] = True
                    result['gnyhxq_results'] = step1_data['unique_detailed_results']
                    logger.info(f"步骤二有结果，已保存步骤一结果")
                else:
                    # Step two empty: discard step one and record the default verdict.
                    logger.info(f"步骤二无结果，跳过步骤一结果保存，准备插入默认结果")
                    self.insert_default_ai_result(cosmicid=cosmicid, wfinstid=wfinstid)
            else:
                # Step one empty: record the default "passed" verdict.
                logger.info(f"步骤一无结果，准备插入默认结果，cosmicid: {cosmicid}, wfinstid: {wfinstid}")
                self.insert_default_ai_result(cosmicid=cosmicid, wfinstid=wfinstid)

            logger.info(f"分析完成，cosmicid: {cosmicid}")
            result['status'] = 'success'
            return result

        except Exception as e:
            logger.error(f"分析过程中发生错误: {e}")
            return {'status': 'error', 'message': str(e)}

    def _analyze_gnyhxq_similarity(self, cosmicid: str, wfinstid: str, cosmic_data: List[Dict], result: Dict,
                                   exclude_pswfinstid: str = None) -> Optional[Dict]:
        """Step one: gnyhxq similarity search, without persisting anything.

        Deduplicates the gnyhxq contents, batch-queries the semantic service
        (top_k=3), filters out self-matches, deduplicates the hits, and
        stashes the matched cosmicids into *result* for step two.

        Returns:
            The processed result dict (final_cosmicids, final_scores, ...)
            or None when there is no content or no valid match.
        """
        gnyhxq_contents, gnyhxq_gnrows, gnyhxq_line_ids = self.deduplicate_content_with_gnrow(cosmic_data, 'gnyhxq')

        if not gnyhxq_contents:
            logger.info("gnyhxq内容为空，返回None")
            return None

        logger.info("执行步骤一：gnyhxq相似度查询")

        # Batch semantic search, forwarding the optional exclusion filter.
        search_results = self._perform_batch_search(gnyhxq_contents, "功能用户需求", top_k=3,
                                                    exclude_pswfinstid=exclude_pswfinstid)

        # Drop self-matches and deduplicate by cosmicid.
        processed_results = self._process_gnyhxq_results(search_results, cosmicid, gnyhxq_line_ids)

        logger.info(f"处理后的结果：final_cosmicids数量: {len(processed_results['final_cosmicids'])}")

        if not processed_results['final_cosmicids']:
            logger.info("步骤一处理后无有效结果，返回None")
            return None

        # Share the matched cosmicids with step two via the mutable result dict.
        result['final_cosmicids'] = processed_results['final_cosmicids']

        # Return the processed data without saving it to the database — the
        # caller persists it only when step two also finds matches.
        logger.info(f"步骤一返回有效结果，cosmicids: {processed_results['final_cosmicids']}")
        return processed_results

    def _initialize_analysis(self, cosmicid: str) -> Tuple[str, Optional[List[Dict]], Optional[Dict]]:
        """Resolve the wfinstid, load the cosmic rows, and build the result skeleton.

        Returns:
            (wfinstid, cosmic_data, result); cosmic_data and result are None
            when no rows exist for *cosmicid* (wfinstid is still returned so
            the caller can record a default verdict).
        """
        wfinstid = self.get_wfinstid_by_cosmicid(cosmicid)
        cosmic_data = self.query_cosmic_data(cosmicid)

        if not cosmic_data:
            logger.warning(f"未找到cosmicid: {cosmicid} 的数据")
            return wfinstid, None, None

        # Skeleton that the step-one/step-two helpers fill in.
        result = {
            'cosmicid': cosmicid,
            'wfinstid': wfinstid,
            'step1_completed': False,
            'step2_completed': False,
            'gnyhxq_results': [],
            'gngc_results': [],
            'zgcms_results': []
        }

        return wfinstid, cosmic_data, result

    def _perform_batch_search(self, contents: List[str], query_type: str, top_k: int = 3,
                              target_cosmicid: str = None, exclude_pswfinstid: str = None) -> List:
        """Run one semantic query per content string as a single batch.

        Args:
            contents: query texts, one QueryRequest each.
            query_type: semantic query type label.
            top_k: number of hits requested per query.
            target_cosmicid: optional cosmicid restriction added to each query.
            exclude_pswfinstid: optional workflow instance id exclusion filter.

        Returns:
            The semantic service's batch_search responses.
        """
        def build_query(text: str) -> QueryRequest:
            # Only include the optional filters when they are set.
            params = {'query_text': text, 'query_type': query_type, 'top_k': top_k}
            if target_cosmicid:
                params['cosmicid'] = target_cosmicid
            if exclude_pswfinstid:
                params['exclude_pswfinstid'] = exclude_pswfinstid
            return QueryRequest(**params)

        batch_request = BatchQueryRequest(queries=[build_query(c) for c in contents])
        return self.semantic_service.batch_search(batch_request)

    def _process_gnyhxq_results(self, search_results: List, cosmicid: str, gnyhxq_line_ids: List[str]) -> Dict:
        """Flatten the gnyhxq search responses, drop self-matches, and deduplicate.

        Args:
            search_results: batch responses, one per deduplicated gnyhxq content.
            cosmicid: our own cosmicid — hits against it are discarded.
            gnyhxq_line_ids: source row ids, parallel to *search_results*.

        Returns:
            The deduplicated result dict from _deduplicate_results.
        """
        detailed = []
        matched_ids = []
        scores = []
        contents = []
        data_ids = []
        line_ids = []

        for idx, response in enumerate(search_results):
            # Row id of the query content that produced this response.
            source_line = gnyhxq_line_ids[idx] if idx < len(gnyhxq_line_ids) else ""
            for hit in response.results:
                if hit.cosmicid == cosmicid:  # never report ourselves
                    continue
                detailed.append(self._build_detailed_result(
                    hit, response.query_text, 'gnyhxq', '功能用户需求相似度检查'
                ))
                matched_ids.append(hit.cosmicid)
                scores.append(hit.similarity_percentage)
                contents.append(response.query_text)
                data_ids.append(hit.data_id)
                line_ids.append(source_line)

        # Collapse to one entry per similar cosmicid (first hit wins).
        return self._deduplicate_results(matched_ids, scores, contents, detailed, data_ids, line_ids)

    def _build_detailed_result(self, result_item, query_text: str, field_type: str, rule_name: str,
                               target_gnrow: str = None) -> Dict:
        """Assemble the detailed-result dict for a single similarity hit.

        Args:
            result_item: one search hit (cosmicid, similarity fields, content,
                firmcode, contract_code, ...).
            query_text: the text that was searched.
            field_type: field key ('gnyhxq' / 'gngc' / 'zgcms').
            rule_name: display name of the executed check.
            target_gnrow: optional matched row number; when given it is named
                in reason_desc and echoed under 'target_gnrow'.
        """
        display_names = {
            'gnyhxq': '功能用户需求',
            'gngc': '功能过程',
            'zgcms': '子过程描述'
        }
        pct = result_item.similarity_percentage

        # Human-readable explanation, optionally naming the matched row.
        row_part = f" 的 {target_gnrow} 行" if target_gnrow else ""
        reason_desc = (
            f"通过{display_names[field_type]}字段相似度分析，发现与cosmicid {result_item.cosmicid}"
            f"{row_part}相似度为 {pct:.2f}%"
        )

        detailed = {
            'cosmicid': result_item.cosmicid,
            'similarity_percentage': pct,
            'similarity_score': result_item.similarity_score,
            'content': result_item.content,
            'query_text': query_text,
            'field_type': field_type,
            'rule_type': '历史相似性',
            'rule_name': rule_name,
            'check_result': f"相似度: {pct:.2f}%",
            'reason_desc': reason_desc,
            'firmcode': result_item.firmcode,
            'contract_code': result_item.contract_code
        }

        if target_gnrow:
            detailed['target_gnrow'] = target_gnrow

        return detailed

    def _deduplicate_results(self, similar_cosmicids: List, similarity_scores: List,
                             check_contents: List, detailed_results: List,
                             other_ids: List, current_line_ids: List) -> Dict:
        """Keep only the first occurrence of each similar cosmicid.

        All six input lists are parallel; insertion order is preserved, so
        for a repeated cosmicid the earliest entry wins.
        """
        first_hit: Dict = {}
        kept_detailed = []
        kept_other_ids = []
        kept_line_ids = []

        for idx, candidate in enumerate(similar_cosmicids):
            if candidate in first_hit:
                continue
            first_hit[candidate] = {
                'score': similarity_scores[idx],
                'content': check_contents[idx]
            }
            kept_detailed.append(detailed_results[idx])
            kept_other_ids.append(other_ids[idx])
            kept_line_ids.append(current_line_ids[idx])

        ordered_ids = list(first_hit)
        return {
            'final_cosmicids': ordered_ids,
            'final_scores': [first_hit[cid]['score'] for cid in ordered_ids],
            'final_contents': [first_hit[cid]['content'] for cid in ordered_ids],
            'unique_detailed_results': kept_detailed,
            'unique_other_ids': kept_other_ids,
            'unique_line_ids': kept_line_ids
        }

    def _analyze_step2_similarity(self, cosmicid: str, wfinstid: str, cosmic_data: List[Dict], result: Dict,
                                  exclude_pswfinstid: str = None) -> bool:
        """Step two: check gngc and zgcms against the step-one candidate cosmicids.

        Returns:
            True when either field produced matches; the matches are then
            stored into *result* and step2_completed is set.
        """
        logger.info("执行步骤二：gngc和zgcms相似度查询")

        final_cosmicids = result.get('final_cosmicids', [])
        if not final_cosmicids:
            return False

        # Run both fields against the step-one candidates, forwarding the
        # optional exclusion filter.
        gngc_results = self._analyze_field_similarity(
            cosmic_data, 'gngc', '功能过程', final_cosmicids, cosmicid, wfinstid, step=2,
            exclude_pswfinstid=exclude_pswfinstid
        )
        zgcms_results = self._analyze_field_similarity(
            cosmic_data, 'zgcms', '子过程描述', final_cosmicids, cosmicid, wfinstid, step=2,
            exclude_pswfinstid=exclude_pswfinstid
        )

        if not (gngc_results or zgcms_results):
            return False

        result['gngc_results'] = gngc_results
        result['zgcms_results'] = zgcms_results
        result['step2_completed'] = True
        return True

    def _analyze_field_similarity(self, cosmic_data: List[Dict], field_type: str, query_type: str,
                                  target_cosmicids: List[str], cosmicid: str, wfinstid: str, step: int,
                                  exclude_pswfinstid: str = None) -> List:
        """Run the step-two similarity check for one field against the candidates.

        Each unique content of *field_type* is searched (top_k=10) against
        every step-one candidate cosmicid. All hits are collected, reduced to
        the single best-scoring hit per source line id, and the survivors are
        persisted via save_ai_result.

        Args:
            cosmic_data: rows previously loaded for *cosmicid*.
            field_type: column key being analyzed ('gngc' / 'zgcms').
            query_type: semantic query type label ('功能过程' / '子过程描述').
            target_cosmicids: candidate cosmicids produced by step one.
            cosmicid: the cosmicid under analysis.
            wfinstid: its workflow instance id.
            step: step number forwarded to save_ai_result.
            exclude_pswfinstid: optional workflow instance id exclusion filter.

        Returns:
            The deduplicated detailed-result dicts; empty list when the field
            has no content or no hits survived.
        """
        field_contents, field_gnrows, field_line_ids = self.deduplicate_content_with_gnrow(cosmic_data, field_type)

        if not field_contents:
            return []

        logger.info(f"处理{field_type}字段相似度查询")

        # Collect every hit first; deduplication happens afterwards.
        all_results = []
        detailed_results = []

        for target_cosmicid in target_cosmicids:
            for i, content in enumerate(field_contents):
                # One single-content search per (candidate, content) pair,
                # forwarding the optional exclusion filter.
                search_results = self._perform_batch_search([content], query_type, top_k=10,
                                                            target_cosmicid=target_cosmicid,
                                                            exclude_pswfinstid=exclude_pswfinstid)

                for response in search_results:
                    for result_item in response.results:
                        # Look up the gnrow of the matched content on the
                        # candidate's side (extra query per hit).
                        target_gnrow = self.get_gnrow_by_cosmicid_and_content(
                            result_item.cosmicid, result_item.content, field_type
                        )

                        rule_name = f"{query_type}相似度检查"
                        detailed_result = self._build_detailed_result(
                            result_item, response.query_text, field_type, rule_name, target_gnrow
                        )

                        # Source row id of the content that produced this hit.
                        current_line_id = field_line_ids[i] if i < len(field_line_ids) else ""

                        # Keep everything for the dedup pass below.
                        all_results.append({
                            'detailed_result': detailed_result,
                            'cosmicid': result_item.cosmicid,
                            'score': result_item.similarity_percentage,
                            'check_content': response.query_text,
                            'target_gnrow': target_gnrow,
                            'line_id': current_line_id,
                            'other_id': result_item.data_id
                        })

        # Deduplicate before saving: keep only the highest-scoring hit per line_id.
        best_results_by_line = {}
        for result_data in all_results:
            line_id = result_data['line_id']
            if line_id not in best_results_by_line:
                best_results_by_line[line_id] = result_data
            else:
                # Compare numerically — coerce to float in case scores arrive as strings.
                current_score = float(result_data['score'])
                existing_score = float(best_results_by_line[line_id]['score'])
                if current_score > existing_score:
                    best_results_by_line[line_id] = result_data

        # Unpack the surviving hits into parallel lists and persist them.
        if best_results_by_line:
            cosmicids = []
            scores = []
            check_contents = []
            target_gnrows = []
            line_ids = []
            other_ids = []

            for line_id, result_data in best_results_by_line.items():
                detailed_results.append(result_data['detailed_result'])
                cosmicids.append(result_data['cosmicid'])
                scores.append(result_data['score'])
                check_contents.append(result_data['check_content'])
                target_gnrows.append(result_data['target_gnrow'])
                line_ids.append(result_data['line_id'])
                other_ids.append(result_data['other_id'])

            logger.info(f"去重前结果数量: {len(all_results)}, 去重后结果数量: {len(best_results_by_line)}")

            # Persist the deduplicated hits.
            self.save_ai_result(
                cosmicid=cosmicid,
                wfinstid=wfinstid,
                step=step,
                field_type=field_type,
                similar_cosmicids=cosmicids,
                similarity_scores=scores,
                check_contents=check_contents,
                gnrow_numbers=target_gnrows,
                line_ids=line_ids,
                other_ids=other_ids
            )

        return detailed_results


    def analyze_multi_field_similarity_within_cosmic(self, cosmicid: str, fields: List[str]) -> Dict:
        """Check whether a cosmicid's own deduplicated contents are mutually similar.

        For each requested field, the unique contents are embedded with the
        injected semantic service's model and compared pairwise by cosine
        similarity; pairs at or above the per-field threshold are counted.

        Args:
            cosmicid: cosmicid whose rows are compared against each other.
            fields: field keys to analyze ('gnyhxq', 'gngc', 'zgcms');
                unknown keys are skipped with a warning.

        Returns:
            {'status': 'success', 'field_results': {...}, 'summary': {...}}
            on success, or {'status': 'error', ...} when no data exists.

        Raises:
            Exception: re-raised after logging on any analysis failure.

        NOTE(review): unlike analyze_cosmic_similarity, self.semantic_service
        is not checked for None here — a missing service surfaces as an
        AttributeError in the encode loop below; confirm callers guarantee it.
        """
        try:
            logger.info(f"开始分析cosmicid {cosmicid} 中字段 {fields} 的内部相似度")

            # Load the raw rows for this cosmicid.
            cosmic_data = self.query_cosmic_data(cosmicid)
            if not cosmic_data:
                logger.warning(f"未找到cosmicid: {cosmicid} 的数据")
                return {'status': 'error', 'message': '未找到数据'}

            # Field key -> query-type label; the label is also the threshold key.
            field_mapping = {
                'gnyhxq': '功能用户需求',
                'gngc': '功能过程',
                'zgcms': '子过程描述'
            }

            result = {
                'status': 'success',
                'cosmicid': cosmicid,
                'analyzed_fields': fields,
                'field_results': {},
                'summary': {
                    'total_fields': len(fields),
                    'fields_with_high_similarity': 0,
                    'total_high_similarity_pairs': 0
                }
            }

            # Analyze each requested field independently.
            for field in fields:
                if field not in field_mapping:
                    logger.warning(f"未知字段: {field}，跳过分析")
                    continue

                query_type = field_mapping[field]
                # Per-field threshold from config, defaulting to 0.8 (fraction, not percent).
                threshold = Config.SIMILARITY_THRESHOLDS.get(query_type, 0.8)

                logger.info(f"分析字段 {field}，使用阈值: {threshold}")

                # Deduplicate this field's contents first.
                field_contents = self.deduplicate_content(cosmic_data, field)

                field_result = {
                    'field_name': field,
                    'query_type': query_type,
                    'threshold': threshold,
                    'total_unique_contents': len(field_contents),
                    'unique_contents': field_contents,
                    'similarity_pairs': [],
                    'high_similarity_pairs': 0,
                    'has_high_similarity': False
                }

                if len(field_contents) < 2:
                    field_result['message'] = '去重后内容少于2条，无需进行相似度比较'
                    result['field_results'][field] = field_result
                    continue

                # Count the pairs that exceed the threshold.
                high_similarity_count = 0

                # Embed every unique content with the semantic service's model.
                embeddings = []
                for content in field_contents:
                    embedding = self.semantic_service.model.encode([content])[0]
                    embeddings.append(embedding)

                # Compare every unordered pair.
                for i in range(len(field_contents)):
                    for j in range(i + 1, len(field_contents)):
                        # Cosine similarity between the two embeddings.
                        similarity = np.dot(embeddings[i], embeddings[j]) / (
                                np.linalg.norm(embeddings[i]) * np.linalg.norm(embeddings[j])
                        )

                        # To percent; negative similarities are clamped to 0.
                        similarity_percentage = max(0, similarity * 100)

                        pair_info = {
                            'content_1': field_contents[i],
                            'content_2': field_contents[j],
                            'similarity_score': float(similarity),
                            'similarity_percentage': float(similarity_percentage),
                            'exceeds_threshold': similarity_percentage >= (threshold * 100)
                        }

                        field_result['similarity_pairs'].append(pair_info)

                        if pair_info['exceeds_threshold']:
                            high_similarity_count += 1
                            logger.info(f"字段 {field} 发现高相似度内容对: {similarity_percentage:.2f}%")

                field_result['high_similarity_pairs'] = high_similarity_count
                field_result['has_high_similarity'] = high_similarity_count > 0

                # Roll the per-field counts into the overall summary.
                if field_result['has_high_similarity']:
                    result['summary']['fields_with_high_similarity'] += 1
                result['summary']['total_high_similarity_pairs'] += high_similarity_count

                result['field_results'][field] = field_result

                logger.info(
                    f"字段 {field} 分析完成 - 总内容数: {len(field_contents)}, 高相似度对数: {high_similarity_count}")

            logger.info(
                f"多字段分析完成 - 分析字段数: {len(fields)}, 有高相似度的字段数: {result['summary']['fields_with_high_similarity']}")
            return result

        except Exception as e:
            logger.error(f"多字段内部相似度分析失败: {e}")
            raise

    def get_cosmicid_by_pswfinstid(self, pswfinstid: str) -> str:
        """Return the most recently created active cosmicid for *pswfinstid*.

        Returns:
            The cosmicid, or None when no active ('1') record exists.

        Raises:
            Exception: re-raised after logging on any query failure.
        """
        sql = """
        SELECT cosmicid 
        FROM ( 
          SELECT c.cosmicid 
          FROM tb_cosmic c 
          WHERE c.pswfinstid = :pswfinstid AND c.state = '1'
          ORDER BY c.createtime DESC 
        ) 
        WHERE ROWNUM = 1
        """

        try:
            with self._get_db_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(sql, {'pswfinstid': pswfinstid})
                row = cursor.fetchone()
                return row[0] if row else None
        except Exception as e:
            logger.error(f"根据pswfinstid查询cosmicid失败: {e}")
            raise
