import hashlib
import logging
import os
import pickle
import uvicorn
import numpy as np
import cx_Oracle
import faiss
import asyncio
import gc
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
from cosmic_similarity_analyzer import CosmicSimilarityAnalyzer


from datetime import datetime
from typing import List, Dict
from pathlib import Path
from fastapi import FastAPI, HTTPException, BackgroundTasks,Request
from fastapi.openapi.models import Response
from sentence_transformers import SentenceTransformer
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse

# 导入配置
from config import Config
# 在现有导入语句后添加
from models import (
    QueryRequest,
    BatchQueryRequest,
    UpdateRequest,
    BatchUpdateRequest,
    DeleteRequest,
    SimilarityResult,
    QueryResponse,
    ThresholdUpdateRequest,
    MultiFieldSimilarityRequest,
    PswfinstidSimilarityRequest
)

# Access token required by the frontend (value supplied via config)
FRONTEND_TOKEN = Config.FRONTEND_TOKEN

# Logging setup: messages go both to a log file and to the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('semantic_api.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Globals controlling the background auto-update timer thread
auto_update_timer = None  # threading.Thread running the timer loop, or None
timer_running = False  # loop-control flag read by the timer thread

# Single-task guard: at most one incremental-update task may run at a time
update_task_running = False
update_task_lock = threading.Lock()


# Timer helpers for the periodic incremental-update job
def start_auto_update_timer():
    """Start the background auto-update timer thread.

    Spawns a daemon thread that sleeps Config.AUTO_UPDATE_INTERVAL seconds
    per cycle, then runs ``semantic_service.incremental_update()``. A cycle
    is skipped when the previous update is still running (single-task mode,
    guarded by ``update_task_lock``). No-op when disabled in config or when
    the timer is already running.

    NOTE(review): relies on a module-level ``semantic_service`` instance
    existing before the first tick fires — confirm initialization order.
    """
    global auto_update_timer, timer_running

    if not Config.ENABLE_AUTO_UPDATE_TIMER:
        logger.info("自动更新定时器已禁用")
        return

    if timer_running:
        logger.info("定时器已在运行中")
        return

    def timer_task():
        # Loop until timer_running is cleared by stop_auto_update_timer().
        global timer_running, update_task_running
        timer_running = True
        logger.info(f"启动自动更新定时器，间隔: {Config.AUTO_UPDATE_INTERVAL}秒")

        while timer_running:
            try:
                time.sleep(Config.AUTO_UPDATE_INTERVAL)
                if timer_running:  # re-check: the timer may have been stopped during sleep
                    # Skip this tick if a previous update task is still in flight.
                    with update_task_lock:
                        if update_task_running:
                            logger.info("上次增量更新任务仍在执行中，跳过本次定时触发")
                            continue

                        # Claim the single-task slot before releasing the lock.
                        update_task_running = True
                        logger.info("开始执行定时增量更新任务")

                    try:
                        start_time = time.time()
                        semantic_service.incremental_update()
                        end_time = time.time()
                        logger.info(f"定时增量更新任务完成，耗时: {end_time - start_time:.2f}秒")
                    except Exception as e:
                        logger.error(f"定时增量更新任务失败: {e}")
                    finally:
                        # Always release the single-task slot, even on failure.
                        with update_task_lock:
                            update_task_running = False
                            logger.info("增量更新任务状态已重置，可以接受下次定时触发")
            except Exception as e:
                logger.error(f"定时器任务异常: {e}")
                # Defensive: release the slot if the loop body itself failed.
                with update_task_lock:
                    update_task_running = False

    auto_update_timer = threading.Thread(target=timer_task, daemon=True)
    auto_update_timer.start()
    logger.info("自动更新定时器已启动")

def stop_auto_update_timer():
    """Signal the auto-update timer loop to exit after its current cycle.

    Only clears the flag; the daemon thread notices it on its next check.
    """
    global timer_running
    timer_running = False
    logger.info("自动更新定时器已停止")


def get_timer_status():
    """Report the timer configuration together with its current run/task state."""
    status = {
        "enabled": Config.ENABLE_AUTO_UPDATE_TIMER,
        "running": timer_running,
        "interval": Config.AUTO_UPDATE_INTERVAL,
        "task_running": update_task_running,  # True while an incremental update is executing
        "description": "单任务模式：同一时间只能有一个增量更新任务执行",
    }
    return status

# 语义相似度服务类
class SemanticSimilarityService:
    def __init__(self, model_path: str, cache_dir: str,batch_size: int = 1024):
        """Load the sentence-transformer model and all on-disk caches.

        Args:
            model_path: Local path of the SentenceTransformer model.
            cache_dir: Directory holding the pickled caches and FAISS index files.
            batch_size: Batch size used when encoding texts.
        """
        self.model_path = model_path
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(exist_ok=True)
        self.batch_size = batch_size

        # Threading configuration
        self.embedding_lock = threading.Lock()  # guards the embedding cache
        self.data_lock = threading.Lock()  # guards the data cache
        # NOTE(review): the two assignments below look swapped relative to their
        # names (max_workers <- MAX_EMBEDDING_WORKERS, embedding_workers <-
        # MAX_INDEX_WORKERS) — confirm against the config's intent.
        self.max_workers = Config.MAX_EMBEDDING_WORKERS  # general worker-thread limit
        self.embedding_workers = Config.MAX_INDEX_WORKERS  # embedding-generation threads

        # Logical-deletion markers: data_ids to be filtered out of search results
        self.deleted_data_ids = set()

        # Per-field index mapping: FAISS position i -> field_index_mapping[field][i] (a data_id)
        self.field_index_mapping = {
            'gnyhxq': [],  # FAISS index position -> data_id mapping
            'gngc': [],
            'zgcms': []
        }

        # Content dedup map: content_hash -> data_id of the first record seen with that content
        self.content_dedup_mapping = {
            'gnyhxq': {},
            'gngc': {},
            'zgcms': {}
        }

        self.max_processed_id = 0  # highest TB_COSMIC_DATA.id already indexed

        # Model initialisation
        logger.info(f"正在加载模型: {model_path}")

        if Config.OFFLINE:
            logger.info(f"以离线模式加载本地模型: {model_path}")
            if not os.path.exists(model_path):
                raise FileNotFoundError(f"模型路径不存在: {model_path}")
            self.model = SentenceTransformer(model_path, device=Config.DEVICE_MODE)
            logger.info(f"模型加载完成，使用设备: {Config.DEVICE_MODE}")
        else:
            # Online mode: download by name, then persist a local copy for next time.
            logger.info(f"以在线模式加载模型: {Config.MODEL_NAME}")
            self.model = SentenceTransformer(Config.MODEL_NAME, device=Config.DEVICE_MODE)
            self.model.save(model_path)
            logger.info(f"模型加载完成，使用设备: {Config.DEVICE_MODE}")

        # In-memory caches
        self.data_cache = {}  # data_id (str) -> full record dict
        self.embedding_cache = {}  # text hash -> embedding vector
        self.faiss_indices = {}  # field -> FAISS index

        # Oracle connection settings
        self.db_config = {
            'user': Config.ORACLE_USER,
            'password': Config.ORACLE_PASSWORD,
            'dsn': Config.ORACLE_DSN
        }

        # Populate caches from disk, if present
        self._load_cache()

    def _get_content_hash(self, content: str) -> str:
        """生成内容的哈希值用于去重"""
        if not content or not content.strip():
            return ""
        return hashlib.md5(content.strip().encode('utf-8')).hexdigest()

    def _get_db_connection(self):
        """Open and return a new cx_Oracle connection.

        Raises:
            HTTPException: 500 when the connection cannot be established.
        """
        try:
            # Fix: leftover debug print() calls bypassed the logging setup;
            # route them through the module logger instead.
            logger.debug(f"正在连接数据库 (SID格式): {self.db_config['dsn']}")
            return cx_Oracle.connect(
                user=self.db_config['user'],
                password=self.db_config['password'],
                dsn=self.db_config['dsn']
            )
        except Exception as e:
            logger.error(f"数据库连接失败: {type(e).__name__}: {str(e)}")
            # Chain the cause so the original driver error stays in the traceback.
            raise HTTPException(status_code=500, detail=f"数据库连接失败: {str(e)}") from e

    def _load_cache(self):
        """Load all persisted caches from cache_dir into memory.

        Restores the data cache, embedding cache, index/dedup mappings,
        per-field FAISS indices, the max-processed-id high-water mark and
        the logical-deletion markers. Any failure resets every cache to a
        clean empty state.
        """
        cache_files = {
            'data': self.cache_dir / 'data_cache.pkl',
            'embeddings': self.cache_dir / 'embedding_cache.pkl',
            'index_mapping': self.cache_dir / 'field_index_mapping.pkl',
            'dedup_mapping': self.cache_dir / 'content_dedup_mapping.pkl',
            'faiss_gnyhxq': self.cache_dir / 'faiss_gnyhxq.index',
            'faiss_gngc': self.cache_dir / 'faiss_gngc.index',
            'faiss_zgcms': self.cache_dir / 'faiss_zgcms.index'
        }

        try:
            # Data cache (pickle: assumes trusted local files only —
            # pickle.load is unsafe on untrusted input)
            if cache_files['data'].exists():
                with open(cache_files['data'], 'rb') as f:
                    self.data_cache = pickle.load(f)
                logger.info(f"加载数据缓存: {len(self.data_cache)} 条记录")

            # Embedding-vector cache
            if cache_files['embeddings'].exists():
                with open(cache_files['embeddings'], 'rb') as f:
                    self.embedding_cache = pickle.load(f)
                logger.info(f"加载嵌入向量缓存: {len(self.embedding_cache)} 条记录")

            # FAISS position -> data_id mapping per field
            if cache_files['index_mapping'].exists():
                with open(cache_files['index_mapping'], 'rb') as f:
                    self.field_index_mapping = pickle.load(f)

            # content_hash -> first data_id mapping per field
            if cache_files['dedup_mapping'].exists():
                with open(cache_files['dedup_mapping'], 'rb') as f:
                    self.content_dedup_mapping = pickle.load(f)

            # FAISS indices, one file per field
            for field in ['gnyhxq', 'gngc', 'zgcms']:
                faiss_file = cache_files[f'faiss_{field}']
                if faiss_file.exists():
                    self.faiss_indices[field] = faiss.read_index(str(faiss_file))
                    logger.info(f"加载FAISS索引 {field}: {self.faiss_indices[field].ntotal} 条记录")

            # High-water mark used by incremental updates
            max_id_file = self.cache_dir / 'max_processed_id.txt'
            if max_id_file.exists():
                with open(max_id_file, 'r') as f:
                    self.max_processed_id = int(f.read().strip())

            logger.info(f"当前最大索引ID为: {self.max_processed_id}")

            # Logical-deletion markers (method defined elsewhere in this class)
            self._load_deleted_ids()

        except Exception as e:
            logger.error(f"加载缓存失败: {e}")
            self._reset_cache()

    def _reset_cache(self):
        """重置所有缓存"""
        self.data_cache = {}
        self.embedding_cache = {}
        self.faiss_indices = {}
        self.field_index_mapping = {'gnyhxq': [], 'gngc': [], 'zgcms': []}
        self.content_dedup_mapping = {'gnyhxq': {}, 'gngc': {}, 'zgcms': {}}
        self.max_processed_id = 0

    def _save_cache(self):
        """Persist all in-memory caches and FAISS indices to the cache directory."""
        try:
            # Pickled payloads, written in a fixed order.
            pickle_targets = [
                ('data_cache.pkl', self.data_cache),
                ('embedding_cache.pkl', self.embedding_cache),
                ('field_index_mapping.pkl', self.field_index_mapping),
                ('content_dedup_mapping.pkl', self.content_dedup_mapping),
            ]
            for filename, payload in pickle_targets:
                with open(self.cache_dir / filename, 'wb') as f:
                    pickle.dump(payload, f)

            # FAISS indices use faiss's own serializer.
            for field, index in self.faiss_indices.items():
                faiss.write_index(index, str(self.cache_dir / f'faiss_{field}.index'))

            # High-water mark for incremental updates.
            with open(self.cache_dir / 'max_processed_id.txt', 'w') as f:
                f.write(str(self.max_processed_id))

            logger.info("缓存保存完成")
        except Exception as e:
            logger.error(f"保存缓存失败: {e}")

    def _get_embedding(self, text: str) -> np.ndarray:
        """Return the embedding for *text*, using the in-memory cache.

        Falls back to a zero vector of Config.EMBEDDING_DIMENSION for empty
        input or on encoding failure.

        NOTE(review): the cache key uses the builtin hash(), which is salted
        per process for str — pickled embedding_cache entries cannot be hit
        after a restart. A stable digest (e.g. MD5) would make the persisted
        cache reusable; left unchanged here to avoid switching the key
        scheme mid-flight.
        """
        if not text or text.strip() == '':
            logger.warning("输入文本为空，返回零向量")
            return np.zeros(Config.EMBEDDING_DIMENSION)

        text_hash = hash(text)
        if text_hash in self.embedding_cache:
            embedding = self.embedding_cache[text_hash]
            logger.debug(f"从缓存获取嵌入向量: {text[:50]}... -> norm: {np.linalg.norm(embedding):.6f}")
            return embedding

        try:
            embedding = self.model.encode([text])[0]

            # Sanity-check the produced vector.
            norm = np.linalg.norm(embedding)
            if norm < 1e-8:
                logger.warning(f"嵌入向量接近零向量: {text[:50]}... -> norm: {norm}")
            else:
                logger.debug(f"生成嵌入向量: {text[:50]}... -> norm: {norm:.6f}")

            self.embedding_cache[text_hash] = embedding
            return embedding

        except Exception as e:
            logger.error(f"生成嵌入向量失败: {text[:50]}... -> {str(e)}")
            # Fix: use the configured dimension instead of a hard-coded 1024,
            # keeping the failure fallback consistent with the empty-input path.
            return np.zeros(Config.EMBEDDING_DIMENSION)

    def _get_embeddings_batch(self, texts: List[str]) -> List[np.ndarray]:
        """Batch-encode *texts*, reusing cached embeddings where possible.

        Blank entries yield zero vectors; only cache misses (keyed by the
        hash of the stripped text) are sent to the model in one call.
        """
        embeddings_out = [None] * len(texts)
        pending_texts = []
        pending_positions = []

        # Pass 1: resolve blanks and cache hits, collect the misses.
        for pos, raw in enumerate(texts):
            if not raw or raw.strip() == '':
                embeddings_out[pos] = np.zeros(Config.EMBEDDING_DIMENSION)
                continue
            cleaned = raw.strip()
            cached = self.embedding_cache.get(hash(cleaned))
            if cached is not None:
                embeddings_out[pos] = cached
            else:
                pending_texts.append(cleaned)
                pending_positions.append(pos)

        # Pass 2: encode all misses in a single model call and fill the cache.
        if pending_texts:
            logger.info(f"批量编码 {len(pending_texts)} 条文本")
            fresh = self.model.encode(pending_texts, batch_size=self.batch_size)
            for pos, cleaned, vec in zip(pending_positions, pending_texts, fresh):
                self.embedding_cache[hash(cleaned)] = vec
                embeddings_out[pos] = vec

        return embeddings_out

    def _build_faiss_index(self, embeddings: np.ndarray) -> faiss.Index:
        """Build an IVF inner-product index over *embeddings*.

        Vectors are L2-normalised first, so inner-product scores equal
        cosine similarities. Empty input yields an untrained empty index
        of the configured dimension.
        """
        if embeddings.shape[0] == 0:
            dim = Config.EMBEDDING_DIMENSION
            empty_quantizer = faiss.IndexFlatIP(dim)
            return faiss.IndexIVFFlat(empty_quantizer, dim, Config.FAISS_NLIST, faiss.METRIC_INNER_PRODUCT)

        dim = embeddings.shape[1]

        # IVF index: the fixed choice here, suited for large collections.
        coarse_quantizer = faiss.IndexFlatIP(dim)
        index = faiss.IndexIVFFlat(coarse_quantizer, dim, Config.FAISS_NLIST, faiss.METRIC_INNER_PRODUCT)

        # Normalise a float32 copy so inner product == cosine similarity.
        normalized = embeddings.copy().astype('float32')
        faiss.normalize_L2(normalized)

        index.train(normalized)
        index.add(normalized)

        # Search-time probe count.
        index.nprobe = Config.FAISS_NPROBE

        logger.info(
            f"构建IVF索引完成: {embeddings.shape[0]} 条记录, 维度: {dim}, nlist: {Config.FAISS_NLIST}, nprobe: {Config.FAISS_NPROBE}")
        return index

    def initialize_index(self, batch_size: int = Config.INDEX_BATCH_SIZE):
        """Full (re)build of all caches and FAISS indices from the database.

        Scans TB_COSMIC_DATA in id-range windows (DB reads stay single
        threaded), resets every in-memory cache first, then builds the
        per-field FAISS indices and persists everything to cache_dir.

        Args:
            batch_size: Width of each id-range window per DB query.
        """
        try:
            with self._get_db_connection() as conn:
                cursor = conn.cursor()

                # Determine the id range of valid rows
                range_sql = """
                SELECT MIN(f.id), MAX(f.id) 
                FROM TB_COSMIC_DATA f
                JOIN tb_cosmic c ON c.cosmicid = f.cosmicid
                WHERE c.state = '1' and c.type = '1'
                """
                cursor.execute(range_sql)
                min_id, max_id = cursor.fetchone()
                if min_id is None:
                    logger.warning("数据库中没有有效数据")
                    return

                logger.info(f"数据ID范围: {min_id} - {max_id}")

                # Start from a clean slate
                self._reset_cache()

                # Walk the id range window by window
                current_id = min_id
                processed_count = 0
                batch_count = 0

                while current_id <= max_id:
                    batch_count += 1
                    end_id = current_id + batch_size

                    logger.info(f"处理批次: {batch_count}, ID范围: {current_id} - {end_id - 1}")

                    # Range query; CLOBs truncated to 4000 chars via DBMS_LOB.SUBSTR
                    sql = """
                    SELECT f.id, c.cosmicid, c.FIRMCODE, r.attrvaluestr AS contract_code, c.pswfinstid,
                        DBMS_LOB.SUBSTR(f.gnyhxq, 4000, 1) AS gnyhxq,
                        DBMS_LOB.SUBSTR(f.gngc, 4000, 1) AS gngc,
                        DBMS_LOB.SUBSTR(f.zgcms, 4000, 1) AS zgcms
                    FROM TB_COSMIC_DATA f
                    JOIN tb_cosmic c ON c.cosmicid = f.cosmicid
                    JOIN tb_upe_instanceattr r ON r.wfinstid = c.pswfinstid AND r.attrcode = 'contractcode'
                    WHERE c.state = '1'  and c.type = '1'
                        AND f.id >= :start_id 
                        AND f.id < :end_id
                    ORDER BY f.id
                    """

                    cursor.execute(sql, {'start_id': current_id, 'end_id': end_id})
                    rows = cursor.fetchall()

                    if not rows:
                        current_id = end_id
                        continue

                    # Ingest this window into caches and dedup maps
                    self._process_batch(rows)

                    processed_count += len(rows)
                    current_id = rows[-1][0] + 1

                    logger.info(f"已处理: {processed_count} 条记录")

                    # Periodic checkpoint + GC pass to bound memory
                    if processed_count % (batch_size * 20) == 0:
                        logger.info(f"中间保存缓存，已处理: {processed_count}")
                        self._save_cache()
                        self._cleanup_memory()

                # Build the per-field FAISS indices once all data is loaded
                logger.info("数据加载完成，开始多线程构建FAISS索引...")
                index_start_time = time.time()

                self._build_all_faiss_indices()

                index_time = time.time() - index_start_time
                logger.info(f"多线程索引构建完成，耗时: {index_time:.2f}秒")

                # Final persist + statistics
                self._save_cache()
                self._log_index_statistics()
                logger.info("分批索引初始化完成！")

        except Exception as e:
            logger.error(f"分批索引初始化失败: {str(e)}")
            raise

    def _process_batch(self, rows):
        """处理单个批次的数据"""
        for row in rows:
            # 只取前7个值，忽略多余的列
            data_id, cosmicid, firmcode, contract_code,pswfinstid,gnyhxq, gngc, zgcms = row[:8]
            self.max_processed_id = max(self.max_processed_id, data_id)

            # 存储完整数据
            self.data_cache[str(data_id)] = {
                'id': str(data_id),
                'cosmicid': cosmicid or '',
                'firmcode': firmcode or '',
                'contract_code': contract_code or '',
                'pswfinstid': pswfinstid or '',
                'gnyhxq': gnyhxq or '',
                'gngc': gngc or '',
                'zgcms': zgcms or ''
            }

            # 处理去重映射
            for field in ['gnyhxq', 'gngc', 'zgcms']:
                content = locals()[field]
                if content and content.strip():
                    content_hash = self._get_content_hash(content)
                    if content_hash not in self.content_dedup_mapping[field]:
                        self.content_dedup_mapping[field][content_hash] = str(data_id)

    def _build_all_faiss_indices(self):
        """Build the FAISS index for every searchable field, one field at a time."""
        for field_name in ('gnyhxq', 'gngc', 'zgcms'):
            logger.info(f"开始构建 {field_name} 字段的索引...")
            self._build_field_index_batch(field_name)

    def _build_field_index_batch(self, field, embedding_batch_size=Config.MAX_EMBEDDING_BATCH_SIZE):
        """Build one field's FAISS index from its deduplicated contents, in chunks."""
        # Gather (content, data_id) pairs for every unique content hash that
        # still resolves to a cached record with non-blank text.
        field_data = []
        for _content_hash, owner_id in self.content_dedup_mapping[field].items():
            record = self.data_cache.get(owner_id)
            if record is None:
                continue
            text = record[field]
            if text and text.strip():
                field_data.append((text.strip(), owner_id))

        if not field_data:
            logger.info(f"字段 {field} 没有有效内容，跳过")
            return

        logger.info(f"字段 {field} 去重后有 {len(field_data)} 条唯一内容")

        # Encode in chunks to bound memory, accumulating vectors and ids.
        all_embeddings = []
        all_data_ids = []
        total_batches = (len(field_data) - 1) // embedding_batch_size + 1

        for batch_no, start in enumerate(range(0, len(field_data), embedding_batch_size), start=1):
            chunk = field_data[start:start + embedding_batch_size]

            logger.info(
                f"生成嵌入向量批次 {batch_no}/{total_batches}")

            chunk_texts = [pair[0] for pair in chunk]
            all_embeddings.extend(self._get_embeddings_batch(chunk_texts))
            all_data_ids.extend(pair[1] for pair in chunk)

        # Build the index and record the position -> data_id mapping.
        self.faiss_indices[field] = self._build_faiss_index(np.array(all_embeddings))
        self.field_index_mapping[field] = list(all_data_ids)

        logger.info(f"字段 {field} 索引构建完成: {len(all_data_ids)} 条记录")

    def _cleanup_memory(self):
        """Trigger an explicit garbage-collection pass (used between batches)."""
        gc.collect()
        logger.info("执行内存清理")

    def _log_index_statistics(self):
        """Log per-field record/dedup/index counts plus overall cache stats."""
        logger.info("=== 索引统计信息 ===")
        logger.info(f"总数据记录数: {len(self.data_cache)}")
        logger.info(f"最大处理ID: {self.max_processed_id}")

        for field in ('gnyhxq', 'gngc', 'zgcms'):
            with_content = sum(
                1 for record in self.data_cache.values() if record.get(field, '').strip()
            )
            deduped = len(self.content_dedup_mapping[field])
            indexed = len(self.field_index_mapping[field])
            logger.info(f"{field}: 总记录{with_content}, 去重后{deduped}, 索引{indexed}")

    def search_similar(self, query_request: QueryRequest) -> QueryResponse:
        """Search the field's FAISS index for contents similar to the query.

        Candidates pass, in order: similarity threshold per query type,
        logical-deletion filter, optional pswfinstid exclusion, and the
        exact-match filters (cosmicid / contract_code / firmcode). Scores
        are inner products of L2-normalised vectors, i.e. cosine
        similarities in [-1, 1].
        """
        start_time = datetime.now()

        # Map the external query-type label to the internal field name
        type_mapping = {
            '功能用户需求': 'gnyhxq',
            '功能过程': 'gngc',
            '子过程描述': 'zgcms'
        }

        field = type_mapping.get(query_request.query_type)
        if not field:
            raise HTTPException(status_code=400, detail="无效的查询类型")

        if field not in self.faiss_indices or len(self.field_index_mapping[field]) == 0:
            logger.warning(f"字段 {field} 没有可用的索引")
            return QueryResponse(
                query_text=query_request.query_text,
                query_type=query_request.query_type,
                results=[],
                total_count=0,
                execution_time=(datetime.now() - start_time).total_seconds()
            )

        try:
            # Per-query-type similarity threshold (defaults to 0.9)
            similarity_threshold = Config.SIMILARITY_THRESHOLDS.get(query_request.query_type, 0.9)
            logger.info(f"使用相似度阈值: {similarity_threshold} for {query_request.query_type}")

            # Embed the query text
            query_embeddings = self._get_embeddings_batch([query_request.query_text])
            query_embedding = query_embeddings[0].reshape(1, -1).astype('float32')

            logger.info(f"查询嵌入向量: {query_embedding[0][:5]}...")  # first 5 components

            faiss.normalize_L2(query_embedding)

            # Debug info
            logger.info(f"查询文本: {query_request.query_text[:50]}...")
            logger.info(f"查询嵌入向量范数: {np.linalg.norm(query_embedding)}")

            # Over-fetch candidates to compensate for threshold filtering
            search_k = min(
                query_request.top_k * 5,  # widen the search to offset filtered-out hits
                self.faiss_indices[field].ntotal,
                2000
            )

            # FAISS search — inner-product scores (cosine similarity for normalised vectors)
            scores, indices = self.faiss_indices[field].search(query_embedding, search_k)

            # Debug: score range sanity check
            logger.info(f"FAISS搜索返回的分数范围: {scores[0].min():.6f} ~ {scores[0].max():.6f}")
            logger.info(f"前5个分数: {scores[0][:5]}")

            # Assemble the results
            results = []
            seen_data_ids = set()
            filtered_by_threshold = 0
            filtered_by_deletion = 0

            for raw_score, idx in zip(scores[0], indices[0]):
                # FAISS pads missing hits with -1; also guard the mapping bound
                if idx < 0 or idx >= len(self.field_index_mapping[field]):
                    continue

                data_id = self.field_index_mapping[field][idx]

                if data_id in seen_data_ids:
                    continue
                seen_data_ids.add(data_id)

                # Skip logically deleted records
                if data_id in self.deleted_data_ids:
                    logger.debug(f"跳过已删除的数据ID: {data_id}")
                    filtered_by_deletion += 1
                    continue

                if data_id not in self.data_cache:
                    logger.warning(f"数据ID {data_id} 不在缓存中")
                    continue

                data = self.data_cache[data_id]

                # Optionally exclude records from the same workflow instance
                if query_request.exclude_pswfinstid:
                    if data.get('pswfinstid') == query_request.exclude_pswfinstid:
                        logger.debug(f"跳过相同pswfinstid的数据: {data.get('pswfinstid')}")
                        continue

                if not self._apply_filters(data, query_request):
                    continue

                content = data.get(field, '').strip()
                if not content:
                    continue

                # The inner product of normalised vectors is already the
                # cosine similarity in [-1, 1], 1 meaning most similar
                cosine_similarity = float(raw_score)

                # Clamp against numeric drift
                cosine_similarity = max(-1.0, min(1.0, cosine_similarity))

                # Apply the similarity threshold
                if cosine_similarity < similarity_threshold:
                    logger.debug(
                        f"相似度过低被过滤: {cosine_similarity:.6f} < {similarity_threshold}, 内容: {content[:30]}...")
                    filtered_by_threshold += 1
                    continue

                # Percentage score (negative similarity maps to 0)
                if cosine_similarity < 0:
                    similarity_percentage = 0.0
                else:
                    similarity_percentage = cosine_similarity * 100

                # Debug info
                logger.debug(
                    f"通过阈值检查 - 内容: {content[:30]}... | 原始分数: {raw_score:.6f} | 余弦相似度: {cosine_similarity:.6f} | 百分比: {similarity_percentage:.2f}%")

                results.append(SimilarityResult(
                    data_id=data_id,
                    cosmicid=data['cosmicid'],
                    firmcode=data['firmcode'],
                    contract_code=data['contract_code'],
                    content=content,
                    similarity_score=cosine_similarity,  # the clamped cosine similarity
                    similarity_percentage=similarity_percentage
                ))

                if len(results) >= query_request.top_k:
                    break

            # Highest similarity first
            results.sort(key=lambda x: x.similarity_score, reverse=True)

            # Filtering statistics
            logger.info(
                f"搜索完成 - 返回结果: {len(results)}, 相似度过滤: {filtered_by_threshold}, 删除过滤: {filtered_by_deletion}")

            return QueryResponse(
                query_text=query_request.query_text,
                query_type=query_request.query_type,
                results=results,
                total_count=len(results),
                execution_time=(datetime.now() - start_time).total_seconds()
            )

        except Exception as e:
            logger.error(f"搜索过程中发生错误: {str(e)}")
            raise HTTPException(status_code=500, detail=f"搜索失败: {str(e)}")

    def batch_search(self, batch_request: BatchQueryRequest) -> List[QueryResponse]:
        """Run search_similar for every query in the batch.

        A failed query does not abort the batch: it yields an empty
        QueryResponse carrying the error message instead.
        """
        try:
            responses: List[QueryResponse] = []

            for single_query in batch_request.queries:
                try:
                    responses.append(self.search_similar(single_query))
                except Exception as e:
                    logger.error(f"批量搜索中单个查询失败: {e}, 查询: {single_query.query_text}")
                    # NOTE(review): unlike the other QueryResponse constructions
                    # in this class, no execution_time is passed here — confirm
                    # the model declares that field optional.
                    responses.append(QueryResponse(
                        query_text=single_query.query_text,
                        query_type=single_query.query_type,
                        results=[],
                        total_count=0,
                        error=str(e)
                    ))

            return responses

        except Exception as e:
            logger.error(f"批量搜索失败: {e}")
            raise

    def _calculate_similarity_percentage(self, cosine_similarity: float) -> float:
        """将余弦相似度转换为百分制分数 - 修正版本"""
        # 对于归一化向量的内积，范围是[-1, 1]
        # 只保留正相似度，负相似度设为0
        if cosine_similarity < 0:
            return 0.0
        else:
            # 将[0, 1]映射到[0, 100]
            return cosine_similarity * 100

    def _apply_filters(self, data: Dict, query_request: QueryRequest) -> bool:
        """应用过滤条件 - 改进匹配逻辑"""
        if query_request.cosmicid:
            if data.get('cosmicid', '').strip() != query_request.cosmicid.strip():
                return False

        if query_request.contract_code:
            if data.get('contract_code', '').strip() != query_request.contract_code.strip():
                return False

        if query_request.firmcode:
            if data.get('firmcode', '').strip() != query_request.firmcode.strip():
                return False

        return True

    def incremental_update(self):
        """Fetch rows with id > max_processed_id and fold them into caches/indices.

        Fixes vs. the previous version:
        - the SELECT now also returns c.pswfinstid, so incrementally added
          records carry the same 'pswfinstid' key as records loaded by
          initialize_index (required by search_similar's exclude_pswfinstid
          filter, which otherwise could never match these records);
        - field values are read from the cached record instead of locals().
        """
        try:
            logger.info("开始增量更新索引...")

            with self._get_db_connection() as conn:
                cursor = conn.cursor()

                # New rows only: id strictly greater than the high-water mark
                sql = """
                   SELECT f.id, c.cosmicid, c.FIRMCODE, r.attrvaluestr AS contract_code, c.pswfinstid,
                       DBMS_LOB.SUBSTR(f.gnyhxq, 4000, 1) AS gnyhxq,
                       DBMS_LOB.SUBSTR(f.gngc, 4000, 1) AS gngc,
                       DBMS_LOB.SUBSTR(f.zgcms, 4000, 1) AS zgcms
                   FROM TB_COSMIC_DATA f
                   JOIN tb_cosmic c ON c.cosmicid = f.cosmicid
                   JOIN tb_upe_instanceattr r ON r.wfinstid = c.pswfinstid AND r.attrcode = 'contractcode'
                   WHERE c.state = '1' AND f.id > :max_id and c.type = '1'
                   ORDER BY f.id
                   """

                cursor.execute(sql, max_id=self.max_processed_id)
                new_rows = cursor.fetchall()

                if not new_rows:
                    logger.info("没有新数据需要更新")
                    return

                logger.info(f"发现 {len(new_rows)} 条新数据")

            # Collect new unique content per field while filling the data cache
            new_max_id = 0
            new_field_data = {'gnyhxq': [], 'gngc': [], 'zgcms': []}
            new_field_ids = {'gnyhxq': [], 'gngc': [], 'zgcms': []}

            for row in new_rows:
                data_id, cosmicid, firmcode, contract_code, pswfinstid, gnyhxq, gngc, zgcms = row
                new_max_id = max(new_max_id, data_id)

                # Cache the record in the same shape as _process_batch
                record = {
                    'id': str(data_id),
                    'cosmicid': cosmicid or '',
                    'firmcode': firmcode or '',
                    'contract_code': contract_code or '',
                    'pswfinstid': pswfinstid or '',
                    'gnyhxq': gnyhxq or '',
                    'gngc': gngc or '',
                    'zgcms': zgcms or ''
                }
                self.data_cache[str(data_id)] = record

                for field in ('gnyhxq', 'gngc', 'zgcms'):
                    content = record[field]
                    if content and content.strip():
                        content_hash = self._get_content_hash(content)

                        # Only genuinely new content gets embedded and indexed
                        if content_hash not in self.content_dedup_mapping[field]:
                            self.content_dedup_mapping[field][content_hash] = str(data_id)
                            new_field_data[field].append(content.strip())
                            new_field_ids[field].append(str(data_id))

            # Extend (or create) each field's FAISS index with the new vectors
            for field in ('gnyhxq', 'gngc', 'zgcms'):
                if new_field_data[field]:
                    logger.info(f"为字段 {field} 增加 {len(new_field_data[field])} 条新记录")

                    new_embeddings = self._get_embeddings_batch(new_field_data[field])
                    new_embeddings_array = np.array(new_embeddings, dtype=np.float32)
                    faiss.normalize_L2(new_embeddings_array)

                    if field not in self.faiss_indices:
                        # First data for this field: build a fresh index
                        self.faiss_indices[field] = self._build_faiss_index(new_embeddings_array)
                        self.field_index_mapping[field] = new_field_ids[field].copy()
                    else:
                        self.faiss_indices[field].add(new_embeddings_array)
                        self.field_index_mapping[field].extend(new_field_ids[field])

            # Advance the high-water mark and persist everything
            self.max_processed_id = new_max_id
            self._save_cache()

            logger.info(f"增量更新完成！处理了 {len(new_rows)} 条新数据，最大ID更新为 {new_max_id}")

        except Exception as e:
            logger.error(f"增量更新失败: {str(e)}")
            raise

    def get_update_status(self):
        """Summarize incremental-update progress plus in-memory cache/index sizes.

        Returns a dict with the last processed id, the database's current max
        id, the pending gap between them, cache sizes, and per-field index
        row counts. On any failure a ``{'error': ...}`` dict is returned
        instead of raising.
        """
        try:
            # Same join/filter logic as the incremental-update query so both
            # agree on which rows are eligible.
            sql = """
                SELECT MAX(f.id) 
                FROM TB_COSMIC_DATA f
                JOIN tb_cosmic c ON c.cosmicid = f.cosmicid
                JOIN tb_upe_instanceattr r ON r.wfinstid = c.pswfinstid AND r.attrcode = 'contractcode'
                WHERE c.state = '1' and c.type = '1'
                """
            with self._get_db_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(sql)
                db_max_id = cursor.fetchone()[0] or 0

            # Per-field FAISS row counts; fields without an index report 0.
            index_stats = {}
            for field in ['gnyhxq', 'gngc', 'zgcms']:
                idx = self.faiss_indices.get(field)
                index_stats[field] = idx.ntotal if idx is not None else 0

            return {
                'max_processed_id': self.max_processed_id,
                'db_max_id': db_max_id,
                'pending_updates': max(0, db_max_id - self.max_processed_id),
                'cache_size': len(self.data_cache),
                'embedding_cache_size': len(self.embedding_cache),
                'index_stats': index_stats
            }
        except Exception as e:
            return {'error': str(e)}

    def delete_data(self, delete_request: DeleteRequest):
        """Logically delete cached records by cosmicid (no index rebuild).

        Args:
            delete_request: request whose ``data_ids`` field actually carries
                cosmicid values (historical field name kept for API
                compatibility).

        Returns:
            dict with the cosmicids processed, the number of data records
            marked deleted, and the affected data_ids.

        Raises:
            Exception: re-raised after logging on any failure.
        """
        try:
            cosmicids_to_delete = set(delete_request.data_ids)  # 实际上是cosmicid列表

            logger.info(f"开始根据cosmicid逻辑删除，涉及 {len(cosmicids_to_delete)} 个cosmicid")

            # 1. Resolve cosmicids to the concrete data_ids held in the cache.
            data_ids_to_delete = {
                data_id
                for data_id, data_record in self.data_cache.items()
                if data_record.get('cosmicid') in cosmicids_to_delete
            }

            logger.info(f"找到 {len(data_ids_to_delete)} 条数据需要删除")

            # 2. Mark each id deleted and evict it from the cache in a single
            # pass (the original did this in two separate loops over the same
            # set with a redundant membership re-check).
            deleted_count = 0
            for data_id in data_ids_to_delete:
                if data_id in self.data_cache:
                    self.deleted_data_ids.add(data_id)
                    del self.data_cache[data_id]
                    deleted_count += 1

            # 3. Persist deletion markers so they survive restarts.
            self._save_deleted_ids()

            logger.info(f"逻辑删除完成！标记删除了 {deleted_count} 条数据")

            return {
                "deleted_cosmicids": list(cosmicids_to_delete),
                "deleted_data_count": deleted_count,
                "deleted_data_ids": list(data_ids_to_delete)
            }

        except Exception as e:
            logger.error(f"根据cosmicid批量删除失败: {str(e)}")
            raise

    def _save_deleted_ids(self):
        """保存删除标记到文件"""
        try:
            deleted_ids_file = self.cache_dir / 'deleted_data_ids.pkl'
            with open(deleted_ids_file, 'wb') as f:
                pickle.dump(self.deleted_data_ids, f)
        except Exception as e:
            logger.error(f"保存删除标记失败: {e}")

    def _load_deleted_ids(self):
        """Load the logically-deleted id set from disk.

        Falls back to an empty set when the marker file is missing or any
        error occurs (errors are logged, never raised).
        """
        try:
            marker_file = self.cache_dir / 'deleted_data_ids.pkl'
            if not marker_file.exists():
                self.deleted_data_ids = set()
                return
            with open(marker_file, 'rb') as fh:
                self.deleted_data_ids = pickle.load(fh)
            logger.info(f"加载了 {len(self.deleted_data_ids)} 个删除标记")
        except Exception as e:
            logger.error(f"加载删除标记失败: {e}")
            self.deleted_data_ids = set()

    def update_similarity_thresholds(self, new_thresholds: Dict[str, float], save_to_file: bool = True):
        """Validate and apply new similarity thresholds to the in-memory config.

        Args:
            new_thresholds: mapping of display field name -> threshold in [0, 1].
            save_to_file: echoed back in the response.
                NOTE(review): nothing is actually persisted to a file here —
                confirm whether persistence is handled elsewhere.

        Returns:
            dict describing the applied update and the full current config.

        Raises:
            ValueError: on an out-of-range threshold or unknown field name.
        """
        # Accepted display names, hoisted out of the loop (the original
        # rebuilt this list on every iteration).
        valid_fields = ['功能用户需求', '功能过程', '子过程描述']
        try:
            for field, threshold in new_thresholds.items():
                # Range check first, then field-name check — same order as
                # before so error messages are unchanged.
                if not 0.0 <= threshold <= 1.0:
                    raise ValueError(f"阈值 {field} 的值 {threshold} 必须在 0.0 到 1.0 之间")
                if field not in valid_fields:
                    raise ValueError(f"无效的字段名: {field}，有效字段: {valid_fields}")

            # Apply to the shared in-memory configuration.
            Config.SIMILARITY_THRESHOLDS.update(new_thresholds)

            logger.info(f"相似度阈值已更新: {new_thresholds}")
            return {
                "status": "success",
                "message": "相似度阈值更新成功",
                "updated_thresholds": new_thresholds,
                "current_thresholds": Config.SIMILARITY_THRESHOLDS.copy(),
                "saved_to_file": save_to_file
            }

        except Exception as e:
            logger.error(f"更新相似度阈值失败: {e}")
            raise  # bare raise preserves the original traceback (was `raise e`)

    def get_similarity_thresholds(self):
        """Return the current similarity-threshold configuration with field descriptions."""
        descriptions = {
            "功能用户需求": "gnyhxq 字段的相似度阈值",
            "功能过程": "gngc 字段的相似度阈值",
            "子过程描述": "zgcms 字段的相似度阈值"
        }
        return {
            "status": "success",
            "thresholds": Config.SIMILARITY_THRESHOLDS.copy(),
            "description": descriptions
        }

    def _get_embeddings_batch_threaded(self, texts: List[str], max_workers: int = None) -> List[np.ndarray]:
        """Compute embeddings for a batch of texts using multiple worker threads.

        Cache hits are resolved first under ``embedding_lock``; only uncached
        texts are split into chunks and encoded in parallel.

        Args:
            texts: input strings; empty/blank entries map to a zero vector.
            max_workers: thread count; defaults to ``self.embedding_workers``.

        Returns:
            List of embedding vectors, positionally aligned with ``texts``.
        """
        if max_workers is None:
            max_workers = self.embedding_workers

        results = [None] * len(texts)
        uncached_items = []

        # Resolve cache hits first (the cache is shared across threads).
        with self.embedding_lock:
            for i, text in enumerate(texts):
                if not text or text.strip() == '':
                    # Blank input -> zero vector (model dim assumed 1024 — TODO confirm).
                    results[i] = np.zeros(1024)
                else:
                    # NOTE(review): built-in hash() of str is salted per
                    # process; if embedding_cache is ever persisted across
                    # runs these keys will not match — confirm the cache is
                    # process-local.
                    text_hash = hash(text.strip())
                    if text_hash in self.embedding_cache:
                        results[i] = self.embedding_cache[text_hash]
                    else:
                        uncached_items.append((i, text.strip(), text_hash))

        if not uncached_items:
            return results

        logger.info(f"多线程处理 {len(uncached_items)} 条未缓存文本，使用 {max_workers} 个线程")

        # Split the uncached texts into roughly equal chunks per worker.
        chunk_size = max(1, len(uncached_items) // max_workers)
        chunks = [uncached_items[i:i + chunk_size] for i in range(0, len(uncached_items), chunk_size)]

        # Keep per-call encode batches small, but never zero: the original
        # min(self.batch_size // max_workers, 64) collapsed to 0 whenever
        # batch_size < max_workers, breaking model.encode.
        chunk_batch_size = max(1, min(self.batch_size // max_workers, 64))

        def process_chunk(chunk):
            """Encode one chunk and publish results/cache entries under the lock."""
            try:
                chunk_texts = [item[1] for item in chunk]
                chunk_embeddings = self.model.encode(chunk_texts, batch_size=chunk_batch_size)

                # Thread-safe update of shared cache and result slots.
                with self.embedding_lock:
                    for (orig_idx, text, text_hash), embedding in zip(chunk, chunk_embeddings):
                        self.embedding_cache[text_hash] = embedding
                        results[orig_idx] = embedding

            except Exception as e:
                logger.error(f"处理文本块时出错: {e}")
                raise

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [executor.submit(process_chunk, chunk) for chunk in chunks]

            # Propagate the first worker failure, if any.
            for future in as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    logger.error(f"线程处理出错: {e}")
                    raise

        return results

    def _build_all_faiss_indices_threaded(self):
        """Build the FAISS index for every field in parallel, one worker per field."""
        fields = ['gnyhxq', 'gngc', 'zgcms']
        embedding_batch_size = Config.MAX_EMBEDDING_BATCH_SIZE
        worker_count = min(len(fields), self.max_workers)

        logger.info(f"开始多线程构建索引，使用 {worker_count} 个线程")
        started_at = time.time()

        def build_one(field):
            """Build a single field's index, logging entry/exit with the thread id."""
            thread_id = threading.current_thread().ident
            try:
                logger.info(f"线程 {thread_id} 开始构建字段 {field} 的索引")
                self._build_field_index_batch_threaded(field, embedding_batch_size)
                logger.info(f"线程 {thread_id} 完成字段 {field} 的索引构建")
            except Exception as e:
                logger.error(f"构建字段 {field} 索引时出错: {e}")
                raise

        # One worker per field; the first failure aborts the whole build.
        with ThreadPoolExecutor(max_workers=worker_count) as pool:
            pending = {pool.submit(build_one, f): f for f in fields}
            for done in as_completed(pending):
                field = pending[done]
                try:
                    done.result()
                except Exception as e:
                    logger.error(f"字段 {field} 索引构建失败: {e}")
                    raise
                else:
                    logger.info(f"字段 {field} 索引构建完成")

        logger.info(f"所有字段索引构建完成，总耗时: {time.time() - started_at:.2f}秒")

    def _build_field_index_batch_threaded(self, field: str, embedding_batch_size: int):
        """Build the FAISS index for one field from its deduplicated contents.

        Embeddings are generated single-threaded here to avoid nesting thread
        pools when called from the per-field worker threads.
        """
        # Snapshot (content, data_id) pairs for this field under the data lock.
        field_data = []
        with self.data_lock:
            for data_id in self.content_dedup_mapping[field].values():
                record = self.data_cache.get(data_id)
                if record is None:
                    continue
                content = record.get(field, '').strip()
                if content:
                    field_data.append((content, data_id))

        if not field_data:
            logger.warning(f"字段 {field} 没有有效内容")
            return

        logger.info(f"字段 {field} 去重后有 {len(field_data)} 条唯一内容")

        # Generate embeddings batch by batch, single-threaded.
        all_embeddings = []
        all_data_ids = []
        total_batches = (len(field_data) - 1) // embedding_batch_size + 1

        for batch_no, start in enumerate(range(0, len(field_data), embedding_batch_size), start=1):
            batch = field_data[start:start + embedding_batch_size]

            logger.info(
                f"字段 {field} - 生成嵌入向量批次 {batch_no}/{total_batches}")

            all_embeddings.extend(self._get_embeddings_batch([c for c, _ in batch]))
            all_data_ids.extend([i for _, i in batch])

        # NOTE(review): unlike the incremental-update path, this does not
        # force float32 or L2-normalize before indexing — presumably
        # _get_embeddings_batch/_build_faiss_index handle it; confirm.
        index = self._build_faiss_index(np.array(all_embeddings))

        # Publish index and id mapping atomically with respect to readers.
        with self.data_lock:
            self.faiss_indices[field] = index
            self.field_index_mapping[field] = list(all_data_ids)

        logger.info(f"字段 {field} 索引构建完成: {len(all_data_ids)} 条记录")

# Initialize the shared similarity service (loads model path and cache dir
# from Config; created once at import time and reused by every endpoint).
semantic_service = SemanticSimilarityService(
    model_path=Config.MODEL_PATH,
    cache_dir=Config.CACHE_DIR,
    batch_size=Config.EMBEDDING_BATCH_SIZE  # batch size taken from configuration
)
# FastAPI application instance.
app = FastAPI(
    title="语义相似度分析API",
    description="基于BGE模型的语义相似度分析服务",
    version="1.0.0"
)

# Static frontend assets, mounted after the app is created.
app.mount("/static", StaticFiles(directory="static"), name="static")
# Webfont files get their own mount point.
app.mount("/webfonts", StaticFiles(directory="webfonts"), name="webfonts")


# Root path: only the frontend page itself is token-gated.
@app.get("/")
async def read_root(request: Request):
    """Serve the frontend page when the ``token`` query parameter matches.

    Unauthenticated callers are stalled for FRONTEND_AUTH_TIMEOUT seconds
    and then answered with status 444 and an empty body.
    """
    # BUG FIX: the module-level `Response` is imported from
    # fastapi.openapi.models, which is an OpenAPI *schema* model, not an
    # HTTP response class — calling it with content/status_code fails at
    # runtime. Import the real response class locally instead.
    from fastapi import Response

    token = request.query_params.get("token")
    if token != FRONTEND_TOKEN:
        # Deliberately stall before rejecting, to slow down token guessing.
        await asyncio.sleep(Config.FRONTEND_AUTH_TIMEOUT)
        return Response(content="", status_code=444)
    return FileResponse('static/index.html')

@app.post("/search", response_model=QueryResponse)
async def search_similar(query_request: QueryRequest):
    """Search for semantically similar content; maps service errors to HTTP 500."""
    try:
        response = semantic_service.search_similar(query_request)
    except Exception as e:
        logger.error(f"搜索出错: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return response


@app.post("/batch_search", response_model=List[QueryResponse])
async def batch_search(batch_request: BatchQueryRequest):
    """Run several similarity searches in one request; maps service errors to HTTP 500."""
    try:
        responses = semantic_service.batch_search(batch_request)
    except Exception as e:
        logger.error(f"批量搜索出错: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return responses


@app.post("/update")
async def update_data(update_request: UpdateRequest):
    """Apply a single-record update; maps service errors to HTTP 500."""
    try:
        semantic_service.update_data(update_request)
    except Exception as e:
        logger.error(f"更新数据出错: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "数据更新成功", "data_id": update_request.data_id}


@app.post("/batch_update")
async def batch_update(batch_request: BatchUpdateRequest):
    """Apply a batch of record updates; maps service errors to HTTP 500."""
    try:
        semantic_service.batch_update(batch_request)
    except Exception as e:
        logger.error(f"批量更新出错: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "批量更新成功", "count": len(batch_request.updates)}


@app.post("/delete")
async def delete_data(delete_request: DeleteRequest):
    """Logically delete all records belonging to the given cosmicids.

    The request's ``data_ids`` field carries cosmicid values.
    """
    try:
        result = semantic_service.delete_data(delete_request)
    except Exception as e:
        logger.error(f"删除数据出错: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return {
        "message": "数据删除成功",
        "cosmicid_count": len(delete_request.data_ids),
        "deleted_data_count": result["deleted_data_count"],
        "details": result
    }


@app.post("/initialize")
async def initialize_index(background_tasks: BackgroundTasks):
    """Kick off a full index (re)build as a background task."""
    try:
        background_tasks.add_task(semantic_service.initialize_index)
    except Exception as e:
        logger.error(f"初始化索引出错: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "索引初始化任务已启动，请稍后查看日志"}


@app.get("/health")
async def health_check():
    """Liveness probe reporting cache sizes and which field indices exist."""
    svc = semantic_service
    return {
        "status": "healthy",
        "cache_size": len(svc.data_cache),
        "embedding_cache_size": len(svc.embedding_cache),
        "indices": list(svc.faiss_indices.keys())
    }


@app.post("/incremental_update")
async def incremental_update():
    """Manually trigger an incremental index update.

    Only one update may run at a time: a second call while one is in flight
    returns a ``skipped`` response instead of starting another.
    """
    global update_task_running

    # Claim the single-flight flag under the lock.
    with update_task_lock:
        if update_task_running:
            return {
                "status": "skipped",
                "message": "增量更新任务正在执行中，请稍后再试",
                "task_running": True
            }
        update_task_running = True
        logger.info("手动触发增量更新任务")

    try:
        start_time = time.time()
        # BUG FIX: the update is long-running and blocking; calling it
        # directly inside this async handler froze the event loop for the
        # whole duration. Run it (and the follow-up DB status query) in a
        # worker thread instead.
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, semantic_service.incremental_update)
        end_time = time.time()
        status = await loop.run_in_executor(None, semantic_service.get_update_status)

        logger.info(f"手动增量更新任务完成，耗时: {end_time - start_time:.2f}秒")

        return {
            "status": "success",
            "message": "增量更新完成",
            "execution_time": f"{end_time - start_time:.2f}秒",
            "update_info": status
        }
    except Exception as e:
        logger.error(f"增量更新失败: {e}")
        raise HTTPException(status_code=500, detail=f"增量更新失败: {str(e)}")
    finally:
        # Always release the single-flight flag (the original reset it twice,
        # in both the finally and the except path).
        with update_task_lock:
            update_task_running = False
            logger.info("手动增量更新任务状态已重置")

@app.get("/update_status")
async def get_update_status():
    """Return incremental-update progress information."""
    try:
        status = semantic_service.get_update_status()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取状态失败: {str(e)}")
    return {
        "status": "success",
        "data": status
    }

@app.post("/update_thresholds")
async def update_thresholds(request: ThresholdUpdateRequest):
    """Update the similarity-threshold configuration.

    Validation errors map to HTTP 400, anything else to HTTP 500.
    """
    try:
        return semantic_service.update_similarity_thresholds(
            new_thresholds=request.thresholds,
            save_to_file=request.save_to_file
        )
    except ValueError as e:
        logger.error(f"阈值更新参数错误: {e}")
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"更新阈值出错: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/get_thresholds")
async def get_thresholds():
    """Return the current similarity-threshold configuration."""
    try:
        thresholds = semantic_service.get_similarity_thresholds()
    except Exception as e:
        logger.error(f"获取阈值配置出错: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return thresholds


# Similarity-analysis routes.
@app.post("/analyze_cosmic_similarity")
async def analyze_cosmic_similarity(cosmicid: str):
    """Run similarity analysis for a single cosmicid.

    NOTE(review): a bare ``str`` parameter makes FastAPI read ``cosmicid``
    from the query string even on POST — confirm this matches callers.
    """
    try:
        # Reuse the shared service instance for the analyzer.
        analyzer = CosmicSimilarityAnalyzer(semantic_service=semantic_service)
        return analyzer.analyze_cosmic_similarity(cosmicid)
    except Exception as e:
        logger.error(f"相似度分析失败: {e}")
        raise HTTPException(status_code=500, detail=f"分析失败: {str(e)}")


@app.post("/analyze_cosmic_similarity_by_pswfinstid")
async def analyze_cosmic_similarity_by_pswfinstid(request: PswfinstidSimilarityRequest):
    """Resolve a pswfinstid to its latest cosmicid, then run similarity analysis.

    Returns 404 when no cosmicid is found for the given pswfinstid.
    """
    try:
        # Reuse the shared service instance for the analyzer.
        analyzer = CosmicSimilarityAnalyzer(semantic_service=semantic_service)
        cosmicid = analyzer.get_cosmicid_by_pswfinstid(request.pswfinstid)
        if not cosmicid:
            raise HTTPException(status_code=404, detail=f"未找到pswfinstid {request.pswfinstid} 对应的cosmicid")
        # Exclude the querying pswfinstid itself from the comparison set.
        result = analyzer.analyze_cosmic_similarity(cosmicid, exclude_pswfinstid=request.pswfinstid)
        # Echo lookup inputs back so the caller can correlate the result.
        result['pswfinstid'] = request.pswfinstid
        result['queried_cosmicid'] = cosmicid
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"通过pswfinstid进行相似度分析失败: {e}")
        raise HTTPException(status_code=500, detail=f"分析失败: {str(e)}")
    return result


@app.get("/timer/status")
async def timer_status():
    """Expose the auto-update timer's current state."""
    try:
        state = get_timer_status()
    except Exception as e:
        logger.error(f"获取定时器状态失败: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return state


@app.post("/timer/config")
async def update_timer_config(request: Request, background_tasks: BackgroundTasks):
    """Update the auto-update timer configuration.

    JSON body may contain ``enable`` (bool) and/or ``interval`` (seconds).
    Toggling ``enable`` starts/stops the timer; changing ``interval`` while
    the timer runs schedules a background restart so the new interval takes
    effect.
    """
    try:
        data = await request.json()
        enable = data.get('enable', None)
        interval = data.get('interval', None)

        if enable is not None:
            # Apply the toggle to the live configuration.
            Config.ENABLE_AUTO_UPDATE_TIMER = bool(enable)

            # Stop a running timer when disabling...
            if not enable and timer_running:
                stop_auto_update_timer()
            # ...or start one when enabling and none is running.
            elif enable and not timer_running:
                start_auto_update_timer()

        if interval is not None:
            # BUG FIX: the original checked `interval > 0` on the raw value
            # and truncated afterwards, so e.g. 0.5 passed the check, was
            # stored as int(0.5) == 0, and sent the timer thread into a
            # time.sleep(0) busy loop. Normalize first, then range-check.
            interval = int(interval)
            if interval > 0:
                Config.AUTO_UPDATE_INTERVAL = interval
                # A running timer must be restarted to pick up the new
                # interval; do it in a background task to avoid blocking.
                if timer_running:
                    def restart_timer():
                        stop_auto_update_timer()
                        time.sleep(1)  # brief pause between stop and start
                        start_auto_update_timer()

                    background_tasks.add_task(restart_timer)

        return {
            "success": True,
            "message": "定时器配置更新成功",
            "config": {
                "enable": Config.ENABLE_AUTO_UPDATE_TIMER,
                "interval": Config.AUTO_UPDATE_INTERVAL
            },
            "status": get_timer_status()
        }
    except Exception as e:
        logger.error(f"更新定时器配置失败: {e}")
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/analyze_multi_field_similarity")
async def analyze_multi_field_similarity(request: MultiFieldSimilarityRequest):
    """Analyze similarity among deduplicated contents of selected fields within one cosmicid."""
    # All three fields are analyzed when the caller does not narrow the set.
    fields = ['gnyhxq', 'gngc', 'zgcms'] if request.fields is None else request.fields
    try:
        analyzer = CosmicSimilarityAnalyzer(semantic_service=semantic_service)
        return analyzer.analyze_multi_field_similarity_within_cosmic(request.cosmicid, fields)
    except Exception as e:
        logger.error(f"多字段相似度分析失败: {e}")
        raise HTTPException(status_code=500, detail=f"分析失败: {str(e)}")

# NOTE(review): @app.on_event is deprecated in newer FastAPI releases in
# favor of lifespan handlers — consider migrating when upgrading.
@app.on_event("startup")
async def startup_event():
    """On startup, schedule the auto-update timer after a short warm-up delay."""
    logger.info("API服务启动")

    if not Config.ENABLE_AUTO_UPDATE_TIMER:
        return

    def delayed_timer_start():
        # Give the service a few seconds to finish initializing before the
        # timer's first tick.
        time.sleep(5)
        start_auto_update_timer()

    threading.Thread(target=delayed_timer_start, daemon=True).start()
    logger.info("定时器启动任务已安排")

if __name__ == "__main__":
    # Development entry point: serve the API on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
