from fastapi import FastAPI, UploadFile, File, Form
from typing import List, Optional, Dict
import uvicorn
import requests
import os
import logging
import yaml
import concurrent.futures as cf
from call_llm import _run_all_configs_once
import asyncio
import re
import time

# Logging level for the whole service.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

app = FastAPI()

# Load config.yaml from the same directory as this file (if present).
# Any failure is logged and the service falls back to an empty config.
CONFIG = {}
try:
    current_dir = os.path.dirname(__file__)
    config_path = os.path.join(current_dir, "config.yaml")
    if os.path.exists(config_path):
        with open(config_path, "r", encoding="utf-8") as f:
            CONFIG = yaml.safe_load(f) or {}
            # safe_load may return a scalar/list for a malformed file; only a
            # mapping is acceptable as configuration.
            if not isinstance(CONFIG, dict):
                CONFIG = {}
except Exception as _config_exc:
    log.warning(f"加载 config.yaml 失败：{_config_exc}")

# Base URL and API key of the knowledge-base retrieval service.
api_url = CONFIG.get("KNOWLEDGE_BASES_API_URL")
api_key = CONFIG.get("KNOWLEDGE_BASES_API_KEY", "")
# top_k = CONFIG.get("KNOWLEDGE_BASES_TOP_K")
# score_threshold = CONFIG.get("KNOWLEDGE_BASES_SCORE_THRESHOLD")

# top_k=1 & score_threshold=0.29 can be passed per HTTP request; if omitted
# the caller-side defaults are 1 and 0.29.
def process_datasetid(dataset_id, query, top_k, score_threshold, content_max_length=7000, max_retries=3):
    """Retrieve matching segments from one knowledge base (dataset).

    Calls the dataset's ``/retrieve`` endpoint (hybrid search + reranking),
    strips/truncates each returned segment, then resolves a download URL for
    every referenced document.

    Args:
        dataset_id: Knowledge-base id to query.
        query: Search text.
        top_k: Maximum number of segments to retrieve.
        score_threshold: Minimum relevance score (thresholding is enabled).
        content_max_length: Truncate each segment's ``content`` to this many
            characters.
        max_retries: Attempts before giving up on the retrieve call.

    Returns:
        dict with ``dataset_id`` plus either ``segments``/``file_dict`` on
        success, or ``error`` on failure.
    """
    # Known dataset ids (for reference):
    # e415e223-3b18-4cec-93b7-f4674c5d4441  AI case assistant - case archive KB
    # f2ddcabb-6e3a-4cbd-a557-abfd21416393  AI case assistant - work material KB
    url = f"{api_url}/datasets/{dataset_id}/retrieve"

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }

    payload = {
        "query": query,
        "retrieval_model": {
            "search_method": "hybrid_search",
            "reranking_enable": True,
            "reranking_model": {
                "reranking_provider_name": CONFIG.get("RERANKING_PROVIDER_NAME",
                                                      "langgenius/huggingface_tei/huggingface_tei"),
                "reranking_model_name": CONFIG.get("RERANKING_MODEL_NAME", "bge-reranker-v2-m3")
            },
            "weights": None,
            "top_k": top_k,
            "score_threshold_enabled": True,
            "score_threshold": score_threshold,
        },
    }

    # Redact the bearer token so the API key never reaches the logs.
    safe_headers = {**headers, "Authorization": "Bearer ***"}

    # Retry loop: each failed attempt waits 500 ms before trying again.
    for attempt in range(max_retries):
        try:
            log.info(f"知识库引用的文件请求开始 (尝试 {attempt + 1}/{max_retries}): headers={safe_headers}, json={payload}, dataset_id={dataset_id}, top_k={top_k}, score_threshold={score_threshold}")
            resp = requests.post(url, headers=headers, json=payload, timeout=30)
            resp.raise_for_status()

            # Extract segments and the document id -> name mapping.
            data = resp.json()
            segments = []
            file_dict = {}

            if 'records' in data:
                for record in data['records']:
                    # Collect every retrieved segment.
                    if 'segment' in record and isinstance(record['segment'], dict):
                        segment = record['segment'].copy()  # copy so the raw response stays untouched

                        # Drop the signed-content field; it is not needed downstream.
                        if 'sign_content' in segment:
                            del segment['sign_content']

                        # Cap the content length.
                        if 'content' in segment and isinstance(segment['content'], str):
                            if len(segment['content']) > content_max_length:
                                segment['content'] = segment['content'][:content_max_length]

                        # .get avoids a KeyError (which would needlessly
                        # trigger a full request retry) if 'score' is absent.
                        segment['score'] = record.get('score', 0)
                        segment['dataset_id'] = dataset_id
                        segments.append(segment)

                    if 'segment' in record and 'document' in record['segment']:
                        doc = record['segment']['document']
                        doc_id = doc.get('id')
                        doc_name = doc.get('name')
                        if doc_id and doc_name and doc_id not in file_dict:
                            file_dict[doc_id] = doc_name

            # For every referenced document, fetch its upload-file download
            # URL. Resulting value format: "name,url" — or "name," when the
            # URL lookup fails.
            # Strip the trailing /v1 from api_url once; it is loop-invariant.
            base_url = api_url[:-len('/v1')] if api_url.endswith('/v1') else api_url
            for doc_id, doc_name in file_dict.items():
                try:
                    file_url = f"{api_url}/datasets/{dataset_id}/documents/{doc_id}/upload-file"
                    file_resp = requests.get(file_url, headers=headers, timeout=30)
                    file_resp.raise_for_status()
                    file_data = file_resp.json()
                    download_url = file_data.get('download_url', '')

                    # Build the full download URL.
                    if download_url:
                        full_download_url = f"{base_url}{download_url}"
                    else:
                        full_download_url = ''

                    file_dict[doc_id] = f"{doc_name},{full_download_url}"
                except Exception as e:
                    log.error(f"获取文件 {doc_id} 下载链接失败: {e}")
                    file_dict[doc_id] = f"{doc_name},"

            log.info(f"请求 dataset_id {dataset_id} 成功")
            return {"dataset_id": dataset_id, "segments": segments, "file_dict": file_dict}

        except Exception as e:
            log.error(f"请求 dataset_id {dataset_id} 失败 (尝试 {attempt + 1}/{max_retries}): {e}")
            if attempt < max_retries - 1:
                time.sleep(0.5)  # wait 500 ms before retrying
            else:
                # Last attempt also failed: report the error.
                return {"dataset_id": dataset_id, "error": str(e)}

    # Defensive fallback: reachable only when max_retries <= 0; previously
    # this path returned None implicitly.
    return {"dataset_id": dataset_id, "error": "no attempt was made (max_retries <= 0)"}

# Entry point of the retrieval flow.
async def create_upload_files(
        knowledge_base_list: str,
        query: str,
        top_k: Optional[int],
        score_threshold: Optional[float],
        query_rewrite: Optional[str] = "false",
        concurrent: Optional[int] = 1,
        system_prompt: Optional[str] = None,
        disable_delay: Optional[str] = "false",
        content_max_length: Optional[int] = 7000
):
    """Query several knowledge bases concurrently and merge the results.

    Args:
        knowledge_base_list: Comma-separated dataset ids.
        query: User query; optionally rewritten by the LLM first.
        top_k: Max segments per dataset.
        score_threshold: Minimum relevance score per dataset.
        query_rewrite: "true" to rewrite the query with the LLM before
            retrieval.
        concurrent: Concurrency passed through to the LLM call.
        system_prompt: Override for the rewrite system prompt.
        disable_delay: "true" to skip the 50 ms stagger between submissions.
        content_max_length: Per-segment content truncation length.

    Returns:
        dict with ``results`` (segments sorted by score, descending) and
        ``file_dict`` (doc_id -> "name,download_url").
    """
    # If query_rewrite is "true", rewrite the query via the LLM and use the
    # rewritten text for retrieval. `or "false"` guards against callers
    # passing an explicit None for the string flags.
    result = ""
    if (query_rewrite or "false").lower() == "true":
        if not system_prompt:
            # Default rewrite prompt (keyword extraction).
            system_prompt = """
                你是一个专业的query提取专家，专门提取关键字优化用户查询以提升知识库检索效果。
                
                核心任务：对输入问题进行多角度提取关键词，生成2个简洁版本。
                
                改写规则：
                ** 保留核心实体和意图，去除"帮我查询"等无关用语
                ** 提取关键词，关键信息
                ** 去掉问号等特殊符号
                ** 去掉信息、内容、事件人物时间地点等无关字样，具体的事件人物时间地点除外
                ** 去掉关键、相关、检索、案件的字样
                ** 确保语义完整性
                ** 提取结果尽量30字内，生成两个提取结果（如果没有更多关键词，两个提取结果可以重复），用分号隔开
                
                处理流程：
                1. 接收用户原始query
                2. 识别核心实体和关键词
                3. 从不同角度生成2个变体
                4. 输出最终结果
                
                ## 思考模式
                /no_think
                
                输出格式：提取结果1；提取结果2
                
                输出示例1：
                输入：我现在有个需求，帮我查询一下陈某某案件，并且返回关键信息
                输出：陈某某；陈某某
                
                输出示例2：
                输入：现在我有个需求，帮我查询一下陈某某案件，总结事件人物时间地点，30字内
                输出：陈某某；陈某某
                
                输出示例3：
                输入：现在我有个需求，帮我查询一下陈某某案件，2000年3月，在广东省，30字内
                输出：陈某某；陈某某2000年3月广东省
                """
        user_prompt = f"""
            请对以下query进行提取：
            {query}
            """
        print(f"开始调用LLM改写: {query}")
        # Default to the text above as user_prompt; an externally supplied
        # user_prompt would take precedence.
        results = await _run_all_configs_once(
            name=None,
            system_prompt=system_prompt or "you are a helpful assistant.",
            user_prompt=(user_prompt if user_prompt is not None else query),
            concurrent=concurrent or 1,
        )
        # _run_all_configs_once returns List[str]; take the first entry.
        # Fall back to the original query when nothing came back.
        result = results[0] if results else query
        # Strip any <think>...</think> reasoning block from the LLM output.
        result = re.sub(r'<think>.*?</think>', '', result, flags=re.DOTALL).strip()

    print(f"是否改写：{query_rewrite}，LLM改写结果: {result or query}")

    datasetid_list = knowledge_base_list.split(",")

    # Fan the per-dataset requests out to a thread pool. This is a coroutine,
    # so blocking calls (time.sleep / Future.result) would stall the whole
    # event loop; stagger with asyncio.sleep and await results via
    # run_in_executor + gather instead.
    loop = asyncio.get_running_loop()
    with cf.ThreadPoolExecutor(max_workers=len(datasetid_list)) as executor:
        tasks = []
        for i, dataset_id in enumerate(datasetid_list):
            # Unless disable_delay is "true", stagger submissions by 50 ms
            # (delay is on by default).
            if i > 0 and (disable_delay or "false").lower() != "true":
                await asyncio.sleep(0.05)
            tasks.append(loop.run_in_executor(
                executor, process_datasetid, dataset_id, result or query,
                top_k, score_threshold, content_max_length))
        task_results = await asyncio.gather(*tasks)

    # Merge all per-dataset results; entries carrying only an "error" key are
    # skipped. The loop variable is NOT named `result` so the rewrite result
    # above is not shadowed.
    all_segments = []
    all_file_dict = {}
    for item in task_results:
        if 'segments' in item:
            all_segments.extend(item['segments'])
        if 'file_dict' in item:
            all_file_dict.update(item['file_dict'])

    # Highest relevance first.
    all_segments.sort(key=lambda x: x.get('score', 0), reverse=True)

    return {"results": all_segments, "file_dict": all_file_dict}

if __name__ == '__main__':
    # Fall back to sane defaults when the config keys are missing: previously
    # a missing host became the literal string "None" (str(None)) and a
    # missing port crashed with int(None). Also fixes the `sever_host` typo.
    server_host = str(CONFIG.get("KNOWLEDGE_BASES_HOST") or "0.0.0.0")
    server_port = int(CONFIG.get("KNOWLEDGE_BASES_PORT") or 8000)
    uvicorn.run(app, host=server_host, port=server_port)