import asyncio
import json

from fastapi import FastAPI
from openai import AsyncOpenAI
from pydantic import BaseModel

from config import embedder_config, milvus_config, llm_config
from milvus_op import MilvusOP
from sentence_transformers import SentenceTransformer as TextEmbeder
from typing import List, Optional, Dict, Any


def build_prompt(user_question, retrieved_info, img_drawn):
    """Assemble the RAG prompt for a single query.

    Args:
        user_question: the user's question text.
        retrieved_info: list of retrieved passage strings, one per hit.
        img_drawn: parallel list; ``img_drawn[i][0]`` is a {page: position}
            dict for passage i (any remaining tuple elements are ignored here).

    Returns:
        (prompt, citation_map, position_desc) where
        - prompt: full prompt string handed to the LLM,
        - citation_map: maps '资料X' -> {'page': {page: position}},
        - position_desc: human-readable page/position summary, one section
          per passage, joined with newlines.
    """
    # Build the prompt from parts and join once (avoids repeated += concat).
    parts = [
        "你是一名技术专家，擅长解答用户关于 DeepSeek 的问题。请根据下面提供的参考资料，结合用户的问题，给出准确、简洁的回答。\n\n",
        f"【用户问题】\n{user_question}\n\n",
        "【检索到的参考资料】\n",
    ]
    citation_map = {}  # renamed from `map` to avoid shadowing the builtin
    descs = []
    for idx, passage in enumerate(retrieved_info, start=1):
        parts.append(f"[资料{idx}]\n{passage}\n---\n")
        page_info = img_drawn[idx - 1][0]
        citation_map[f'资料{idx}'] = {'page': page_info}
        pos_desc = '\n'.join(f'### 页码：{k}\n### 位置：{v}' for k, v in page_info.items())
        descs.append(f"## 资料{idx}\n{pos_desc}\n---")
    parts.append("""
【要求】
- 回答必须基于以上参考资料，避免使用虚构内容。
- 回答中请在末尾使用“[资料X]”来引用相应资料，例如：“[资料2]”。
- 请勿添加资料之外的信息或链接。
- 如参考资料中未包含足够信息，请明确说明“根据现有资料，无法准确回答该问题”。
- 使用简洁、专业的语言表达答案。
- 在回复结尾，无需重复列出资料，只保留正文即可。

【最终回答】
""")
    return ''.join(parts), citation_map, '\n'.join(descs)


async def ask_llm(prompt, client, llm_config):
    """Send one prompt to the chat-completions endpoint.

    Returns the model's reply text, or an "[ERROR] ..." string if the
    request (or response access) fails — callers treat errors as text.
    """
    request_kwargs = {
        "model": llm_config['model_name'],  # model name from configuration
        "messages": [{"role": "user", "content": prompt}],
        "stream": False,
        "extra_body": {"enable_thinking": llm_config['enable_thinking']},
    }
    try:
        response = await client.chat.completions.create(**request_kwargs)
        return response.choices[0].message.content
    except Exception as exc:
        # Boundary: surface failures as text so one bad call doesn't
        # abort a whole batch of concurrent requests.
        return f"[ERROR] {exc}"


async def generate_answers(prompts, client, llm_config):
    """Fan out all prompts to the LLM concurrently; answers keep prompt order."""
    return await asyncio.gather(
        *(ask_llm(prompt, client, llm_config) for prompt in prompts)
    )

app = FastAPI()


# Initialize shared components once at import time; they are reused by
# every /predict request.
embedder = TextEmbeder(**embedder_config)  # sentence-transformers text encoder
milvus_op = MilvusOP(**milvus_config)  # Milvus vector-store wrapper (project-local)
llm_client = AsyncOpenAI(api_key=llm_config['api_key'], base_url=llm_config['base_url'])


# Request body schema for /predict.
class PredictRequest(BaseModel):
    # User questions to answer; defaults to a single sample query.
    # (Pydantic deep-copies the default list per instance, so this is safe.)
    queries: List[str] = ['如何利用deepseek进行医疗影像分析？']

@app.post("/predict")
async def predict(request: PredictRequest):
    """Answer each query via hybrid Milvus retrieval plus LLM generation.

    Response dict:
      - error: '' on success, otherwise the exception message
      - answers: one LLM answer per query (same order as request.queries)
      - position_maps: per query, a mapping '资料X' -> {'page': {page: position}}
        so the client can locate each cited passage in the source document.
    """
    try:
        queries = request.queries
        embeddings = embedder.encode(queries)
        # Top-5 hybrid (dense + sparse) search per query.
        search_out = milvus_op.hybrid_search(queries, query_embs=embeddings, limit=5, sparse_block=True)

        prompts, position_maps = [], []
        for qid, hits in enumerate(search_out):
            retrieved_info = []
            img_drawn = []
            for hit in hits:
                # 'posInfo' is stored as a JSON string; its 'pbox' maps
                # page number -> position box for the retrieved block.
                metadata = json.loads(hit.fields['posInfo'])
                retrieved_info.append(hit.fields['block'])
                img_drawn.append((
                    metadata['pbox'],
                    [f"output/retrieval/refer_{page_num}.png" for page_num in metadata['pbox']]
                ))
            # Third return value (position description text) is unused here.
            prompt, position_map, _ = build_prompt(queries[qid], retrieved_info, img_drawn)
            prompts.append(prompt)
            position_maps.append(position_map)

        answers = await generate_answers(prompts, llm_client, llm_config)
        return {'error': '', 'answers': answers, 'position_maps': position_maps}

    except Exception as e:
        # API boundary: report the error in the payload instead of a 500.
        return {'error': str(e), 'answers': [], 'position_maps': []}
# 🚀 Launch the service with:
# uvicorn main:app --host 0.0.0.0 --port 8000
