# ask_question.py

import argparse
import requests
import json
import os
import config
import datetime

from sentence_transformers import SentenceTransformer
from pymilvus import connections, Collection, utility
from chatchat.server.chat.utils import History
from langchain.prompts.chat import ChatPromptTemplate
from chatchat.server.db.repository.message_repository import filter_message, add_message_to_db
from chatchat.settings import Settings
from sqlalchemy import inspect

# Startup diagnostics: print the configured DB URI, the ORM engine URL, and
# the table list so misconfiguration surfaces immediately when the script loads.
print("[调试] Settings.basic_settings.SQLALCHEMY_DATABASE_URI =", Settings.basic_settings.SQLALCHEMY_DATABASE_URI)
from chatchat.server.db.base import engine

print("[调试] ORM engine url：", engine.url)
try:
    # Probing the table names also verifies the DB connection is usable.
    print("[调试] 数据库表：", inspect(engine).get_table_names())
except Exception as e:
    print("[调试] inspect(engine) 报错：", e)

# Daily interaction logs are appended under this directory (one file per day).
LOG_DIR = "logs"
os.makedirs(LOG_DIR, exist_ok=True)

def log_interaction(question, history, prompt, outputs):
    """Append one Q/A interaction to today's log file under LOG_DIR.

    Writes a timestamped separator, then the question, optional history,
    the final prompt, and the concatenated output fragments.
    """
    today = datetime.datetime.now().strftime("%Y-%m-%d")
    path = os.path.join(LOG_DIR, f"{today}.log")
    separator = "=" * 40
    with open(path, "a", encoding="utf-8") as log_file:
        log_file.write(f"\n{separator} {datetime.datetime.now()} {separator}\n")
        log_file.write(f"[QUESTION]: {question}\n")
        if history:
            log_file.write(f"[HISTORY]: {history}\n")
        log_file.write(f"[PROMPT]: {prompt}\n")
        log_file.write("[OUTPUT]: ")
        log_file.write("".join(str(fragment) for fragment in outputs))
        log_file.write("\n")

def connect_milvus():
    """Open the default connection to the Milvus server from config."""
    host, port = config.MILVUS_HOST, config.MILVUS_PORT
    print(f"正在连接 Milvus: {host}:{port}...")
    connections.connect("default", host=host, port=port)
    print("Milvus 连接成功。")

def search_milvus(question_vector, collection):
    """Query Milvus for the text chunks most similar to the question vector.

    Returns three parallel lists: chunk texts, similarity scores, and the
    source paths of the matched chunks (all empty when nothing matched).
    """
    params = {
        "metric_type": "IP",
        "params": {"nprobe": 10},
    }

    print("\n--- 正在 Milvus 中搜索相关上下文 ---")
    search_result = collection.search(
        data=[question_vector],
        anns_field="vector",
        param=params,
        limit=config.TOP_K,
        output_fields=["text", "source"],  # fields returned alongside each hit
    )

    hits = search_result[0]
    if not hits:
        print("在知识库中未找到相关内容。")
        return [], [], []

    chunks, scores, sources = [], [], []
    for hit in hits:
        chunks.append(hit.entity.get('text'))
        scores.append(hit.distance)
        sources.append(hit.entity.get('source'))

    print(f"找到 {len(chunks)} 个相关文本块，最高分: {scores[0]:.4f}")
    # Log every match with its score and origin for traceability.
    for rank, (chunk, score, source) in enumerate(zip(chunks, scores, sources), start=1):
        print(f"  [匹配项 {rank}] 分数: {score:.4f}, 来源: {source}")
        # Only a short snippet of the chunk is printed to keep logs readable.
        print(f"  内容片段: \"{chunk[:100].replace(chr(10), ' ')}...\"")

    return chunks, scores, sources

def call_llm_stream(prompt):
    """Stream a completion from the Zhipu/GLM chat API, yielding text fragments.

    Parses the server-sent-event stream line by line; malformed or
    non-JSON lines are skipped silently. On any transport error a single
    error message fragment is yielded instead.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {config.ZHIPU_API_KEY}",
    }
    payload = {
        "model": config.LLM_MODEL,
        "stream": True,  # request SSE streaming from the API
        "messages": [
            {"role": "system", "content": "你是一个智能助手。请根据下面提供的'已知内容'来回答用户的问题。如果'已知内容'与问题无关，请说明情况并根据你自己的知识来回答。"},
            {"role": "user", "content": prompt}
        ]
    }
    try:
        with requests.post(config.ZHIPU_API_URL, headers=headers, json=payload, stream=True, timeout=120) as response:
            response.raise_for_status()
            for raw_line in response.iter_lines(decode_unicode=True):
                if not raw_line or not raw_line.strip():
                    continue
                chunk = raw_line[5:].strip() if raw_line.startswith("data:") else raw_line
                if chunk == "[DONE]":
                    break
                try:
                    parsed = json.loads(chunk)
                    # Streaming deltas come first; fall back to full-message
                    # and legacy "data" shapes for non-delta payloads.
                    choice = parsed.get("choices", [{}])[0]
                    content = (
                        choice.get("delta", {}).get("content")
                        or choice.get("message", {}).get("content")
                        or parsed.get("data", "")
                    )
                except Exception:
                    continue  # best-effort: skip unparseable lines
                if content:
                    yield content
    except Exception as e:
        yield f"\n发生错误: {e}\n"

def build_prompt_with_history(question, history):
    """Render prior turns plus the current question into one prompt string."""
    past_turns = [History.from_data(item) for item in (history or [])]
    messages = [turn.to_msg_template() for turn in past_turns]
    messages.append(History(role="user", content=question).to_msg_template(False))
    return ChatPromptTemplate.from_messages(messages).format()

def get_history_from_db(conversation_id, limit=10):
    """Load stored turns for a conversation as role/content dicts.

    Returns an empty list when no conversation_id is given; otherwise each
    stored message becomes a user entry followed by an assistant entry.
    """
    if not conversation_id:
        return []
    rows = filter_message(conversation_id=conversation_id, limit=limit)
    flattened = []
    for row in rows:
        flattened.extend([
            {"role": "user", "content": row["query"]},
            {"role": "assistant", "content": row["response"]},
        ])
    return flattened

def ask_and_stream(question, conversation_id=None):
    """Stream the answer to `question` as text fragments (for SSE responses).

    Retrieves context from Milvus, builds a prompt including prior turns
    loaded from the DB (keyed by `conversation_id`), streams the LLM output
    token by token, then logs the interaction and persists the new Q/A pair.

    Args:
        question: The user's question text.
        conversation_id: Optional conversation key for history load/store.

    Yields:
        str: status messages followed by LLM output fragments, in order.
    """
    import time
    outputs = []      # every yielded fragment, collected for the log file
    prompt = None     # final prompt text; stays None if we bail out early
    answer_acc = ""   # accumulated LLM answer only (status messages excluded)
    try:
        start = time.time()
        connect_milvus()
        print(f"Milvus连接耗时: {time.time()-start:.2f}s")
        # Bail out with an explanatory message if the collection is missing.
        if not utility.has_collection(config.MILVUS_COLLECTION):
            msg = f"错误: 集合 '{config.MILVUS_COLLECTION}' 不存在。请先运行 `ingest_documents.py` 来创建并填充数据。"
            outputs.append(msg)
            yield msg
            log_interaction(question, conversation_id, prompt, outputs)
            return
        collection = Collection(config.MILVUS_COLLECTION)
        collection.load()
        # 2. Embed the question.
        msg = f"正在向量化问题: '{question}'\n"
        outputs.append(msg)
        yield msg
        # NOTE(review): the SentenceTransformer model is reloaded on every
        # call — confirm whether it should be cached at module level.
        model = SentenceTransformer(config.EMBEDDING_MODEL)
        question_vector = model.encode(question, normalize_embeddings=True)
        # 3. Search Milvus for relevant context chunks.
        matched_chunks, matched_scores, _ = search_milvus(question_vector.tolist(), collection)
        # 4. Automatically load prior conversation turns.
        history = get_history_from_db(conversation_id)
        print("[调试] 读取到的历史消息：", history)
        # 5. Build the prompt; only prepend context when the best match
        #    clears the similarity threshold.
        if matched_chunks and matched_scores[0] >= config.SIMILARITY_THRESHOLD:
            context = "\n\n---\n\n".join(matched_chunks)
            prompt_prefix = f"已知内容:\n\n{context}\n\n"
        else:
            prompt_prefix = ""
            msg = "未找到高度相关的上下文，将直接使用通用知识回答。\n"
            outputs.append(msg)
            yield msg
        # Combine retrieved context with the multi-turn history prompt.
        prompt = prompt_prefix + build_prompt_with_history(question, history)
        print(f"最终prompt长度: {len(prompt)}")
        # 6. True streaming of the LLM output.
        for token in call_llm_stream(prompt):
            outputs.append(token)
            answer_acc += token
            yield token
    except Exception as e:
        import traceback
        err = f"\n发生错误: {e}\n"
        outputs.append(err)
        yield err
        outputs.append(traceback.format_exc())
        yield traceback.format_exc()
    finally:
        # NOTE(review): yielding inside `finally` raises RuntimeError if the
        # generator is closed early (e.g. a disconnected SSE client triggers
        # GeneratorExit) — confirm callers always exhaust the generator.
        if "default" in connections.list_connections():
            connections.disconnect("default")
            msg = "\n已断开与 Milvus 的连接。\n"
            outputs.append(msg)
            yield msg
        # Write the log; `prompt` is the final assembled prompt (or None).
        log_interaction(question, conversation_id, prompt, outputs)
        # Persist this turn so later calls can load it as history.
        if conversation_id and answer_acc.strip():
            try:
                msg_id = add_message_to_db(conversation_id=conversation_id, chat_type="llm_chat", query=question, response=answer_acc)
                print("[调试] 写入数据库返回ID：", msg_id)
            except Exception as e:
                print("[调试] 写入数据库异常：", e)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="使用 RAG 从 Milvus 知识库向大模型提问。")
    parser.add_argument("--question", type=str, required=True, help="你想要问的问题。")
    args = parser.parse_args()

    # Stream the answer to stdout as it is generated.
    # (Removed leftover debug code that re-imported add_message_to_db and
    # unconditionally wrote a "test"/"hello"/"world" record to the database
    # on every CLI invocation.)
    for line in ask_and_stream(args.question):
        print(line, end="", flush=True)