
from fastapi import APIRouter, Request, HTTPException, Response, UploadFile

import json, os, sys
import uuid
from datetime import datetime
import chromadb
from openai import OpenAI
from pathlib import Path

from configs import config

# Persistent Chroma vector store lives under the configured persistence root;
# create the directory up front so PersistentClient never sees a missing path.
chroma_path = Path(config.setting.persists.path) / "chroma_db"
chroma_path.mkdir(parents=True, exist_ok=True)

chroma_client = chromadb.PersistentClient(path=str(chroma_path))

# OpenAI-compatible client pointed at the Gitee endpoint.
# NOTE(review): "X-Failover-Enabled" presumably turns on server-side model
# failover — confirm against the Gitee AI gateway documentation.
openai_client = OpenAI(base_url=config.setting.gitee.base_url, api_key=config.setting.gitee.api_key, default_headers={"X-Failover-Enabled":"true"})



# Router for the knowledge-base Q&A endpoints; the tag string is user-facing
# OpenAPI metadata and is intentionally left in the original language.
router = APIRouter(
    prefix='/rag/knowledge',
    tags = ['知识库问答']
)


@router.post("/add")
async def knowledge_add(request: Request):
    """Embed and store documents in a knowledge-base collection.

    Expected JSON body:
      - connection_name: Chroma collection name (created if absent).
      - documents: list of {"text": str, "source": str (optional)}.

    Each text is embedded with the configured embedding model and the
    vectors are upserted into the collection under fresh UUID ids.
    Returns None (HTTP 200 with null body), matching the original contract.
    """
    data = await request.json()

    documents = data.get("documents", [])
    if not documents:
        # Nothing to embed — skip the API call and the empty upsert.
        return

    collection = chroma_client.get_or_create_collection(name=data["connection_name"])
    texts = [doc["text"] for doc in documents]
    metadatas = [{"source": doc.get("source", "unknown")} for doc in documents]

    # One batched embeddings call instead of one request per document;
    # response.data preserves the order of the input list.
    response = openai_client.embeddings.create(
        input=texts,
        model=config.setting.gitee.models.embedding,
    )
    embeddings = [item.embedding for item in response.data]
    ids = [str(uuid.uuid4()) for _ in texts]

    # upsert() is the normal path. Only fall back to add() when the installed
    # chromadb predates upsert (AttributeError); any other failure should
    # surface instead of being masked by a doomed retry.
    try:
        collection.upsert(
            ids=ids,
            embeddings=embeddings,
            documents=texts,
            metadatas=metadatas,
        )
    except AttributeError:
        collection.add(
            ids=ids,
            embeddings=embeddings,
            documents=texts,
            metadatas=metadatas,
        )

@router.post("/search_similar")
async def knowledge_search_similar(request: Request):
    """Similarity-search a knowledge-base collection.

    Expected JSON body:
      - connection_name: Chroma collection name (created if absent).
      - question: query text to embed and search with.
      - n_results: max hits to return (default 5).

    Returns a list of {"doc_id", "text", "metadata", "distance"} dicts,
    ordered by increasing distance. Empty list when there are no hits.
    """
    data = await request.json()

    collection = chroma_client.get_or_create_collection(name=data["connection_name"])

    response = openai_client.embeddings.create(
        input=data["question"],
        model=config.setting.gitee.models.embedding
    )
    query_embedding = response.data[0].embedding

    results = collection.query(
        query_embeddings=[query_embedding],
        n_results=data.get("n_results", 5),
        include=["documents", "metadatas", "distances"]
    )

    # Chroma returns one inner list per query embedding; guard against a
    # missing/None key so an empty collection yields [] instead of a 500.
    documents = (results.get("documents") or [[]])[0]
    metadatas = (results.get("metadatas") or [[]])[0]
    distances = (results.get("distances") or [[]])[0]

    search_results = []
    for doc, metadata, distance in zip(documents, metadatas, distances):
        # Reconstruct a doc_id for chunked-transcript entries; documents
        # without that metadata (e.g. added via /add, which only stores
        # "source") get doc_id=None.
        if metadata.get('transcript_id') and metadata.get('chunk_index') is not None:
            doc_id = f"{metadata['transcript_id']}_chunk_{metadata['chunk_index']}"
        else:
            doc_id = None
        search_results.append({
            "doc_id": doc_id,
            "text": doc,
            "metadata": metadata,
            "distance": distance
        })

    return search_results