import asyncio
import json
import logging
import uuid
from typing import Any, Dict, List, Optional

import aiohttp
import chromadb
from chromadb.config import Settings
from fastapi import BackgroundTasks, FastAPI, HTTPException, Query
from pydantic import BaseModel

# Logging: DEBUG level, mirrored to both app.log and the console.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("app.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

app = FastAPI(title="多知识库向量存储服务")

# Chroma client; collections are persisted under ./chroma_kbs, telemetry off.
client = chromadb.Client(
    Settings(
        persist_directory="./chroma_kbs",
        anonymized_telemetry=False
    )
)

# Ollama embeddings endpoint and model configuration.
OLLAMA_URL = "http://localhost:11434/api/embeddings"
# NOTE(review): non-standard model name — confirm it is pulled locally
# (`ollama list`) before relying on it.
EMBED_MODEL = "hy-embed-model:latest"
OLLAMA_TIMEOUT = 30  # seconds, per embedding request

# Request/response data model definitions
class TextRequest(BaseModel):
    """Request body for adding a single text to a knowledge base."""
    text: str
    # Optional key/value metadata stored alongside the text in Chroma.
    metadata: Optional[Dict[str, Any]] = None
    # Target knowledge base (Chroma collection name).
    kb_name: str = "default"

class BatchTextRequest(BaseModel):
    """Request body for adding several texts to a knowledge base at once."""
    texts: List[str]
    # When given, must be the same length as `texts` (checked in add_to_kb).
    metadatas: Optional[List[Dict[str, Any]]] = None
    # Target knowledge base (Chroma collection name).
    kb_name: str = "default"

class QueryRequest(BaseModel):
    """Request body for a similarity search against one knowledge base."""
    query_text: str
    # Maximum number of nearest neighbours to return.
    n_results: int = 5
    # Knowledge base (Chroma collection name) to search.
    kb_name: str = "default"


async def validate_ollama_connection() -> bool:
    """Check whether the local Ollama server is reachable.

    Returns:
        True when a GET to the Ollama root URL answers HTTP 200,
        False on any network failure or timeout.
    """
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(
                "http://localhost:11434",
                # ClientTimeout object instead of a bare int (deprecated form).
                timeout=aiohttp.ClientTimeout(total=10),
            ) as response:
                return response.status == 200
    except (aiohttp.ClientError, asyncio.TimeoutError, OSError):
        # Narrow catch: only network-level failures mean "unreachable".
        # The original bare `except:` also swallowed CancelledError and
        # KeyboardInterrupt, which must propagate in an async app.
        logger.debug("Ollama connectivity check failed", exc_info=True)
        return False


async def list_available_models():
    """Return the names of all models installed in the local Ollama.

    Queries the /api/tags endpoint. On any failure the error is logged
    and an empty list is returned so callers can degrade gracefully.
    """
    tags_url = "http://localhost:11434/api/tags"
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(tags_url) as response:
                if response.status == 200:
                    payload = await response.json()
                    return [entry["name"] for entry in payload.get("models", [])]
        return []
    except Exception as e:
        logger.error(f"获取模型列表失败: {str(e)}")
        return []


def is_valid_vector(vector: Any) -> bool:
    """Strictly validate an embedding vector.

    A valid vector is a non-empty ``list`` whose elements are all
    ``int`` or ``float``; anything else (tuples, strings, mixed
    content, empty lists) is rejected.
    """
    if not isinstance(vector, list) or not vector:
        return False
    return all(isinstance(component, (int, float)) for component in vector)


def _extract_vector(result: Any):
    """Return the raw embedding found in an Ollama response, or None.

    Ollama versions and proxies differ in which field carries the
    embedding, so several candidate fields are checked in priority order.
    """
    if not isinstance(result, dict):
        return None
    embeddings = result.get("embeddings")
    if isinstance(embeddings, list) and embeddings:
        logger.debug("从 embeddings 字段获取向量")
        return embeddings[0]
    if "embedding" in result:
        logger.debug("从 embedding 字段获取向量")
        return result["embedding"]
    if isinstance(result.get("data"), list):
        for item in result["data"]:
            if isinstance(item, dict) and "embedding" in item:
                logger.debug("从 data 字段获取向量")
                return item["embedding"]
    if "vector" in result:
        logger.debug("从 vector 字段获取向量")
        return result["vector"]
    if "output" in result:
        logger.debug("从 output 字段获取向量")
        return result["output"]
    return None


async def _embed_one(session: "aiohttp.ClientSession", text: str,
                     max_attempts: int = 3) -> Optional[List[float]]:
    """Request one embedding from Ollama, retrying on failure.

    Returns the vector as a list of floats, or None when every attempt
    failed (the caller decides how to surface that).
    """
    for attempt in range(max_attempts):
        try:
            payload = {
                "model": EMBED_MODEL,
                "prompt": text
            }
            async with session.post(
                OLLAMA_URL,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=OLLAMA_TIMEOUT)
            ) as response:
                # Read the body while the response is still open; it is
                # released as soon as this context manager exits.
                response_text = await response.text()
                status = response.status
            logger.debug(f"Ollama响应 (尝试{attempt+1}): {response_text[:500]}")

            if status != 200:
                raise Exception(f"HTTP状态码错误: {status}, 响应: {response_text[:200]}")

            # Fix: parse the already-read text with json.loads instead of
            # calling response.json() after the response was released
            # (the original only worked because .text() cached the body).
            try:
                result = json.loads(response_text)
                logger.debug(f"解析后的响应数据: {result}")
            except ValueError as e:
                logger.error(f"无法解析为JSON: {response_text[:200]}, 错误信息: {str(e)}")
                raise Exception(f"无法解析为JSON: {response_text[:200]}") from e

            vector = _extract_vector(result)
            if vector is None:
                logger.error(f"未在响应中找到向量数据，完整响应: {result}")
                raise Exception("未在响应中找到向量数据")

            # Some backends return the vector serialized as a string.
            # Fix: json.loads replaces the original eval(), which executed
            # arbitrary code from an external service response.
            if isinstance(vector, str):
                try:
                    vector = json.loads(vector)
                    logger.warning("已将字符串形式的向量转换为列表")
                except ValueError as e:
                    logger.error(f"转换字符串形式的向量失败: {str(e)}")

            if not is_valid_vector(vector):
                logger.error(f"向量格式无效: {str(vector)[:200]}")
                raise Exception(f"向量格式无效: {str(vector)[:200]}")

            floats = [float(x) for x in vector]
            logger.debug(f"成功获取向量，维度: {len(floats)}")
            return floats

        except Exception as e:
            logger.warning(f"尝试{attempt+1}失败: {str(e)}")
            if attempt < max_attempts - 1:
                await asyncio.sleep(1)  # back off before retrying
    return None


async def get_embeddings(texts: List[str]) -> List[List[float]]:
    """Embed *texts* via the Ollama embeddings API.

    Empty/whitespace-only texts are skipped after sanitisation, so the
    result may be shorter than the input. Each remaining text is retried
    up to 3 times before the whole call fails.

    Raises:
        HTTPException: when Ollama is unreachable, the configured model
            is not installed, or a text still has no valid vector after
            all retries.
    """
    if not texts:
        return []

    # Fail fast if the Ollama service itself is down.
    if not await validate_ollama_connection():
        raise HTTPException(status_code=500, detail="Ollama服务不可达，请检查服务是否启动")

    # Fail fast if the embedding model is not installed.
    available_models = await list_available_models()
    if EMBED_MODEL not in available_models:
        raise HTTPException(
            status_code=500,
            detail=f"模型 {EMBED_MODEL} 不存在，请先运行: ollama pull {EMBED_MODEL}"
        )

    embeddings: List[List[float]] = []
    async with aiohttp.ClientSession() as session:
        for text in texts:
            # Strip NUL bytes and surrounding whitespace, which can break
            # the backend; skip texts that become empty.
            text = text.replace('\x00', '').strip()
            if not text:
                logger.warning("空文本，跳过处理")
                continue

            vector = await _embed_one(session, text)
            if vector is None:
                raise HTTPException(
                    status_code=500,
                    detail="多次尝试后仍无法获取有效的嵌入向量"
                )
            embeddings.append(vector)

            # Small pause between requests to avoid hammering Ollama.
            await asyncio.sleep(0.1)

    return embeddings


def get_kb_collection(kb_name: str):
    """Fetch the Chroma collection backing *kb_name*, creating it on demand.

    Raises:
        HTTPException: status 500 when Chroma cannot return the collection.
    """
    try:
        collection = client.get_or_create_collection(name=kb_name)
    except Exception as e:
        logger.error(f"获取知识库[{kb_name}]失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"知识库[{kb_name}]不存在或创建失败")
    return collection

async def add_to_kb(texts: List[str], ids: List[str], kb_name: str,
                   metadatas: Optional[List[Dict[str, Any]]] = None):
    """Embed *texts* and store them in knowledge base *kb_name*.

    Raises:
        ValueError: when embedding/metadata counts do not match *texts*.
        HTTPException: propagated from collection lookup or embedding.
    """
    collection = get_kb_collection(kb_name)
    vectors = await get_embeddings(texts)

    if len(vectors) != len(texts):
        raise ValueError(f"向量数量({len(vectors)})与文本数量({len(texts)})不匹配")

    # Default every record to an empty metadata dict when none were given.
    if not metadatas:
        metadatas = [{} for _ in texts]
    if len(metadatas) != len(texts):
        raise ValueError("元数据数量与文本数量不匹配")

    collection.add(
        documents=texts,
        embeddings=vectors,
        ids=ids,
        metadatas=metadatas,
    )
    logger.info(f"成功添加 {len(texts)} 条数据到知识库[{kb_name}]" )


# Background-task handlers (errors are logged, never raised to the client)
async def process_single(text: str, text_id: str, kb_name: str, metadata: Optional[Dict[str, Any]]):
    """Background task: insert one text into *kb_name*, logging any failure."""
    wrapped_meta = [metadata] if metadata else None
    try:
        await add_to_kb([text], [text_id], kb_name, wrapped_meta)
    except Exception as e:
        logger.error(f"单条数据处理失败（知识库[{kb_name}]）: {str(e)}")

async def process_batch(texts: List[str], ids: List[str], kb_name: str, metadatas: Optional[List[Dict[str, Any]]]):
    """Background task: insert a batch of texts into *kb_name*, logging any failure."""
    try:
        await add_to_kb(texts, ids, kb_name, metadatas)
    except Exception as e:
        logger.error(f"批量数据处理失败（知识库[{kb_name}]）: {str(e)}")


# HTTP API endpoints
@app.post("/add-text", summary="添加单条文本到指定知识库")
async def add_text(request: TextRequest, background_tasks: BackgroundTasks):
    """Queue a single text for asynchronous ingestion into a knowledge base."""
    new_id = str(uuid.uuid4())
    background_tasks.add_task(
        process_single, request.text, new_id, request.kb_name, request.metadata
    )
    return {
        "status": "success",
        "message": f"文本已加入知识库[{request.kb_name}]处理队列",
        "id": new_id,
        "kb_name": request.kb_name,
    }

@app.post("/query", summary="从指定知识库查询相似文本")
async def query_similar(request: QueryRequest):
    """Search a knowledge base for the texts most similar to the query."""
    collection = get_kb_collection(request.kb_name)
    try:
        embedded_query = await get_embeddings([request.query_text])
        matches = collection.query(
            query_embeddings=embedded_query,
            n_results=request.n_results,
        )
    except Exception as e:
        logger.error(f"查询失败（知识库[{request.kb_name}]）: {str(e)}")
        raise
    return {
        "status": "success",
        "kb_name": request.kb_name,
        "results": matches,
    }

@app.get("/list-kbs", summary="列出所有知识库")
async def list_kbs():
    """List the names of every knowledge base (Chroma collection)."""
    names = [collection.name for collection in client.list_collections()]
    return {
        "status": "success",
        "kb_list": names,
    }

@app.get("/available-models", summary="列出Ollama中可用的模型")
async def available_models():
    """Expose the models currently installed in the local Ollama instance."""
    installed = await list_available_models()
    return {
        "status": "success",
        "models": installed,
    }

@app.on_event("startup")
async def startup_event():
    """On boot, verify Ollama connectivity and warn if the embed model is absent."""
    logger.info("服务启动中，检查Ollama连接...")
    if await validate_ollama_connection():
        logger.info("Ollama服务连接成功")
        models = await list_available_models()
        logger.info(f"可用模型: {', '.join(models)}")
        if EMBED_MODEL not in models:
            logger.warning(f"模型 {EMBED_MODEL} 未找到，建议运行: ollama pull {EMBED_MODEL}")
    else:
        logger.error("无法连接到Ollama服务，请确保已启动Ollama (ollama serve)")

if __name__ == "__main__":
    # Run the API with uvicorn when executed directly; the "main:app"
    # import string assumes this file is named main.py — TODO confirm.
    import uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8000)
