from fastapi import FastAPI, Request
from pydantic import BaseModel
import uvicorn
from loguru import logger
import time
import requests
from typing import Optional, List, Dict
import traceback
import re
# from sentence_transformers import SentenceTransformer, util
import jieba
import numpy as np
import faiss  # 替换milvus为faiss
import json
import os

# -------------------------- Service initialization --------------------------
app = FastAPI(title="FAISS RAG Service")

# Nacos registry configuration (unchanged from the previous version)
NACOS_SERVER = "http://192.168.150.101:8848"  # Nacos server base URL
SERVICE_NAME = "faiss_rag_service"  # name this instance registers under
AGENT_IP = "192.168.150.1"  # replace with the host's actual IP
AGENT_PORT = 8008  # port this FastAPI app listens on
NACOS_NAMESPACE = ""  # empty string = default Nacos namespace

# FAISS configuration (local file storage, no external service required)
FAISS_INDEX_PATH = "./faiss_rag_index.index"  # FAISS index file (appears unused in this file; the index is rebuilt from rag_data)
DATA_STORAGE_PATH = "./rag_data.json"         # raw chunk/vector records, JSON
VECTOR_DIM = 384                              # matches the sentence-transformers / all-minilm embedding dimension

# Model initialization (replaced by ollama embeddings; kept for reference)
# model = SentenceTransformer('all-MiniLM-L6-v2')

# -------------------------- FAISS state --------------------------
# Global state: the FAISS index and the raw records backing it.
# NOTE: store_in_faiss() also adds a "vector" key (JSON-encoded float list) to each record.
faiss_index = None
rag_data = []  # 格式: [{"id": str, "text": str, "keywords": str, "chunk_id": str, "timestamp": int}, ...]

def init_faiss():
    """Initialize the FAISS index from local files (no external service needed).

    Loads persisted chunk records from DATA_STORAGE_PATH when present and
    rebuilds a flat L2 index from the vectors stored alongside each record.

    Returns:
        bool: True on success, False if loading or index construction failed.
    """
    global faiss_index, rag_data
    try:
        # Step 1: restore previously persisted records, if any.
        if os.path.exists(DATA_STORAGE_PATH):
            with open(DATA_STORAGE_PATH, "r", encoding="utf-8") as fp:
                rag_data = json.load(fp)
            logger.info(f"加载本地数据成功，共{len(rag_data)}条")
        else:
            rag_data = []
            logger.info("本地数据文件不存在，将创建新数据")

        # Step 2: a flat L2 index needs no training -- fine for small corpora.
        faiss_index = faiss.IndexFlatL2(VECTOR_DIM)

        # Step 3: re-populate the index from the vectors saved with each record.
        if not rag_data:
            logger.info("FAISS索引初始化完成，暂无数据")
        else:
            stacked = np.array(
                [np.array(json.loads(rec["vector"]), dtype=np.float32) for rec in rag_data]
            )
            faiss_index.add(stacked)
            logger.info(f"FAISS索引加载完成，共{faiss_index.ntotal}个向量")

        return True
    except Exception as e:
        logger.error(f"FAISS初始化失败: {str(e)}")
        return False

def save_faiss_data():
    """Persist the raw chunk records to DATA_STORAGE_PATH as pretty-printed JSON.

    The FAISS index itself is not written out; it is rebuilt from these
    records on the next init_faiss() call.

    Returns:
        bool: True when the file was written, False on any error.
    """
    global rag_data
    try:
        with open(DATA_STORAGE_PATH, "w", encoding="utf-8") as fp:
            json.dump(rag_data, fp, ensure_ascii=False, indent=2)
    except Exception as e:
        logger.error(f"数据保存失败: {str(e)}")
        return False
    return True

# -------------------------- Nacos注册（不变） --------------------------
def register_to_nacos() -> bool:
    """Register this service instance with Nacos via the v1 HTTP API.

    Returns:
        bool: True when Nacos answers HTTP 200 with body "ok", False otherwise.
    """
    endpoint = f"{NACOS_SERVER}/nacos/v1/ns/instance"
    payload = {
        "serviceName": SERVICE_NAME,
        "ip": AGENT_IP,
        "port": AGENT_PORT,
        "weight": 10.0,
        "metadata": '{"version":"v1.0","type":"faiss_rag"}',
        "ephemeral": "true",
        "clusterName": "DEFAULT",
        "namespaceId": NACOS_NAMESPACE,
    }
    try:
        resp = requests.post(endpoint, params=payload, timeout=5)
    except Exception as e:
        logger.error(f"服务注册异常：{str(e)}")
        return False
    # Nacos signals success with HTTP 200 and a literal "ok" body.
    if resp.status_code == 200 and resp.text.strip() == "ok":
        logger.info("服务注册成功")
        return True
    logger.error(f"服务注册失败：Nacos返回 {resp.status_code} - {resp.text}")
    return False

def get_ollama_embedding(text: str, model: str = "all-minilm:latest") -> np.ndarray:
    """Embed *text* with a locally served ollama model.

    Args:
        text: Input string to vectorize.
        model: Ollama model tag (defaults to the local all-minilm).

    Returns:
        np.ndarray: float32 vector, the format FAISS expects.

    Raises:
        requests.exceptions.ConnectionError: when the ollama server is down.
        Exception: re-raised for any other HTTP/parsing failure.
    """
    try:
        # Default ollama API endpoint: http://localhost:11434/api/embeddings
        resp = requests.post(
            "http://localhost:11434/api/embeddings",
            json={"model": model, "prompt": text},
            timeout=10,  # 10-second timeout
        )
        resp.raise_for_status()  # raise on HTTP error status
        # Convert to float32 so the vector can go straight into FAISS.
        vec = np.array(resp.json()["embedding"], dtype=np.float32)
    except requests.exceptions.ConnectionError:
        logger.error("❌ 无法连接到ollama服务！请先执行 `ollama serve` 启动服务")
        raise
    except Exception as e:
        logger.error(f"❌ ollama向量生成失败：{str(e)}")
        raise
    return vec


# -------------------------- 数据模型（不变） --------------------------
class RAGRequest(BaseModel):
    """Inbound payload for POST /rag/process."""
    # Workflow identifier supplied by the caller (used for logging only).
    flow_id: str
    # The user question to run through the RAG pipeline.
    query: str
    # Caller-provided trace id, echoed back in the response body.
    trace_id: str
    # When set, demo chunks for this document are embedded and stored.
    document_id: Optional[str] = None

class RAGResponse(BaseModel):
    """Response envelope for POST /rag/process."""
    # HTTP-style status code (200 on success, 500 on failure).
    code: int
    # Pipeline output: intent, chunks, keywords, reranked results, store flag.
    data: Dict
    # "success" or an error description.
    message: str
    # Echo of the request's trace_id.
    trace_id: str

# -------------------------- 中间件（不变） --------------------------
@app.middleware("http")
async def add_trace_middleware(request: Request, call_next):
    """Attach a trace id to every request/response pair.

    Reuses the caller's X-Trace-ID header when present, otherwise mints one
    from the current nanosecond timestamp.
    """
    fallback = f"trace_rag_{str(time.time_ns())[:16]}"
    trace_id = request.headers.get("X-Trace-ID", fallback)
    logger.bind(trace_id=trace_id).info(f"[请求] path={request.url.path}，trace_id={trace_id}")

    response = await call_next(request)
    response.headers["X-Trace-ID"] = trace_id
    return response

# -------------------------- 核心功能（只换存储/检索部分） --------------------------
def intent_recognition(query: str) -> Dict:
    """Classify *query* into one of four coarse intents via keyword rules.

    Exactly one intent flag is set; "knowledge_query" is the fallback when no
    rule matches. Confidence is a fixed placeholder value.
    """
    flags = {
        "knowledge_query": False,
        "document_analysis": False,
        "comparison": False,
        "definition": False,
    }
    # First matching rule wins; order mirrors the original elif chain.
    rules = (
        ("definition", r"(什么是|定义|含义)"),
        ("comparison", r"(比较|区别|不同)"),
        ("document_analysis", r"(分析|解析)"),
    )
    for intent, pattern in rules:
        if re.search(pattern, query):
            flags[intent] = True
            break
    else:
        flags["knowledge_query"] = True
    return {"intents": flags, "confidence": 0.85}

def chunk_question(query: str) -> List[str]:
    """Split *query* on Chinese/ASCII clause punctuation, dropping empty pieces."""
    pieces = (part.strip() for part in re.split(r'[，,;；。？?！!]', query))
    return [part for part in pieces if part]

def pre_retrieval_processing(query: str) -> str:
    """Normalize *query* for retrieval: strip punctuation, collapse whitespace."""
    # Keep word chars, whitespace and CJK ideographs; everything else -> space.
    cleaned = re.sub(r'[^\w\s\u4e00-\u9fa5]', ' ', query)
    collapsed = re.sub(r'\s+', ' ', cleaned)
    return collapsed.strip()

def keyword_extraction(text: str) -> List[str]:
    """Tokenize *text* with jieba and keep multi-character, non-stopword tokens."""
    stopwords = {'的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这'}
    tokens = jieba.cut(text)
    return [tok for tok in tokens if len(tok) > 1 and tok not in stopwords]

# -------------------------- 替换：FAISS检索 --------------------------
def faiss_retrieval(query: str, top_k: int = 10) -> List[Dict]:
    """Semantic retrieval over the in-memory FAISS index.

    Args:
        query: Natural-language query text.
        top_k: Maximum number of hits to return.

    Returns:
        List of result dicts (id/text/keywords/chunk_id/distance/rank),
        ordered by ascending L2 distance; empty list on error or empty index.
    """
    global faiss_index, rag_data
    try:
        if faiss_index.ntotal == 0:
            return []

        # Embed the query with the local ollama model (replaces model.encode).
        query_vec = get_ollama_embedding(query)
        distances, indices = faiss_index.search(np.array([query_vec]), top_k)

        # Map FAISS hits back onto the stored records.
        results = []
        for idx, (dist, rag_idx) in enumerate(zip(distances[0], indices[0])):
            # BUGFIX: FAISS pads `indices` with -1 when the index holds fewer
            # than top_k vectors; a plain `< len(...)` check let -1 through and
            # wrongly returned rag_data[-1]. The index must also be >= 0.
            if 0 <= rag_idx < len(rag_data):
                data = rag_data[rag_idx]
                results.append({
                    "id": data["id"],
                    "text": data["text"],
                    "keywords": data["keywords"].split(","),
                    "chunk_id": data["chunk_id"],
                    "distance": float(dist),
                    "rank": idx + 1
                })
        return results
    except Exception as e:
        logger.error(f"FAISS检索失败: {str(e)}")
        return []

def keyword_retrieval(query: str, keywords: List[str], top_k: int = 10) -> List[Dict]:
    """关键词检索（基于原始数据匹配）

    Scores each stored record by the fraction of query keywords it contains
    and returns the top_k matches shaped like FAISS results.
    """
    global rag_data
    if not keywords or not rag_data:
        return []

    # Score = |query keywords ∩ record keywords| / |query keywords|.
    query_set = set(keywords)
    scored = []
    for record in rag_data:
        overlap = query_set & set(record["keywords"].split(","))
        if overlap:
            scored.append((len(overlap) / len(keywords), record))

    # Highest score first; keep only the top_k records.
    scored.sort(key=lambda pair: pair[0], reverse=True)

    hits = []
    for position, (score, record) in enumerate(scored[:top_k]):
        hits.append({
            "id": record["id"],
            "text": record["text"],
            "keywords": record["keywords"].split(","),
            "chunk_id": record["chunk_id"],
            # Mimic a distance: higher score -> smaller distance.
            "distance": 1 - score,
            "rank": position + 1,
        })
    return hits

# -------------------------- 替换：FAISS存储 --------------------------
def store_in_faiss(texts: List[str], chunk_ids: List[str]) -> bool:
    """Embed *texts* via ollama, add them to the FAISS index and persist.

    Args:
        texts: Chunk texts to store.
        chunk_ids: Parallel list of chunk identifiers (same length as texts).

    Returns:
        bool: True when everything was stored and saved (trivially True for
        empty input), False on length mismatch or any embedding/index/save
        failure.
    """
    global faiss_index, rag_data
    try:
        if len(texts) != len(chunk_ids):
            # Previously this failure was silent; log it so callers can debug.
            logger.error(f"FAISS存储失败: texts({len(texts)})与chunk_ids({len(chunk_ids)})长度不一致")
            return False
        # BUGFIX: for empty input, np.array([]) has shape (0,) rather than
        # (0, VECTOR_DIM), so faiss_index.add would raise and a vacuous store
        # returned False. Nothing to do -- report success immediately.
        if not texts:
            return True

        new_data = []
        new_vectors = []
        for text, chunk_id in zip(texts, chunk_ids):
            # Embed with the local ollama model (replaces model.encode).
            vec = get_ollama_embedding(text)
            keywords = keyword_extraction(text)
            new_data.append({
                "id": f"doc_{str(time.time_ns())[:16]}_{chunk_id}",
                "text": text,
                "keywords": ",".join(keywords),
                "chunk_id": chunk_id,
                "timestamp": int(time.time()),
                # Persist the vector so init_faiss() can rebuild the index.
                "vector": json.dumps(vec.tolist())
            })
            new_vectors.append(vec)

        # Append to the index and the backing records, then persist to disk.
        faiss_index.add(np.array(new_vectors))
        rag_data.extend(new_data)
        return save_faiss_data()
    except Exception as e:
        logger.error(f"FAISS存储失败: {str(e)}")
        return False

# -------------------------- RRF重排（不变） --------------------------
def rrf_rerank(semantic_results: List[Dict], keyword_results: List[Dict], k: int = 60) -> List[Dict]:
    """Fuse two ranked lists with Reciprocal Rank Fusion (RRF).

    Each occurrence of a document at 0-based rank r contributes
    1 / (k + r + 1) to its fused score; the payload of the first occurrence
    is kept. Results are returned in descending fused-score order.
    """
    fused = {}

    def _accumulate(ranked: List[Dict]) -> None:
        # Add each hit's reciprocal-rank contribution to its running score.
        for position, hit in enumerate(ranked):
            entry = fused.setdefault(hit["id"], {**hit, "score": 0})
            entry["score"] += 1 / (k + position + 1)

    _accumulate(semantic_results)
    _accumulate(keyword_results)
    return sorted(fused.values(), key=lambda item: item["score"], reverse=True)

# -------------------------- API接口（只换存储/检索调用） --------------------------
@app.post("/rag/process", response_model=RAGResponse)
async def process_rag(req: RAGRequest):
    """Run the full RAG pipeline for one request.

    Steps: intent recognition -> question chunking -> query normalization ->
    keyword extraction -> hybrid retrieval (FAISS + keyword) -> RRF rerank ->
    optional demo storage when document_id is supplied. Errors are reported
    as a code-500 envelope rather than an HTTP error.
    """
    try:
        logger.info(f"处理RAG请求: flow_id={req.flow_id}, query={req.query}")

        intent_result = intent_recognition(req.query)          # 1. intent
        chunks = chunk_question(req.query)                     # 2. split question
        processed_query = pre_retrieval_processing(req.query)  # 3. normalize
        keywords = keyword_extraction(processed_query)         # 4. keywords

        # 5. hybrid retrieval: semantic (FAISS) plus keyword matching
        semantic_results = faiss_retrieval(processed_query)
        keyword_results = keyword_retrieval(processed_query, keywords) if keywords else []

        # 6. fuse both rankings with reciprocal-rank fusion
        reranked_results = rrf_rerank(semantic_results, keyword_results)

        # 7. optionally store demo chunks derived from the query
        store_success = True
        if req.document_id:
            sample_chunks = [f"文档{req.document_id}片段{i}: {req.query}" for i in range(3)]
            chunk_ids = [f"{req.document_id}_chunk_{i}" for i in range(3)]
            store_success = store_in_faiss(sample_chunks, chunk_ids)

        payload = {
            "intent": intent_result,
            "chunks": chunks,
            "processed_query": processed_query,
            "keywords": keywords,
            "results": reranked_results[:5],
            "store_success": store_success,
        }
        return {"code": 200, "data": payload, "message": "success", "trace_id": req.trace_id}
    except Exception as e:
        err_msg = f"RAG处理失败: {str(e)}"
        logger.error(f"[处理异常] flow_id={req.flow_id}, err={err_msg}, 堆栈: {traceback.format_exc()}")
        return {"code": 500, "data": {}, "message": err_msg, "trace_id": req.trace_id}

@app.get("/health")
async def health_check():
    """Liveness probe: reports service identity and FAISS readiness."""
    global faiss_index
    index_state = "ready" if faiss_index else "error"
    record_count = len(rag_data) if 'rag_data' in globals() else 0
    return {
        "status": "healthy",
        "service": SERVICE_NAME,
        "port": AGENT_PORT,
        "faiss_status": index_state,
        "data_count": record_count,
    }

# -------------------------- Startup entry point --------------------------
if __name__ == "__main__":
    # 1. FAISS is the core dependency -- refuse to start without it.
    if not init_faiss():
        logger.error("❌ FAISS初始化失败，无法启动服务")
        exit(1)

    # 2. Nacos registration is best-effort: warn on failure, keep serving.
    if register_to_nacos():
        logger.info("✅ Nacos服务注册成功")
    else:
        logger.warning("⚠️ Nacos服务注册失败（不影响核心功能），后续可排查Nacos连接问题")

    # 3. Serve regardless of the Nacos outcome.
    logger.info(f"🎉 RAG服务启动成功！访问地址：http://{AGENT_IP}:{AGENT_PORT}")
    logger.info(f"   健康检查：http://{AGENT_IP}:{AGENT_PORT}/health")
    logger.info(f"   RAG接口：POST http://{AGENT_IP}:{AGENT_PORT}/rag/process")
    uvicorn.run(app, host="0.0.0.0", port=AGENT_PORT, log_config=None)