import json
import os
import pickle
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Dict, List, Optional

import faiss
import requests
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer

def load_environment():
    """Load environment variables from a .env file, with reload support.

    Returns:
        bool: True when a .env file was found and loaded, False otherwise.
    """
    found = load_dotenv()
    print("✓ 环境变量文件加载成功" if found else "⚠ 未找到.env文件，使用系统环境变量")

    # Sanity-check the key credential so misconfiguration surfaces early.
    key = os.getenv("DEEPSEEK_API_KEY", "")
    if key:
        print(f"✓ DeepSeek API Key 已加载 (长度: {len(key)})")
    else:
        print("⚠ 未找到 DeepSeek API Key")

    return found

# Load environment variables once at import time.
load_environment()

# On-disk artifacts produced by the separate build_index.py step.
INDEX_PATH = "indexes/vectors.faiss"
META_PATH = "indexes/metadatas.pkl"
CONFIG_PATH = "indexes/config.json"

# Number of chunks retrieved when a request does not specify top_k.
TOP_K_DEFAULT = 5
# System prompt sent with every chat completion (Chinese knowledge assistant).
SYSTEM_PROMPT = (
    "你是一个中文知识助手，请基于给定上下文准确、简洁地回答用户问题。"
    "如果无法从上下文找到答案，请明确说明。"
)


class QueryRequest(BaseModel):
    """Request payload for /ask: the user question plus an optional
    override of how many context chunks to retrieve."""

    query: str
    top_k: Optional[int] = None


class QueryResponse(BaseModel):
    """Response payload for /ask: the generated answer together with the
    retrieved context chunks that supported it."""

    answer: str
    contexts: List[Dict]


def load_index():
    """Load the embedding model, FAISS index, and chunk metadata from disk.

    Returns:
        tuple: (SentenceTransformer, faiss.Index, list[dict]) ready for search.

    Raises:
        RuntimeError: when no usable local copy of the configured model exists.
    """
    with open(CONFIG_PATH, "r", encoding="utf-8") as f:
        cfg = json.load(f)

    # Ensure the local model cache directory exists.
    CACHE_DIR = Path("model_cache")
    CACHE_DIR.mkdir(exist_ok=True, parents=True)

    # Candidate on-disk layouts for the configured model, derived from the
    # config instead of hard-coded names. The "___" variant matches caches
    # that escape "." in version segments
    # (e.g. "BAAI/bge-small-zh-v1.5" -> "BAAI/bge-small-zh-v1___5").
    model_name = cfg["model"]
    possible_model_paths = [
        CACHE_DIR / model_name.replace("/", "_"),    # flat layout, "/" -> "_"
        CACHE_DIR / model_name,                      # nested sub-directory layout
        CACHE_DIR / model_name.replace(".", "___"),  # "." escaped as "___"
    ]

    # Debug aid: show which paths were probed and their status.
    print("调试信息：检查以下路径：")
    for path in possible_model_paths:
        exists = path.exists()
        is_dir = path.is_dir() if exists else False
        print(f"  {path} - 存在: {exists}, 是目录: {is_dir}")

    model = None
    for model_path in possible_model_paths:
        # Path.is_dir() already implies existence; no separate exists() needed.
        if model_path.is_dir():
            try:
                print(f"正在从本地加载模型: {model_path}")
                model = SentenceTransformer(str(model_path), device='cpu', cache_folder=CACHE_DIR)
                print("模型加载成功")
                break
            except Exception as e:
                # Try the next candidate layout rather than failing outright.
                print(f"模型加载失败: {e}")
                continue

    if model is None:
        raise RuntimeError(f"无法加载本地模型 {cfg['model']}，请先运行 build_index.py 下载模型")

    # Load the vector index and the parallel chunk-metadata list.
    # NOTE: pickle is only safe here because the file is produced locally
    # by build_index.py — never point META_PATH at untrusted data.
    index = faiss.read_index(INDEX_PATH)
    with open(META_PATH, "rb") as f:
        metas = pickle.load(f)
    return model, index, metas


def search(model: SentenceTransformer, index: faiss.Index, metas: List[Dict], query: str, top_k: int) -> List[Dict]:
    """Embed the query and return the top_k nearest chunks with their scores."""
    embedding = model.encode([query], normalize_embeddings=True).astype("float32")
    scores, indices = index.search(embedding, top_k)

    hits = []
    for rank_score, meta_idx in zip(scores[0], indices[0]):
        # FAISS pads results with -1 when fewer than top_k neighbours exist.
        if meta_idx == -1:
            continue
        record = metas[meta_idx]
        hits.append({
            "text": record["text"],
            "source": record["source"],
            "chunk_id": record["chunk_id"],
            "score": float(rank_score),
        })
    return hits


def format_context(contexts: List[Dict]) -> str:
    """Render retrieved chunks as a numbered, blank-line-separated block."""
    return "\n\n".join(
        f"[{n}] 来源: {ctx['source']} / {ctx['chunk_id']}\n{ctx['text']}"
        for n, ctx in enumerate(contexts, 1)
    )


def call_llm(prompt: str) -> str:
    """Send the prompt to the DeepSeek chat API and return the answer text.

    Falls back to a demo response when no API key is configured, and to an
    error string (never an exception) on any failure, so the HTTP endpoint
    always has something to return.
    """
    # Re-read .env on every call so a key added at runtime is picked up.
    load_environment()

    api_key = os.getenv("DEEPSEEK_API_KEY", "")
    if not api_key:
        return "（演示模式）未配置 DeepSeek API Key。以下是基于检索上下文的答复草案：\n" + prompt[:1200]

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "deepseek-chat",
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.2
    }

    try:
        # First attempt with certificate verification; retry without it only
        # when verification itself is what failed.
        try:
            response = _post_chat(headers, data, verify=True)
        except requests.exceptions.SSLError:
            print("SSL验证失败，尝试跳过证书验证")
            response = _post_chat(headers, data, verify=False)

        if response.status_code == 200:
            result = response.json()
            return result["choices"][0]["message"]["content"]
        print(f"API调用失败: {response.status_code} - {response.text}")
        return f"LLM调用失败: HTTP {response.status_code}"
    except Exception as e:
        # Boundary handler: the endpoint must answer even on unexpected errors.
        print(f"LLM 调用失败：{e}")
        return f"LLM 调用失败：{e}"


def _post_chat(headers: Dict, data: Dict, verify: bool) -> "requests.Response":
    """POST a single chat-completion request to DeepSeek.

    SECURITY: verify=False disables TLS certificate checking and is only a
    last-resort fallback for broken proxy/certificate environments; the
    proxy bypass mirrors the original fallback behaviour.
    """
    kwargs = {"headers": headers, "json": data, "timeout": 30, "verify": verify}
    if not verify:
        kwargs["proxies"] = {'http': None, 'https': None}  # disable any proxy
    return requests.post("https://api.deepseek.com/v1/chat/completions", **kwargs)


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: load model/index once at startup."""
    # Populate the module-level singletons consumed by the /ask endpoint.
    global _model, _index, _metas
    _model, _index, _metas = load_index()
    yield
    # Nothing to clean up on shutdown.

app = FastAPI(title="RAG-1 Demo", lifespan=lifespan)

# Module-level singletons; None until the lifespan handler loads them at startup.
_model: Optional[SentenceTransformer] = None
_index: Optional[faiss.Index] = None
_metas: Optional[List[Dict]] = None


@app.post("/ask", response_model=QueryResponse)
def ask(req: QueryRequest):
    """Answer a question using retrieved context plus the LLM.

    Raises:
        HTTPException: 503 when the index has not finished loading yet.
    """
    # `assert` is stripped under `python -O`; raise a real HTTP error instead
    # so clients get a meaningful status rather than a generic 500.
    if _model is None or _index is None or _metas is None:
        raise HTTPException(status_code=503, detail="索引未加载")
    k = req.top_k or TOP_K_DEFAULT
    ctxs = search(_model, _index, _metas, req.query, k)
    context_block = format_context(ctxs)
    prompt = (
        f"问题：{req.query}\n\n以下是可用的参考资料（可能不完整）：\n{context_block}\n\n"
        f"请结合参考资料作答，并在不确定时直说不知道。"
    )
    ans = call_llm(prompt)
    return QueryResponse(answer=ans, contexts=ctxs)