import requests
from haystack.nodes import EmbeddingRetriever
from haystack.nodes import PromptNode, PromptTemplate
from haystack.pipelines import Pipeline
from haystack.document_stores import InMemoryDocumentStore, FAISSDocumentStore
from haystack.nodes.prompt.prompt_model import PromptModel
import pandas as pd
from fastapi import FastAPI, Response, Query as QueryParam
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
import uvicorn
import mysql.connector
from mysql.connector import Error
import logging
from logging.handlers import RotatingFileHandler
import os
from datetime import datetime
from tqdm import tqdm
import json
import asyncio
import httpx
from threading import Lock
from contextlib import asynccontextmanager
from haystack.schema import Document  # 添加这个导入
import signal
from haystack_asyn_faiss import start_vector_sync, get_sync_progress

app = FastAPI()

# MySQL connection settings used by load_dataset_mapping().
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a secrets manager before deploying/sharing.
MYSQL_CONFIG = {
    'host': '47.104.188.246',
    'port': 13307,
    'database': 'rag',
    'user': 'root',
    'password': 'G@lMmX8rVCLcegOH',
    'charset': 'utf8mb4'
}

# Load the dataset code -> path mapping from the database.
def load_dataset_mapping():
    """Fetch the dataset registry from MySQL.

    Returns:
        dict: mapping of dataset ``code`` to its JSON file ``path`` (ordered
        by the table's ``id``), or an empty dict if the database is
        unreachable or the query fails.
    """
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(**MYSQL_CONFIG)
        cursor = connection.cursor(dictionary=True)

        # Query the dataset configuration table.
        query = "SELECT code, path FROM dataset_config order by id asc"
        cursor.execute(query)
        dataset_configs = cursor.fetchall()

        # Convert rows to a {code: path} dict.
        dataset_mapping = {row['code']: row['path'] for row in dataset_configs}

        print("Dataset mapping loaded from database:", dataset_mapping)
        return dataset_mapping

    except Error as e:
        print(f"Error connecting to MySQL database: {e}")
        # Fall back to an empty mapping so callers can decide how to react.
        return {}
    finally:
        # Always release DB resources — the original leaked the connection
        # and cursor when an error occurred after connect() succeeded.
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()

# Dataset mapping is populated dynamically at startup (see startup_event)
# and refreshed via the /refresh-datasets endpoint.
DATASET_MAPPING = {}

# Per-dataset cache of document stores, retrievers and pipelines.
class DatasetCache:
    """In-process cache so each dataset's RAG stack is built only once."""

    def __init__(self):
        self.document_stores = {}
        self.retrievers = {}
        self.pipelines = {}
        self.lock = Lock()  # guards cache population only

    def refresh_mapping(self):
        """Reload the dataset code -> path mapping from the database."""
        global DATASET_MAPPING
        DATASET_MAPPING = load_dataset_mapping()

    @asynccontextmanager
    async def get_pipeline(self, dataset_name):
        """Yield the cached pipeline for *dataset_name*, building it on first use.

        Bug fix: the original held ``self.lock`` across the ``yield``, so
        every /ask request — including ones for already-cached datasets —
        was serialized on a single global lock for the full duration of the
        pipeline run. The lock now only protects cache population.
        """
        with self.lock:
            if dataset_name not in self.pipelines:
                # Build and cache a new pipeline for this dataset.
                doc_store, ret, pipe = init_rag_system(dataset_name)
                self.document_stores[dataset_name] = doc_store
                self.retrievers[dataset_name] = ret
                self.pipelines[dataset_name] = pipe
            pipeline = self.pipelines[dataset_name]

        try:
            yield pipeline
        finally:
            pass

# Process-wide cache instance shared by all request handlers.
dataset_cache = DatasetCache()

# Request body for the /ask endpoint.
class Query(BaseModel):
    """Incoming question plus routing options for /ask."""
    question: str
    type: str = "LLM"  # "LLM" = direct model answer; "RAG" = retrieval-augmented
    dataset: str = ""   # dataset code; must be non-empty when type == "RAG"

# Upstream model API configuration.
# OLLAMA_API_URL = "http://localhost:11434/api/generate"
DEEPSEEK_API_URL = "https://api.deepseek.com/chat/completions"
DEEPSEEK_API_KEY = "sk-6578fea050de41ca880ef320becb3786"  # NOTE(review): hard-coded API key — move to an environment variable

# Synchronous, non-streaming call to the DeepSeek chat-completion API.
def deepseek_call(prompt):
    """Send *prompt* to DeepSeek and return the assistant's reply text.

    Raises:
        Exception: on non-200 HTTP status or an unparseable JSON body.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {DEEPSEEK_API_KEY}"
    }
    data = {
        "model": "deepseek-chat",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        "stream": False
    }
    # Timeout added: the original had none, so a hung upstream could block
    # a worker thread forever.
    response = requests.post(DEEPSEEK_API_URL, headers=headers, json=data, timeout=60)
    if response.status_code == 200:
        try:
            result = response.json()
            return result.get("choices", [{}])[0].get("message", {}).get("content", "")
        except requests.exceptions.JSONDecodeError as e:
            raise Exception(f"Failed to parse JSON response: {str(e)}")
    else:
        raise Exception(f"DeepSeek request failed with status code {response.status_code}: {response.text}")

# Custom Ollama model call helpers (commented out but kept for reference).
"""
def ollama_call(prompt):
    data = {
        "model": "deepseek-r1:7b",
        "prompt": prompt,
        "stream": False
    }
    response = requests.post(OLLAMA_API_URL, json=data)
    if response.status_code == 200:
        try:
            result = response.json()
            return result.get("response", "")
        except requests.exceptions.JSONDecodeError as e:
            print("Raw response:", response.text)
            raise Exception(f"Failed to parse JSON response: {str(e)}")
    else:
        raise Exception(f"Ollama request failed with status code {response.status_code}: {response.text}")
    
# 添加自定义调用层类
class OllamaInvocationLayer:
    def __init__(self, model):
        self.model = model
        
    def _ensure_token_limit(self, prompt):
        # 简单返回prompt，不做token限制
        return prompt
        
    def invoke(self, *args, **kwargs):
        return self.model.invoke(*args, **kwargs)
"""
# Invocation-layer adapter expected by Haystack's PromptNode machinery.
class DeepSeekInvocationLayer:
    """Thin pass-through wrapper around a model object.

    Exposes the minimal surface Haystack expects from an invocation layer:
    ``_ensure_token_limit`` and ``invoke``.
    """

    def __init__(self, model):
        self.model = model

    def _ensure_token_limit(self, prompt):
        """Return *prompt* as-is; no token-limit enforcement is applied."""
        return prompt

    def invoke(self, *args, **kwargs):
        """Delegate the call straight to the wrapped model's ``invoke``."""
        return self.model.invoke(*args, **kwargs)

# PromptModel subclass that routes generation through the DeepSeek API,
# with optional streaming support.
class DeepSeekPromptModel(PromptModel):
    def __init__(self):
        # NOTE(review): super().__init__() is deliberately not called, so any
        # state the Haystack base class normally initializes is skipped —
        # confirm this is safe for the installed haystack version.
        self.model_name_or_path = "deepseek-chat"
        self.model_invocation_layer = DeepSeekInvocationLayer(self)

    async def ainvoke(self, prompt: str, **kwargs):
        """Yield SSE-formatted answer chunks for *prompt* (streaming path)."""
        async for chunk in deepseek_call_stream(prompt):
            yield chunk

    def invoke(self, *args, **kwargs):
        """Blocking generation; returns Haystack's [{'generated_text': ...}] shape.

        Raises:
            ValueError: if no prompt is supplied positionally or by keyword.
        """
        # Accept the prompt either as a keyword or as the first positional arg.
        prompt = kwargs.get('prompt', args[0] if args else None)
        if prompt:
            return [{"generated_text": deepseek_call(prompt)}]
        raise ValueError("No prompt provided.")

# Default FAISS document store: 384-dim embeddings with an exact ("Flat")
# index, metadata persisted to a local SQLite database.
document_store = FAISSDocumentStore(
    faiss_index_factory_str="Flat",
    embedding_dim=384,
    return_embedding=True,
    sql_url="sqlite:///faiss_document_store.db",  # persistent metadata storage
    index="document"
)

# Embedding retriever over the default document store.
# NOTE(review): "sentence-transformersall-MiniLM-L6-v2" looks like it is
# missing a "/" (sentence-transformers/all-MiniLM-L6-v2) — confirm this
# local model directory actually exists with this exact name.
local_model_path="sentence-transformersall-MiniLM-L6-v2"  # local sentence-transformers model
retriever = EmbeddingRetriever(
    document_store=document_store,
    embedding_model=local_model_path,
    top_k=5,  # cap the number of documents returned
    batch_size=32,  # larger batches for better embedding throughput
)

# Build the PromptNode backed by the DeepSeek model.
def deepseek_prompt_node():
    """Create a PromptNode wired to DeepSeekPromptModel with a RAG template."""
    deepseek_model = DeepSeekPromptModel()
    template = PromptTemplate(prompt="Context: {join(documents)}; Question: {query}; Answer:")
    
    return PromptNode(
        model_name_or_path=deepseek_model,
        default_prompt_template=template,
        max_length=256
    )

# Shared prompt node reused by every pipeline built in this module.
prompt_node = deepseek_prompt_node()

# Default RAG pipeline: Query -> Retriever -> PromptNode.
rag_pipeline = Pipeline()
rag_pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
rag_pipeline.add_node(component=prompt_node, name="PromptNode", inputs=["Retriever"])

# Logging configuration
def setup_logger():
    """Configure root logging to a rotating file plus the console.

    Idempotent: repeated calls (e.g. on module re-import or reload) no
    longer stack duplicate handlers on the root logger, which previously
    caused every message to be emitted multiple times.

    Returns:
        logging.Logger: the logger for this module.
    """
    # Ensure the log directory exists.
    log_dir = 'logs'
    os.makedirs(log_dir, exist_ok=True)

    # Log file name includes the current date.
    current_date = datetime.now().strftime('%Y%m%d')
    log_file = os.path.join(log_dir, f'rag_{current_date}.log')

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)

    # Attach handlers only once per process.
    if not root_logger.handlers:
        # RotatingFileHandler: rotate at 200MB, keep 10 backups.
        file_handler = RotatingFileHandler(
            log_file,
            maxBytes=200 * 1024 * 1024,  # 200MB
            backupCount=10,
            encoding='utf-8'
        )
        file_handler.setFormatter(formatter)

        # Console output uses the same format.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)

        root_logger.addHandler(file_handler)
        root_logger.addHandler(console_handler)

    return logging.getLogger(__name__)

# Module-level logger, initialized at import time.
logger = setup_logger()

# Initialize (or fetch from cache) the RAG components for one dataset.
def init_rag_system(dataset_name):
    """Return ``(document_store, retriever, pipeline)`` for *dataset_name*.

    Resolution order:
      1. in-process cache hit (dataset_cache);
      2. load a previously saved FAISS index + SQLite store from disk;
      3. build from the raw JSON dataset (first 2000 rows), embed the
         deduplicated Q/A pairs, and persist the index for next time.

    Raises:
        KeyError: if *dataset_name* is not present in DATASET_MAPPING.
        Exception: re-raised after deleting partially written files when
            loading or building fails.
    """
    logger.info(f"Initializing dataset: {dataset_name}")
    
    # Fast path: reuse already-initialized components from the cache.
    if dataset_name in dataset_cache.document_stores:
        logger.info(f"Using cached dataset: {dataset_name}")
        return (
            dataset_cache.document_stores[dataset_name],
            dataset_cache.retrievers[dataset_name],
            dataset_cache.pipelines[dataset_name]
        )
    
    # Per-dataset persistence locations.
    faiss_index_path = f"faiss_index_{dataset_name}.faiss"
    sql_url = f"sqlite:///{dataset_name}_document_store.db"
    
    # If a saved index exists, load it instead of re-embedding everything.
    if os.path.exists(faiss_index_path):
        logger.info(f"Loading existing FAISS index from {faiss_index_path}")
        try:
            db_path = sql_url.replace('sqlite:///', '')
            if os.path.exists(db_path):
                logger.info("Loading existing FAISSDocumentStore...")
                # Restore the previously saved FAISSDocumentStore.
                doc_store = FAISSDocumentStore.load(
                    index_path=faiss_index_path
                )
                # Point the store at this dataset's SQLite database.
                # NOTE(review): assigning sql_url after load() may not rebind
                # the underlying SQLAlchemy session — verify against the
                # installed haystack version.
                doc_store.sql_url = sql_url
                # doc_store.load(faiss_index_path)
            else:
                # The .db file is missing: create a fresh store, then pull
                # the FAISS index into it.
                doc_store = FAISSDocumentStore(
                    faiss_index_factory_str="Flat",
                    embedding_dim=384,
                    return_embedding=True,
                    sql_url=sql_url,
                    index=dataset_name
                )
                doc_store.load(faiss_index_path)
            
            # Build the retriever and pipeline on top of the loaded store.
            ret = EmbeddingRetriever(
                document_store=doc_store,
                embedding_model="sentence-transformersall-MiniLM-L6-v2",
                top_k=5,
                batch_size=32
            )
            
            pipe = Pipeline()
            pipe.add_node(component=ret, name="Retriever", inputs=["Query"])
            pipe.add_node(component=prompt_node, name="PromptNode", inputs=["Retriever"])
            
            # Cache for subsequent requests.
            dataset_cache.document_stores[dataset_name] = doc_store
            dataset_cache.retrievers[dataset_name] = ret
            dataset_cache.pipelines[dataset_name] = pipe
            
            return doc_store, ret, pipe
        
        except Exception as e:
            # On failure, delete partially written files so the next attempt
            # rebuilds from scratch.
            logger.error(f"Error during initialization: {e}")
            if os.path.exists(faiss_index_path):
                os.remove(faiss_index_path)
            if os.path.exists(f"{dataset_name}_document_store.db"):
                os.remove(f"{dataset_name}_document_store.db")
            raise
    
    # No saved index: build the dataset from its JSON source file.
    dataset_path = DATASET_MAPPING[dataset_name]
    df = pd.read_json(dataset_path)

    # Only the first 2000 rows are indexed.
    df = df[:2000]
    
    documents = []
    seen_content = set()
    
    for _, row in tqdm(df.iterrows(), total=len(df), desc="Processing documents"):
        try:
            # Rows may use either (input/output) or (Question/Response) columns.
            q = row.get('input', row.get('Question', ''))
            a = row.get('output', row.get('Response', ''))
            if q and a:
                content = f"问题：{q} 答案：{a}"
                # Skip duplicate Q/A pairs.
                if content not in seen_content:
                    seen_content.add(content)
                    documents.append(
                        Document(
                            content=content,
                            meta={
                                "type": "qa_pair",
                                "question": q,
                                "answer_length": len(a),
                                "id": f"{dataset_name}_{len(documents)}"
                            }
                        )
                    )
        except Exception as e:
            logger.error(f"Error processing row: {e}")
            continue
    
    logger.info(f"Found {len(documents)} unique documents")
    
    try:
        doc_store = FAISSDocumentStore(
            faiss_index_factory_str="Flat",
            embedding_dim=384,
            return_embedding=True,
            sql_url=sql_url,
            index=dataset_name
        )
        # Build the retriever for this store.
        ret = EmbeddingRetriever(
            document_store=doc_store,
            embedding_model="sentence-transformersall-MiniLM-L6-v2",
            top_k=5,
            batch_size=32
        )
        
        # Write documents, then compute their embeddings in one pass.
        doc_store.write_documents(documents)
        doc_store.update_embeddings(ret)
        
        # Persist the FAISS index for future startups.
        doc_store.save(faiss_index_path)
        logger.info(f"Successfully processed and saved {len(documents)} documents")
        
        # Assemble the query pipeline.
        pipe = Pipeline()
        pipe.add_node(component=ret, name="Retriever", inputs=["Query"])
        pipe.add_node(component=prompt_node, name="PromptNode", inputs=["Retriever"])
        
        # Cache for subsequent requests.
        dataset_cache.document_stores[dataset_name] = doc_store
        dataset_cache.retrievers[dataset_name] = ret
        dataset_cache.pipelines[dataset_name] = pipe
        
        return doc_store, ret, pipe
        
    except Exception as e:
        # On failure, delete partially written files so the next attempt
        # rebuilds from scratch.
        logger.error(f"Error during initialization: {e}")
        if os.path.exists(faiss_index_path):
            os.remove(faiss_index_path)
        if os.path.exists(f"{dataset_name}_document_store.db"):
            os.remove(f"{dataset_name}_document_store.db")
        raise

# Streaming call to the DeepSeek chat-completion API.
async def deepseek_call_stream(prompt):
    """Stream DeepSeek's answer for *prompt* as server-sent-event strings.

    Yields ``data: {"answer": ...}`` chunks, a ``data: [DONE]`` terminator,
    or a single ``data: {"error": ...}`` event on HTTP failure.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {DEEPSEEK_API_KEY}"
    }
    data = {
        "model": "deepseek-chat",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        "stream": True
    }
    
    # No read timeout: the stream legitimately stays open while tokens
    # trickle in; the connect/write timeouts still guard against a dead host.
    timeout = httpx.Timeout(connect=10.0, read=None, write=10.0, pool=10.0)
    async with httpx.AsyncClient(timeout=timeout) as client:
        async with client.stream('POST', DEEPSEEK_API_URL, headers=headers, json=data) as response:
            if response.status_code != 200:
                # Bug fix: httpx streaming responses have no awaitable
                # ``text()`` — the body must be read with ``aread()`` first;
                # the original ``await response.text()`` always raised.
                error_msg = (await response.aread()).decode("utf-8", errors="replace")
                yield f"data: {json.dumps({'error': f'DeepSeek request failed with status code {response.status_code}: {error_msg}'}, ensure_ascii=False)}\n\n"
                return
                
            async for line in response.aiter_lines():
                if line.strip():
                    if line.startswith("data: "):
                        line = line[6:]  # strip the SSE "data: " prefix
                    if line == "[DONE]":
                        yield f"data: [DONE]\n\n"
                        break
                    try:
                        json_data = json.loads(line)
                        content = json_data.get("choices", [{}])[0].get("delta", {}).get("content", "")
                        if content:
                            yield f"data: {json.dumps({'answer': content}, ensure_ascii=False)}\n\n"
                            await asyncio.sleep(0.01)  # small delay to keep the stream flowing
                    except json.JSONDecodeError:
                        continue

# /ask endpoint: stream an answer either directly from the LLM or via RAG.
@app.post("/ask")
async def ask_question(query: Query):
    """Answer *query.question* as a server-sent-event stream.

    ``type == "LLM"`` (or an empty dataset): stream the raw model answer.
    ``type == "RAG"``: retrieve from the dataset's FAISS index, filter by
    similarity score, and stream an answer grounded in that context.
    Any other ``type`` yields a JSON error response.
    """
    try:
        if query.type.upper() == "LLM" or not query.dataset:
            return StreamingResponse(
                deepseek_call_stream(query.question),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"
                }
            )
        
        elif query.type.upper() == "RAG":
            async def generate_rag_response():
                async with dataset_cache.get_pipeline(query.dataset) as pipeline:
                    # Run retrieval in a worker thread — pipeline.run is blocking.
                    result = await asyncio.to_thread(
                        pipeline.run,
                        query=query.question,
                        params={
                            "Retriever": {
                                "top_k": 10,  # retrieve extra candidates before filtering
                                "filters": {"type": "qa_pair"},
                                "scale_score": True,  # normalize similarity scores
                            }
                        }
                    )
                    
                    # Filter retrieved documents by similarity score.
                    documents = result.get("documents", [])
                    # Log how many documents came back.
                    logger.info(f"Found {len(documents)} documents")
                    filtered_docs = [
                        doc for doc in documents 
                        if hasattr(doc, 'score') and doc.score >= 0.511  # similarity threshold
                    ][:5]  # keep at most the 5 most relevant
                    
                    if not filtered_docs:
                        prompt = f"Context: {'请先客气地围绕当前知识库中没有找到相似度足够高的数据,请您提供更多信息来回答,然后再回答用户问题'}; Question: {query.question}; Answer:"
                    else:
                        # Log similarity scores for debugging.
                        for doc in filtered_docs:
                            logger.info(f"Document score: {doc.score}, content: {doc.content[:20]}...")
                            
                        prompt = f"Context: {' '.join([doc.content for doc in filtered_docs])}; Question: {query.question}; Answer:"
                    print("prompt:" + prompt)
                    async for chunk in deepseek_call_stream(prompt):
                        yield chunk
            
            return StreamingResponse(
                generate_rag_response(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"
                }
            )
        else:
            return Response(
                content=json.dumps(
                    {"error": f"Invalid type: {query.type}. Must be either 'LLM' or 'RAG'"}, 
                    ensure_ascii=False
                ),
                media_type="application/json"
            )
            
    except Exception as e:
        return Response(
            content=json.dumps({"error": str(e)}, ensure_ascii=False),
            media_type="application/json"
        )

# Startup hook: load dataset configuration and warm up a default dataset.
@app.on_event("startup")
async def startup_event():
    """Load DATASET_MAPPING from MySQL and initialize the first dataset.

    Raises:
        Exception: if no dataset configuration can be loaded, or dataset
            initialization fails (startup is aborted).
    """
    global document_store, retriever, rag_pipeline, DATASET_MAPPING
    
    logger.info("Starting up RAG system...")
    
    try:
        # Load the dataset registry from the database.
        DATASET_MAPPING = load_dataset_mapping()
        if not DATASET_MAPPING:
            logger.error("No dataset configurations found in database")
            raise Exception("Failed to load dataset configurations from database")
        
        # Use the first configured dataset as the default.
        default_dataset = next(iter(DATASET_MAPPING.keys()))
        document_store, retriever, rag_pipeline = init_rag_system(default_dataset)
        app.current_dataset = default_dataset
        logger.info(f"RAG system initialized with dataset: {default_dataset}")
        
    except Exception as e:
        logger.error(f"Error during startup: {e}")
        raise

# Endpoint to re-read the dataset registry from the database.
@app.post("/refresh-datasets")
async def refresh_datasets():
    """Reload DATASET_MAPPING from MySQL and report the known dataset codes."""
    try:
        dataset_cache.refresh_mapping()
    except Exception as e:
        return {"status": "error", "message": str(e)}
    return {"status": "success", "datasets": list(DATASET_MAPPING.keys())}

# 1. Resource cleanup helper
async def cleanup_resources():
    """Close every cached document store and its DB handles, then clear the cache."""
    logger.info("Cleaning up resources...")
    try:
        # First pass: run each store's own close() hook off the event loop.
        for store in dataset_cache.document_stores.values():
            if hasattr(store, 'close'):
                await asyncio.to_thread(store.close)

        # Second pass: tear down raw SQLite connections and SQLAlchemy sessions.
        for store in dataset_cache.document_stores.values():
            if hasattr(store, '_connection'):  # FAISSDocumentStore's SQLite handle
                try:
                    store._connection.close()
                    logger.info(f"Closed SQLite connection for {store.index}")
                except Exception as e:
                    logger.error(f"Error closing SQLite connection: {e}")

            if hasattr(store, 'session'):  # SQLAlchemy session
                try:
                    store.session.close()
                    logger.info(f"Closed SQLAlchemy session for {store.index}")
                except Exception as e:
                    logger.error(f"Error closing SQLAlchemy session: {e}")

        # Drop every cached object.
        dataset_cache.document_stores.clear()
        dataset_cache.retrievers.clear()
        dataset_cache.pipelines.clear()

    except Exception as e:
        logger.error(f"Error during cleanup: {e}")
    logger.info("Cleanup completed")

# 2. Shutdown hook: release cached resources before the process exits.
@app.on_event("shutdown")
async def shutdown_event():
    """FastAPI shutdown handler; delegates to cleanup_resources()."""
    logger.info("Shutting down RAG system...")
    try:
        await cleanup_resources()
    except Exception as e:
        logger.error(f"Error during shutdown: {e}")

# 3. OS signal handling: trigger async cleanup on SIGINT/SIGTERM.
def signal_handler(signum, frame):
    """Schedule resource cleanup when a stop signal is received.

    Bug fix: the original called ``asyncio.create_task`` unconditionally,
    which raises ``RuntimeError`` when no event loop is running in this
    thread (e.g. a signal delivered before uvicorn's loop starts or after
    it stops). We now schedule on the running loop when one exists and
    fall back to a blocking ``asyncio.run`` otherwise.
    """
    logger.info(f"Received signal {signum}")
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No loop in this thread: perform cleanup synchronously.
        asyncio.run(cleanup_resources())
    else:
        loop.create_task(cleanup_resources())

signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

# New API endpoint: kick off vector synchronization for one dataset.
@app.post("/sync-vectors")
async def sync_vectors(dataset_name: str = QueryParam(..., description="数据集名称")):
    """Start vector sync for *dataset_name* and return the sync result."""
    try:
        logger.info(f"Starting vector sync for dataset: {dataset_name}")
        outcome = start_vector_sync(dataset_name)
        logger.info(f"Sync result: {outcome}")
    except Exception as e:
        logger.error(f"Error in sync_vectors: {e}")
        return {"status": "error", "message": str(e)}
    return outcome

# New API endpoint: report the progress of an in-flight vector sync.
@app.get("/sync-progress")
async def check_sync_progress(dataset_name: str = QueryParam(..., description="数据集名称")):
    """Return the current synchronization progress for *dataset_name*."""
    try:
        logger.info(f"Checking sync progress for dataset: {dataset_name}")
        state = get_sync_progress(dataset_name)
        logger.info(f"Progress: {state}")
    except Exception as e:
        logger.error(f"Error in check_sync_progress: {e}")
        return {"status": "error", "message": str(e)}
    return state

# 4. Script entry point: run the API with uvicorn on port 8080.
if __name__ == "__main__":
    try:
        logger.info("Starting RAG server on http://0.0.0.0:8080")
        uvicorn.run(
            app, 
            host="0.0.0.0", 
            port=8080,
            log_level="info",
            loop="asyncio"
        )
    except KeyboardInterrupt:
        logger.info("Received keyboard interrupt")
    finally:
        logger.info("Server shutdown completed")  