# -*- coding: utf-8 -*-
import torch
import os
from pathlib import Path
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage import StorageContext
from llama_index.vector_stores.faiss import FaissVectorStore
import faiss
import time
import requests

def check_ollama_service(base_url="http://localhost:11434", timeout=5):
    """Return True if the Ollama HTTP service answers at *base_url*.

    Args:
        base_url: Root URL of the Ollama server (default matches a local
            install). NOTE(review): the LLM elsewhere in this file targets
            http://192.168.18.5:11434 — confirm which host should be probed.
        timeout: Seconds to wait for the HTTP response; prevents the
            original unbounded hang when the host is unreachable.

    Returns:
        bool: True when GET /api/tags returns HTTP 200, False otherwise.
    """
    try:
        # /api/tags is a cheap read-only endpoint, suitable as a health probe.
        response = requests.get(f"{base_url}/api/tags", timeout=timeout)
        return response.status_code == 200
    except requests.RequestException:
        # Catch only network-level errors; a bare except would also swallow
        # KeyboardInterrupt/SystemExit.
        return False

def wait_for_ollama(max_wait=60):
    """Poll the Ollama service once per second until it responds.

    Args:
        max_wait: Maximum number of one-second polling attempts.

    Returns:
        bool: True as soon as the service answers, False after *max_wait*
        unsuccessful attempts.
    """
    print("等待Ollama服务启动...")
    attempt = 0
    while attempt < max_wait:
        attempt += 1
        if check_ollama_service():
            print("Ollama服务已就绪")
            return True
        print(f"等待中... ({attempt}/{max_wait})")
        time.sleep(1)
    return False

def initialize_components():
    """Initialize the embedding model, the local LLM and the text splitter.

    Returns:
        tuple: (embed_model, llm, text_splitter) ready to be assigned to
        the llama_index global ``Settings``.

    Raises:
        RuntimeError: when the Ollama service does not come up in time, or
            the model cannot be reached / keeps returning empty responses.
    """
    try:
        # Wait for the Ollama service to come up.
        # NOTE(review): wait_for_ollama() probes localhost:11434 while the
        # Ollama LLM below targets 192.168.18.5:11434 — confirm whether these
        # are intentionally different hosts.
        if not wait_for_ollama():
            raise RuntimeError("Ollama服务启动超时，请确保Ollama服务正在运行")

        # Prefer GPU when available; the embedding model runs locally.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"使用设备: {device}")

        # --------------------- 1. Embedding model ---------------------
        embed_model = HuggingFaceEmbedding(
            model_name="BAAI/bge-base-zh-v1.5",
            device=device,
            embed_batch_size=32
        )

        # --------------------- 2. Local large language model ---------------------
        # Retry the connection a few times. A falsy probe response now counts
        # as a failure too (the original silently returned an untested llm).
        max_retries = 3
        retry_delay = 5
        for attempt in range(max_retries):
            try:
                llm = Ollama(
                    model="qwen3:4b",
                    temperature=0.3,
                    request_timeout=120,
                    base_url="http://192.168.18.5:11434"
                )
                # Probe the model so connectivity failures surface here
                # rather than at the first real query.
                test_response = llm.complete("测试连接")
                if test_response:
                    print("Ollama模型连接成功")
                    break
            except Exception as e:
                if attempt == max_retries - 1:
                    raise RuntimeError(f"无法连接到Ollama模型: {str(e)}") from e
                print(f"连接Ollama失败，{retry_delay}秒后重试...")
                time.sleep(retry_delay)
        else:
            # Every attempt completed without raising but returned a falsy
            # response — treat that as a failed connection instead of
            # handing back an unverified llm.
            raise RuntimeError("无法连接到Ollama模型: 模型返回空响应")

        # --------------------- 3. Text splitter ---------------------
        # 512/64 chunking tuned for Chinese documents split on newlines.
        text_splitter = SentenceSplitter(
            chunk_size=512,
            chunk_overlap=64,
            separator="\n",
            paragraph_separator="\n\n"
        )

        return embed_model, llm, text_splitter
    except Exception as e:
        # Surface the failure to the operator, then re-raise for the caller.
        print(f"初始化组件时出错: {str(e)}")
        raise

def build_vector_index(documents, embed_model, text_splitter,
                       persist_dir="./storage", dimension=768):
    """Build a FAISS-backed vector index from *documents* and persist it.

    Args:
        documents: Sequence of llama_index Document objects to index.
        embed_model: Embedding model used to vectorize the chunks.
        text_splitter: Node parser applied as a transformation.
        persist_dir: Directory where the index/storage files are written.
        dimension: Embedding vector dimension; the default 768 matches
            BAAI/bge-base-zh-v1.5 used by initialize_components(). Pass a
            different value when using another embedding model.

    Returns:
        VectorStoreIndex: the freshly built (and persisted) index.

    Raises:
        Exception: re-raised after logging when any build step fails.
    """
    try:
        # Make sure the persistence directory exists up front.
        persist_path = Path(persist_dir)
        persist_path.mkdir(parents=True, exist_ok=True)

        # Exact (brute-force) L2 FAISS index as the vector store backend.
        faiss_index = faiss.IndexFlatL2(dimension)
        vector_store = FaissVectorStore(faiss_index=faiss_index)

        storage_context = StorageContext.from_defaults(
            vector_store=vector_store
        )

        # Chunk, embed and index the documents.
        index = VectorStoreIndex.from_documents(
            documents,
            storage_context=storage_context,
            embed_model=embed_model,
            transformations=[text_splitter],
            show_progress=True
        )

        # Write the index to disk so it can be reloaded without rebuilding.
        storage_context.persist(persist_dir=str(persist_path))
        return index
    except Exception as e:
        print(f"构建向量索引时出错: {str(e)}")
        raise

def main():
    """Load documents, build the index and run an interactive query loop."""
    try:
        # Initialize all components (embedder, LLM, splitter).
        embed_model, llm, text_splitter = initialize_components()

        # Register them as llama_index global defaults.
        Settings.embed_model = embed_model
        Settings.llm = llm
        Settings.text_splitter = text_splitter

        # --------------------- Document loading ---------------------
        print("正在加载文档...")
        data_dir = Path("./input_files")
        if not data_dir.exists():
            print(f"错误: 数据目录 {data_dir} 不存在")
            return

        documents = SimpleDirectoryReader(
            input_dir=str(data_dir),
            recursive=True,
            required_exts=[".pdf", ".txt", ".docx", ".md"],
            exclude_hidden=True
        ).load_data()

        if not documents:
            print("错误: 没有找到任何文档")
            return

        # --------------------- Index construction ---------------------
        print("正在构建索引...")
        index = build_vector_index(documents, embed_model, text_splitter)

        # --------------------- Query engine ---------------------
        query_engine = index.as_query_engine(
            similarity_top_k=3,
            vector_store_query_mode="hybrid",
            alpha=0.5,
            verbose=True
        )

        # --------------------- Interactive query loop ---------------------
        print("系统准备就绪，输入'退出'结束会话")
        while True:
            try:
                query = input("\n请输入您的问题: ")
                if query.lower() in ["退出", "exit", "quit"]:
                    break

                # Retry the query a few times; response stays None when all
                # attempts fail (previously an unbound name caused a NameError
                # that was masked by the outer except handler).
                response = None
                max_retries = 3
                retry_delay = 5
                for attempt in range(max_retries):
                    try:
                        response = query_engine.query(query)
                        break
                    except Exception as e:
                        if attempt == max_retries - 1:
                            print(f"查询失败: {str(e)}")
                        else:
                            print(f"查询出错，{retry_delay}秒后重试...")
                            time.sleep(retry_delay)
                if response is None:
                    # All retries failed — move on to the next question.
                    continue

                # Print the answer.
                print("\n回答:", response)

                # Print the retrieved source chunks with their scores.
                # NOTE(review): node.score may be None for some retrievers,
                # which would make the :.4f format raise — confirm.
                print("\n参考来源:")
                for i, node in enumerate(response.source_nodes, 1):
                    print(f"{i}. 文件: {node.metadata.get('file_path', '未知')}")
                    print(f"   相关度: {node.score:.4f}")
                    print(f"   内容: {node.text[:200]}...\n")
            except Exception as e:
                # Keep the REPL alive on any single-query failure.
                print(f"查询时出错: {str(e)}")
                continue

    except Exception as e:
        print(f"程序运行出错: {str(e)}")
# Script entry point: run the pipeline only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()