import os
import time
import sys
# NOTE: these environment variables must be set BEFORE the huggingface
# imports below, or the warnings they suppress may already have fired.
# Disable the Hugging Face Hub symlink warning (noisy on Windows).
os.environ['HF_HUB_DISABLE_SYMLINKS_WARNING'] = '1'
# Disable the Hugging Face Hub Xet Storage warning.
os.environ['HF_HUB_DISABLE_XET_WARNING'] = '1'

# Add the project root (two directories up from this file) to the Python
# path so the project-local config_reader module can be imported.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(os.path.dirname(current_dir))
sys.path.append(project_root)

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
from config_reader import get_bailian_config

def _load_documents():
    """Load all documents from the bundled data/C2/pdf directory.

    The path is resolved relative to this file so the script works
    regardless of the current working directory.
    """
    print("开始加载数据...")
    data_path = os.path.join(os.path.dirname(__file__), "..", "..", "data", "C2", "pdf")
    print(f"数据路径: {os.path.abspath(data_path)}")
    documents = SimpleDirectoryReader(data_path).load_data()
    print(f"成功加载 {len(documents)} 个文档")
    return documents


def _build_embed_model():
    """Return the first HuggingFace embedding model that loads successfully.

    Candidates are tried in priority order (lightweight model first); if
    every candidate fails, the last load error is re-raised.
    """
    print("开始构建嵌入模型...")
    print("💡 提示：首次运行需要下载模型文件，可能需要几分钟时间...")
    print("🚀 正在初始化嵌入模型...")

    # Priority order: lightweight first, then higher-quality / multilingual
    # fallbacks.
    model_options = [
        ("sentence-transformers/all-MiniLM-L6-v2", "轻量级模型 (推荐)"),
        ("BAAI/bge-base-en-v1.5", "高质量模型"),
        ("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2", "多语言模型"),
    ]

    start_time = time.time()
    for i, (model_name, description) in enumerate(model_options):
        try:
            print(f"📥 正在尝试加载 {description} ({model_name})...")
            embed_model = HuggingFaceEmbedding(
                model_name=model_name,
                cache_folder="./model",  # local cache to avoid re-downloading
            )
            elapsed_time = time.time() - start_time
            print(f"✅ {description} 加载成功 (耗时: {elapsed_time:.2f}秒)")
            return embed_model
        except Exception as e:
            print(f"❌ {description} 加载失败: {e}")
            # Compare by index, not by model name, so the "last candidate"
            # test stays correct even if a name appears twice in the list.
            if i < len(model_options) - 1:
                print("🔄 尝试下一个模型...")
                start_time = time.time()  # restart the timer for the next candidate
            else:
                print("❌ 所有模型加载失败，请检查网络连接")
                raise  # bare raise preserves the original traceback


def _build_llm():
    """Configure an OpenAI-compatible client pointed at the Bailian API.

    Reads the API token via the project-local get_bailian_config();
    raises ValueError if the token is missing, and re-raises any config
    lookup failure after reporting it.
    """
    print("🤖 正在初始化百炼（通义千问）LLM...")

    try:
        bailian_config = get_bailian_config()
        bailian_api_token = bailian_config['token']
        if not bailian_api_token:
            raise ValueError("请在 config.ini 文件中设置 BAILIAN_API_TOKEN")
        print("✅ 百炼 API 密钥获取成功")
    except Exception as e:
        print(f"❌ 获取百炼 API 密钥失败: {e}")
        raise  # keep the original exception chain intact

    # Use an OpenAI-compatible model name, but route requests to the
    # Bailian (DashScope) compatible-mode endpoint.
    llm = OpenAI(
        model="gpt-3.5-turbo",
        api_key=bailian_api_token,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        temperature=0.7,
        max_tokens=2000,
        timeout=60.0,
    )
    print("✅ 百炼 LLM 配置完成")
    return llm


def main():
    """End-to-end demo: load PDFs, build a vector index, query via Bailian.

    Acts as the script's top-level error boundary: any failure is printed
    with a full traceback instead of propagating out.
    """
    try:
        documents = _load_documents()
        embed_model = _build_embed_model()

        print("开始构建向量索引...")
        index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
        print("向量索引构建完成")

        print("配置查询引擎...")
        llm = _build_llm()

        query_engine = index.as_query_engine(
            similarity_top_k=5,
            llm=llm,  # answer with Bailian (Tongyi Qianwen)
        )

        print("开始查询...")
        response = query_engine.query("RAG的核心价值是什么？")

        print("查询结果:")
        print(response)

    except Exception as e:
        # Top-level boundary: report the error with traceback and return
        # normally (script-style handling).
        print(f"发生错误: {e}")
        import traceback
        traceback.print_exc()

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
