from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.llms import CustomLLM, CompletionResponse
from transformers import AutoModelForCausalLM, AutoTokenizer
from pydantic import Field
import torch
from typing import Any, Iterator, List
from llama_index.core.llms import LLMMetadata
from llama_index.core import QueryBundle
from llama_index.core.postprocessor import (
    KeywordNodePostprocessor,
    SentenceEmbeddingOptimizer,
    LLMRerank
)

# --- Global chunking configuration ---
Settings.chunk_size = 512
Settings.chunk_overlap = 50

# --- Embedding model: local m3e-base running on CPU ---
embed_model = HuggingFaceEmbedding(
    model_name=r"D:\ideaSpace\MyPython\models\m3e-base",
    device="cpu",
    embed_batch_size=4,  # small batches keep CPU memory pressure low
)

# --- Load the corpus and build the vector index ---
reader = SimpleDirectoryReader(
    r"D:\ideaSpace\rag-in-action-master\90-文档-Data\山西文旅",
    file_metadata=lambda x: {"encoding": "utf-8"},
)
documents = reader.load_data()

index = VectorStoreIndex.from_documents(
    documents,
    embed_model=embed_model,
    show_progress=True,
)

# 4. 自定义本地LLM（优化版）
class OptimizedQwenLLM(CustomLLM):
    """Local Qwen1.5-0.5B-Chat wrapper for llama_index, tuned for CPU inference.

    Loads tokenizer and model once in ``__init__`` and serves completions
    via ``complete`` / ``stream_complete``.
    """

    # Populated in __init__; excluded from pydantic serialization because
    # HF model/tokenizer objects are not serializable.
    tokenizer: Any = Field(default=None, exclude=True)
    model: Any = Field(default=None, exclude=True)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        model_path = r"D:\ideaSpace\MyPython\models\qwen\Qwen1.5-0.5B-Chat"

        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True,
            use_fast=False  # slow tokenizer is more stable for this checkpoint
        )

        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            device_map="cpu",
            torch_dtype=torch.float32,
            trust_remote_code=True,
            low_cpu_mem_usage=True  # stream weights in to reduce peak RAM
        )
        self.model.eval()  # inference mode (disables dropout etc.)

    @property
    def metadata(self) -> LLMMetadata:
        """Advertise context window and output budget to llama_index."""
        return LLMMetadata(
            model_name="Qwen1.5-0.5B-Chat",
            context_window=2048,
            num_output=512,
            is_chat_model=True
        )

    def complete(self, prompt: str, **kwargs) -> CompletionResponse:
        """Generate a completion for ``prompt``.

        Returns only the newly generated text: the prompt echo produced by
        decoder-only ``generate`` output is stripped.
        """
        inputs = self.tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=1800  # leave headroom below the 2048-token context window
        )

        with torch.no_grad():  # no gradients needed at inference time
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=256,  # cap generation length for CPU speed
                do_sample=True,
                temperature=0.7,
                top_p=0.9
            )

        # BUG FIX: for decoder-only models, `generate` returns the prompt
        # tokens followed by the continuation. Decoding outputs[0] in full
        # would echo the entire prompt back to the caller, so slice off the
        # prompt before decoding.
        prompt_len = inputs["input_ids"].shape[1]
        text = self.tokenizer.decode(
            outputs[0][prompt_len:],
            skip_special_tokens=True
        )
        return CompletionResponse(text=text)

    def stream_complete(self, prompt: str, **kwargs) -> Iterator[CompletionResponse]:
        """Pseudo-streaming: generate the full answer, then yield it once."""
        response = self.complete(prompt, **kwargs)
        yield CompletionResponse(text=response.text, delta=response.text)

# --- Instantiate the local LLM ---
llm = OptimizedQwenLLM()

# --- Node post-processors, applied in order: filter -> optimize -> rerank ---
keyword_compress = KeywordNodePostprocessor(
    required_keywords=["景点", "旅游", "文化", "山西", "晋", "景区", "文化遗产"],
    required_all=False,  # any one required keyword is enough
    exclude_keywords=["价格", "门票", "费用", "￥"],
)

embedding_optimizer = SentenceEmbeddingOptimizer(
    embed_model=embed_model,
    percentile_cutoff=0.2,  # keep the top 20% most relevant sentences
    threshold_cutoff=None,
)

llm_rerank = LLMRerank(llm=llm, top_n=3)  # keep only the best 3 nodes

# --- Query engine wiring ---
query_engine = index.as_query_engine(
    llm=llm,
    similarity_top_k=15,  # breadth of the initial vector retrieval
    node_postprocessors=[keyword_compress, embedding_optimizer, llm_rerank],
    verbose=True,
)

# 8. 测试流程
def test_retrieval(query_str: str) -> None:
    """Run one query through each retrieval stage separately for inspection,
    then through the full query engine, printing node counts at every step.

    Args:
        query_str: Natural-language question to test.
    """
    print(f"\n=== 测试查询: '{query_str}' ===")

    # Stage 0: raw vector retrieval (same breadth as the query engine).
    retriever = index.as_retriever(similarity_top_k=15)
    initial_nodes = retriever.retrieve(query_str)
    print(f"初始检索到 {len(initial_nodes)} 个节点")

    query_bundle = QueryBundle(query_str)

    # Stage 1: keyword filtering.
    # Renamed from `keyword_nodes` to stop shadowing the module-level list.
    filtered_nodes = keyword_compress.postprocess_nodes(initial_nodes, query_bundle)
    print(f"关键词过滤后剩余: {len(filtered_nodes)}个节点")

    # Stage 2: sentence-embedding optimization.
    embedding_nodes = embedding_optimizer.postprocess_nodes(filtered_nodes, query_bundle)
    print(f"相似度优化后剩余: {len(embedding_nodes)}个节点")

    # Stage 3: LLM reranking (skipped when everything was filtered out).
    if embedding_nodes:
        final_nodes = llm_rerank.postprocess_nodes(embedding_nodes, query_bundle)
        print(f"最终保留 {len(final_nodes)} 个节点:")

        for i, node in enumerate(final_nodes):
            # Some postprocessors may leave `score` as None; guard so the
            # `:.4f` format spec does not raise TypeError.
            score_text = f"{node.score:.4f}" if node.score is not None else "N/A"
            print(f"\n[节点 {i+1}] 得分: {score_text}")
            print(node.text[:300] + "...")
    else:
        print("警告: 所有节点已被过滤")

    # Full end-to-end query through the configured engine.
    response = query_engine.query(query_str)
    print("\n=== 系统回答 ===")
    print(response if response else "未能生成有效回答")

# --- Smoke-test questions covering landmarks, history, and world heritage ---
test_queries = [
    "山西省有哪些著名的文化旅游景点？",
    "请介绍云冈石窟的历史价值",
    "山西有哪些被列入世界文化遗产的景点？",
]

for question in test_queries:
    test_retrieval(question)