from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.llms import CustomLLM, CompletionResponse
from llama_index.core.postprocessor import LLMRerank
from transformers import AutoModelForCausalLM, AutoTokenizer
from pydantic import Field
import torch
from typing import Any, Iterator
from llama_index.core.llms import LLMMetadata

# 1. Embedding model: local BGE-small (Chinese) checkpoint, forced onto CPU.
embed_model = HuggingFaceEmbedding(
    device="cpu",
    model_name="D:/ideaSpace/MyPython/models/bge-small-zh-v1.5",
)

# 2. Load source documents and build an in-memory vector index over them.
docs_dir = "D:\\ideaSpace\\rag-in-action-master\\90-文档-Data\\山西文旅"
documents = SimpleDirectoryReader(docs_dir).load_data()
index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)

# 3. 自定义本地LLM
# 3. Custom local LLM wrapping a Qwen chat model via HuggingFace transformers.
class LocalQwenLLM(CustomLLM):
    """LlamaIndex `CustomLLM` backed by a local Qwen1.5-0.5B-Chat model on CPU.

    The tokenizer/model fields are excluded from pydantic serialization and
    populated eagerly in ``__init__`` from a fixed local model directory.
    """

    tokenizer: Any = Field(default=None, exclude=True)
    model: Any = Field(default=None, exclude=True)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        model_path = r"D:\ideaSpace\MyPython\models\qwen\Qwen1.5-0.5B-Chat"
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            device_map="cpu",
            torch_dtype=torch.float32,  # CPU-friendly dtype
            trust_remote_code=True,
        )

    @property
    def metadata(self) -> LLMMetadata:
        """Advertise model capabilities to LlamaIndex (window/output sizes)."""
        return LLMMetadata(
            model_name="Qwen-CPU",
            context_window=2048,
            num_output=256,
            is_chat_model=True,
        )

    def _generate(self, prompt: str) -> str:
        """Run generation and return ONLY the newly generated continuation.

        Bug fix: the original code decoded ``outputs[0]`` in full. For
        decoder-only models ``generate`` returns prompt + continuation, so
        every completion echoed the entire prompt back into the response.
        We slice off the prompt tokens before decoding.
        """
        inputs = self.tokenizer(prompt, return_tensors="pt")
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            outputs = self.model.generate(**inputs, max_new_tokens=256)
        prompt_len = inputs["input_ids"].shape[1]
        return self.tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        """Return a single non-streaming completion for *prompt*."""
        return CompletionResponse(text=self._generate(prompt))

    def stream_complete(self, prompt: str, **kwargs: Any) -> Iterator[CompletionResponse]:
        """Pseudo-streaming: generate the full text, then yield it as one chunk."""
        text = self._generate(prompt)
        yield CompletionResponse(text=text, delta=text)

# 4. Build the query engine, using LLM-based reranking as a compression step.
llm = LocalQwenLLM()
reranker = LLMRerank(llm=llm, top_n=2)  # let the LLM keep the 2 best nodes
query_engine = index.as_query_engine(
    llm=llm,
    similarity_top_k=5,
    node_postprocessors=[reranker],
)

# 5. Run a query and print the synthesized answer.
response = query_engine.query("山西省的主要旅游景点有哪些？")
print(response)

# Loading the spaCy Chinese model zh_core_web_sm (kept for reference):
# import spacy
# # 1. Point spacy.load directly at the local model directory
# nlp = spacy.load(r"D:\ideaSpace\MyPython\models\zh_core_web_sm")
# print(nlp("山西省的主要旅游景点有哪些？"))