import os
from llama_index.core import VectorStoreIndex
from typing import Any
from llama_index.core import ServiceContext, SimpleDirectoryReader
from llama_index.core.llms import (
    CustomLLM,
    CompletionResponse,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig


# llama_index version: 0.8.69
# LLM: Qwen-7B-Chat
# Embedding model: m3e
# Reference (Chinese write-up): https://zhuanlan.zhihu.com/p/666964821

# Pin the process to a single GPU before any CUDA initialization happens.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Maximum context window (in tokens) the LLM accepts.
context_window = 2048
# Maximum number of tokens to generate per completion.
num_output = 256

# Keep the model/tokenizer at module level (outside the LLM class) so the
# weights are loaded once and not duplicated per adapter instance.
model_name = "qwen7bchat"

tokenizer = AutoTokenizer.from_pretrained("Qwen-7B-Chat", trust_remote_code=True)
# bf16=True halves memory vs fp32; device_map="auto" lets transformers place
# layers across the visible GPU(s).
model = AutoModelForCausalLM.from_pretrained(
    "Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True
).eval()

# Load Qwen's default generation settings (sampling params, stop tokens, ...).
model.generation_config = GenerationConfig.from_pretrained("Qwen-7B-Chat", trust_remote_code=True)


class OurLLM(CustomLLM):
    """llama_index ``CustomLLM`` adapter around the module-level Qwen-7B-Chat
    model and tokenizer."""

    @property
    def metadata(self) -> LLMMetadata:
        """Return LLM metadata (context window, output budget, model name)."""
        return LLMMetadata(
            context_window=context_window,
            num_output=num_output,
            model_name=model_name,
        )

    @llm_completion_callback()
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        """Run a single-turn, non-streaming completion for *prompt*.

        Qwen's ``chat()`` returns ``(response_text, updated_history)``; the
        history is discarded because each call is stateless.
        """
        text, _ = model.chat(tokenizer, prompt, history=[])
        return CompletionResponse(text=text)

    @llm_completion_callback()
    def stream_complete(
            self, prompt: str, **kwargs: Any
    ) -> CompletionResponseGen:
        """Streaming is not supported by this adapter."""
        raise NotImplementedError()


def main():
    """Build a vector index over ./data and answer one sample query with Qwen."""
    # Wire the custom Qwen LLM and a local m3e embedding model into llama_index.
    our_llm = OurLLM()
    ctx = ServiceContext.from_defaults(
        llm=our_llm,
        embed_model="local:/Users/weigang/D/LLM-Model/m3e-base",
    )
    # Ingest every document under ./data and embed it into the vector store.
    docs = SimpleDirectoryReader("./data").load_data()
    vector_index = VectorStoreIndex.from_documents(docs, service_context=ctx)
    # Retrieve-and-generate: run the sample query and show the answer.
    engine = vector_index.as_query_engine()
    answer = engine.query("请问是什么")
    print(answer)


if __name__ == "__main__":
    main()
