import torch
import re
from llama_index.core import Settings
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Set prompt template for generation (optional)
from llama_index.core import PromptTemplate
from datetime import datetime

# Device placement for the HuggingFace model. "auto" lets transformers/accelerate
# spread the weights across the available devices automatically.
# The original assignment `device = "cuda:4"` was a dead store (immediately
# overwritten below); kept here as a comment for pinning to one GPU if needed.
# device = "cuda:4"
device = "auto"


def completion_to_prompt(completion):
    """Wrap a bare completion string into the ChatML prompt format.

    Produces an empty system block, a user block containing *completion*,
    and an open assistant tag for the model to continue from.
    """
    segments = [
        "<|im_start|>system\n<|im_end|>\n",
        f"<|im_start|>user\n{completion}<|im_end|>\n",
        "<|im_start|>assistant\n",
    ]
    return "".join(segments)


def messages_to_prompt(messages):
    """Render a sequence of chat messages into a ChatML prompt string.

    Each message with role "system", "user" or "assistant" becomes one
    `<|im_start|>{role}\\n{content}<|im_end|>\\n` block; other roles are
    silently skipped. A system tag is prepended when the conversation does
    not open with one, and an open assistant tag is appended so the model
    continues the conversation.
    """
    blocks = []
    for msg in messages:
        # Compare against literal role names; only the three ChatML roles
        # are rendered, anything else is dropped.
        for role in ("system", "user", "assistant"):
            if msg.role == role:
                blocks.append(f"<|im_start|>{role}\n{msg.content}<|im_end|>\n")

    rendered = "".join(blocks)

    # NOTE(review): the prepended system tag is never closed with <|im_end|>,
    # which looks like malformed ChatML — preserved as-is to keep behavior
    # identical; confirm against the Qwen prompt spec before changing.
    if not rendered.startswith("<|im_start|>system"):
        rendered = "<|im_start|>system\n" + rendered

    return rendered + "<|im_start|>assistant\n"


# Set Qwen2 as the language model and set generation config.
# Runs at import time: loads the model weights from local disk.
# NOTE(review): context_window=30000 — presumably within Qwen2-7B's supported
# context length; confirm against the model's config.json.
Settings.llm = HuggingFaceLLM(
    model_name="/home/zhengzhenzhuang/models/qwen/Qwen2-7B",
    tokenizer_name="/home/zhengzhenzhuang/models/qwen/Qwen2-7B",
    context_window=30000,
    max_new_tokens=2000,
    # Earlier sampling setting kept for reference:
    #generate_kwargs={"temperature": 0.7, "top_k": 50, "top_p": 0.95},
    generate_kwargs={"temperature": 0.7, "top_k": 50, "top_p": 0.9},
    messages_to_prompt=messages_to_prompt,  # ChatML renderer for chat calls
    completion_to_prompt=completion_to_prompt,  # ChatML wrapper for completions
    device_map=device,  # "auto" → let HF place weights across available devices
)

# Set embedding model (local bge-base-zh checkpoint, Chinese text embeddings).
Settings.embed_model = HuggingFaceEmbedding(
    model_name="/home/zhengzhenzhuang/models/qwen/bge-base-zh/bge-base-zh-v1.5"
)

# Set the size of the text chunk for retrieval: documents are split into
# sentence-aligned chunks of up to 1024 tokens before embedding.
Settings.transformations = [SentenceSplitter(chunk_size=1024)]

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

# "加载知识库" = "loading knowledge base"
print("加载知识库")

# Read every file under the knowledge-base directory and build an in-memory
# vector index over it, using the embedding model and chunking transformations
# configured in Settings above. Runs at import time.
documents = SimpleDirectoryReader("/home/zhengzhenzhuang/models/qwen/document/llamaindex").load_data()
index = VectorStoreIndex.from_documents(
    documents,
    embed_model=Settings.embed_model,
    transformations=Settings.transformations
)

"""
from llama_index.core import StorageContext, load_index_from_storage
# save index
storage_context = StorageContext.from_defaults(persist_dir="/home/zhengzhenzhuang/models/qwen/save")
# load index
index = load_index_from_storage(storage_context)
"""
def getIndex():
    """Accessor for the module-level vector index built at import time."""
    return index

def getAnswer(question):
    """Query the vector index with *question* and return the answer text.

    The raw response is truncated at the first newline immediately followed
    by an ASCII letter — presumably to strip trailing English boilerplate
    after the Chinese answer (TODO: confirm intent with the author).
    """
    query_engine = getIndex().as_query_engine()
    answer = query_engine.query(question).response
    match = re.search(r'\n[a-zA-Z]', answer)
    if match:
        # match.start() is the position of the '\n'; keep everything before it.
        # Fix: the original bound this to a local named `index`, shadowing the
        # module-level vector index built above.
        return answer[:match.start()]
    return answer

def test():
    """Ad-hoc smoke test: run one fixed query against the index, print the
    response and the elapsed time in seconds."""
    # "开始回答问题" = "starting to answer the question"
    print("开始回答问题")
    st = datetime.now()
    query_engine = index.as_query_engine()
    your_query = "首席执行官是谁？"
    print(query_engine.query(your_query).response)
    # Bug fix: elapsed time is now - start; the original computed
    # (st - datetime.now()), which always printed a negative duration.
    # Also dropped the redundant function-local `from datetime import datetime`
    # (already imported at module top).
    second = (datetime.now() - st).total_seconds()
    print(f"耗时{second}")


# --- FastAPI web-service wiring (runs at import time) ---
from fastapi import FastAPI
app = FastAPI()  # ASGI application served by uvicorn at the bottom of the file
from pydantic import BaseModel
from datetime import datetime  # NOTE: already imported at the top of the file
import uvicorn

class MyClassModel(BaseModel):
    """Request body for the POST /qwen2/api endpoint."""

    # The user's question as plain text.
    question: str

@app.get("/")
def read_root():
    """Health-check endpoint: always returns a constant greeting."""
    greeting = {"Hello": "World"}
    return greeting

@app.post("/qwen2/api")
async def create_myclass(myclass: MyClassModel):
    """Answer *myclass.question* against the knowledge base.

    Logs the question, the answer, and the elapsed time to stdout, and
    returns the answer as JSON: {"answer": "..."}.
    """
    st = datetime.now()
    # "问题" = "question"
    print("问题：" + myclass.question)
    answer = getAnswer(myclass.question)
    print(answer)
    # Bug fix: elapsed time is now - start; the original computed
    # (st - datetime.now()), which always logged a negative duration.
    second = (datetime.now() - st).total_seconds()
    # "耗时" = "time taken"
    print(f"耗时{second}")
    return {"answer": f"{answer}"}

# Script entry point: serve the FastAPI app on all interfaces, port 8091.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8091)
