# -*- coding: utf-8 -*-
import sys
import yaml
from fastapi import APIRouter
import logging
from llama_index.core import StorageContext, VectorStoreIndex, ServiceContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.llms.together import TogetherLLM
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.llms.openai import OpenAI
import torch
from llama_index.vector_stores.elasticsearch import ElasticsearchStore
from pydantic import BaseModel

from myutils import WenXinLLM, QianWenLLM


chunk_size = 1024
# Cached selection state for load_llm_embedding(); -1 means "nothing loaded
# yet" so the very first request always builds the retriever/engine.
llm_status = -1
embedding_status = -1
ancient_status = -1
vector_retriever = None
query_engine = None

# Load configuration. The file is a trusted local config, but safe_load is
# still preferred: it only builds plain Python types, unlike FullLoader.
# Explicit encoding avoids depending on the platform default.
with open('config.yaml', 'r', encoding='utf-8') as file:
    config = yaml.safe_load(file)

# Route all log output to stdout with timestamps.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
    )

router = APIRouter(
    prefix="/gj-rag",
    tags=["search"],
    responses={404: {"description": "Not found"}},
)


# Load LLMs and embedding models once at application startup.
# NOTE(review): @router.on_event is deprecated in recent FastAPI versions in
# favour of lifespan handlers — confirm the installed version before migrating.
@router.on_event("startup")
async def startup():
    # Eagerly build all model/store globals (see main()) so the first request
    # does not pay the full initialisation cost.
    main()


class Query(BaseModel):
    """Request body shared by the /gj-rag endpoints."""
    query: str          # user question / search text
    llm: int = 0        # index into load_llm_embedding's llms list (0 = local LLM)
    embedding: int = 0  # 0 = local embedding model, 1 = openai embedding model
    is_ancient: int = 0  # 0 = modern corpus, 1 = ancient-text corpus


@router.post("/doc_search", description="检索相关文档")
async def doc_search(request: Query):
    """Retrieve documents relevant to the query and return their metadata.

    Lazily (re)builds the retriever via load_llm_embedding for the requested
    llm/embedding/is_ancient combination, then returns each hit's metadata
    together with its similarity score.
    """
    try:
        load_llm_embedding(request.llm, request.embedding, request.is_ancient)
        metadatas = []
        nodes = vector_retriever.retrieve(request.query)
        for node in nodes:
            # Copy BEFORE adding "score": the original code wrote the score
            # into node.metadata itself, mutating the retrieved node in place.
            result = dict(node.metadata)
            result["score"] = node.score
            metadatas.append(result)
        logging.info(f"Query: {request}, response: 文档检索")
        return {"code": 200, "data": metadatas}
    except Exception as e:
        # API boundary: convert any failure into a JSON error payload.
        return {"code": 500, "message": str(e)}


@router.post("/llm_summary", description="llm根据检索到的文档对问题做回答")
async def llm_summary(request: Query):
    """Answer the query with the selected LLM, grounded on retrieved documents."""
    try:
        # Make sure the query engine matches the requested model combination.
        load_llm_embedding(request.llm, request.embedding, request.is_ancient)
        answer = query_engine.query(request.query)
        logging.info(f"Query: {request}, response: {answer.response}")
        return {"code": 200, "data": {"response": answer.response}}
    except Exception as e:
        # API boundary: surface the failure as a JSON error payload.
        return {"code": 500, "message": str(e)}

# Load LLMs and embedding models.
def main():
    """Initialise all LLMs, embedding models and Elasticsearch stores.

    Everything is published as module-level globals that
    load_llm_embedding() later indexes into. Configuration comes from the
    module-level `config` dict loaded from config.yaml.
    """
    global local_es, openai_es, local_llm, glm4_llm, wenxin_llm, azure_llm, openai35_llm, openai4_llm, \
        openai_embed_model, local_embed_model, abstract_es, local_ancient_es, openai_ancient_es, qianwen_llm

    def _es_store(index_key: str) -> ElasticsearchStore:
        # All stores share the same cluster and credentials; only the index
        # name (looked up under config["es"]) differs.
        return ElasticsearchStore(
            index_name=config["es"][index_key],
            es_url=config["es"]["url"],
            es_user=config["es"]["user"],
            es_password=config["es"]["password"],
        )

    # Elasticsearch vector stores: [local, local_ancient, openai, openai_ancient]
    local_es = _es_store("local_index")
    local_ancient_es = _es_store("local_ancient_index")
    openai_es = _es_store("openai_index")
    openai_ancient_es = _es_store("openai_ancient_index")

    # Local LLM served via HuggingFace.
    local_llm = HuggingFaceLLM(
        model_name=config["local_llm"]["model_path"],
        tokenizer_name=config["local_llm"]["model_path"],
        device_map="auto",
        context_window=3500,
        system_prompt="中文回答",
        model_kwargs={
            "trust_remote_code": True,
            "torch_dtype": torch.float16,
        },
        tokenizer_kwargs={
            "trust_remote_code": True
        }
    )
    # GLM-4 via a Together-compatible endpoint.
    glm4_llm = TogetherLLM(
        model=config["glm4_llm"]["model"],
        api_base=config["glm4_llm"]["url"],
        api_key=config["glm4_llm"]["access_token"],
        system_prompt="中文回答",
    )
    # ERNIE (WenXin) 4.0.
    wenxin_llm = WenXinLLM(
        api_base=config["wenxin_llm"]["url"],
        api_key=config["wenxin_llm"]["access_token"]
    )
    # Qwen 2.1.
    qianwen_llm = QianWenLLM(
        model=config["qianwen_llm"]["model"],
        api_base=config["qianwen_llm"]["url"],
        api_key=config["qianwen_llm"]["api_key"]
    )
    # Azure-hosted OpenAI LLM.
    azure_llm = AzureOpenAI(
        model=config["azure_llm"]["model"],
        azure_endpoint=config["azure_llm"]["azure_endpoint"],
        api_key=config["azure_llm"]["api_key"],
        engine=config["azure_llm"]["engine"],
        api_version=config["azure_llm"]["api_version"],
    )
    # SECURITY: a commented-out AzureOpenAIEmbedding block containing a
    # hard-coded endpoint and API key was removed here — credentials must
    # live in config.yaml, never in source.
    # OpenAI LLMs (3.5 and 4).
    openai35_llm = OpenAI(
        api_base=config["openai_llm"]["api_base"],
        api_key=config["openai_llm"]["api_key"],
        model=config["openai_llm"]["model"]["gpt3.5"]
    )
    openai4_llm = OpenAI(
        api_base=config["openai_llm"]["api_base"],
        api_key=config["openai_llm"]["api_key"],
        model=config["openai_llm"]["model"]["gpt4"]
    )
    # OpenAI embedding model.
    openai_embed_model = OpenAIEmbedding(
        api_key=config["openai_embedding"]["api_key"],
        api_base=config["openai_embedding"]["api_base"],
        model=config["openai_embedding"]["model"],
        dimension=config["openai_embedding"]["dimension"],
    )
    # Local HuggingFace embedding model.
    local_embed_model = HuggingFaceEmbedding(
        model_name=config["local_embedding"]["model_path"],
        max_length=config["local_embedding"]["batch_size"]
    )


# Build query_engine and vector_retriever according to the user's parameters.
def load_llm_embedding(llm: int, embedding: int, is_ancient: int):
    """(Re)build the global query_engine / vector_retriever if the requested
    combination differs from the cached one.

    Args:
        llm: index into the llms list below (0=local ... 6=qianwen).
        embedding: 0 = local embedding (chunk_size 512), 1 = openai (1024).
        is_ancient: 0 = modern corpus, 1 = ancient-text corpus.
    """
    global llm_status, embedding_status, vector_retriever, query_engine, chunk_size, ancient_status
    llms = [local_llm, glm4_llm, wenxin_llm, azure_llm, openai35_llm, openai4_llm, qianwen_llm]
    embeddings = [local_embed_model, openai_embed_model]
    # Stores are laid out as [local, local_ancient, openai, openai_ancient],
    # so the flat index for (embedding, is_ancient) is 2*embedding + is_ancient.
    ess = [local_es, local_ancient_es, openai_es, openai_ancient_es]
    # Nothing changed since the last request — keep the cached engine.
    if llm == llm_status and embedding == embedding_status and is_ancient == ancient_status:
        return
    llm_status = llm
    embedding_status = embedding
    ancient_status = is_ancient
    # Local embedding model works with smaller chunks.
    chunk_size = 512 if embedding == 0 else 1024
    service_context = ServiceContext.from_defaults(
        llm=llms[llm],
        embed_model=embeddings[embedding],
        chunk_size=chunk_size,
    )

    # BUG FIX: was ess[embedding + is_ancient], which selected
    # local_ancient_es for (embedding=1, is_ancient=0) and openai_es for
    # (embedding=1, is_ancient=1) — the wrong store in both openai cases.
    vector_store = ess[2 * embedding + is_ancient]
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    index = VectorStoreIndex.from_vector_store(
        vector_store=vector_store,
        storage_context=storage_context,
        service_context=service_context,
    )

    vector_retriever = index.as_retriever(similarity_top_k=10)
    query_engine = index.as_query_engine(similarity_top_k=5)
