from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import (
    Settings,
    VectorStoreIndex,
    StorageContext,
    load_index_from_storage,
    Document,
)
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.schema import IndexNode
from typing import List, Union
from openai import OpenAI
from langchain.tools import StructuredTool
import os, json, glob
from dotenv import load_dotenv

# Load environment variables from a local .env file so the DashScope
# (Alibaba Cloud) API key stays out of source control.
load_dotenv()
# API key for the OpenAI-compatible DashScope endpoint used below.
qwen_key = os.getenv("DASHSCOPE_API_KEY")


class RagConfig:
    """Static configuration for the RAG tool: embedding model, LLM endpoint, limits."""

    # Local path of the BAAI bge-base-zh-v1.5 embedding model. Overridable via
    # the EMBEDDING_MODEL_PATH environment variable; the original hard-coded
    # path is kept as the default for backward compatibility.
    embedding_model_name = os.getenv(
        "EMBEDDING_MODEL_PATH",
        "/Users/hezuguang/Desktop/人工智能学习/深度学习130项目课/多轮对话/agent-work/BAAI/bge-base-zh-v1.5",
    )
    # bge retrieval models recommend prefixing queries with this instruction.
    query_embedding_prefix = "为这个句子生成表示以用于检索相关文章："

    # OpenAI-compatible DashScope endpoint, key, and chat model for answering.
    llm_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
    llm_key = qwen_key
    llm_model_name = "qwen-long"

    # Maximum number of characters of retrieved context handed to the LLM.
    max_context_length = 30000


class RagTool:
    """Retrieval-augmented QA tool over a JSONL corpus using llama_index.

    Builds (or loads from disk) a vector index with parent/child chunking,
    retrieves candidate passages for a query, and optionally asks an
    OpenAI-compatible LLM (DashScope qwen-long) to produce an answer.
    Exposes langchain StructuredTool wrappers via as_rag_tool/as_retrieve_tool.
    """

    # Shared llama_index settings, configured once at class-definition time:
    # embeddings come from a local HuggingFace model, and no llama_index LLM
    # is attached (the LLM is called directly through the OpenAI client below).
    Settings.embed_model = HuggingFaceEmbedding(
        model_name=RagConfig.embedding_model_name
    )
    Settings.llm = None

    def __init__(
        self,
        rag_id: str,
        document_path: Union[str, List[str]],
        index_path: str,
        build_index_required: bool,
    ):
        """Load the corpus and prepare the retriever and LLM client.

        Args:
            rag_id: Identifier used to name the exported langchain tools.
            document_path: One JSONL file path, or a list of them.
            index_path: Directory where the vector index is persisted/loaded.
            build_index_required: When True, (re)build and persist the index;
                otherwise load a previously persisted index from index_path.
        """
        self.rag_id = rag_id

        # Accept a single path or a list of paths. An empty list now yields an
        # empty corpus instead of crashing (the original indexed
        # document_path[0] unconditionally -> IndexError on []).
        if isinstance(document_path, str):
            document_path = [document_path]
        self.alltexts: List[str] = []
        for path in document_path:
            self.alltexts.extend(self.__extract_text(path))

        # doc_id is the index into self.alltexts; __retrieve_passages relies
        # on it to map retrieved chunks back to their full source document.
        self.documents = [
            Document(text=text, metadata={"doc_id": str(i)})
            for i, text in enumerate(self.alltexts)
        ]

        if build_index_required:
            index = self.__build_index(self.documents, index_path)
        else:
            storage_context = StorageContext.from_defaults(persist_dir=index_path)
            index = load_index_from_storage(storage_context)

        self.retriever = index.as_retriever(similarity_top_k=100)

        self.llm_client = OpenAI(api_key=RagConfig.llm_key, base_url=RagConfig.llm_url)
        self.llm_model_name = RagConfig.llm_model_name

    def query(self, query: str) -> str:
        """Answer a question from the knowledge base via retrieve-then-read.

        Returns the LLM's answer, or a fixed "not found" message when
        retrieval produces no passages.
        """
        passages = self.__retrieve_passages(
            self.retriever, query, RagConfig.query_embedding_prefix
        )

        answer = "没有找到相关问题的答案"
        if passages:
            answer = self.__call_llm(
                self.llm_client,
                self.llm_model_name,
                query,
                passages,
                RagConfig.max_context_length,
            )

        return answer

    def retrieve(self, query: str) -> str:
        """Return retrieved passages joined by newlines, truncated to the
        configured maximum context length."""
        passages = self.__retrieve_passages(
            self.retriever, query, RagConfig.query_embedding_prefix
        )

        context = "\n".join(passages)
        return context[: RagConfig.max_context_length]

    def __extract_text(self, jsonl: str) -> List[str]:
        """Parse one JSONL file into a list of text passages.

        Each non-empty line is a JSON object; the passage is headers + newline
        + content when a "headers" field exists, otherwise company + newline
        + content.
        """
        text_contents = []
        # encoding is pinned to utf-8: the corpus is Chinese text and the
        # platform default encoding is not reliable (bug fix). The file is
        # streamed line by line instead of readlines().
        with open(jsonl, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                obj = json.loads(line)
                if "headers" in obj:
                    text_contents.append(f'{obj["headers"]}\n{obj["content"]}')
                else:
                    text_contents.append(f'{obj["company"]}\n{obj["content"]}')

        return text_contents

    def __build_index(self, documents, index_path):
        """Build a vector index with parent (512-char) and child (256-char) chunks.

        Child chunks are IndexNodes pointing back to their parent chunk so
        retrieval on small chunks can surface the larger context. The index
        is persisted to index_path when one is given.
        """
        # Split documents into parent chunks.
        node_parser = SimpleNodeParser.from_defaults(chunk_size=512, chunk_overlap=128)
        base_nodes = node_parser.get_nodes_from_documents(documents)

        # Child-chunk parsers (currently a single 256-char level).
        sub_chunk_sizes = [256]
        sub_node_parsers = [
            SimpleNodeParser.from_defaults(chunk_size=c, chunk_overlap=64)
            for c in sub_chunk_sizes
        ]
        all_nodes = []
        for base_node in base_nodes:
            for size, parser in zip(sub_chunk_sizes, sub_node_parsers):
                # Skip sub-chunking when the parent already fits in one child.
                if len(base_node.text) <= size:
                    continue
                sub_nodes = parser.get_nodes_from_documents([base_node])
                all_nodes.extend(
                    IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
                )
            # Always index the parent chunk itself as well.
            all_nodes.append(IndexNode.from_text_node(base_node, base_node.node_id))

        print(f"{len(all_nodes)} in total.")

        # Build the vector index over all parent and child nodes.
        index = VectorStoreIndex(nodes=all_nodes)

        # Persist so a later run can load_index_from_storage instead of rebuilding.
        if index_path:
            # exist_ok avoids the isdir/makedirs race of the original check.
            os.makedirs(index_path, exist_ok=True)
            index.storage_context.persist(index_path)

        return index

    def __call_llm(self, client, model_name, query, passages, max_passage_length=30000):
        """Ask the LLM to answer `query` using `passages` as context.

        Returns the model's answer, or "" when the API call fails (the error
        is printed; best-effort behavior is preserved deliberately).
        """
        contexts = "\n".join(passages)
        if len(contexts) > max_passage_length:
            contexts = contexts[:max_passage_length]

        prompts = f"""
请基于上下文来回答用户的问题。如果上下文的信息不足以回答问题，就说你不知道。
只输出回答即可。

问题：
{query}

可参考的上下文：
{contexts}
        """

        try:
            completion = client.chat.completions.create(
                model=model_name,  # model list: https://help.aliyun.com/zh/model-studio/getting-started/models
                messages=[
                    {"role": "system", "content": "你是一个金融领域的AI助手。"},
                    {"role": "user", "content": f"{prompts}"},
                ],
                temperature=0,
                max_tokens=2048,
                top_p=0.8,
            )
            return completion.choices[0].message.content
        except Exception as e:
            print(f"错误信息：{e}")
            print(
                "请参考文档：https://help.aliyun.com/zh/model-studio/developer-reference/error-code"
            )

        return ""

    def __retrieve_passages(self, retriever, query, query_embedding_prefix):
        """Retrieve chunk-level hits and merge them into full source documents.

        Hits are grouped by doc_id (keeping each document's best score) and
        the full document texts are returned sorted by score, best first.
        """
        retrieved_results = retriever.retrieve(query_embedding_prefix + query)

        # Merge chunk-level hits per source document, keeping the max score.
        results = {}
        for result in retrieved_results:
            key = result.metadata["doc_id"]
            if key in results:
                results[key]["score"] = max(result.score, results[key]["score"])
            else:
                results[key] = {
                    "text": self.alltexts[int(key)],
                    "doc_id": key,
                    "score": result.score,
                }

        ranked = [(entry["score"], entry["text"]) for entry in results.values()]
        ranked.sort(reverse=True)

        print(f"{len(ranked)} passages have been retrieved.")

        return [text for _, text in ranked]

    def as_rag_tool(self):
        """Wrap `query` as a langchain StructuredTool for agent use."""
        return StructuredTool.from_function(
            func=self.query,
            name=self.rag_id + "_RagTool",
            description=f"根据{self.rag_id}招股知识库的内容，回答一个问题。考虑上下文信息，确保问题对相关概念的定义表述完整。",
        )

    def as_retrieve_tool(self):
        """Wrap `retrieve` as a langchain StructuredTool for agent use."""
        return StructuredTool.from_function(
            func=self.retrieve,
            name=self.rag_id + "_RetrieveTool",
            description=f"根据{self.rag_id}招股知识库的内容，搜索和用户查询相关的知识。",
        )
