from typing import Any, Optional, Type, Iterator
from langchain_core.outputs import GenerationChunk
from langchain.llms.base import LLM
from openai import OpenAI
import os

# Base URL of the locally hosted OpenAI-compatible inference server (e.g. vLLM / GLM server).
base_url = 'http://localhost:8000/v1/'


class GLM_LLMS(LLM):
    """LangChain ``LLM`` wrapper for a locally served, OpenAI-compatible GLM model.

    Talks to the server configured by the module-level ``base_url`` through the
    ``openai`` client, exposing blocking generation via ``_call`` and token
    streaming via ``_stream``.
    """

    # OpenAI-compatible client for the local server. "EMPTY" is the usual
    # placeholder API key for local deployments that skip authentication.
    client: Any = OpenAI(api_key='EMPTY', base_url=base_url, timeout=120)
    # Model name as registered on the local server.
    # NOTE(review): "loacl_model" is a typo for "local_model"; the attribute
    # name is kept unchanged for backward compatibility with existing callers.
    loacl_model: str = "LOCAL_GLM-3-6B"

    @property
    def _llm_type(self) -> str:
        """Model-type tag LangChain uses for logging and serialization."""
        return "LOCAL_GLM-3-6B"

    def _call(
            self,
            prompt: str,
            stop: Optional[list[str]] = None,
            run_manager=None,
            **kwargs: Any,
    ) -> str:
        """Send *prompt* as a single user message and return the full completion.

        ``stop`` sequences — previously ignored — are now forwarded to the API,
        honoring LangChain's stop-sequence contract.
        """
        params: dict[str, Any] = {
            "model": self.loacl_model,
            "messages": [{"role": "user", "content": prompt}],
        }
        if stop is not None:
            params["stop"] = stop
        response = self.client.chat.completions.create(**params)
        return response.choices[0].message.content

    def _stream(
            self,
            prompt: str,
            stop: Optional[list[str]] = None,
            run_manager=None,
            **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Stream the completion as ``GenerationChunk`` objects.

        The prompt is injected both as a system hint and as the user message,
        mirroring the original behavior (agent output is passed in *prompt*).
        """
        messages = [
            {"role": "system", "content": f"你可以参考代理返回的数据进行最后的回答。 代理返回如下:{prompt}"},
            {"role": "user", "content": prompt}
        ]
        params: dict[str, Any] = {
            "model": self.loacl_model,
            "messages": messages,
            "stream": True,
        }
        if stop is not None:
            params["stop"] = stop
        response = self.client.chat.completions.create(**params)
        for chunk in response:
            delta = chunk.choices[0].delta.content
            if delta:
                out = GenerationChunk(text=delta)
                if run_manager is not None:
                    # Notify LangChain callbacks of each new token.
                    run_manager.on_llm_new_token(delta, chunk=out)
                yield out


from langchain_community.document_loaders import DirectoryLoader, TextLoader

# Load the raw three.js "Material" documentation page (HTML) from a local file.
# NOTE(review): hard-coded absolute Windows path — breaks on other machines.
load = TextLoader(r"F:\A_wokecode\gradio_study\langchain_study\html\Material.html", encoding='UTF-8')
doc = load.load()

from langchain_community.document_transformers import BeautifulSoupTransformer

# Strip the HTML down to headings and paragraph text only.
bf = BeautifulSoupTransformer()
doc_transforms = bf.transform_documents(
    documents=doc,
    tags_to_extract=['h1', 'h2', 'h3', 'p'],
    # tags_to_extract=['h1'],
)

from langchain.prompts import ChatPromptTemplate, PromptTemplate

# prompt = PromptTemplate(
#     template="""
#     请仔细阅读下面的这个文档
#     #####
#     {doc}
#     #####
#     分析这个文档中包含哪些类型，属性、方法、知识点、罗列其中所涉及到的属性、方法与知识点，请不要有任何的遗漏，以JSON的格式返回，
#     例如
#     #####
#     [{{ "title":属性类型、名称、方法、知识点名称，"description":"简单的描述"}}, ...]
#     #####
#     """,
#     input_variables=["doc"]
# )

# from langchain.text_splitter import CharacterTextSplitter
#
# splitter = CharacterTextSplitter(
#     chunk_size=500,
#     chunk_overlap=100,
#     add_start_index=True
# )

# sp_text1 = splitter.split_documents(doc_transforms)

# Split the transformed documents into overlapping chunks for embedding/retrieval.
from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter2 = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=100,
    add_start_index=True
)

sp_text2 = splitter2.split_documents(doc_transforms)

from pydantic import BaseModel, Field

class Know(BaseModel):
    """One knowledge point extracted from the documentation by the LLM.

    Mirrors the JSON schema requested in the extraction prompt
    (``title`` / ``description`` / ``type``).
    """
    title: str = Field(description="属性类型、名称、方法、知识点名称")
    description: str = Field(description="简单的描述")
    type: str = Field(description="数据的类型，例如类型、名称、方法、知识点")


from langchain_core.output_parsers import JsonOutputParser

# Parses the model's JSON output into dicts validated against the Know schema.
json_output_parser = JsonOutputParser(pydantic_object=Know)

# Shared LLM instance used by all chains below.
llm = GLM_LLMS()


def prompt_func(llm, doc=None):
    """Ask *llm* to extract all knowledge points from a document chunk as JSON.

    Args:
        llm: Any LangChain-compatible LLM / Runnable.
        doc: The document (chunk) to analyze. Defaults to the first split
            chunk ``sp_text2[0]``, preserving the original behavior.

    Returns:
        The raw model output — expected to be a JSON list of
        ``{"title", "description", "type"}`` objects.
    """
    template = """
        请仔细阅读下面的这个文档
        #####
        {doc}
        #####
        分析这个文档中包含哪些类型，属性、方法、知识点、罗列其中所涉及到的属性、方法与知识点，请不要有任何的遗漏，以JSON的格式返回，
        例如
        #####
        [{{ "title":属性类型、名称、方法、知识点名称，"description":"简单的描述", "type":"数据的类型，例如类型、名称、方法、知识点"}}, ...]
        #####
        """
    if doc is None:
        # Backward-compatible default: analyze the first chunk.
        doc = sp_text2[0]
    chain = ChatPromptTemplate.from_template(template) | llm
    return chain.invoke({"doc": doc})


def point_putout(llm, json_parse_one):
    """Ask *llm* to write an introductory article for one knowledge point.

    Args:
        llm: Any LangChain-compatible LLM / Runnable.
        json_parse_one: A dict with ``title``, ``description`` and ``type``
            keys (one parsed knowledge point).

    Returns:
        The model's article text.
    """
    template_text = """
    如果学习three.js的Material的{point_type}
    #####
    {point_description}
    #####
    这个{point_title}的内容我们应该如何学习，进行详细的介绍写一篇文章
    """
    article_chain = ChatPromptTemplate.from_template(template_text) | llm
    inputs = {
        "point_title": json_parse_one["title"],
        "point_description": json_parse_one["description"],
        "point_type": json_parse_one["type"],
    }
    return article_chain.invoke(inputs)


from langchain_huggingface import HuggingFaceEmbeddings

# Local BGE embedding model for Chinese text.
# NOTE(review): hard-coded absolute path — breaks on other machines.
embeddings_path = r'M:\moudels\BAAIbge-large-zh-v1.5'
embeddings = HuggingFaceEmbeddings(model_name=embeddings_path)

from langchain_community.vectorstores import FAISS

# In-memory FAISS index over the document chunks; used for retrieval in check_know.
vectors = FAISS.from_documents(sp_text2, embedding=embeddings)
retriever = vectors.as_retriever()


def check_know(llm, json_parse_one, ai_des):
    """Cross-check an AI-written article against retrieved official-doc snippets.

    Looks up relevant chunks via the module-level ``retriever`` and asks *llm*
    to correct errors and fill gaps in *ai_des*.

    Args:
        llm: Any LangChain-compatible LLM / Runnable.
        json_parse_one: A dict with ``title`` and ``description`` keys.
        ai_des: The previously generated article text to verify.

    Returns:
        The model's corrected / completed text.
    """
    template_text = """
        这是three.js的Material的{point_title}的相关知识。
        ######
        {ai_des}
        #####
        下面是官网文档的片段
        #####
        {doc}
        #####
        结合官网文档的片段，检查内容是否正确或者是否有遗漏。如果有错误请更正，有遗漏请补充，然后输出
    """
    verify_chain = ChatPromptTemplate.from_template(template_text) | llm
    inputs = {
        "point_title": json_parse_one["title"],
        # Ground the check with the most relevant official-doc chunks.
        "doc": retriever.invoke(json_parse_one["description"]),
        "ai_des": ai_des,
    }
    return verify_chain.invoke(inputs)


if __name__ == '__main__':
    # Full pipeline (extract knowledge points, expand one, verify against docs):
    # result = prompt_func(llm)
    # json_parse = json_output_parser.invoke(result)
    # point_putout_res = point_putout(llm, json_parse[2])
    # res = check_know(llm, json_parse[2], point_putout_res)
    res = llm.invoke("你好")  # smoke test: simple greeting round-trip
    print(res)
