from typing import List
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field


# Output parser will split the LLM result into a list of queries
class LineList(BaseModel):
    """Schema for parsed LLM output: a flat list of text lines."""

    # "lines" is the key (attribute name) of the parsed output
    lines: List[str] = Field(description="Lines of text")


class LineListOutputParser(PydanticOutputParser):
    """Parse raw LLM text into a ``LineList`` by splitting on blank lines."""

    def __init__(self) -> None:
        # Bind the pydantic schema so format instructions/validation use LineList.
        super().__init__(pydantic_object=LineList)

    def parse(self, text: str) -> LineList:
        # Bug fix: the original split on "\\n\\n" -- the literal four-character
        # sequence backslash-n-backslash-n -- which never matches real LLM
        # output. Split on an actual blank line (two newline characters).
        lines = text.strip().split("\n\n")
        return LineList(lines=lines)

# Wire a local ChatGLM model into an LLMChain whose output is parsed into lines.
from chatglm_api import ChatGLM

llm = ChatGLM()

output_parser = LineListOutputParser()

# Prompt template (Chinese): "What are the differences between the two cities
# {city1} and {city2}?"
template = """{city1}和{city2}两座城市有什么不同？"""
prompt = PromptTemplate(template=template, input_variables=["city1","city2"])

llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser)

# Example invocation kept for reference (asks about Beijing vs. Shanghai):
#question = "北京和上海两座城市有什么不同？"

#print(llm_chain.run(city1="北京", city2="上海"))


#exit()

# Summarization demo: load a web article, then summarize it further below.
from langchain.chains.summarize import load_summarize_chain
#from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import WebBaseLoader

# Fetches the page over HTTP at run time -- requires network access.
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
docs = loader.load()



# Alternative document loaders, kept for reference:
#from langchain.document_loaders import TextLoader

#loader = TextLoader("imput_example_14.txt")

#from langchain.document_loaders import DirectoryLoader
#loader = DirectoryLoader('../', glob="**/*.md")
#docs = loader.load()

from langchain.text_splitter import CharacterTextSplitter
# NOTE: the CharacterTextSplitter instance that was previously built here was
# dead code -- the very next statement rebinds ``text_splitter`` to a
# RecursiveCharacterTextSplitter before the first splitter was ever used, so
# the unused instantiation has been removed. The import is kept in case other
# parts of the file rely on it.

from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
    # 1000-char chunks with a small 20-char overlap between adjacent chunks.
    chunk_size = 1000,
    chunk_overlap  = 20,
    length_function = len,
    is_separator_regex = False,
)

# Split the loaded web documents into overlapping chunks for summarization.
texts = text_splitter.split_documents(docs)
#print(texts)

# "refine" builds the summary iteratively, updating it with one chunk at a
# time -- sequential LLM calls, one per chunk.
chain = load_summarize_chain(llm, chain_type="refine")

print(docs)
print(chain.run(texts))
