from langchain.chains.summarize import load_summarize_chain
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

from src.constance import SEARCH_ARTICLE_TEXT
from src.module.TongyiModel import TongyiModel


class Summary(TongyiModel):
    """Summarize the article at SEARCH_ARTICLE_TEXT with a refine-style chain.

    Relies on the Tongyi LLM exposed by the TongyiModel base class
    (as ``self.model`` — assumed from usage; confirm against base class).
    """

    @staticmethod
    def load_file():
        """Load the article text file as a list of LangChain documents."""
        return TextLoader(file_path=str(SEARCH_ARTICLE_TEXT), encoding='utf8').load()

    @property
    def chain(self):
        """Build a 'refine' summarization chain over the model.

        NOTE: rebuilt on every access; cheap, but callers should grab it
        once per run rather than touching the property in a loop.
        """
        return load_summarize_chain(llm=self.model, chain_type='refine')

    def start(self):
        """Split the article into overlapping chunks, summarize, and return the summary.

        Returns:
            str: the refined summary text (also printed for CLI use).
        """
        docs = self.load_file()
        # 200-char overlap keeps context across the 1000-char chunk boundaries.
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        chunks = splitter.split_documents(docs)

        # Chain.run is deprecated in newer LangChain releases; .invoke returns a
        # dict instead of a plain string, so keep .run here to preserve output shape.
        response = self.chain.run(chunks)
        print('response is:', response)
        # Fix: previously the summary was discarded (implicit None return),
        # making the class unusable except via stdout.
        return response
