# -*- coding:UTF-8 -*-
import json
import os
import re
from typing import Iterator, List

from langchain_community.document_loaders.base import BaseLoader
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
import tqdm

class ChatJSONLLoader(BaseLoader):
    """Load a JSONL chat dataset where every line is a JSON record holding
    an id, a question and an answer.

    Question and answer texts are split into overlapping chunks; each chunk
    becomes one ``Document`` whose metadata carries the record id under
    ``id_key``.
    """

    # Shared splitter instance; created lazily by get_text_splitter() and
    # cached here so every load shares one configuration.
    text_splitter: RecursiveCharacterTextSplitter = None

    def __init__(self, file_path: str, id_key: str, question_key: str,
                 answer_key: str, encoding: str = "utf-8", **kwargs):
        """
        Args:
            file_path: Path to the ``.jsonl`` data file.
            id_key: JSON key holding the record id.
            question_key: JSON key holding the question text.
            answer_key: JSON key holding the answer text.
            encoding: Text encoding used to read the file.
        """
        self.file_path = file_path
        self.id_key = id_key
        self.question_key = question_key
        self.answer_key = answer_key
        self.encoding = encoding

    @classmethod
    def get_text_splitter(cls) -> RecursiveCharacterTextSplitter:
        """Return the shared text splitter, creating it on first use.

        Bug fix: the original built a fresh splitter on every call and never
        stored it (its own comment admitted the lazy init was only nominal).
        The instance is now cached on the class.
        """
        if cls.text_splitter is None:
            cls.text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=450,
                chunk_overlap=45,
                length_function=len,
                is_separator_regex=False,
            )
        return cls.text_splitter

    def _split_record(self, line: str) -> Iterator[Document]:
        """Yield one Document per chunk of the question and answer texts of
        a single JSONL record."""
        record = json.loads(line)
        record_id = record[self.id_key]  # renamed: `id` shadowed the builtin
        splitter = ChatJSONLLoader.get_text_splitter()
        for field in (self.question_key, self.answer_key):
            for chunk in splitter.split_text(record[field]):
                yield Document(page_content=chunk,
                               metadata={self.id_key: record_id})

    def load(self) -> List[Document]:
        """Eagerly load the whole file into a list of Documents, showing a
        tqdm progress bar while records are converted."""
        documents: List[Document] = []
        with open(self.file_path, 'r', encoding=self.encoding) as fp:
            print("开始加载文件...")
            contents = fp.readlines()
            print("文件加载完成...")
            print("开始生成document")
            for content in tqdm.tqdm(contents):
                documents.extend(self._split_record(content))
        return documents

    def lazy_load(self) -> Iterator[Document]:
        """Stream Documents one at a time (previously raised
        NotImplementedError); reads line by line so memory stays bounded."""
        with open(self.file_path, 'r', encoding=self.encoding) as fp:
            for line in fp:
                yield from self._split_record(line)

class LocalJSONLDataSetDatabase:
    """In-memory key/value store built from every ``*.jsonl`` file found
    under a directory tree.

    Note: ``dir_path`` is resolved relative to the current working
    directory, so an absolute path is safest when in doubt.
    """

    def __init__(self, id_key: str, dir_path: str = "data", encoding: str = "utf-8"):
        """
        Args:
            id_key: JSON key whose value becomes the lookup key of a record.
            dir_path: Directory searched recursively for ``.jsonl`` files.
            encoding: Text encoding used to read the data files.

        Raises:
            RuntimeError: If ``dir_path`` is not a directory.
        """
        if not os.path.isdir(dir_path):
            raise RuntimeError(f"dataset path {dir_path} is not a dir")
        # Data files must be .jsonl. endswith() replaces the original
        # non-raw regex "^.*\.jsonl$", whose "\." is an invalid escape
        # sequence on modern Python; the check is just a suffix test.
        file_path_list = [
            os.path.join(root, file)
            for root, _dirs, files in os.walk(dir_path)
            for file in files
            if file.endswith(".jsonl")
        ]
        self.store = dict()
        for file_path in file_path_list:
            with open(file_path, 'r', encoding=encoding) as fp:
                # Iterate the file directly instead of readlines(): same
                # lines, without holding the raw file content in memory.
                for line in fp:
                    record = json.loads(line)
                    record_id = record[id_key]  # renamed: `id` shadowed the builtin
                    # Later files/lines overwrite earlier ones on duplicate ids.
                    self.store[record_id] = record

    def __len__(self):
        """Return the number of records loaded."""
        return len(self.store)

    def __getitem__(self, item):
        """Return the full JSON record stored under ``item``.

        Raises:
            KeyError: If the id is unknown.
        """
        return self.store[item]