# -*- coding:UTF-8 -*-
import json
import os
import re
import tqdm
import importlib
import pathlib
from typing import Iterator, List,Dict
from langchain_community.document_loaders.base import BaseLoader
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter


'''
文本文件加载器
'''


class FileLoader(BaseLoader):
    """Load a single file via a langchain_community loader and split it
    into chunked Documents.

    Supported extensions are mapped to loader class names in LOADER_DIC;
    if the mapped loader cannot be imported, UnstructuredFileLoader is
    used as a fallback.
    """

    # extension -> class name inside langchain_community.document_loaders
    LOADER_DIC = {
        ".doc": "UnstructuredWordDocumentLoader",
        ".docx": "UnstructuredWordDocumentLoader",
        ".pdf": "PyPDFLoader",
        ".md": "UnstructuredMarkdownLoader",
    }

    # Shared, lazily created splitter. Uses RecursiveCharacterTextSplitter
    # by default; later, different file types could get different splitters.
    text_splitter: RecursiveCharacterTextSplitter = None

    # Class-level default for the per-instance loader cache.
    file_loader: BaseLoader = None

    def __init__(self, file_path: str, encoding: str = "utf-8", **kwargs):
        """
        Args:
            file_path: path of the file to load.
            encoding: text encoding hint (kept for callers; the concrete
                loaders currently do not receive it).
        """
        self.file_path = file_path
        self.encoding = encoding
        # Instance-level cache so get_file_loader() builds the loader once.
        self.file_loader = None

    def get_file_loader(self) -> BaseLoader:
        """Lazily create, cache and return the concrete document loader.

        Fixes of the previous version: the created loader is now stored on
        the instance (before, a second call raised UnboundLocalError), the
        extension lookup no longer raises KeyError for unknown suffixes,
        and the import-failure fallback now correctly enables
        ``autodetect_encoding`` for UnstructuredFileLoader.

        Raises:
            Exception: when no loader is registered for the file extension.
        """
        if self.file_loader is None:
            loader_kwargs = {}
            extension = pathlib.Path(self.file_path).suffix.lower()
            loader_name = self.LOADER_DIC.get(extension)
            if loader_name is None:
                raise Exception(f"not find loader for {self.file_path}")
            document_loaders_module = importlib.import_module(
                "langchain_community.document_loaders"
            )
            try:
                DocumentLoader = getattr(document_loaders_module, loader_name)
            except Exception as e:
                print("文件加载器获取异常:", str(e))
                # Fall back to the generic unstructured loader; update
                # loader_name so the autodetect branch below applies.
                loader_name = "UnstructuredFileLoader"
                DocumentLoader = getattr(document_loaders_module, loader_name)
            if loader_name == "UnstructuredFileLoader":
                loader_kwargs.setdefault("autodetect_encoding", True)
            self.file_loader = DocumentLoader(self.file_path, **loader_kwargs)
        return self.file_loader

    @classmethod
    def get_text_splitter(cls):
        """Lazily build and cache the shared text splitter.

        Previously the splitter was rebuilt on every call because the
        result was never assigned back to ``cls.text_splitter``.
        """
        if cls.text_splitter is None:
            print("没有 text_splitter")
            cls.text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=450,
                chunk_overlap=45,
                length_function=len,
                is_separator_regex=False,
            )
        else:
            print("有 text_splitter")
        return cls.text_splitter

    def load(self) -> List[Document]:
        """Load the file, split it into chunks and tag each chunk with
        the source file name in ``metadata["file_name"]``."""
        file_loader = self.get_file_loader()
        file_docs = file_loader.load()
        text_splitter = FileLoader.get_text_splitter()
        documents = text_splitter.split_documents(file_docs)
        for document in documents:
            document.metadata["file_name"] = pathlib.Path(self.file_path).name
        return documents

    def lazy_load(self) -> Iterator[Document]:
        """A lazy loader for Documents (not implemented)."""
        raise NotImplementedError(
            f"{self.__class__.__name__} does not implement lazy_load()"
        )
    


class FileDataSetDatabase:
    """A tiny JSON-file-backed mapping from string keys to lists of values.

    The whole store is held in memory (``self.store``) and flushed to
    ``store_path`` as pretty-printed JSON after every mutation. Values
    under each key are deduplicated on every write (order is NOT
    preserved, since deduplication goes through ``set``).
    """

    def __init__(self, store_path: str, encoding="utf-8"):
        """Open the JSON store at ``store_path``, creating the parent
        directory and an empty file if they do not exist yet.

        Args:
            store_path: path of the backing JSON file.
            encoding: encoding used for reading/writing the file.
        """
        path = pathlib.Path(store_path)
        self.store_path = store_path
        self.encoding = encoding
        self.store: Dict[str, List] = dict()
        # exist_ok avoids the check-then-create race of the old version.
        os.makedirs(path.parent, exist_ok=True)
        if not os.path.exists(self.store_path):
            # Create an empty file so _load_store always has something to read.
            with open(self.store_path, 'w', encoding=self.encoding):
                pass
        self._load_store()

    def _load_store(self):
        """Read the JSON file into memory; an empty file means an empty store."""
        with open(self.store_path, 'r', encoding=self.encoding) as fp:
            content = fp.read()
        # read() never returns None, so a truthiness check is sufficient.
        self.store = json.loads(content) if content else dict()

    def _save(self):
        """Persist the in-memory store to disk as human-readable JSON."""
        with open(self.store_path, 'w', encoding=self.encoding) as fp:
            fp.write(json.dumps(self.store, ensure_ascii=False, indent=4))

    def _add_element(self):
        """Deduplicate every key's value list, then persist the store."""
        for key in self.store:
            self.store[key] = list(set(self.store[key]))
        self._save()

    def remove(self, key):
        """Delete ``key`` (raises KeyError if missing) and persist."""
        self.store.pop(key)
        self._save()

    def __len__(self):
        """Number of keys in the store."""
        return len(self.store)

    def __getitem__(self, item):
        """Return the value list for ``item``, or None for unknown keys."""
        return self.store.get(item)

    def __setitem__(self, key, value) -> None:
        """Append ``value`` (or every element of a list/tuple) under ``key``,
        then deduplicate and persist."""
        bucket = self.store.setdefault(key, [])
        # isinstance must use the builtin list, not typing.List.
        if isinstance(value, (tuple, list)):
            bucket.extend(value)
        else:
            bucket.append(value)
        self._add_element()
    
    