import os
import PyPDF2
import tiktoken
from common.document_utils import DocxReader, DocReader

# Tokenizer used during data splitting to measure the token length of text
# blocks; this encoding is comparatively fast.
enc = tiktoken.get_encoding("cl100k_base")

class ReadFileFolder():
    """Walk a folder tree, read the text of supported document files, and
    split that text into overlapping, token-length-bounded chunks.

    Supported extensions and their reader methods are registered in
    ``load_conf``; ``get_all_chunk_content`` is the main entry point.
    """

    def __init__(self, folder_path):
        # Root directory that readlist() will scan recursively.
        self.folder_path = folder_path
        # ext -> [file paths]; populated by readlist().
        self.file_dict = {}
        self.load_conf()

    def load_conf(self):
        """Register the supported extensions and their reader callables."""
        self.ext_list = ["md", "txt", "pdf", "docx", "doc"]
        # Dispatch table: extension -> bound reader method.
        self.ext_dict = {
            "md": self.read_md_content,
            "pdf": self.read_pdf_content,
            "txt": self.read_txt_content,
            "docx": self.read_docx_content,
            "doc": self.read_doc_content
        }

    def readlist(self):
        """Recursively collect supported files under ``folder_path``.

        Returns:
            dict: lower-cased extension -> list of file paths; the same
            mapping is cached on ``self.file_dict``.
        """
        file_dict = {}
        for dir_path, _dir_names, file_names in os.walk(self.folder_path):
            for file_name in file_names:
                # Lower-case so "A.TXT" matches too; dict keys stay lowercase.
                ext_name = file_name.split(".")[-1].lower()
                if ext_name in self.ext_list:
                    file_dict.setdefault(ext_name, []).append(
                        os.path.join(dir_path, file_name))
        self.file_dict = file_dict
        return file_dict

    def read_file_content(self, ext, file_list):
        """Read every file in ``file_list`` with the reader registered for ``ext``.

        Raises:
            Exception: when no reader is registered for ``ext``.
        """
        file_obj = self.ext_dict.get(ext)
        if not file_obj:
            raise Exception("没有此文件处理的方法:{}".format(ext))
        return [file_obj(file_path) for file_path in file_list]

    def read_md_content(self, file_path):
        """Return the raw text of a markdown file."""
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()

    def read_pdf_content(self, file_path):
        """Return the concatenated text of every page of a PDF file."""
        with open(file_path, 'rb') as f:
            reader = PyPDF2.PdfReader(f)
            # extract_text() can yield None/empty for image-only pages;
            # treat that as empty text instead of crashing on concatenation.
            return "".join(page.extract_text() or "" for page in reader.pages)

    def read_txt_content(self, file_path):
        """Return the raw text of a plain-text file."""
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()

    def read_doc_content(self, file_path):
        """Return the text content of a .doc file via the project's DocReader."""
        return DocReader(file_path).load()

    def read_docx_content(self, file_path):
        """Return the text content of a .docx file via the project's DocxReader."""
        return DocxReader(file_path).load()

    def get_all_chunk_content(self, max_len: int = 600, cover_len: int = 150):
        """Read every supported file under ``folder_path`` and return the
        flat list of all chunks produced from all files."""
        docs = []
        for ext, file_list in self.readlist().items():
            for content in self.read_file_content(ext, file_list):
                docs.extend(self.chunk_content(content, max_len, cover_len))
        return docs

    def chunk_content(self, text: str, max_token_len: int = 600, cover_content: int = 150):
        """Split ``text`` into chunks of at most ~``max_token_len`` tokens.

        Each new chunk starts with the last ``cover_content`` characters of
        the previous chunk as overlap. Spaces are stripped from each line
        before token counting.

        NOTE: a single line longer than ``max_token_len`` tokens is kept
        whole, so one chunk may still exceed the limit.
        """
        chunk_text = []
        curr_len = 0
        curr_chunk = ''
        for line in text.split('\n'):
            line = line.replace(' ', '')
            line_len = len(enc.encode(line))
            if curr_len + line_len <= max_token_len:
                curr_chunk += line + '\n'
                curr_len += line_len + 1  # +1 accounts for the newline
            else:
                # Don't emit an empty chunk when the very first line is
                # already over the limit.
                if curr_chunk:
                    chunk_text.append(curr_chunk)
                cover_chunk = curr_chunk[-cover_content:]
                curr_chunk = cover_chunk + line
                # Measure the overlap's actual token count instead of
                # assuming cover_content chars == cover_content tokens.
                curr_len = line_len + len(enc.encode(cover_chunk))
        if curr_chunk:
            chunk_text.append(curr_chunk)
        return chunk_text


if __name__ == "__main__":
    # Smoke test: scan the sibling dataset folder and chunk every document.
    fobj = ReadFileFolder("../dataset")
    file_list = fobj.readlist()
    print(file_list)
    fobj.get_all_chunk_content()