import os
import markdown
import PyPDF2
import re
from bs4 import BeautifulSoup
import tiktoken
from tqdm import tqdm

class FileLoader:
    """Loads .md, .txt and .pdf files from a directory tree and splits
    their plain-text content into token-bounded chunks."""

    def __init__(self, file_path, encoder=None):
        """
        Args:
            file_path (str): Root directory to scan for supported files.
            encoder: Optional tokenizer exposing encode()/decode().
                Defaults to tiktoken's cl100k_base encoding, which is
                suitable for most GPT models.
        """
        self.file_path = file_path
        self.support_suffixes = ['.md', '.txt', '.pdf']
        # Fall back to tiktoken's cl100k_base encoder when no custom
        # encoder is supplied (same truthiness check as `if encoder:`).
        self.encoder = encoder or tiktoken.get_encoding("cl100k_base")

    def get_files(self):
        """Recursively collect the paths of all supported files under file_path.

        Returns:
            list[str]: Full paths of every file whose suffix is supported.
        """
        suffixes = tuple(self.support_suffixes)  # build once, not per filename
        return [
            os.path.join(root, filename)
            for root, _dirs, filenames in os.walk(self.file_path)
            for filename in filenames
            if filename.endswith(suffixes)
        ]

    @classmethod
    def get_file_content(cls, file_path):
        """Read a single file and return its text content.

        Dispatches on the file extension to the matching reader.

        Raises:
            ValueError: If the extension is not .pdf, .md or .txt.
        """
        if file_path.endswith('.pdf'):
            return cls.read_pdf(file_path)
        elif file_path.endswith('.md'):
            return cls.read_markdown(file_path)
        elif file_path.endswith('.txt'):
            return cls.read_text(file_path)
        else:
            raise ValueError("Unsupported file type")

    @classmethod
    def read_pdf(cls, file_path: str):
        """Extract and concatenate the text of every page of a PDF file."""
        with open(file_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            # extract_text() can yield None for pages without a text
            # layer; treat those as empty. join() avoids quadratic +=.
            return "".join(page.extract_text() or "" for page in reader.pages)

    @classmethod
    def read_markdown(cls, file_path: str):
        """Read a Markdown file and return its plain text with URLs stripped."""
        with open(file_path, 'r', encoding='utf-8') as file:
            md_text = file.read()
        # Render Markdown to HTML, then strip the markup via BeautifulSoup.
        html_text = markdown.markdown(md_text)
        soup = BeautifulSoup(html_text, 'html.parser')
        plain_text = soup.get_text()
        # Drop URLs, which are noise for downstream chunking.
        return re.sub(r'http\S+', '', plain_text)

    @classmethod
    def read_text(cls, file_path: str):
        """Read a UTF-8 text file and return its content."""
        with open(file_path, 'r', encoding='utf-8') as file:
            return file.read()

    def get_chunk(self, text: str, chunk_size: int = 100, overlap_size: int = 10) -> list:
        """
        Splits text into chunks of approximately chunk_size tokens with
        overlap_size tokens of overlap between consecutive chunks.

        Args:
            text (str): Input text to be chunked.
            chunk_size (int): Maximum size of each chunk in tokens (default: 100).
            overlap_size (int): Number of tokens to overlap between chunks (default: 10).

        Returns:
            list: List of text chunks, in source order.
        """
        if not text:
            return []

        chunk_text = []
        curr_chunk = ""
        curr_size = 0

        for line in text.split('\n'):
            line = line.strip()
            if not line:  # Skip empty lines
                continue

            line_tokens = self.encoder.encode(line)
            line_size = len(line_tokens)

            # An over-long line is emitted as standalone chunks below.
            # Flush the pending chunk first so output stays in source
            # order (previously the long line's parts were appended
            # ahead of the accumulated chunk).
            if line_size > chunk_size and curr_chunk:
                chunk_text.append(curr_chunk)
                curr_chunk = ""
                curr_size = 0

            # Split a line longer than chunk_size into chunk_size-token
            # pieces, keeping overlap_size tokens between pieces.
            while line_size > chunk_size:
                chunk_text.append(self.encoder.decode(line_tokens[:chunk_size]))
                line_tokens = line_tokens[chunk_size - overlap_size:]
                line = self.encoder.decode(line_tokens)
                line_size = len(line_tokens)

            # Accumulate the (remaining) line into the current chunk.
            if curr_size + line_size <= chunk_size:
                if curr_chunk:
                    curr_chunk += '\n' + line
                    curr_size += line_size + 1  # +1 approximates the newline token
                else:
                    curr_chunk = line
                    curr_size = line_size
            else:
                # Current chunk is full: flush it, then start a new one
                # seeded with overlap tokens taken from the last chunk.
                if curr_chunk:
                    chunk_text.append(curr_chunk)
                if chunk_text and overlap_size > 0:
                    overlap_tokens = self.encoder.encode(chunk_text[-1])[-overlap_size:]
                    overlap_part = self.encoder.decode(overlap_tokens)
                    curr_chunk = overlap_part + ('\n' + line if line else '')
                    curr_size = len(self.encoder.encode(curr_chunk))
                else:
                    curr_chunk = line
                    curr_size = line_size

        # Flush the final partial chunk.
        if curr_chunk:
            chunk_text.append(curr_chunk)

        return chunk_text

if __name__ == "__main__":
    # Example usage: chunk every supported file under the raw-data
    # directory. Raw string avoids the invalid escape sequences the
    # old literal produced (\J, \P, \M); the resulting path is identical.
    file_loader = FileLoader(r"C:\JaredLyu\Project\MiniMind\raw_data")
    files = file_loader.get_files()
    for file in files:
        content = file_loader.get_file_content(file)
        chunks = file_loader.get_chunk(content)
        print(f"File: {file}, Chunks: {len(chunks)}")
        for i, chunk in enumerate(chunks):
            # Print only the first 100 characters of each chunk.
            print(f"Chunk {i+1}: {chunk[:100]}")