import os
import re
from typing import List, Dict
import PyPDF2
import docx
import pandas as pd
from tqdm import tqdm


class DocumentProcessor:
    """Utilities for turning a folder of documents into cleaned text chunks.

    Pipeline (driven by :meth:`process_folder`):
    ``read_file`` -> ``clean_text`` -> ``chunk_text``. All methods are
    static; the class is just a namespace.
    """

    @staticmethod
    def read_file(file_path: str) -> str:
        """Read one file and return its raw text content.

        Supported extensions: .pdf, .docx, .txt, .csv.

        Returns:
            The extracted text, or "" on any failure — deliberately
            best-effort so one bad file cannot abort a whole folder run.
        """
        ext = os.path.splitext(file_path)[1].lower()
        try:
            if ext == '.pdf':
                with open(file_path, 'rb') as f:
                    reader = PyPDF2.PdfReader(f)
                    # extract_text() may yield None/empty for image-only
                    # pages; "or ''" keeps join() from raising TypeError.
                    return "\n".join(page.extract_text() or "" for page in reader.pages)
            elif ext == '.docx':
                doc = docx.Document(file_path)
                return "\n".join(para.text for para in doc.paragraphs)
            elif ext == '.txt':
                with open(file_path, 'r', encoding='utf-8') as f:
                    return f.read()
            elif ext == '.csv':
                df = pd.read_csv(file_path)
                # One line per row, rendered as the row's dict repr.
                return "\n".join(df.apply(lambda row: str(row.to_dict()), axis=1))
            else:
                raise ValueError(f"不支持的格式: {ext}")
        except Exception as e:
            # Best-effort boundary: log and skip unreadable files rather
            # than failing the whole batch in process_folder().
            print(f"读取文件 {file_path} 失败: {str(e)}")
            return ""

    @staticmethod
    def clean_text(text: str) -> str:
        """Normalize whitespace and strip punctuation/symbols.

        Keeps word characters (which in Unicode mode already include CJK;
        the explicit \\u4e00-\\u9fff range is kept for clarity) and
        whitespace. Note: ALL punctuation is removed, not preserved.
        """
        text = re.sub(r'\s+', ' ', text)  # collapse runs of whitespace
        text = re.sub(r'[^\w\s\u4e00-\u9fff]', ' ', text)  # drop punctuation/symbols
        return text.strip()

    @staticmethod
    def chunk_text(text: str, max_chars: int = 15000) -> List[str]:
        """Split ``text`` into chunks of at most ``max_chars`` characters.

        Splits on Chinese/English sentence terminators to keep semantic
        units intact; a single sentence longer than ``max_chars`` is
        hard-split into fixed-size slices.

        Returns:
            A list of non-empty chunks; ``[]`` for empty input.
        """
        if not text:
            # Fix: previously returned [""], which propagated an empty
            # "document" downstream in process_folder().
            return []
        if len(text) <= max_chars:
            return [text]

        # Split after sentence-ending punctuation followed by whitespace.
        sentences = re.split(r'(?<=[。！？.?!])\s+', text)
        chunks = []
        current_chunk = []
        current_length = 0  # accumulated len(sent) + 1 (joining space) per sentence

        for sent in sentences:
            sent = sent.strip()
            if not sent:
                continue

            if current_length + len(sent) + 1 > max_chars:
                # Current chunk is full: flush it before placing this sentence.
                if current_chunk:
                    chunks.append(" ".join(current_chunk))
                    current_chunk = []
                    current_length = 0
                # A single sentence longer than the limit: hard-split it.
                if len(sent) > max_chars:
                    chunks.extend([sent[i:i + max_chars] for i in range(0, len(sent), max_chars)])
                    continue

            current_chunk.append(sent)
            current_length += len(sent) + 1

        if current_chunk:
            chunks.append(" ".join(current_chunk))

        return chunks

    @staticmethod
    def process_folder(folder_path: str, max_chars: int = 15000) -> List[Dict[str, str]]:
        """Process every readable file in ``folder_path`` into chunk records.

        Creates the folder (and returns []) if it does not exist.

        Returns:
            A list of {"content": chunk, "source": "<stem>_part<i>"} dicts.
        """
        documents = []

        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
            return documents

        # sorted(): os.listdir order is arbitrary; sorting makes the
        # chunk/source numbering deterministic across runs.
        for filename in tqdm(sorted(os.listdir(folder_path)), desc="处理文档"):
            if filename.startswith('.'):  # skip hidden files
                continue

            file_path = os.path.join(folder_path, filename)
            if os.path.isfile(file_path):
                content = DocumentProcessor.read_file(file_path)
                if content:
                    cleaned = DocumentProcessor.clean_text(content)
                    chunks = DocumentProcessor.chunk_text(cleaned, max_chars)
                    for i, chunk in enumerate(chunks):
                        documents.append({
                            "content": chunk,
                            "source": f"{os.path.splitext(filename)[0]}_part{i + 1}"
                        })
        return documents