import re
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple

import pandas as pd


class ChapterWordAnalyzer:
    def __init__(self,
                 chapters: List[Tuple[str, str]],
                 exclude_dir: str = None):
        self.chapters = chapters
        self.word_pattern = re.compile(r"\b[a-zA-Z']+\b")  # 匹配带撇号的单词
        self.exclude_lower = self._load_exclude_words(exclude_dir) if exclude_dir else set()

    def _load_exclude_words(self, directory: str) -> Set[str]:
        """加载排除词（统一转为小写）"""
        word_set = set()
        try:
            for file_path in Path(directory).glob("*.xlsx"):
                try:
                    with pd.ExcelFile(file_path) as xls:
                        for sheet_name in xls.sheet_names:
                            try:
                                df = pd.read_excel(
                                    xls,
                                    sheet_name=sheet_name,
                                    usecols=[0],
                                    header=None,
                                    names=["word"]
                                )
                                words = (
                                    df["word"]
                                    .dropna()
                                    .astype(str)
                                    .str.strip()
                                    .str.lower()
                                    .replace(r'^\s*$', pd.NA, regex=True)
                                    .dropna()
                                )
                                word_set.update(words)
                            except Exception as e:
                                print(f"文件 {file_path.name} 工作表 {sheet_name} 读取失败: {str(e)}")
                except Exception as e:
                    print(f"无法打开文件 {file_path.name}: {str(e)}")
        except FileNotFoundError:
            print(f"警告：排除词目录 {directory} 不存在")
        return word_set

    def _generate_unique_sheet_names(self, base_name: str, existing_names: Dict[str, int]) -> str:
        """生成唯一的工作表名称"""
        clean_name = re.sub(r'[\\/*?:[\]]', '_', base_name)
        if clean_name and clean_name[0].isdigit():
            clean_name = "_" + clean_name
        clean_name = clean_name.strip()[:28]

        if clean_name not in existing_names:
            existing_names[clean_name] = 1
            return clean_name[:31]

        counter = existing_names[clean_name] + 1
        while True:
            suffix = f"_{counter}"
            new_name = f"{clean_name[:28]}{suffix}"[:31]
            if new_name not in existing_names:
                existing_names[clean_name] = counter
                existing_names[new_name] = 0
                return new_name
            counter += 1

    def _process_text(self, text: str) -> pd.DataFrame:
        """处理文本并拆分带撇号的单词"""
        sentences = re.split(r'(?<=[.!?])\s+', text)
        word_records = {}
        order_tracker = []

        for sentence in sentences:
            raw_words = self.word_pattern.findall(sentence)
            for word in raw_words:
                # 拆分带撇号的单词
                parts = [part for part in word.split("'") if len(part) > 2]

                for part in parts:
                    # 排除词检查（不区分大小写）
                    if part.lower() in self.exclude_lower:
                        continue
                    # 记录原始形式
                    if part not in word_records:
                        word_records[part] = {
                            'count': 1,
                            'example': sentence.strip(),
                            'first_seen': len(order_tracker)
                        }
                        order_tracker.append(part)
                    else:
                        word_records[part]['count'] += 1

        if not word_records:
            return pd.DataFrame()

        df = pd.DataFrame.from_dict(word_records, orient='index').reset_index()
        df.columns = ['Word', 'Count', 'Example', 'First_Seen']
        return df.sort_values(by='First_Seen').drop(columns=['First_Seen'])

    def export_to_excel(self, filename: str):
        """导出结果到Excel"""
        with pd.ExcelWriter(filename, engine='openpyxl') as writer:
            sheet_counter = {}

            for idx, (title, content) in enumerate(self.chapters):
                df = self._process_text(content)
                if df.empty:
                    print(f"跳过空章节: {title}")
                    continue

                base_name = re.sub(r'[\\/*?:[\]]', '_', title).strip()[:28]
                sheet_name = self._generate_unique_sheet_names(base_name, sheet_counter)

                df.to_excel(
                    writer,
                    sheet_name=sheet_name,
                    index=False,
                    header=False
                )


# Demo run: the first chapter exercises apostrophe splitting, the second the
# short-fragment (length <= 2) filter.
if __name__ == "__main__":
    demo_chapters = [
        ("Apostrophe Test",
         "Couldn't open the file. I'll retry. They're working on Don't problem."),
        ("Short Words",
         "A I am OK. He's in the lab. It's a test.")
    ]

    analyzer = ChapterWordAnalyzer(
        chapters=demo_chapters,
        exclude_dir="exclude_words"  # assumed to contain e.g. "the"
    )
    analyzer.export_to_excel("apostrophe_processed.xlsx")