# -*- coding: utf-8 -*-
from ebooklib import epub
from ebooklib.epub import EpubHtml
from bs4 import BeautifulSoup
import re
import pandas as pd
import os
import sys
from datetime import datetime
import nltk
from nltk.stem import WordNetLemmatizer

# ████████████████████████████████████████████
# NLTK data path configuration (adjust to your actual installation path)
NLTK_DATA_PATH = r"D:\dev\py\dict\venv\nltk_data"
nltk.data.path.append(NLTK_DATA_PATH)

# Initialize the lemmatizer, failing fast if the WordNet corpus is missing.
# NOTE(review): WordNetLemmatizer may load WordNet lazily, so a LookupError
# could surface on first lemmatize call rather than here — verify.
try:
    lemmatizer = WordNetLemmatizer()
except LookupError:
    print(f"❌ 词形还原数据加载失败，请确认：{NLTK_DATA_PATH}/corpora/wordnet")
    sys.exit(1)

# Configuration parameters
EPUB_PATH = "books/sample.epub"  # input EPUB file
EXCEL_PATH = "known_words.xlsx"  # known-word list (column A of sheet 'ALL')
OUTPUT_PATH = "vocab_by_chapter.xlsx"  # output workbook, one sheet per chapter
MIN_WORD_LEN = 4  # lemmas shorter than this are ignored

# Custom English stopword set (extend as needed)
CUSTOM_STOPWORDS = {
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',
    'your', 'yours', 'he', 'him', 'his', 'she', 'her', 'hers', 'it', 'its',
    'they', 'them', 'their', 'what', 'which', 'who', 'this', 'that', 'am',
    'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had',
    'do', 'does', 'did', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because',
    'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'between'
}

# Progress-logging helper.
def log_progress(message, level=1):
    """Print a timestamped progress line, indented according to nesting level.

    Args:
        message: text to display.
        level: nesting depth; each level above 1 adds two leading spaces
            before the arrow marker.
    """
    now = datetime.now().strftime("%H:%M:%S")
    if level > 1:
        marker = "  " * (level - 1) + "▶"
    else:
        marker = "▶"
    print(f"[{now}] {marker} {message}")

def load_known_words():
    """Load the known-word list from column A of the 'ALL' sheet in EXCEL_PATH.

    Returns:
        set[str]: lowercased known words; empty set if the file cannot be read.
    """
    try:
        log_progress(f"正在加载已知词表 {EXCEL_PATH}...")
        df = pd.read_excel(
            EXCEL_PATH,
            sheet_name='ALL',
            usecols="A",
            header=None,
            names=['word'],
            engine='openpyxl'
        )
        log_progress(f"成功加载 {len(df)} 个已知单词")
        # Drop missing cells BEFORE casting to str: astype(str) converts NaN
        # into the literal string "nan", which made the original post-cast
        # dropna() a no-op and injected "nan" into the known-word set.
        return set(df['word'].dropna().astype(str).str.lower().unique())
    except Exception as e:
        # Best-effort: a missing/unreadable word list degrades to "no known
        # words" rather than aborting the whole run.
        log_progress(f"词表加载异常: {str(e)}", level=2)
        return set()

def lemmatize_word(word):
    """Reduce *word* to a lowercase base form.

    Tries a verb lemmatization first (handles tense inflection); when that
    leaves the word unchanged, falls back to noun lemmatization (handles
    plurals).
    """
    base = lemmatizer.lemmatize(word, pos='v')
    if base == word:
        # Verb pass was a no-op — try stripping a plural instead.
        base = lemmatizer.lemmatize(word, pos='n')
    return base.lower()

def extract_chapters(book):
    """Extract (title, text) pairs from every HTML document in the EPUB.

    Args:
        book: an ebooklib EpubBook.

    Returns:
        list[tuple[str, str]]: chapter title and its plain text (the text of
        all <p> and <li> elements joined with spaces). Title defaults to
        "未命名章节" when no heading is found.
    """
    log_progress("开始解析电子书章节结构...")
    chapters = []
    for item in book.get_items():
        if isinstance(item, EpubHtml):
            soup = BeautifulSoup(item.get_content(), 'html.parser')

            # Chapter title: first match among common heading selectors.
            # select_one runs the CSS query once, instead of the original
            # select(...)[0] which queried the document twice per selector.
            title = "未命名章节"
            for selector in ('h1', 'h2', 'h3', 'div.chapter-title'):
                heading = soup.select_one(selector)
                if heading is not None:
                    title = heading.get_text().strip()
                    break

            # Strip non-content elements before extracting body text.
            for elem in soup(['script', 'style', 'nav', 'footer']):
                elem.decompose()

            text = ' '.join(p.get_text() for p in soup.find_all(['p', 'li']))
            chapters.append((title, text))

    log_progress(f"共发现 {len(chapters)} 个章节")
    return chapters

def process_book():
    """End-to-end pipeline: read the EPUB, collect unknown lemmas per chapter,
    and write one Excel sheet per chapter with frequencies and example
    sentences.

    Exits the process with status 1 on any failure.
    """
    try:
        start_time = datetime.now()
        log_progress(f"开始处理电子书: {os.path.basename(EPUB_PATH)}")
        log_progress(f"NLTK数据路径: {NLTK_DATA_PATH}", level=2)

        # Load inputs
        known_words = load_known_words()
        book = epub.read_epub(EPUB_PATH)
        chapters = extract_chapters(book)

        valid_sheet_count = 0
        used_sheet_names = set()  # guard against duplicate chapter titles

        # Context manager guarantees the writer is closed (and the file
        # handle released) even when an exception escapes the loop — the
        # original bare `writer.close()` leaked on error.
        with pd.ExcelWriter(OUTPUT_PATH, engine='openpyxl') as writer:
            for idx, (chap_title, text) in enumerate(chapters, 1):
                log_progress(f"正在处理第 {idx}/{len(chapters)} 章: {chap_title[:30]}...", level=2)

                # Tokenize: lowercase alphabetic words, allowing internal hyphens.
                words = re.findall(r"\b[a-zA-Z]+(?:-[a-zA-Z]+)*\b", text.lower())

                # Lemmatize + filter; seen_lemmas both deduplicates and
                # preserves first-occurrence order.
                filtered_words = []
                seen_lemmas = set()
                for word in words:
                    lemma = lemmatize_word(word)
                    if (len(lemma) >= MIN_WORD_LEN
                            and lemma not in CUSTOM_STOPWORDS
                            and lemma not in known_words
                            and lemma not in seen_lemmas):
                        filtered_words.append(lemma)
                        seen_lemmas.add(lemma)

                # Skip chapters that contribute no new vocabulary.
                if not filtered_words:
                    log_progress("本章无新生词，跳过保存", level=3)
                    continue

                # Frequency table
                word_counts = pd.Series(filtered_words).value_counts().reset_index()
                word_counts.columns = ['单词', '出现次数']

                # Example sentence per lemma: exact-word match first, then any
                # inflected form. re.escape keeps hyphenated lemmas (or any
                # future token shape) from being read as regex metacharacters.
                examples = {}
                for word in word_counts['单词']:
                    safe = re.escape(word)
                    raw_matches = re.findall(rf'(\b{safe}\b.*?[.!?])', text, re.I) or \
                                  re.findall(rf'(\b{safe}\w*\b.*?[.!?])', text, re.I)
                    examples[word] = raw_matches[0].strip() if raw_matches else "无上下文"

                # Assemble output table sorted by descending frequency.
                df = word_counts.merge(
                    pd.DataFrame(examples.items(), columns=['单词', '例句']),
                    on='单词'
                ).sort_values('出现次数', ascending=False)

                # Sheet name: strip Excel-invalid characters, cap length, and
                # dedupe — the original silently reused a sheet when two
                # chapters shared a title.
                sheet_name = re.sub(r'[\\/*?:[\]]', '', chap_title)[:30].strip()
                if not sheet_name:
                    sheet_name = f"Chapter_{idx}"
                if sheet_name in used_sheet_names:
                    sheet_name = f"{sheet_name[:25]}_{idx}"
                used_sheet_names.add(sheet_name)

                df.to_excel(writer, sheet_name=sheet_name, index=False)
                valid_sheet_count += 1
                log_progress(f"保存词形归一化数据：{sheet_name}（{len(df)}个词根）", level=3)

            # openpyxl cannot save a workbook with zero sheets; write a stub
            # so an all-skipped book still produces a valid output file.
            if valid_sheet_count == 0:
                pd.DataFrame({'信息': ['未发现生词']}).to_excel(
                    writer, sheet_name='Empty', index=False)

        time_cost = (datetime.now() - start_time).total_seconds()
        log_progress(f"处理完成！有效章节 {valid_sheet_count} 个，总耗时 {time_cost:.1f} 秒")
        log_progress(f"结果文件：{os.path.abspath(OUTPUT_PATH)}")

    except Exception as e:
        log_progress(f"处理失败: {str(e)}", level=3)
        sys.exit(1)

if __name__ == '__main__':
    # Silence noisy third-party library warnings before the run starts.
    import warnings
    for noisy_category in (UserWarning, FutureWarning):
        warnings.filterwarnings("ignore", category=noisy_category)

    process_book()