# -*- coding: utf-8 -*-
from ebooklib import epub
from ebooklib.epub import EpubHtml
from bs4 import BeautifulSoup
import re
import pandas as pd
import os
import sys
from datetime import datetime
import nltk
from nltk.stem import WordNetLemmatizer, wordnet
from nltk import pos_tag

# ================== Configuration ==================
NLTK_DATA_PATH = r"D:\dev\py\dict\venv\nltk_data"  # local NLTK data directory
EPUB_PATH = "books/sample.epub"  # input e-book
EXCEL_PATH = "known_words.xlsx"  # known-word list (sheet 'ALL', column A)
OUTPUT_PATH = "vocab_by_chapter.xlsx"  # output workbook (one sheet per chapter)
MIN_WORD_LEN = 4  # minimum accepted lemma length
MAX_WORD_LEN = 20  # maximum accepted lemma length
# =============================================

# Make NLTK search the local data directory first.
nltk.data.path.append(NLTK_DATA_PATH)

# Lemmatizer initialisation.
# NOTE(review): WordNetLemmatizer() loads its corpus lazily, so a missing
# wordnet corpus typically raises LookupError at the first lemmatize() call,
# not here — this guard may never fire; confirm.
try:
    lemmatizer = WordNetLemmatizer()
except LookupError:
    print(f"❌ 词形还原数据加载失败，请确认：{NLTK_DATA_PATH}/corpora/wordnet")
    sys.exit(1)

# Custom configuration
# Hand-maintained inflected-form -> base-form map.
# NOTE(review): this literal is overwritten later in the file by
# `IRREGULAR_VERBS = load_irregular_verbs()`, so these entries only take
# effect if that call fails AND returns these as a fallback — it does not;
# consider merging the two sources.
IRREGULAR_VERBS = {
    'came': 'come', 'come': 'come', 'called': 'call', 'calling': 'call',
    'got': 'get', 'gotten': 'get', 'spent': 'spend', 'wheels': 'wheel',
    'getting': 'get', 'witnessed': 'witness', 'leaned': 'lean',
    'listened': 'listen', 'breathed': 'breathe', 'wrapped': 'wrap'
}

# Words whose ORIGINAL surface form ends with any of these suffixes are
# rejected outright by is_valid_word().
SUFFIX_FILTER = {
    'ing', 'ed', 'es', 's', 'n\'t', '\'ll', '\'re', '\'ve', 'er', 'est'
}

# Custom stopword list (pronouns, auxiliaries, common function words);
# checked against the LEMMA, not the surface form.
CUSTOM_STOPWORDS = {
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',
    'your', 'yours', 'he', 'him', 'his', 'she', 'her', 'hers', 'it', 'its',
    'they', 'them', 'their', 'what', 'which', 'who', 'this', 'that', 'am',
    'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had',
    'do', 'does', 'did', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because',
    'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'between'
}

# Apostrophe words explicitly allowed, mapped straight to their lemma.
SPECIAL_ALLOW = {
    "o'clock": 'clock'
}

# ================== 功能函数 ==================
def log_progress(message, level=1):
    """分级日志输出"""
    timestamp = datetime.now().strftime("%H:%M:%S")
    prefix = "  " * (level-1) + "▶" if level > 1 else "▶"
    print(f"[{timestamp}] {prefix} {message}")

def load_known_words():
    """Load the known-word list from EXCEL_PATH.

    Reads column A of sheet 'ALL' and returns a set of lowercase words.
    On any failure the error is logged and an empty set is returned, so
    the pipeline still runs (every word is then treated as unknown).
    """
    try:
        log_progress(f"加载已知词表 {EXCEL_PATH}...")
        df = pd.read_excel(EXCEL_PATH, sheet_name='ALL', usecols="A",
                           header=None, names=['word'], engine='openpyxl')
        # BUG FIX: drop blanks BEFORE astype(str) — astype(str) turns NaN
        # into the literal string "nan", which dropna() can no longer
        # remove, polluting the known-word set.
        known = set(df['word'].dropna().astype(str).str.lower().unique())
        log_progress(f"已加载 {len(known)} 个已知单词")
        return known
    except Exception as e:
        # Best-effort: a missing/corrupt word list must not abort the run.
        log_progress(f"词表加载失败: {str(e)}", 2)
        return set()

def enhanced_lemmatize(word):
    """Enhanced lemmatization (first version).

    NOTE(review): DEAD CODE — this function is shadowed by the second
    `enhanced_lemmatize` defined later in this file; at runtime only the
    later definition is used. Consider deleting this one.

    Returns a lemma string, or None for apostrophe words.
    """
    word = word.lower()

    # Explicit allow-list of apostrophe words (e.g. "o'clock").
    if word in SPECIAL_ALLOW:
        return SPECIAL_ALLOW[word]

    # Reject all remaining apostrophe words (contractions, possessives).
    if "'" in word:
        return None

    # Hyphenated compounds: keep only the first component.
    if '-' in word:
        parts = word.split('-')
        if parts[0] in IRREGULAR_VERBS:
            return IRREGULAR_VERBS[parts[0]]
        word = parts[0]

    # Custom irregular-verb lookup.
    if word in IRREGULAR_VERBS:
        return IRREGULAR_VERBS[word]

    # POS-guided WordNet lemmatization.
    try:
        pos = pos_tag([word])[0][1][0].lower()
        # NOTE(review): this maps noun tags ('n') to verb lemmatization and
        # everything else to noun — looks inverted/suspect; confirm intent.
        pos = 'v' if pos in ['v', 'n'] else 'n'
        lemma = lemmatizer.lemmatize(word, pos=pos)
    except:  # NOTE(review): bare except hides real errors; narrow if revived.
        lemma = word

    # Strip the longest matching suffix from SUFFIX_FILTER (at most one).
    for suffix in sorted(SUFFIX_FILTER, key=len, reverse=True):
        if lemma.endswith(suffix):
            lemma = lemma[:-len(suffix)]
            break
    return lemma

def extract_chapters(book):
    """Parse the e-book and return a list of (title, body_text) tuples."""
    log_progress("解析章节结构中...")
    results = []
    for doc in book.get_items():
        if not isinstance(doc, EpubHtml):
            continue
        soup = BeautifulSoup(doc.get_content(), 'html.parser')

        # Title lookup by priority: h1 > h2 > h3 > div.chapter-title.
        chapter_title = "未命名章节"
        for css in ('h1', 'h2', 'h3', 'div.chapter-title'):
            found = soup.select(css)
            if found:
                chapter_title = found[0].get_text().strip()
                break

        # Strip non-content elements before extracting text.
        for junk in soup(['script', 'style', 'nav', 'footer']):
            junk.decompose()

        body = ' '.join(node.get_text() for node in soup.find_all(['p', 'li']))
        results.append((chapter_title, body))
    log_progress(f"找到 {len(results)} 个章节")
    return results

# 加载NLTK不规则动词词典
def load_irregular_verbs():
    """Build an {inflected_form: base_form} verb mapping.

    Primary source is WordNet's derivationally related forms; if the
    WordNet corpus is unavailable, a pre-built CSV in the NLTK data
    directory is tried; finally an empty dict is returned.
    """
    try:
        # BUG FIX: the module-level `wordnet` name comes from
        # `from nltk.stem import ... wordnet` — that is the lemmatizer
        # MODULE, which has no all_synsets()/VERB, so the original code
        # always raised here and the failure was hidden by a bare except.
        # Import the WordNet corpus reader explicitly instead.
        from nltk.corpus import wordnet as wn
        verb_dict = {}
        for synset in wn.all_synsets(pos=wn.VERB):
            for lemma in synset.lemmas():
                # NOTE(review): derivationally_related_forms() crosses
                # part-of-speech boundaries, so some entries map nouns or
                # adjectives onto verb bases — confirm this is intended.
                for form in lemma.derivationally_related_forms():
                    verb_dict[form.name().lower()] = lemma.name().lower()
        return verb_dict
    except Exception:
        # Fallback: pre-packaged CSV of (past, base) pairs, if present.
        csv_path = os.path.join(NLTK_DATA_PATH, 'corpora', 'irregular_verbs.csv')
        if os.path.exists(csv_path):
            df = pd.read_csv(csv_path)
            return dict(zip(df['past'], df['base']))
        print("⚠️ 建议下载完整动词表：https://github.com/english-verbs/irregular-verbs/raw/master/verbs.csv")
        return {}

# Replaces the hand-maintained literal defined near the top of the file.
IRREGULAR_VERBS = load_irregular_verbs()

# ================== 修改词形还原函数 ==================
def enhanced_lemmatize(word):
    """Enhanced lemmatization using native NLTK WordNet data.

    This definition intentionally replaces the earlier
    `enhanced_lemmatize` above (the last definition wins at import time).

    Returns the lemma for *word*, or None for apostrophe words
    (contractions/possessives) outside SPECIAL_ALLOW.
    """
    word = word.lower()

    # 1. Explicit allow-list (e.g. "o'clock" -> "clock").
    if word in SPECIAL_ALLOW:
        return SPECIAL_ALLOW[word]

    # 2. Reject remaining apostrophe words.
    if "'" in word:
        return None

    # 3. Irregular-verb lookup.
    if word in IRREGULAR_VERBS:
        return IRREGULAR_VERBS[word]

    # 4. WordNet lemmatization: try the verb reading first, fall back to noun.
    try:
        lemma_v = lemmatizer.lemmatize(word, pos='v')
        if lemma_v != word:
            return lemma_v
        return lemmatizer.lemmatize(word, pos='n')
    except LookupError:
        # BUG FIX: was a bare `except:` that swallowed every error (including
        # KeyboardInterrupt and genuine bugs). Only a missing WordNet corpus
        # (LookupError) should degrade to returning the surface form.
        return word

def is_valid_word(lemma, original_word):
    """Validate a (lemma, surface form) pair against length, digit,
    hyphenation, suffix and stopword rules. Returns True if the lemma
    should be kept as a vocabulary candidate."""
    # Length gate (also rejects None/empty lemmas).
    if not lemma or not (MIN_WORD_LEN <= len(lemma) <= MAX_WORD_LEN):
        return False

    # No digits allowed anywhere in the lemma.
    if any(ch.isdigit() for ch in lemma):
        return False

    # Overly long hyphenated compounds are noise.
    if '-' in original_word and len(original_word) > 15:
        return False

    # Reject if the ORIGINAL form carries a filtered suffix.
    for suffix in SUFFIX_FILTER:
        if original_word.endswith(suffix):
            return False

    # Finally, the lemma must not be a stopword.
    return lemma not in CUSTOM_STOPWORDS

def process_book():
    """Main pipeline: EPUB -> per-chapter new-vocabulary Excel workbook.

    For each chapter: tokenize, lemmatize, filter against the known-word
    list, collect one example sentence per new word, and write a sheet.
    Exits with status 1 on any unrecoverable failure.
    """
    try:
        log_progress("启动处理流程")
        start_time = datetime.now()

        # Initialise inputs.
        known_words = load_known_words()
        book = epub.read_epub(EPUB_PATH)
        chapters = extract_chapters(book)

        valid_sheets = 0
        used_names = set()  # Excel sheet names must be unique per workbook

        # BUG FIX: the writer was previously closed manually and leaked
        # (file left open/unsaved) if any chapter raised; `with` guarantees
        # cleanup on every path.
        with pd.ExcelWriter(OUTPUT_PATH, engine='openpyxl') as writer:
            for idx, (title, text) in enumerate(chapters, 1):
                log_progress(f"处理章节 {idx}/{len(chapters)}: {title[:30]}...", 2)

                # Tokenize: alphabetic runs, optionally hyphen/apostrophe-joined.
                words = re.findall(r"\b[a-zA-Z]+(?:[-'][a-zA-Z]+)*\b", text.lower())

                # Tally every occurrence of each lemma and remember the first
                # surface form for validation.
                # BUG FIX: the old code deduplicated lemmas BEFORE counting,
                # then computed vocab.count(t) on the deduplicated list, so
                # the 出现次数 column was always 1 (and O(n^2) to boot).
                counts = {}
                first_form = {}
                for word in words:
                    # Apostrophe words are skipped unless explicitly allowed.
                    if "'" in word and word not in SPECIAL_ALLOW:
                        continue
                    lemma = enhanced_lemmatize(word)
                    if not lemma:
                        continue
                    counts[lemma] = counts.get(lemma, 0) + 1
                    first_form.setdefault(lemma, word)

                # Keep only valid, unknown lemmas.
                vocab = [
                    lemma for lemma in counts
                    if is_valid_word(lemma, first_form[lemma])
                    and lemma not in known_words
                    and first_form[lemma] not in known_words
                ]

                # Skip chapters that contribute no new vocabulary.
                if not vocab:
                    log_progress("无新生词，跳过", 3)
                    continue

                # One example sentence per term (first sentence containing it).
                examples = {}
                for term in vocab:
                    # re.escape hardens against regex metacharacters in terms.
                    match = re.search(rf'(\b{re.escape(term)}\b.*?[.!?])', text, re.I)
                    examples[term] = match.group(0).strip() if match else "无上下文"

                # Build the sheet, most frequent first.
                df = pd.DataFrame({
                    '单词': vocab,
                    '出现次数': [counts[t] for t in vocab],
                    '例句': [examples[t] for t in vocab]
                }).sort_values('出现次数', ascending=False)

                # Sanitize the title into a legal, non-empty sheet name.
                sheet_name = re.sub(r'[\\/*?:[\]]', '', title)[:30].strip()
                sheet_name = sheet_name or f"Chapter_{idx}"
                # BUG FIX: chapters with identical titles previously collided;
                # disambiguate with the chapter index.
                if sheet_name in used_names:
                    sheet_name = f"{sheet_name[:25]}_{idx}"
                used_names.add(sheet_name)

                df.to_excel(writer, sheet_name=sheet_name, index=False)
                valid_sheets += 1
                log_progress(f"保存 {len(df)} 个生词到 [{sheet_name}]", 3)

        # Summary.
        time_cost = (datetime.now() - start_time).total_seconds()
        log_progress(f"处理完成！共 {valid_sheets} 个有效章节，耗时 {time_cost:.1f} 秒")
        log_progress(f"结果文件：{os.path.abspath(OUTPUT_PATH)}")

    except Exception as e:
        log_progress(f"处理失败: {str(e)}", 3)
        sys.exit(1)

if __name__ == '__main__':
    import warnings

    # Silence noisy library warnings before running the pipeline.
    for noisy in (UserWarning, FutureWarning):
        warnings.filterwarnings("ignore", category=noisy)

    process_book()