import nltk
from nltk.corpus import wordnet as wn, verbnet as verb
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from typing import Dict, List, Optional, Set, Tuple


class NLTKLoader:
    """Builds sets of word forms that should be excluded from vocabulary extraction.

    Combines a hand-maintained table of irregular verb inflections with
    simple rule-based morphology (comparatives, superlatives, plurals) so
    that a known word is excluded together with its common variants.
    """

    def __init__(self):
        self.lemmatizer = WordNetLemmatizer()
        # Irregular verb inflections that the simple rules cannot derive.
        self.verb_forms = self._load_irregular_verbs()
        # Irregular adjective (comparative, superlative) pairs.
        self.adj_exceptions = {
            'good': ('better', 'best'),
            'bad': ('worse', 'worst'),
            'far': ('farther', 'farthest')
        }

    def _load_irregular_verbs(self) -> Dict[str, Set[str]]:
        """Return a hand-maintained map of base verb -> irregular forms.

        Used in place of missing NLTK corpus data; extend as needed.
        """
        return {
            'be': {'am', 'is', 'are', 'was', 'were', 'been', 'being'},
            'have': {'has', 'had', 'having'},
            'do': {'does', 'did', 'done', 'doing'},
            'go': {'went', 'gone', 'going'},
            'see': {'saw', 'seen', 'seeing'},
            'take': {'took', 'taken', 'taking'},
            # Extend with more common irregular verbs here.
        }

    def get_excluded_set(self, known_words: Set[str]) -> Set[str]:
        """Return the lowercased known words plus all their derived forms.

        For each known word this adds its irregular verb forms, its
        comparative/superlative (irregular table first, rules otherwise)
        and a rule-based plural.
        """
        excluded = set()
        for word in known_words:
            word_lower = word.lower()
            excluded.add(word_lower)

            # Irregular verb inflections.
            if word_lower in self.verb_forms:
                excluded.update(self.verb_forms[word_lower])

            # Adjective comparative / superlative.
            if word_lower in self.adj_exceptions:
                excluded.update(self.adj_exceptions[word_lower])
            else:
                excluded.add(self._get_comparative(word_lower))
                excluded.add(self._get_superlative(word_lower))

            # Noun plural.
            excluded.add(self._get_plural(word_lower))

        return excluded

    def _get_comparative(self, adj: str) -> str:
        """Rule-based comparative (happy -> happier, large -> larger)."""
        if len(adj) > 2 and adj.endswith('y') and adj[-2] not in 'aeiou':
            return adj[:-1] + 'ier'
        if adj.endswith('e'):
            # A trailing 'e' only takes 'r' — avoids 'largeer'.
            return adj + 'r'
        return adj + 'er'

    def _get_superlative(self, adj: str) -> str:
        """Rule-based superlative (happy -> happiest, large -> largest)."""
        if len(adj) > 2 and adj.endswith('y') and adj[-2] not in 'aeiou':
            return adj[:-1] + 'iest'
        if adj.endswith('e'):
            # A trailing 'e' only takes 'st' — avoids 'largeest'.
            return adj + 'st'
        return adj + 'est'

    def _get_plural(self, noun: str) -> str:
        """Rule-based plural (box -> boxes, city -> cities)."""
        if noun.endswith(('s', 'x', 'z', 'ch', 'sh')):
            return noun + 'es'
        elif noun.endswith('y') and len(noun) > 1 and noun[-2] not in 'aeiou':
            return noun[:-1] + 'ies'
        return noun + 's'

class VocabularyProcessor:
    """Extracts per-chapter lists of unknown words with counts and examples."""

    def __init__(self, nltk_loader: NLTKLoader):
        self.nltk_loader = nltk_loader
        self.lemmatizer = WordNetLemmatizer()

    def process_book(
            self,
            chapters: List[Tuple[str, str]],
            known_words: Set[str]
    ) -> Dict[str, List[Tuple[str, int, str]]]:
        """Collect unknown vocabulary for each chapter of a book.

        Args:
            chapters: (chapter_title, chapter_content) pairs.
            known_words: words the reader already knows; these and their
                derived forms are excluded from the result.

        Returns:
            Mapping of chapter title to a list of (word, count,
            example_sentence) tuples, sorted by descending count and then
            alphabetically.
        """
        excluded = self.nltk_loader.get_excluded_set(known_words)
        book_vocab = {}

        for chapter_title, chapter_content in chapters:
            chapter_dict = {}

            for sentence in sent_tokenize(chapter_content):
                tokens = word_tokenize(sentence)

                for word, pos in nltk.pos_tag(tokens):
                    # Skip punctuation and numeric tokens produced by the
                    # tokenizer — they are not vocabulary.
                    if not word.isalpha():
                        continue

                    word_lower = word.lower()
                    if word_lower in excluded:
                        continue

                    # Lemmatize so inflected forms of known words are
                    # also excluded.
                    wn_pos = self._get_wordnet_pos(pos)
                    lemma = (self.lemmatizer.lemmatize(word_lower, pos=wn_pos)
                             if wn_pos else word_lower)
                    if lemma in excluded:
                        continue

                    # Count the surface form; remember the first sentence
                    # it appeared in as the example.
                    entry = chapter_dict.setdefault(
                        word_lower, {'count': 0, 'example': None})
                    entry['count'] += 1
                    if entry['example'] is None:
                        entry['example'] = sentence

            # Most frequent first; ties broken alphabetically.
            sorted_words = sorted(
                chapter_dict.items(),
                key=lambda item: (-item[1]['count'], item[0])
            )
            book_vocab[chapter_title] = [
                (word, data['count'], data['example'])
                for word, data in sorted_words
            ]

        return book_vocab

    def _get_wordnet_pos(self, treebank_tag: str) -> Optional[str]:
        """Map a Penn Treebank POS tag to a WordNet POS constant.

        Returns None for tags WordNet has no category for (pronouns,
        determiners, etc.), in which case lemmatization is skipped.
        """
        if treebank_tag.startswith('J'):
            return wn.ADJ
        if treebank_tag.startswith('V'):
            return wn.VERB
        if treebank_tag.startswith('N'):
            return wn.NOUN
        if treebank_tag.startswith('R'):
            return wn.ADV
        return None