from collections.abc import Sequence
from dataclasses import dataclass
from operator import itemgetter
from typing import no_type_check, override

import jieba.analyse
from tqdm import tqdm

from .interface import Processor
from .tokenizer import TokenizedDocumentStage

__all__ = ["Keyword", "KeywordStage", "JiebaKeywordExtractionProcessor"]


@dataclass(frozen=True)
class Keyword:
    """An extracted keyword paired with its aggregated importance score."""

    # The keyword text itself.
    word: str
    # Importance of the keyword (a TF-IDF-derived weight in this pipeline).
    importance: float


@dataclass(frozen=True)
class KeywordStage:
    """Pipeline stage holding the keywords extracted from a document corpus."""

    # Extracted keywords, ordered by the producing processor.
    keywords: Sequence[Keyword]


class JiebaKeywordExtractionProcessor(Processor[TokenizedDocumentStage, KeywordStage]):
    """
    A jieba-based keyword extractor that uses TF-IDF to extract keywords.

    This extractor uses `jieba.analyse.extract_tags` to extract keywords from each document. Note that jieba
    automatically handles the tokenization, so we join the tokens into a single string before extracting keywords.
    """

    @no_type_check
    @override
    def process(self, stage: TokenizedDocumentStage) -> KeywordStage:
        """
        Extract keywords from every tokenized document and average their TF-IDF weights.

        Args:
            stage: The upstream stage providing the tokenized documents.

        Returns:
            A `KeywordStage` whose keywords are sorted by descending mean importance
            (per-document average of the accumulated TF-IDF weights).
        """
        documents = stage.tokenized_documents
        # Guard against an empty corpus: the averaging step below divides by the
        # document count, which would otherwise raise ZeroDivisionError.
        if not documents:
            return KeywordStage([])

        # Accumulate each word's TF-IDF weight across all documents.
        words: dict[str, float] = {}
        for document in tqdm(documents, desc="Keyword extraction"):
            # jieba re-tokenizes internally, so feed it a whitespace-joined string.
            sentence = " ".join(document)
            for word, importance in jieba.analyse.extract_tags(sentence, topK=None, withWeight=True):
                words[word] = words.get(word, 0.0) + importance

        # Average each word's weight over the corpus size (hoisted out of the loop)
        # and sort by descending importance so the most relevant keywords come first.
        total = len(documents)
        keywords = [
            Keyword(word, importance / total)
            for word, importance in sorted(words.items(), key=itemgetter(1), reverse=True)
        ]
        return KeywordStage(keywords)
