import os
from collections.abc import Sequence
from dataclasses import dataclass
from typing import override

import jieba.posseg

from ..unmarshalers import DocumentStage
from .interface import Processor

__all__ = ["TokenizedDocument", "TokenizedDocumentStage", "JiebaTokenizationProcessor"]

# A tokenized document is the ordered sequence of tokens kept after filtering.
type TokenizedDocument = Sequence[str]


@dataclass(frozen=True)
class TokenizedDocumentStage(object):
    """Immutable pipeline stage carrying the token lists produced by tokenization."""

    # One token sequence per input document, in the same order as the source documents.
    tokenized_documents: Sequence[TokenizedDocument]


class JiebaTokenizationProcessor(Processor[DocumentStage, TokenizedDocumentStage]):
    """
    Tokenizes each document with Jieba's POS-aware segmenter.

    A word survives the filter only when its part-of-speech tag begins with
    "n", "v", or "a" (noun, verb, or adjective families) and it does not
    appear in the optional stopword list.
    """

    # Words to drop regardless of their part-of-speech tag.
    _stopwords: set[str]

    def __init__(self, path: os.PathLike | str | None = None) -> None:
        """Load stopwords from *path* (one per line, UTF-8); no path means no stopwords."""
        self._stopwords = set()
        if path is not None:
            with open(path, encoding="utf-8") as file:
                self._stopwords = set(file.read().splitlines())

    @override
    def process(self, stage: DocumentStage) -> TokenizedDocumentStage:
        """Return a new stage holding one filtered token list per input document."""

        def _keep(word: str, tag: str) -> bool:
            # Prefix match covers compound tags such as "nr", "vn", "ad".
            return tag.startswith(("n", "v", "a")) and word not in self._stopwords

        tokenized: list[TokenizedDocument] = [
            [word for word, tag in jieba.posseg.cut(document) if _keep(word, tag)]
            for document in stage.documents
        ]
        return TokenizedDocumentStage(tokenized)
