import os
import pathlib
from typing import override

import torch
from torch import nn
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

from .interface import Emotion, EmotionClassifier

__all__ = ["SimpleEmotionClassifier", "ChineseEmotionSmallClassifier"]


class SimpleEmotionClassifier(EmotionClassifier):
    """
    A simple emotion classifier at https://huggingface.co/WJL110/emotion-classifier.
    """

    # Maps the model's raw output labels onto the project's Emotion enum.
    # An unknown label raises KeyError (same contract as before).
    _EMOTION_MAP = {
        "LABEL_0": Emotion.POSITIVE,
        "LABEL_1": Emotion.NEGATIVE,
        "LABEL_2": Emotion.NEGATIVE,
    }

    def __init__(self, cache_dir: os.PathLike | str | pathlib.Path) -> None:
        """Build the text-classification pipeline, caching model files in *cache_dir*."""
        self._model = pipeline(
            "text-classification",
            model="WJL110/emotion-classifier",
            model_kwargs={"cache_dir": cache_dir},
        )

    @override
    def classify(self, sentence: str) -> Emotion:
        """Return the Emotion mapped from the pipeline's top prediction for *sentence*."""
        top_prediction = self._model(sentence)[0]
        label = top_prediction["label"]
        return SimpleEmotionClassifier._EMOTION_MAP[label]


class ChineseEmotionSmallClassifier(EmotionClassifier):
    """
    Pretrained chinese emotion classifier at https://huggingface.co/Johnson8187/Chinese-Emotion-Small.
    """

    # Maps the model's 8 output class indices onto the coarser Emotion enum.
    _EMOTION_MAP = {
        0: Emotion.NEUTRAL,  # neutral
        1: Emotion.NEUTRAL,  # concerned
        2: Emotion.POSITIVE,  # happy
        3: Emotion.NEGATIVE,  # angry
        4: Emotion.NEGATIVE,  # sad
        5: Emotion.NEUTRAL,  # questioning
        6: Emotion.POSITIVE,  # surprised
        7: Emotion.NEGATIVE,  # disgusted
    }

    def __init__(self, cache_dir: os.PathLike | str | pathlib.Path) -> None:
        """Load tokenizer and model (cached in *cache_dir*), moving the model to GPU if available."""
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._tokenizer = AutoTokenizer.from_pretrained(
            "Johnson8187/Chinese-Emotion-Small",
            cache_dir=cache_dir,
        )
        self._model: nn.Module = AutoModelForSequenceClassification.from_pretrained(
            "Johnson8187/Chinese-Emotion-Small",
            cache_dir=cache_dir,
        )
        self._model.to(self._device)
        # Inference only: make eval mode explicit (from_pretrained already returns an
        # eval-mode model, but this guards against the model being left in train mode).
        self._model.eval()

    @override
    def classify(self, sentence: str) -> Emotion:
        """Classify *sentence* and map the model's argmax class index to an Emotion.

        Raises:
            KeyError: if the model predicts a class outside 0-7 (should not happen
            for this 8-label checkpoint).
        """
        # The tokenizer returns a BatchEncoding (dict-like), not a torch.Tensor —
        # the previous annotation was wrong; `.to()` and `**inputs` rely on it.
        inputs = self._tokenizer(sentence, return_tensors="pt", truncation=True, padding=True)
        inputs = inputs.to(self._device)

        with torch.no_grad():
            outputs = self._model(**inputs)  # type: ignore

        # Take the argmax over the label dimension explicitly (dim=-1) instead of
        # over the flattened tensor; identical for a single sentence (batch of 1),
        # but no longer silently wrong if a batch were ever passed through.
        predicted_class = int(torch.argmax(outputs.logits, dim=-1).item())
        return ChineseEmotionSmallClassifier._EMOTION_MAP[predicted_class]
