from __future__ import annotations

from typing import Any, cast

import numpy as np
import pytest
import torch
from tokenizers import Tokenizer
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers.pre_tokenizers import Whitespace
from transformers import AutoTokenizer
from transformers.modeling_utils import PreTrainedModel
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast

from model2vec.inference import StaticModelPipeline
from model2vec.train import StaticModelForClassification

_TOKENIZER_TYPES = ["wordpiece", "bpe", "unigram"]


@pytest.fixture(scope="session", params=_TOKENIZER_TYPES, ids=_TOKENIZER_TYPES)
def mock_tokenizer(request: pytest.FixtureRequest) -> Tokenizer:
    """Create a mock tokenizer of the parametrized type (wordpiece, bpe, or unigram).

    All variants share the same 5-token vocabulary and a whitespace pre-tokenizer.

    :param request: Pytest fixture request carrying the tokenizer type as its param.
    :return: A `tokenizers.Tokenizer` wrapping the requested model type.
    :raises ValueError: If the parametrized tokenizer type is not supported.
    """
    vocab = ["[PAD]", "word1", "word2", "word3", "[UNK]"]
    unk_token = "[UNK]"
    # Build the token -> id mapping once; both WordPiece and BPE consume it.
    vocab_map = {token: idx for idx, token in enumerate(vocab)}

    tokenizer_type = request.param

    if tokenizer_type == "wordpiece":
        model = WordPiece(vocab=vocab_map, unk_token=unk_token, max_input_chars_per_word=100)
    elif tokenizer_type == "bpe":
        model = BPE(
            vocab=vocab_map,
            merges=[],
            unk_token=unk_token,
            fuse_unk=True,
            ignore_merges=True,
        )
    elif tokenizer_type == "unigram":
        # unk_id must reference the declared unk_token ("[UNK]", index 4);
        # the previous hard-coded 0 pointed at "[PAD]" instead.
        model = Unigram(
            vocab=[(token, 0.0) for token in vocab],
            unk_id=vocab.index(unk_token),
            byte_fallback=False,
        )
    else:
        raise ValueError(f"Unsupported tokenizer type: {tokenizer_type}")
    tokenizer = Tokenizer(model)
    tokenizer.pre_tokenizer = Whitespace()  # type: ignore  # Tokenizer issue

    return tokenizer


@pytest.fixture(scope="function")
def mock_berttokenizer() -> PreTrainedTokenizerFast:
    """Load the real BertTokenizerFast from the provided tokenizer.json file."""
    loaded = AutoTokenizer.from_pretrained("tests/data/test_tokenizer")
    return cast(PreTrainedTokenizerFast, loaded)


@pytest.fixture
def mock_transformer() -> PreTrainedModel:
    """Create a mock transformer model.

    The mock implements the minimal `PreTrainedModel` surface needed by the code
    under test: `.to()`, `.eval()`, and a forward pass returning an object with a
    `last_hidden_state` of shape (batch, seq_len, dim) and, optionally, a
    `pooler_output` of shape (batch, dim) filled with a constant.
    """

    class MockPreTrainedModel:
        def __init__(self, dim: int = 768, with_pooler: bool = True, pooler_value: float = 7.0) -> None:
            self.device = "cpu"
            self.name_or_path = "mock-model"
            # Hidden size of the fake embeddings.
            self.dim = dim
            # Whether the forward output includes a pooler_output tensor.
            self.with_pooler = with_pooler
            # Constant fill value for pooler_output.
            self.pooler_value = pooler_value

        def to(self, device: str) -> MockPreTrainedModel:
            """Record the target device and return self (mimics nn.Module.to)."""
            self.device = device
            return self

        def eval(self) -> MockPreTrainedModel:
            """No-op eval switch, returns self like nn.Module.eval."""
            return self

        def forward(self, *args: Any, **kwargs: Any) -> Any:
            """Fake forward pass driven only by the shape of `input_ids`.

            Accepts `input_ids` either positionally (the common HF calling
            convention `model(input_ids)`) or as a keyword argument; the
            previous kwargs-only lookup raised KeyError on positional calls.
            """
            input_ids = args[0] if args else kwargs["input_ids"]
            B, T = input_ids.shape
            # Each position t gets the constant vector [t, t, ..., t] of length dim,
            # yielding a deterministic (B, T, dim) hidden state.
            hidden = torch.arange(T, dtype=torch.float32, device=self.device).repeat(B, self.dim, 1).transpose(1, 2)
            out = {"last_hidden_state": hidden}
            if self.with_pooler:
                out["pooler_output"] = torch.full((B, self.dim), self.pooler_value, device=self.device)
            # Return an ad-hoc object exposing the outputs as attributes,
            # mirroring transformers' output dataclasses.
            return type("BaseModelOutputWithPoolingAndCrossAttentions", (object,), out)()

        __call__ = forward

    return cast(PreTrainedModel, MockPreTrainedModel())


@pytest.fixture(scope="session")
def mock_vectors() -> np.ndarray:
    """Create mock vectors: five 2-d rows, the last two being zero vectors."""
    rows = [
        [0.1, 0.2],
        [0.2, 0.3],
        [0.3, 0.4],
        [0.0, 0.0],
        [0.0, 0.0],
    ]
    return np.array(rows)


@pytest.fixture
def mock_config() -> dict[str, Any]:
    """Create a mock config."""
    config: dict[str, Any] = {"some_config": "value"}
    return config


@pytest.fixture(scope="session")
def mock_inference_pipeline(mock_trained_pipeline: StaticModelForClassification) -> StaticModelPipeline:
    """Mock pipeline built from the trained classification model."""
    pipeline = mock_trained_pipeline.to_pipeline()
    return pipeline


@pytest.fixture(
    params=[
        (False, "single_label", "str"),
        (False, "single_label", "int"),
        (True, "multilabel", "str"),
        (True, "multilabel", "int"),
    ],
    ids=lambda param: f"{param[1]}_{param[2]}",
    scope="session",
)
def mock_trained_pipeline(request: pytest.FixtureRequest) -> StaticModelForClassification:
    """Mock StaticModelForClassification with different label formats."""
    # Param tuple: (multilabel flag, human-readable id, label dtype).
    is_multilabel, _, label_type = request.param

    tokenizer = AutoTokenizer.from_pretrained("tests/data/test_tokenizer").backend_tokenizer
    # Fixed seed keeps the random embedding matrix deterministic across runs.
    torch.random.manual_seed(42)
    embeddings = torch.randn(len(tokenizer.get_vocab()), 12)
    model = StaticModelForClassification(vectors=embeddings, tokenizer=tokenizer, hidden_dim=12).to("cpu")

    texts = ["dog", "cat"]
    if label_type == "str":
        labels = [["a", "b"], ["a"]] if is_multilabel else ["a", "b"]  # type: ignore
    else:
        labels = [[0, 1], [0]] if is_multilabel else [0, 1]  # type: ignore

    model.fit(texts, labels)  # type: ignore
    return model
