# Copyright © 2024-2025 Advanced Micro Devices, Inc. All rights reserved.

from abc import ABC, abstractmethod
from transformers import AutoTokenizer
import onnxruntime_genai as og
import numpy as np

class BaseTokenizer(ABC):
    """Abstract interface shared by all model tokenizers.

    Concrete subclasses adapt a backend tokenizer (HuggingFace, ONNX
    Runtime GenAI, ...) to this common set of operations.
    """

    @abstractmethod
    def encode(self, text):
        """Convert a text prompt into a sequence of token ids."""

    @abstractmethod
    def decode(self, tokens):
        """Convert a sequence of token ids back into text."""

    @abstractmethod
    def get_prompt_length(self, tokens):
        """Return the number of tokens in an encoded prompt."""

    @abstractmethod
    def set_params(self, prompt_length, tokens):
        """Truncate *tokens* to *prompt_length*, preserving the end special token."""

class ChatGLMTokenizer(BaseTokenizer):
    """Tokenizer for ChatGLM, backed by a HuggingFace ``AutoTokenizer``."""

    def __init__(self, model_path):
        # trust_remote_code is needed because ChatGLM ships its own
        # tokenizer implementation with the model repository.
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    def encode(self, text):
        # build_chat_input applies ChatGLM's chat template; the ids come
        # back as a torch tensor, so move to host memory and get numpy.
        encoded = self.tokenizer.build_chat_input(text)
        return encoded["input_ids"].cpu().numpy()

    def decode(self, tokens):
        return self.tokenizer.decode(tokens)

    def get_prompt_length(self, tokens):
        # tokens is 2-D (batch, sequence); the prompt length is the
        # sequence dimension.
        return tokens.shape[1]

    def set_params(self, prompt_length, tokens):
        """Truncate to prompt_length - 1 tokens, re-attaching the final special token."""
        end_token = tokens[-1][-1]
        truncated = tokens[:, : prompt_length - 1]
        return np.concatenate([truncated, np.array([[end_token]])], axis=1)

class ONNXTokenizerWrapper(BaseTokenizer):
    """Tokenizer wrapper for ONNX-based models (LLaMA, Phi, Mistral, Qwen)."""

    def __init__(self, model):
        self.tokenizer = og.Tokenizer(model)

    def encode(self, text):
        """Convert text into a sequence of token ids."""
        return self.tokenizer.encode(text)

    def decode(self, tokens):
        """Convert a sequence of token ids back into text."""
        return self.tokenizer.decode(tokens)

    def get_prompt_length(self, tokens):
        # tokens is a flat 1-D sequence of token ids.
        return len(tokens)

    def set_params(self, prompt_length, tokens):
        """Truncate to prompt_length - 1 tokens and re-attach the final token.

        NOTE(review): og.Tokenizer.encode returns a numpy array in current
        onnxruntime-genai, and ndarrays have no ``.append`` — the previous
        list-style append crashed on that input. Converting the slice to a
        plain list fixes ndarray input while leaving list input unchanged.
        """
        input_ids = list(tokens[:prompt_length - 1])
        input_ids.append(tokens[-1])  # keep the end-of-prompt special token
        return input_ids


def get_tokenizer(model_path, model_type, model):
    """Returns the appropriate tokenizer instance based on model type."""
    supported = {"chatglm", "llama", "phi", "mistral", "qwen"}

    # Unknown model types fall back to the generic ONNX wrapper with a warning.
    if model_type not in supported:
        print(f"Warning: Unsupported model type '{model_type}'. Defaulting to ONNXTokenizerWrapper.")
        return ONNXTokenizerWrapper(model)

    # ChatGLM is the only type built from a model path; the rest wrap the
    # already-loaded ONNX model object.
    if model_type == "chatglm":
        return ChatGLMTokenizer(model_path)
    return ONNXTokenizerWrapper(model)
