#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/1/22 14:12
# @Author  : wanghaoran
# @File    : inference.py
from typing import Optional
from transformers import AutoModelForSequenceClassification, AutoTokenizer,\
    PreTrainedModel, PreTrainedTokenizer
from torch import nn
# from optimum.onnxruntime import ORTModelForSequenceClassification


# class RerankerForInference(object):
#     def __init__(self, model_path):
#         self.model_path = model_path
#
#     def from_pretrained(self):
#         tokenizer = AutoTokenizer.from_pretrained(self.model_path)
#         model_ort = ORTModelForSequenceClassification.from_pretrained(self.model_path, file_name="onnx/model.onnx")
#         return model_ort, tokenizer


class RerankerForInference(nn.Module):
    """Convenience wrapper bundling a HuggingFace sequence-classification
    model with its tokenizer for reranking inference.

    Both components are optional at construction time; they can be supplied
    later via :meth:`load_pretrained_model` / :meth:`load_pretrained_tokenizer`,
    or loaded together with the :meth:`from_pretrained` alternate constructor.
    """

    def __init__(
            self,
            hf_model: Optional[PreTrainedModel] = None,
            tokenizer: Optional[PreTrainedTokenizer] = None
    ):
        super().__init__()
        self.hf_model = hf_model
        self.tokenizer = tokenizer

    def tokenize(self, *args, **kwargs):
        """Delegate directly to the wrapped tokenizer's ``__call__``."""
        tokenizer = self.tokenizer
        return tokenizer(*args, **kwargs)

    def forward(self, batch):
        """Run the wrapped model on an already-tokenized batch.

        ``batch`` is expected to be a mapping of model inputs (as produced
        by the tokenizer) and is expanded as keyword arguments.
        """
        model = self.hf_model
        return model(**batch)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str):
        """Alternate constructor: load both model and tokenizer from a
        checkpoint and put the model into eval mode before wrapping."""
        model = AutoModelForSequenceClassification.from_pretrained(
            pretrained_model_name_or_path
        )
        model.eval()
        tokenizer = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path
        )
        return cls(model, tokenizer)

    def load_pretrained_model(self, pretrained_model_name_or_path, *model_args, **kwargs):
        """Replace the wrapped model with one loaded from a checkpoint.

        Extra positional/keyword arguments are forwarded verbatim to
        ``AutoModelForSequenceClassification.from_pretrained``.
        """
        loaded = AutoModelForSequenceClassification.from_pretrained(
            pretrained_model_name_or_path, *model_args, **kwargs
        )
        self.hf_model = loaded

    def load_pretrained_tokenizer(self, pretrained_model_name_or_path, *inputs, **kwargs):
        """Replace the wrapped tokenizer with one loaded from a checkpoint.

        Extra positional/keyword arguments are forwarded verbatim to
        ``AutoTokenizer.from_pretrained``.
        """
        loaded = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, *inputs, **kwargs
        )
        self.tokenizer = loaded
