from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


class ModelHandler:
    """Loads a causal LM once and runs inference on incoming requests."""

    def __init__(self):
        self.model = None
        self.tokenizer = None

    def load_model(self):
        # Load the model and tokenizer from the same checkpoint path.
        self.model = AutoModelForCausalLM.from_pretrained("your-model-path")
        self.tokenizer = AutoTokenizer.from_pretrained("your-model-path")

    def predict(self, inputs):
        # Tokenize the raw text and run a forward pass without tracking gradients.
        encoded = self.tokenizer(inputs, return_tensors="pt")
        with torch.no_grad():
            outputs = self.model(**encoded)
        return outputs


# Instantiate and load once at module import time so warm invocations reuse the
# already-loaded model. Note the instance must not share a name with the entry
# point function below, or the function definition would shadow it.
model_handler = ModelHandler()
model_handler.load_model()


def handler(event, context):
    # Entry point: pull the input text out of the event and run inference.
    inputs = event["data"]
    outputs = model_handler.predict(inputs)
    return outputs
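
# To exercise the entry point locally before deploying, a minimal smoke test
# like the one below can be appended. This is a sketch under assumptions: the
# event is a dict with a "data" key holding the input text (as in the handler
# above), the context is unused, and serializing the returned model outputs
# for a real response payload is left out.
if __name__ == "__main__":
    sample_event = {"data": "Hello, world"}
    result = handler(sample_event, context=None)
    # The forward pass returns a CausalLMOutputWithPast; checking the logits
    # shape is a quick sanity check that the model loaded and ran.
    print(result.logits.shape)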