from typing import Any, Dict, List

import torch
from transformers import BartForConditionalGeneration, BartTokenizer


class EndpointHandler:
    """Custom handler for a BART summarization model, following the
    Hugging Face Inference Endpoints convention: __call__ receives the
    request payload and returns a list of result dicts."""

    def __init__(self, path=""):
        # Prefer the GPU when one is available.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        try:
            self.model = BartForConditionalGeneration.from_pretrained(path).to(self.device)
            self.tokenizer = BartTokenizer.from_pretrained(path)
        except Exception as e:
            # Keep the handler alive so __call__ can report a clean error
            # instead of crashing the endpoint at startup.
            print(f"Error loading model or tokenizer from path {path}: {e}")
            self.model = None
            self.tokenizer = None

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        inputs = data.get("inputs", "")
        num_beams = data.get("num_beams", 4)

        if not inputs:
            return [{"error": "No inputs provided"}]
        if self.model is None or self.tokenizer is None:
            return [{"error": "Model or tokenizer not loaded correctly"}]

        # Truncate long documents to 1000 tokens (BART accepts at most
        # 1024 positions) and pad up to that fixed length.
        tokenized_input = self.tokenizer(
            inputs,
            return_tensors="pt",
            truncation=True,
            max_length=1000,
            padding="max_length",
        ).to(self.device)

        # Beam-search generation bounded to a 100-400 token summary; the
        # length penalty > 1.0 nudges the search toward longer outputs.
        summary_ids = self.model.generate(
            **tokenized_input,
            max_length=400,
            min_length=100,
            length_penalty=2.0,
            num_beams=num_beams,
            early_stopping=True,
        )
        summary_text = self.tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        return [{"summary_text": summary_text}]
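

# --- Usage sketch ----------------------------------------------------------
# A minimal local smoke test of the handler above, mirroring the payload
# shape an endpoint would forward. "facebook/bart-large-cnn" is only an
# illustrative checkpoint id (any BART summarization checkpoint should
# work here), and the input text is a placeholder.
if __name__ == "__main__":
    handler = EndpointHandler(path="facebook/bart-large-cnn")
    result = handler(
        {"inputs": "Replace this with a long article to summarize.", "num_beams": 4}
    )
    # Prints either {"summary_text": ...} or {"error": ...}.
    print(result[0])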