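"""Custom Hugging Face Inference Endpoints handler for LLaVA-NeXT-Video.

Loads the model and processor from a local path and generates a text
answer for a video clip given a text prompt.
"""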
from typing import Any, Dict, List

import torch
from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration

class EndpointHandler:
    def __init__(self, path: str = "/app"):
        # Load the model weights and move them to the GPU when one is available
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = LlavaNextVideoForConditionalGeneration.from_pretrained(path).to(self.device)

        # Load the processor from the configuration files
        self.processor = AutoProcessor.from_pretrained(path)

        # Ensure the model is in evaluation mode
        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data (Dict): Contains the input data including "clip" and "prompt".
        
        Returns:
            List[Dict[str, Any]]: The generated text from the model.
        """
        # Extract inputs from the data dictionary
        clip = data.get("clip")
        prompt = data.get("prompt")

        if clip is None or prompt is None:
            return [{"error": "Missing 'clip' or 'prompt' in input data"}]

        # Prepare the inputs for the model
        inputs_video = self.processor(text=prompt, videos=clip, padding=True, return_tensors="pt").to(self.model.device)

        # Generate output from the model; no gradients are needed at inference time
        generate_kwargs = {"max_new_tokens": 512, "do_sample": True, "top_p": 0.9}
        with torch.inference_mode():
            output = self.model.generate(**inputs_video, **generate_kwargs)
        generated_text = self.processor.batch_decode(output, skip_special_tokens=True)

        # Extract the assistant's answer from the decoded output; fall back to
        # the full decoded text if the "ASSISTANT:" marker is not present
        marker = "ASSISTANT:"
        marker_pos = generated_text[0].find(marker)
        if marker_pos != -1:
            assistant_answer = generated_text[0][marker_pos + len(marker):].strip()
        else:
            assistant_answer = generated_text[0].strip()

        return [{"generated_text": assistant_answer}]