Norod78 committed on
Commit
0e1bb1e
·
verified ·
1 Parent(s): e12480b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -10
app.py CHANGED
@@ -4,7 +4,7 @@ import torch
4
  from PIL import Image
5
  from pathlib import Path
6
  from threading import Thread
7
- from transformers import AutoTokenizer, AutoModelForCausalLM, AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer
8
  import spaces
9
  import time
10
 
@@ -14,17 +14,15 @@ DESCRIPTION= """
14
  """
15
 
16
  # model config
17
- model_270m_name = "google/gemma-3-270m-it"
18
- model_270m = AutoModelForCausalLM.from_pretrained(
19
- model_270m_name,
20
  torch_dtype="auto",
21
  device_map="auto",
22
  attn_implementation="eager"
23
  ).eval()
24
 
25
- tokenizer = AutoTokenizer.from_pretrained(model_270m_name)
26
-
27
- processor_270m = AutoProcessor.from_pretrained(model_270m_name)
28
  # I will add timestamp later
29
  def extract_video_frames(video_path, num_frames=8):
30
  cap = cv2.VideoCapture(video_path)
@@ -104,9 +102,8 @@ def generate_response(input_data, chat_history, max_new_tokens, system_prompt, t
104
  if messages and messages[-1]["role"] == "user":
105
  messages[-1]["content"].extend(new_message["content"])
106
  else:
107
- messages.append(new_message)
108
- model = model_270m
109
- processor = processor_270m
110
  inputs = processor.apply_chat_template(
111
  messages,
112
  add_generation_prompt=True,
 
4
  from PIL import Image
5
  from pathlib import Path
6
  from threading import Thread
7
+ from transformers import AutoModelForCausalLM, AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer
8
  import spaces
9
  import time
10
 
 
14
  """
15
 
16
  # model config
17
+ model_name = "google/gemma-3-270m-it"
18
+ model = Gemma3ForConditionalGeneration.from_pretrained(
19
+ model_name,
20
  torch_dtype="auto",
21
  device_map="auto",
22
  attn_implementation="eager"
23
  ).eval()
24
 
25
+ processor = AutoProcessor.from_pretrained(model_name)
 
 
26
  # I will add timestamp later
27
  def extract_video_frames(video_path, num_frames=8):
28
  cap = cv2.VideoCapture(video_path)
 
102
  if messages and messages[-1]["role"] == "user":
103
  messages[-1]["content"].extend(new_message["content"])
104
  else:
105
+ messages.append(new_message)
106
+
 
107
  inputs = processor.apply_chat_template(
108
  messages,
109
  add_generation_prompt=True,