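"""Gradio Space that chats with google/gemma-3n-E4B-it over text, speech, and images.

Speech and text arrive through a fastrtc WebRTC textbox and images through a gr.Image upload;
recorded audio is cached as an mp3 before being handed to the processor. Helper functions can
also expand a video file into sampled JPEG frames. Assistant replies are generated on GPU and
streamed token by token into a gr.Chatbot.
"""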
import os
import pathlib
import tempfile
from collections.abc import Iterator
from threading import Thread

import av
import gradio as gr
import numpy as np
import spaces
import torch
from fastrtc import (
    AdditionalOutputs,
    ReplyOnPause,
    WebRTC,
    WebRTCData,
    audio_to_int16,
    get_cloudflare_turn_credentials,
    get_cloudflare_turn_credentials_async,
)
from gradio.processing_utils import save_audio_to_cache
from gradio.utils import get_upload_folder
from transformers import AutoModelForImageTextToText, AutoProcessor
from transformers.generation.streamers import TextIteratorStreamer

model_id = "google/gemma-3n-E4B-it"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForImageTextToText.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16)

IMAGE_FILE_TYPES = (".jpg", ".jpeg", ".png", ".webp")
VIDEO_FILE_TYPES = (".mp4", ".mov", ".webm")
AUDIO_FILE_TYPES = (".mp3", ".wav")

GRADIO_TEMP_DIR = get_upload_folder()

TARGET_FPS = int(os.getenv("TARGET_FPS", "3"))
MAX_FRAMES = int(os.getenv("MAX_FRAMES", "30"))
MAX_INPUT_TOKENS = int(os.getenv("MAX_INPUT_TOKENS", "10_000"))


def get_file_type(path: str) -> str:
    if path.endswith(IMAGE_FILE_TYPES):
        return "image"
    if path.endswith(VIDEO_FILE_TYPES):
        return "video"
    if path.endswith(AUDIO_FILE_TYPES):
        return "audio"
    error_message = f"Unsupported file type: {path}"
    raise ValueError(error_message)


def count_files_in_new_message(paths: list[str]) -> tuple[int, int]:
    video_count = 0
    non_video_count = 0
    for path in paths:
        if path.endswith(VIDEO_FILE_TYPES):
            video_count += 1
        else:
            non_video_count += 1
    return video_count, non_video_count


def validate_media_constraints(message: dict) -> bool:
    video_count, non_video_count = count_files_in_new_message(message["files"])
    if video_count > 1:
        gr.Warning("Only one video is supported.")
        return False
    if video_count == 1 and non_video_count > 0:
        gr.Warning("Mixing images and videos is not allowed.")
        return False
    return True


def extract_frames_to_tempdir(
    video_path: str,
    target_fps: float,
    max_frames: int | None = None,
    parent_dir: str | None = None,
    prefix: str = "frames_",
) -> str:
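    """Sample a video at roughly ``target_fps`` and save the selected frames as JPEGs.

    Frames are decoded with PyAV; a frame is kept whenever its timestamp falls within half a
    sampling interval of the next target time, up to ``max_frames`` frames. Returns the path
    of the temporary directory that holds the extracted frames.
    """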
    temp_dir = tempfile.mkdtemp(prefix=prefix, dir=parent_dir)

    container = av.open(video_path)
    video_stream = container.streams.video[0]

    if video_stream.duration is None or video_stream.time_base is None:
        raise ValueError("video_stream is missing duration or time_base")

    time_base = video_stream.time_base
    duration = float(video_stream.duration * time_base)
    interval = 1.0 / target_fps

    total_frames = int(duration * target_fps)
    if max_frames is not None:
        total_frames = min(total_frames, max_frames)

    target_times = [i * interval for i in range(total_frames)]
    target_index = 0

    for frame in container.decode(video=0):
        if frame.pts is None:
            continue

        timestamp = float(frame.pts * time_base)
        if target_index < len(target_times) and abs(timestamp - target_times[target_index]) < (interval / 2):
            frame_path = pathlib.Path(temp_dir) / f"frame_{target_index:04d}.jpg"
            frame.to_image().save(frame_path)
            target_index += 1

        if max_frames is not None and target_index >= max_frames:
            break

    container.close()
    return temp_dir


def process_new_user_message(message: dict) -> list[dict]:
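    """Convert a Gradio multimodal message into chat-template content entries.

    A text-only message becomes a single text entry; a lone video is expanded into sampled
    frames, each passed as an image; otherwise every attached file is passed through with
    its detected type (image or audio).
    """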
    if not message["files"]:
        return [{"type": "text", "text": message["text"]}]

    file_types = [get_file_type(path) for path in message["files"]]

    if len(file_types) == 1 and file_types[0] == "video":
        gr.Info(f"Video will be processed at {TARGET_FPS} FPS, max {MAX_FRAMES} frames in this Space.")
        temp_dir = extract_frames_to_tempdir(
            message["files"][0],
            target_fps=TARGET_FPS,
            max_frames=MAX_FRAMES,
            parent_dir=GRADIO_TEMP_DIR,
        )
        paths = sorted(pathlib.Path(temp_dir).glob("*.jpg"))
        return [
            {"type": "text", "text": message["text"]},
            *[{"type": "image", "image": path.as_posix()} for path in paths],
        ]

    return [
        {"type": "text", "text": message["text"]},
        *[{"type": file_type, file_type: path} for path, file_type in zip(message["files"], file_types, strict=True)],
    ]


def process_history(history: list[dict]) -> list[dict]:
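    """Rebuild chat-template messages from the gr.Chatbot history.

    In Gradio's "messages" format a user turn with attachments arrives as several consecutive
    user items (one per file, plus the text), so consecutive user items are merged into a
    single user message before each assistant reply. User items after the last assistant
    reply are dropped here because the current message is passed separately to _generate.
    """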
    messages = []
    current_user_content: list[dict] = []
    for item in history:
        if item["role"] == "assistant":
            if current_user_content:
                messages.append({"role": "user", "content": current_user_content})
                current_user_content = []
            messages.append({"role": "assistant", "content": [{"type": "text", "text": item["content"]}]})
        else:
            content = item["content"]
            if isinstance(content, str):
                current_user_content.append({"type": "text", "text": content})
            else:
                filepath = content[0]
                file_type = get_file_type(filepath)
                current_user_content.append({"type": file_type, file_type: filepath})
    return messages


@torch.inference_mode()
def _generate(message: dict, history: list[dict], system_prompt: str = "", max_new_tokens: int = 512) -> Iterator[str]:
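    """Build the chat-template prompt and stream the model's reply.

    Enforces MAX_INPUT_TOKENS, runs model.generate in a background thread, and yields the
    cumulative decoded text after each new chunk from the TextIteratorStreamer.
    """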
    if not validate_media_constraints(message):
        yield ""
        return

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": [{"type": "text", "text": system_prompt}]})
    messages.extend(process_history(history))
    messages.append({"role": "user", "content": process_new_user_message(message)})

    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    )

    n_tokens = inputs["input_ids"].shape[1]
    if n_tokens > MAX_INPUT_TOKENS:
        gr.Warning(
            f"Input too long. Max {MAX_INPUT_TOKENS} tokens. Got {n_tokens} tokens. This limit is set to avoid CUDA out-of-memory errors in this Space."
        )
        yield ""
        return

    inputs = inputs.to(device=model.device, dtype=torch.bfloat16)

    streamer = TextIteratorStreamer(processor, timeout=30.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=False,
        disable_compile=True,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    output = ""
    for delta in streamer:
        output += delta
        yield output


@spaces.GPU(time_limit=120)
def generate(data: WebRTCData, history: list[dict], system_prompt: str = "", max_new_tokens: int = 512, image=None):
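    """fastrtc handler for the WebRTC textbox.

    Builds a user message from the WebRTC submission (cached audio and/or text) plus the
    optional image, appends the user turn(s) to the chat history, and streams assistant
    updates back to the UI via AdditionalOutputs. @spaces.GPU allocates a ZeroGPU worker per
    call on Hugging Face Spaces; elsewhere it should be a no-op.
    """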
    files = []
    audio_file = None
    if data.audio is not None and data.audio[1].size > 0:
        print(audio_to_int16(data.audio[1]))
        audio_file = save_audio_to_cache(
            np.squeeze(audio_to_int16(data.audio[1])), data.audio[0], format="mp3", cache_dir=get_upload_folder()
        )
        files.append(audio_file)
    if image is not None:
        files.append(image)

    message = {
        "text": data.textbox,
        "files": files,
    }
    print("message", message)

    if image is not None:
        history.append({"role": "user", "content": (image,)})
    if audio_file is not None:
        history.append({"role": "user", "content": (audio_file,)})
    history.append({"role": "user", "content": data.textbox})
    print("history", history)
    yield AdditionalOutputs(history)

    new_message = {"role": "assistant", "content": ""}
    for output in _generate(message, history, system_prompt, max_new_tokens):
        new_message["content"] = output
        yield AdditionalOutputs(history + [new_message])


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            image = gr.Image(type="filepath", label="Image Input")
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(type="messages")
            webrtc = WebRTC(
                modality="audio",
                mode="send",
                variant="textbox",
                rtc_configuration=get_cloudflare_turn_credentials_async,
            )
    with gr.Row():
        with gr.Accordion(label="Additional Inputs"):
            sp = gr.Textbox(label="System Prompt", value="You are a helpful assistant.")
            slider = gr.Slider(label="Max New Tokens", minimum=100, maximum=2000, step=10, value=700)
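
    # Event wiring: ReplyOnPause invokes `generate` after the caller pauses speaking, and the
    # AdditionalOutputs the handler yields are copied into the chatbot by on_additional_outputs.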
    webrtc.stream(
        ReplyOnPause(generate),  # type: ignore
        inputs=[webrtc, chatbot, sp, slider, image],
        outputs=[chatbot],
        concurrency_limit=100,
    )
    webrtc.on_additional_outputs(lambda old, new: new, inputs=[chatbot], outputs=[chatbot], concurrency_limit=100)


if __name__ == "__main__":
    demo.launch(ssr_mode=False)