# Hugging Face Space: Gradio demo for the "soufyane/gemma_data_science" model
# (text in -> model response as text + synthesized speech out).
from io import BytesIO

import gradio as gr
import IPython.display as ipd
from gtts import gTTS
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Load the fine-tuned model and its tokenizer once at startup from the Hub.
# NOTE(review): Gemma checkpoints are normally decoder-only (causal LM); if this
# repo's config is GemmaConfig, AutoModelForSeq2SeqLM will refuse to load it and
# AutoModelForCausalLM would be required — confirm against the Hub repository.
model_name = "soufyane/gemma_data_science"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
def process_text_gemma(input_text: str) -> str:
    """Generate the model's response to ``input_text`` and return it as a string."""
    # Keep the full encoding: since padding/truncation are requested, generate()
    # also needs the attention mask so padded positions are ignored.
    encoded = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
    output_ids = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
    )
    # Decode only the first (and only) sequence in the batch.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
def process_speech_gemma(audio):
    """Synthesize speech for the model's response to ``audio``.

    ``audio`` is actually the user's text prompt (it is forwarded from the text
    input by ``main``), kept under its original name for caller compatibility.

    Returns the path of a temporary MP3 file. A Gradio "audio" output expects a
    filepath (or a ``(rate, ndarray)`` pair); the original code returned an
    ``IPython.display.Audio`` object, which Gradio cannot render.
    """
    import tempfile

    response = process_text_gemma(audio)
    tts = gTTS(text=response, lang='en')
    # Persist the synthesized speech to a temp file Gradio can serve.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
        tts.write_to_fp(fp)
        return fp.name
def main(input_text: str):
    """Gradio entry point: return the text response and its spoken rendering.

    The original indexed ``input_text[0]``, but Gradio passes a "text" input as
    a plain string, so that sliced the prompt down to its first character.
    """
    # NOTE(review): the model currently runs twice (once per output); reusing
    # the text response for speech synthesis would halve the work.
    return process_text_gemma(input_text), process_speech_gemma(input_text)
# Build and launch the Gradio UI: one text box in, text + audio out.
gr.Interface(
    fn=main,
    inputs=["text"],
    outputs=["text", "audio"],
    title="Gemma Data Science Model",
    description="This is a text-to-text model for data science tasks.",
    # NOTE(review): live=True re-runs the model on every keystroke, which is
    # expensive for LLM generation — consider dropping it in favor of the
    # default submit button.
    live=True,
).launch()