# y-video / app.py
# Docfile — "Update app.py" (commit 89adcf7, verified)
import gradio as gr
import google.generativeai as genai
# Read the Gemini API key from the TOKEN environment variable
# (e.g. a Hugging Face Spaces secret) and configure the client once at import.
import os
token=os.environ.get("TOKEN")
genai.configure(api_key=token)
# Disable all four Gemini content filters (BLOCK_NONE) so answers about
# arbitrary video content are not suppressed by the safety layer.
safety_settings = [
    {"category": category, "threshold": "BLOCK_NONE"}
    for category in (
        "HARM_CATEGORY_HARASSMENT",
        "HARM_CATEGORY_HATE_SPEECH",
        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "HARM_CATEGORY_DANGEROUS_CONT" + "ENT",
    )
]
def describe_video(pp, video_file):
    """Upload a video to the Gemini File API, ask a question about it, and return the answer.

    Args:
        pp: The user's question (prompt) about the video.
        video_file: Local path to the video file supplied by the Gradio Video input.

    Returns:
        The model's text answer, or an ``"An error occurred: ..."`` string on failure
        (errors are returned, not raised, so the Gradio UI can display them).
    """
    import time

    try:
        # Guard: the Gradio Video input yields None when no file was attached.
        if not video_file:
            return "An error occurred: no video file was provided."

        print(f"Uploading file...")
        # BUGFIX: the original ignored `video_file` and uploaded a hard-coded
        # YouTube URL instead — upload_file() needs a local path, so the user's
        # actual video was never analyzed. Upload what the user submitted.
        uploaded_video = genai.upload_file(path=video_file)
        print(f"Completed upload: {uploaded_video.uri}")

        # The File API processes uploads asynchronously; poll until it settles.
        while uploaded_video.state.name == "PROCESSING":
            print("Waiting for video to be processed.")
            time.sleep(10)
            uploaded_video = genai.get_file(uploaded_video.name)
        if uploaded_video.state.name == "FAILED":
            raise ValueError(uploaded_video.state.name)
        print(f"Video processing complete: " + uploaded_video.uri)

        try:
            model = genai.GenerativeModel(
                model_name="models/gemini-1.5-flash-latest",
                safety_settings=safety_settings,
            )
            print("Making LLM inference request...")
            # Generous timeout: video inference can take minutes.
            response = model.generate_content(
                [pp, uploaded_video], request_options={"timeout": 600}
            )
            print(response.text)
            return response.text
        finally:
            # Always delete the uploaded file, even if inference failed,
            # so failed requests don't leak files in the File API store.
            genai.delete_file(uploaded_video.name)
            print(f"Deleted file {uploaded_video.uri}")
    except Exception as e:
        # Surface any failure to the UI rather than crashing the app.
        return f"An error occurred: {e}"
# --- Gradio UI --------------------------------------------------------------
# A question box and a video upload feed describe_video; the model's answer
# is rendered in a plain output textbox.
question_box = gr.Textbox(label="Question:", lines=3)
video_input = gr.Video()
answer_box = gr.Textbox()

iface = gr.Interface(
    fn=describe_video,
    inputs=[question_box, video_input],
    outputs=answer_box,
    title="y Video.",
    description="Pose des questions sur la vidéo et obtient une réponse.",
)

# Start the web server (blocks until shutdown).
iface.launch()