# import streamlit as st
# import os
# import time
# import google.generativeai as genai

# secret_key = os.getenv("SECRET_KEY")
# genai.configure(api_key=secret_key)

# def upload_to_gemini(path, mime_type=None):
#     file = genai.upload_file(path, mime_type=mime_type)
#     print(f"Uploaded file '{file.display_name}' as: {file.uri}")
#     return file

# def wait_for_files_active(files):
#     for name in (file.name for file in files):
#         file = genai.get_file(name)
#         while file.state.name == "PROCESSING":
#             time.sleep(10)
#             file = genai.get_file(name)
#         if file.state.name != "ACTIVE":
#             raise Exception(f"File {file.name} failed to process")

# model = genai.GenerativeModel(
#     model_name="gemini-1.5-pro"
# )

# # files = [
# #     upload_to_gemini("American Museum of Natural History Tour - 5 Min", mime_type="video/mp4"),
# # ]

# uploaded_file = st.file_uploader("Upload a video file", type=["mp4", "avi", "mov"])
# if uploaded_file:
#     files = [
#         upload_to_gemini(uploaded_file, mime_type="video/mp4"),
#     ]
#     wait_for_files_active(files)
#     chat_session = model.start_chat()
#     input = st.text_input("Input Prompt: ", key="input")
#     response = chat_session.send_message(input)
#     st.write(response.text)
# import streamlit as st
# import os
# import time
# import tempfile
# import google.generativeai as genai

# secret_key = os.getenv("SECRET_KEY")
# genai.configure(api_key=secret_key)

# def upload_to_gemini(path, mime_type=None):
#     file = genai.upload_file(path, mime_type=mime_type)
#     print(f"Uploaded file '{file.display_name}' as: {file.uri}")
#     return file

# def wait_for_files_active(files):
#     for name in (file.name for file in files):
#         file = genai.get_file(name)
#         while file.state.name == "PROCESSING":
#             time.sleep(10)
#             file = genai.get_file(name)
#         if file.state.name != "ACTIVE":
#             raise Exception(f"File {file.name} failed to process")

# model = genai.GenerativeModel(
#     model_name="gemini-1.5-pro"
# )

# uploaded_file = st.file_uploader("Upload a video file", type=["mp4", "avi", "mov"])
# if uploaded_file:
#     with tempfile.NamedTemporaryFile(delete=False) as temp_file:
#         temp_file.write(uploaded_file.read())
#         temp_file_path = temp_file.name
#     try:
#         files = [
#             upload_to_gemini(temp_file_path, mime_type="video/mp4"),
#         ]
#         wait_for_files_active(files)
#         chat_session = model.start_chat()
#         input = st.text_input("Input Prompt: ", key="input")
#         if input:
#             response = chat_session.send_message(input)
#             st.write(response.text)
#     finally:
#         os.remove(temp_file_path)  # Ensure the temporary file is deleted
# import streamlit as st
# import os
# import time
# import tempfile
# import google.generativeai as genai

# secret_key = os.getenv("SECRET_KEY")
# genai.configure(api_key=secret_key)

# def upload_to_gemini(path, mime_type=None):
#     file = genai.upload_file(path, mime_type=mime_type)
#     print(f"Uploaded file '{file.display_name}' as: {file.uri}")
#     return file

# def wait_for_files_active(files):
#     for name in (file.name for file in files):
#         file = genai.get_file(name)
#         while file.state.name == "PROCESSING":
#             time.sleep(10)
#             file = genai.get_file(name)
#         if file.state.name != "ACTIVE":
#             raise Exception(f"File {file.name} failed to process")

# model = genai.GenerativeModel(
#     model_name="gemini-1.5-pro"
# )

# uploaded_file = st.file_uploader("Upload a video file", type=["mp4", "avi", "mov"])
# if uploaded_file:
#     with tempfile.NamedTemporaryFile(delete=False) as temp_file:
#         temp_file.write(uploaded_file.read())
#         temp_file_path = temp_file.name
#     try:
#         file = upload_to_gemini(temp_file_path, mime_type="video/mp4")
#         wait_for_files_active([file])
#         chat_session = model.start_chat()
#         input = st.text_input("Input Prompt: ", key="input")
#         if input:
#             response = chat_session.send_message(input)
#             st.write(response.text)
#         st.write(f"Uploaded video file: [View Video]({file.uri})")
#     finally:
#         os.remove(temp_file_path)  # Ensure the temporary file is deleted
# streamlit_app.py
import os
import time
import streamlit as st
import google.generativeai as genai
# Configure the API key for Google Generative AI
secret_key = os.getenv("SECRET_KEY")
genai.configure(api_key=secret_key)
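
# Optional: fail fast with a clear message if the API key is missing, since
# genai.configure does not validate the key until the first API call is made.
if not secret_key:
    st.error("SECRET_KEY environment variable is not set.")
    st.stop()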

def upload_to_gemini(path, mime_type=None):
    """Uploads the given file to Gemini."""
    file = genai.upload_file(path, mime_type=mime_type)
    st.write(f"Uploaded file '{file.display_name}' as: {file.uri}")
    return file

def wait_for_files_active(files):
    """Waits for the given files to finish processing and become active."""
    st.write("Waiting for file processing...")
    for name in (file.name for file in files):
        file = genai.get_file(name)
        while file.state.name == "PROCESSING":
            st.write(".")  # progress indicator while the file is still processing
            time.sleep(10)
            file = genai.get_file(name)
        if file.state.name != "ACTIVE":
            raise Exception(f"File {file.name} failed to process")
    st.write("...all files ready")
# Define the Streamlit interface
st.title("Video Upload for Generative AI Processing")
uploaded_file = st.file_uploader("Upload a video file", type=["mp4"])
if uploaded_file is not None:
    # Save the uploaded file locally
    video_path = os.path.join("", uploaded_file.name)
    with open(video_path, "wb") as f:
        f.write(uploaded_file.getbuffer())
    st.success(f"Saved file: {uploaded_file.name}")

    # Upload the video to Gemini
    files = [
        upload_to_gemini(video_path, mime_type="video/mp4"),
    ]

    # Wait for the file to become active
    wait_for_files_active(files)
    # Create the generative model
    generation_config = {
        "temperature": 1,
        "top_p": 0.95,
        "top_k": 64,
        "max_output_tokens": 8192,
        "response_mime_type": "text/plain",
    }
    safety_settings = [
        {
            "category": "HARM_CATEGORY_HARASSMENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE",
        },
        {
            "category": "HARM_CATEGORY_HATE_SPEECH",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE",
        },
        {
            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE",
        },
        {
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE",
        },
    ]
    model = genai.GenerativeModel(
        model_name="gemini-1.5-pro",
        safety_settings=safety_settings,
        generation_config=generation_config,
    )
    # Start a chat session seeded with the uploaded video and a summarisation request
    chat_session = model.start_chat(
        history=[
            {
                "role": "user",
                "parts": ["summarise video"],
            },
            {
                "role": "user",
                # Pass the File object itself so the video is attached to the chat;
                # a bare URI string would only be treated as plain text.
                "parts": [files[0]],
            },
        ]
    )

    # Send the user's prompt to the chat session and display the reply
    user_prompt = st.text_input("Input Prompt: ", key="input")
    if user_prompt:
        response = chat_session.send_message(user_prompt)
        st.write("Response from the model:")
        st.write(response.text)
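
    # Optional cleanup, mirroring the earlier drafts above: delete the local copy
    # once the video has been uploaded to Gemini. The remote copy can likewise be
    # removed with genai.delete_file(files[0].name) when it is no longer needed.
    if os.path.exists(video_path):
        os.remove(video_path)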