import os
import time

import gradio as gr
import google.generativeai as genai

# Read the Gemini API key from the TOKEN environment variable
token = os.environ.get("TOKEN")
genai.configure(api_key=token)

safety_settings = [
  {
    "category": "HARM_CATEGORY_HARASSMENT",
    "threshold": "BLOCK_NONE",
  },
  {
    "category": "HARM_CATEGORY_HATE_SPEECH",
    "threshold": "BLOCK_NONE",
  },
  {
    "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "threshold": "BLOCK_NONE",
  },
  {
    "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
    "threshold": "BLOCK_NONE",
  },
]

def describe_video(prompt, video_file):
    try:
        # Upload the user-provided video to the Gemini File API
        print("Uploading file...")
        uploaded_video = genai.upload_file(path=video_file)
        print(f"Completed upload: {uploaded_video.uri}")

        # Poll until the uploaded video has finished processing
        while uploaded_video.state.name == "PROCESSING":
            print("Waiting for video to be processed.")
            time.sleep(10)
            uploaded_video = genai.get_file(uploaded_video.name)

        if uploaded_video.state.name == "FAILED":
            raise ValueError(uploaded_video.state.name)
        print(f"Video processing complete: {uploaded_video.uri}")

        # Use the Gemini 1.5 Flash model with the permissive safety settings above
        model = genai.GenerativeModel(
            model_name="models/gemini-1.5-flash-latest",
            safety_settings=safety_settings,
        )
        # Make the LLM request.
        print("Making LLM inference request...")
        response = model.generate_content(
            [prompt, uploaded_video], request_options={"timeout": 600}
        )
        print(response.text)

        genai.delete_file(uploaded_video.name)
        print(f"Deleted file {uploaded_video.uri}")

        return response.text
    except Exception as e:
        return f"An error occurred: {e}"


# Create the Gradio interface
iface = gr.Interface(
    fn=describe_video,
    inputs=[gr.Textbox(label="Question:", lines=3), gr.Video(label="Video")],
    outputs=gr.Textbox(label="Answer"),
    title="Video Q&A",
    description="Ask questions about the video and get an answer.",
)

# Launch the interface
iface.launch()
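
# Usage sketch (assumptions: the file is saved as app.py, and the `gradio` and
# `google-generativeai` packages are installed; TOKEN is simply the environment
# variable name this script reads, not an official convention):
#
#   pip install gradio google-generativeai
#   export TOKEN="your-gemini-api-key"
#   python app.py
#
# Gradio then serves a local web UI where you can upload a video, type a
# question about it, and read the model's answer.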