import streamlit as st
from openai import OpenAI
import os
import base64
import cv2
from moviepy.editor import VideoFileClip

# documentation
# 1. Cookbook:  https://cookbook.openai.com/examples/gpt4o/introduction_to_gpt4o
# 2. Configure your Project and Orgs to limit/allow Models:  https://platform.openai.com/settings/organization/general
# 3. Watch your Billing!  https://platform.openai.com/settings/organization/billing/overview
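#
# To run this demo locally (the script name below is an assumption -- adjust it to the
# actual file name). Note: `moviepy.editor` was removed in moviepy 2.x, so pin moviepy
# to a 1.x release for this import style.
#   pip install streamlit openai opencv-python "moviepy<2"
#   export OPENAI_API_KEY="sk-..."
#   export OPENAI_ORG_ID="org-..."   # optional
#   streamlit run gpt4o_demo.py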


# Create the OpenAI client with the API key and organization ID from environment variables
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))

# Define the model to be used
#MODEL = "gpt-4o"
MODEL = "gpt-4o-2024-05-13"

def process_text():
    text_input = st.text_input("Enter your text:")
    if text_input:
        completion = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Help me with my math homework!"},
                {"role": "user", "content": f"Hello! Could you solve {text_input}?"}
            ]
        )
        st.write("Assistant: " + completion.choices[0].message.content)

def process_image(image_input):
    if image_input:
        # Encode the uploaded image as base64 and use its actual MIME type in the data URL
        base64_image = base64.b64encode(image_input.read()).decode("utf-8")
        mime_type = image_input.type or "image/png"
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
                {"role": "user", "content": [
                    {"type": "text", "text": "Help me understand what is in this picture and list ten facts as a markdown outline with appropriate emojis that describe what you see."},
                    {"type": "image_url", "image_url": {
                        "url": f"data:{mime_type};base64,{base64_image}"}
                    }
                ]}
            ],
            temperature=0.0,
        )
        st.markdown(response.choices[0].message.content)

def process_audio(audio_input):
    if audio_input:
        transcription = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_input,
        )
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": """You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."""},
                {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}]}
            ],
            temperature=0,
        )
        st.markdown(response.choices[0].message.content)

def process_audio_for_video(video_input):
    if video_input:
        # Whisper accepts the uploaded mp4 directly, so the video file itself is transcribed
        transcription = client.audio.transcriptions.create(
            model="whisper-1",
            file=video_input,
        )
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": """You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."""},
                {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}]}
            ],
            temperature=0,
        )
        st.markdown(response.choices[0].message.content)
        return response.choices[0].message.content

def save_video(video_file):
    # Save the uploaded video file
    with open(video_file.name, "wb") as f:
        f.write(video_file.getbuffer())
    return video_file.name

def process_video(video_path, seconds_per_frame=2):
    # Sample one frame every `seconds_per_frame` seconds and extract the audio track;
    # returns the frames as base64-encoded JPEGs plus the path of the extracted MP3.
    base64Frames = []
    base_video_path, _ = os.path.splitext(video_path)
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    frames_to_skip = max(1, int(fps * seconds_per_frame))  # avoid a zero step (and an infinite loop) on low-fps videos
    curr_frame = 0

    # Loop through the video and extract frames at specified sampling rate
    while curr_frame < total_frames - 1:
        video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
        curr_frame += frames_to_skip

    video.release()

    # Extract audio from video
    audio_path = f"{base_video_path}.mp3"
    clip = VideoFileClip(video_path)
    clip.audio.write_audiofile(audio_path, bitrate="32k")
    clip.audio.close()
    clip.close()

    print(f"Extracted {len(base64Frames)} frames")
    print(f"Extracted audio to {audio_path}")

    return base64Frames, audio_path
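
# Example usage of the frame sampler outside Streamlit (the path is hypothetical):
#   frames, audio_path = process_video("sample.mp4", seconds_per_frame=2)
#   print(f"{len(frames)} frames, audio at {audio_path}")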

def process_audio_and_video(video_input):
    if video_input is not None:
        # Save the uploaded video file to disk so OpenCV and moviepy can read it
        video_path = save_video(video_input)

        # Sample frames (and extract the audio track) from the saved video
        base64Frames, audio_path = process_video(video_path, seconds_per_frame=1)

        # Summarize the audio; this returns a Markdown summary of the transcript,
        # which is what gets passed to the video model call below
        transcript = process_audio_for_video(video_input)

        # Generate a summary using both the sampled frames and the transcript summary
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": """You are generating a video summary. Create a summary of the provided video and its transcript. Respond in Markdown"""},
                {"role": "user", "content": [
                    {"type": "text", "text": "These are the frames from the video."},
                    *map(lambda x: {"type": "image_url",
                                    "image_url": {"url": f"data:image/jpeg;base64,{x}", "detail": "low"}}, base64Frames),
                    {"type": "text", "text": f"The audio transcription is: {transcript}"}
                ]},
            ],
            temperature=0,
        )

        st.markdown(response.choices[0].message.content)


def main():
    st.markdown("### OpenAI GPT-4o Model")
    st.markdown("#### The Omni Model with Text, Audio, Image, and Video")
    option = st.selectbox("Select an option", ("Text", "Image", "Audio", "Video"))
    if option == "Text":
        process_text()
    elif option == "Image":
        image_input = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
        process_image(image_input)
    elif option == "Audio":
        audio_input = st.file_uploader("Upload an audio file", type=["mp3", "wav"])
        process_audio(audio_input)
    elif option == "Video":
        video_input = st.file_uploader("Upload a video file", type=["mp4"])
        process_audio_and_video(video_input)

if __name__ == "__main__":
    main()