awacke1 committed on
Commit
65224df
1 Parent(s): 0b0a63a

Create backup-app3-0521-app.py

Files changed (1)
  1. backup-app3-0521-app.py +172 -0
backup-app3-0521-app.py ADDED
@@ -0,0 +1,172 @@
+import streamlit as st
+import openai
+from openai import OpenAI
+import os
+import base64
+import cv2
+from moviepy.editor import VideoFileClip
+
+# documentation
+# 1. Cookbook: https://cookbook.openai.com/examples/gpt4o/introduction_to_gpt4o
+# 2. Configure your Project and Orgs to limit/allow Models: https://platform.openai.com/settings/organization/general
+# 3. Watch your Billing! https://platform.openai.com/settings/organization/billing/overview
+
+
+# Set API key and organization ID from environment variables
+openai.api_key = os.getenv('OPENAI_API_KEY')
+openai.organization = os.getenv('OPENAI_ORG_ID')
+client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
+
+# Define the model to be used
+#MODEL = "gpt-4o"
+MODEL = "gpt-4o-2024-05-13"
+
+def process_text():
+    text_input = st.text_input("Enter your text:")
+    if text_input:
+        completion = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant. Help me with my math homework!"},
+                {"role": "user", "content": f"Hello! Could you solve {text_input}?"}
+            ]
+        )
+        st.write("Assistant: " + completion.choices[0].message.content)
+
+def process_image(image_input):
+    if image_input:
+        base64_image = base64.b64encode(image_input.read()).decode("utf-8")
+        response = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
+                {"role": "user", "content": [
+                    {"type": "text", "text": "Help me understand what is in this picture and list ten facts as markdown outline with appropriate emojis that describes what you see."},
+                    {"type": "image_url", "image_url": {
+                        "url": f"data:image/png;base64,{base64_image}"}
+                    }
+                ]}
+            ],
+            temperature=0.0,
+        )
+        st.markdown(response.choices[0].message.content)
+
+def process_audio(audio_input):
+    if audio_input:
+        transcription = client.audio.transcriptions.create(
+            model="whisper-1",
+            file=audio_input,
+        )
+        response = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": """You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."""},
+                {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}]}
+            ],
+            temperature=0,
+        )
+        st.markdown(response.choices[0].message.content)
+
+def process_audio_for_video(video_input):
+    if video_input:
+        transcription = client.audio.transcriptions.create(
+            model="whisper-1",
+            file=video_input,
+        )
+        response = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": """You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."""},
+                {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}]}
+            ],
+            temperature=0,
+        )
+        st.markdown(response.choices[0].message.content)
+        return response.choices[0].message.content
+
+def save_video(video_file):
+    # Save the uploaded video file
+    with open(video_file.name, "wb") as f:
+        f.write(video_file.getbuffer())
+    return video_file.name
+
+def process_video(video_path, seconds_per_frame=2):
+    base64Frames = []
+    base_video_path, _ = os.path.splitext(video_path)
+    video = cv2.VideoCapture(video_path)
+    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+    fps = video.get(cv2.CAP_PROP_FPS)
+    frames_to_skip = int(fps * seconds_per_frame)
+    curr_frame = 0
+
+    # Loop through the video and extract frames at the specified sampling rate
+    while curr_frame < total_frames - 1:
+        video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
+        success, frame = video.read()
+        if not success:
+            break
+        _, buffer = cv2.imencode(".jpg", frame)
+        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
+        curr_frame += frames_to_skip
+
+    video.release()
+
+    # Extract audio from video
+    audio_path = f"{base_video_path}.mp3"
+    clip = VideoFileClip(video_path)
+    clip.audio.write_audiofile(audio_path, bitrate="32k")
+    clip.audio.close()
+    clip.close()
+
+    print(f"Extracted {len(base64Frames)} frames")
+    print(f"Extracted audio to {audio_path}")
+
+    return base64Frames, audio_path
+
+def process_audio_and_video(video_input):
+    if video_input is not None:
+        # Save the uploaded video file
+        video_path = save_video(video_input)
+
+        # Process the saved video
+        base64Frames, audio_path = process_video(video_path, seconds_per_frame=1)
+
+        # Get the transcript for the video model call
+        transcript = process_audio_for_video(video_input)
+
+        # Generate a summary with visual and audio
+        response = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": """You are generating a video summary. Create a summary of the provided video and its transcript. Respond in Markdown"""},
+                {"role": "user", "content": [
+                    "These are the frames from the video.",
+                    *map(lambda x: {"type": "image_url",
+                                    "image_url": {"url": f'data:image/jpg;base64,{x}', "detail": "low"}}, base64Frames),
+                    {"type": "text", "text": f"The audio transcription is: {transcript}"}
+                ]},
+            ],
+            temperature=0,
+        )
+
+        st.markdown(response.choices[0].message.content)
+
+
+def main():
+    st.markdown("### OpenAI GPT-4o Model")
+    st.markdown("#### The Omni Model with Text, Audio, Image, and Video")
+    option = st.selectbox("Select an option", ("Text", "Image", "Audio", "Video"))
+    if option == "Text":
+        process_text()
+    elif option == "Image":
+        image_input = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
+        process_image(image_input)
+    elif option == "Audio":
+        audio_input = st.file_uploader("Upload an audio file", type=["mp3", "wav"])
+        process_audio(audio_input)
+    elif option == "Video":
+        video_input = st.file_uploader("Upload a video file", type=["mp4"])
+        process_audio_and_video(video_input)
+
+if __name__ == "__main__":
+    main()
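A minimal sketch of how this backup could be exercised locally; the environment variable names come from the code above, while the launch command and dependency set (streamlit, openai, opencv-python, moviepy) are assumptions rather than part of the commit: set OPENAI_API_KEY and OPENAI_ORG_ID in the environment, install those packages, then start the app with streamlit run backup-app3-0521-app.py.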