shreyasiv committed on
Commit
d2e32df
1 Parent(s): 658b2e1

Upload 2 files

Browse files
Files changed (2) hide show
  1. app2.py +122 -0
  2. requirements.txt +0 -0
app2.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import streamlit as st
3
+ import tempfile
4
+ import base64
5
+ import os
6
+ from dotenv import load_dotenv
7
+ from openai import OpenAI
8
+ import assemblyai as aai
9
+ from moviepy.editor import *
10
+
11
+
12
# Load environment variables and configure the third-party API clients.
load_dotenv()
aai.settings.api_key = os.getenv("ASSEMBLYAI_API_KEY")
# The v1 OpenAI SDK ignores the legacy `OpenAI.api_key` class attribute;
# the key must be passed to the client constructor (it also falls back to
# the OPENAI_API_KEY environment variable automatically).
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
17
+
18
def main():
    """Streamlit entry point: upload a video, preview sampled frames, and
    run description / transcription actions on demand.

    Results are cached in ``st.session_state`` so reruns triggered by other
    widgets do not repeat the (paid) API calls.
    """
    st.title('Insightly Video Content Moderation')

    # Video upload section
    uploaded_video = st.file_uploader('Upload a video', type=["mp4", "avi", "mov"])

    if uploaded_video is not None:
        # Persist the upload to a temp file so cv2 / AssemblyAI can read a path.
        # delete=False keeps the file alive across Streamlit reruns.
        with tempfile.NamedTemporaryFile(delete=False) as tfile:
            tfile.write(uploaded_video.read())
            video_file_path = tfile.name

        # Preview every 30th frame in a 3-column grid.
        base64_frames = video_to_base64_frames(video_file_path)
        display_frame_grid(base64_frames[::30])

        st.write("Actions:")  # Header for the actions/buttons section

        spacer_col1, col1, spacer_col2, col2, spacer_col3 = st.columns([1, 2, 1, 2, 1])

        with col1:
            # Generate the description once and cache it; a later click with a
            # cached value is a no-op rather than a second model call.
            if st.button("Description") and 'description' not in st.session_state:
                st.session_state['description'] = generate_description(base64_frames)

        with col2:
            # Transcribe lazily on demand. (The original code also transcribed
            # eagerly on upload and discarded the result — a wasted API call.)
            if st.button("Generate Transcript") and 'transcript' not in st.session_state:
                transcript = aai.Transcriber().transcribe(video_file_path)
                st.session_state['transcript'] = transcript.text

        # Display whatever results exist in session state.
        if st.session_state.get('description'):
            st.subheader("Video Description")
            st.write(st.session_state['description'])

        if st.session_state.get('transcript'):
            st.subheader("Video Transcript")
            st.write(st.session_state['transcript'])
70
def video_to_base64_frames(video_file_path):
    """Decode every frame of the video at *video_file_path* and return them
    as a list of base64-encoded JPEG strings."""
    capture = cv2.VideoCapture(video_file_path)
    encoded_frames = []

    while capture.isOpened():
        ok, image = capture.read()
        if not ok:
            # No more frames (or the file could not be decoded further).
            break
        # Re-encode the raw frame as JPEG, then to base64 text.
        _, jpeg_buffer = cv2.imencode('.jpg', image)
        encoded_frames.append(base64.b64encode(jpeg_buffer).decode('utf-8'))

    capture.release()
    return encoded_frames
87
#########################################
# Generate Video description
def generate_description(base64_frames):
    """Ask the vision model for a moderation-oriented description of the
    video, built from every 30th of the supplied base64 frames; returns
    the model's text response."""
    sampled_images = [{"image": frame, "resize": 428} for frame in base64_frames[0::30]]
    user_message = {
        "role": "user",
        "content": [
            "1. Generate a description for this sequence of video frames in about 100 words.\
            Return the following : 2. Frame by frame summary of what's happening in the video. 3. List of objects in the video. 4. Any restrictive content or sensitive content and if so which frame 5. What category can this video be tagged to?",
            *sampled_images,
        ],
    }
    response = client.chat.completions.create(
        model="gpt-4-vision-preview",
        messages=[user_message],
        max_tokens=3000,
    )
    return response.choices[0].message.content
107
+
108
########################
def display_frame_grid(base64_frames):
    """Render the given base64-encoded JPEG frames in a 3-wide Streamlit grid.

    Captions assume the caller passed every 30th frame of the source video
    (``Frame 1``, ``Frame 31``, ...) — confirm against the call site.
    """
    cols_per_row = 3
    total = len(base64_frames)
    for row_start in range(0, total, cols_per_row):
        row_cols = st.columns(cols_per_row)
        for offset, cell in enumerate(row_cols):
            pos = row_start + offset
            if pos >= total:
                break  # last, partially-filled row
            with cell:
                st.image(base64.b64decode(base64_frames[pos]),
                         caption=f'Frame {pos * 30 + 1}', width=200)
120
+
121
# Script entry point: run the Streamlit app when executed directly.
if __name__ == '__main__':
    main()
requirements.txt ADDED
Binary file (2.7 kB). View file