Update app.py
app.py CHANGED
@@ -1584,20 +1584,23 @@ def process_audio(audio_input, text_input):
 
 def process_audio_for_video(video_input):
     if video_input:
-        [14 removed lines; their content is not shown in this rendering]
+        try:
+            transcription = client.audio.transcriptions.create(
+                model="whisper-1",
+                file=video_input,
+            )
+            response = client.chat.completions.create(
+                model=MODEL,
+                messages=[
+                    {"role": "system", "content":"""You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."""},
+                    {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription}"}],}
+                ],
+                temperature=0,
+            )
+            st.markdown(response.choices[0].message.content)
+            return response.choices[0].message.content
+        except:
+            st.write('No transcript')
 
 def save_video(video_file):
     # Save the uploaded video file
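The rewritten `process_audio_for_video` wraps a transcribe-then-summarize flow in a try/except: Whisper produces a transcript of the uploaded file, a chat completion turns it into a Markdown summary, and the result is rendered with `st.markdown`. A minimal standalone sketch of that pattern is below; the `OpenAI()` client setup and `"gpt-4o"` are assumptions standing in for the app's `client` and `MODEL`, and it reads `transcription.text` rather than interpolating the whole transcription object as the committed code does.

```python
# Sketch of the transcribe-then-summarize pattern above (not the app's exact code).
# Assumes OPENAI_API_KEY is set; "gpt-4o" stands in for the app's MODEL constant.
from openai import OpenAI

client = OpenAI()

def summarize_media_audio(media_file) -> str:
    """Transcribe an uploaded audio/video file, then summarize the transcript."""
    transcription = client.audio.transcriptions.create(
        model="whisper-1",
        file=media_file,  # a binary file object, e.g. open(path, "rb")
    )
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are generating a transcript summary. Respond in Markdown."},
            {"role": "user", "content": f"The audio transcription is: {transcription.text}"},
        ],
        temperature=0,
    )
    return response.choices[0].message.content
```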
@@ -1628,14 +1631,16 @@ def process_video(video_path, seconds_per_frame=2):
 
     # Extract audio from video
     audio_path = f"{base_video_path}.mp3"
-    clip = VideoFileClip(video_path)
     try:
+        clip = VideoFileClip(video_path)
+
         clip.audio.write_audiofile(audio_path, bitrate="32k")
         clip.audio.close()
+
+        clip.close()
     except:
         st.write('No audio track found, moving on..')
 
-    clip.close()
 
     print(f"Extracted {len(base64Frames)} frames")
     print(f"Extracted audio to {audio_path}")
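This hunk moves both the `VideoFileClip` construction and `clip.close()` inside the existing `try` block, so a video without an audio track (where `clip.audio` is `None`) is handled by the same `except` instead of raising outside it. For comparison, here is a sketch of the same extraction step that checks for a missing track explicitly; the `extract_audio` helper name and the `finally` cleanup are illustrative additions, not part of this commit.

```python
# Illustrative alternative to the bare try/except above, assuming moviepy 1.x
# (moviepy 2.x exposes VideoFileClip at the package top level instead).
from moviepy.editor import VideoFileClip

def extract_audio(video_path: str, bitrate: str = "32k") -> str | None:
    """Write the video's audio track to an .mp3 next to it; return its path or None."""
    audio_path = f"{video_path.rsplit('.', 1)[0]}.mp3"
    clip = VideoFileClip(video_path)
    try:
        if clip.audio is None:  # no audio track in this video
            return None
        clip.audio.write_audiofile(audio_path, bitrate=bitrate)
        clip.audio.close()
        return audio_path
    finally:
        clip.close()  # release the file reader even if extraction failed
```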
@@ -1670,8 +1675,9 @@ def process_audio_and_video(video_input):
     results = response.choices[0].message.content
     st.markdown(results)
 
-    [2 removed lines; their content is not shown in this rendering]
+    if transcript:
+        filename = generate_filename(transcript, "md")
+        create_file(filename, transcript, results, should_save)
 
 
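The last hunk saves the output only when a transcript was produced, delegating to the app's `generate_filename` and `create_file` helpers together with its `should_save` flag. Those helpers are defined elsewhere in app.py and are not shown in this diff; a rough, hypothetical stand-in for the kind of guarded save they perform might look like this.

```python
# Hypothetical stand-in for generate_filename/create_file (the app's real helpers
# live elsewhere in app.py and may behave differently).
import re
from datetime import datetime
from pathlib import Path

def save_transcript_summary(transcript: str, results: str, should_save: bool,
                            out_dir: str = ".") -> str | None:
    """Write the transcript and its summary to a timestamped Markdown file."""
    if not (transcript and should_save):
        return None
    stem = re.sub(r"\W+", "_", transcript[:40]).strip("_") or "transcript"
    path = Path(out_dir) / f"{datetime.now():%Y%m%d_%H%M%S}_{stem}.md"
    path.write_text(f"# Summary\n\n{results}\n\n# Transcript\n\n{transcript}\n",
                    encoding="utf-8")
    return str(path)
```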