Upload 2 files
- app2.py +87 -0
- requirements.txt +6 -0
app2.py
ADDED
import openai
import requests
import json
from deepgram import (
    DeepgramClient,
    PrerecordedOptions,
    FileSource,
)
import streamlit as st

#AUDIO_FILE = "temp.wav"  # path to the audio file; superseded by the Streamlit uploader


def extract_transcript(json_data):
    # Pull the transcript string out of a Deepgram prerecorded response.
    try:
        data = json.loads(json_data)
        transcript = data['results']['channels'][0]['alternatives'][0]['transcript']
        return transcript
    except (KeyError, IndexError, json.JSONDecodeError):
        print("Error: Transcript not found or invalid JSON format.")
        return None
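
# For reference, a minimal sketch (assumed shape, not captured output) of the
# JSON that extract_transcript expects from Deepgram:
#
#   sample = json.dumps({
#       "results": {"channels": [{"alternatives": [{"transcript": "Hello, thanks for calling."}]}]}
#   })
#   extract_transcript(sample)  # -> "Hello, thanks for calling."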

def analyze(audio_file):
    # Transcribe the upload with Deepgram, then ask GPT-3.5 for sentiment insights.
    res = ''
    try:
        # STEP 1: Create a Deepgram client using the API key ('DAPI' is a placeholder)
        deepgram = DeepgramClient('DAPI')

        # The Streamlit uploader hands over a file-like object, so read it
        # directly rather than reopening a path from disk.
        buffer_data = audio_file.read()

        payload: FileSource = {
            "buffer": buffer_data,
        }

        # STEP 2: Configure Deepgram options for audio analysis
        options = PrerecordedOptions(
            model="nova-2",
            smart_format=True,
        )

        # STEP 3: Call the transcribe_file method with the audio payload and options
        response = deepgram.listen.prerecorded.v("1").transcribe_file(payload, options)

        # STEP 4: Extract the transcript; fall back to an empty string on failure
        res = extract_transcript(response.to_json(indent=4)) or ''
    except Exception as e:
        print(f"Exception: {e}")

    openai.api_key = 'OAPI'  # placeholder for a real OpenAI API key

    # Two user messages: the analysis instructions, then the transcript itself.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": "So, in input of Web interface Hiring manager attaches audio with any human conversation. In output Hiring manager should get sentiment or psychological insights derived from the conversation, some insights about speakers. Please don’t provide summary of conversation, key words, etc. Output should be related to sentimental analysis."},
            {"role": "user", "content": res},
        ]
    )

    # Extract the message content from the completion response
    message_content = completion['choices'][0]['message']['content']

    # Remove newlines and concatenate into a single line
    message_content_single_line = ' '.join(message_content.splitlines())
    return message_content_single_line
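
# Note: 'DAPI' and 'OAPI' above are placeholders, not working keys. One hedged
# alternative, assuming the keys are exported as environment variables with
# these (illustrative) names:
#
#   import os
#   deepgram = DeepgramClient(os.environ["DEEPGRAM_API_KEY"])
#   openai.api_key = os.environ["OPENAI_API_KEY"]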

def main():
    st.title("Audio Conversation Analysis")
    st.write("Upload an audio file containing a conversation, and get insights generated by OpenAI's GPT-3.5 model.")

    # Upload audio file
    uploaded_file = st.file_uploader("Upload an audio file", type=["wav"])

    if uploaded_file is not None:
        st.write("File uploaded successfully!")
        insights = analyze(uploaded_file)  # the model's sentiment analysis, not the raw transcript
        st.write("Generated Text:")
        st.write(insights)


if __name__ == "__main__":
    main()
requirements.txt
ADDED
openai==0.28
requests
deepgram-sdk==3.2.3
streamlit
ffmpeg-python
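
With these dependencies installed, the app is typically launched locally with: streamlit run app2.py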