Anton Vice committed on
Commit
cf6e857
1 Parent(s): 4a03a7b

Add application file

Files changed (1)
  1. app.py +154 -0
app.py ADDED
@@ -0,0 +1,154 @@
+ import gradio as gr
+ import openai, os, time, requests, io, config
+ import pandas as pd
+ import tempfile
+ import json
+ openai.api_key = config.OPENAI_API_KEY
+
+ ### Prompt setup
+ name = 'Zac'
+ email = 'zac@callconnect.ai'
+ company = "CallConnect"
+ previous_notes = "this is the first call"
+
+ prompt = f"Who are you: you are a highly-skilled, cold-calling sales professional working with a sales company that helps companies make cold calls with AI, Call Connect.\
+ Your name is John Candy (only say your first name unless they ask). Who you are speaking to: {name}, their email: {email}, their company: {company}, last call notes: {previous_notes}. If the user says a different name, ask for the person you're calling.\
+ Your competitors: currently no real competitors, but if the user brings up Sameday let them know that all Sameday does is play pre-recorded voice clips, while we use AI to generate responses.\
+ Your goal: you are trying to get a lot of information about the user in about a 5-10 min phone call. Do not ask for everything all at once; get to know the customer. You are trying to get the customer to see a demo of the AI cold-calling software your company sells; the software costs 2,500 a month as well as a 10,000 onboarding fee. If they balk at this pricing, let them know that the average SDR salary is 50,000 a year before commission, health insurance, and whatever else people get paid. The software can currently do the following: make cold calls, pitch the company or product to the customer, find the customer's pain, get the best times for a demo, send a summary of the call to the sales rep who will be handling the next part of the sales process, and have a conversation outside the scope of the product (make small talk, if needed).\
+ Your goal is to push the person to a demo, but you also need to find their pain points. Some of the common pain points are: cold calling is hard and takes a lot of time and training for a rep to become good at it,\
+ sales reps are spending too much time on calling leads, sales reps more than likely won't hit 100 calls a day, if they get a lot of 'not interested' responses in a row they start to question their job choice and churn quickly, and they might become flustered and forget the sales playbook and plan of action. Also, at the end of the call you need to ask the customer for the three best times to book a demo with them later this week; if they cannot do it this week, push for early next week. If you do not know the answer to a question, let the caller know that it is a great question but you will have to get back to them on that answer via email. If they start asking a lot of questions you do not know, say something along the lines of 'these are great questions that can all be answered on the demo'. DO NOT DO: talk about pricing too much, get too into the weeds about features you are not sure about, make up dates. The current date is March 9th, 2023. DO NOT GET OFF THE PHONE WITHOUT THE PAIN POINTS, HOW THEY CURRENTLY HANDLE COLD CALLING, HOW MANY SDRS THEY CURRENTLY HAVE, THEIR CURRENT EMAIL, AND A DEMO TIME. EVERY CALL MUST END WITH 'GOODBYE' OR 'BYE' AS THE LAST WORD WITH NO PUNCTUATION"
+
+ messages = [
+     {"role": "system", "content": prompt},
+     {"role": "system", "content": 'Only respond as John, wait for the user response before going further'},
+ ]
+
+ def respond(message):
+     completion = openai.ChatCompletion.create(
+         model="gpt-3.5-turbo",
+         messages=messages,
+         temperature=0,
+         presence_penalty=2,
+         frequency_penalty=2
+     )
+     print('Response: ', completion.choices[0].message.content)
+     return completion.choices[0].message.content
+
+ def create_audio(text):
+     new_url = "https://api.elevenlabs.io/v1/text-to-speech/UhmVh8U8kVILeMXoBzCb/stream"
+     voice_settings = {
+         "stability": 0.4,
+         "similarity_boost": 0.8
+     }
+
+     # create the request headers and body
+     headers = {
+         'xi-api-key': 'cef25a99905abc1ce8ddfa9c7c2a7e07',
+         'Content-Type': 'application/json'
+     }
+     body = {
+         'text': text,
+         'voice_settings': voice_settings
+     }
+     json_body = json.dumps(body)
+     # make the POST request to the API
+     print("making post request to 11 labs to make an audio")
+     posting = requests.post(new_url, headers=headers, data=json_body)
+     aid = 0  # default so the audio id is defined even if the request fails
+     if posting.status_code == 200:
+         print('audio created, saving...')
+         # look up the history id of the new item and save the audio file to disk
+         aid = find_audio(text)
+         print(aid, "<---This is audio id after creation")
+         with open(f'audios/{aid}.wav', 'wb') as f:
+             f.write(posting.content)
+         print('audio saved')
+     else:
+         # print the error message
+         print(posting.text)
+     print(aid, "<---This is audio id after creation before returning")
+     return aid
+
+ def find_audio(text):
+     url = "https://api.elevenlabs.io/v1/history"
+     headers = {
+         "accept": "*/*",
+         "xi-api-key": 'cef25a99905abc1ce8ddfa9c7c2a7e07',
+         "Content-Type": "application/json",
+     }
+     history = requests.get(url, headers=headers)
+     data = json.loads(history.content)
+     # Iterate over the history list and return the id of the item whose text matches
+     for item in data['history']:
+         if item["text"] == text:
+             result = item["history_item_id"]
+             break
+     else:
+         result = 0
+     print(result)
+     return result
+
+ def summarize_call(chat_transcript):
+     prompt = f"Analyze the following text and build a summary in a CRM format with these key fields: lead name, their email, pain points, current solutions used, and whether a demo was scheduled and at what time. At the end, include a short summary of the call from this transcript:\n{chat_transcript}"
+     response = openai.Completion.create(
+         engine="text-davinci-003",
+         prompt=prompt,
+         temperature=0,
+         max_tokens=1024,
+         top_p=1,
+         frequency_penalty=0,
+         presence_penalty=0
+     )
+     summary = response.choices[0].text.strip()
+     return summary
+
+ chat_transcript = ""
+
+ ## MAIN
+ def transcribe(audio_file_path):
+     global chat_transcript
+     with open(audio_file_path, "rb") as audio_file:
+         with tempfile.NamedTemporaryFile(mode='w+b', suffix='.wav') as temp_file:
+             temp_file.write(audio_file.read())
+             temp_file.seek(0)
+             transcript = openai.Audio.transcribe("whisper-1", temp_file)
+     message = transcript['text']
+     print(f"Transcribed message: {message}")
+     messages.append({"role": "user", "content": message})
+
+     response = respond(message)
+     messages.append({"role": "assistant", "content": response})
+     ### Check for audio
+     audio_id = find_audio(response)
+
+     print('Audio ID:', audio_id)
+     if audio_id == 0:
+         print("Creating new audio...")
+         ### GENERATE NEW AUDIO
+         audio_id = create_audio(response)
+         to_play = f'audios/{audio_id}.wav'
+         #audio_id = find_audio(response)
+         #to_play = get_audio(audio_id)
+     else:
+         to_play = f'audios/{audio_id}.wav'
+     chat_transcript += f'user: {message}\nassistant: {response}\n'
+
+     # Define the end of the conversation and write the call summary;
+     # the prompt instructs the model to end every call with 'GOODBYE' or 'BYE', so check the last word
+     if response.lower().split()[-1].strip('.!') in ['bye', 'goodbye']:
+         summary = summarize_call(chat_transcript)
+         # save the chat transcript to a file
+         folder_name = "Calls Made Today"
+         if not os.path.exists(folder_name):
+             os.makedirs(folder_name)
+         file_name = os.path.join(folder_name, "call_summary.txt")
+         with open(file_name, "w") as f:
+             f.write(chat_transcript)
+             f.write(f"\n\nAnd the summary:\n{summary}")
+
+     return chat_transcript, to_play
+
+
+ ui = gr.Interface(fn=transcribe, live=True, inputs=[gr.Audio(source="microphone", type="filepath")], title='Call Connect Automated Cold Calling AI', description="""This is a demo of what Call Connect's Cold Calling AI can do. Ask it a question and watch it respond.""", outputs=["text", 'audio'])
+ ui.launch(debug=True, share=True)
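
Note: app.py expects a local config module exposing OPENAI_API_KEY, which is not part of this commit, and it writes generated speech into an audios/ directory that must already exist. A minimal sketch of the assumed config.py, with a placeholder key:

    # config.py (assumed; not included in this commit)
    OPENAI_API_KEY = "sk-..."  # placeholder OpenAI API key

Creating the audio folder once before launch, e.g. os.makedirs("audios", exist_ok=True), avoids a FileNotFoundError the first time create_audio saves a file.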