salmanmapkar committed on
Commit
ae2a5ee
1 Parent(s): d97c012

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -24
app.py CHANGED
@@ -6,11 +6,8 @@ import random
6
  from transformers import pipeline
7
  import torch
8
 
9
- from webdriver_manager.chrome import ChromeDriverManager
10
- # import chromedriver_autoinstaller
11
- # chromedriver_autoinstaller.install()
12
-
13
- session_token = os.environ.get('SessionToken')
14
 
15
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
16
 
@@ -25,36 +22,29 @@ all_special_ids = whisper_model.tokenizer.all_special_ids
25
  transcribe_token_id = all_special_ids[-5]
26
  translate_token_id = all_special_ids[-6]
27
 
28
- def get_api():
29
- api = ChatGPT(session_token)
30
- return api
31
-
32
  def translate_or_transcribe(audio, task):
33
  whisper_model.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="Transcribe in Spoken Language" else translate_token_id]]
34
  text = whisper_model(audio)["text"]
35
  return text
36
 
37
- def get_response_from_chatbot(api,text):
38
- if api is None:
39
- return "Sorry, the chatGPT API has some issues. Please try again later"
40
  try:
41
- resp = api.send_message(text)
42
- api.refresh_auth()
43
- # api.reset_conversation()
44
- response = resp['message']
45
  except:
46
- response = "Sorry, the chatGPT queue is full. Please try again later"
47
  return response
48
 
49
- def chat(api,message, chat_history):
50
  out_chat = []
51
  if chat_history != '':
52
  out_chat = json.loads(chat_history)
53
- response = get_response_from_chatbot(api,message)
54
  out_chat.append((message, response))
55
  chat_history = json.dumps(out_chat)
56
  logger.info(f"out_chat_: {len(out_chat)}")
57
- return api,out_chat, chat_history
58
 
59
  start_work = """async() => {
60
  function isMobile() {
@@ -99,7 +89,6 @@ start_work = """async() => {
99
 
100
  page1.style.display = "none";
101
  page2.style.display = "block";
102
-
103
  window['div_count'] = 0;
104
  window['chat_bot'] = window['gradioEl'].querySelectorAll('#chat_bot')[0];
105
  window['chat_bot1'] = window['gradioEl'].querySelectorAll('#chat_bot1')[0];
@@ -183,10 +172,10 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
183
  outputs=prompt_input
184
  )
185
 
186
- api = gr.State(value=get_api())
187
  submit_btn.click(fn=chat,
188
- inputs=[api,prompt_input, chat_history],
189
- outputs=[api,chatbot, chat_history],
190
  )
191
  gr.HTML('''
192
  <p>Note: Please be aware that audio records from iOS devices will not be decoded as expected by Gradio. For the best experience, record your voice from a computer instead of your smartphone ;)</p>
@@ -196,5 +185,6 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
196
  </p>
197
  </div>
198
  ''')
 
199
 
200
  demo.launch(debug = True)
 
6
  from transformers import pipeline
7
  import torch
8
 
9
+ session_token = os.environ.get('SessionToken')
10
+ api = ChatGPT(session_token)
 
 
 
11
 
12
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
13
 
 
22
  transcribe_token_id = all_special_ids[-5]
23
  translate_token_id = all_special_ids[-6]
24
 
 
 
 
 
25
  def translate_or_transcribe(audio, task):
26
  whisper_model.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="Transcribe in Spoken Language" else translate_token_id]]
27
  text = whisper_model(audio)["text"]
28
  return text
29
 
30
+ def get_response_from_chatbot(text):
 
 
31
  try:
32
+ resp = api.send_message(text)
33
+ response = resp['message']
34
+ # logger.info(f"response_: {response}")
 
35
  except:
36
+ response = "Sorry, the chatGPT queue is full. Please try again in some time"
37
  return response
38
 
39
+ def chat(message, chat_history):
40
  out_chat = []
41
  if chat_history != '':
42
  out_chat = json.loads(chat_history)
43
+ response = get_response_from_chatbot(message)
44
  out_chat.append((message, response))
45
  chat_history = json.dumps(out_chat)
46
  logger.info(f"out_chat_: {len(out_chat)}")
47
+ return out_chat, chat_history
48
 
49
  start_work = """async() => {
50
  function isMobile() {
 
89
 
90
  page1.style.display = "none";
91
  page2.style.display = "block";
 
92
  window['div_count'] = 0;
93
  window['chat_bot'] = window['gradioEl'].querySelectorAll('#chat_bot')[0];
94
  window['chat_bot1'] = window['gradioEl'].querySelectorAll('#chat_bot1')[0];
 
172
  outputs=prompt_input
173
  )
174
 
175
+
176
  submit_btn.click(fn=chat,
177
+ inputs=[prompt_input, chat_history],
178
+ outputs=[chatbot, chat_history],
179
  )
180
  gr.HTML('''
181
  <p>Note: Please be aware that audio records from iOS devices will not be decoded as expected by Gradio. For the best experience, record your voice from a computer instead of your smartphone ;)</p>
 
185
  </p>
186
  </div>
187
  ''')
188
+ gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=RamAnanth1.chatGPT_voice)")
189
 
190
  demo.launch(debug = True)