Ridealist committed
Commit f1783c6 • 1 Parent(s): d5e36c6

feat: make debate bot ui and logic with simple asking bot

Files changed (1)
  1. vocal_app.py +57 -18
vocal_app.py CHANGED
@@ -42,13 +42,13 @@ if "total_debate_history" not in st.session_state:
     st.session_state.total_debate_history = ""
 
 if "user_debate_history" not in st.session_state:
-    st.session_state.user_debate_history = ""
+    st.session_state.user_debate_history = []
 
 if "bot_debate_history" not in st.session_state:
-    st.session_state.bot_debate_history = ""
+    st.session_state.bot_debate_history = []
 
 if "user_debate_time" not in st.session_state:
-    st.session_state.user_debate_time = ""
+    st.session_state.user_debate_time = []
 
 if "pros_and_cons" not in st.session_state:
     st.session_state.pros_and_cons = ""
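The history keys are switched from empty strings to lists here so that later turns can be appended to them. The same init guards can be collapsed into one loop; a minimal sketch, where the `_defaults` dict is illustrative rather than part of the commit:

```python
import streamlit as st

# Same guard pattern as the hunk above, collapsed into one loop.
# Keys and defaults are taken from the diff (list defaults for the histories).
_defaults = {
    "total_debate_history": "",
    "user_debate_history": [],
    "bot_debate_history": [],
    "user_debate_time": [],
    "pros_and_cons": "",
}

for key, default in _defaults.items():
    if key not in st.session_state:
        st.session_state[key] = default
```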
@@ -269,6 +269,16 @@ config = dotenv_values(".env")
 openai.organization = config.get("OPENAI_ORGANIZATION")
 openai.api_key = config.get("OPENAI_API_KEY")
 
+def ask_gpt(promt):
+    completion = openai.ChatCompletion.create(
+        model = "gpt-3.5-turbo",
+        messages = {
+            "role": "assistant",
+            "content": promt
+        }
+    )
+    return completion.choices[0].message.content
+
 # generate response
 def generate_response(prompt):
     st.session_state['messages'].append({"role": "user", "content": prompt})
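As written, the new `ask_gpt` helper hands `openai.ChatCompletion.create` a single dict as `messages`, but the pre-1.0 openai client expects a list of role/content dicts, so the call would fail (and `promt` is a typo for `prompt`). A corrected sketch, assuming the sidebar question should be sent with the `user` role:

```python
import openai

def ask_gpt(prompt):
    # ChatCompletion expects a list of message dicts, not a single dict.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return completion.choices[0].message.content
```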
@@ -280,7 +290,7 @@ def generate_response(prompt):
     response = completion.choices[0].message.content
     st.session_state['messages'].append({"role": "assistant", "content": response})
 
-    print(st.session_state['messages'])
+    # print(st.session_state['messages'])
     # total_tokens = completion.usage.total_tokens
     # prompt_tokens = completion.usage.prompt_tokens
     # completion_tokens = completion.usage.completion_tokens
@@ -288,31 +298,55 @@ def generate_response(prompt):
     return response #, total_tokens, prompt_tokens, completion_tokens
 
 #TODO add the three history session keys that Woong-gi added
-#TODO record the total time the user has spoken ->
+#TODO record the total time the user has spoken -> store it in the session
 
 def page4():
 
     with st.sidebar:
         st.sidebar.title('Ask to GPT')
-        st.sidebar.text_area(
+        user_input = st.sidebar.text_area(
             label="Input text here",
             placeholder="Input text here",
             height=100)
-        st.sidebar.button("Ask")
+        output = st.sidebar.button("Ask")
+        if output:
+            result = gpt_call(user_input)
+        else:
+            result = ""
+
+        st.sidebar.text_area(
+            label="Answer in here",
+            placeholder="(Answer)",
+            value=result,
+            height=150)
 
     debate_preset = "\n".join([
         "Debate Rules: ",
         "1) This debate will be divided into two teams, pro and con, with two debates on each team.",
         "2) The order of speaking is: first debater for the pro side, first debater for the con side, second debater for the pro side, second debater for the con side.",
-        "3) Answer logically with an introduction, body, and conclusion.\n", #add this one.
-        "4) If User take pro side, you take con side and vice versa.\n"
-        "5) You should comprehend user's chat and figure out whether the user take pro or con side.\n"
-        "6) Debate subject: " + st.session_state['topic']
+        "3) Answer logically with an introduction, body, and conclusion.", #add this one.
+        "4) Your role : " + st.session_state["pros_and_cons"] + "side debator"
+        "5) Debate subject: " + st.session_state['topic']
     ])
+    first_prompt = "Now we're going to start. Summarize the subject and your role. And ask user ready to begin."
     st.session_state['messages'] = [
         {"role": "system", "content": debate_preset}
     ]
 
+    completion = openai.ChatCompletion.create(
+        model = "gpt-3.5-turbo",
+        messages = [
+            {
+                "role": "system",
+                "content": debate_preset + "\n" + first_prompt
+            }
+        ]
+    )
+
+    response = completion.choices[0].message.content
+    st.session_state['messages'].append({"role": "assistant", "content": response})
+    st.session_state['generated'].append(response)
+
     # container for chat history
     response_container = st.container()
     # container for text box
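Two details in this hunk are worth flagging: the Ask button handler calls `gpt_call(user_input)` although the helper defined earlier is named `ask_gpt`, and the missing comma after rule 4 in `debate_preset` makes Python concatenate the rule-4 and rule-5 string literals into a single list element, dropping the newline between them. A sketch of the preset with explicit commas, keeping the wording from the diff:

```python
import streamlit as st

debate_preset = "\n".join([
    "Debate Rules: ",
    "1) This debate will be divided into two teams, pro and con, with two debates on each team.",
    "2) The order of speaking is: first debater for the pro side, first debater for the con side, second debater for the pro side, second debater for the con side.",
    "3) Answer logically with an introduction, body, and conclusion.",
    # Explicit trailing commas keep rules 4 and 5 as separate lines after join().
    "4) Your role: " + st.session_state["pros_and_cons"] + " side debater",
    "5) Debate subject: " + st.session_state["topic"],
])
```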
@@ -325,6 +359,14 @@ def page4():
 
     if submit_buttom and user_input:
         output = generate_response(user_input)
+        st.session_state['user_debate_history'].append(user_input)
+        st.session_state['bot_debate_history'].append(output)
+        st.session_state['total_debate_history'].append(
+            {
+                "user" + str(len(st.session_state['user_debate_history'])): user_input,
+                "bot" + str(len(st.session_state['bot_debate_history'])): output,
+            }
+        )
         st.session_state['past'].append(user_input)
         st.session_state['generated'].append(output)
         # st.session_state['model_name'].append(model_name)
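The new bookkeeping appends to `st.session_state['total_debate_history']`, but that key is still initialized to an empty string at line 42, so the first `.append()` would raise `AttributeError` unless the default becomes a list. A sketch of the same bookkeeping under that assumption, with a simpler per-turn record (the `turn` dict shape is illustrative):

```python
# `submit_buttom` keeps the variable name used elsewhere in the app.
if submit_buttom and user_input:
    output = generate_response(user_input)
    st.session_state["user_debate_history"].append(user_input)
    st.session_state["bot_debate_history"].append(output)
    # Assumes the line-42 default is changed to: st.session_state.total_debate_history = []
    turn = {"user": user_input, "bot": output}
    st.session_state["total_debate_history"].append(turn)
```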
@@ -341,13 +383,15 @@ def page4():
 
     if st.session_state['generated']:
         with response_container:
-            for i in range(len(st.session_state['generated'])):
+            message(st.session_state["generated"][0], key=str(0))
+            for i in range(len(st.session_state['past'])):
                 message(st.session_state["past"][i], is_user=True, key=str(i) + '_user')
-                message(st.session_state["generated"][i], key=str(i))
+                message(st.session_state["generated"][i + 1], key=str(i + 1))
             # st.write(
             #     f"Model used: {st.session_state['model_name'][i]}; Number of tokens: {st.session_state['total_tokens'][i]}; Cost: ${st.session_state['cost'][i]:.5f}"
             # )
 
+
     print(st.session_state)
 
 #########################################################
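The rendering change reserves `generated[0]` for the bot's opening statement, then pairs each user message `past[i]` with `generated[i + 1]`. The same interleaving can be expressed with `zip`, which avoids the off-by-one indexing; a minimal sketch, assuming `message` comes from `streamlit_chat` as the call signature suggests:

```python
import streamlit as st
from streamlit_chat import message  # assumed source of `message`

# The bot's opening statement lives at index 0 and has no matching user turn.
message(st.session_state["generated"][0], key="0")

# Each user turn past[i] is followed by the reply generated for it.
for i, (user_msg, bot_msg) in enumerate(
    zip(st.session_state["past"], st.session_state["generated"][1:])
):
    message(user_msg, is_user=True, key=f"{i}_user")
    message(bot_msg, key=str(i + 1))
```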
@@ -416,11 +460,6 @@ def page6():
     disfluency_word_list = ['eh', 'umm', 'ah', 'uh', 'er', 'erm', 'err']
     # Count the disfluency words
     disfluency_counts = {word: total_word_count[word] for word in disfluency_word_list}
-
-
-
-
-
 
     pass
 
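The disfluency count relies on `total_word_count` behaving like a `collections.Counter`, which returns 0 for words that never occur. A self-contained sketch with an illustrative transcript:

```python
from collections import Counter

transcript = "umm I think uh the policy is er beneficial"
total_word_count = Counter(transcript.lower().split())

disfluency_word_list = ['eh', 'umm', 'ah', 'uh', 'er', 'erm', 'err']
# Counter returns 0 for absent keys, so missing fillers simply count as zero.
disfluency_counts = {word: total_word_count[word] for word in disfluency_word_list}
print(disfluency_counts)  # {'eh': 0, 'umm': 1, 'ah': 0, 'uh': 1, 'er': 1, 'erm': 0, 'err': 0}
```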
 
 