liuyizhang committed
Commit 54ed26e
1 Parent(s): 459631c

update app.py

Files changed (1):
  1. app.py +1 -71
app.py CHANGED
@@ -366,77 +366,7 @@ def set_openai_api_key(api_key):
 
 def get_response_from_openai(input, prompt_system, chat_history, model_radio, temperature=0.0):
     error_1 = 'You exceeded your current quota, please check your plan and billing details.'
-    def openai_create_old(input_list, prompt_system, model_radio):
-        try:
-            # print(f'input_list={input_list}')
-            input_list_len = len(input_list)
-            out_prompt = ''
-            messages = []
-            prompt_system_token = prompt_system
-            if model_radio == 'GPT-3.0':
-                out_prompt = 'AI:'
-                if prompt_system != '':
-                    prompt_system_token = f'Human:{prompt_system}'
-            for i in range(input_list_len):
-                input = input_list[input_list_len-i-1].replace("<br>", '\n\n')
-                if input.startswith("Openai said:"):
-                    input = "☝:"
-
-                if input.startswith("☝:"):
-                    if model_radio == 'GPT-3.0':
-                        out_prompt = input.replace("☝:", "AI:") + '\n' + out_prompt
-                    else:
-                        out_prompt = input.replace("☝:", "") + out_prompt
-                        messages.insert(0, {"role": "assistant", "content": input.replace("☝:", "")})
-                elif input.startswith("☟:"):
-                    if model_radio == 'GPT-3.0':
-                        out_prompt = input.replace("☟:", "Human:") + '\n' + out_prompt
-                    else:
-                        out_prompt = input.replace("☟:", "") + out_prompt
-                        messages.insert(0, {"role": "user", "content": input.replace("☟:", "")})
-                tokens = token_encoder.encode(out_prompt)
-                if len(tokens) > max_input_tokens:
-                    break
-
-            if model_radio == 'GPT-3.0':
-                # print(out_prompt)
-                response = openai.Completion.create(
-                    model="text-davinci-003",
-                    prompt=out_prompt,
-                    temperature=0.7,
-                    max_tokens=max_output_tokens,
-                    top_p=1,
-                    frequency_penalty=0,
-                    presence_penalty=0,
-                    stop=[" Human:", " AI:"]
-                )
-                # print(f'response_3.0__:{response}')
-                ret = response.choices[0].text
-            else:
-                # print(messages)
-                response = openai.ChatCompletion.create(
-                    model="gpt-3.5-turbo",
-                    messages=messages,
-                    temperature=0.7,
-                    max_tokens=max_output_tokens,
-                    top_p=1,
-                    frequency_penalty=0,
-                    presence_penalty=0,
-                    stop=[" Human:", " AI:"]
-                )
-                # print(f'response_3.5__:{response}')
-                ret = response.choices[0].message['content']
-            if ret.startswith("\n\n"):
-                ret = ret.replace("\n\n", '')
-            ret = ret.replace('\n', '<br>')
-            if ret == '':
-                ret = f"Openai said: I'm too tired."
-            return ret, response.usage
-        except Exception as e:
-            logger.info(f"openai_create_error__{e}")
-            ret = f"Openai said: {e} Perhaps enter your OpenAI API key."
-            return ret, {"completion_tokens": -1, "prompt_tokens": -1, "total_tokens": -1}
-
+
     def openai_create(input_list, prompt_system, model_radio):
         try:
             input_list_len = len(input_list)
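
For context, the 71 removed lines implemented a single pattern: walk the chat history from newest to oldest, map entries tagged "☝:" (assistant) and "☟:" (user) into either a plain prompt for the legacy 'GPT-3.0' completion endpoint or a messages list for gpt-3.5-turbo, stop once a token budget is exceeded, and then call the pre-1.0 openai SDK. The sketch below shows only the chat-flavoured half of that pattern in isolation. It assumes the pre-1.0 openai SDK and tiktoken; the helper names and limits (build_messages, chat_once, MAX_INPUT_TOKENS, MAX_OUTPUT_TOKENS) are illustrative and not taken from app.py.

# Illustrative sketch only: assumes the pre-1.0 `openai` SDK and `tiktoken`;
# names and limits here are hypothetical, not copied from app.py.
import openai
import tiktoken

MAX_INPUT_TOKENS = 3000    # hypothetical prompt budget
MAX_OUTPUT_TOKENS = 1024   # hypothetical completion budget
token_encoder = tiktoken.encoding_for_model("gpt-3.5-turbo")

def build_messages(history, prompt_system=""):
    """Turn a role-tagged history (oldest entry first) into chat messages.

    Entries starting with "☝:" become assistant turns and "☟:" user turns,
    mirroring the markers the removed openai_create_old helper parsed.
    """
    messages = []
    accumulated = ""
    for entry in reversed(history):                  # newest to oldest
        entry = entry.replace("<br>", "\n\n")
        if entry.startswith("☝:"):
            role, text = "assistant", entry[len("☝:"):]
        elif entry.startswith("☟:"):
            role, text = "user", entry[len("☟:"):]
        else:
            continue
        accumulated += text
        if len(token_encoder.encode(accumulated)) > MAX_INPUT_TOKENS:
            break                                    # budget hit: drop the older turns
        messages.insert(0, {"role": role, "content": text})
    if prompt_system:
        messages.insert(0, {"role": "system", "content": prompt_system})
    return messages

def chat_once(history, prompt_system=""):
    """One round trip against the pre-1.0 ChatCompletion endpoint."""
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=build_messages(history, prompt_system),
        temperature=0.7,
        max_tokens=MAX_OUTPUT_TOKENS,
    )
    return response.choices[0].message["content"], response.usage

In the removed code the same truncation loop also fed openai.Completion.create with a text-davinci-003 prompt built from "Human:"/"AI:" turns; only the chat path is shown here to keep the sketch short.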
 