peiji committed on
Commit
69e485b
1 Parent(s): 921304a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -40
app.py CHANGED
@@ -1,31 +1,48 @@
1
  # -*- encoding: utf-8 -*-
2
  import gradio as gr
3
- from gradio.components import label
4
- import requests
5
- import json
 
 
 
 
 
6
 
7
- def llm_infer(messages, character_name, character_info, user_name, user_info):
8
- url='https://search.bytedance.net/gpt/openapi/online/v2/crawl?ak=paQxYUZ0TcbuBSFPgSbwR5QBdizjW1Ut'
9
- headers={
10
- 'Content-Type': 'application/json'
11
- }
12
- data={
13
- "messages": messages,
14
- "character_profile":{
15
- "character_name":character_name,
16
- "character_info":character_info,
17
- "user_name":user_name,
18
- "user_info":user_info
19
- },
20
- "model": "Baichuan-NPC-Turbo",
21
- "stream": False
22
- }
23
- ret=requests.post(url=url, headers=headers, data=json.dumps(data))
24
-
25
- return ret.json()['choices'][0]['message']['content']
 
 
 
 
 
 
26
 
27
- def respond(message, history, character_name, character_info, user_name, user_info):
 
 
28
  messages = [
 
 
 
 
29
  ]
30
  for part in history:
31
  messages.extend(
@@ -45,35 +62,61 @@ def respond(message, history, character_name, character_info, user_name, user_in
45
  'content':message
46
  })
47
  print(messages)
48
- ret=llm_infer(messages, character_name, character_info, user_name, user_info)
49
  history.append([
50
  message,
51
  ret
52
  ])
53
  #if len(history) % 2 == 0:
54
  return "", history
55
- CSS ="""
56
- .contain { display: flex; flex-direction: column; }
57
- #component-0 { height: 100%; }
58
- #chatbot { flex-grow: 1; }
59
- """
60
 
61
- with gr.Blocks(fill_height=True, css=CSS) as demo:
62
- with gr.Row(equal_height=True):
63
- with gr.Column(scale=2):
64
- chatbot = gr.Chatbot(elem_id="chatbot")
65
- chatbot.height=550
 
66
  with gr.Column():
67
- character_name=gr.TextArea(label="角色名", max_lines=1, lines=1, value='雷雷')
68
- character_info=gr.TextArea(label="角色信息", max_lines=3, lines=1, value='小学生,在人大附小上学')
69
- user_name=gr.TextArea(label="用户名", lines=1, max_lines=1, value='明明')
70
- user_info=gr.TextArea(label='用户信息', lines=1, max_lines=3, value='明明是雷雷的哥哥,是个初中生')
71
 
72
 
73
  with gr.Row():
74
  chat_input = gr.Textbox(label="input")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
- chat_input.submit(respond, [chat_input,chatbot, character_name, character_info, user_name, user_info],[chat_input, chatbot])
 
77
  clear = gr.ClearButton([chat_input, chatbot],)
78
 
79
- demo.launch()
 
 
 
 
1
  # -*- encoding: utf-8 -*-
2
  import gradio as gr
3
+ import random
4
+ import time
5
+ from concurrent.futures import ThreadPoolExecutor
6
+ import asyncio
7
+ from functools import partial
8
+ from volcengine.maas import MaasService, MaasException, ChatRole
9
+ maas = MaasService('maas-api.ml-platform-cn-beijing.volces.com', 'cn-beijing', connection_timeout=24000, socket_timeout=24000)
10
+ maas.set_ak("AKLTY2ZjOWM0NmFlNTEwNDBhM2EyYTg4OTgzYTUyYTc2NjU")
11
 
12
+ maas.set_sk("T0Rjd01HUTFaVEF3Tm1FME5EWXhNRGhpTURreE1ETTROalkwTnpreU1UVQ==")
13
+
14
def llm_infer(endpoint_id, messages, agent_network_output=True):
    """Call the Volcengine MaaS chat endpoint and return the reply text.

    Parameters
    ----------
    endpoint_id : str
        MaaS endpoint id the request is routed to.
    messages : list[dict]
        Chat history as ``[{'role': ..., 'content': ...}, ...]``.
    agent_network_output : bool
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    str
        The assistant message content of the response's ``choice``.
    """
    req = {
        "model": {
            "endpoint_id": endpoint_id
        },
        "parameters": {
            "max_prompt_tokens": 3800,  # max prompt tokens; earlier input is auto-truncated. 0 = disabled
            "max_new_tokens": 0,        # upper bound on generated tokens. 0 = disabled
            "min_new_tokens": 0,        # lower bound on generated tokens. 0 = disabled
            "temperature": 1,           # output randomness/creativity; larger = more random. Range 0~1
            "top_p": 0.7,               # nucleus sampling; larger = more diverse tokens. Range 0~1
            "top_k": 0,                 # sample among the k most likely tokens, range 0-1000. 0 = disabled
        },
        "messages": messages,
    }
    # Retry until the call succeeds, but back off between attempts so a
    # persistent failure does not busy-spin the CPU (the original retried
    # immediately with no delay).
    while True:
        try:
            resp = maas.chat(req)
            break
        except MaasException as e:
            print(e)
            time.sleep(1)  # brief back-off before the next attempt
            continue

    return resp['choice']['message']['content']
39
+
40
+ def respond(message, history, ep: gr.TextArea, sp:gr.TextArea):
41
  messages = [
42
+ {
43
+ 'role':'system',
44
+ 'content':sp
45
+ },
46
  ]
47
  for part in history:
48
  messages.extend(
 
62
  'content':message
63
  })
64
  print(messages)
65
+ ret=llm_infer(ep.strip(), messages)
66
  history.append([
67
  message,
68
  ret
69
  ])
70
  #if len(history) % 2 == 0:
71
  return "", history
 
 
 
 
 
72
 
73
+
74
+ with gr.Blocks(fill_height=True).queue(default_concurrency_limit=10) as demo:
75
+ with gr.Row():
76
+ with gr.Column():
77
+ chatbot = gr.Chatbot()
78
+ chatbot.height = 500
79
  with gr.Column():
80
+ gr.Label("input sp/ep of bot") # 增加些描述
81
+ sp=gr.TextArea(label="你是...", lines=1, max_lines=5,value='')
82
+ ep=gr.TextArea(label='ep1', lines=1, max_lines=1, value='')
 
83
 
84
 
85
  with gr.Row():
86
  chat_input = gr.Textbox(label="input")
87
+
88
def handle_click(chatbot, ep, sp, evt: gr.SelectData):
    """Regenerate the bot reply for the chat row the user clicked.

    Rebuilds the conversation from the system prompt plus every exchange
    before the selected row, re-queries the model, and returns the history
    with that row's reply replaced.
    """
    row = evt.index[0]
    print(row)
    # System prompt first, then each prior user/assistant exchange in order.
    convo = [
        {
            'role': 'system',
            'content': sp,
        },
    ]
    for exchange in chatbot[:row]:
        convo.append({'role': 'user', 'content': exchange[0]})
        convo.append({'role': 'assistant', 'content': exchange[1]})
    reply = llm_infer(ep, convo)
    print(reply)
    updated = chatbot[:row] + [[chatbot[row][0], reply]] + chatbot[row + 1:]
    print(updated)
    return updated
114
 
115
+ chat_input.submit(respond, [chat_input,chatbot, ep, sp ],[chat_input, chatbot])
116
+ #chatbot.select(handle_click, [chatbot, ep, sp],[chatbot],show_progress='hidden')
117
  clear = gr.ClearButton([chat_input, chatbot],)
118
 
119
+
120
+
121
+
122
+ demo.launch(share=True)