AllenYkl committed on
Commit
6900d0c
1 Parent(s): 3ce70bc

Upload 6 files

Browse files
bin_public/app/Chatbot.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding:utf-8 -*-
"""Gradio entry point for the public ChatGPT UI (invite-code variant)."""

import sys
from bin_public.utils.utils import *
from bin_public.utils.utils_db import *
from bin_public.config.presets import *

my_api_key = ""

# The Docker image sets dockerrun=yes; detect that to pick up env-based config.
dockerflag = os.environ.get('dockerrun') == 'yes'

authflag = False

if dockerflag:
    my_api_key = os.environ.get('my_api_key')
    if my_api_key == "empty":
        print("Please give a api key!")
        sys.exit(1)
    # Basic-auth credentials come from the container environment.
    username = os.environ.get('USERNAME')
    password = os.environ.get('PASSWORD')
    # Fix: `isinstance(x, type(None))` replaced with the idiomatic `is None`.
    if username is not None and password is not None:
        authflag = True
else:
    # Reading the key from api_key.txt was disabled upstream (kept for reference):
    # if not my_api_key and os.path.exists("api_key.txt") and os.path.getsize("api_key.txt"):
    #     with open("api_key.txt", "r") as f:
    #         my_api_key = f.read().strip()

    # Outside Docker, optional basic-auth credentials come from auth.json.
    # NOTE(review): nesting of this block under `else` is inferred from the
    # upstream project layout — confirm against the original file.
    if os.path.exists("auth.json"):
        with open("auth.json", "r") as f:
            auth = json.load(f)
            username = auth["username"]
            password = auth["password"]
            if username != "" and password != "":
                authflag = True

# Route chatbot markdown rendering through the project's postprocess helper.
gr.Chatbot.postprocess = postprocess

with gr.Blocks(css=customCSS) as demo:
    history = gr.State([])
    token_count = gr.State([])
    invite_code = gr.State()
    promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
    TRUECOMSTANT = gr.State(True)  # [sic] name kept as-is to preserve the module namespace
    FALSECONSTANT = gr.State(False)
    topic = gr.State("未命名对话历史记录")

    gr.HTML(title)

    with gr.Row(scale=1).style(equal_height=True):
        with gr.Column(scale=5):
            with gr.Row(scale=1):
                chatbot = gr.Chatbot().style(height=600)
            with gr.Row(scale=1):
                with gr.Column(scale=12):
                    user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style(
                        container=False)
                with gr.Column(min_width=50, scale=1):
                    submitBtn = gr.Button("🚀", variant="primary")
            with gr.Row(scale=1):
                emptyBtn = gr.Button("🧹 新的对话")
                retryBtn = gr.Button("🔄 重新生成")
                delLastBtn = gr.Button("🗑️ 删除一条对话")
                reduceTokenBtn = gr.Button("♻️ 总结对话")

        with gr.Column():
            with gr.Column(min_width=50, scale=1):
                status_display = gr.Markdown("status: ready")
            with gr.Tab(label="ChatGPT"):
                # Fix: dropped no-op f-string prefixes (no placeholders present).
                keyTXT = gr.Textbox(show_label=True, placeholder="OpenAI API-key...",
                                    type="password", visible=not HIDE_MY_KEY,
                                    label="API-Key/Invite-Code")
                # Hidden mirror holding the resolved API key after key_preprocessing.
                keyTxt = gr.Textbox(visible=False)
                key_button = gr.Button("Enter")
                model_select_dropdown = gr.Dropdown(label="选择模型", choices=MODELS,
                                                    multiselect=False, value=MODELS[0])
                with gr.Accordion("参数", open=False):
                    # Fix: `minimum=-0` normalized to 0.0 (identical value).
                    temperature = gr.Slider(minimum=0.0, maximum=2.0, value=1.0,
                                            step=0.1, interactive=True, label="Temperature")
                    top_p = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.05,
                                      interactive=True, label="Top-p (nucleus sampling)",
                                      visible=False)
                use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True,
                                                     visible=enable_streaming_option)
                use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)

            with gr.Tab(label="Prompt"):
                systemPromptTxt = gr.Textbox(show_label=True, placeholder="在这里输入System Prompt...",
                                             label="System prompt",
                                             value=initial_prompt).style(container=True)
                with gr.Accordion(label="加载Prompt模板", open=True):
                    with gr.Column():
                        with gr.Row():
                            with gr.Column(scale=6):
                                templateFileSelectDropdown = gr.Dropdown(
                                    label="选择Prompt模板集合文件",
                                    choices=get_template_names(plain=True),
                                    multiselect=False,
                                    value=get_template_names(plain=True)[0])
                            with gr.Column(scale=1):
                                templateRefreshBtn = gr.Button("🔄 刷新")
                        with gr.Row():
                            with gr.Column():
                                templateSelectDropdown = gr.Dropdown(
                                    label="从Prompt模板中加载",
                                    choices=load_template(get_template_names(plain=True)[0], mode=1),
                                    multiselect=False,
                                    value=load_template(get_template_names(plain=True)[0], mode=1)[0])

            with gr.Tab(label="保存/加载"):
                with gr.Accordion(label="保存/加载对话历史记录", open=True):
                    with gr.Column():
                        with gr.Row():
                            with gr.Column(scale=6):
                                saveFileName = gr.Textbox(
                                    show_label=True, placeholder="在这里输入保存的文件名...",
                                    label="设置保存文件名",
                                    value="对话历史记录").style(container=True)
                            with gr.Column(scale=1):
                                saveHistoryBtn = gr.Button("💾 保存对话")
                        with gr.Row():
                            with gr.Column(scale=6):
                                historyFileSelectDropdown = gr.Dropdown(
                                    label="从列表中加载对话",
                                    choices=get_history_names(plain=True),
                                    multiselect=False,
                                    value=get_history_names(plain=True)[0])
                            with gr.Column(scale=1):
                                historyRefreshBtn = gr.Button("🔄 刷新")

    gr.HTML("""
<div style="text-align: center; margin-top: 20px; margin-bottom: 20px;">
""")
    gr.Markdown(description)

    # An API key is used directly; an invite code is exchanged for the central
    # API key inside key_preprocessing.
    key_button.click(key_preprocessing, [keyTXT], [status_display, keyTxt, invite_code])

    # Shared wiring for both submit paths (textbox Enter and the 🚀 button).
    predict_inputs = [keyTxt, invite_code, systemPromptTxt, history, user_input, chatbot,
                      token_count, top_p, temperature, use_streaming_checkbox,
                      model_select_dropdown, use_websearch_checkbox]
    predict_outputs = [chatbot, history, status_display, token_count]

    user_input.submit(predict, predict_inputs, predict_outputs, show_progress=True)
    user_input.submit(reset_textbox, [], [user_input])

    submitBtn.click(predict, predict_inputs, predict_outputs, show_progress=True)
    submitBtn.click(reset_textbox, [], [user_input])

    # NOTE: reset_state's output order differs from predict's — keep it explicit.
    emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display],
                   show_progress=True)

    retryBtn.click(retry,
                   [keyTxt, invite_code, systemPromptTxt, history, chatbot, token_count,
                    top_p, temperature, use_streaming_checkbox, model_select_dropdown],
                   predict_outputs, show_progress=True)

    delLastBtn.click(delete_last_conversation, [chatbot, history, token_count],
                     [chatbot, history, token_count, status_display], show_progress=True)

    reduceTokenBtn.click(reduce_token_size,
                         [keyTxt, invite_code, systemPromptTxt, history, chatbot, token_count,
                          top_p, temperature, use_streaming_checkbox, model_select_dropdown],
                         predict_outputs, show_progress=True)

    saveHistoryBtn.click(save_chat_history,
                         [saveFileName, systemPromptTxt, history, chatbot], None,
                         show_progress=True)
    saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])

    historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
    historyFileSelectDropdown.change(load_chat_history,
                                     [historyFileSelectDropdown, systemPromptTxt, history, chatbot],
                                     [saveFileName, systemPromptTxt, history, chatbot],
                                     show_progress=True)

    templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
    templateFileSelectDropdown.change(load_template, [templateFileSelectDropdown],
                                      [promptTemplates, templateSelectDropdown],
                                      show_progress=True)
    templateSelectDropdown.change(get_template_content,
                                  [promptTemplates, templateSelectDropdown, systemPromptTxt],
                                  [systemPromptTxt], show_progress=True)

logging.info("\n访问 http://localhost:7860 查看界面")
# Local server by default, reachable by IP; no public share link is created.
demo.title = "ChatGPT-长江商学院 🚀"

if __name__ == "__main__":
    if dockerflag:
        # Inside Docker: bind all interfaces on the fixed port.
        if authflag:
            demo.queue().launch(server_name="0.0.0.0", server_port=7860, auth=(username, password))
        else:
            demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
    else:
        if authflag:
            demo.queue().launch(share=False, auth=(username, password))
        else:
            demo.queue().launch(share=False)  # change to share=True for a public link
bin_public/app/app.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding:utf-8 -*-
"""Gradio entry point (non-invite-code variant, imports from the `bin` package)."""

import sys
# NOTE(review): this file imports from `bin.*` while it lives under
# `bin_public/` — confirm the `bin` package exists in the deployment.
from bin.utils.utils import *
from bin.config.presets import *

my_api_key = ""

# The Docker image sets dockerrun=yes; detect that to pick up env-based config.
dockerflag = os.environ.get('dockerrun') == 'yes'

authflag = False

if dockerflag:
    my_api_key = os.environ.get('my_api_key')
    if my_api_key == "empty":
        print("Please give a api key!")
        sys.exit(1)
    # Basic-auth credentials come from the container environment.
    username = os.environ.get('USERNAME')
    password = os.environ.get('PASSWORD')
    # Fix: `isinstance(x, type(None))` replaced with the idiomatic `is None`.
    if username is not None and password is not None:
        authflag = True
else:
    # Reading the key from api_key.txt was disabled upstream (kept for reference):
    # if not my_api_key and os.path.exists("api_key.txt") and os.path.getsize("api_key.txt"):
    #     with open("api_key.txt", "r") as f:
    #         my_api_key = f.read().strip()

    # Outside Docker, optional basic-auth credentials come from auth.json.
    # NOTE(review): nesting of this block under `else` is inferred from the
    # upstream project layout — confirm against the original file.
    if os.path.exists("auth.json"):
        with open("auth.json", "r") as f:
            auth = json.load(f)
            username = auth["username"]
            password = auth["password"]
            if username != "" and password != "":
                authflag = True

# Route chatbot markdown rendering through the project's postprocess helper.
gr.Chatbot.postprocess = postprocess

with gr.Blocks(css=customCSS) as demo:
    history = gr.State([])
    token_count = gr.State([])
    # Fix: added missing State for key_preprocessing's third output (see below).
    invite_code = gr.State()
    promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
    TRUECOMSTANT = gr.State(True)  # [sic] name kept as-is to preserve the module namespace
    FALSECONSTANT = gr.State(False)
    topic = gr.State("未命名对话历史记录")

    gr.HTML(title)

    with gr.Row(scale=1).style(equal_height=True):
        with gr.Column(scale=5):
            with gr.Row(scale=1):
                chatbot = gr.Chatbot().style(height=600)
            with gr.Row(scale=1):
                with gr.Column(scale=12):
                    user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style(
                        container=False)
                with gr.Column(min_width=50, scale=1):
                    submitBtn = gr.Button("🚀", variant="primary")
            with gr.Row(scale=1):
                emptyBtn = gr.Button("🧹 新的对话")
                retryBtn = gr.Button("🔄 重新生成")
                delLastBtn = gr.Button("🗑️ 删除一条对话")
                reduceTokenBtn = gr.Button("♻️ 总结对话")

        with gr.Column():
            with gr.Column(min_width=50, scale=1):
                status_display = gr.Markdown("status: ready")
            with gr.Tab(label="ChatGPT"):
                # Fix: dropped no-op f-string prefixes (no placeholders present).
                keyTXT = gr.Textbox(show_label=True, placeholder="OpenAI API-key...",
                                    type="password", visible=not HIDE_MY_KEY,
                                    label="API-Key/Invite-Code")
                # Hidden mirror holding the resolved API key after key_preprocessing.
                keyTxt = gr.Textbox(visible=False)
                key_button = gr.Button("Enter")
                model_select_dropdown = gr.Dropdown(label="选择模型", choices=MODELS,
                                                    multiselect=False, value=MODELS[0])
                with gr.Accordion("参数", open=False):
                    # Fix: `minimum=-0` normalized to 0.0 (identical value).
                    temperature = gr.Slider(minimum=0.0, maximum=2.0, value=1.0,
                                            step=0.1, interactive=True, label="Temperature")
                    top_p = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.05,
                                      interactive=True, label="Top-p (nucleus sampling)",
                                      visible=False)
                use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True,
                                                     visible=enable_streaming_option)
                use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)

            with gr.Tab(label="Prompt"):
                systemPromptTxt = gr.Textbox(show_label=True, placeholder="在这里输入System Prompt...",
                                             label="System prompt",
                                             value=initial_prompt).style(container=True)
                with gr.Accordion(label="加载Prompt模板", open=True):
                    with gr.Column():
                        with gr.Row():
                            with gr.Column(scale=6):
                                templateFileSelectDropdown = gr.Dropdown(
                                    label="选择Prompt模板集合文件",
                                    choices=get_template_names(plain=True),
                                    multiselect=False,
                                    value=get_template_names(plain=True)[0])
                            with gr.Column(scale=1):
                                templateRefreshBtn = gr.Button("🔄 刷新")
                        with gr.Row():
                            with gr.Column():
                                templateSelectDropdown = gr.Dropdown(
                                    label="从Prompt模板中加载",
                                    choices=load_template(get_template_names(plain=True)[0], mode=1),
                                    multiselect=False,
                                    value=load_template(get_template_names(plain=True)[0], mode=1)[0])

            with gr.Tab(label="保存/加载"):
                with gr.Accordion(label="保存/加载对话历史记录", open=True):
                    with gr.Column():
                        with gr.Row():
                            with gr.Column(scale=6):
                                saveFileName = gr.Textbox(
                                    show_label=True, placeholder="在这里输入保存的文件名...",
                                    label="设置保存文件名",
                                    value="对话历史记录").style(container=True)
                            with gr.Column(scale=1):
                                saveHistoryBtn = gr.Button("💾 保存对话")
                        with gr.Row():
                            with gr.Column(scale=6):
                                historyFileSelectDropdown = gr.Dropdown(
                                    label="从列表中加载对话",
                                    choices=get_history_names(plain=True),
                                    multiselect=False,
                                    value=get_history_names(plain=True)[0])
                            with gr.Column(scale=1):
                                historyRefreshBtn = gr.Button("🔄 刷新")

    gr.HTML("""
<div style="text-align: center; margin-top: 20px; margin-bottom: 20px;">
""")
    gr.Markdown(description)

    # An API key is used directly; an invite code is exchanged for the central
    # API key inside key_preprocessing.
    # Fix: the original referenced an undefined name `status`
    # (`[status, keyTxt, status_display]`), raising NameError at import time.
    # Wired to the same output triple used by the sibling Chatbot.py.
    key_button.click(key_preprocessing, [keyTXT], [status_display, keyTxt, invite_code])

    # Shared wiring for both submit paths (textbox Enter and the 🚀 button).
    predict_inputs = [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count,
                      top_p, temperature, use_streaming_checkbox, model_select_dropdown,
                      use_websearch_checkbox]
    predict_outputs = [chatbot, history, status_display, token_count]

    user_input.submit(predict, predict_inputs, predict_outputs, show_progress=True)
    user_input.submit(reset_textbox, [], [user_input])

    submitBtn.click(predict, predict_inputs, predict_outputs, show_progress=True)
    submitBtn.click(reset_textbox, [], [user_input])

    # NOTE: reset_state's output order differs from predict's — keep it explicit.
    emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display],
                   show_progress=True)

    retryBtn.click(retry,
                   [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p,
                    temperature, use_streaming_checkbox, model_select_dropdown],
                   predict_outputs, show_progress=True)

    delLastBtn.click(delete_last_conversation, [chatbot, history, token_count],
                     [chatbot, history, token_count, status_display], show_progress=True)

    reduceTokenBtn.click(reduce_token_size,
                         [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p,
                          temperature, use_streaming_checkbox, model_select_dropdown],
                         predict_outputs, show_progress=True)

    saveHistoryBtn.click(save_chat_history,
                         [saveFileName, systemPromptTxt, history, chatbot], None,
                         show_progress=True)
    saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])

    historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
    historyFileSelectDropdown.change(load_chat_history,
                                     [historyFileSelectDropdown, systemPromptTxt, history, chatbot],
                                     [saveFileName, systemPromptTxt, history, chatbot],
                                     show_progress=True)

    templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
    templateFileSelectDropdown.change(load_template, [templateFileSelectDropdown],
                                      [promptTemplates, templateSelectDropdown],
                                      show_progress=True)
    templateSelectDropdown.change(get_template_content,
                                  [promptTemplates, templateSelectDropdown, systemPromptTxt],
                                  [systemPromptTxt], show_progress=True)

logging.info(colorama.Back.GREEN + "\n访问 http://localhost:7860 查看界面" + colorama.Style.RESET_ALL)
# Local server by default, reachable by IP; no public share link is created.
demo.title = "ChatGPT-长江商学院 🚀"

if __name__ == "__main__":
    if dockerflag:
        # Inside Docker: bind all interfaces on the fixed port.
        if authflag:
            demo.queue().launch(server_name="0.0.0.0", server_port=7860, auth=(username, password))
        else:
            demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
    else:
        if authflag:
            demo.queue().launch(share=False, auth=(username, password))
        else:
            demo.queue().launch(share=False)  # change to share=True for a public link
bin_public/config/presets.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding:utf-8 -*-
"""UI strings, model list, error messages and tuning constants."""

# Page header and (empty) footer markup.
title = """<h1 align="center">ChatGPT-长江商学院 🚀</h1>"""
description = """<div align=center>
</div>
"""

# Extra CSS injected into the Gradio page: inline code + fenced code styling,
# plus a global transition.
customCSS = """
code {
display: inline;
white-space: break-spaces;
border-radius: 6px;
margin: 0 2px 0 2px;
padding: .2em .4em .1em .4em;
background-color: rgba(175,184,193,0.2);
}
pre code {
display: block;
white-space: pre;
background-color: hsla(0, 0%, 0%, 72%);
border: solid 5px var(--color-border-primary) !important;
border-radius: 10px;
padding: 0 1.2rem 1.2rem;
margin-top: 1em !important;
color: #FFF;
box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
}
*{
transition: all 0.6s;
}
"""

# Prompt sent when condensing the conversation to reclaim tokens.
summarize_prompt = "你是谁?我们刚才聊了什么?"

# Selectable models. The full list is kept for reference:
# ["gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]
MODELS = ["gpt-3.5-turbo-0301"]

# Template wrapped around the user query when web search is enabled.
websearch_prompt = """Web search results:
{web_results}
Current date: {current_date}
Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
Query: {query}
"""

# Error messages shown in the status area.
standard_error_msg = "☹️发生了错误:"  # common prefix for all errors
error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。"  # generic retrieval failure
connection_timeout_prompt = "连接超时,无法获取对话。"  # connect timeout
read_timeout_prompt = "读取超时,无法获取对话。"  # read timeout
proxy_error_prompt = "代理错误,无法获取对话。"  # proxy failure
ssl_error_prompt = "SSL错误,无法获取对话。"  # TLS failure
no_apikey_msg = "API key长度不是51位,请检查是否输入正确。"  # malformed key (valid keys are 51 chars)
no_invite_code_msg = "输入的invite code不存在"  # unknown invite code
no_useful_invite_code_msg = "输入的invite code状态不可用"  # disabled invite code


max_token_streaming = 3500  # token budget in streaming mode
timeout_streaming = 30  # request timeout (s) in streaming mode
max_token_all = 3500  # token budget in non-streaming mode
timeout_all = 200  # request timeout (s) in non-streaming mode
enable_streaming_option = True  # show the "stream answers" checkbox
HIDE_MY_KEY = False  # set True to hide the API-key textbox in the UI
bin_public/utils/utils.py ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding:utf-8 -*-
2
+ from __future__ import annotations
3
+ from typing import TYPE_CHECKING, List, Tuple
4
+ import logging
5
+ import json
6
+ import gradio as gr
7
+ # import openai
8
+ import os
9
+ import requests
10
+ # import markdown
11
+ import csv
12
+ import mdtex2html
13
+ from pypinyin import lazy_pinyin
14
+ from bin.config.presets import *
15
+ import tiktoken
16
+ from tqdm import tqdm
17
+ from duckduckgo_search import ddg
18
+ from bin_public.utils.utils_db import *
19
+ import datetime
20
+
# Global logging format: timestamp, level, file:line, message.
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")

if TYPE_CHECKING:
    from typing import TypedDict

    # Static-only shape of a gr.Dataframe payload; never instantiated at runtime.
    class DataframeData(TypedDict):
        headers: List[str]  # column names
        data: List[List[str | int | bool]]  # row-major cell values

# Default system prompt used until the user supplies one.
initial_prompt = "You are a helpful assistant."
# OpenAI chat-completions endpoint used by get_response().
API_URL = "https://api.openai.com/v1/chat/completions"
# On-disk directories for saved conversations and prompt template files.
HISTORY_DIR = "history"
TEMPLATES_DIR = "templates"
34
+
35
+
def postprocess(
        self, y: List[Tuple[str | None, str | None]]
) -> List[Tuple[str | None, str | None]]:
    """Convert each (message, response) markdown pair in *y* to HTML.

    Installed over gr.Chatbot.postprocess; None entries pass through unchanged.
    Mutates *y* in place and returns it.
    """
    if y is None:
        return []
    html_pairs = [
        (
            None if message is None else mdtex2html.convert(message),
            None if response is None else mdtex2html.convert(response),
        )
        for message, response in y
    ]
    y[:] = html_pairs
    return y
55
+
56
+
def count_token(input_str):
    """Return the number of cl100k_base (GPT-3.5/4) tokens in *input_str*."""
    tokenizer = tiktoken.get_encoding("cl100k_base")
    return len(tokenizer.encode(input_str))
61
+
62
+
def parse_text(text):
    """Render chat text as HTML.

    Drops empty lines, turns ``` fences into <pre><code class="language-X">
    blocks, HTML-escapes characters inside fenced code, and joins lines with
    <br> (the very first line gets no <br> prefix).
    """
    lines = [ln for ln in text.split("\n") if ln != ""]
    fence_count = 0
    # Ordered replacements applied to lines inside an open code fence.
    escapes = (
        ("`", "\\`"), ("<", "&lt;"), (">", "&gt;"), (" ", "&nbsp;"),
        ("*", "&ast;"), ("_", "&lowbar;"), ("-", "&#45;"), (".", "&#46;"),
        ("!", "&#33;"), ("(", "&#40;"), (")", "&#41;"), ("$", "&#36;"),
    )
    for idx, line in enumerate(lines):
        if "```" in line:
            fence_count += 1
            pieces = line.split('`')
            if fence_count % 2 == 1:
                # Opening fence: the language tag follows the backticks.
                lines[idx] = f'<pre><code class="language-{pieces[-1]}">'
            else:
                lines[idx] = '<br></code></pre>'
        elif idx > 0:
            if fence_count % 2 == 1:
                for old, new in escapes:
                    line = line.replace(old, new)
            lines[idx] = "<br>" + line
    return "".join(lines)
93
+
94
+
def construct_text(role, text):
    """Return an OpenAI chat-format message for *role* carrying *text*."""
    return {"role": role, "content": text}


def construct_user(text):
    """Shorthand: wrap *text* as a ``user`` message."""
    return construct_text("user", text)


def construct_system(text):
    """Shorthand: wrap *text* as a ``system`` message."""
    return construct_text("system", text)


def construct_assistant(text):
    """Shorthand: wrap *text* as an ``assistant`` message."""
    return construct_text("assistant", text)
109
+
110
+
def construct_token_message(token, stream=False):
    """Return the status-bar token counter text for *token*.

    *stream* is accepted for call-site symmetry but does not alter the output.
    """
    return "Token 计数: " + str(token)
113
+
114
+
def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model):
    """POST a chat-completion request and return the raw requests.Response.

    *history* is not mutated: the system prompt is prepended to a copy.
    The HTTP request always uses stream=True so the body can be iterated;
    *stream* controls the API-side streaming flag and the timeout choice.
    """
    auth_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }
    messages = [construct_system(system_prompt)] + history
    payload = {
        "model": selected_model,
        "messages": messages,
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": stream,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }
    request_timeout = timeout_streaming if stream else timeout_all
    return requests.post(API_URL, headers=auth_headers, json=payload, stream=True, timeout=request_timeout)
139
+
140
+
def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
    """Streaming chat generator.

    Appends the user turn plus an empty assistant turn to *history*/*chatbot*,
    then yields (chatbot, history, status_text, all_token_counts) after every
    received delta. All four containers are mutated in place.
    """
    def get_return_value():
        # Snapshot of the four values every yield emits.
        return chatbot, history, status_text, all_token_counts

    logging.info("实时回答模式")
    partial_words = ""
    counter = 0
    status_text = "开始实时传输回答……"
    history.append(construct_user(inputs))
    history.append(construct_assistant(""))  # placeholder, filled in as deltas arrive
    chatbot.append((parse_text(inputs), ""))
    user_token_count = 0
    if len(all_token_counts) == 0:
        # First exchange: the system prompt is billed once, so count it here.
        system_prompt_token_count = count_token(system_prompt)
        user_token_count = count_token(inputs) + system_prompt_token_count
    else:
        user_token_count = count_token(inputs)
    all_token_counts.append(user_token_count)
    logging.info(f"输入token计数: {user_token_count}")
    yield get_return_value()
    try:
        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True, selected_model)
    except requests.exceptions.ConnectTimeout:
        status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
        yield get_return_value()
        return
    except requests.exceptions.ReadTimeout:
        status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
        yield get_return_value()
        return

    yield get_return_value()
    error_json_str = ""

    # Server-sent events: each line is b"data: {...}"; the first line is skipped.
    for chunk in tqdm(response.iter_lines()):
        if counter == 0:
            counter += 1
            continue
        counter += 1
        # check whether each line is non-empty
        if chunk:
            chunk = chunk.decode()
            chunklength = len(chunk)
            try:
                # Strip the 6-char "data: " prefix before parsing.
                chunk = json.loads(chunk[6:])
            except json.JSONDecodeError:
                logging.info(chunk)
                error_json_str += chunk
                status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}"
                yield get_return_value()
                continue
            # decode each line as response data is in bytes
            if chunklength > 6 and "delta" in chunk['choices'][0]:
                finish_reason = chunk['choices'][0]['finish_reason']
                status_text = construct_token_message(sum(all_token_counts), stream=True)
                if finish_reason == "stop":
                    yield get_return_value()
                    break
                try:
                    partial_words = partial_words + chunk['choices'][0]["delta"]["content"]
                except KeyError:
                    # Delta without content: most likely the token limit was hit.
                    status_text = standard_error_msg + "API回复中找不到内容。很可能是Token计数达到上限。请重置对话。当前Token计数: " + str(sum(all_token_counts))
                    yield get_return_value()
                    break
                history[-1] = construct_assistant(partial_words)
                chatbot[-1] = (parse_text(inputs), parse_text(partial_words))
                # NOTE(review): counts one token per delta chunk — an approximation.
                all_token_counts[-1] += 1
                yield get_return_value()
209
+
210
+
def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
    """Non-streaming chat: one blocking request.

    Mutates history/chatbot/all_token_counts in place and returns
    (chatbot, history, status_text, all_token_counts).
    """
    logging.info("一次性回答模式")
    history.append(construct_user(inputs))
    history.append(construct_assistant(""))  # placeholder, replaced below
    chatbot.append((parse_text(inputs), ""))
    all_token_counts.append(count_token(inputs))
    try:
        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False, selected_model)
    except requests.exceptions.ConnectTimeout:
        status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
        return chatbot, history, status_text, all_token_counts
    except requests.exceptions.ReadTimeout:
        # Fix: stream_predict handles read timeouts but this path did not,
        # letting the exception escape to the UI; handle it consistently.
        status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
        return chatbot, history, status_text, all_token_counts
    except requests.exceptions.ProxyError:
        status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
        return chatbot, history, status_text, all_token_counts
    except requests.exceptions.SSLError:
        status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
        return chatbot, history, status_text, all_token_counts
    response = json.loads(response.text)
    content = response["choices"][0]["message"]["content"]
    history[-1] = construct_assistant(content)
    chatbot[-1] = (parse_text(inputs), parse_text(content))
    total_token_count = response["usage"]["total_tokens"]
    # Store only this exchange's share so the running list sums to the API total.
    all_token_counts[-1] = total_token_count - sum(all_token_counts)
    status_text = construct_token_message(total_token_count)
    return chatbot, history, status_text, all_token_counts
236
+
237
+
def predict(openai_api_key, invite_code, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model = MODELS[0], use_websearch_checkbox = False, should_check_token_count = True): # repetition_penalty, top_k
    """Top-level chat generator wired to the Gradio UI.

    Yields (chatbot, history, status_text, all_token_counts) tuples. Delegates
    to stream_predict or predict_all, persists the finished exchange via
    holo_query_insert_chat_message, and triggers reduce_token_size when the
    token budget is exceeded. All list arguments are mutated in place.
    """
    # logging.info("输入为:" +colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
    if use_websearch_checkbox:
        # Prepend DuckDuckGo results to the query via the websearch template.
        results = ddg(inputs, max_results=3)
        web_results = []
        for idx, result in enumerate(results):
            logging.info(f"搜索结果{idx + 1}:{result}")
            web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
        web_results = "\n\n".join(web_results)
        today = datetime.datetime.today().strftime("%Y-%m-%d")
        inputs = websearch_prompt.replace("{current_date}", today).replace("{query}", inputs).replace("{web_results}", web_results)
    if len(openai_api_key) != 51:
        # Valid OpenAI keys are 51 chars; report the problem without calling the API.
        status_text = standard_error_msg + no_apikey_msg
        logging.info(status_text)
        chatbot.append((parse_text(inputs), ""))
        if len(history) == 0:
            history.append(construct_user(inputs))
            history.append("")  # NOTE(review): bare "" placeholder, not a message dict — confirm downstream tolerates it
            all_token_counts.append(0)
        else:
            history[-2] = construct_user(inputs)
        yield chatbot, history, status_text, all_token_counts
        return
    if stream:
        yield chatbot, history, "开始生成回答……", all_token_counts
    if stream:
        logging.info("使用流式传输")
        # NOTE: `iter` shadows the builtin; kept as-is in this doc-only pass.
        iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
        for chatbot, history, status_text, all_token_counts in iter:
            yield chatbot, history, status_text, all_token_counts
    else:
        logging.info("不使用流式传输")
        chatbot, history, status_text, all_token_counts = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
        yield chatbot, history, status_text, all_token_counts
    logging.info(f"传输完毕。当前token计数为{all_token_counts}")
    if len(history) > 1 and history[-1]['content'] != inputs:
        # Persist the finished exchange (best-effort token count).
        # logging.info("回答为:" +colorama.Fore.BLUE + f"{history[-1]['content']}" + colorama.Style.RESET_ALL)
        try:
            token = all_token_counts[-1]
        except:
            token = 0
        holo_query_insert_chat_message(invite_code, inputs, history[-1]['content'], token, history)
    if stream:
        max_token = max_token_streaming
    else:
        max_token = max_token_all
    if sum(all_token_counts) > max_token and should_check_token_count:
        # Over budget: have the model summarize, shrinking history/token_count.
        status_text = f"精简token中{all_token_counts}/{max_token}"
        logging.info(status_text)
        yield chatbot, history, status_text, all_token_counts
        iter = reduce_token_size(openai_api_key, invite_code, system_prompt, history, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model=selected_model, hidden=True)
        for chatbot, history, status_text, all_token_counts in iter:
            status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
            yield chatbot, history, status_text, all_token_counts
292
+
293
+
294
def retry(openai_api_key, invite_code, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model = MODELS[0]):
    """Retry the last exchange: drop the latest answer and re-ask its question.

    Pops the last assistant reply and its user prompt off `history` (and the
    matching token count), then re-runs predict() with the recovered prompt.
    Yields the same (chatbot, history, status_text, token_count) tuples as
    predict().
    """
    logging.info("重试中……")
    if not history:
        # Nothing to retry yet.
        yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
        return
    # Discard the last assistant reply, then recover the user prompt.
    history.pop()
    inputs = history.pop()["content"]
    token_count.pop()
    predict_iter = predict(openai_api_key, invite_code, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream, selected_model=selected_model)
    logging.info("重试完毕")
    yield from predict_iter
306
+
307
+
308
def reduce_token_size(openai_api_key, invite_code, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model = MODELS[0], hidden=False):
    """Shrink the running context by asking the model to summarize it.

    Sends `summarize_prompt` through predict(), then keeps only the final
    question/answer pair (the summary) and its token count. With hidden=True
    the summary exchange is also removed from the visible chatbot.
    Yields (chatbot, history, status_text, token_count) like predict().
    """
    logging.info("开始减少token数量……")
    summary_iter = predict(openai_api_key, invite_code, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, selected_model = selected_model, should_check_token_count=False)
    logging.info(f"chatbot: {chatbot}")
    for chatbot, history, status_text, previous_token_count in summary_iter:
        # Keep only the summary exchange and its token count.
        history = history[-2:]
        token_count = previous_token_count[-1:]
        if hidden:
            chatbot.pop()
        yield chatbot, history, construct_token_message(sum(token_count), stream=stream), token_count
    logging.info("减少token数量完毕")
319
+
320
+
321
def delete_last_conversation(chatbot, history, previous_token_count):
    """Remove the most recent exchange from chatbot, history and token counts.

    When the last chatbot entry contains an error message, only that chatbot
    entry is removed — the failed exchange never produced a history/token pair.
    Returns (chatbot, history, previous_token_count, token_message).
    """
    if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
        logging.info("由于包含报错信息,只删除chatbot记录")
        chatbot.pop()
        # Bug fix: the original returned a 2-tuple (chatbot, history) here
        # while the normal path returns a 4-tuple — inconsistent arity for
        # whoever unpacks the result. Return the full 4-tuple in both paths.
        return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count))
    if len(history) > 0:
        logging.info("删除了一组对话历史")
        # One exchange = user message + assistant message.
        history.pop()
        history.pop()
    if len(chatbot) > 0:
        logging.info("删除了一组chatbot对话")
        chatbot.pop()
    if len(previous_token_count) > 0:
        logging.info("删除了一组对话的token计数记录")
        previous_token_count.pop()
    return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count))
337
+
338
+
339
def save_chat_history(filename, system, history, chatbot):
    """Save the current conversation to HISTORY_DIR/<filename>.json.

    Silently does nothing when filename is empty; appends ".json" when the
    extension is missing. The file holds {"system", "history", "chatbot"}.
    """
    logging.info("保存对话历史中……")
    if filename == "":
        return
    if not filename.endswith(".json"):
        filename += ".json"
    os.makedirs(HISTORY_DIR, exist_ok=True)
    json_s = {"system": system, "history": history, "chatbot": chatbot}
    logging.info(json_s)
    # Bug fix: encoding="utf-8" is required — ensure_ascii=False writes raw
    # non-ASCII characters, which the platform default codec (e.g. cp1252 on
    # Windows) may be unable to encode.
    with open(os.path.join(HISTORY_DIR, filename), "w", encoding="utf-8") as f:
        json.dump(json_s, f, ensure_ascii=False, indent=4)
    logging.info("保存对话历史完毕")
351
+
352
+
353
def load_chat_history(filename, system, history, chatbot):
    """Load a saved conversation from HISTORY_DIR/<filename>.

    Converts the legacy format (a flat list of strings alternating
    user/assistant) to the message-dict format on the fly. On a missing file
    the inputs are returned unchanged.
    Returns (filename, system, history, chatbot).
    """
    logging.info("加载对话历史中……")
    try:
        # encoding="utf-8" matches save_chat_history, which writes UTF-8.
        with open(os.path.join(HISTORY_DIR, filename), "r", encoding="utf-8") as f:
            json_s = json.load(f)
        try:
            # isinstance instead of type(...) == str: idiomatic and
            # subclass-safe.
            if isinstance(json_s["history"][0], str):
                logging.info("历史记录格式为旧版,正在转换……")
                new_history = []
                for index, item in enumerate(json_s["history"]):
                    # Even indices are user turns, odd are assistant turns.
                    if index % 2 == 0:
                        new_history.append(construct_user(item))
                    else:
                        new_history.append(construct_assistant(item))
                json_s["history"] = new_history
                logging.info(new_history)
        except (KeyError, IndexError):
            # Empty or missing history — nothing to convert.
            pass
        logging.info("加载对话历史完毕")
        return filename, json_s["system"], json_s["history"], json_s["chatbot"]
    except FileNotFoundError:
        logging.info("没有找到对话历史文件,不执行任何操作")
        return filename, system, history, chatbot
377
+
378
+
379
def sorted_by_pinyin(list):
    """Sort strings by the first pinyin letter of their first character.

    NOTE(review): the parameter shadows the builtin `list`; the name is kept
    because it is part of the function's public interface.
    """
    return sorted(list, key=lambda item: lazy_pinyin(item)[0][0])
381
+
382
+
383
def get_file_names(dir, plain=False, filetypes=[".json"]):
    """List file names in *dir* matching the given suffixes, pinyin-sorted.

    Returns a plain list when plain=True, otherwise a gr.Dropdown update.
    A missing directory or an empty match yields [""] so the dropdown always
    has one entry. (The parameter names shadow builtins but are part of the
    public interface and kept as-is.)
    """
    logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
    try:
        entries = os.listdir(dir)
    except FileNotFoundError:
        entries = []
    # Same ordering as the original: all matches for the first suffix, then
    # the next, etc., before the final pinyin sort.
    files = [name for suffix in filetypes for name in entries if name.endswith(suffix)]
    files = sorted_by_pinyin(files)
    if not files:
        files = [""]
    if plain:
        return files
    return gr.Dropdown.update(choices=files)
398
+
399
+
400
def get_history_names(plain=False):
    """Return saved-conversation file names (thin wrapper over get_file_names)."""
    logging.info("获取历史记录文件名列表")
    return get_file_names(HISTORY_DIR, plain=plain)
403
+
404
+
405
def load_template(filename, mode=0):
    """Load a prompt-template file (.json list of {act, prompt} or a CSV).

    mode 0: return (dict, gr.Dropdown update); mode 1: sorted template names;
    mode 2: {name: prompt} dict. CSV files are assumed to have a header row,
    which is skipped.
    """
    # Fix: the log message had lost its {filename} placeholder (it read a
    # literal "(unknown)"), making the log useless for debugging.
    logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
    lines = []
    logging.info("Loading template...")
    if filename.endswith(".json"):
        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
            lines = json.load(f)
            lines = [[i["act"], i["prompt"]] for i in lines]
    else:
        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile:
            reader = csv.reader(csvfile)
            lines = list(reader)
            # Drop the CSV header row.
            lines = lines[1:]
    if mode == 1:
        return sorted_by_pinyin([row[0] for row in lines])
    elif mode == 2:
        return {row[0]: row[1] for row in lines}
    else:
        choices = sorted_by_pinyin([row[0] for row in lines])
        return {row[0]: row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])
425
+
426
+
427
def get_template_names(plain=False):
    """Return template file names (.csv / .json) from TEMPLATES_DIR."""
    logging.info("获取模板文件名列表")
    # Fix: "json" was missing its leading dot. str.endswith("json") happened
    # to match ".json" files anyway, but ".json" states the intent and matches
    # get_file_names' default suffix style.
    return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", ".json"])
430
+
431
+
432
def get_template_content(templates, selection, original_system_prompt):
    """Return the template text for *selection*, else the original prompt.

    Falls back to *original_system_prompt* when the selection is unknown or
    *templates* is not a usable mapping (e.g. not loaded yet).
    """
    logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
    try:
        return templates[selection]
    except (KeyError, TypeError):
        # Narrowed from a bare `except`: only "missing key" and "templates is
        # not subscriptable" are expected fallback conditions.
        return original_system_prompt
438
+
439
+
440
def reset_state():
    """Clear chatbot, history and token counts; reset the token counter text."""
    logging.info("重置状态")
    empty_chatbot, empty_history, empty_tokens = [], [], []
    return empty_chatbot, empty_history, empty_tokens, construct_token_message(0)
443
+
444
+
445
def reset_textbox():
    """Clear the user input textbox."""
    return gr.update(value='')
bin_public/utils/utils_db.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import psycopg2
2
+ import datetime
3
+ #from bin_public.config.config import HOLOGRES_CONFIG
4
+ from bin_public.config.presets import *
5
+ from dateutil import tz
6
+ import os
7
+
8
+
9
def current_time(type):
    """Return the current local time formatted per *type*.

    'ymd' -> 'YYYY-MM-DD'; 'ymdhms' -> 'YYYY-MM-DD HH:MM:SS'; any other value
    returns None (implicitly), matching the original behavior. (The parameter
    shadows the builtin `type` but is part of the public interface.)
    """
    formats = {'ymd': "%Y-%m-%d", 'ymdhms': "%Y-%m-%d %H:%M:%S"}
    fmt = formats.get(type)
    if fmt is not None:
        return datetime.datetime.now().strftime(fmt)
14
+
15
+
16
+ # hologres 基础函数:查询
17
+ def holo_query_func(run_sql, is_query=0):
18
+ conn = psycopg2.connect(host=os.environ['host'],
19
+ port=os.environ['port'],
20
+ dbname=os.environ['dbname'],
21
+ user=os.environ['ak'],
22
+ password=os.environ['sk'])
23
+ cur = conn.cursor()
24
+ cur.execute(run_sql)
25
+ if is_query:
26
+ data = cur.fetchall()
27
+ cur.close()
28
+ conn.close()
29
+ if is_query:
30
+ return data
31
+
32
def holo_query_account_mapping(invite_code):
    """Resolve an invite code to its mapped API key via s_account_invite_code.

    Returns (status_text, mapping_ak); mapping_ak is None when the code is
    unknown, expired, or disabled (status != '1').
    """
    # Security fix: escape single quotes SQL-style ('' ) so a user-supplied
    # code cannot break out of the string literal — holo_query_func offers no
    # parameter binding, so interpolation must at least be quote-safe.
    safe_code = str(invite_code).replace("'", "''")
    run_sql = f"""
    select end_date, status, mapping_ak
    from s_account_invite_code
    where invite_code = '{safe_code}'
    order by gmt_modify desc
    limit 1
    """
    data = holo_query_func(run_sql, is_query=1)
    # No row: unknown invite code.
    if len(data) == 0:
        return standard_error_msg + no_invite_code_msg, None
    end_date, status, mapping_ak = data[0]
    # NOTE(review): end_date is compared as a 'YYYYMMDD' string — confirm the
    # column is stored in that exact format; status '1' means active.
    if end_date < datetime.datetime.now().strftime("%Y%m%d") or status != '1':
        return standard_error_msg + no_useful_invite_code_msg, None
    return 'Success status: ready', mapping_ak
54
+
55
+
56
def key_preprocessing(keyTxt):
    """Classify the textbox content as an invite code or a raw API key.

    Yields exactly one (status_display, keyTxt, invite_code) tuple:
    - "ckgsb_"-prefixed input is treated as an invite code and resolved to an
      API key via the database;
    - otherwise the input must be a 51-character API key, else an error
      status is reported.
    """
    invite_code = keyTxt
    # Placeholder rule until real invite codes arrive: the prefix marks a code.
    if keyTxt.startswith("ckgsb_"):
        status_display, keyTxt = holo_query_account_mapping(keyTxt)
        yield status_display, keyTxt, invite_code
    elif len(keyTxt) != 51:
        yield standard_error_msg + no_apikey_msg, keyTxt, invite_code
    else:
        yield 'Success status: ready', keyTxt, invite_code
70
+
71
+
72
def holo_query_insert_chat_message(invite_code, prompt, response, all_token_cnt, history):
    """Persist one chat exchange into s_account_chat_message.

    Stores the invite code, prompt/response text, token count, full history
    and its length (chat_seq), stamped with Asia/Shanghai wall-clock time.
    """
    def esc(value):
        # SQL-standard escaping: double single quotes. The original replaced
        # ' with ", which silently corrupted any quoted text it stored.
        return str(value).replace("'", "''")

    # Bug fix: the zone was misspelled 'Asina/Shanghai', so tz.gettz()
    # returned None and the timestamp stayed in the server's local time.
    # now(tz) also yields the actual Shanghai time rather than relabeling a
    # naive local timestamp via replace(tzinfo=...).
    gmt_create = datetime.datetime.now(tz.gettz('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S")
    run_sql = f"""
    insert into s_account_chat_message(
        gmt_create
        ,invite_code
        ,prompt
        ,response
        ,all_token_cnt
        ,history
        ,chat_seq
        ,log_timestamp
    )
    select
        '{gmt_create}' as gmt_create
        ,'{esc(invite_code)}' as invite_code
        ,'{esc(prompt)}' as prompt
        ,'{esc(response)}' as response
        ,'{esc(all_token_cnt)}' as all_token_cnt
        ,'{esc(history)}' as history
        ,'{len(history)}' as chat_seq
        ,localtimestamp as log_timestamp
    """
    holo_query_func(run_sql, is_query=0)
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ mdtex2html
3
+ pypinyin
4
+ tiktoken
5
+ socksio
6
+ tqdm
7
+ colorama
8
+ duckduckgo_search
9
+ jieba
10
+ psycopg2-binary
11
+ python-dateutil