chaowei100 commited on
Commit
6462634
1 Parent(s): f1a6956

add project files

Browse files
Taiyi-Stable-Diffusion-1B-Chinese-v0.1 ADDED
@@ -0,0 +1 @@
 
 
1
+ Subproject commit 373a5f530de018c086e5acff83519f40fe3df1ce
app.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Gradio web app: a ChatGPT-style chat tab plus an AI-painting (Stable Diffusion) tab.
import os; os.environ['no_proxy'] = '*'  # avoid unexpected pollution from proxy settings
import gradio as gr
from predict import predict
from funtional_picture import infer_text2img
from toolbox import format_io, find_free_port, get_conf
import numpy as np

# Tip: keep secrets (API key, proxy URL) in a private config_private.py so they
# are not accidentally pushed to github.
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT = \
    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT')

# When WEB_PORT is -1, pick a random free port instead.
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
if not AUTHENTICATION: AUTHENTICATION = None

initial_prompt = "Serve me as a writing and programming assistant."
title_html = "<h1 align=\"center\">展示你的机器学习模型</h1>"
description = """"""

# Query log; python >= 3.9 recommended.
import logging
os.makedirs("work_log", exist_ok=True)
# BUG FIX: the fallback previously wrote into "gpt_log/", a directory that is
# never created here, so the fallback itself raised FileNotFoundError.  Both
# paths now target the directory created above; the fallback merely drops the
# `encoding` argument (older basicConfig does not accept it).
try:
    logging.basicConfig(filename="work_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
except Exception:
    logging.basicConfig(filename="work_log/chat_secrets.log", level=logging.INFO)
# BUG FIX: the message used to claim ./gpt_log/ although logs go to ./work_log/.
print("所有问询记录将自动保存在本地目录./work_log/chat_secrets.log, 请注意自我隐私保护哦!")

# Ordinary functional modules (prompt templates behind the basic-function buttons).
from functional import get_functionals
functional = get_functionals()

# Convert markdown in chat messages before display.
gr.Chatbot.postprocess = format_io

# Cosmetic adjustments (colors / fonts).
from theme import adjust_theme, advanced_css
set_theme = adjust_theme()

cancel_handles = []
with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
    gr.HTML(title_html)
    with gr.Tab("ChatGPT"):
        with gr.Row().style(equal_height=True):
            with gr.Column(scale=2):
                chatbot = gr.Chatbot()
                chatbot.style(height=CHATBOT_HEIGHT/2)
                history = gr.State([])
                with gr.Row():
                    txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
                with gr.Row():
                    submitBtn = gr.Button("提交", variant="primary")
                with gr.Row():
                    resetBtn = gr.Button("重置", variant="secondary")
                    resetBtn.style(size="sm")
                    stopBtn = gr.Button("停止", variant="secondary")
                    stopBtn.style(size="sm")

            with gr.Column(scale=1):
                with gr.Row():
                    from check_proxy import check_proxy
                    status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {check_proxy(proxies)}")
                with gr.Accordion("基础功能区", open=True) as area_basic_fn:
                    with gr.Row():
                        for k in functional:
                            variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                            functional[k]["Button"] = gr.Button(k, variant=variant)
                with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=True):
                    system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                    top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True, label="Top-p (nucleus sampling)",)
                    temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
                    checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
                    gr.Markdown(description)
    with gr.Tab("AI绘画"):
        examples = [
            ["铁马冰河入梦来, 梦幻, 插画"],
            ["东临碣石, 以观沧海, 波涛汹涌, 插画"],
            ["孤帆远影碧空尽,惟见长江天际流,油画"],
            ["动漫化,帅气,插画"],
            ["女孩背影, 日落, 唯美插画"],
        ]
        with gr.Row():
            with gr.Column(scale=1, ):
                image_out = gr.Image(label='输出(output)')
            with gr.Column(scale=1, ):
                image_in = gr.Image(source='upload', elem_id="image_upload", type="pil", label="参考图(非必须)(ref)")
                prompt = gr.Textbox(label='提示词(prompt)')
                submit_btn = gr.Button("生成图像(Generate)")
                with gr.Row(scale=0.5):
                    guide = gr.Slider(2, 15, value=7, step=0.1, label='文本引导强度(guidance scale)')
                    steps = gr.Slider(10, 30, value=20, step=1, label='迭代次数(inference steps)')
                    width = gr.Slider(384, 640, value=512, step=64, label='宽度(width)')
                    height = gr.Slider(384, 640, value=512, step=64, label='高度(height)')
                    strength = gr.Slider(0, 1.0, value=0.8, step=0.02, label='参考图改变程度(strength)')
                ex = gr.Examples(examples, fn=infer_text2img, inputs=[prompt, guide, steps, width, height],
                                 outputs=image_out)

        submit_btn.click(fn=infer_text2img, inputs=[prompt, guide, steps, width, height, image_in, strength],
                         outputs=image_out)

    # demo.queue(concurrency_count=1, max_size=8).launch()

    # Show/hide the function panels when the checkbox group changes.
    def fn_area_visibility(a):
        ret = {}
        ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
        return ret
    checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn])

    # Frequently reused combinations of component handles.
    input_combo = [txt, top_p, temperature, chatbot, history, system_prompt]
    output_combo = [chatbot, history, status]
    predict_args = dict(fn=predict, inputs=input_combo, outputs=output_combo)
    empty_txt_args = dict(fn=lambda: "", inputs=[], outputs=[txt])  # clears the input box after submit
    # Submit / reset buttons.
    cancel_handles.append(txt.submit(**predict_args))  # ; txt.submit(**empty_txt_args) to clear the box after submit
    cancel_handles.append(submitBtn.click(**predict_args))  # ; submitBtn.click(**empty_txt_args) to clear the box after submit
    resetBtn.click(lambda: ([], [], "已重置"), None, output_combo)
    # Register callbacks for the basic-function buttons.
    for k in functional:
        click_handle = functional[k]["Button"].click(predict, [*input_combo, gr.State(True), gr.State(k)], output_combo)
        # BUG FIX: the handle was previously appended twice.
        cancel_handles.append(click_handle)
    # The stop button cancels every in-flight generation registered above.
    stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)


# gradio's inbrowser flag is unreliable; open the browser manually instead.
def auto_opentab_delay():
    import threading, webbrowser, time
    print(f"如果浏览器没有自动打开,请复制并转到以下URL: http://localhost:{PORT}")
    def open():
        time.sleep(2)
        webbrowser.open_new_tab(f"http://localhost:{PORT}")
    threading.Thread(target=open, name="open-browser", daemon=True).start()

auto_opentab_delay()
demo.title = "展示你的机器学习模型"
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, auth=AUTHENTICATION)
check_proxy.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
def check_proxy(proxies):
    """Query ipapi.co through *proxies* and report where the proxy exits.

    proxies: requests-style proxy dict, or None for a direct connection.
    Returns a human-readable status string; never raises on network failure.
    """
    import requests
    proxies_https = proxies['https'] if proxies is not None else '无'
    try:
        response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
        data = response.json()
        print(f'查询代理的地理位置,返回的结果是{data}')
        if 'country_name' in data:
            country = data['country_name']
            result = f"代理配置 {proxies_https}, 代理所在地:{country}"
        elif 'error' in data:
            result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限"
        else:
            # BUG FIX: with neither key present, `result` used to be unbound;
            # the resulting NameError fell into the bare except and was
            # mis-reported as a timeout.
            result = f"代理配置 {proxies_https}, 代理所在地:未知"
        print(result)
        return result
    except Exception:
        result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
        print(result)
        return result


if __name__ == '__main__':
    import os; os.environ['no_proxy'] = '*'  # avoid unexpected pollution from proxy settings
    from toolbox import get_conf
    proxies, = get_conf('proxies')
    check_proxy(proxies)
config.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# [step 1]>> e.g. API_KEY = "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" (that key is invalid)
# SECURITY FIX: a live OpenAI API key was committed here.  Never commit real
# secrets — the key is now read from the environment, with an obviously-invalid
# placeholder as fallback; put your real key in config_private.py or the
# OPENAI_API_KEY environment variable.  The leaked key must be revoked.
import os
API_KEY = os.environ.get("OPENAI_API_KEY", "sk-REPLACE-WITH-YOUR-KEY")

# [step 2]>> set to True to route requests through a proxy; leave False when
# deploying directly on an overseas server.
USE_PROXY = True
if USE_PROXY:
    # Format: [protocol]://[address]:[port].  Remember to set USE_PROXY = True.
    # e.g. "socks5h://localhost:11284"
    # [protocol] usually socks5h or http (v2**y / ss* default to socks5h, cl**h to http)
    # [address]  localhost or 127.0.0.1 when the proxy runs on this machine
    # [port]     found in your proxy software's settings

    # Proxy address: check your proxy client for protocol (socks5/http),
    # address (localhost) and port (e.g. 7890).
    proxies = {
        # [protocol]:// [address] :[port]
        "http": "socks5h://localhost:7890",
        "https": "socks5h://localhost:7890",
    }
else:
    proxies = None


# [step 3]>> the settings below tune the experience; defaults are usually fine.
# Height of the chat window.
CHATBOT_HEIGHT = 1115

# How long to wait for OpenAI before declaring a timeout (seconds).
TIMEOUT_SECONDS = 25

# Web server port; -1 means pick a random free port.
WEB_PORT = -1

# Retry limit when OpenAI does not respond (network lag, dead proxy, bad key).
MAX_RETRY = 2

# OpenAI model (gpt-4 is only available to approved accounts).
LLM_MODEL = "gpt-3.5-turbo"

# OpenAI API endpoint.
API_URL = "https://api.openai.com/v1/chat/completions"

# Number of parallel worker threads.
CONCURRENT_COUNT = 100

# Username/password pairs for the web UI (feature is unstable; leave empty for
# local use).  e.g. [("username", "password"), ("username2", "password2"), ...]
AUTHENTICATION = []
examples//347/244/272/344/276/2131.jpg ADDED
functional.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Button color mapping:
#   'primary'   -> primary_hue in theme.py
#   'secondary' -> neutral_hue in theme.py (the default)
#   'stop'      -> color_er in theme.py
from toolbox import clear_line_break


def get_functionals():
    """Return the prompt-template registry behind the basic-function buttons.

    Each entry maps a button label to a dict with:
      Prefix     -- text prepended to the user input
      Suffix     -- text appended to the user input
      Color      -- optional button color (defaults to 'secondary')
      PreProcess -- optional callable applied to the input first
    """
    prompts = {}
    prompts["英语学术润色"] = {
        "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
                  r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
                  r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
        "Suffix": r"",
        "Color": r"secondary",  # button color
    }
    prompts["中文学术润色"] = {
        "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
                  r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
        "Suffix": r"",
    }
    prompts["查找语法错误"] = {
        "Prefix": r"Can you help me ensure that the grammar and the spelling is correct? " +
                  r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good." +
                  r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " +
                  r"put the original text the first column, " +
                  r"put the corrected text in the second column and highlight the key words you fixed.""\n"
                  r"Example:""\n"
                  r"Paragraph: How is you? Do you knows what is it?""\n"
                  r"| Original sentence | Corrected sentence |""\n"
                  r"| :--- | :--- |""\n"
                  r"| How **is** you? | How **are** you? |""\n"
                  r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n"
                  r"Below is a paragraph from an academic paper. "
                  r"You need to report all grammar and spelling mistakes as the example before."
                  + "\n\n",
        "Suffix": r"",
        "PreProcess": clear_line_break,  # preprocessing: strip line breaks
    }
    prompts["中译英"] = {
        "Prefix": r"Please translate following sentence to English:" + "\n\n",
        "Suffix": r"",
    }
    prompts["学术中英互译"] = {
        "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
                  r"I will provide you with some paragraphs in one language " +
                  r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
                  r"Do not repeat the original provided paragraphs after translation. " +
                  r"You should use artificial intelligence tools, " +
                  r"such as natural language processing, and rhetorical knowledge " +
                  r"and experience about effective writing techniques to reply. " +
                  r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
        "Suffix": "",
        "Color": "secondary",
    }
    prompts["英译中"] = {
        "Prefix": r"请翻译成中文:" + "\n\n",
        "Suffix": r"",
    }
    prompts["找图片"] = {
        "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
                  r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
        "Suffix": r"",
    }
    prompts["解释代码"] = {
        "Prefix": r"请解释以下代码:" + "\n```\n",
        "Suffix": "\n```\n",
    }
    return prompts
funtional_picture.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Stable-Diffusion backend (text→image plus optional image→image).
from PIL import Image
import torch
from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionInpaintPipeline,
)

device = "cuda"
model_id = "./Taiyi-Stable-Diffusion-1B-Chinese-v0.1"

# The img2img pipeline reuses the text2img components, so the model weights
# are loaded only once.
pipe_text2img = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)
pipe_img2img = StableDiffusionImg2ImgPipeline(**pipe_text2img.components).to(device)


def infer_text2img(prompt, guide, steps, width, height, image_in, strength):
    """Generate a single image from *prompt*.

    When *image_in* (a PIL image) is given, run img2img with the given
    *strength*; otherwise run plain text2img at *width* x *height*.
    *guide* is the guidance scale and *steps* the inference step count.
    Returns the generated PIL image.
    """
    if image_in is None:
        result = pipe_text2img(prompt, width=width, height=height,
                               guidance_scale=guide, num_inference_steps=steps)
    else:
        reference = image_in.convert("RGB").resize((width, height))
        result = pipe_img2img(prompt, image=reference, strength=strength,
                              guidance_scale=guide, num_inference_steps=steps)
    return result.images[0]
predict.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Inspired by the project https://github.com/GaiZhenbiao/ChuanhuChatGPT

"""
This module mainly contains three functions.

Without multi-threading capability:
1. predict: used for normal conversation; fully interactive; not thread-safe

With multi-threading capability:
2. predict_no_ui: used by advanced experimental features; no live UI output;
   simple parameters; may run in parallel threads
3. predict_no_ui_long_connection: like predict_no_ui but streams, because long
   documents tended to drop the OpenAI connection; also thread-safe
"""

import json
import gradio as gr
import logging
import traceback
import requests
import importlib

# config_private.py holds secrets such as the API key and proxy URL.
# When present, config_private (kept out of git) overrides config.py.
from toolbox import get_conf
proxies, API_URL, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, LLM_MODEL = \
    get_conf('proxies', 'API_URL', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'LLM_MODEL')

# Canned message shown in the chat when the request times out.
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                  '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
def get_full_error(chunk, stream_response):
    """Drain *stream_response* and append every remaining piece to *chunk*.

    Used to recover the complete error body returned by OpenAI after a
    malformed stream frame.  Returns the accumulated bytes; never raises.
    """
    while True:
        try:
            chunk += next(stream_response)
        except StopIteration:
            break
        except Exception:
            # BUG FIX: was a bare `except:` (also swallowed KeyboardInterrupt).
            # Network errors while draining: keep whatever we already have.
            break
    return chunk
def predict(inputs, top_p, temperature, chatbot=None, history=None, system_prompt='',
            stream=True, additional_fn=None):
    """Send a query to chatGPT and stream the reply into the UI (generator).

    inputs        -- the current question
    top_p, temperature -- chatGPT sampling parameters
    history       -- prior conversation turns (too-long inputs/history overflow
                     the token limit and surface as an error frame)
    chatbot       -- the conversation list shown in the WebUI; mutate it and
                     yield to update the interface
    system_prompt -- system message for the conversation
    additional_fn -- which basic-function button was clicked (see functional.py)

    Yields (chatbot, history, status_text) tuples.
    """
    # BUG FIX: mutable default arguments ([]) were shared across calls;
    # replaced with None sentinels.
    if chatbot is None: chatbot = []
    if history is None: history = []

    if additional_fn is not None:
        import functional
        importlib.reload(functional)    # hot-reload the prompt templates
        functional = functional.get_functionals()
        if "PreProcess" in functional[additional_fn]:
            inputs = functional[additional_fn]["PreProcess"](inputs)  # optional preprocessing
        inputs = functional[additional_fn]["Prefix"] + inputs + functional[additional_fn]["Suffix"]

    if stream:
        raw_input = inputs
        logging.info(f'[raw_input] {raw_input}')
        chatbot.append((inputs, ""))
        yield chatbot, history, "等待响应"

    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt, stream)
    history.append(inputs); history.append(" ")

    retry = 0
    while True:
        try:
            # POST to the API endpoint with stream=True
            response = requests.post(API_URL, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS)
            break
        except Exception:
            # BUG FIX: was a bare `except:` (also swallowed KeyboardInterrupt).
            retry += 1
            chatbot[-1] = (chatbot[-1][0], timeout_bot_msg)
            retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
            yield chatbot, history, "请求超时" + retry_msg
            if retry > MAX_RETRY: raise TimeoutError

    gpt_replying_buffer = ""

    is_head_of_the_stream = True
    if stream:
        stream_response = response.iter_lines()
        while True:
            chunk = next(stream_response)
            if is_head_of_the_stream:
                # the first frame of the stream carries no content
                is_head_of_the_stream = False
                continue
            if not chunk:
                continue
            try:
                # BUG FIX: the chunk used to be json.loads()-ed three times
                # per frame; parse once and reuse.  [6:] strips the SSE
                # "data: " prefix.
                chunkjson = json.loads(chunk.decode()[6:])
                delta = chunkjson['choices'][0]["delta"]
                if len(delta) == 0:
                    # an empty delta marks the end of the stream
                    logging.info(f'[response] {gpt_replying_buffer}')
                    break
                status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
                # An exception here usually means the input was too long;
                # see get_full_error's output for details.
                gpt_replying_buffer = gpt_replying_buffer + delta["content"]
                history[-1] = gpt_replying_buffer
                chatbot[-1] = (history[-2], history[-1])
                yield chatbot, history, status_text

            except Exception:
                traceback.print_exc()
                yield chatbot, history, "Json解析不合常规"
                chunk = get_full_error(chunk, stream_response)
                error_msg = chunk.decode()
                if "reduce the length" in error_msg:
                    chatbot[-1] = (chatbot[-1][0], "[Local Message] Input (or history) is too long, please reduce input or clear history by refreshing this page.")
                    history = []  # clear the history
                elif "Incorrect API key" in error_msg:
                    chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key provided.")
                elif "exceeded your current quota" in error_msg:
                    chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由,拒绝服务.")
                else:
                    from toolbox import regular_txt_to_markdown
                    tb_str = '```\n' + traceback.format_exc() + '```'
                    chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}")
                yield chatbot, history, "Json异常" + error_msg
                return
def generate_payload(inputs, top_p, temperature, history, system_prompt, stream):
    """Assemble the HTTP headers and JSON payload for a chat-completion request.

    *history* holds alternating user/assistant turns; incomplete, empty or
    timed-out turns are filtered out.  Returns (headers, payload).
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {API_KEY}"
    }

    conversation_cnt = len(history) // 2

    messages = [{"role": "system", "content": system_prompt}]
    # Replay the recorded conversation as (user, assistant) message pairs.
    for index in range(0, 2 * conversation_cnt, 2):
        asked = {"role": "user", "content": history[index]}
        answered = {"role": "assistant", "content": history[index + 1]}
        if asked["content"] != "":
            # skip turns whose answer is empty or was only a timeout notice
            if answered["content"] == "": continue
            if answered["content"] == timeout_bot_msg: continue
            messages.append(asked)
            messages.append(answered)
        else:
            # empty question: the answer amends the previous assistant message
            messages[-1]['content'] = answered['content']

    # Finally append the current question.
    messages.append({"role": "user", "content": inputs})

    payload = {
        "model": LLM_MODEL,
        "messages": messages,
        "temperature": temperature,  # 1.0,
        "top_p": top_p,              # 1.0,
        "n": 1,
        "stream": stream,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    print(f" {LLM_MODEL} : {conversation_cnt} : {inputs}")
    return headers, payload
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ --extra-index-url https://download.pytorch.org/whl/cu113
2
+ torch
3
+ torchvision
4
+ git+https://github.com/huggingface/diffusers.git
5
+ transformers
6
+ accelerate
7
+ gradio>=3.23
8
+ requests[socks]
9
+ mdtex2html
10
+ Markdown
11
+ latex2mathml
show_math.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This program is written by: https://github.com/polarwinkel/mdtex2html
2
+
3
+ from latex2mathml.converter import convert as tex2mathml
4
+ import re
5
+
# HTML snippets injected when a formula is unterminated or fails to convert.
incomplete = '<font style="color:orange;" class="tooltip">&#9888;<span class="tooltiptext">formula incomplete</span></font>'
convError = '<font style="color:red" class="tooltip">&#9888;<span class="tooltiptext">LaTeX-convert-error</span></font>'

def convert(mdtex, extensions=None, splitParagraphs=True):
    """Recursively convert a Markdown/LaTeX mixture to HTML with MathML.

    Handles, in order: $$...$$ display formulas, $...$ inline formulas,
    \\[...\\] display equations and \\(...\\) inline equations.  Text with no
    formulas is returned unchanged.

    FIXES: regex patterns are now raw strings ('\\${2}' was an invalid escape,
    a SyntaxWarning on modern Python); maxsplit is passed by keyword
    (positional form is deprecated since 3.13); the mutable default
    `extensions=[]` is replaced with a None sentinel; bare excepts narrowed.
    """
    extensions = [] if extensions is None else extensions
    found = False
    # handle all paragraphs separately (prevents aftereffects)
    if splitParagraphs:
        parts = re.split("\n\n", mdtex)
        result = ''
        for part in parts:
            result += convert(part, extensions, splitParagraphs=False)
        return result
    # find first $$-formula:
    parts = re.split(r'\${2}', mdtex, maxsplit=2)
    if len(parts) > 1:
        found = True
        result = convert(parts[0], extensions, splitParagraphs=False) + '\n'
        try:
            result += '<div class="blockformula">' + tex2mathml(parts[1]) + '</div>\n'
        except Exception:
            result += '<div class="blockformula">' + convError + '</div>'
        if len(parts) == 3:
            result += convert(parts[2], extensions, splitParagraphs=False)
        else:
            result += '<div class="blockformula">' + incomplete + '</div>'
    # else find first $-formulas:
    else:
        parts = re.split(r'\${1}', mdtex, maxsplit=2)
        if len(parts) > 1 and not found:
            found = True
            try:
                mathml = tex2mathml(parts[1])
            except Exception:
                mathml = convError
            if parts[0].endswith('\n\n') or parts[0] == '':  # make sure textblock starts before formula!
                parts[0] = parts[0] + '&#x200b;'
            if len(parts) == 3:
                result = convert(parts[0] + mathml + parts[2], extensions, splitParagraphs=False)
            else:
                result = convert(parts[0] + mathml + incomplete, extensions, splitParagraphs=False)
        # else find first \[..\]-equation:
        else:
            parts = re.split(r'\\\[', mdtex, maxsplit=1)
            if len(parts) > 1 and not found:
                found = True
                result = convert(parts[0], extensions, splitParagraphs=False) + '\n'
                parts = re.split(r'\\\]', parts[1], maxsplit=1)
                try:
                    result += '<div class="blockformula">' + tex2mathml(parts[0]) + '</div>\n'
                except Exception:
                    result += '<div class="blockformula">' + convError + '</div>'
                if len(parts) == 2:
                    result += convert(parts[1], extensions, splitParagraphs=False)
                else:
                    result += '<div class="blockformula">' + incomplete + '</div>'
            # else find first \(..\)-equation:
            else:
                parts = re.split(r'\\\(', mdtex, maxsplit=1)
                if len(parts) > 1 and not found:
                    found = True
                    subp = re.split(r'\\\)', parts[1], maxsplit=1)
                    try:
                        mathml = tex2mathml(subp[0])
                    except Exception:
                        mathml = convError
                    if parts[0].endswith('\n\n') or parts[0] == '':  # make sure textblock starts before formula!
                        parts[0] = parts[0] + '&#x200b;'
                    if len(subp) == 2:
                        result = convert(parts[0] + mathml + subp[1], extensions, splitParagraphs=False)
                    else:
                        result = convert(parts[0] + mathml + incomplete, extensions, splitParagraphs=False)
    if not found:
        result = mdtex
    return result
theme.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
# Available gradio theme colors (all under gr.themes.utils.colors):
# slate, gray, zinc, neutral, stone, red, orange, amber, yellow, lime, green,
# emerald, teal, cyan, sky, blue, indigo, violet, purple, fuchsia, pink, rose.

def adjust_theme():
    """Build the customized gradio theme (orange primary, gray neutral, pink
    error colors, CJK-friendly fonts).

    Returns the theme object, or None when the installed gradio is too old to
    support theming (falls back to the default look).
    """
    try:
        color_er = gr.themes.utils.colors.pink
        set_theme = gr.themes.Default(
            primary_hue=gr.themes.utils.colors.orange,
            neutral_hue=gr.themes.utils.colors.gray,
            font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui", "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
            font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
        set_theme.set(
            # Colors
            input_background_fill_dark="*neutral_800",
            # Transition
            button_transition="none",
            # Shadows
            button_shadow="*shadow_drop",
            button_shadow_hover="*shadow_drop_lg",
            button_shadow_active="*shadow_inset",
            input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
            input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
            input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
            checkbox_label_shadow="*shadow_drop",
            block_shadow="*shadow_drop",
            form_gap_width="1px",
            # Button borders
            input_border_width="1px",
            input_background_fill="white",
            # Gradients
            stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
            stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
            error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
            error_background_fill_dark="*background_fill_primary",
            checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
            checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
            checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
            checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
            button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
            button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
            button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
            button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
            button_primary_border_color_dark="*primary_500",
            button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
            button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
            button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
            button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
            button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
            button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
            button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
            button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
            button_cancel_border_color=color_er.c200,
            button_cancel_border_color_dark=color_er.c600,
            button_cancel_text_color=color_er.c600,
            button_cancel_text_color_dark="white",
        )
    except Exception:
        # BUG FIX: was a bare `except:` (also caught KeyboardInterrupt).
        set_theme = None; print('gradio版本较旧, 不能自定义字体和颜色')
    return set_theme
# Custom CSS injected into the gradio Blocks app (tables, chat bubbles, code).
# NOTE: the string content (including its embedded CSS comments) is part of the
# page output and is reproduced verbatim.
advanced_css = """
/* 设置表格的外边距为1em,内部单元格之间边框合并,空单元格显示. */
.markdown-body table {
    margin: 1em 0;
    border-collapse: collapse;
    empty-cells: show;
}

/* 设置表格单元格的内边距为5px,边框粗细为1.2px,颜色为--border-color-primary. */
.markdown-body th, .markdown-body td {
    border: 1.2px solid var(--border-color-primary);
    padding: 5px;
}

/* 设置表头背景颜色为rgba(175,184,193,0.2),透明度为0.2. */
.markdown-body thead {
    background-color: rgba(175,184,193,0.2);
}

/* 设置表头单元格的内边距为0.5em和0.2em. */
.markdown-body thead th {
    padding: .5em .2em;
}

/* 去掉列表前缀的默认间距,使其与文本线对齐. */
.markdown-body ol, .markdown-body ul {
    padding-inline-start: 2em !important;
}

/* 设定聊天气泡的样式,包括圆角、最大宽度和阴影等. */
[class *= "message"] {
    border-radius: var(--radius-xl) !important;
    /* padding: var(--spacing-xl) !important; */
    /* font-size: var(--text-md) !important; */
    /* line-height: var(--line-md) !important; */
    /* min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
    /* min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
}
[data-testid = "bot"] {
    max-width: 95%;
    /* width: auto !important; */
    border-bottom-left-radius: 0 !important;
}
[data-testid = "user"] {
    max-width: 100%;
    /* width: auto !important; */
    border-bottom-right-radius: 0 !important;
}

/* 行内代码的背景设为淡灰色,设定圆角和间距. */
.markdown-body code {
    display: inline;
    white-space: break-spaces;
    border-radius: 6px;
    margin: 0 2px 0 2px;
    padding: .2em .4em .1em .4em;
    background-color: rgba(175,184,193,0.2);
}
/* 设定代码块的样式,包括背景颜色、内、外边距、圆角。 */
.markdown-body pre code {
    display: block;
    overflow: auto;
    white-space: pre;
    background-color: rgba(175,184,193,0.2);
    border-radius: 10px;
    padding: 1em;
    margin: 1em 2em 1em 0.5em;
}
"""
toolbox.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import markdown, mdtex2html, threading, importlib, traceback, importlib, inspect, re
2
+ from show_math import convert as convert_math
3
+ from functools import wraps, lru_cache
4
+
5
def get_reduce_token_percent(text):
    """
    Parse an OpenAI "token limit exceeded" error message and compute how much
    of the conversation can be kept.

    Returns a tuple (ratio, overflow) where `ratio` is the fraction of the
    current text to retain (0 < ratio < 1) and `overflow` is the number of
    excess tokens as a string. Falls back to (0.5, '不详') when the message
    cannot be parsed.
    """
    try:
        # e.g. "maximum context length is 4097 tokens. However, your messages resulted in 4870 tokens"
        pattern = r"(\d+)\s+tokens\b"
        match = re.findall(pattern, text)
        EXCEED_ALLO = 500  # keep some headroom, otherwise the reply itself overflows again
        max_limit = float(match[0]) - EXCEED_ALLO
        current_tokens = float(match[1])
        ratio = max_limit/current_tokens
        assert ratio > 0 and ratio < 1
        return ratio, str(int(current_tokens-max_limit))
    except (IndexError, ValueError, AssertionError):
        # Message did not match the expected format; keep half as a safe default.
        return 0.5, '不详'
18
+
19
def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt='', long_connection=True):
    """
    Call the simple predict_no_ui API while keeping a minimal UI heartbeat
    alive; when the conversation exceeds the token limit it is truncated
    automatically (bisection-style) and retried.

    i_say: current input
    i_say_show_user: what is shown in the chat window for this input — e.g.
        a file name instead of the whole file content
    chatbot: chat UI handle (last row is updated in place with a countdown)
    top_p, temperature: GPT sampling parameters
    history: conversation history passed to GPT
        NOTE(review): mutable default; never mutated in place here (the worker
        rebinds a local copy), but callers should still pass their own list.
    sys_prompt: GPT system prompt
    long_connection: use the more stable streaming connection (recommended)
    """
    import time
    from predict import predict_no_ui, predict_no_ui_long_connection
    from toolbox import get_conf
    TIMEOUT_SECONDS, MAX_RETRY = get_conf('TIMEOUT_SECONDS', 'MAX_RETRY')
    # A mutable list is the simplest structure to pass data between threads:
    # slot 0 carries the GPT output, slot 1 carries warning text for the UI.
    mutable = [None, '']
    # multi-threading worker
    def mt(i_say, history):
        while True:
            try:
                if long_connection:
                    mutable[0] = predict_no_ui_long_connection(inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
                else:
                    mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
                break
            except ConnectionAbortedError as token_exceeded_error:
                # Token limit exceeded: compute a keep-ratio so as much text as possible survives.
                p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                if len(history) > 0:
                    # Trim the oldest part of every history entry by the computed ratio.
                    history = [his[ int(len(his) *p_ratio): ] for his in history if his is not None]
                else:
                    # No history: trim the tail of the input itself instead.
                    i_say = i_say[: int(len(i_say) *p_ratio) ]
                mutable[1] = f'警告,文本过长将进行截断,Token溢出数:{n_exceed},截断比例:{(1-p_ratio):.0%}。'
            except TimeoutError as e:
                mutable[0] = '[Local Message] 请求超时。'
                raise TimeoutError
            except Exception as e:
                mutable[0] = f'[Local Message] 异常:{str(e)}.'
                raise RuntimeError(f'[Local Message] 异常:{str(e)}.')
    # Issue the HTTP request from a new thread.
    thread_name = threading.Thread(target=mt, args=(i_say, history)); thread_name.start()
    # Meanwhile this (generator) thread keeps updating the UI with a countdown
    # until the worker finishes.
    cnt = 0
    while thread_name.is_alive():
        cnt += 1
        chatbot[-1] = (i_say_show_user, f"[Local Message] {mutable[1]}waiting gpt response {cnt}/{TIMEOUT_SECONDS*2*(MAX_RETRY+1)}"+''.join(['.']*(cnt%4)))
        yield chatbot, history, '正常'
        time.sleep(1)
    # Retrieve the GPT output from the shared slot.
    gpt_say = mutable[0]
    if gpt_say=='[Local Message] Failed with timeout.': raise TimeoutError
    return gpt_say
73
+
74
def write_results_to_file(history, file_name=None):
    """
    Write the conversation `history` to a Markdown file under ./gpt_log/.

    Even-indexed entries (questions) become '## ' headings; odd-indexed
    entries (answers) follow as plain paragraphs. If file_name is None, a
    timestamped name is generated. Returns a human-readable message
    containing the absolute path of the written file.
    """
    import os, time
    if file_name is None:
        file_name = 'chatGPT分析报告' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
    os.makedirs('./gpt_log/', exist_ok=True)
    with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
        f.write('# chatGPT 分析报告\n')
        for i, content in enumerate(history):
            try:
                # Coerce non-string entries; skip any entry whose str() fails.
                # (Original note: trigger condition for the failure was never
                # found — this is a deliberate band-aid, kept as best-effort.)
                if not isinstance(content, str):
                    content = str(content)
            except Exception:
                continue
            if i % 2 == 0:
                f.write('## ')
            f.write(content)
            f.write('\n\n')
    res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
    print(res)
    return res
96
+
97
def regular_txt_to_markdown(text):
    """
    Convert plain text to Markdown: turn every newline into a paragraph
    break, then collapse excessive blank runs (two squeeze passes).
    """
    converted = text.replace('\n', '\n\n')
    # Two passes, matching the original's double squeeze of triple newlines.
    for _ in range(2):
        converted = converted.replace('\n\n\n', '\n\n')
    return converted
105
+
106
def CatchException(f):
    """
    Decorator: wrap the generator-style plugin f so that any exception it
    raises is captured, formatted with its traceback, and shown in the chat
    window instead of crashing the application.
    """
    @wraps(f)
    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
        try:
            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
        except Exception as e:
            from check_proxy import check_proxy
            from toolbox import get_conf
            proxies, = get_conf('proxies')
            tb_str = '```\n' + traceback.format_exc() + '```'
            # Make sure there is at least one chat row to attach the error report to.
            if len(chatbot) == 0: chatbot.append(["插件调度异常","异常原因"])
            # Replace the last row's reply with the traceback plus a proxy-availability probe.
            chatbot[-1] = (chatbot[-1][0], f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
            yield chatbot, history, f'异常 {e}'
    return decorated
123
+
124
def HotReload(f):
    """
    Decorator enabling hot-reload of function plugins: on every call the
    module that defines f is re-imported and the freshest version of the
    function is looked up by name, so code edits take effect without
    restarting the app.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        fn_name = f.__name__
        # Reload the defining module, then fetch the (possibly updated) function by name.
        f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
        yield from f_hot_reload(*args, **kwargs)
    return decorated
134
+
135
def report_execption(chatbot, history, a, b):
    """
    Append an error entry to the chat UI and the running history.

    The pair (a, b) is shown as one chat bubble, while both halves are
    recorded individually in `history`.
    """
    chatbot.append((a, b))
    history.extend([a, b])
141
+
142
def text_divide_paragraph(text):
    """
    Prepare free-form user text for HTML display: preserve spacing and line
    breaks. Text containing a fenced code block (```) is returned untouched
    so that the code renders verbatim.
    """
    if '```' in text:
        # Careful input — contains code fences; do not mangle it.
        return text
    # Plain text: encode spaces and join lines with explicit break tags.
    escaped_lines = [line.replace(" ", "&nbsp;") for line in text.split("\n")]
    return "</br>".join(escaped_lines)
156
+
157
def markdown_convertion(txt):
    """
    Render Markdown text to HTML inside a div.markdown-body container.
    When the text looks like it contains LaTeX math ('$' present, no fenced
    code block), render it twice: once as-is, once with math converted.
    """
    pre = '<div class="markdown-body">'
    suf = '</div>'
    md_extensions = ['fenced_code', 'tables']
    if ('$' in txt) and ('```' not in txt):
        plain_html = markdown.markdown(txt, extensions=md_extensions)
        math_html = markdown.markdown(convert_math(txt, splitParagraphs=False), extensions=md_extensions)
        return pre + plain_html + '<br><br>' + math_html + suf
    return pre + markdown.markdown(txt, extensions=md_extensions) + suf
167
+
168
def close_up_code_segment_during_stream(gpt_reply):
    """
    While streaming, GPT may have opened a fenced code block (```) without
    closing it yet; append a closing fence so the partial output renders
    correctly in the chat window.
    """
    if '```' not in gpt_reply:
        return gpt_reply
    if gpt_reply.endswith('```'):
        return gpt_reply
    # An odd number of fences means a code block is still open mid-stream.
    if gpt_reply.count('```') % 2 == 1:
        return gpt_reply + '\n```'
    return gpt_reply
183
+
184
+
185
+
186
def format_io(self, y):
    """
    Gradio Chatbot postprocess hook: convert the latest (input, reply) pair
    to HTML. The user input is paragraph-escaped first; the GPT reply gets
    an auto-closed code fence plus full Markdown/math conversion.
    """
    if y is None or y == []:
        return []
    i_ask, gpt_reply = y[-1]
    # User input is free-form; pre-process it into HTML-safe paragraphs.
    i_ask = text_divide_paragraph(i_ask)
    # The reply may end mid-code-block while streaming; close the fence first.
    gpt_reply = close_up_code_segment_during_stream(gpt_reply)
    formatted_ask = None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code','tables'])
    formatted_reply = None if gpt_reply is None else markdown_convertion(gpt_reply)
    y[-1] = (formatted_ask, formatted_reply)
    return y
199
+
200
+
201
def find_free_port():
    """
    Return a TCP port number that is currently unused on this machine.

    The probe socket is closed before returning, so the port is only a hint:
    another process could claim it before the caller binds. That race is
    acceptable for picking a local web-UI port at startup.
    """
    import socket
    from contextlib import closing
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        # SO_REUSEADDR must be set *before* bind to have any effect on it
        # (the original set it after bind, where it was a no-op).
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(('', 0))
        return s.getsockname()[1]
211
+
212
+
213
def extract_archive(file_path, dest_dir):
    """
    Extract an archive to dest_dir, choosing the method by file extension.

    .zip and .tar/.gz/.bz2 use the standard library; .rar requires the
    third-party `rarfile` package (plus WinRAR on PATH under Windows) and
    .7z requires `py7zr`. Returns '' on success or unknown extension, or a
    user-facing hint string when an optional dependency is missing.
    """
    import zipfile
    import tarfile
    import os
    # Get the file extension of the input file
    file_extension = os.path.splitext(file_path)[1]

    # Extract the archive based on its extension
    if file_extension == '.zip':
        with zipfile.ZipFile(file_path, 'r') as zipobj:
            zipobj.extractall(path=dest_dir)
        print("Successfully extracted zip archive to {}".format(dest_dir))

    elif file_extension in ['.tar', '.gz', '.bz2']:
        # NOTE(security): extractall trusts member paths — a malicious archive
        # could escape dest_dir (path traversal). Only use on trusted files.
        with tarfile.open(file_path, 'r:*') as tarobj:
            tarobj.extractall(path=dest_dir)
        print("Successfully extracted tar archive to {}".format(dest_dir))

    # Third-party: pip install rarfile beforehand.
    # On Windows, WinRAR must also be installed and on PATH, e.g. "C:\Program Files\WinRAR".
    elif file_extension == '.rar':
        try:
            import rarfile
            with rarfile.RarFile(file_path) as rf:
                rf.extractall(path=dest_dir)
            print("Successfully extracted rar archive to {}".format(dest_dir))
        except Exception:  # was a bare except; must not swallow SystemExit/KeyboardInterrupt
            print("Rar format requires additional dependencies to install")
            return '\n\n需要安装pip install rarfile来解压rar文件'

    # Third-party: pip install py7zr beforehand.
    elif file_extension == '.7z':
        try:
            import py7zr
            with py7zr.SevenZipFile(file_path, mode='r') as f:
                f.extractall(path=dest_dir)
            print("Successfully extracted 7z archive to {}".format(dest_dir))
        except Exception:  # was a bare except; see note above
            print("7z format requires additional dependencies to install")
            return '\n\n需要安装pip install py7zr来解压7z文件'
    else:
        return ''
    return ''
256
+
257
def find_recent_files(directory):
    """
    Return the paths of regular files under `directory` created within the
    last minute. Subdirectories and *.log files are skipped.
    """
    import os
    import time
    cutoff = time.time() - 60  # anything created before this is "old"
    recent = []
    for entry in os.listdir(directory):
        path = os.path.join(directory, entry)
        if path.endswith('.log'):
            continue
        if os.path.getctime(path) >= cutoff:
            if os.path.isdir(path):
                continue
            recent.append(path)
    return recent
277
+
278
+
279
+
280
+
281
+
282
+
283
+
284
@lru_cache(maxsize=128)
def read_single_conf_with_lru_cache(arg):
    """
    Read one configuration value named `arg`, preferring config_private.py
    over config.py; results are LRU-cached per key. Performs sanity checks
    for the 'API_KEY' and 'proxies' entries and fails loudly on bad values.
    """
    try: r = getattr(importlib.import_module('config_private'), arg)
    except: r = getattr(importlib.import_module('config'), arg)
    # When reading API_KEY, check whether the user forgot to edit the config.
    if arg=='API_KEY':
        # A valid API_KEY is "sk-" followed by 48 alphanumeric characters.
        API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", r)
        if API_MATCH:
            print(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
        else:
            # NOTE: `assert` is stripped under python -O; kept as-is to preserve behavior.
            assert False, "正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
                "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)"
    if arg=='proxies':
        if r is None:
            print('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问。建议:检查USE_PROXY选项是否修改。')
        else:
            print('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
            assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
    return r
304
+
305
def get_conf(*args):
    """
    Fetch configuration values by name, returning one result per key,
    in the order requested.

    Tip: keep secrets (API keys, proxy URLs) in a config_private.py so they
    are not accidentally pushed to GitHub.
    """
    return [read_single_conf_with_lru_cache(arg) for arg in args]
312
+
313
def clear_line_break(txt):
    """Flatten newlines into spaces and squeeze double spaces (two passes)."""
    flattened = txt.replace('\n', ' ')
    for _ in range(2):
        flattened = flattened.replace('  ', ' ')
    return flattened
work_log/chat_secrets.log ADDED
File without changes