from collections import defaultdict
from contextlib import contextmanager
import os
import logging
import sys
import commentjson as json
from . import shared
from . import presets
__all__ = [
"my_api_key",
"sensitive_id",
"authflag",
"auth_list",
"dockerflag",
"retrieve_proxy",
"advance_docs",
"update_doc_config",
"usage_limit",
"multi_api_key",
"server_name",
"server_port",
"share",
"check_update",
"latex_delimiters_set",
"hide_history_when_not_logged_in",
"default_chuanhu_assistant_model",
"show_api_billing",
"chat_name_method_index",
]
# Use a single, unified config file to avoid the confusion caused by too many separate files (lowest priority).
# It also lays the groundwork for config support of future customization features.
if os.path.exists("config.json"):
with open("config.json", "r", encoding='utf-8') as f:
config = json.load(f)
else:
config = {}
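
# Illustrative only: a minimal config.json might look like the example below (placeholder values, not real credentials):
# {
#     "openai_api_key": "sk-...",
#     "users": [["admin", "password"]],
#     "server_name": "0.0.0.0",
#     "server_port": 7860
# }
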
def load_config_to_environ(key_list):
global config
for key in key_list:
if key in config:
os.environ[key.upper()] = os.environ.get(key.upper(), config[key])
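
# For example, the call load_config_to_environ(["openai_api_type", ...]) further below copies
# config["openai_api_type"] into the OPENAI_API_TYPE environment variable unless it is already set.
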
hide_history_when_not_logged_in = config.get(
"hide_history_when_not_logged_in", False)
check_update = config.get("check_update", True)
show_api_billing = config.get("show_api_billing", False)
# A plain bool() on an environment string would treat any non-empty value (even "false") as True,
# so compare against common truthy strings instead.
show_api_billing = str(os.environ.get("SHOW_API_BILLING", show_api_billing)).lower() in ("true", "1", "yes")
chat_name_method_index = config.get("chat_name_method_index", 2)
if os.path.exists("api_key.txt"):
logging.info("检测到api_key.txt文件,正在进行迁移...")
with open("api_key.txt", "r", encoding="utf-8") as f:
config["openai_api_key"] = f.read().strip()
os.rename("api_key.txt", "api_key(deprecated).txt")
with open("config.json", "w", encoding='utf-8') as f:
json.dump(config, f, indent=4, ensure_ascii=False)
if os.path.exists("auth.json"):
logging.info("检测到auth.json文件,正在进行迁移...")
auth_list = []
with open("auth.json", "r", encoding='utf-8') as f:
auth = json.load(f)
for _ in auth:
if auth[_]["username"] and auth[_]["password"]:
auth_list.append((auth[_]["username"], auth[_]["password"]))
else:
logging.error("请检查auth.json文件中的用户名和密码!")
sys.exit(1)
config["users"] = auth_list
os.rename("auth.json", "auth(deprecated).json")
with open("config.json", "w", encoding='utf-8') as f:
json.dump(config, f, indent=4, ensure_ascii=False)
# Handle Docker: detect whether we are running inside a container
dockerflag = config.get("dockerflag", False)
if os.environ.get("dockerrun") == "yes":
dockerflag = True
# Handle the API key and the list of allowed users
my_api_key = config.get("openai_api_key", "")
my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
os.environ["OPENAI_API_KEY"] = my_api_key
os.environ["OPENAI_EMBEDDING_API_KEY"] = my_api_key
if config.get("legacy_api_usage", False):
sensitive_id = my_api_key
else:
sensitive_id = config.get("sensitive_id", "")
sensitive_id = os.environ.get("SENSITIVE_ID", sensitive_id)
if "available_models" in config:
presets.MODELS = config["available_models"]
logging.info(f"已设置可用模型:{config['available_models']}")
# 模型配置
if "extra_models" in config:
presets.MODELS.extend(config["extra_models"])
logging.info(f"已添加额外的模型:{config['extra_models']}")
google_palm_api_key = config.get("google_palm_api_key", "")
google_palm_api_key = os.environ.get(
"GOOGLE_PALM_API_KEY", google_palm_api_key)
os.environ["GOOGLE_PALM_API_KEY"] = google_palm_api_key
xmchat_api_key = config.get("xmchat_api_key", "")
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
minimax_api_key = config.get("minimax_api_key", "")
os.environ["MINIMAX_API_KEY"] = minimax_api_key
minimax_group_id = config.get("minimax_group_id", "")
os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
midjourney_proxy_api_base = config.get("midjourney_proxy_api_base", "")
os.environ["MIDJOURNEY_PROXY_API_BASE"] = midjourney_proxy_api_base
midjourney_proxy_api_secret = config.get("midjourney_proxy_api_secret", "")
os.environ["MIDJOURNEY_PROXY_API_SECRET"] = midjourney_proxy_api_secret
midjourney_discord_proxy_url = config.get("midjourney_discord_proxy_url", "")
os.environ["MIDJOURNEY_DISCORD_PROXY_URL"] = midjourney_discord_proxy_url
midjourney_temp_folder = config.get("midjourney_temp_folder", "")
os.environ["MIDJOURNEY_TEMP_FOLDER"] = midjourney_temp_folder
spark_api_key = config.get("spark_api_key", "")
os.environ["SPARK_API_KEY"] = spark_api_key
spark_appid = config.get("spark_appid", "")
os.environ["SPARK_APPID"] = spark_appid
spark_api_secret = config.get("spark_api_secret", "")
os.environ["SPARK_API_SECRET"] = spark_api_secret
claude_api_secret = config.get("claude_api_secret", "")
os.environ["CLAUDE_API_SECRET"] = claude_api_secret
ernie_api_key = config.get("ernie_api_key", "")
os.environ["ERNIE_APIKEY"] = ernie_api_key
ernie_secret_key = config.get("ernie_secret_key", "")
os.environ["ERNIE_SECRETKEY"] = ernie_secret_key
load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
"azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])
usage_limit = os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120))
# Multi-account mechanism
multi_api_key = config.get("multi_api_key", False)  # whether to rotate across multiple API keys
if multi_api_key:
api_key_list = config.get("api_key_list", [])
if len(api_key_list) == 0:
logging.error("多账号模式已开启,但api_key_list为空,请检查config.json")
sys.exit(1)
shared.state.set_api_key_queue(api_key_list)
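    # Illustrative config.json fragment for this mode (placeholder values, not real keys):
    # "multi_api_key": true,
    # "api_key_list": ["sk-first-key", "sk-second-key"]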
auth_list = config.get("users", [])  # effectively the list of allowed users
authflag = len(auth_list) > 0  # authentication is enabled whenever auth_list is non-empty
# Handle a custom api_host: the environment variable takes precedence and, if present, is wired in automatically
api_host = os.environ.get(
"OPENAI_API_BASE", config.get("openai_api_base", None))
if api_host is not None:
shared.state.set_api_host(api_host)
os.environ["OPENAI_API_BASE"] = f"{api_host}/v1"
logging.info(f"OpenAI API Base set to: {os.environ['OPENAI_API_BASE']}")
default_chuanhu_assistant_model = config.get(
"default_chuanhu_assistant_model", "gpt-3.5-turbo")
for x in ["GOOGLE_CSE_ID", "GOOGLE_API_KEY", "WOLFRAM_ALPHA_APPID", "SERPAPI_API_KEY"]:
if config.get(x, None) is not None:
os.environ[x] = config[x]
@contextmanager
def retrieve_openai_api(api_key=None):
    """Temporarily export the given API key (or the default one) as OPENAI_API_KEY, restoring the previous value on exit."""
    old_api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key is None:
os.environ["OPENAI_API_KEY"] = my_api_key
yield my_api_key
else:
os.environ["OPENAI_API_KEY"] = api_key
yield api_key
os.environ["OPENAI_API_KEY"] = old_api_key
# Handle proxies:
http_proxy = os.environ.get("HTTP_PROXY", "")
https_proxy = os.environ.get("HTTPS_PROXY", "")
http_proxy = config.get("http_proxy", http_proxy)
https_proxy = config.get("https_proxy", https_proxy)
# Reset the variables: leave the proxy environment variables unset when they are not needed, to avoid errors caused by a global proxy
os.environ["HTTP_PROXY"] = ""
os.environ["HTTPS_PROXY"] = ""
local_embedding = config.get("local_embedding", False)  # whether to use a local embedding model
@contextmanager
def retrieve_proxy(proxy=None):
"""
1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理
2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量
"""
global http_proxy, https_proxy
if proxy is not None:
http_proxy = proxy
https_proxy = proxy
yield http_proxy, https_proxy
else:
old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
os.environ["HTTP_PROXY"] = http_proxy
os.environ["HTTPS_PROXY"] = https_proxy
        yield http_proxy, https_proxy  # return the new proxy
        # restore the old proxy values
        os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var
# Handle LaTeX options
user_latex_option = config.get("latex_option", "default")
if user_latex_option == "default":
latex_delimiters_set = [
{"left": "$$", "right": "$$", "display": True},
{"left": "$", "right": "$", "display": False},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
]
elif user_latex_option == "strict":
latex_delimiters_set = [
{"left": "$$", "right": "$$", "display": True},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
]
elif user_latex_option == "all":
latex_delimiters_set = [
{"left": "$$", "right": "$$", "display": True},
{"left": "$", "right": "$", "display": False},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
{"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
{"left": "\\begin{align}", "right": "\\end{align}", "display": True},
{"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
{"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
{"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
]
elif user_latex_option == "disabled":
latex_delimiters_set = []
else:
latex_delimiters_set = [
{"left": "$$", "right": "$$", "display": True},
{"left": "$", "right": "$", "display": False},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
]
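
# Each entry uses the {"left": ..., "right": ..., "display": ...} delimiter format expected by
# gr.Chatbot's latex_delimiters parameter (an assumption based on recent Gradio versions).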
# Handle advanced document (advance_docs) options
advance_docs = defaultdict(lambda: defaultdict(dict))
advance_docs.update(config.get("advance_docs", {}))
def update_doc_config(two_column_pdf):
global advance_docs
advance_docs["pdf"]["two_column"] = two_column_pdf
logging.info(f"更新后的文件参数为:{advance_docs}")
# Handle gradio.launch parameters
server_name = config.get("server_name", None)
server_port = config.get("server_port", None)
if server_name is None:
if dockerflag:
server_name = "0.0.0.0"
else:
server_name = "127.0.0.1"
if server_port is None:
if dockerflag:
server_port = 7860
assert server_port is None or isinstance(server_port, int), "server_port must be an int"
# Set the default model
default_model = config.get("default_model", "")
try:
presets.DEFAULT_MODEL = presets.MODELS.index(default_model)
except ValueError:
pass
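
# Note: presets.DEFAULT_MODEL holds the *index* of the model within presets.MODELS,
# so an unknown default_model name simply leaves the preset default unchanged.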
share = config.get("share", False)
# avatar
bot_avatar = config.get("bot_avatar", "default")
user_avatar = config.get("user_avatar", "default")
if bot_avatar == "" or bot_avatar == "none" or bot_avatar is None:
bot_avatar = None
elif bot_avatar == "default":
bot_avatar = "web_assets/chatbot.png"
if user_avatar == "" or user_avatar == "none" or user_avatar is None:
user_avatar = None
elif user_avatar == "default":
user_avatar = "web_assets/user.png"