from collections import defaultdict
from contextlib import contextmanager
import os
import logging
import sys
import commentjson as json
from . import shared
from . import presets
__all__ = [
"my_api_key",
"sensitive_id",
"authflag",
"auth_list",
"dockerflag",
"retrieve_proxy",
"advance_docs",
"update_doc_config",
"usage_limit",
"multi_api_key",
"server_name",
"server_port",
"share",
"check_update",
"latex_delimiters_set",
"hide_history_when_not_logged_in",
"default_chuanhu_assistant_model",
"show_api_billing"
]

# Use a single unified config file to avoid the confusion caused by too many separate files (lowest priority).
# It also gives later customization features a place to put their config options.
if os.path.exists("config.json"):
with open("config.json", "r", encoding='utf-8') as f:
config = json.load(f)
else:
config = {}


def load_config_to_environ(key_list):
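    """Copy the given config keys into environment variables (uppercased).

    Values already set in the environment take precedence over config.json.
    """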
global config
for key in key_list:
if key in config:
os.environ[key.upper()] = os.environ.get(key.upper(), config[key])
hide_history_when_not_logged_in = config.get(
"hide_history_when_not_logged_in", False)
check_update = config.get("check_update", True)
show_api_billing = config.get("show_api_billing", False)
# Environment variables are strings, so parse the value instead of calling bool() on it
# (bool("false") would be True).
show_api_billing = str(os.environ.get("SHOW_API_BILLING", show_api_billing)).lower() in ("true", "1", "yes")
if os.path.exists("api_key.txt"):
logging.info("检测到api_key.txt文件,正在进行迁移...")
with open("api_key.txt", "r", encoding="utf-8") as f:
config["openai_api_key"] = f.read().strip()
os.rename("api_key.txt", "api_key(deprecated).txt")
with open("config.json", "w", encoding='utf-8') as f:
json.dump(config, f, indent=4, ensure_ascii=False)
if os.path.exists("auth.json"):
logging.info("检测到auth.json文件,正在进行迁移...")
auth_list = []
with open("auth.json", "r", encoding='utf-8') as f:
auth = json.load(f)
    for user in auth.values():
        if user["username"] and user["password"]:
            auth_list.append((user["username"], user["password"]))
        else:
            logging.error("Please check the usernames and passwords in auth.json!")
sys.exit(1)
config["users"] = auth_list
os.rename("auth.json", "auth(deprecated).json")
with open("config.json", "w", encoding='utf-8') as f:
json.dump(config, f, indent=4, ensure_ascii=False)

# Detect whether we are running in Docker
dockerflag = config.get("dockerflag", False)
if os.environ.get("dockerrun") == "yes":
dockerflag = True

# Handle the API key and the list of allowed users
my_api_key = config.get("openai_api_key", "")
my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
os.environ["OPENAI_API_KEY"] = my_api_key
os.environ["OPENAI_EMBEDDING_API_KEY"] = my_api_key
if config.get("legacy_api_usage", False):
sensitive_id = my_api_key
else:
sensitive_id = config.get("sensitive_id", "")
sensitive_id = os.environ.get("SENSITIVE_ID", sensitive_id)

# Model configuration
if "extra_models" in config:
presets.MODELS.extend(config["extra_models"])
logging.info(f"已添加额外的模型:{config['extra_models']}")
google_palm_api_key = config.get("google_palm_api_key", "")
google_palm_api_key = os.environ.get(
"GOOGLE_PALM_API_KEY", google_palm_api_key)
os.environ["GOOGLE_PALM_API_KEY"] = google_palm_api_key
xmchat_api_key = config.get("xmchat_api_key", "")
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
minimax_api_key = config.get("minimax_api_key", "")
os.environ["MINIMAX_API_KEY"] = minimax_api_key
minimax_group_id = config.get("minimax_group_id", "")
os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
midjourney_proxy_api_base = config.get("midjourney_proxy_api_base", "")
os.environ["MIDJOURNEY_PROXY_API_BASE"] = midjourney_proxy_api_base
midjourney_proxy_api_secret = config.get("midjourney_proxy_api_secret", "")
os.environ["MIDJOURNEY_PROXY_API_SECRET"] = midjourney_proxy_api_secret
midjourney_discord_proxy_url = config.get("midjourney_discord_proxy_url", "")
os.environ["MIDJOURNEY_DISCORD_PROXY_URL"] = midjourney_discord_proxy_url
midjourney_temp_folder = config.get("midjourney_temp_folder", "")
os.environ["MIDJOURNEY_TEMP_FOLDER"] = midjourney_temp_folder
spark_api_key = config.get("spark_api_key", "")
os.environ["SPARK_API_KEY"] = spark_api_key
spark_appid = config.get("spark_appid", "")
os.environ["SPARK_APPID"] = spark_appid
spark_api_secret = config.get("spark_api_secret", "")
os.environ["SPARK_API_SECRET"] = spark_api_secret
load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
"azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])
# Cast to int so the value has the same type whether it comes from the environment or from config.json.
usage_limit = int(os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120)))

# Multi-account mechanism
multi_api_key = config.get("multi_api_key", False)  # whether the multi-account mechanism is enabled
if multi_api_key:
api_key_list = config.get("api_key_list", [])
if len(api_key_list) == 0:
logging.error("多账号模式已开启,但api_key_list为空,请检查config.json")
sys.exit(1)
shared.state.set_api_key_queue(api_key_list)
auth_list = config.get("users", []) # 实际上是使用者的列表
authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度
# 处理自定义的api_host,优先读环境变量的配置,如果存在则自动装配
api_host = os.environ.get(
"OPENAI_API_BASE", config.get("openai_api_base", None))
if api_host is not None:
shared.state.set_api_host(api_host)
os.environ["OPENAI_API_BASE"] = f"{api_host}/v1"
logging.info(f"OpenAI API Base set to: {os.environ['OPENAI_API_BASE']}")
default_chuanhu_assistant_model = config.get(
"default_chuanhu_assistant_model", "gpt-3.5-turbo")
for x in ["GOOGLE_CSE_ID", "GOOGLE_API_KEY", "WOLFRAM_ALPHA_APPID", "SERPAPI_API_KEY"]:
if config.get(x, None) is not None:
os.environ[x] = config[x]


@contextmanager
def retrieve_openai_api(api_key=None):
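    """Temporarily export an OpenAI API key via the OPENAI_API_KEY environment variable.

    If no key is given, the globally configured key is used. The previous value
    is restored when the context exits.

    Usage:
        with retrieve_openai_api() as api_key:
            ...  # calls that read OPENAI_API_KEY
    """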
    old_api_key = os.environ.get("OPENAI_API_KEY", "")
    try:
        if api_key is None:
            os.environ["OPENAI_API_KEY"] = my_api_key
            yield my_api_key
        else:
            os.environ["OPENAI_API_KEY"] = api_key
            yield api_key
    finally:
        # restore the previous key even if the body of the with-block raises
        os.environ["OPENAI_API_KEY"] = old_api_key

# Handle proxies:
http_proxy = os.environ.get("HTTP_PROXY", "")
https_proxy = os.environ.get("HTTPS_PROXY", "")
http_proxy = config.get("http_proxy", http_proxy)
https_proxy = config.get("https_proxy", https_proxy)
# Reset these variables: leave the proxy environment variables unset when no proxy is needed, to avoid errors caused by a global proxy
os.environ["HTTP_PROXY"] = ""
os.environ["HTTPS_PROXY"] = ""

local_embedding = config.get("local_embedding", False)  # whether to use a local embedding model


@contextmanager
def retrieve_proxy(proxy=None):
"""
1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理
2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量
"""
global http_proxy, https_proxy
if proxy is not None:
http_proxy = proxy
https_proxy = proxy
yield http_proxy, https_proxy
    else:
        old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
        os.environ["HTTP_PROXY"] = http_proxy
        os.environ["HTTPS_PROXY"] = https_proxy
        try:
            yield http_proxy, https_proxy  # return the new proxies
        finally:
            # restore the old proxies even if the with-block raises
            os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var

# Handle LaTeX options
user_latex_option = config.get("latex_option", "default")
if user_latex_option == "default":
latex_delimiters_set = [
{"left": "$$", "right": "$$", "display": True},
{"left": "$", "right": "$", "display": False},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
]
elif user_latex_option == "strict":
latex_delimiters_set = [
{"left": "$$", "right": "$$", "display": True},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
]
elif user_latex_option == "all":
latex_delimiters_set = [
{"left": "$$", "right": "$$", "display": True},
{"left": "$", "right": "$", "display": False},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
{"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
{"left": "\\begin{align}", "right": "\\end{align}", "display": True},
{"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
{"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
{"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
]
elif user_latex_option == "disabled":
latex_delimiters_set = []
else:
latex_delimiters_set = [
{"left": "$$", "right": "$$", "display": True},
{"left": "$", "right": "$", "display": False},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
]

# Handle advanced document settings
advance_docs = defaultdict(lambda: defaultdict(dict))
advance_docs.update(config.get("advance_docs", {}))


def update_doc_config(two_column_pdf):
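    """Update the advanced PDF processing option (two-column layout)."""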
global advance_docs
advance_docs["pdf"]["two_column"] = two_column_pdf
logging.info(f"更新后的文件参数为:{advance_docs}")

# Handle gradio.launch parameters
server_name = config.get("server_name", None)
server_port = config.get("server_port", None)
if server_name is None:
if dockerflag:
server_name = "0.0.0.0"
else:
server_name = "127.0.0.1"
if server_port is None:
if dockerflag:
server_port = 7860
assert server_port is None or isinstance(server_port, int), "server_port must be an integer"

# Set the default model
default_model = config.get("default_model", "")
try:
presets.DEFAULT_MODEL = presets.MODELS.index(default_model)
except ValueError:
pass
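
# Whether to request a public Gradio share link when launching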
share = config.get("share", False)

# avatar
bot_avatar = config.get("bot_avatar", "default")
user_avatar = config.get("user_avatar", "default")
if bot_avatar == "" or bot_avatar == "none" or bot_avatar is None:
bot_avatar = None
elif bot_avatar == "default":
bot_avatar = "web_assets/chatbot.png"
if user_avatar == "" or user_avatar == "none" or user_avatar is None:
user_avatar = None
elif user_avatar == "default":
user_avatar = "web_assets/user.png" |