Spaces:
Paused
Paused
Merge branch 'suburl' of https://github.com/yuxiaoyuan0406/chatgpt_academic into yuxiaoyuan0406-suburl
Browse files- config.py +3 -0
- main.py +7 -4
- toolbox.py +11 -0
config.py
CHANGED
@@ -60,3 +60,6 @@ AUTHENTICATION = []
|
|
60 |
# URL重新定向,实现更换API_URL的作用(常规情况下,不要修改!!)
|
61 |
# 格式 {"https://api.openai.com/v1/chat/completions": "重定向的URL"}
|
62 |
API_URL_REDIRECT = {}
|
|
|
|
|
|
|
|
# Redirect API_URL to a different endpoint (normal use: do NOT modify!!)
# Format: {"https://api.openai.com/v1/chat/completions": "redirected URL"}
API_URL_REDIRECT = {}

# Serve the web UI under a sub-path (second-level URL) instead of the root
# (normal use: do NOT modify!!)
CUSTOM_PATH = "/"
|
main.py
CHANGED
@@ -3,10 +3,10 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
|
3 |
def main():
|
4 |
import gradio as gr
|
5 |
from request_llm.bridge_all import predict
|
6 |
-
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
|
7 |
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
8 |
-
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
|
9 |
-
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')
|
10 |
|
11 |
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
12 |
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
@@ -186,7 +186,10 @@ def main():
|
|
186 |
threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
|
187 |
|
188 |
auto_opentab_delay()
|
189 |
-
demo.queue(concurrency_count=CONCURRENT_COUNT)
|
|
|
190 |
|
191 |
if __name__ == "__main__":
|
192 |
main()
|
|
|
|
|
|
3 |
def main():
|
4 |
import gradio as gr
|
5 |
from request_llm.bridge_all import predict
|
6 |
+
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, run_gradio, DummyWith
|
7 |
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
8 |
+
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS, CUSTOM_PATH = \
|
9 |
+
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS', 'CUSTOM_PATH')
|
10 |
|
11 |
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
12 |
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
|
|
186 |
threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
|
187 |
|
188 |
auto_opentab_delay()
|
189 |
+
demo.queue(concurrency_count=CONCURRENT_COUNT)
|
190 |
+
run_gradio(demo, auth=AUTHENTICATION, favicon_path="docs/logo.png", port=PORT, custom_path=CUSTOM_PATH)
|
191 |
|
192 |
if __name__ == "__main__":
|
193 |
main()
|
194 |
+
|
195 |
+
|
toolbox.py
CHANGED
@@ -520,3 +520,14 @@ class DummyWith():
|
|
520 |
|
521 |
def __exit__(self, exc_type, exc_value, traceback):
|
522 |
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
520 |
|
521 |
def __exit__(self, exc_type, exc_value, traceback):
|
522 |
return
|
523 |
+
|
524 |
+
def run_gradio(demo, auth, favicon_path, port, custom_path):
    """Serve *demo* by mounting it on a FastAPI app under *custom_path*
    and launching uvicorn on ``0.0.0.0:port``.

    Args:
        demo: the gradio demo object to mount (passed to gr.mount_gradio_app).
        auth: authentication credentials forwarded to gr.mount_gradio_app.
        favicon_path: favicon file path forwarded to gr.mount_gradio_app.
        port: TCP port for uvicorn to listen on.
        custom_path: URL sub-path where the gradio app is mounted.

    NOTE(review): uvicorn.run is expected to block until the server is
    stopped, so nothing after this call in the caller will run meanwhile.
    """
    # Local imports: uvicorn/fastapi are only required when this helper
    # is actually used, not at module import time.
    import uvicorn
    import gradio as gr
    from fastapi import FastAPI
    app = FastAPI()

    # Placeholder root route returning a dummy JSON payload.
    # NOTE(review): when custom_path == "/" this route and the gradio mount
    # both target "/" — confirm which one takes precedence.
    @app.get("/")
    def read_main():
        return {"message": "NULL"}

    # Mount the gradio UI onto the FastAPI app at the requested sub-path.
    app = gr.mount_gradio_app(app, demo, path=custom_path, auth=auth, favicon_path=favicon_path)
    uvicorn.run(app, host="0.0.0.0", port=port)