shenchucheng committed
Commit: 0d5e9a2
Parent: dbd93d7

download html source from remote

Files changed:
- .dockerignore +1 -0
- .gitignore +2 -1
- Dockerfile +11 -0
- app.py +13 -7
- config/config.yaml +18 -3
- config/template.yaml +84 -0
- init.sh +28 -0
.dockerignore CHANGED
@@ -7,3 +7,4 @@ data
 geckodriver.log
 logs
 storage
+static
.gitignore CHANGED
@@ -170,4 +170,5 @@ output
 tmp.png
 
 storage/*
-logs
+logs
+static
Dockerfile CHANGED
@@ -1,4 +1,13 @@
 # Use a base image with Python3.9 and Nodejs20 slim version
+FROM nikolaik/python-nodejs:python3.9-nodejs20-slim as static
+
+WORKDIR /app
+
+COPY init.sh init.sh
+
+RUN chmod u+x init.sh && ./init.sh
+
+
 FROM nikolaik/python-nodejs:python3.9-nodejs20-slim
 
 USER root
@@ -27,5 +36,7 @@ RUN pip install --no-cache-dir -r requirements.txt && \
     mkdir -p /app/storage && chmod 777 /app/storage
 
 COPY . .
+COPY --from=static /app/static /app/static
+COPY config/template.yaml static/config.yaml
 
 CMD ["python", "app.py"]
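The Dockerfile change introduces a two-stage build: a throwaway `static` stage runs init.sh to download and unpack the prebuilt frontend, and the runtime stage copies only the resulting /app/static directory, so wget and the tarball never reach the final image. The final COPY then overwrites the bundled static/config.yaml with the sanitized config/template.yaml. A hypothetical smoke check (not part of this commit) for what app.py expects inside the final image:

    # Hypothetical sanity check, e.g. run inside the built image; paths follow the diff above.
    import pathlib

    static = pathlib.Path("static")
    assert (static / "config.yaml").is_file()  # config/template.yaml copied over the bundled config
    assert (static / "index.html").is_file()   # StaticFiles(html=True) serves index.html at "/"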
app.py CHANGED
@@ -152,8 +152,10 @@ async def create_message(req_model: NewMsg, request: Request):
     """
     Session message stream
     """
+    tc_id = 0
     try:
-
+        exclude_keys = CONFIG.get("SERVER_METAGPT_CONFIG_EXCLUDE", [])
+        config = {k.upper(): v for k, v in req_model.config.items() if k not in exclude_keys}
         set_context(config, uuid.uuid4().hex)
 
         msg_queue = deque()
@@ -190,7 +192,6 @@ async def create_message(req_model: NewMsg, request: Request):
 
         asyncio.create_task(stop_if_disconnect())
 
-        tc_id = 0
         while True:
             tc_id += 1
             if await request.is_disconnected():
@@ -223,6 +224,8 @@ async def create_message(req_model: NewMsg, request: Request):
             yield think_act_prompt.prompt + "\n\n"
             answer.add_think_act(think_act_prompt)
             yield answer.prompt + "\n\n"  # Notify the front-end that the message is complete.
+    except asyncio.CancelledError:
+        task.cancel()
     except Exception as ex:
         description = str(ex)
         answer = traceback.format_exc()
@@ -236,7 +239,9 @@ async def create_message(req_model: NewMsg, request: Request):
         think_act_prompt = ThinkActPrompt(step=step)
         yield think_act_prompt.prompt + "\n\n"
     finally:
-
+        CONFIG.WORKSPACE_PATH: pathlib.Path
+        if CONFIG.WORKSPACE_PATH.exists():
+            shutil.rmtree(CONFIG.WORKSPACE_PATH)
 
 
 default_llm_stream_log = partial(print, end="")
@@ -270,7 +275,7 @@ app = FastAPI()
 app.mount(
     "/storage",
     StaticFiles(directory="./storage/"),
-    name="
+    name="storage",
 )
 
 app.add_api_route(
@@ -283,8 +288,8 @@ app.add_api_route(
 
 app.mount(
     "/",
-    StaticFiles(directory="./
-    name="
+    StaticFiles(directory="./static/", html=True, follow_symlink=True),
+    name="static",
 )
 
 
@@ -292,7 +297,8 @@ set_llm_stream_logfunc(llm_stream_log)
 
 
 def main():
-
+    server_config = CONFIG.get("SERVER_UVICORN", {})
+    uvicorn.run(app="__main__:app", **server_config)
 
 
 if __name__ == "__main__":
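Two behavioral notes on the app.py hunks. The request-supplied config is now upper-cased and filtered against SERVER_METAGPT_CONFIG_EXCLUDE before set_context runs, so server-controlled keys cannot be overridden from the browser; and the new asyncio.CancelledError handler plus the finally-block shutil.rmtree tear down the per-session workspace when a client disconnects. A minimal sketch of the filter (sample keys and values are hypothetical, not from the repo):

    # Stand-ins for CONFIG and req_model.config; only the comprehension mirrors the diff.
    exclude_keys = ["WORKSPACE_PATH", "MMDC"]  # SERVER_METAGPT_CONFIG_EXCLUDE
    request_config = {"OPENAI_API_KEY": "sk-xxx", "WORKSPACE_PATH": "/tmp/x"}

    config = {k.upper(): v for k, v in request_config.items() if k not in exclude_keys}
    print(config)  # {'OPENAI_API_KEY': 'sk-xxx'} -- WORKSPACE_PATH is dropped

Note that the membership test runs before .upper(), so the exclusion only matches keys the client already sends in upper case.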
config/config.yaml CHANGED
@@ -94,8 +94,8 @@ RPM: 10
 
 #### for Mermaid CLI
 ## If you installed mmdc (Mermaid CLI) only for metagpt then enable the following configuration.
-#PUPPETEER_CONFIG: "./config/puppeteer-config.json"
-#MMDC: "./node_modules/.bin/mmdc"
+# PUPPETEER_CONFIG: "./config/puppeteer-config.json"
+# MMDC: "./node_modules/.bin/mmdc"
 
 
 ### for calc_usage
@@ -110,7 +110,7 @@ RPM: 10
 # MERMAID_ENGINE: nodejs
 
 ### browser path for pyppeteer engine, support Chrome, Chromium,MS Edge
-#PYPPETEER_EXECUTABLE_PATH: "/usr/bin/google-chrome-stable"
+# PYPPETEER_EXECUTABLE_PATH: "/usr/bin/google-chrome-stable"
 
 ### for repair non-openai LLM's output when parse json-text if PROMPT_FORMAT=json
 ### due to non-openai LLM's output will not always follow the instruction, so here activate a post-process
@@ -134,3 +134,18 @@ LOCAL_BASE_URL: "storage"
 # S3_ENDPOINT_URL: ""
 # S3_BUCKET: ""
 # S3_SECURE: false
+
+SERVER_UVICORN:
+  host: 0.0.0.0
+  port: 7860
+
+SERVER_METAGPT_CONFIG_EXCLUDE:
+  - MERMAID_ENGINE
+  - PYPPETEER_EXECUTABLE_PATH
+  - DISABLE_LLM_PROVIDER_CHECK
+  - STORAGE_TYPE
+  - LOCAL_ROOT
+  - LOCAL_BASE_URL
+  - MMDC
+  - PUPPETEER_CONFIG
+  - WORKSPACE_PATH
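SERVER_UVICORN is passed straight through as keyword arguments to uvicorn.run by the new main() in app.py, so any uvicorn.run parameter (workers, log_level, and so on) can be added here. A minimal sketch of that flow, assuming just the two keys above:

    import uvicorn

    # Mirrors main() in app.py: the SERVER_UVICORN mapping is splatted into uvicorn.run.
    server_config = {"host": "0.0.0.0", "port": 7860}  # from SERVER_UVICORN above
    uvicorn.run(app="__main__:app", **server_config)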
config/template.yaml ADDED
@@ -0,0 +1,84 @@
+#### if OpenAI
+## The official OPENAI_BASE_URL is https://api.openai.com/v1
+## If the official OPENAI_BASE_URL is not available, we recommend using the [openai-forward](https://github.com/beidongjiedeguang/openai-forward).
+## Or, you can configure OPENAI_PROXY to access official OPENAI_BASE_URL.
+# OPENAI_BASE_URL: "https://api.openai.com/v1"
+# OPENAI_PROXY: "http://127.0.0.1:8118"
+# OPENAI_API_KEY: "YOUR_API_KEY" # set the value to sk-xxx if you host the openai interface for open llm model
+# OPENAI_API_MODEL: "gpt-4-1106-preview"
+# MAX_TOKENS: 4096
+# RPM: 10
+
+#### if Spark
+#SPARK_APPID : "YOUR_APPID"
+#SPARK_API_SECRET : "YOUR_APISecret"
+#SPARK_API_KEY : "YOUR_APIKey"
+#DOMAIN : "generalv2"
+#SPARK_URL : "ws://spark-api.xf-yun.com/v2.1/chat"
+
+#### if Anthropic
+#ANTHROPIC_API_KEY: "YOUR_API_KEY"
+
+#### if AZURE, check https://github.com/openai/openai-cookbook/blob/main/examples/azure/chat.ipynb
+#OPENAI_API_TYPE: "azure"
+#OPENAI_BASE_URL: "YOUR_AZURE_ENDPOINT"
+#OPENAI_API_KEY: "YOUR_AZURE_API_KEY"
+#OPENAI_API_VERSION: "YOUR_AZURE_API_VERSION"
+#DEPLOYMENT_NAME: "YOUR_DEPLOYMENT_NAME"
+
+#### if zhipuai from `https://open.bigmodel.cn`. You can set here or export API_KEY="YOUR_API_KEY"
+# ZHIPUAI_API_KEY: "YOUR_API_KEY"
+
+#### if Google Gemini from `https://ai.google.dev/` and API_KEY from `https://makersuite.google.com/app/apikey`.
+#### You can set here or export GOOGLE_API_KEY="YOUR_API_KEY"
+# GEMINI_API_KEY: "YOUR_API_KEY"
+
+#### if use self-host open llm model with openai-compatible interface
+#OPEN_LLM_API_BASE: "http://127.0.0.1:8000/v1"
+#OPEN_LLM_API_MODEL: "llama2-13b"
+
+
+##### if use Fireworks api
+#FIREWORKS_API_KEY: "YOUR_API_KEY"
+#FIREWORKS_API_BASE: "https://api.fireworks.ai/inference/v1"
+#FIREWORKS_API_MODEL: "YOUR_LLM_MODEL" # example, accounts/fireworks/models/llama-v2-13b-chat
+
+#### if use self-host open llm model by ollama
+# OLLAMA_API_BASE: http://127.0.0.1:11434/api
+# OLLAMA_API_MODEL: llama2
+
+#### for Search
+
+## Supported values: serpapi/google/serper/ddg
+#SEARCH_ENGINE: serpapi
+
+## Visit https://serpapi.com/ to get key.
+#SERPAPI_API_KEY: "YOUR_API_KEY"
+
+## Visit https://console.cloud.google.com/apis/credentials to get key.
+#GOOGLE_API_KEY: "YOUR_API_KEY"
+## Visit https://programmablesearchengine.google.com/controlpanel/create to get id.
+#GOOGLE_CSE_ID: "YOUR_CSE_ID"
+
+## Visit https://serper.dev/ to get key.
+#SERPER_API_KEY: "YOUR_API_KEY"
+
+#### for TTS
+
+#AZURE_TTS_SUBSCRIPTION_KEY: "YOUR_API_KEY"
+#AZURE_TTS_REGION: "eastus"
+
+#### for Stable Diffusion
+## Use SD service, based on https://github.com/AUTOMATIC1111/stable-diffusion-webui
+#SD_URL: "YOUR_SD_URL"
+#SD_T2I_API: "/sdapi/v1/txt2img"
+
+#### for Execution
+#LONG_TERM_MEMORY: false
+
+### for repair non-openai LLM's output when parse json-text if PROMPT_FORMAT=json
+### due to non-openai LLM's output will not always follow the instruction, so here activate a post-process
+### repair operation on the content extracted from LLM's raw output. Warning, it improves the result but not fix all cases.
+# REPAIR_LLM_OUTPUT: false
+
+# PROMPT_FORMAT: json #json or markdown
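Every line in the template ships commented out, which carries a YAML quirk: a comments-only file parses to None rather than an empty mapping, so any consumer should normalize the result. A quick check, assuming PyYAML:

    import yaml

    with open("config/template.yaml") as f:
        data = yaml.safe_load(f)
    print(data)        # None: the file contains only comments, hence no keys
    data = data or {}  # normalize before use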
init.sh ADDED
@@ -0,0 +1,28 @@
+#!/usr/bin/bash
+set -ex
+
+while [[ $# -gt 0 ]]; do
+  key="$1"
+  case $key in
+    --update-config-template)
+      UPDATE_CONFIG_TEMPLATE=true
+      shift
+      ;;
+    *)
+      # unknown option
+      shift
+      ;;
+  esac
+done
+
+rm -rf static
+
+wget -O dist.tar.gz https://public-frontend-1300249583.cos.ap-nanjing.myqcloud.com/test-hp-metagpt-web/dist-20231228164728.tar.gz
+tar xvzf dist.tar.gz
+mv dist static
+rm dist.tar.gz
+
+if [ "$UPDATE_CONFIG_TEMPLATE" = true ]; then
+  rm static/config.yaml
+  ln -s ../config/template.yaml static/config.yaml
+fi
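init.sh is what the Dockerfile `static` stage executes; run locally as ./init.sh, or as ./init.sh --update-config-template to symlink config/template.yaml over the bundled config (presumably why the app.py mount passes follow_symlink=True). For illustration only, the same fetch-and-unpack step in stdlib Python, using the URL pinned in the script:

    import os
    import shutil
    import tarfile
    import urllib.request

    URL = ("https://public-frontend-1300249583.cos.ap-nanjing.myqcloud.com/"
           "test-hp-metagpt-web/dist-20231228164728.tar.gz")

    shutil.rmtree("static", ignore_errors=True)       # rm -rf static
    urllib.request.urlretrieve(URL, "dist.tar.gz")    # wget -O dist.tar.gz <url>
    with tarfile.open("dist.tar.gz", "r:gz") as tar:  # tar xvzf dist.tar.gz
        tar.extractall()
    shutil.move("dist", "static")                     # mv dist static
    os.remove("dist.tar.gz")                          # rm dist.tar.gz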