support openllm
- .env.example +3 -0
- Makefile +6 -0
.env.example

```diff
@@ -5,6 +5,9 @@
 LLM_MODEL_TYPE=huggingface
 # LLM_MODEL_TYPE=mosaicml
 # LLM_MODEL_TYPE=stablelm
+# LLM_MODEL_TYPE=openllm
+
+OPENLLM_SERVER_URL=
 
 OPENAI_API_KEY=
 
```
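The new variables extend the existing backend switch: set `LLM_MODEL_TYPE=openllm` and point `OPENLLM_SERVER_URL` at a running server. As a minimal sketch of how an app might consume them, assuming it routes model selection through LangChain's `OpenLLM` wrapper (the `load_llm` helper is hypothetical; the consuming code is not part of this diff):

```python
import os

from dotenv import load_dotenv        # python-dotenv, the usual companion to .env files
from langchain.llms import OpenLLM    # LangChain's client for a running OpenLLM server

load_dotenv()  # pull LLM_MODEL_TYPE / OPENLLM_SERVER_URL into the environment

def load_llm():
    """Hypothetical helper: choose an LLM backend from LLM_MODEL_TYPE."""
    if os.getenv("LLM_MODEL_TYPE") == "openllm":
        # Talk to the server started by `make openllm` over HTTP,
        # e.g. OPENLLM_SERVER_URL=http://localhost:3000
        return OpenLLM(server_url=os.environ["OPENLLM_SERVER_URL"], server_type="http")
    raise NotImplementedError("other backends omitted from this sketch")

llm = load_llm()
print(llm("What is OpenLLM?"))  # plain completion call via LangChain's LLM interface
```

With the server from the `make openllm` target below running locally, the URL would typically be `http://localhost:3000`, OpenLLM's default.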
Makefile

```diff
@@ -18,6 +18,12 @@ chat:
 tele:
 	python telegram_bot.py
 
+openllm:
+ifeq ("$(PORT)", "")
+	openllm start llama --model-id meta-llama/Llama-2-7b-chat-hf
+else
+	openllm start llama --model-id meta-llama/Llama-2-7b-chat-hf --port=${PORT}
+endif
 ingest:
 	python ingest.py
 
```
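Usage: `make openllm` starts a Llama-2 chat server on OpenLLM's default port, and `make openllm PORT=8080` passes an explicit port through `--port`. One subtlety worth noting: `ifeq`/`else`/`endif` are GNU Make conditionals evaluated when the Makefile is parsed, so the un-indented directives select which of the two tab-indented recipe lines the `openllm` target ends up with. Serving `meta-llama/Llama-2-7b-chat-hf` also assumes access to the gated weights on Hugging Face.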