:pencil: [Doc] Readme: New available models
- README.md +2 -2
- constants/models.py +2 -2
README.md
CHANGED
@@ -17,8 +17,8 @@ Project link: https://github.com/Hansimov/hf-llm-api
 
 ## Features
 
-- Available Models (2024/04/
-- `mistral-7b`, `mixtral-8x7b`, `nous-mixtral-8x7b`, `gemma-7b`, `gpt-3.5-turbo`
+- Available Models (2024/04/20):
+- `mistral-7b`, `mixtral-8x7b`, `nous-mixtral-8x7b`, `gemma-7b`, `command-r-plus`, `llama3-70b`, `zephyr-141b`, `gpt-3.5-turbo`
 - Adaptive prompt templates for different models
 - Support OpenAI API format
 - Enable api endpoint via official `openai-python` package
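The `openai-python` bullet above can be exercised against any of the newly listed models. A minimal sketch follows, assuming the service exposes an OpenAI-compatible endpoint at a local base URL; the port, API key placeholder, and prompt are illustrative and not taken from this commit.

# Minimal sketch: querying the API with the official openai-python package.
# The base URL/port and API key are assumptions; adjust to your deployment.
from openai import OpenAI

client = OpenAI(
    base_url="http://127.0.0.1:23333/v1",  # assumed local endpoint of hf-llm-api
    api_key="sk-xxx",  # placeholder; the proxy may not validate keys
)

response = client.chat.completions.create(
    model="llama3-70b",  # one of the models added in this commit
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)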
constants/models.py
CHANGED
@@ -18,7 +18,7 @@ STOP_SEQUENCES_MAP = {
     "mixtral-8x7b": "</s>",
     "nous-mixtral-8x7b": "<|im_end|>",
     "mistral-7b": "</s>",
-    "openchat-3.5": "<|end_of_turn|>",
+    # "openchat-3.5": "<|end_of_turn|>",
     "gemma-7b": "<eos>",
     "command-r-plus": "<|END_OF_TURN_TOKEN|>",
 }
@@ -27,7 +27,7 @@ TOKEN_LIMIT_MAP = {
     "mixtral-8x7b": 32768,
     "nous-mixtral-8x7b": 32768,
     "mistral-7b": 32768,
-    "openchat-3.5": 8192,
+    # "openchat-3.5": 8192,
     "gemma-7b": 8192,
     "command-r-plus": 32768,
     "llama3-70b": 8192,
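For context, commenting out the `openchat-3.5` entries means lookups against these two dicts no longer find that model. A hypothetical consumer is sketched below; the helper names and the 4096-token fallback are illustrative and not part of the repository.

# Hypothetical consumer of the maps changed above; helper names and the
# fallback value are illustrative only.
from constants.models import STOP_SEQUENCES_MAP, TOKEN_LIMIT_MAP

def stop_sequence_for(model: str) -> str | None:
    # "openchat-3.5" now returns None because its entry is commented out.
    return STOP_SEQUENCES_MAP.get(model)

def token_limit_for(model: str, default: int = 4096) -> int:
    # Unknown or removed models fall back to a conservative default.
    return TOKEN_LIMIT_MAP.get(model, default)

print(stop_sequence_for("command-r-plus"))  # <|END_OF_TURN_TOKEN|>
print(token_limit_for("openchat-3.5"))      # 4096 (fallback)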