Update app.py
app.py (CHANGED)

```diff
@@ -13,20 +13,20 @@ total_count=0
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# DeepSeek-…
+# DeepSeek-33B-Chat
 
-This Space demonstrates model [DeepSeek-Coder](https://huggingface.co/deepseek-ai/deepseek-coder-…
+This Space demonstrates model [DeepSeek-Coder](https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct) by DeepSeek, a code model with 33B parameters fine-tuned for chat instructions.
 """
 
-…
-…
+if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 
-…
-…
-…
-…
-…
+if torch.cuda.is_available():
+    model_id = "deepseek-ai/deepseek-coder-33b-instruct"
+    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.use_default_system_prompt = False
 
 
 
```

(The removed lines' text was truncated in this view; `…` marks the unrecovered content.)
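The diff only touches the model-setup block; the rest of app.py is not shown in this commit. For context, here is a minimal sketch of how a Space like this typically wires the loaded `model` and `tokenizer` into a streaming chat handler using the standard `transformers` streaming pattern. The function name, history format, and sampling defaults below are assumptions for illustration, not code from this commit; only `model`, `tokenizer`, and `MAX_INPUT_TOKEN_LENGTH` come from the diff above.

```python
# Hypothetical chat handler; assumes the globals `model`, `tokenizer`, and
# MAX_INPUT_TOKEN_LENGTH defined in the diff above are in scope.
from threading import Thread

from transformers import TextIteratorStreamer


def generate(message: str, chat_history: list[tuple[str, str]]) -> str:
    # Rebuild the conversation in the chat format the tokenizer expects.
    conversation = []
    for user, assistant in chat_history:
        conversation += [
            {"role": "user", "content": user},
            {"role": "assistant", "content": assistant},
        ]
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    # Keep only the most recent tokens if the prompt exceeds the configured cap.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    input_ids = input_ids.to(model.device)

    # Stream tokens as they are produced instead of waiting for the full
    # completion; generation runs in a background thread.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    Thread(
        target=model.generate,
        kwargs=dict(
            input_ids=input_ids,
            streamer=streamer,
            max_new_tokens=1024,  # assumed default, not from this commit
            do_sample=True,
            temperature=0.6,
            top_p=0.9,
        ),
    ).start()

    outputs = []
    for text in streamer:
        outputs.append(text)
    return "".join(outputs)
```

Trimming to the last `MAX_INPUT_TOKEN_LENGTH` tokens keeps the newest turns of the conversation when the history outgrows the context budget, which matches the 4096-token default read from the environment at the top of the hunk.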