Update app.py
app.py
CHANGED
@@ -46,7 +46,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 # model_id = "mistralai/Mistral-7B-v0.3"
 
-model_id = "CohereForAI/aya-23-
+model_id = "CohereForAI/aya-23-8B"
 
 
 tokenizer = AutoTokenizer.from_pretrained(
@@ -59,11 +59,11 @@ accelerator = Accelerator()
 
 model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
     # torch_dtype= torch.uint8,
-    torch_dtype=torch.float16,
-
-
+    # torch_dtype=torch.float16,
+    # load_in_8bit=True,
+    # # torch_dtype=torch.fl,
     attn_implementation="flash_attention_2",
-    low_cpu_mem_usage=True,
+    # low_cpu_mem_usage=True,
     # device_map='cuda',
     # device_map=accelerator.device_map,
 
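For reference, after this commit the loading path reduces to roughly the minimal sketch below. Assumptions: token is the Hugging Face access token already defined earlier in app.py (the literal value here is a placeholder), and the commented-out torch_dtype, load_in_8bit and low_cpu_mem_usage options are simply omitted; this is only the model-setup portion of the app, not a full reproduction.

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "CohereForAI/aya-23-8B"
token = "hf_..."  # placeholder: the real token is defined elsewhere in app.py

tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)

# With torch_dtype, load_in_8bit and low_cpu_mem_usage now commented out,
# the model loads in its default dtype; only FlashAttention-2 is still requested.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=token,
    attn_implementation="flash_attention_2",
)

One caveat: FlashAttention-2 generally expects fp16/bf16 weights, so with torch_dtype left commented out, transformers may warn or refuse to use it depending on the version; re-enabling torch_dtype=torch.float16 avoids that.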