add HF token
app.py
CHANGED
@@ -1,3 +1,5 @@
+import os
+
 import gradio as gr
 from huggingface_hub import InferenceClient
 
@@ -7,10 +9,12 @@ from transformers import AutoTokenizer
 from model.modeling_llamask import LlamaskForCausalLM
 from model.tokenizer_utils import generate_custom_mask, prepare_tokenizer
 
+
+access_token = os.getenv("HF_TOKEN")
 model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
 device = 'cpu'
 
-model = LlamaskForCausalLM.from_pretrained(model_id, torch_dtype= torch.bfloat16)
+model = LlamaskForCausalLM.from_pretrained(model_id, torch_dtype= torch.bfloat16, access_token=access_token)
 model = model.to(device)
 tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
 
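The change above wires the Space's HF_TOKEN secret (exposed to the app as an environment variable) into the model load so the gated meta-llama/Meta-Llama-3.1-8B-Instruct checkpoint can be downloaded. As a minimal sketch only, not the Space's actual code: with stock transformers the authentication keyword is token=, while the diff passes access_token= to the custom LlamaskForCausalLM class; an equivalent using the standard AutoModelForCausalLM API (swapped in here for illustration) would look roughly like this.

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Spaces expose repository secrets as environment variables, so the
# token added in this commit is read the same way here.
access_token = os.getenv("HF_TOKEN")

model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
device = "cpu"

# With stock transformers, gated repos are authenticated via token=;
# access_token= in the diff above is assumed to be handled by the
# custom LlamaskForCausalLM wrapper.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    token=access_token,
)
model = model.to(device)
tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left", token=access_token)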
model/__pycache__/modeling_llamask.cpython-310.pyc
CHANGED
Binary files a/model/__pycache__/modeling_llamask.cpython-310.pyc and b/model/__pycache__/modeling_llamask.cpython-310.pyc differ
model/__pycache__/tokenizer_utils.cpython-310.pyc
ADDED
Binary file (2.36 kB).