alfonsovelp committed
Commit a87ee72
1 Parent(s): eca21d3

Update app.py

Files changed (1)
  1. app.py +8 -3
app.py CHANGED
@@ -2,13 +2,18 @@ import gradio as gr
  from transformers import AutoModelForCausalLM, AutoTokenizer
  from huggingface_hub import InferenceClient

+ # Environment variable for HF token
+ hf_token = "os.environ.get("HF_TOKEN")"
+
+ # Your model ID
  model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
- tokenizer = AutoTokenizer.from_pretrained(model_id)

- model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
+ # Load the tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_token)

+ # Load the model
+ model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=hf_token, device_map="auto")

-
  def format_prompt(message, history):
      prompt = "<s>"
      for user_prompt, bot_response in history:
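Note that the hf_token line added in this commit wraps the whole expression in quotes, so hf_token would hold the literal text os.environ.get("HF_TOKEN") rather than the token value (and the nested double quotes are invalid Python syntax); os is also never imported. A minimal corrected sketch of the same setup, assuming the token is exposed to the Space as an HF_TOKEN environment variable:

import os

from transformers import AutoModelForCausalLM, AutoTokenizer

# Read the Hugging Face access token from the environment (no quotes around the call)
hf_token = os.environ.get("HF_TOKEN")

model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# Authenticate the gated-model download with the token; use_auth_token is accepted here,
# though newer transformers releases prefer the token= argument
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=hf_token, device_map="auto")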