loubnabnl (HF staff) committed
Commit: 6a7d152
1 Parent(s): 03b3f7e

Update app.py

Files changed (1):
  1. app.py +5 -2
app.py CHANGED
@@ -6,6 +6,9 @@ import gradio as gr
 import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+import os
+
+HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
 MAX_MAX_NEW_TOKENS = 4096
 DEFAULT_MAX_NEW_TOKENS = 2048
@@ -23,8 +26,8 @@ if not torch.cuda.is_available():
 if torch.cuda.is_available():
     model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
     # model_id = "mistralai/Mistral-7B-Instruct-v0.2"
-    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto",load_in_4bit=True)
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto",load_in_4bit=True, token=HF_TOKEN)
+    tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
     tokenizer.use_default_system_prompt = False
 
 
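
For context, a minimal standalone sketch of the pattern this commit introduces: an access token is read from the HF_TOKEN environment variable (on a Space, typically set as a secret) and forwarded to from_pretrained so that Hub downloads are authenticated. This is an illustration, not the full app.py; it assumes a CUDA GPU, the bitsandbytes package for the load_in_4bit path, and a transformers release that accepts the token keyword (the successor to use_auth_token).

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Read the token from the environment; None falls back to anonymous access,
# which is rejected for gated or private repositories.
HF_TOKEN = os.environ.get("HF_TOKEN", None)

model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"

if torch.cuda.is_available():
    # 4-bit loading (via bitsandbytes) keeps the 8x7B MoE within a single-GPU
    # memory budget; the token authenticates the checkpoint download.
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",
        load_in_4bit=True,
        token=HF_TOKEN,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
    tokenizer.use_default_system_prompt = False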