lewtun HF staff committed on
Commit
1d89618
1 Parent(s): ebddf99

Pass token

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -26,13 +26,15 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
 # Load peft config for pre-trained checkpoint etc.
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_id = "HuggingFaceH4/llama-se-rl-ed"
+model_id = "trl-lib/llama-se-rl-merged"
 if device == "cpu":
-    model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True)
+    model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, use_auth_token=HF_TOKEN)
 else:
     # torch_dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
     # model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch_dtype, device_map="auto")
-    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id, device_map="auto", load_in_8bit=True, use_auth_token=HF_TOKEN
+    )
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)