lvwerra HF staff committed on
Commit
b51f9d1
1 Parent(s): 1f37744

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -10,7 +10,7 @@ a 1.1B parameter model for code generation in Python, Java & JavaScript. The mod
10
  with the <span style='color: #ff75b3;'>&lt;FILL-HERE&gt;</span> token.</span>"""
11
 
12
  token = os.environ["HUB_TOKEN"]
13
- device="cuda"
14
 
15
 
16
  FIM_PREFIX = "<fim-prefix>"
@@ -30,7 +30,7 @@ tokenizer_fim.add_special_tokens({
30
 
31
  tokenizer = AutoTokenizer.from_pretrained("bigcode/christmas-models", use_auth_token=token)
32
  model = AutoModelForCausalLM.from_pretrained("bigcode/christmas-models", trust_remote_code=True, use_auth_token=token).to(device)
33
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
34
 
35
  def post_processing(prompt, completion):
36
  completion = "<span style='color: #ff75b3;'>" + completion + "</span>"
 
10
  with the <span style='color: #ff75b3;'>&lt;FILL-HERE&gt;</span> token.</span>"""
11
 
12
  token = os.environ["HUB_TOKEN"]
13
+ device="cuda:0"
14
 
15
 
16
  FIM_PREFIX = "<fim-prefix>"
 
30
 
31
  tokenizer = AutoTokenizer.from_pretrained("bigcode/christmas-models", use_auth_token=token)
32
  model = AutoModelForCausalLM.from_pretrained("bigcode/christmas-models", trust_remote_code=True, use_auth_token=token).to(device)
33
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
34
 
35
  def post_processing(prompt, completion):
36
  completion = "<span style='color: #ff75b3;'>" + completion + "</span>"