stillerman committed on
Commit
f306f8a
1 Parent(s): eba3bde

swapped to ruby model

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -29,7 +29,7 @@ tokenizer_fim.add_special_tokens({
29
  })
30
 
31
  tokenizer = AutoTokenizer.from_pretrained("bigcode/christmas-models", use_auth_token=token)
32
- model = AutoModelForCausalLM.from_pretrained("bigcode/christmas-models", trust_remote_code=True, use_auth_token=token).to(device)
33
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
34
 
35
  def post_processing(prompt, completion):
@@ -126,4 +126,5 @@ with demo:
126
  event = run.click(code_generation, [code, max_new_tokens, temperature, seed], output, api_name="predict")
127
  gr.HTML(label="Contact", value="<img src='https://huggingface.co/datasets/bigcode/admin/resolve/main/bigcode_contact.png' alt='contact' style='display: block; margin: auto; max-width: 800px;'>")
128
 
129
- demo.launch()
 
 
29
  })
30
 
31
  tokenizer = AutoTokenizer.from_pretrained("bigcode/christmas-models", use_auth_token=token)
32
+ model = AutoModelForCausalLM.from_pretrained("stillerman/santacoder-ruby", trust_remote_code=True, use_auth_token=token).to(device)
33
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
34
 
35
  def post_processing(prompt, completion):
 
126
  event = run.click(code_generation, [code, max_new_tokens, temperature, seed], output, api_name="predict")
127
  gr.HTML(label="Contact", value="<img src='https://huggingface.co/datasets/bigcode/admin/resolve/main/bigcode_contact.png' alt='contact' style='display: block; margin: auto; max-width: 800px;'>")
128
 
129
+ # demo.launch(share=True)
130
+ demo.launch()