mrm8488 committed on
Commit: 4c0932e
Parent: cce0122

Update app.py

Files changed (1): app.py (+2, -5)
app.py CHANGED
@@ -1,10 +1,7 @@
-import torch
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed, pipeline
 
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
-title = "Santacoder 🎅 bash/shell 🐚 Completion"
+title = "SantaCoder 🎅 bash/shell 🐚 Completion"
 description = "This is a subspace to make code generation with [SantaCoder fine-tuned on The Stack bash/shell](https://huggingface.co/mrm8488/santacoder-finetuned-the-stack-bash-4)"
 EXAMPLE_0 = "#!/bin/bash\n# This script removes files larger than 2MB in the current folder\nfind ."
 EXAMPLE_1 = "#!/bin/bash\n\n# This script send an email\nto=”admin@example.com”\nsubject=”Greeting”\nmsg=”Welcome to our site”\n"
@@ -18,7 +15,7 @@ model = AutoModelForCausalLM.from_pretrained("mrm8488/santacoder-finetuned-the-s
 
 def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
     set_seed(seed)
-    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
+    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)
     generated_text = pipe(gen_prompt, do_sample=True, top_p=0.95, temperature=temperature, max_new_tokens=max_tokens)[0]['generated_text']
     return generated_text
 
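Net effect of the commit: the torch import and the runtime CPU/GPU fallback are removed, the title typo is fixed, and the pipeline is pinned to the first CUDA device with device=0. Below is a minimal sketch of the resulting app.py after this commit. The diff does not show the tokenizer/model loading arguments or the Gradio interface wiring, so those parts are assumptions: the checkpoint name is reconstructed from the description URL, trust_remote_code is assumed (SantaCoder checkpoints ship custom model code), and the gr.Interface block at the end is purely hypothetical.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed, pipeline

title = "SantaCoder 🎅 bash/shell 🐚 Completion"
description = "This is a subspace to make code generation with [SantaCoder fine-tuned on The Stack bash/shell](https://huggingface.co/mrm8488/santacoder-finetuned-the-stack-bash-4)"
EXAMPLE_0 = "#!/bin/bash\n# This script removes files larger than 2MB in the current folder\nfind ."

# Loading call reconstructed from the truncated hunk header; trust_remote_code
# is an assumption here, since the loading arguments are not part of this diff.
CKPT = "mrm8488/santacoder-finetuned-the-stack-bash-4"
tokenizer = AutoTokenizer.from_pretrained(CKPT, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(CKPT, trust_remote_code=True)

def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
    set_seed(seed)
    # device=0 pins generation to the first CUDA GPU; the old
    # torch.cuda.is_available() fallback to CPU was removed in this commit,
    # so a CPU-only host would now raise an error on this call.
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)
    generated_text = pipe(gen_prompt, do_sample=True, top_p=0.95,
                          temperature=temperature,
                          max_new_tokens=max_tokens)[0]["generated_text"]
    return generated_text

# Hypothetical Gradio wiring (not shown in the diff):
demo = gr.Interface(
    fn=code_generation,
    inputs=[gr.Textbox(value=EXAMPLE_0, label="Prompt"),
            gr.Slider(8, 256, value=64, step=8, label="Max new tokens")],
    outputs=gr.Textbox(label="Completion"),
    title=title,
    description=description,
)
demo.launch()

Hardcoding device=0 trades portability for simplicity: it suits a Space pinned to GPU hardware, but unlike the removed torch-based check, the script no longer falls back to CPU-only machines.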