sombochea committed on
Commit
ddb24fa
1 Parent(s): 649e596

Updated app

Browse files
Files changed (2) hide show
  1. app.py +9 -11
  2. requirements.txt +3 -1
app.py CHANGED
@@ -11,18 +11,7 @@ model = AutoModelForCausalLM.from_pretrained(
11
  torch_dtype="auto",
12
  ).to("cuda" if torch.cuda.is_available() else "cpu") # Check for GPU availability
13
 
14
- # Define the Gradio interface
15
- iface = gr.Interface(
16
- fn=generate_code,
17
- inputs=[gr.Textbox(lines=2, placeholder="Enter your Python code prompt")],
18
- outputs="textbox",
19
- title="Python Code Completion",
20
- description="Generate code completions using a large language model.",
21
- )
22
-
23
  # Define the main function for code generation
24
-
25
-
26
  def generate_code(prompt):
27
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
28
  tokens = model.generate(
@@ -34,6 +23,15 @@ def generate_code(prompt):
34
  generated_code = tokenizer.decode(tokens[0], skip_special_tokens=True)
35
  return generated_code
36
 
 
 
 
 
 
 
 
 
 
37
 
38
  # Launch the Gradio app
39
  iface.launch()
 
11
  torch_dtype="auto",
12
  ).to("cuda" if torch.cuda.is_available() else "cpu") # Check for GPU availability
13
 
 
 
 
 
 
 
 
 
 
14
  # Define the main function for code generation
 
 
15
  def generate_code(prompt):
16
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
17
  tokens = model.generate(
 
23
  generated_code = tokenizer.decode(tokens[0], skip_special_tokens=True)
24
  return generated_code
25
 
26
+ # Define the Gradio interface
27
+ iface = gr.Interface(
28
+ fn=generate_code,
29
+ inputs=[gr.Textbox(lines=2, placeholder="Enter your Python code prompt")],
30
+ outputs="textbox",
31
+ title="Python Code Completion",
32
+ description="Generate code completions using a large language model.",
33
+ )
34
+
35
 
36
  # Launch the Gradio app
37
  iface.launch()
requirements.txt CHANGED
@@ -1,3 +1,5 @@
1
- torch
2
  gradio
 
3
  transformers
 
 
 
 
1
  gradio
2
+ torch
3
  transformers
4
+ einops
5
+ accelerate