arampacha committed
Commit 23a6073
1 Parent(s): 128097d

trying stuff

Files changed (1)
  1. app.py +13 -4
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "EleutherAI/gpt-neo-125M"
+model_name = "flax-community/gpt-neo-125M-apps"
 # define model and tokenizer
 model = AutoModelForCausalLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -9,8 +9,17 @@ tokenizer.pad_token = tokenizer.eos_token
 
 def generate_solution(prompt, **kwargs):
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids
-    start = len(input_ids)
-    output = model.generate(input_ids, pad_token_id=tokenizer.pad_token_id, *kwargs)
+    start = len(input_ids[0])
+    output = model.generate(input_ids, pad_token_id=tokenizer.pad_token_id, **kwargs)
     return tokenizer.decode(output[0][start:])
 
-gr.Interface(generate_solution, inputs="text", outputs="text").launch()
+inputs = [
+    gr.inputs.Textbox(placeholder="Define a problem here ...", lines=5)
+]
+
+gr.Interface(
+    generate_solution,
+    inputs=inputs,
+    outputs="text",
+    title="Coding problem solver",
+).launch()
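
For reference, a minimal sketch of how the updated generate_solution could be exercised outside the Gradio UI, with the definitions above in scope. The prompt text and the forwarded generation arguments (max_length, do_sample, temperature) are illustrative assumptions, not part of this commit; any standard transformers generate() argument can be passed through **kwargs.

# Hypothetical smoke test; run with the definitions from app.py in scope.
# Prompt wording and generation kwargs below are assumptions, not taken
# from this commit.
print(generate_solution(
    "Write a function that returns the sum of two integers.",
    max_length=128,
    do_sample=True,
    temperature=0.8,
))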