arampacha committed on
Commit
6842806
1 Parent(s): ced24dc

switch back to 125M

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -2,7 +2,8 @@ import gradio as gr
2
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
- model_name = "flax-community/gpt-code-clippy-1.3B-apps-alldata-2"
 
6
  model = AutoModelForCausalLM.from_pretrained(model_name)
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  tokenizer.pad_token = tokenizer.eos_token
@@ -39,14 +40,14 @@ def generate_solution(question, starter_code="", temperature=1., num_beams=1):
39
  top_p=0.95,
40
  pad_token_id=tokenizer.pad_token_id,
41
  early_stopping=True,
42
- temperature=1.,
43
  num_beams=int(num_beams),
44
  no_repeat_ngram_size=None,
45
  repetition_penalty=None,
46
  num_return_sequences=None,
47
  )
48
 
49
- return format_outputs(tokenizer.decode(output[0][start:]).strip())
50
 
51
 
52
  _EXAMPLES = [
 
2
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
+ # model_name = "flax-community/gpt-code-clippy-1.3B-apps-alldata"
6
+ model_name = "flax-community/gpt-code-clippy-125M-apps-alldata"
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
9
  tokenizer.pad_token = tokenizer.eos_token
 
40
  top_p=0.95,
41
  pad_token_id=tokenizer.pad_token_id,
42
  early_stopping=True,
43
+ temperature=temperature,
44
  num_beams=int(num_beams),
45
  no_repeat_ngram_size=None,
46
  repetition_penalty=None,
47
  num_return_sequences=None,
48
  )
49
 
50
+ return format_outputs(tokenizer.decode(output[0][start:], skip_special_tokens=True).strip())
51
 
52
 
53
  _EXAMPLES = [