s3nh committed on
Commit
1150d47
1 Parent(s): 7a0dab9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -15,11 +15,11 @@ def load_model(name: str) -> Any:
15
def load_tokenizer(name: str) -> Any:
    """Fetch the pretrained tokenizer matching *name* from the HuggingFace hub."""
    tokenizer = AutoTokenizer.from_pretrained(name)
    return tokenizer
17
 
18
def create_generator(temperature: float = 1.0, top_p: float = 0.75, num_beams: int = 4):
    """Build a GenerationConfig for text generation.

    The previously hard-coded sampling constants are promoted to keyword
    parameters whose defaults equal the old values, so existing zero-argument
    callers (``create_generator()``) behave exactly as before while new
    callers may tune the sampling settings.

    Args:
        temperature: Softmax temperature used when sampling (default 1.0).
        top_p: Nucleus-sampling probability mass (default 0.75).
        num_beams: Beam-search width (default 4).

    Returns:
        A configured ``GenerationConfig`` instance.
    """
    return GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        num_beams=num_beams,
    )
24
 
25
  def generate_prompt(instruction, input=None):
@@ -46,7 +46,7 @@ def generate_prompt(instruction, input=None):
46
  generation_config = create_generator()
47
 
48
 
49
- def evaluate(instruction, input, model, tokenizer):
50
  prompt = generate_prompt(instruction, input)
51
  inputs = tokenizer(prompt, return_tensors="pt")
52
  input_ids = inputs["input_ids"]
@@ -64,9 +64,10 @@ def evaluate(instruction, input, model, tokenizer):
64
  return ' '.join(el for el in result)
65
 
66
def inference(model_name, text, input, temperature, top_p, num_beams):
    """Load the named model and tokenizer, then evaluate *text* with them.

    NOTE(review): ``temperature``, ``top_p`` and ``num_beams`` are accepted
    here but never used — confirm whether they should be forwarded into the
    generation step.
    """
    loaded_model = load_model(model_name)
    loaded_tokenizer = load_tokenizer(model_name)
    result = evaluate(instruction = text, input = input, model = loaded_model, tokenizer = loaded_tokenizer)
    return result
71
 
72
  def choose_model(name):
 
15
def load_tokenizer(name: str) -> Any:
    """Resolve *name* to a pretrained AutoTokenizer instance."""
    return AutoTokenizer.from_pretrained(name)
17
 
18
def create_generator(temperature=1.0, top_p=0.75, num_beams=4):
    """Build a GenerationConfig from the given sampling settings.

    Defaults mirror the values that were hard-coded before this change; this
    keeps the existing module-level call ``generation_config = create_generator()``
    (file line 46, left unchanged by the commit) working — without defaults
    that call raises ``TypeError`` at import time.

    Args:
        temperature: Softmax temperature used when sampling (default 1.0).
        top_p: Nucleus-sampling probability mass (default 0.75).
        num_beams: Beam-search width (default 4).

    Returns:
        A configured ``GenerationConfig`` instance.
    """
    return GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        num_beams=num_beams,
    )
24
 
25
  def generate_prompt(instruction, input=None):
 
46
  generation_config = create_generator()
47
 
48
 
49
+ def evaluate(instruction, input, model, tokenizer, generation_config):
50
  prompt = generate_prompt(instruction, input)
51
  inputs = tokenizer(prompt, return_tensors="pt")
52
  input_ids = inputs["input_ids"]
 
64
  return ' '.join(el for el in result)
65
 
66
def inference(model_name, text, input, temperature, top_p, num_beams):
    """Run one generation round: build the config, load model and tokenizer,
    then evaluate *text* (with optional *input* context) and return the output."""
    config = create_generator(temperature, top_p, num_beams)
    mdl = load_model(model_name)
    tok = load_tokenizer(model_name)
    return evaluate(instruction = text, input = input, model = mdl, tokenizer = tok, generation_config = config)
72
 
73
  def choose_model(name):