Amitontheweb committed on
Commit
971aa39
1 Parent(s): 4dcdb6a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -1
app.py CHANGED
@@ -29,7 +29,10 @@ token = os.environ.get("HF_TOKEN")
29
  global chosen_strategy
30
 
31
  def generate(input_text, number_steps, number_beams, number_beam_groups, diversity_penalty, length_penalty, num_return_sequences, temperature, no_repeat_ngram_size, repetition_penalty, early_stopping, beam_temperature, top_p, top_k,penalty_alpha,top_p_box,top_k_box,strategy_selected,model_selected):
32
-
 
 
 
33
  chosen_strategy = strategy_selected
34
  inputs = tokenizer(input_text, return_tensors="pt")
35
 
@@ -160,6 +163,9 @@ def select_model(model_selected):
160
  def load_model ():
161
 
162
  global model_name
 
 
 
163
  tokenizer = AutoTokenizer.from_pretrained(model_name)
164
  model = AutoModelForCausalLM.from_pretrained(model_name)
165
 
 
29
  global chosen_strategy
30
 
31
  def generate(input_text, number_steps, number_beams, number_beam_groups, diversity_penalty, length_penalty, num_return_sequences, temperature, no_repeat_ngram_size, repetition_penalty, early_stopping, beam_temperature, top_p, top_k,penalty_alpha,top_p_box,top_k_box,strategy_selected,model_selected):
32
+
33
+ global tokenizer
34
+ global model
35
+
36
  chosen_strategy = strategy_selected
37
  inputs = tokenizer(input_text, return_tensors="pt")
38
 
 
163
  def load_model ():
164
 
165
  global model_name
166
+ global tokenizer
167
+ global model
168
+
169
  tokenizer = AutoTokenizer.from_pretrained(model_name)
170
  model = AutoModelForCausalLM.from_pretrained(model_name)
171