Kevin Fink committed on
Commit
7c58563
·
1 Parent(s): 909a043
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -244,7 +244,11 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
244
  return 'DONE!'#train_result
245
 
246
  # Define Gradio interface
247
- def predict(text, model):
 
 
 
 
248
  tokenizer = AutoTokenizer.from_pretrained('shorecode/t5-efficient-tiny-nh8-summarizer')
249
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
250
  outputs = model(inputs)
@@ -272,8 +276,7 @@ def run_train(text):
272
  bias="none" # Bias handling
273
  )
274
  #model = get_peft_model(model, lora_config)
275
- #result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
276
- result = predict(text, model)
277
  return result
278
  # Create Gradio interface
279
  try:
@@ -295,7 +298,7 @@ try:
295
  )
296
  '''
297
  iface = gr.Interface(
298
- fn=run_train,
299
  inputs=[
300
  gr.Textbox(label="Query"),
301
  ],
 
244
  return 'DONE!'#train_result
245
 
246
  # Define Gradio interface
247
+ @spaces.GPU
248
+ def predict(text):
249
+ config = AutoConfig.from_pretrained("shorecode/t5-efficient-tiny-nh8-summarizer")
250
+ model = AutoModelForSeq2SeqLM.from_config(config)
251
+ initialize_weights(model)
252
  tokenizer = AutoTokenizer.from_pretrained('shorecode/t5-efficient-tiny-nh8-summarizer')
253
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
254
  outputs = model(inputs)
 
276
  bias="none" # Bias handling
277
  )
278
  #model = get_peft_model(model, lora_config)
279
+ result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
 
280
  return result
281
  # Create Gradio interface
282
  try:
 
298
  )
299
  '''
300
  iface = gr.Interface(
301
+ fn=predict,
302
  inputs=[
303
  gr.Textbox(label="Query"),
304
  ],