vinayakdev committed on
Commit
267cd58
1 Parent(s): 94ffff8
Files changed (1) hide show
  1. generator.py +3 -0
generator.py CHANGED
@@ -26,6 +26,7 @@ import numpy as np
26
  from transformers import pipeline
27
  from transformers.pipelines import AggregationStrategy
28
  import pickle
 
29
 
30
  # sq_tokenizer = att.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
31
  # sq_model = alwm.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
@@ -35,6 +36,7 @@ import pickle
35
  hfmodel = alwm.from_pretrained("ThomasSimonini/t5-end2end-question-generation")
36
 
37
  hftokenizer = T5TokenizerFast.from_pretrained("t5-base")
 
38
  def run_model(input_string, **generator_args):
39
  generator_args = {
40
  "max_length": 256,
@@ -61,6 +63,7 @@ tokenizer = att.from_pretrained("ahotrod/albert_xxlargev1_squad2_512")
61
  model = amqa.from_pretrained("ahotrod/albert_xxlargev1_squad2_512")
62
  # al_model = pickle.load(open('models/al_model.sav', 'rb'))
63
  # al_tokenizer = pickle.load(open('models/al_tokenizer.sav', 'rb'))
 
64
  def QA(question, context):
65
  # model_name="deepset/electra-base-squad2"
66
  # nlp = pipeline("question-answering",model=al_model,tokenizer=al_tokenizer)
 
26
  from transformers import pipeline
27
  from transformers.pipelines import AggregationStrategy
28
  import pickle
29
+ import streamlit as st
30
 
31
  # sq_tokenizer = att.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
32
  # sq_model = alwm.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
 
36
  hfmodel = alwm.from_pretrained("ThomasSimonini/t5-end2end-question-generation")
37
 
38
  hftokenizer = T5TokenizerFast.from_pretrained("t5-base")
39
+ @st.cache(suppress_st_warning=True)
40
  def run_model(input_string, **generator_args):
41
  generator_args = {
42
  "max_length": 256,
 
63
  model = amqa.from_pretrained("ahotrod/albert_xxlargev1_squad2_512")
64
  # al_model = pickle.load(open('models/al_model.sav', 'rb'))
65
  # al_tokenizer = pickle.load(open('models/al_tokenizer.sav', 'rb'))
66
+ @st.cache
67
  def QA(question, context):
68
  # model_name="deepset/electra-base-squad2"
69
  # nlp = pipeline("question-answering",model=al_model,tokenizer=al_tokenizer)