paragon-analytics committed on
Commit d415981
1 Parent(s): d193831

Update app.py

Files changed (1):
  app.py +1 -20
app.py CHANGED
@@ -41,24 +41,10 @@ from transformers_interpret import SequenceClassificationExplainer
 cls_explainer = SequenceClassificationExplainer(
     model,
     tokenizer)
-
-# load the model from disk
-#filename = 'resil_lstm_model.sav'
-#lmodel = pickle.load(open(filename, 'rb'))
-
-# load the model from disk
-#filename = 'tokenizer.pickle'
-#tok = pickle.load(open(filename, 'rb'))
 
 def process_final_text(text):
     X_test = str(text).lower()
-    #l = []
-    #l.append(X_test)
-    #test_sequences = tok.texts_to_sequences(l)
-    #test_sequences_matrix = sequence.pad_sequences(test_sequences,maxlen=max_len)
-    #lstm_prob = lmodel.predict(test_sequences_matrix.tolist()).flatten()
-    #lstm_pred = np.where(lstm_prob>=0.5,1,0)
-
+
     encoded_input = tokenizer(X_test, return_tensors='pt')
     output = model(**encoded_input)
     scores = output[0][0].detach().numpy()
@@ -114,11 +100,6 @@ def process_final_text(text):
 
     word_attributions = [(letter[i], score[i]) for i in range(0, len(letter))]
 
-    # # Paraphraser:
-    # batch = para_tokenizer(X_test, return_tensors='pt')
-    # generated_ids = para_model.generate(batch['input_ids'])
-    # para_list = para_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
-
     return {"Persuasive": float(scores.numpy()[1]), "Non-Persuasive": float(scores.numpy()[0])},keywords,NER,word_attributions
 
 def main(prob1):
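
For context, below is a minimal sketch of the inference path that remains after this commit, once the commented-out LSTM/pickle loading and the paraphraser block are removed. It is an illustration, not the app's full code: the checkpoint name is a placeholder, and the softmax step is assumed, since the lines between the two hunks are not part of this diff.

# Sketch only: the checkpoint name is a placeholder and the softmax step is
# assumed; neither is taken from the lines shown in this diff.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers_interpret import SequenceClassificationExplainer

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"  # placeholder
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Word-level attributions, matching the cls_explainer lines kept by this commit.
cls_explainer = SequenceClassificationExplainer(model, tokenizer)

def classify(text):
    x = str(text).lower()
    encoded_input = tokenizer(x, return_tensors='pt')
    with torch.no_grad():
        output = model(**encoded_input)
    scores = torch.softmax(output.logits[0], dim=-1)  # assumed normalization step
    word_attributions = cls_explainer(x)  # list of (token, attribution) pairs
    return {"Persuasive": float(scores[1]),
            "Non-Persuasive": float(scores[0])}, word_attributions

The app's process_final_text additionally returns keywords and NER output, which are produced by code outside the lines shown here.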
 