aliasgerovs committed on
Commit 20f67e4 · 1 Parent(s): 9545dbd

Update app.py

Files changed (1): app.py (+25 −19)
app.py CHANGED
@@ -269,29 +269,35 @@ def split_text_allow_complete_sentences_nltk(text, max_length=256, tolerance=30,
     return decoded_segments
 
 def predict_quillbot(text):
-    tokenized_text = quillbot_tokenizer(text, padding="max_length", truncation=True, max_length=256, return_tensors="pt").to(device)["input_ids"]
-    output = quillbot_model(tokenized_text)
-    output_norm = softmax(output.logits.detach().cpu().numpy(), 1)[0]
-    q_score = {"QuillBot": output_norm[1].item(), "Original": output_norm[0].item()}
-    return q_score
+    with torch.no_grad():
+        quillbot_model.eval()
+        tokenized_text = quillbot_tokenizer(text, padding="max_length", truncation=True, max_length=256, return_tensors="pt").to(device)
+        output = quillbot_model(**tokenized_text)
+        output_norm = softmax(output.logits.detach().cpu().numpy(), 1)[0]
+        q_score = {"QuillBot": output_norm[1].item(), "Original": output_norm[0].item()}
+        return q_score
 
 def predict_bc(model, tokenizer, text):
-    tokens = text_bc_tokenizer(
-        text, padding='max_length', truncation=True, max_length=256, return_tensors="pt"
-    ).to(device)["input_ids"]
-    output = model(tokens)
-    output_norm = softmax(output.logits.detach().cpu().numpy(), 1)[0]
-    print("BC Score: ", output_norm)
-    return output_norm
+    with torch.no_grad():
+        model.eval()
+        tokens = text_bc_tokenizer(
+            text, padding='max_length', truncation=True, max_length=256, return_tensors="pt"
+        ).to(device)
+        output = model(**tokens)
+        output_norm = softmax(output.logits.detach().cpu().numpy(), 1)[0]
+        print("BC Score: ", output_norm)
+        return output_norm
 
 def predict_mc(model, tokenizer, text):
-    tokens = text_mc_tokenizer(
-        text, padding='max_length', truncation=True, return_tensors="pt", max_length=256
-    ).to(device)["input_ids"]
-    output = model(tokens)
-    output_norm = softmax(output.logits.detach().cpu().numpy(), 1)[0]
-    print("MC Score: ", output_norm)
-    return output_norm
+    with torch.no_grad():
+        model.eval()
+        tokens = text_mc_tokenizer(
+            text, padding='max_length', truncation=True, return_tensors="pt", max_length=256
+        ).to(device)
+        output = model(**tokens)
+        output_norm = softmax(output.logits.detach().cpu().numpy(), 1)[0]
+        print("MC Score: ", output_norm)
+        return output_norm
 
 def ai_generated_test(ai_option, input):
 
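
Note: the net effect of this diff is the same inference-hygiene pattern applied to all three helpers: switch the model to eval mode, run the forward pass under torch.no_grad(), and feed the model the full tokenizer encoding via **tokens (which now includes the attention mask) instead of only input_ids. A minimal self-contained sketch of that pattern, using a hypothetical public checkpoint rather than the app's own quillbot/bc/mc models (torch softmax stands in for the numpy softmax used in app.py):

    import torch
    from torch.nn.functional import softmax
    from transformers import AutoTokenizer, AutoModelForSequenceClassification

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Hypothetical checkpoint for illustration; app.py loads its own models.
    checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    model.eval()  # disable dropout so inference scores are deterministic

    def predict(text):
        # Keep the whole encoding (input_ids AND attention_mask) instead of
        # indexing out ["input_ids"], so padded positions are masked, not attended.
        inputs = tokenizer(
            text, padding="max_length", truncation=True, max_length=256,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():  # no autograd graph -> less memory, faster forward
            logits = model(**inputs).logits
        return softmax(logits, dim=-1)[0].cpu().tolist()

    print(predict("This sentence was written by a person."))

Passing the attention mask matters with padding="max_length": every input is padded out to 256 tokens, and without the mask the model attends to those pad tokens, which skews the resulting probabilities.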