MLIFY committed on
Commit
1f8905d
·
verified ·
1 Parent(s): 782204e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -1
app.py CHANGED
@@ -1,3 +1,19 @@
 
 
 
 
 
 
1
  import gradio as gr
2
 
3
- gr.load("models/hamzab/roberta-fake-news-classification").launch()
 
 
 
 
 
 
 
 
 
 
 
1
"""Gradio app that classifies a news article as Fake or Real.

Loads the ``hamzab/roberta-fake-news-classification`` sequence-classification
model once at startup and serves a two-field (headline + content) UI.
"""

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "hamzab/roberta-fake-news-classification"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

# Select the device and move the model ONCE at startup — the original did
# this inside every call to predict_fake, which is redundant work per request.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()  # inference mode: disables dropout etc.


def predict_fake(title, text):
    """Return class probabilities for a news item.

    Args:
        title: The article headline.
        text: The article body.

    Returns:
        dict mapping the labels "Fake" and "Real" to softmax probabilities,
        suitable for Gradio's "label" output component.
    """
    # The model was fine-tuned on inputs of this exact templated form,
    # so the same "<title>...<content>...<end>" string is rebuilt here.
    input_str = "<title>" + title + "<content>" + text + "<end>"
    encoded = tokenizer.encode_plus(
        input_str,
        max_length=512,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    with torch.no_grad():
        output = model(
            encoded["input_ids"].to(device),
            attention_mask=encoded["attention_mask"].to(device),
        )
    # torch.softmax(..., dim=-1) replaces the original torch.nn.Softmax()
    # with no `dim` argument, which is deprecated and emits a warning.
    probs = torch.softmax(output.logits, dim=-1)[0]
    return dict(zip(["Fake", "Real"], (p.item() for p in probs)))


iface = gr.Interface(
    fn=predict_fake,
    # gr.inputs.Textbox was removed in Gradio 3.x; gr.Textbox is the
    # current equivalent with the same lines/label parameters.
    inputs=[
        gr.Textbox(lines=1, label="headline"),
        gr.Textbox(lines=6, label="content"),
    ],
    outputs="label",
).launch(share=True)