# app.py

# Install the required libraries (this line is only for local setup; on a Gradio
# Space the same dependencies should be declared in the Space's requirements.txt)
# !pip install transformers datasets scikit-learn accelerate gradio
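# A minimal requirements.txt for such a Space would likely just mirror that list
# (unpinned versions shown; a real Space may pin them -- this is a sketch, not the
# project's actual config file):
#
#   transformers
#   datasets
#   scikit-learn
#   accelerate
#   gradio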

# Importing necessary libraries
import gradio as gr
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline

# Load the dataset (note: `ds` is not referenced again below; the app only runs inference)
ds = load_dataset("GonzaloA/fake_news")

# Load pre-trained tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = AutoModelForSequenceClassification.from_pretrained('TeamQuad-fine-tuned-bert')

# Create a classification pipeline
classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
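# For reference, the pipeline returns one dict per input, e.g. (label names assume
# the model config keeps the default 'LABEL_0'/'LABEL_1' ids; the score is illustrative):
#
#   classifier("Some headline")
#   # -> [{'label': 'LABEL_1', 'score': 0.98}]
#
# classify_news() below relies on this 'LABEL_<id>' format.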

# Define label mapping for fake news detection
label_mapping = {0: 'fake', 1: 'true'}

# Function to classify input text
def classify_news(text):
    result = classifier(text)
    label = result[0]['label'].split('_')[1]  # e.g. 'LABEL_1' -> '1'
    score = result[0]['score']  # Confidence score
    mapped_result = {'label': label_mapping[int(label)], 'score': score}
    return f"Label: {mapped_result['label']}, Score: {mapped_result['score']:.4f}"

# Create a Gradio interface
iface = gr.Interface(
    fn=classify_news,  # The function to process the input
    inputs=gr.Textbox(lines=10, placeholder="Enter a news headline or article to classify..."),
    outputs="text",  # Output will be displayed as text
    title="Fake News Detection",
    description="Enter a news headline or article and the model will classify it as fake or true news."
)

# Launch the interface
if __name__ == "__main__":
    # share=True creates a temporary public Gradio link when running locally;
    # it is not needed on Hugging Face Spaces, which already hosts the app publicly.
    iface.launch(share=True)