# app.py

# Install the necessary libraries (for local setup only; on Hugging Face Spaces,
# list these dependencies in requirements.txt instead)
# !pip install transformers datasets scikit-learn accelerate gradio

# Import the necessary libraries
import gradio as gr
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline

# Load the dataset (kept for reference; the demo itself does not use it directly)
ds = load_dataset("GonzaloA/fake_news")

# Load the pre-trained tokenizer and the fine-tuned model
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = AutoModelForSequenceClassification.from_pretrained('TeamQuad-fine-tuned-bert')

# Create a text-classification pipeline
classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)

# Define the label mapping for fake news detection
label_mapping = {0: 'fake', 1: 'true'}

# Function to classify input text
def classify_news(text):
    result = classifier(text)
    # The pipeline returns labels such as 'LABEL_0'/'LABEL_1'; keep the numeric part
    label = result[0]['label'].split('_')[1]
    score = result[0]['score']  # Confidence score
    mapped_result = {'label': label_mapping[int(label)], 'score': score}
    return f"Label: {mapped_result['label']}, Score: {mapped_result['score']:.4f}"

# Create the Gradio interface
iface = gr.Interface(
    fn=classify_news,  # The function that processes the input
    inputs=gr.Textbox(lines=10, placeholder="Enter a news headline or article to classify..."),
    outputs="text",  # The output is displayed as plain text
    title="Fake News Detection",
    description="Enter a news headline or article and see whether the model classifies it as 'Fake News' or 'True News'.",
)

# Launch the interface
if __name__ == "__main__":
    iface.launch(share=True)