import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
import csv

MODEL_URL = "https://huggingface.co/dsfsi/PuoBERTa-News"
WEBSITE_URL = "https://www.kodiks.com/ai_solutions.html"

tokenizer = AutoTokenizer.from_pretrained("dsfsi/PuoBERTa-News")
model = AutoModelForSequenceClassification.from_pretrained("dsfsi/PuoBERTa-News")

# Model label names mapped to their Setswana display names.
categories = {
    "arts_culture_entertainment_and_media": "Botsweretshi, setso, boitapoloso le bobegakgang",
    "crime_law_and_justice": "Bosenyi, molao le bosiamisi",
    "disaster_accident_and_emergency_incident": "Masetlapelo, kotsi le tiragalo ya maemo a tshoganyetso",
    "economy_business_and_finance": "Ikonomi, tsa kgwebo le tsa ditšhelete",
    "education": "Thuto",
    "environment": "Tikologo",
    "health": "Boitekanelo",
    "politics": "Dipolotiki",
    "religion_and_belief": "Bodumedi le tumelo",
    "society": "Setšhaba"
}


def prediction(news):
    """Classify a single Setswana news article and return {category: probability}."""
    classifier = pipeline("text-classification", tokenizer=tokenizer, model=model, return_all_scores=True)
    preds = classifier(news)
    preds_dict = {categories.get(pred['label'], pred['label']): round(pred['score'], 4) for pred in preds[0]}
    return preds_dict


def file_prediction(file):
    """Classify every non-empty line of an uploaded .txt or .csv file."""
    # gr.File may pass a file-like object (with a .name path) or a plain path string,
    # depending on the Gradio version, so read the upload from its path.
    path = file if isinstance(file, str) else file.name
    with open(path, encoding='utf-8') as f:
        content = f.read()
    if path.endswith('.csv'):
        # The news text is expected in the first column of the CSV.
        reader = csv.reader(content.splitlines())
        news_list = [row[0] for row in reader if row]
    else:
        news_list = content.splitlines()

    results = []
    for news in news_list:
        if news.strip():
            pred = prediction(news)
            results.append([news, pred])
    return results


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            pass
        with gr.Column(scale=4, min_width=1000):
            gr.Image("logo_transparent_small.png", elem_id="logo", show_label=False, width=500)
            gr.Markdown("""

Setswana News Classification

This space provides a classification service for news in Setswana.

""") with gr.Column(scale=1): pass with gr.Tabs(): with gr.Tab("Text Input"): gr.Markdown(f""" Enter Setswana news article to see the category of the news.
For this classification, the [PuoBERTa-News]({MODEL_URL}) model is used.
            """)
            inp_text = gr.Textbox(lines=10, label="Paste some Setswana news here")
            output_label = gr.Label(num_top_classes=5, label="News category probabilities")
            classify_button = gr.Button("Classify")
            classify_button.click(prediction, inputs=inp_text, outputs=output_label)

        with gr.Tab("File Upload"):
            gr.Markdown("""
Upload a text or CSV file with Setswana news articles. The first column in the CSV should contain the news text.
            """)
            file_input = gr.File(label="Upload text or CSV file")
            file_output = gr.Dataframe(headers=["News Text", "Category Predictions"], label="Predictions from file")
            file_button = gr.Button("Classify File")
            file_button.click(file_prediction, inputs=file_input, outputs=file_output)

    gr.Markdown("""
GitHub | Feedback Form
""") with gr.Accordion("More Information", open=False): gr.Markdown("""

Authors

Vukosi Marivate, Moseli Mots'Oehli, Valencia Wagner, Richard Lastrucci, Isheanesu Dzingirai
""") gr.Markdown("""

Citation

        @inproceedings{marivate2023puoberta,
          title        = {PuoBERTa: Training and evaluation of a curated language model for Setswana},
          author       = {Vukosi Marivate and Moseli Mots'Oehli and Valencia Wagner and Richard Lastrucci and Isheanesu Dzingirai},
          year         = {2023},
          booktitle    = {Artificial Intelligence Research. SACAIR 2023. Communications in Computer and Information Science},
          url          = {https://link.springer.com/chapter/10.1007/978-3-031-49002-6_17},
          keywords     = {NLP},
          preprint_url = {https://arxiv.org/abs/2310.09141},
          dataset_url  = {https://github.com/dsfsi/PuoBERTa},
          software_url = {https://huggingface.co/dsfsi/PuoBERTa}
        }
        
""") gr.Markdown("""

DOI

DOI: 10.1007/978-3-031-49002-6_17
""") demo.launch()