import nltk
import numpy as np
import gradio as gr
import wikipediaapi
from PIL import Image
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer
from transformers import pipeline

# sumy's Tokenizer depends on NLTK's "punkt" sentence-tokenizer data.
nltk.download('punkt')


def create_html(title, content):
    # Wrap the species name and its summary in a small HTML fragment
    # for Gradio's "html" output.
    html = f"""
    <h1>{title}</h1>

    <p>{content}</p>
    """
    return html
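# The transformers image-classification pipeline returns a list of dicts of
# the form [{'label': ..., 'score': ...}]; get_max below picks the
# top-scoring prediction from that list.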
""" return html import numpy as np def get_max(data): return max(data, key=lambda x: x['score']) import wikipediaapi from sumy.parsers.plaintext import PlaintextParser from sumy.nlp.tokenizers import Tokenizer from sumy.summarizers.lsa import LsaSummarizer def summarize_wikipedia(search_query, language='en', sentences_count=3): wiki_wiki = wikipediaapi.Wikipedia('AgungBagus (agungbagus@example.com)',language) page = wiki_wiki.page(search_query) if not page.exists(): return "Article not found." content = page.text parser = PlaintextParser.from_string(content, Tokenizer(language)) summarizer = LsaSummarizer() summary = summarizer(parser.document, sentences_count) return ' '.join([str(sentence) for sentence in summary]) import gradio as gr from PIL import Image from transformers import pipeline def image_processing(input_image : gr.Image): image = Image.fromarray(input_image) classifier = pipeline(task="image-classification", model="gungbgs/bird_species_classifier") species = get_max(classifier(image))['label'] species_text = species.lower() result = summarize_wikipedia(species_text) return create_html(species, result) app = gr.Interface( fn = image_processing, inputs = "image", outputs = "html" ) app.launch(share=True)