import gradio as gr
import nltk
import numpy as np
import wikipediaapi
from PIL import Image
from sumy.nlp.tokenizers import Tokenizer
from sumy.parsers.plaintext import PlaintextParser
from sumy.summarizers.lsa import LsaSummarizer
from transformers import pipeline

# Sentence-tokenizer data required by sumy's Tokenizer.
nltk.download('punkt')


def create_html(title, content):
    """Wrap the species name and its summary in a scrollable HTML block for the Gradio output."""
    html = f"""<div style='max-width:100%; max-height:360px; overflow:auto'>
    <h1>{title}</h1>
    <br/>
    <p>{content}</p>
    </div>"""
    return html


def get_max(data):
    """Return the prediction with the highest confidence score.

    The image-classification pipeline returns a list of dicts of the form
    [{'label': ..., 'score': ...}, ...].
    """
    return max(data, key=lambda x: x['score'])


def summarize_wikipedia(search_query, language='en', sentences_count=3):
    """Fetch the Wikipedia article for the query and return a short LSA summary."""
    wiki_wiki = wikipediaapi.Wikipedia('AgungBagus (agungbagus@example.com)', language)
    page = wiki_wiki.page(search_query)
    if not page.exists():
        return "Article not found."
    parser = PlaintextParser.from_string(page.text, Tokenizer(language))
    summarizer = LsaSummarizer()
    summary = summarizer(parser.document, sentences_count)
    return ' '.join(str(sentence) for sentence in summary)


# Load the bird-species classifier once at startup rather than on every request.
classifier = pipeline(task="image-classification", model="gungbgs/bird_species_classifier")


def image_processing(input_image: np.ndarray):
    """Classify the uploaded bird photo and return an HTML card with a Wikipedia summary."""
    image = Image.fromarray(input_image)
    species = get_max(classifier(image))['label']
    summary = summarize_wikipedia(species.lower())
    return create_html(species, summary)


app = gr.Interface(
    fn=image_processing,
    inputs="image",
    outputs="html",
)
app.launch(share=True)
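
# Note: a requirements.txt for this Space would likely need gradio, transformers, torch,
# nltk, numpy, sumy, wikipedia-api, and Pillow. This is an assumption from the imports
# above (torch as the transformers backend); exact packages and pins are not given here.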