File size: 3,052 Bytes
caf6be1
 
 
 
 
 
4f94c4a
caf6be1
 
 
 
4f94c4a
 
 
 
 
 
 
 
 
caf6be1
4f94c4a
caf6be1
 
 
 
70e5abd
 
f3c98f0
 
94c8d66
fb0b4f0
5c87807
b4bd918
7f9320e
 
f3c98f0
 
 
b4bd918
 
f3c98f0
 
 
 
 
74facc4
 
 
 
5847716
9d8e134
 
 
 
 
 
052354a
98af2c3
 
 
 
 
 
4f94c4a
98af2c3
 
 
 
a78c9cf
 
5c87807
 
d5f7570
 
 
 
 
70e5abd
8129089
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import torch

# Load a pretrained ResNet-18 once at import time; .eval() switches off
# dropout / batch-norm training behavior for inference.
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()


import requests
import PIL
from torchvision import transforms

# Download human-readable labels for ImageNet (one class name per line).
# timeout avoids hanging forever; raise_for_status fails fast instead of
# silently splitting an HTML error page into bogus "labels".
response = requests.get("https://git.io/JJkYN", timeout=30)
response.raise_for_status()
labels   = response.text.split("\n")

def classify_image(image_filepath):
  """Classify an image file against the 1000 ImageNet classes.

  Args:
    image_filepath: path to any image file readable by Pillow.

  Returns:
    dict mapping each ImageNet class name to its softmax probability.
  """
  # Explicit submodule import: a bare `import PIL` does not guarantee that
  # PIL.Image is loaded (it currently works only because torchvision imports it).
  from PIL import Image
  pil_image = Image.open(image_filepath).convert('RGB')
  preprocess = transforms.Compose([
    transforms.Resize(size=(224, 224)),
    transforms.ToTensor(),
    # Pretrained torchvision models expect ImageNet-normalized inputs;
    # without this step the softmax probabilities are badly mis-calibrated.
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
  ])
  batch = preprocess(pil_image).unsqueeze(0)  # add batch dim -> (1, 3, 224, 224)
  with torch.no_grad():
    probabilities = torch.nn.functional.softmax(model(batch)[0], dim=0)
    confidences = {labels[i]: float(probabilities[i]) for i in range(1000)}
  return confidences


import gradio as gr

def display_model_details(model_details):
    """Render the user-supplied model details as a bold-headed Markdown section."""
    heading = "**Model Details:**"
    return f"{heading}\n\n{model_details}"

with gr.Blocks(title="Image Classification for 1000 Objects", css=".gradio-container {background:#FFD1DC;}") as demo:
    gr.HTML("""<div style="font-family:'Calibri', 'Serif'; font-size:16pt; font-weight:bold; text-align:center; color:black;">Image Classification for 1000 Objects</div>""")

    gr.Markdown(
    """
    # Enter Model Details

    Please provide the necessary information about your model in the text box below.
    """
    )
    input_box = gr.Textbox(placeholder="Enter model details")
    output_box = gr.Markdown()

    # Live-preview the model details as Markdown on every edit.
    input_box.change(display_model_details, input_box, output_box)

    with gr.Row():
        # image_mode="RGB" keeps color information; the previous "L" forced a
        # grayscale upload, degrading accuracy for a color-trained ResNet.
        input_image  = gr.Image(type="filepath", image_mode="RGB")
        output_label = gr.Label(label="Probabilities", num_top_classes=3)

    send_btn = gr.Button("Infer")
    send_btn.click(fn=classify_image, inputs=input_image, outputs=output_label)

    # One-click sample inputs; files are expected next to this script.
    with gr.Row():
        gr.Examples(['./lion.jpg'], label='Sample images : Lion', inputs=input_image)
        gr.Examples(['./cheetah.jpg'], label='Cheetah', inputs=input_image)
        gr.Examples(['./eagle.jpg'], label='Eagle', inputs=input_image)
        gr.Examples(['./indigobird.jpg'], label='Indigo Bird', inputs=input_image)
        gr.Examples(['./aircraftcarrier.jpg'], label='Aircraft Carrier', inputs=input_image)
        gr.Examples(['./acousticguitar.jpg'], label='Acoustic Guitar', inputs=input_image)

# debug=True surfaces handler tracebacks in the console; share=True also
# publishes a temporary public gradio.live URL.
demo.launch(debug=True, share=True)