kushagra124 committed on
Commit 797b9ba
1 Parent(s): 94299ff

adding text box

Files changed (1)
  1. app.py +37 -34
app.py CHANGED
@@ -65,45 +65,48 @@ def shot(image, labels_text):
     prompts = labels_text.split(',')
     global classes
     classes = prompts
-    print(classes)
+
     detections = detect_using_clip(image,prompts=prompts)
-    print(detections)
-    return 0
+
+    return detections
 
 def add_text(text):
     labels = text.split(',')
     return labels
 
-with gr.Blocks(title="Zero Shot Object ddetection using Text Prompts") as demo :
-    gr.Markdown(
-        """
-        <center>
-        <h1>
-        The CLIP Model
-        </h1>
-        A neural network called CLIP which efficiently learns visual concepts from natural language supervision. CLIP can be applied to any visual classification benchmark by simply providing the names of the visual categories to be recognized, similar to the “zero-shot” capabilities of GPT-2 and GPT-3.
-        </center>
-        """
-    )
+inputt = gr.Image(type="numpy", label="Input Image for Classification")
+
+# with gr.Blocks(title="Zero Shot Object ddetection using Text Prompts") as demo :
+#     gr.Markdown(
+#         """
+#         <center>
+#         <h1>
+#         The CLIP Model
+#         </h1>
+#         A neural network called CLIP which efficiently learns visual concepts from natural language supervision. CLIP can be applied to any visual classification benchmark by simply providing the names of the visual categories to be recognized, similar to the “zero-shot” capabilities of GPT-2 and GPT-3.
+#         </center>
+#         """
+#     )
 
-    with gr.Row():
-        with gr.Column():
-            inputt = gr.Image(type="numpy", label="Input Image for Classification")
-            labels = gr.Textbox(label="Enter Label/ labels",placeholder="ex. car,person",scale=4)
-            button = gr.Button(value="Locate objects")
-        with gr.Column():
-            outputs = gr.Image(type="numpy", label="Detected Objects with Selected Category")
-            # dropdown = gr.Dropdown(labels,label="Select the category",info='Label selection panel')
+#     with gr.Row():
+#         with gr.Column():
+#             inputt = gr.Image(type="numpy", label="Input Image for Classification")
+#             labels = gr.Textbox(label="Enter Label/ labels",placeholder="ex. car,person",scale=4)
+#             button = gr.Button(value="Locate objects")
+#         with gr.Column():
+#             outputs = gr.Image(type="numpy", label="Detected Objects with Selected Category")
+#             # dropdown = gr.Dropdown(labels,label="Select the category",info='Label selection panel')
 
-    # labels.submit(add_text, inputs=labels)
-    button.click(fn=shot,inputs=[inputt,labels],api_name='Get labels')
-
-
-demo.launch()
-# iface = gr.Interface(fn=shot,
-#                      inputs = ["image","text","label"],
-#                      outputs=output,
-#                      examples=random_images,
-#                      allow_flagging=False,
-#                      analytics_enabled=False,
-#                      )
+#     # labels.submit(add_text, inputs=labels)
+#     button.click(fn=shot,inputs=[inputt,labels],api_name='Get labels')
+
+
+# demo.launch()
+iface = gr.Interface(fn=shot,
+                     inputs = ["image","text"],
+                     outputs="label",
+                     examples=random_images,
+                     allow_flagging=False,
+                     analytics_enabled=False,
+                     )
+iface.launch()
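
For readers following the change: the hunk swaps the gr.Blocks layout for a plain gr.Interface whose text box supplies the comma-separated prompts to shot(). Below is a minimal, self-contained sketch of that wiring. The detect_using_clip stub and its {label: score} return value are assumptions made here so the example runs on its own (the repository's real detector is defined elsewhere in app.py), and the examples=random_images argument from the diff is omitted.

import gradio as gr

def detect_using_clip(image, prompts):
    # Stand-in for the repository's CLIP-based detector: return a
    # {label: confidence} dict that a "label" output component can render.
    return {p.strip(): 1.0 / len(prompts) for p in prompts}

def shot(image, labels_text):
    # Split the text-box contents into prompts, as in the new hunk.
    prompts = labels_text.split(',')
    detections = detect_using_clip(image, prompts=prompts)
    return detections

iface = gr.Interface(fn=shot,
                     inputs=["image", "text"],
                     outputs="label")
iface.launch()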