import os

# Refresh numpy and pandas in this environment before the imports below use them.
os.system("pip uninstall numpy -y")
os.system("pip install numpy")
os.system("pip install pandas")
import gradio as gr
import sys
from uuid import uuid1
from PIL import Image
from zipfile import ZipFile
import pathlib
import shutil
import pandas as pd
import deepsparse
import json
rn50_embedding_pipeline_default = deepsparse.Pipeline.create(
    task="embedding-extraction",
    base_task="image-classification",  # tells the pipeline to expect images and normalize input with ImageNet means/stds
    model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
    # emb_extraction_layer=-1,  # left unset: use the pipeline's default extraction layer
)
rn50_embedding_pipeline_last_1 = deepsparse.Pipeline.create(
    task="embedding-extraction",
    base_task="image-classification",  # tells the pipeline to expect images and normalize input with ImageNet means/stds
    model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
    emb_extraction_layer=-1,  # extracts the last layer before the projection head and softmax
)
rn50_embedding_pipeline_last_2 = deepsparse.Pipeline.create(
    task="embedding-extraction",
    base_task="image-classification",  # tells the pipeline to expect images and normalize input with ImageNet means/stds
    model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
    emb_extraction_layer=-2,  # extracts embeddings one layer earlier than -1
)
rn50_embedding_pipeline_last_3 = deepsparse.Pipeline.create(
    task="embedding-extraction",
    base_task="image-classification",  # tells the pipeline to expect images and normalize input with ImageNet means/stds
    model_path="zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/channel20_pruned75_quant-none-vnni",
    emb_extraction_layer=-3,  # extracts embeddings two layers earlier than -1
)
rn50_embedding_pipeline_dict = {
    "0": rn50_embedding_pipeline_default,
    "1": rn50_embedding_pipeline_last_1,
    "2": rn50_embedding_pipeline_last_2,
    "3": rn50_embedding_pipeline_last_3,
}
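# Usage sketch (illustrative only, not executed): each entry in the dict is a
# deepsparse pipeline that takes a list of image paths and returns an object
# whose `embeddings` attribute holds the extracted vectors, exactly as the
# handler functions below use it. "example.png" is a placeholder file name.
#
#   pipeline = rn50_embedding_pipeline_dict["1"]
#   output = pipeline(images=["example.png"])
#   vector = output.embeddings[0]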
def zip_ims(g):
    # Pack the images selected in a gallery into a single zip archive,
    # storing each one under a fresh UUID-based file name.
    if g is None:
        return None
    file_names = [x["name"] for x in g]
    if not file_names:
        return None
    zip_file_name = "tmp.zip"
    with ZipFile(zip_file_name, "w") as zip_obj:
        for file_name in file_names:
            zip_obj.write(file_name, "{}.png".format(uuid1()))
    return zip_file_name
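# Note: zip_ims packs gallery selections into "tmp.zip" so the archive can be
# fed to unzip_ims_func; it is not wired into the Blocks UI below, which only
# exposes emb_img_func and unzip_ims_func.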
def unzip_ims_func(zip_file_name, choose_model,
                   rn50_embedding_pipeline_dict=rn50_embedding_pipeline_dict):
    # Extract the uploaded zip, embed every image found inside it, and return
    # the file names together with their embeddings as a JSON string.
    print("call file")
    if zip_file_name is None:
        return json.dumps({})
    unzip_path = "img_dir"
    if os.path.exists(unzip_path):
        shutil.rmtree(unzip_path)
    with ZipFile(zip_file_name) as archive:
        archive.extractall(unzip_path)
    im_name_l = pd.Series(
        list(pathlib.Path(unzip_path).rglob("*.png")) +
        list(pathlib.Path(unzip_path).rglob("*.jpg")) +
        list(pathlib.Path(unzip_path).rglob("*.jpeg"))
    ).map(str).values.tolist()
    rn50_embedding_pipeline = rn50_embedding_pipeline_dict[choose_model]
    embeddings = rn50_embedding_pipeline(images=im_name_l)
    if os.path.exists(unzip_path):
        shutil.rmtree(unzip_path)
    im_name_l = [os.path.basename(x) for x in im_name_l]
    return json.dumps({
        "names": im_name_l,
        "embs": embeddings.embeddings[0]
    })
def emb_img_func(im, choose_model,
                 rn50_embedding_pipeline_dict=rn50_embedding_pipeline_dict):
    # Embed a single image from the image widget: save it to a temporary PNG,
    # run the selected pipeline, then clean up and return the result as JSON.
    print("call im :")
    if im is None:
        return json.dumps({})
    im_obj = Image.fromarray(im)
    im_name = "{}.png".format(uuid1())
    im_obj.save(im_name)
    rn50_embedding_pipeline = rn50_embedding_pipeline_dict[choose_model]
    embeddings = rn50_embedding_pipeline(images=[im_name])
    os.remove(im_name)
    return json.dumps({
        "names": [im_name],
        "embs": embeddings.embeddings[0]
    })
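# Both handlers return a JSON string of the form
#   {"names": [<image file names>], "embs": <embeddings.embeddings[0]>}
# and an empty input yields "{}".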
'''
def emb_gallery_func(gallery):
    # Unused variant: embed every image currently shown in a gallery component.
    print("call ga :")
    if gallery is None:
        return []
    im_name_l = list(map(lambda x: x["name"], gallery))
    embeddings = rn50_embedding_pipeline(images=im_name_l)
    return embeddings
'''
with gr.Blocks() as demo:
    with gr.Row():
        choose_model = gr.Radio(
            choices=["0", "1", "2", "3"],
            value="0", label="Choose embedding layer", elem_id="layer_radio"
        )
    with gr.Row():
        with gr.Column():
            inputs_0 = gr.Image(label="Input Image for embed")
            button_0 = gr.Button("Image button")
        with gr.Column():
            inputs_1 = gr.File(label="Input Images zip file for embed")
            button_1 = gr.Button("Image File button")
    with gr.Row():
        outputs = gr.Text(label="Output Embeddings")
    button_0.click(fn=emb_img_func, inputs=[inputs_0, choose_model], outputs=outputs)
    button_1.click(fn=unzip_ims_func, inputs=[inputs_1, choose_model], outputs=outputs)

# Bind to all network interfaces so the app is reachable from outside the container.
demo.launch(server_name="0.0.0.0")