liuyizhang committed
Commit 6d54f1e
Parent(s): dfba81f
update app.py

Files changed:
- app.py +22 -23
- app_cli.py +5 -6
app.py
CHANGED
@@ -43,6 +43,7 @@ sam_enable = True
 inpainting_enable = True
 ram_enable = True
 lama_cleaner_enable = True
+
 kosmos_enable = False
 
 if os.environ.get('IS_MY_DEBUG') is not None:
@@ -90,12 +91,7 @@ ckpt_filenmae = "groundingdino_swint_ogc.pth"
 sam_checkpoint = './sam_vit_h_4b8939.pth'
 output_dir = "outputs"
 
-if os.environ.get('IS_MY_DEBUG') is None:
-    device = 'cuda' if torch.cuda.is_available() else 'cpu'
-else:
-    device = 'cpu'
-    kosmos_enable = False
-
+device = 'cpu'
 os.makedirs(output_dir, exist_ok=True)
 groundingdino_model = None
 sam_device = None
@@ -270,12 +266,12 @@ def mix_masks(imgs):
     return Image.fromarray(np.uint8(255*re_img))
 
 def set_device():
-    global device
     if os.environ.get('IS_MY_DEBUG') is None:
         device = 'cuda' if torch.cuda.is_available() else 'cpu'
     else:
         device = 'cpu'
     print(f'device={device}')
+    return device
 
 def load_groundingdino_model(device):
     # initialize groundingdino model
@@ -805,18 +801,6 @@ def get_model_device(module):
     except Exception as e:
         return 'Error'
 
-task_types = ["detection"]
-if sam_enable:
-    task_types.append("segment")
-if inpainting_enable:
-    task_types.append("inpainting")
-if lama_cleaner_enable:
-    task_types.append("remove")
-if ram_enable:
-    task_types.append("relate anything")
-if kosmos_enable:
-    task_types.append("Kosmos-2")
-
 if __name__ == "__main__":
     parser = argparse.ArgumentParser("Grounded SAM demo", add_help=True)
     parser.add_argument("--debug", action="store_true", help="using debug mode")
@@ -827,7 +811,9 @@ if __name__ == "__main__":
     if os.environ.get('IS_MY_DEBUG') is None:
         os.system("pip list")
 
-    set_device()
+    device = set_device()
+    if device == 'cpu':
+        kosmos_enable = False
 
     if kosmos_enable:
         kosmos_model, kosmos_processor = load_kosmos_model(device)
@@ -855,12 +841,24 @@
     # print(f'sd_model__{get_model_device(sd_model)}')
     # print(f'lama_cleaner_model__{get_model_device(lama_cleaner_model)}')
     # print(f'ram_model__{get_model_device(ram_model)}')
-    # print(f'kosmos_model__{get_model_device(kosmos_model)}')
-
+    # print(f'kosmos_model__{get_model_device(kosmos_model)}')
+
     block = gr.Blocks().queue()
     with block:
         with gr.Row():
             with gr.Column():
+                task_types = ["detection"]
+                if sam_enable:
+                    task_types.append("segment")
+                if inpainting_enable:
+                    task_types.append("inpainting")
+                if lama_cleaner_enable:
+                    task_types.append("remove")
+                if ram_enable:
+                    task_types.append("relate anything")
+                if kosmos_enable:
+                    task_types.append("Kosmos-2")
+
                 input_image = gr.Image(source='upload', elem_id="image_upload", tool='sketch', type='pil', label="Upload")
                 task_type = gr.Radio(task_types, value="detection",
                                      label='Task type', visible=True)
@@ -948,7 +946,8 @@ if __name__ == "__main__":
                 <a href="https://huggingface.co/spaces/yizhangliu/Grounded-Segment-Anything?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
         gr.Markdown(DESCRIPTION)
 
-    print(f'device={device}')
+    print(f'device = {device}')
+    print(f'torch.cuda.is_available = {torch.cuda.is_available()}')
     computer_info()
     block.launch(server_name='0.0.0.0', debug=args.debug, share=args.share)
 
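For orientation, the sketch below pulls the app.py changes together: device selection now happens once via set_device(), which returns the device instead of mutating a global, Kosmos-2 is switched off when no GPU is found, and the task_types list is built inside the Gradio column right before the Radio widget. This is a minimal sketch, not the full script: only names that appear in the diff are used, and model loading, the remaining feature flags, and event wiring are left out.

# Minimal, self-contained sketch of app.py's startup flow after this commit.
# Assumption: only names visible in the diff; everything else is elided.
import os
import torch
import gradio as gr

sam_enable = True
kosmos_enable = False

def set_device():
    # Same logic as the set_device() in the diff: CUDA when available,
    # CPU when debugging or when no GPU is present.
    if os.environ.get('IS_MY_DEBUG') is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    else:
        device = 'cpu'
    print(f'device={device}')
    return device

if __name__ == "__main__":
    device = set_device()
    if device == 'cpu':
        kosmos_enable = False          # Kosmos-2 stays disabled without a GPU

    block = gr.Blocks().queue()
    with block:
        with gr.Row():
            with gr.Column():
                # Build the task list where the Radio widget is created, so it
                # reflects the feature flags at launch time.
                task_types = ["detection"]
                if sam_enable:
                    task_types.append("segment")
                if kosmos_enable:
                    task_types.append("Kosmos-2")
                task_type = gr.Radio(task_types, value="detection", label='Task type')

    print(f'device = {device}')
    print(f'torch.cuda.is_available = {torch.cuda.is_available()}')
    block.launch(server_name='0.0.0.0')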
app_cli.py
CHANGED
@@ -97,12 +97,11 @@ if __name__ == '__main__':
 
     logger.info(f'loading models ... ')
     # set_device() # If you have enough GPUs, you can open this comment
-
-
-
-
-
-    # load_ram_model()
+    groundingdino_model = load_groundingdino_model('cpu')
+    load_sam_model(device)
+    # load_sd_model(device)
+    load_lama_cleaner_model(device)
+    # load_ram_model(device)
 
     input_image = Image.open(args.input_image)
 
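The app_cli.py hunk applies a simple device policy outside the web UI: GroundingDINO is loaded on CPU while SAM and the lama-cleaner model follow device, and the Stable Diffusion and RAM loaders stay commented out. The sketch below only illustrates that split and is an assumption-laden stand-in: the three loader functions are stubs for the real ones defined in this repository, and device would normally come from set_device().

# Illustration of the CPU/GPU split app_cli.py uses after this commit.
# The loaders below are stubs; the real implementations live in the repo.
import torch

def load_groundingdino_model(device):      # stub for the real loader
    print(f'groundingdino -> {device}')

def load_sam_model(device):                # stub for the real loader
    print(f'sam -> {device}')

def load_lama_cleaner_model(device):       # stub for the real loader
    print(f'lama-cleaner -> {device}')

device = 'cuda' if torch.cuda.is_available() else 'cpu'   # normally set_device()
load_groundingdino_model('cpu')   # GroundingDINO is kept on CPU regardless
load_sam_model(device)
# load_sd_model(device)           # Stable Diffusion loader stays commented out
load_lama_cleaner_model(device)
# load_ram_model(device)          # RAM ("relate anything") loader stays commented out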