nick_93 committed on
Commit 3da0b4c
1 Parent(s): 05c4ff3
Files changed (1)
  1. app.py +12 -7
app.py CHANGED
@@ -105,15 +105,19 @@ def create_refseg_demo(model, tokenizer, device):
 
 
 def main():
+    upload_2_models = True
+
     opt = TestOptions().initialize()
     args = opt.parse_args()
 
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    #model = EVPDepth(args=args, caption_aggregation=True)
-    #model.to(device)
-    #model_weight = torch.load('best_model_nyu.ckpt', map_location=device)['model']
-    #model.load_state_dict(model_weight, strict=False)
-    #model.eval()
+    if upload_2_models:
+        model = EVPDepth(args=args, caption_aggregation=True)
+        model.to(device)
+        model_weight = torch.load('best_model_nyu.ckpt', map_location=device)['model']
+        model.load_state_dict(model_weight, strict=False)
+        model.eval()
+
     tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
     model_refseg = EVPRefer()
     model_refseg.to(device)
@@ -133,8 +137,9 @@ def main():
     with gr.Blocks() as demo:
         gr.Markdown(title)
         gr.Markdown(description)
-        #with gr.Tab("Depth Prediction"):
-        #    create_depth_demo(model_refseg, device)
+        if upload_2_models:
+            with gr.Tab("Depth Prediction"):
+                create_depth_demo(model, device)
         with gr.Tab("Referring Segmentation"):
             create_refseg_demo(model_refseg, tokenizer, device)
         gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/MykolaL/evp?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br>
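
For context, a minimal sketch of main() as it stands after this commit, assembled from the two hunks above. It assumes EVPDepth, EVPRefer, TestOptions, create_depth_demo, create_refseg_demo, title, and description are imported or defined elsewhere in app.py, and the unchanged lines between the hunks are elided.

# Sketch of the post-commit main(), not the full app.py.
# EVPDepth, EVPRefer, TestOptions, create_depth_demo, create_refseg_demo,
# title, and description are assumed to come from elsewhere in app.py.
import torch
import gradio as gr
from transformers import CLIPTokenizer

def main():
    # Gate loading the depth model alongside the referring-segmentation model.
    upload_2_models = True

    opt = TestOptions().initialize()
    args = opt.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if upload_2_models:
        # Depth model: previously commented out, now loaded from the NYU checkpoint.
        model = EVPDepth(args=args, caption_aggregation=True)
        model.to(device)
        model_weight = torch.load('best_model_nyu.ckpt', map_location=device)['model']
        model.load_state_dict(model_weight, strict=False)
        model.eval()

    # Referring-segmentation model is loaded unconditionally.
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    model_refseg = EVPRefer()
    model_refseg.to(device)

    # ... lines between the two hunks are unchanged and not shown in this diff ...

    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)
        if upload_2_models:
            with gr.Tab("Depth Prediction"):
                create_depth_demo(model, device)
        with gr.Tab("Referring Segmentation"):
            create_refseg_demo(model_refseg, tokenizer, device)
        # ... duplicate-Space HTML footer and the rest of the demo setup not shown ...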