fffiloni committed
Commit 1ddab6e
1 Parent(s): 3a7df69

Update app.py

Files changed (1)
  1. app.py +3 -66
app.py CHANGED
@@ -2,70 +2,13 @@ import gradio as gr
  import os
  import cv2
  import numpy as np
- from PIL import Image
+
  from moviepy.editor import *
  #from share_btn import community_icon_html, loading_icon_html, share_js
 
 
- os.system("python -m pip install git+https://github.com/MaureenZOU/detectron2-xyz.git")
-
-
- import torch
- import argparse
-
- from xdecoder.BaseModel import BaseModel
- from xdecoder import build_model
- from utils.distributed import init_distributed
- from utils.arguments import load_opt_from_config_files
-
- from tasks import *
-
- def parse_option():
-     parser = argparse.ArgumentParser('X-Decoder All-in-One Demo', add_help=False)
-     parser.add_argument('--conf_files', default="configs/xdecoder/svlp_focalt_lang.yaml", metavar="FILE", help='path to config file', )
-     args = parser.parse_args()
-
-     return args
-
- '''
- build args
- '''
- args = parse_option()
- opt = load_opt_from_config_files(args.conf_files)
- opt = init_distributed(opt)
-
- # META DATA
- pretrained_pth_last = os.path.join("xdecoder_focalt_last.pt")
- pretrained_pth_novg = os.path.join("xdecoder_focalt_last_novg.pt")
-
- if not os.path.exists(pretrained_pth_last):
-     os.system("wget {}".format("https://projects4jw.blob.core.windows.net/x-decoder/release/xdecoder_focalt_last.pt"))
-
- if not os.path.exists(pretrained_pth_novg):
-     os.system("wget {}".format("https://projects4jw.blob.core.windows.net/x-decoder/release/xdecoder_focalt_last_novg.pt"))
-
-
- '''
- build model
- '''
- model_last = BaseModel(opt, build_model(opt)).from_pretrained(pretrained_pth_last).eval().cuda()
-
- with torch.no_grad():
-     model_last.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(["background", "background"], is_eval=True)
-
- '''
- inference model
- '''
-
- @torch.no_grad()
- def xdecoder(image, instruction, *args, **kwargs):
-     image = Image.open(image)
-     image = image.convert("RGB")
-     with torch.autocast(device_type='cuda', dtype=torch.float16):
-         return referring_inpainting_gpt3(model_last, image, instruction, *args, **kwargs)
-
 
- #xdecoder = gr.Interface.load(name="spaces/xdecoder/Instruct-X-Decoder")
+ xdecoder = gr.Interface.load(name="spaces/xdecoder/Instruct-X-Decoder")
 
  def get_frames(video_in):
  frames = []
@@ -129,13 +72,7 @@ def infer(prompt,video_in, trim_value):
      print("set stop frames to: " + str(n_frame))
 
      for i in frames_list[0:int(n_frame)]:
-         #xdecoder_img = xdecoder(i, prompt, fn_index=0)
-         xdecoder_img = xdecoder(i, prompt)
-         #res_image = xdecoder_img[0]
-         #rgb_im = images[0].convert("RGB")
-
-         # exporting the image
-         #res_image.save(f"result_img-{i}.jpg")
+         xdecoder_img = xdecoder(i, prompt, fn_index=0)
          result_frames.append(xdecoder_img)
          print("frame " + i + "/" + str(n_frame) + ": done;")
 
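Note: the change drops the in-process X-Decoder setup (detectron2 install, checkpoint downloads, CUDA model build and the local xdecoder() wrapper) in favor of calling the hosted Instruct-X-Decoder Space, and restores fn_index=0 on the per-frame call. A minimal sketch of the resulting pattern, assuming a Gradio 3.x environment where gr.Interface.load("spaces/...") returns a callable proxy for the Space; process_frames is an illustrative helper, not a function in app.py:

import gradio as gr

# Proxy for the hosted Space; no local checkpoints, torch or CUDA required.
# (Same call that this commit adds to app.py.)
xdecoder = gr.Interface.load(name="spaces/xdecoder/Instruct-X-Decoder")

def process_frames(frame_paths, prompt):
    # Send each extracted frame to the remote Space and collect the edited images.
    # fn_index=0 selects the Space's first prediction endpoint, matching the
    # xdecoder(i, prompt, fn_index=0) call restored in infer().
    results = []
    for path in frame_paths:
        results.append(xdecoder(path, prompt, fn_index=0))
    return results

Running the edit remotely keeps this Space lightweight (hence app.py +3 -66), at the cost of one network round-trip per frame.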