not-lain committed on
Commit 0b808d6 · verified · 1 Parent(s): fb9bda0

Create app.py

Files changed (1)
  1. app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
+ from train import AnimeSegmentation
+ import cv2
+ import numpy as np
+ import torch
+ from torch.cuda import amp
+ from loadimg import load_img
+ import gradio as gr
+
+
+ # load the anime segmentation model from the Hugging Face Hub
+ model = AnimeSegmentation.from_pretrained("skytnt/anime-seg")
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"  # fall back to CPU when no GPU is available
+ model.eval()
+ model.to(device)
+ img_size = model._hub_mixin_config["img_size"]
+
+
+ def get_mask(model, input_img, use_amp=True, s=640):
+     # normalize to [0, 1] and letterbox the image into an s x s square
+     input_img = (input_img / 255).astype(np.float32)
+     h, w = h0, w0 = input_img.shape[:-1]
+     h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
+     ph, pw = s - h, s - w
+     img_input = np.zeros([s, s, 3], dtype=np.float32)
+     img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(input_img, (w, h))
+     # HWC -> CHW, add a batch dimension and move to the model's device
+     img_input = np.transpose(img_input, (2, 0, 1))
+     img_input = img_input[np.newaxis, :]
+     tmpImg = torch.from_numpy(img_input).type(torch.FloatTensor).to(model.device)
+     with torch.no_grad():
+         if use_amp:
+             with amp.autocast():
+                 pred = model(tmpImg)
+             pred = pred.to(dtype=torch.float32)
+         else:
+             pred = model(tmpImg)
+         # undo the padding and resize the mask back to the original resolution
+         pred = pred.cpu().numpy()[0]
+         pred = np.transpose(pred, (1, 2, 0))
+         pred = pred[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
+         pred = cv2.resize(pred, (w0, h0))[:, :, np.newaxis]
+         return pred
+
+
+ def process(img):
+     # load_img accepts a path, URL, PIL image or numpy array and returns a local file path
+     path = load_img(img, output_type="str")
+     img = cv2.cvtColor(cv2.imread(path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
+     mask = get_mask(model, img, use_amp=False, s=img_size)
+     # apply the soft mask and cast back to uint8 before converting to an image
+     img = (mask * img).astype(np.uint8)
+     out = load_img(img)
+     return out
+
+
+ demo = gr.Interface(process, "image", "image")
+
+ demo.launch(debug=True)