Spaces:
Runtime error
Add

- MODNet +1 -0
- RCFPyTorch0 +1 -0
- __pycache__/web.cpython-37.pyc +0 -0
- app.py +146 -0
- flagged/image/tmpo2k6btjc.jpg +0 -0
- flagged/log.csv +2 -0
MODNet
ADDED
@@ -0,0 +1 @@
+Subproject commit 28165a451e4610c9d77cfdf925a94610bb2810fb
RCFPyTorch0
ADDED
@@ -0,0 +1 @@
+Subproject commit 0f1f2486e5cca2f0c564fc87bdd87b182bfb03c1
__pycache__/web.cpython-37.pyc
ADDED
Binary file (2.96 kB)
app.py
ADDED
@@ -0,0 +1,146 @@
+import os
+import numpy as np
+import os.path as osp
+import cv2
+import argparse
+import torch
+#from torch.utils.data import DataLoader
+import torchvision
+from RCFPyTorch0.dataset import BSDS_Dataset
+from RCFPyTorch0.models import RCF
+import gradio as gr
+from PIL import Image
+import sys
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.transforms as transforms
+from MODNet.src.models.modnet import MODNet
+# web page construction
+import cv2
+
+
+def single_scale_test(image):
+    ref_size = 512
+    # define image to tensor transform
+    im_transform = transforms.Compose(
+        [
+            transforms.ToTensor(),
+            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+        ]
+    )
+
+    # create MODNet and load the pre-trained ckpt
+    modnet = MODNet(backbone_pretrained=False)
+    modnet = nn.DataParallel(modnet).cuda()
+    modnet.load_state_dict(torch.load('MODNet/pretrained/modnet_photographic.ckpt'))
+    modnet.eval()
+    # Note: the shapes in the comments below refer to one sample input image (1080x1440); they are only kept to trace the transformations.
+    # inference images
+    # im_names = os.listdir(args.input_path)
+    # for im_name in im_names:
+    #     print('Process image: {0}'.format(im_name))
+    # read image
+
+    # unify image channels to 3
+    image = np.asarray(image)
+    if len(image.shape) == 2:
+        image = image[:, :, None]
+    if image.shape[2] == 1:
+        image = np.repeat(image, 3, axis=2)
+    elif image.shape[2] == 4:
+        image = image[:, :, 0:3]
+    im_org = image  # keep the original numpy array (1080, 1440, 3)
+    # convert image to PyTorch tensor
+    image = Image.fromarray(image)
+    image = im_transform(image)
+    # add mini-batch dim
+    image = image[None, :, :, :]
+    # resize image for input
+    im_b, im_c, im_h, im_w = image.shape
+    if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
+        if im_w >= im_h:
+            im_rh = ref_size
+            im_rw = int(im_w / im_h * ref_size)
+        elif im_w < im_h:
+            im_rw = ref_size
+            im_rh = int(im_h / im_w * ref_size)
+    else:
+        im_rh = im_h
+        im_rw = im_w
+    im_rw = im_rw - im_rw % 32
+    im_rh = im_rh - im_rh % 32
+    image = F.interpolate(image, size=(im_rh, im_rw), mode='area')
+
+    # inference
+    _, _, matte = modnet(image.cuda(), True)  # matte predicted by the model ([1, 1, 512, 672])
+
+    # resize and save matte, foreground picture
+    matte = F.interpolate(matte, size=(im_h, im_w), mode='area')  # interpolate back to ([1, 1, 1080, 1440]), values in [0, 1]
+    matte = matte[0][0].data.cpu().numpy()  # torch tensor to numpy (1080, 1440)
+    # matte_name = im_name.split('.')[0] + '_matte.png'
+    # Image.fromarray(((matte * 255).astype('uint8')), mode='L').save(os.path.join(args.output_path, matte_name))
+    matte_org = np.repeat(np.asarray(matte)[:, :, None], 3, axis=2)  # expand to (1080, 1440, 3) so it can be combined with im_org
+
+    foreground = im_org * matte_org + np.full(im_org.shape, 255) * (1 - matte_org)  # composite over white to get the cutout
+    # fg_name = im_name.split('.')[0] + '_fg.png'
+    Image.fromarray(((foreground).astype('uint8')), mode='RGB').save(os.path.join('MODNet/output-img', 'fg_name.png'))
+    output = Image.open(os.path.join('MODNet/output-img', 'fg_name.png'))
+    image = np.array(output)
+
+    model = RCF().cuda()  # build the RCF edge detector and run it on the matted foreground
+    checkpoint = torch.load("RCFPyTorch0/bsds500_pascal_model.pth")
+    model.load_state_dict(checkpoint)
+    model.eval()
+    # if not osp.isdir(save_dir):
+    #     os.makedirs(save_dir)
+    # for idx, image in enumerate(test_loader):
+    image = torch.from_numpy(image).float().permute(2, 0, 1).unsqueeze(0)
+    image = image.cuda()
+    _, _, H, W = image.shape
+    results = model(image)
+    all_res = torch.zeros((len(results), 1, H, W))
+    for i in range(len(results)):
+        all_res[i, 0, :, :] = results[i]
+    # filename = osp.splitext(test_list[idx])[0]
+    torchvision.utils.save_image(1 - all_res, osp.join('RCFPyTorch0/results/RCF', 'result.jpg'))
+    fuse_res = torch.squeeze(results[1].detach()).cpu().numpy()
+    fuse_res = ((1 - fuse_res) * 255).astype(np.uint8)
+    cv2.imwrite(osp.join("RCFPyTorch0/results/RCF", 'result_ss.png'), fuse_res)
+    # print('\rRunning single-scale test [%d/%d]' % (idx + 1, len(test_loader)), end='')
+    # print('Running single-scale test done')
+    output = Image.open(os.path.join('RCFPyTorch0/results/RCF', 'result_ss.png'))
+    return output
+
+parser = argparse.ArgumentParser(description='PyTorch Testing')
+parser.add_argument('--gpu', default='0', type=str, help='GPU ID')
+#parser.add_argument('--checkpoint', default=None, type=str, help='path to latest checkpoint')
+#parser.add_argument('--save-dir', help='output folder', default='results/RCF')
+#parser.add_argument('--dataset', help='root folder of dataset', default='data/HED-BSDS')
+args = parser.parse_args()
+
+os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
+os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+
+#if not osp.isdir(args.save_dir):
+#    os.makedirs(args.save_dir)
+
+#test_dataset = BSDS_Dataset(root=args.dataset, split='test')
+#test_loader = DataLoader(test_dataset, batch_size=1, num_workers=1, drop_last=False, shuffle=False)
+#test_list = [osp.split(i.rstrip())[1] for i in test_dataset.file_list]
+#assert len(test_list) == len(test_loader)
+
+
+
+#if osp.isfile(args.checkpoint):
+#    print("=> loading checkpoint from '{}'".format(args.checkpoint))
+#    checkpoint = torch.load(args.checkpoint)
+#    model.load_state_dict(checkpoint)
+#    print("=> checkpoint loaded")
+#else:
+#    print("=> no checkpoint found at '{}'".format(args.checkpoint))
+
+#print('Performing the testing...')
+
+
+interface = gr.Interface(fn=single_scale_test, inputs="image", outputs="image")
+interface.launch()
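Note: as committed, single_scale_test rebuilds MODNet and RCF and re-reads both checkpoints on every Gradio request. A minimal sketch of loading both models once at startup is shown below; the checkpoint paths and model classes are taken from app.py above, while the load_models helper and the module-level MODNET / RCF_MODEL names are hypothetical, not part of the original code.

import torch
import torch.nn as nn
from MODNet.src.models.modnet import MODNet
from RCFPyTorch0.models import RCF

def load_models():
    # Sketch only: same checkpoints as app.py, loaded a single time at import.
    modnet = nn.DataParallel(MODNet(backbone_pretrained=False)).cuda()
    modnet.load_state_dict(torch.load('MODNet/pretrained/modnet_photographic.ckpt'))
    modnet.eval()
    rcf = RCF().cuda()
    rcf.load_state_dict(torch.load('RCFPyTorch0/bsds500_pascal_model.pth'))
    rcf.eval()
    return modnet, rcf

# Hypothetical module-level handles; single_scale_test(image) would reuse these
# instead of reconstructing both networks on each call.
MODNET, RCF_MODEL = load_models()

Keeping the models at module scope also means a bad checkpoint path fails at startup rather than on the first request.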
flagged/image/tmpo2k6btjc.jpg
ADDED
flagged/log.csv
ADDED
@@ -0,0 +1,2 @@
+image,output,flag,username,timestamp
+D:\code\MOD_RCF\flagged\image\tmpo2k6btjc.jpg,,,,2022-11-28 23:39:55.725173