Here is a version you can run directly. The second method in the Model Card is incomplete; as for img_input_size, I experimented a bit and [1024, 1024] seems to give good results.

#37
by lsjv50kfv - opened
from transformers import AutoModelForImageSegmentation
from torchvision.transforms.functional import normalize

import numpy as np
import torch
import torch.nn.functional as F
import skimage.io as io
from PIL import Image

# the model repo has been cloned locally next to this script; change the path if yours differs
model = AutoModelForImageSegmentation.from_pretrained("./", trust_remote_code=True)

def preprocess_image(im: np.ndarray, model_input_size: list) -> torch.Tensor:
    if len(im.shape) < 3:
        im = im[:, :, np.newaxis]
    # orig_im_size=im.shape[0:2]
    im_tensor = torch.tensor(im, dtype=torch.float32).permute(2,0,1)
    im_tensor = F.interpolate(torch.unsqueeze(im_tensor,0), size=model_input_size, mode='bilinear')
    image = torch.divide(im_tensor,255.0)
    image = normalize(image,[0.5,0.5,0.5],[1.0,1.0,1.0])
    return image

def postprocess_image(result: torch.Tensor, im_size: list)-> np.ndarray:
    result = torch.squeeze(F.interpolate(result, size=im_size, mode='bilinear') ,0)
    ma = torch.max(result)
    mi = torch.min(result)
    result = (result-mi)/(ma-mi)
    im_array = (result*255).permute(1,2,0).cpu().data.numpy().astype(np.uint8)
    im_array = np.squeeze(im_array)
    return im_array

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()  # inference mode: dropout off, batch-norm uses running statistics

# prepare input
image_path = "https://farm5.staticflickr.com/4007/4322154488_997e69e4cf_z.jpg"
orig_im = io.imread(image_path)
orig_im_size = orig_im.shape[0:2]
image = preprocess_image(orig_im, [1024,1024]).to(device)


# inference (no gradient tracking needed at prediction time)
with torch.no_grad():
    result = model(image)  # tuple (2 x 6); result[0][0] is the predicted mask

# post process
result_image = postprocess_image(result[0][0], orig_im_size)

# composite the original image onto a transparent canvas, using the mask as alpha, then save
pil_im = Image.fromarray(result_image)
no_bg_image = Image.new("RGBA", pil_im.size, (0,0,0,0))

orig_image = Image.fromarray(orig_im)
no_bg_image.paste(orig_image, mask=pil_im)

no_bg_image.save('./res.png')

By the way, I had already downloaded the model locally and wrote this script inside the model folder, which is why I call from_pretrained("./", trust_remote_code=True) directly. You will need to change the path, or just use the original method from the Model Card.
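If you would rather pull the model straight from the Hub instead of pointing at a local clone, a minimal sketch (same standard transformers call, just with the repo id briaai/RMBG-1.4 mentioned later in this thread):

from transformers import AutoModelForImageSegmentation

# load directly from the Hub; the weights are downloaded and cached on first use
model = AutoModelForImageSegmentation.from_pretrained("briaai/RMBG-1.4", trust_remote_code=True)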

I'd like to turn this into a web service. How can I implement that? I'm not familiar with Python.


How did you download it? My downloads keep failing with an error about a missing MyConfig.py.

Found it. Installing through the Python transformers library kept throwing errors, but git clone https://huggingface.co/briaai/RMBG-1.4 works without any problems.
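If git clone is awkward (it needs git-lfs for the weight files), the Hub's Python client can fetch the whole repo as well; a small sketch, assuming huggingface_hub is installed:

from huggingface_hub import snapshot_download

# downloads the repo contents (config, MyConfig.py, MyPipe.py, weights) into the local cache
local_dir = snapshot_download("briaai/RMBG-1.4")
print(local_dir)  # pass this folder to from_pretrained(..., trust_remote_code=True)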

The source in MyPipe.py does indeed use 1024x1024 as the default model_input_size:
def preprocess_image(self, im: np.ndarray, model_input_size: list=[1024,1024]) -> torch.Tensor:
    # same as utilities.py with minor modification
    if len(im.shape) < 3:
        im = im[:, :, np.newaxis]
    im_tensor = torch.tensor(im, dtype=torch.float32).permute(2,0,1)
    im_tensor = F.interpolate(torch.unsqueeze(im_tensor,0), size=model_input_size, mode='bilinear')
    image = torch.divide(im_tensor,255.0)
    image = normalize(image,[0.5,0.5,0.5],[1.0,1.0,1.0])
    return image
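MyPipe.py appears to back the pipeline entry point, so the same 1024x1024 preprocessing is applied when you use the "original method" from the Model Card via the pipeline API. A rough sketch (the exact return value depends on the custom pipeline code in the repo; example.jpg is a placeholder path):

from transformers import pipeline

# the custom pipeline defined in MyPipe.py handles pre- and post-processing internally
pipe = pipeline("image-segmentation", model="briaai/RMBG-1.4", trust_remote_code=True)
no_bg = pipe("example.jpg")  # PIL image with the background removed
no_bg.save("res.png")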


I wrapped it in a web server: https://github.com/sssxyd/removebg-by-ai
You can simply download the packaged Windows build and run it.
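For reference, wrapping the script at the top of this thread in a small HTTP endpoint takes only a few lines. A minimal sketch with Flask (this is not the linked project's code; it assumes the script above is saved as rmbg.py, a hypothetical name, with its demo part guarded by if __name__ == "__main__", so that model, device, preprocess_image and postprocess_image can be imported):

from io import BytesIO

import numpy as np
import torch
from flask import Flask, request, send_file
from PIL import Image

from rmbg import model, device, preprocess_image, postprocess_image  # hypothetical module name

app = Flask(__name__)

@app.route("/remove-bg", methods=["POST"])
def remove_bg():
    # expects a multipart upload with the image under the "file" field
    orig = Image.open(request.files["file"].stream).convert("RGB")
    orig_np = np.array(orig)
    image = preprocess_image(orig_np, [1024, 1024]).to(device)
    with torch.no_grad():
        result = model(image)
    mask = Image.fromarray(postprocess_image(result[0][0], orig_np.shape[0:2]))
    out = Image.new("RGBA", mask.size, (0, 0, 0, 0))
    out.paste(orig, mask=mask)
    buf = BytesIO()
    out.save(buf, format="PNG")
    buf.seek(0)
    return send_file(buf, mimetype="image/png")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)

Run it with python on that file and POST an image, e.g. curl -F "file=@input.jpg" http://localhost:8000/remove-bg -o res.png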
