djl234 committed on
Commit
672a594
1 Parent(s): 0c39182

Create app.py

Files changed (1):
  1. app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
import tqdm  # imported but never used below
#import fastCNN
import numpy as np

import gradio as gr
import os
#os.system("sudo apt-get install nvidia-cuda-toolkit")
#os.system("/usr/local/bin/python -m pip install --upgrade pip")
#os.system("pip install argparse")
import pydensecrf.densecrf as dcrf
from PIL import Image
import torch
import torch.nn.functional as F
from torchvision import transforms
import collections  # imported but never used below
import cv2

#import argparse
device = 'cpu'

def test(gpu_id, net, img_list, group_size, img_size):
    # Runs the saliency network `net` on a group of five images and refines
    # each predicted mask with a dense CRF. Not called by the demo below:
    # the batch path in sepia() is commented out.
    print('test')
    #device=device
    # original heights/widths, kept for the resize step that is commented out
    hl, wl = [_.shape[0] for _ in img_list], [_.shape[1] for _ in img_list]
    img_transform = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor(),
                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    img_transform_gray = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor(),
                                             transforms.Normalize(mean=[0.449], std=[0.226])])
    with torch.no_grad():
        group_img = torch.rand(5, 3, 224, 224)
        for i in range(5):
            group_img[i] = img_transform(Image.fromarray(img_list[i]))
        _, pred_mask = net(group_img * 1)
        pred_mask = pred_mask.detach().squeeze() * 255
        #pred_mask=[F.interpolate(pred_mask[i].reshape(1,1,pred_mask[i].shape[-2],pred_mask[i].shape[-1]),size=(size,size),mode='bilinear').squeeze().numpy().astype(np.uint8) for i in range(5)]
        # undo the normalization so each input can be displayed next to its mask
        img_resize = [((group_img[i] - group_img[i].min()) / (group_img[i].max() - group_img[i].min()) * 255)
                      .permute(1, 2, 0).contiguous().numpy().astype(np.uint8) for i in range(5)]
        pred_mask = [crf_refine(img_resize[i], pred_mask[i].numpy().astype(np.uint8)) for i in range(5)]
        #pred_mask=[crf_refine(img_list[i],pred_mask[i]) for i in range(5)]
        print(pred_mask[0].shape)
        # 2-pixel white separator strip; kept uint8 so torch.cat below does not
        # promote the image dtype (the original used .long())
        white = (torch.ones(2, pred_mask[0].shape[1], 3) * 255).to(torch.uint8)
        result = [torch.cat([torch.from_numpy(img_resize[i]), white,
                             torch.from_numpy(pred_mask[i]).unsqueeze(2).repeat(1, 1, 3)], dim=0).numpy()
                  for i in range(5)]
        #result = result.resize((w, h), Image.BILINEAR)
        #result.convert('L').save('0.png')
        print('done')
        return result

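test() calls crf_refine, but the file never defines or imports it. A minimal sketch of the dense-CRF refinement commonly paired with pydensecrf in saliency repos is below; the unary/pairwise constants are assumptions, not values from this commit.

def crf_refine(img, annos):
    # img: HxWx3 uint8 RGB image; annos: HxW uint8 soft saliency mask
    assert img.dtype == np.uint8 and annos.dtype == np.uint8
    assert img.shape[:2] == annos.shape
    EPSILON, tau, M = 1e-8, 1.05, 2  # M = 2 labels: background / salient
    sigmoid = lambda x: 1 / (1 + np.exp(-x))
    d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)
    anno_norm = annos / 255.0
    # unary energies derived from the network's soft mask
    n_energy = -np.log(1.0 - anno_norm + EPSILON) / (tau * sigmoid(1 - anno_norm))
    p_energy = -np.log(anno_norm + EPSILON) / (tau * sigmoid(anno_norm))
    U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')
    U[0, :] = n_energy.flatten()
    U[1, :] = p_energy.flatten()
    d.setUnaryEnergy(U)
    # pairwise terms: spatial smoothness + appearance consistency
    d.addPairwiseGaussian(sxy=3, compat=3)
    d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=np.ascontiguousarray(img), compat=5)
    infer = np.array(d.inference(1)).astype('float32')
    res = infer[1, :]  # per-pixel foreground probability
    res *= 255 / res.max()
    return res.reshape(img.shape[:2]).astype('uint8')
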
img_lst = [(torch.rand(352, 352, 3) * 255).numpy().astype(np.uint8) for i in range(5)]
outputpath1 = 'img2.png'
outputpath2 = 'img2.png'
outputpath3 = 'img2.png'

def sepia(opt, img1):
    # The name is a leftover from the Gradio image-filter tutorial: instead of
    # applying a sepia filter, this returns a pre-rendered crop of the demo
    # image for the chosen aspect ratio `opt`.
    #img_list=[img1,img2,img3,img4,img5]
    #h_list,w_list=[_.shape[0] for _ in img_list],[_.shape[1] for _ in img_list]
    #print(type(img1))
    #print(img1.shape)
    #result_list=test(device,net,img_list,5,224)
    #result_list=[result_list[i].resize((w_list[i], h_list[i]), Image.BILINEAR) for i in range(5)]
    #img1,img2,img3,img4,img5=result_list#test('cpu',net,img_list,5,224)
    #white=(torch.ones(img1.shape[0],2,3)*255).numpy().astype(np.uint8)

    # e.g. opt '9:16' -> 'bike9_16.png'
    output = cv2.imread('bike' + opt.replace(':', '_') + '.png')
    output = cv2.resize(output, (output.shape[1] * 256 // output.shape[0], 256))
    #return np.concatenate([img1,white,img2,white,img3,white,img4,white,img5],axis=1)
    return output[:, :, ::-1]  # OpenCV loads BGR; Gradio expects RGB
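cv2.imread returns None when a 'bike*.png' asset is missing, which would make the cv2.resize above raise. A guarded variant (hypothetical helper, not part of the original commit) could fall back to the uploaded image:

def sepia_safe(opt, img1):
    # hypothetical guard around the lookup in sepia()
    output = cv2.imread('bike' + opt.replace(':', '_') + '.png')
    if output is None:
        return img1  # asset missing: echo the uploaded image back
    output = cv2.resize(output, (output.shape[1] * 256 // output.shape[0], 256))
    return output[:, :, ::-1]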

with gr.Blocks() as demo:
    gr.Markdown("Image cropping")
    #with gr.Tab("Component test"):
    with gr.Row():
        with gr.Column():
            #slider1 = gr.Slider(2, 20, value=2, label="Count", info="Choose between 2 and 20")
            #drop1 = gr.Dropdown(["cat", "dog", "bird"], label="Animal", info="Will add more animals later!")
            #checklist1 = gr.CheckboxGroup(["4:3", "3:4", "16:9"], label="Shape", info="The shape of cropped image")
            radio2 = gr.Radio(["9:16", "3:4", "1:1", "4:3", "16:9"], value="3:4", label="Shape",
                              info="The shape of the cropped image")
            #radio1 = gr.Radio(["park", "zoo", "road"], label="Location", info="Where did they go?")
            src_img1 = gr.Image()
            # gr.Button takes its text as the first positional (value) argument,
            # not a `label` keyword
            bottom1 = gr.Button("cropping component")

        out1 = gr.Image()  #gr.Textbox()

    bottom1.click(sepia, inputs=[radio2, src_img1], outputs=out1)
    #gr.Image(shape=(224, 2))
    #demo = gr.Interface(sepia, inputs=["image","image","image","image","image"], outputs=["image","image","image","image","image"])
    #demo = gr.Interface(sepia, inputs=["image"], outputs=["image"])

demo.launch(debug=True, share=True)
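This commit ships no dependency list. Based on the imports above, a plausible requirements.txt for the Space (an assumption, not part of the commit) would be:

# requirements.txt (inferred from the imports; versions unpinned)
gradio
torch
torchvision
opencv-python
pydensecrf
numpy
Pillow
tqdm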