Spaces: Build error
Trang Dang committed
Commit 218ce85 · 1 Parent(s): d01a371
process test image
run.py
CHANGED
@@ -4,6 +4,19 @@ import numpy as np
 import matplotlib.pyplot as plt
 import app
 import os
+from patchify import patchify
+from PIL import Image
+
+
+def patchify_image(large_image, patch_size=256, step=256):
+    # Split a 2-D image into square patches; step == patch_size means the tiles do not overlap.
+    all_img_patches = []
+    patches_img = patchify(large_image, (patch_size, patch_size), step=step)
+    for i in range(patches_img.shape[0]):
+        for j in range(patches_img.shape[1]):
+            single_patch_img = patches_img[i, j, :, :]
+            all_img_patches.append(single_patch_img)
+    return np.array(all_img_patches)
 
 def pred(src):
     # os.environ['HUGGINGFACE_HUB_HOME'] = './.cache'
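(Note: the helper was originally also named patchify, which shadowed the library import and broke the call inside pred; it is renamed here. For reference, a quick shape check of the patch grid; a sketch assuming a single-channel test image, since a (256, 256) patch shape requires a 2-D array:

import numpy as np
from patchify import patchify

# Stand-in for np.array(Image.open(src)); the 2-D shape is an assumption.
img = np.zeros((1024, 1024), dtype=np.uint8)
patches = patchify(img, (256, 256), step=256)  # step == patch size, so no overlap
print(patches.shape)  # (4, 4, 256, 256): a 4x4 grid of 256x256 patches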
@@ -18,7 +31,37 @@ def pred(src):
     my_sam_model.load_state_dict(torch.load("sam_model.pth", map_location=torch.device('cpu')))
 
     new_image = np.array(Image.open(src))
-
+    patches = patchify(new_image, (256, 256), step=256)
+
+    # Define the size of your array
+    array_size = 256
+
+    # Define the size of your grid
+    grid_size = 10
+
+    # Generate the grid points
+    x = np.linspace(0, array_size - 1, grid_size)
+    y = np.linspace(0, array_size - 1, grid_size)
+
+    # Generate a grid of coordinates
+    xv, yv = np.meshgrid(x, y)
+
+    # Convert the numpy arrays to lists
+    xv_list = xv.tolist()
+    yv_list = yv.tolist()
+
+    # Combine the x and y coordinates into a list of lists of [x, y] pairs
+    input_points = [[[int(px), int(py)] for px, py in zip(x_row, y_row)] for x_row, y_row in zip(xv_list, yv_list)]
+    input_points = torch.tensor(input_points).view(1, 1, grid_size * grid_size, 2)
+
+    i, j = 1, 2
+
+    # Selected patch for segmentation
+    random_array = patches[i, j]
+
+    single_patch = Image.fromarray(random_array)
+    inputs = processor(single_patch, input_points=input_points, return_tensors="pt")
+
     inputs = {k: v.to(device) for k, v in inputs.items()}
     x = 1
     # my_sam_model.eval()
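The hunk stops after moving the tensors to the device; the forward pass itself is still commented out. A minimal sketch of the remaining step, assuming my_sam_model is a transformers SamModel and processor the matching SamProcessor (neither definition appears in this diff):

# Continuation sketch: my_sam_model as a transformers SamModel and processor as a
# SamProcessor are assumptions; this commit does not show their construction.
my_sam_model.eval()
with torch.no_grad():
    outputs = my_sam_model(**inputs, multimask_output=False)

# Upscale the low-resolution mask logits back to the patch's original size.
masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(),
    inputs["original_sizes"].cpu(),
    inputs["reshaped_input_sizes"].cpu(),
)
single_patch_mask = masks[0].squeeze().numpy()  # boolean mask for the selected patch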