resizing to 500 for image display
app.py CHANGED
@@ -77,9 +77,10 @@ else:
     image_dict = imageLoader.transform(image)
 
     # image = imageLoader.to_image(image_dict["image"].squeeze(0))
+    show_image = imageLoader.show_resize(image)
 
-    show = st.image(
-    show.image(
+    show = st.image(show_image)
+    show.image(show_image, "Your Image")
 
     gen_show_caption(sub, imageLoader.text_transform(cap_prompt))
 
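For context, the new display path hands Streamlit a pre-resized copy of the upload rather than the raw image, then redraws the same element with a caption. A minimal, self-contained sketch of that element-update pattern follows; the gray placeholder image stands in for `imageLoader.show_resize(image)` and, like the comments, is mine rather than part of the commit:

import streamlit as st
from PIL import Image

# Stand-in for the resized upload produced by imageLoader.show_resize(image).
show_image = Image.new("RGB", (500, 375), "gray")

show = st.image(show_image)           # first call draws the image element
show.image(show_image, "Your Image")  # calling .image() on the returned handle
                                      # redraws the same slot, now with a caption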
model.py CHANGED
@@ -30,15 +30,24 @@ class ImageLoader():
     def load(self, im_path):
         im = torch.FloatTensor(self.transformer(Image.open(im_path))).unsqueeze(0)
         return {"image": im}
+
     def raw_load(self, im_path):
         im = torch.FloatTensor(Image.open(im_path))
         return {"image": im}
+
     def transform(self, image):
         im = torch.FloatTensor(self.transformer(image)).unsqueeze(0)
         return {"image": im}
+
     def text_transform(self, text):
         # at present just lowercasing:
         return text.lower()
+
+    def show_resize(self, image):
+        im = torchvision.transforms.functional.pil_to_transform(image)
+        im = torchvision.transforms.functional.resize(image, size=500, max_size=500)
+        return self.to_image(im)
+
     def to_image(self, tensor):
         return torchvision.transforms.ToPILImage()(tensor)
 
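A note on the new method as committed: `torchvision.transforms.functional` has no `pil_to_transform` (the PIL-to-tensor helper is `pil_to_tensor`), the converted tensor is then discarded because `resize` is called on the original PIL `image`, and torchvision's `resize` requires `max_size` to be strictly greater than an integer `size`, so `size=500, max_size=500` raises a ValueError. Below is a minimal sketch of what `show_resize` appears to intend, capping the display copy at 500 px on its longer edge; the plain-PIL `thumbnail` approach is my substitution, not the committed code:

from PIL import Image

def show_resize(image: Image.Image, max_edge: int = 500) -> Image.Image:
    """Return a copy of `image` no larger than `max_edge` px on either side.

    Sketch only: uses PIL's thumbnail() in place of the torchvision
    tensor round-trip attempted in the commit.
    """
    display = image.copy()                   # leave the original untouched
    display.thumbnail((max_edge, max_edge))  # in-place resize, preserves aspect ratio
    return display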