SMD00 committed on
Commit
d16d812
1 Parent(s): dea8100

Upload 4 files

Browse files
Files changed (2) hide show
  1. app.py +8 -23
  2. requirements.txt +11 -4
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import gradio as gr
2
  from PIL import Image
3
  import cv2 as cv
4
 
@@ -303,10 +303,8 @@ def visualize(model, data, dims):
303
  # t_img = transforms.Resize((dims[0], dims[1]))(t_img)
304
  img = Image.fromarray(np.uint8(fake_imgs[i]))
305
  img = cv.resize(fake_imgs[i], dsize=(dims[1], dims[0]), interpolation=cv.INTER_CUBIC)
306
- return img
307
  # st.text(f"Size of fake image {fake_imgs[i].shape} \n Type of image = {type(fake_imgs[i])}")
308
- # st.image(img, caption="Output image", use_column_width='auto', clamp=True)
309
-
310
 
311
  def log_results(loss_meter_dict):
312
  for loss_name, loss_meter in loss_meter_dict.items():
@@ -354,27 +352,14 @@ def make_dataloaders2(batch_size=16, n_workers=4, pin_memory=True, **kwargs): #
354
  pin_memory=pin_memory)
355
  return dataloader
356
 
357
- def main_func(filepath):
358
- im = Image.open(filepath)
359
- size_text=f"Size of uploaded image {im.shape}"
360
- # st.text(body=f"Size of uploaded image {im.shape}")
361
  a = im.shape
362
- # st.image(im, caption="Uploaded Image.", use_column_width='auto')
363
  test_dl = make_dataloaders2(img_list=[im])
364
  for data in test_dl:
365
  model.setup_input(data)
366
  model.optimize()
367
- img=visualize(model, data, a)
368
- return (size_text,img)
369
-
370
- title = "PicSum"
371
- description = "Gradio demo for PicSum project. You can give an image as input on the left side and then click on the submit button. The generated text, summary, important sentences and fill in the gaps would be generated on the right side."
372
- gr.Interface(
373
- extract,
374
- [gr.inputs.Image(type="filepath", label="Input"),gr.inputs.CheckboxGroup(choices, type="value", default=['Generate text'], label='Options') ],
375
- [gr.outputs.Textbox(label="Generated Text"),"image"],
376
- title=title,
377
- description=description,
378
- # examples=[['a.png', ['Generate text']], ['b.png', ['Generate text','Summary','Important Sentences']], ]
379
- ).launch(enable_queue=True)
380
-
 
1
+ import streamlit as st
2
  from PIL import Image
3
  import cv2 as cv
4
 
 
303
  # t_img = transforms.Resize((dims[0], dims[1]))(t_img)
304
  img = Image.fromarray(np.uint8(fake_imgs[i]))
305
  img = cv.resize(fake_imgs[i], dsize=(dims[1], dims[0]), interpolation=cv.INTER_CUBIC)
 
306
  # st.text(f"Size of fake image {fake_imgs[i].shape} \n Type of image = {type(fake_imgs[i])}")
307
+ st.image(img, caption="Output image", use_column_width='auto', clamp=True)
 
308
 
309
  def log_results(loss_meter_dict):
310
  for loss_name, loss_meter in loss_meter_dict.items():
 
352
  pin_memory=pin_memory)
353
  return dataloader
354
 
355
+ file_up = st.file_uploader("Upload an jpg image", type="jpg")
356
+ if file_up is not None:
357
+ im = Image.open(file_up)
358
+ st.text(body=f"Size of uploaded image {im.shape}")
359
  a = im.shape
360
+ st.image(im, caption="Uploaded Image.", use_column_width='auto')
361
  test_dl = make_dataloaders2(img_list=[im])
362
  for data in test_dl:
363
  model.setup_input(data)
364
  model.optimize()
365
+ visualize(model, data, a)
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,15 +1,22 @@
1
- gradio
 
2
  glob2
3
  numpy
4
  pathlib
5
  tqdm
6
- matplotlib
7
- matplotlib-venn
8
  scikit-image
9
- torch==1.7
10
  torchvision
11
  torchsummary
12
  fastai==2.4
 
 
 
 
 
 
13
  opencv-contrib-python==4.6.0.66
14
  opencv-python==4.6.0.66
15
  opencv-python-headless==4.6.0.66
 
1
+ streamlit
2
+ Pillow
3
  glob2
4
  numpy
5
  pathlib
6
  tqdm
7
+ matplotlib==3.2.2
8
+ matplotlib-venn==0.11.7
9
  scikit-image
10
+ torch
11
  torchvision
12
  torchsummary
13
  fastai==2.4
14
+ fastcore==1.3.29
15
+ fastdownload==0.0.7
16
+ fastdtw==0.3.4
17
+ fastjsonschema==2.16.2
18
+ fastprogress==1.0.3
19
+ fastrlock==0.8.1
20
  opencv-contrib-python==4.6.0.66
21
  opencv-python==4.6.0.66
22
  opencv-python-headless==4.6.0.66