gchhablani committed
Commit 9e939e7
1 Parent(s): 5fa85d2

Remove resizing

Files changed (2)
  1. app.py +6 -8
  2. requirements.txt +1 -2
app.py CHANGED
@@ -5,7 +5,7 @@ import json
 import os
 import numpy as np
 from streamlit.elements import markdown
-import cv2
+from PIL import Image
 from model.flax_clip_vision_bert.modeling_clip_vision_bert import (
     FlaxCLIPVisionBertForSequenceClassification,
 )
@@ -18,7 +18,6 @@ from utils import (
 )
 import matplotlib.pyplot as plt
 from mtranslate import translate
-from PIL import Image


 from session import _get_state
@@ -44,10 +43,11 @@ def read_markdown(path, parent="./sections/"):
     with open(os.path.join(parent, path)) as f:
         return f.read()

-def resize_height(image, new_height):
-    h, w, c = image.shape
-    new_width = int(w * new_height / h)
-    return cv2.resize(image, (new_width, new_height))
+
+# def resize_height(image, new_height):
+#     h, w, c = image.shape
+#     new_width = int(w * new_height / h)
+#     return cv2.resize(image, (new_width, new_height))

 checkpoints = ["./ckpt/ckpt-60k-5999"]  # TODO: Maybe add more checkpoints?
 dummy_data = pd.read_csv("dummy_vqa_multilingual.tsv", sep="\t")
@@ -111,8 +111,6 @@ if uploaded_file is not None:
     state.image_file = os.path.join("images/val2014", uploaded_file.name)
     state.image = np.array(Image.open(uploaded_file))

-
-    state.image = resize_height(state.image, 224)
     transformed_image = get_transformed_image(state.image)

     # Display Image
requirements.txt CHANGED
@@ -4,5 +4,4 @@ git+https://github.com/huggingface/transformers.git
 torchvision==0.10.0
 mtranslate==1.8
 black==21.7b0
-flax==0.3.4
-opencv-python==4.5.3.56
+flax==0.3.4
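For reference, the removed resize_height helper scaled an image to a fixed height while keeping its aspect ratio, using cv2 (whose opencv-python dependency is dropped above). A minimal sketch of the same logic with PIL, which app.py now imports, could look like the following; the name resize_height_pil is illustrative only and is not part of this commit.

from PIL import Image

def resize_height_pil(image: Image.Image, new_height: int) -> Image.Image:
    # Hypothetical PIL equivalent of the removed cv2-based resize_height:
    # scale the width by the same factor used for the height.
    width, height = image.size
    new_width = int(width * new_height / height)
    # PIL's resize takes a (width, height) tuple.
    return image.resize((new_width, new_height))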