karpurna2 committed
Commit b700f65 · Parent: 97bbc07

updated with spaces
Files changed (3):
  1. utils/Caption.py   +1 -1
  2. utils/Emotions.py  +2 -2
  3. utils/utils.py     +2 -1

utils/Caption.py CHANGED
@@ -4,7 +4,7 @@ import spaces
 
 @spaces.GPU
 def get_caption(image):
-    print(image)
+
     model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True, torch_dtype=torch.float16)
     model = model.to(device='cuda')
 
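This change drops a leftover debug print(image) before the model is loaded. The decorator comes from Hugging Face's spaces package (ZeroGPU): a GPU is attached only for the duration of the decorated call, which is why the model is loaded and moved to 'cuda' inside get_caption rather than at import time. A minimal sketch of that pattern follows, assuming the file imports AutoModel/AutoTokenizer from transformers; the generation step is not shown in this diff and follows the MiniCPM-Llama3-V-2_5 model card, so treat its exact arguments as an assumption.

import spaces
import torch
from transformers import AutoModel, AutoTokenizer

# The tokenizer can live at module level; it needs no GPU.
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True)

@spaces.GPU  # ZeroGPU attaches a GPU only while this function runs
def get_caption(image):
    # Load the vision-language model in half precision and move it to the GPU
    # provided for this call (mirrors the two lines kept in the diff).
    model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5',
                                      trust_remote_code=True,
                                      torch_dtype=torch.float16)
    model = model.to(device='cuda')
    model.eval()

    # Generation step: follows the model card's chat() API, not shown in this
    # commit, so the prompt and keyword arguments here are assumptions.
    msgs = [{'role': 'user', 'content': 'Describe this image in one sentence.'}]
    caption = model.chat(image=image, msgs=msgs, tokenizer=tokenizer,
                         sampling=True, temperature=0.7)
    return caption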
 
utils/Emotions.py CHANGED
@@ -15,7 +15,7 @@ def get_emotions(image, text):
     max_len = 128
     input_dim = 768
     output_dim = 8
-    print(image)
+
 
     test_transform = torchvision.transforms.Compose([
         torchvision.transforms.Resize((224, 224)),
@@ -31,7 +31,7 @@ def get_emotions(image, text):
     test_dataset = CustomDataset(image, text, test_emo, tokenizer, max_len, test_transform)
     test_loader = torch.utils.data.DataLoader(test_dataset,
                                               batch_size=1,
-                                              shuffle=False, num_workers=2)
+                                              shuffle=False, num_workers=0)
     device = "cuda" if torch.cuda.is_available() else "cpu"
     model, preprocess = clip.load("ViT-L/14", device=device)
 
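Besides removing another debug print, the functional change here is num_workers=2 to num_workers=0. On Spaces (and ZeroGPU in particular), DataLoader worker subprocesses are a common source of hangs and CUDA-initialization errors, so loading the single test sample in the main process is the safer choice and costs nothing at batch_size=1. A self-contained sketch of the loader configuration, using a dummy TensorDataset in place of the repo's CustomDataset:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Stand-in for CustomDataset(image, text, test_emo, tokenizer, max_len, test_transform):
# one dummy 224x224 image tensor and one dummy label are enough to show the config.
test_dataset = TensorDataset(torch.zeros(1, 3, 224, 224), torch.zeros(1, dtype=torch.long))

test_loader = DataLoader(test_dataset,
                         batch_size=1,    # a single image/text pair per inference
                         shuffle=False,   # deterministic order for the one sample
                         num_workers=0)   # load in-process; no worker subprocesses on Spaces

for images, labels in test_loader:
    print(images.shape, labels.shape)  # torch.Size([1, 3, 224, 224]) torch.Size([1])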
 
utils/utils.py CHANGED
@@ -4,6 +4,7 @@ from utils.Caption import get_caption
 from utils.Emotions import get_emotions
 import spaces
 
+
 @spaces.GPU
 def get_label(image):
     caption = get_caption(image)
@@ -12,7 +13,7 @@ def get_label(image):
 
     emotions = ['Excitement', 'Sadness', 'Amusement', 'Disgust', 'Awe', 'Contentment', 'Fear', 'Anger']
     probabilities = pred
-    print(pred)
+
     max_idx = np.argmax(probabilities)
 
     # Create color list where all bars are one color, and the max bar is another color
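As in the other two files, the edit removes a stray print(pred). The surrounding context shows what get_label does with the prediction: take the argmax over the eight emotion probabilities and build a per-bar color list that highlights the winning emotion. A small illustrative sketch with made-up probabilities and placeholder colors (the actual colors are not part of this diff):

import numpy as np

emotions = ['Excitement', 'Sadness', 'Amusement', 'Disgust', 'Awe',
            'Contentment', 'Fear', 'Anger']
probabilities = np.array([0.05, 0.10, 0.40, 0.05, 0.10, 0.15, 0.05, 0.10])  # example `pred`

max_idx = np.argmax(probabilities)
# All bars share one color; only the highest-probability bar gets the highlight color.
colors = ['lightgray' if i != max_idx else 'orange' for i in range(len(emotions))]

print(emotions[max_idx])  # 'Amusement'
print(colors)             # highlight color only at index max_idx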