DGSpitzer committed
Commit 84b2982
1 Parent(s): 3797752

Update app.py

Files changed (1):
  1. app.py (+75, -1)
app.py CHANGED
@@ -1,3 +1,77 @@
+#Install CLIP-Interrogator-2.1
+
+import os, subprocess
+import torch
+
+def setup():
+    install_cmds = [
+        ['pip', 'install', 'ftfy', 'gradio', 'regex', 'tqdm', 'transformers==4.21.2', 'timm', 'fairscale', 'requests'],
+        ['pip', 'install', 'open_clip_torch'],
+        ['pip', 'install', '-e', 'git+https://github.com/pharmapsychotic/BLIP.git@lib#egg=blip'],
+        ['git', 'clone', '-b', 'open-clip', 'https://github.com/pharmapsychotic/clip-interrogator.git']
+    ]
+    for cmd in install_cmds:
+        print(subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8'))
+
+setup()
+
+# download cache files
+print("Download preprocessed cache files...")
+CACHE_URLS = [
+    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',
+    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
+    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
+    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl',
+    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
+]
+os.makedirs('cache', exist_ok=True)
+for url in CACHE_URLS:
+    print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
+
+import sys
+sys.path.append('src/blip')
+sys.path.append('clip-interrogator')
+
+import gradio as gr
+from clip_interrogator import Config, Interrogator
+
+config = Config()
+config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+config.blip_offload = False if torch.cuda.is_available() else True
+config.chunk_size = 2048
+config.flavor_intermediate_count = 512
+config.blip_num_beams = 64
+
+ci = Interrogator(config)
+
+def img_to_text(image, mode, best_max_flavors):
+    image = image.convert('RGB')
+    if mode == 'best':
+
+        prompt_result = ci.interrogate(image, max_flavors=int(best_max_flavors))
+
+        print("mode best: " + prompt_result)
+
+        return prompt_result, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
+
+    elif mode == 'classic':
+
+        prompt_result = ci.interrogate_classic(image)
+
+        print("mode classic: " + prompt_result)
+
+        return prompt_result, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
+
+    else:
+
+        prompt_result = ci.interrogate_fast(image)
+
+        print("mode fast: " + prompt_result)
+
+        return prompt_result, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
+
+
+
 from PIL import Image
 import numpy as np
 import gradio as gr
@@ -25,7 +99,7 @@ import mutagen
 from mutagen.mp3 import MP3
 from mutagen.wave import WAVE
 
-img_to_text = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")
+#img_to_text = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")
 text_to_music = gr.Interface.load("spaces/fffiloni/text-2-music")
 
 language_translation_model = hub.Module(name='baidu_translate')
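
For reference, a minimal sketch of calling the new local img_to_text directly, assuming the setup code in this commit has already installed the dependencies and downloaded the cache files; the image path and flavor count below are illustrative, not part of the commit:

from PIL import Image

# Hypothetical direct call to the img_to_text defined in this commit.
# "example.jpg" is a placeholder image path.
image = Image.open("example.jpg")
prompt, *_ = img_to_text(image, "best", best_max_flavors=4)
print(prompt)  # the generated prompt text; the trailing gr.update values only toggle UI visibility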