Spaces:
Runtime error
Runtime error
jframed281
committed on
Commit
•
dca8200
1
Parent(s):
473dfd0
modify app.py
Browse files
app.py
CHANGED
@@ -1,224 +1,62 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
"Requirement already satisfied: MarkupSafe>=0.9.2 in /usr/local/lib/python3.8/dist-packages (from Mako->alembic>=0.6.2->dataset) (2.0.1)\n",
|
64 |
-
"Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
|
65 |
-
"Requirement already satisfied: sentencepiece in /usr/local/lib/python3.8/dist-packages (0.1.97)\n",
|
66 |
-
"Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
|
67 |
-
"Requirement already satisfied: googletrans==3.1.0a0 in /usr/local/lib/python3.8/dist-packages (3.1.0a0)\n",
|
68 |
-
"Requirement already satisfied: httpx==0.13.3 in /usr/local/lib/python3.8/dist-packages (from googletrans==3.1.0a0) (0.13.3)\n",
|
69 |
-
"Requirement already satisfied: idna==2.* in /usr/local/lib/python3.8/dist-packages (from httpx==0.13.3->googletrans==3.1.0a0) (2.10)\n",
|
70 |
-
"Requirement already satisfied: certifi in /usr/local/lib/python3.8/dist-packages (from httpx==0.13.3->googletrans==3.1.0a0) (2022.12.7)\n",
|
71 |
-
"Requirement already satisfied: rfc3986<2,>=1.3 in /usr/local/lib/python3.8/dist-packages (from httpx==0.13.3->googletrans==3.1.0a0) (1.5.0)\n",
|
72 |
-
"Requirement already satisfied: chardet==3.* in /usr/local/lib/python3.8/dist-packages (from httpx==0.13.3->googletrans==3.1.0a0) (3.0.4)\n",
|
73 |
-
"Requirement already satisfied: httpcore==0.9.* in /usr/local/lib/python3.8/dist-packages (from httpx==0.13.3->googletrans==3.1.0a0) (0.9.1)\n",
|
74 |
-
"Requirement already satisfied: sniffio in /usr/local/lib/python3.8/dist-packages (from httpx==0.13.3->googletrans==3.1.0a0) (1.3.0)\n",
|
75 |
-
"Requirement already satisfied: hstspreload in /usr/local/lib/python3.8/dist-packages (from httpx==0.13.3->googletrans==3.1.0a0) (2023.1.1)\n",
|
76 |
-
"Requirement already satisfied: h2==3.* in /usr/local/lib/python3.8/dist-packages (from httpcore==0.9.*->httpx==0.13.3->googletrans==3.1.0a0) (3.2.0)\n",
|
77 |
-
"Requirement already satisfied: h11<0.10,>=0.8 in /usr/local/lib/python3.8/dist-packages (from httpcore==0.9.*->httpx==0.13.3->googletrans==3.1.0a0) (0.9.0)\n",
|
78 |
-
"Requirement already satisfied: hpack<4,>=3.0 in /usr/local/lib/python3.8/dist-packages (from h2==3.*->httpcore==0.9.*->httpx==0.13.3->googletrans==3.1.0a0) (3.0.0)\n",
|
79 |
-
"Requirement already satisfied: hyperframe<6,>=5.2.0 in /usr/local/lib/python3.8/dist-packages (from h2==3.*->httpcore==0.9.*->httpx==0.13.3->googletrans==3.1.0a0) (5.2.0)\n"
|
80 |
-
]
|
81 |
-
}
|
82 |
-
]
|
83 |
-
},
|
84 |
-
{
|
85 |
-
"cell_type": "code",
|
86 |
-
"execution_count": 64,
|
87 |
-
"metadata": {
|
88 |
-
"colab": {
|
89 |
-
"base_uri": "https://localhost:8080/",
|
90 |
-
"height": 723
|
91 |
-
},
|
92 |
-
"id": "5DNOX2BIU5mO",
|
93 |
-
"outputId": "9311e1ba-5685-4f74-e40a-0f1cd39484d0"
|
94 |
-
},
|
95 |
-
"outputs": [
|
96 |
-
{
|
97 |
-
"output_type": "stream",
|
98 |
-
"name": "stderr",
|
99 |
-
"text": [
|
100 |
-
"Using cache found in /root/.cache/torch/hub/pytorch_vision_v0.6.0\n",
|
101 |
-
"/usr/local/lib/python3.8/dist-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
|
102 |
-
" warnings.warn(\n",
|
103 |
-
"/usr/local/lib/python3.8/dist-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.\n",
|
104 |
-
" warnings.warn(msg)\n"
|
105 |
-
]
|
106 |
-
},
|
107 |
-
{
|
108 |
-
"output_type": "stream",
|
109 |
-
"name": "stdout",
|
110 |
-
"text": [
|
111 |
-
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
|
112 |
-
"Note: opening Chrome Inspector may crash demo inside Colab notebooks.\n",
|
113 |
-
"\n",
|
114 |
-
"To create a public link, set `share=True` in `launch()`.\n"
|
115 |
-
]
|
116 |
-
},
|
117 |
-
{
|
118 |
-
"output_type": "display_data",
|
119 |
-
"data": {
|
120 |
-
"text/plain": [
|
121 |
-
"<IPython.core.display.Javascript object>"
|
122 |
-
],
|
123 |
-
"application/javascript": [
|
124 |
-
"(async (port, path, width, height, cache, element) => {\n",
|
125 |
-
" if (!google.colab.kernel.accessAllowed && !cache) {\n",
|
126 |
-
" return;\n",
|
127 |
-
" }\n",
|
128 |
-
" element.appendChild(document.createTextNode(''));\n",
|
129 |
-
" const url = await google.colab.kernel.proxyPort(port, {cache});\n",
|
130 |
-
"\n",
|
131 |
-
" const external_link = document.createElement('div');\n",
|
132 |
-
" external_link.innerHTML = `\n",
|
133 |
-
" <div style=\"font-family: monospace; margin-bottom: 0.5rem\">\n",
|
134 |
-
" Running on <a href=${new URL(path, url).toString()} target=\"_blank\">\n",
|
135 |
-
" https://localhost:${port}${path}\n",
|
136 |
-
" </a>\n",
|
137 |
-
" </div>\n",
|
138 |
-
" `;\n",
|
139 |
-
" element.appendChild(external_link);\n",
|
140 |
-
"\n",
|
141 |
-
" const iframe = document.createElement('iframe');\n",
|
142 |
-
" iframe.src = new URL(path, url).toString();\n",
|
143 |
-
" iframe.height = height;\n",
|
144 |
-
" iframe.allow = \"autoplay; camera; microphone; clipboard-read; clipboard-write;\"\n",
|
145 |
-
" iframe.width = width;\n",
|
146 |
-
" iframe.style.border = 0;\n",
|
147 |
-
" element.appendChild(iframe);\n",
|
148 |
-
" })(7882, \"/\", \"100%\", 500, false, window.element)"
|
149 |
-
]
|
150 |
-
},
|
151 |
-
"metadata": {}
|
152 |
-
},
|
153 |
-
{
|
154 |
-
"output_type": "execute_result",
|
155 |
-
"data": {
|
156 |
-
"text/plain": []
|
157 |
-
},
|
158 |
-
"metadata": {},
|
159 |
-
"execution_count": 64
|
160 |
-
}
|
161 |
-
],
|
162 |
-
"source": [
|
163 |
-
"import gradio as gr\n",
|
164 |
-
"import requests\n",
|
165 |
-
"from PIL import Image\n",
|
166 |
-
"from torchvision import transforms\n",
|
167 |
-
"from transformers import pipeline\n",
|
168 |
-
"import torch\n",
|
169 |
-
"import sentencepiece\n",
|
170 |
-
"import re\n",
|
171 |
-
"import googletrans \n",
|
172 |
-
"from googletrans import Translator\n",
|
173 |
-
"\n",
|
174 |
-
"model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n",
|
175 |
-
"\n",
|
176 |
-
"from transformers import SegformerFeatureExtractor, SegformerForImageClassification, T5Tokenizer, T5Model\n",
|
177 |
-
"from PIL import Image\n",
|
178 |
-
"import requests\n",
|
179 |
-
"\n",
|
180 |
-
"\n",
|
181 |
-
"def loadImageToText(image, argument):\n",
|
182 |
-
" # url = \"https://media.istockphoto.com/id/470604022/es/foto/%C3%A1rbol-de-manzano.jpg?s=1024x1024&w=is&k=20&c=R7b6jPeTGsDw75Sqn3VwpNRckqlAkJNPLelb48pCk2U=\"\n",
|
183 |
-
" # image = Image.open(requests.get(url, stream=True).raw)\n",
|
184 |
-
" feature_extractor = SegformerFeatureExtractor.from_pretrained(\"nvidia/mit-b2\")\n",
|
185 |
-
" model = SegformerForImageClassification.from_pretrained(\"nvidia/mit-b2\")\n",
|
186 |
-
" translator = Translator()\n",
|
187 |
-
"\n",
|
188 |
-
" inputs = feature_extractor(images=image, return_tensors=\"pt\")\n",
|
189 |
-
" outputs = model(**inputs)\n",
|
190 |
-
" logits = outputs.logits\n",
|
191 |
-
" # model predicts one of the 1000 ImageNet classes\n",
|
192 |
-
" predicted_class_idx = logits.argmax(-1).item()\n",
|
193 |
-
" part_args = f\"<\"+re.sub(\"[^(\\w|<|>)]+(?=\\w)\", \"><\", argument) + \">\"\n",
|
194 |
-
" story_gen = pipeline(\"text-generation\", \"pranavpsv/gpt2-genre-story-generator\")\n",
|
195 |
-
" story_text = story_gen(part_args + model.config.id2label[predicted_class_idx])\n",
|
196 |
-
"\n",
|
197 |
-
" generate_text_stroy = story_text[0][\"generated_text\"]\n",
|
198 |
-
" ln_text_story = generate_text_stroy[len(part_args):len(generate_text_stroy)]\n",
|
199 |
-
"\n",
|
200 |
-
" translated_ita = translator.translate(ln_text_story, src='en', dest='es')\n",
|
201 |
-
"\n",
|
202 |
-
" return translated_ita.text\n",
|
203 |
-
"\n",
|
204 |
-
"\n",
|
205 |
-
"\n",
|
206 |
-
"#print(loadImageToText(\"animal,super\")) # borra el argumento 'image' y sus variables internas si quieres probarlo desde aquí.\n",
|
207 |
-
"\n",
|
208 |
-
"gr.Interface(fn=loadImageToText,\n",
|
209 |
-
" inputs=[gr.Image(), gr.Text(label=\"Argumentos base\", placeholder=\"Verano, película,playa, superhéroe, animal\")],\n",
|
210 |
-
" outputs=\"text\").launch()\n",
|
211 |
-
"\n"
|
212 |
-
]
|
213 |
-
},
|
214 |
-
{
|
215 |
-
"cell_type": "code",
|
216 |
-
"source": [],
|
217 |
-
"metadata": {
|
218 |
-
"id": "zMmh7-ixXNIH"
|
219 |
-
},
|
220 |
-
"execution_count": null,
|
221 |
-
"outputs": []
|
222 |
-
}
|
223 |
-
]
|
224 |
-
}
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
"""imagetortext.ipynb
|
3 |
+
|
4 |
+
Automatically generated by Colaboratory.
|
5 |
+
|
6 |
+
Original file is located at
|
7 |
+
https://colab.research.google.com/drive/1UXh8tivt-4vHaBeXgfyq-TYLvpJAMZVV
|
8 |
+
"""
|
9 |
+
|
10 |
+
!pip install transformers dataset
|
11 |
+
!pip install -q gradio
|
12 |
+
!pip install sentencepiece
|
13 |
+
!pip install googletrans==3.1.0a0
|
14 |
+
|
15 |
+
import gradio as gr
|
16 |
+
import requests
|
17 |
+
from PIL import Image
|
18 |
+
from torchvision import transforms
|
19 |
+
from transformers import pipeline
|
20 |
+
import torch
|
21 |
+
import sentencepiece
|
22 |
+
import re
|
23 |
+
import googletrans
|
24 |
+
from googletrans import Translator
|
25 |
+
|
26 |
+
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
|
27 |
+
|
28 |
+
from transformers import SegformerFeatureExtractor, SegformerForImageClassification, T5Tokenizer, T5Model
|
29 |
+
from PIL import Image
|
30 |
+
import requests
|
31 |
+
|
32 |
+
|
33 |
+
def loadImageToText(image, argument):
    """Classify *image*, generate a story seeded by the predicted label and
    the *argument* keywords, and return it translated English -> Spanish.

    Parameters
    ----------
    image : image accepted by SegformerFeatureExtractor (e.g. PIL.Image or
        the numpy array Gradio passes in).
    argument : str
        Comma/space-separated keywords, e.g. "animal,super"; they become the
        "<animal><super>" genre prompt for the story generator.

    Returns
    -------
    str
        The generated story translated to Spanish.
    """
    # Example input (kept for manual testing):
    # url = "https://media.istockphoto.com/id/470604022/es/foto/%C3%A1rbol-de-manzano.jpg?s=1024x1024&w=is&k=20&c=R7b6jPeTGsDw75Sqn3VwpNRckqlAkJNPLelb48pCk2U="
    # image = Image.open(requests.get(url, stream=True).raw)
    feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/mit-b2")
    model = SegformerForImageClassification.from_pretrained("nvidia/mit-b2")
    translator = Translator()

    inputs = feature_extractor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits
    # model predicts one of the 1000 ImageNet classes
    predicted_class_idx = logits.argmax(-1).item()

    # BUG FIX: the pattern was a non-raw string "[^(\w|<|>)]+(?=\w)", whose
    # backslash escapes are deprecated outside raw strings. This raw-string
    # class is character-for-character equivalent ((, ), | and < > were all
    # inside the negated class). Turns e.g. "a, b" into "<a><b>".
    part_args = "<" + re.sub(r"[^\w<>()|]+(?=\w)", "><", argument) + ">"
    story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
    story_text = story_gen(part_args + model.config.id2label[predicted_class_idx])

    generated_story = story_text[0]["generated_text"]
    # Drop the "<...>" genre prompt; keep only the generated continuation.
    story_body = generated_story[len(part_args):]

    translated = translator.translate(story_body, src='en', dest='es')
    return translated.text


# Manual smoke test (remove the `image` argument and its internal uses in the
# function if you want to try it from here):
# print(loadImageToText("animal,super"))

demo = gr.Interface(
    fn=loadImageToText,
    inputs=[
        gr.Image(),
        gr.Text(label="Argumentos base",
                placeholder="Verano, película,playa, superhéroe, animal"),
    ],
    outputs="text",
)
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|