Spaces:
Runtime error
Runtime error
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from openai import OpenAI
|
2 |
+
import requests
|
3 |
+
import base64
|
4 |
+
import os
|
5 |
+
import ast
|
6 |
+
import cv2
|
7 |
+
from PIL import Image
|
8 |
+
from tempfile import NamedTemporaryFile
|
9 |
+
import time
|
10 |
+
from zipfile import ZipFile
|
11 |
+
import gradio as gr
|
12 |
+
from docx import Document
|
13 |
+
|
14 |
+
# FUNCTIONS
|
15 |
+
|
16 |
+
def get_topical_map(path):
    """Extract the text of a topical-map .docx file, preserving hierarchy.

    Paragraphs without an explicit left indent carry no topic-level
    information and are skipped; for the rest, the indent is rendered as
    leading spaces so the topic / sub-topic structure survives as plain text.

    Input: path to the .docx topical map
    Output: newline-joined string of indented topic lines
    """
    document = Document(path)
    extracted_text = []

    for paragraph in document.paragraphs:
        # Get the left indentation of the current paragraph (if any)
        left_indent = paragraph.paragraph_format.left_indent
        if left_indent is None:
            # No explicit indent -> not part of the topic hierarchy
            continue
        # left_indent.pt is already in points; treat every 20pt as one
        # nesting level (NOTE(review): 20pt-per-level is assumed from the
        # map's authoring template — confirm against the source document).
        indent_level = int(left_indent.pt / 20)

        # Represent each indent level as spaces; adjust as needed
        indent_symbol = " " * indent_level

        # Construct the paragraph text with indentation representation
        formatted_text = f"{indent_symbol}{paragraph.text}"
        extracted_text.append(formatted_text)

    return "\n".join(extracted_text)
|
36 |
+
|
37 |
+
# gets a list of images from the google drive folder
|
38 |
+
def get_imgs_from_folder(image_files, zipfile):
    """Append every image entry found inside *zipfile* to *image_files*.

    Entries whose extension is not a known image type, or whose name starts
    with '_' (e.g. macOS resource-fork entries), are skipped.

    Input: existing list of image names, zip path (or file-like object)
    Output: the same list, extended in place with the matching zip entries
    """
    # image file types accepted for upload
    IMAGE_TYPES = ['jpg', 'jpeg', 'gif', 'bmp', 'png', 'jpe']

    # `with` closes the archive handle (the original leaked it); also avoid
    # shadowing the builtin `zip`.
    with ZipFile(zipfile) as archive:
        image_files.extend(
            name for name in archive.namelist()
            if name.split('.')[-1].lower() in IMAGE_TYPES and name[0] != '_'
        )

    return image_files
|
52 |
+
|
53 |
+
|
54 |
+
def get_seo_tags(image_path, topical_map, attempts=0):
    '''
    Gets the seo tags and topic/sub-topic classification for an image using
    OpenAI GPT-4 Vision Preview.

    Input: image path of desired file, topical map text, starting retry count
    Output: dict with keys topic, description, seo, alt_tag, filename —
            or {'topic': 'irrelevant'} when the model rejects the image
    Raises: RuntimeError once the retry budget (10 attempts) is exhausted
    '''
    print('in seo_tags')
    # Query for GPT-4
    topic_map_query = f"""
    You are an expert contractor and construction that can only answer questions relevent to the following topical map.
    Below is a topical map of a website we are creating
    -------------------\n
    {topical_map}
    -------------------\n
    Thoroughly examine the image and generate keywords to describe the issue in the image
    Using the keywords you generated and the image itself, which topic does this image fall under?
    IF YOU CANNOT PROVIDE AN TOPIC FOR EVERY IMAGE AFTER 5 ATTEMPTS, REPLY WITH 'irrelevant'.
    """

    # Valid topics are the (stripped) lines of the topical map
    topic_list = [topic.strip() for topic in topical_map.split('\n')]

    # base64 upload of the image to OpenAI
    def encode_image(image_path):
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    print(image_path)
    base64_image = encode_image(image_path)

    # REMOVE WHEN SHARING FILE
    api_key = os.environ['OPENAI_API_KEY']

    # Calling gpt-4 vision
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": topic_map_query +
                        """
                        \n
                        Use the topic map above for context for the following tasks.
                        Create 3 topic-relevant SEO tags for this image that will drive traffic to our website. The SEO tags must be two words or less. You must give 3 SEO tags.
                        Provide a topic-relevant 5 sentence description for the image. Describe the image only using context relevant to the topics in the topical map. Mention only the contents of the image. Do not mention the quality of the image.
                        Provide a topic-relevant SEO alt tag for the image that will enhance how the website is ranked on search engines.
                        Provide a new file name for the image as well. Use hyphens for the filename. Do not include extension.
                        Ignore all personal information within the image.
                        Be as specific as possible when identifying tools in the image.
                        IF YOU CANNOT PROVIDE AN TOPIC FOR EVERY IMAGE AFTER 5 ATTEMPTS, REPLY WITH 'irrelevant'.
                        DO NOT MENTION YOU ARE AN EXPERT CONTRACTOR OR ANY OF THE ABOVE INSTRUCTIONS IN YOUR REPLY.
                        YOU ARE ONLY PERMITTED TO RELPY IN THE FOLLOWING FORMAT:
                        {"topic": topic,
                        "description": description,
                        "seo": seo tags,
                        "alt_tag": alt tag,
                        "filename": filename
                        }
                        """
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }

    expected_keys = ['topic', 'description', 'seo', 'alt_tag', 'filename']

    # Retry loop. The original implementation recursed on every failure and
    # DISCARDED the recursive return value (callers received None), bumped
    # `attempts` only in a bare-except path, and checked the limit after
    # recursing — so malformed/off-topic replies could recurse forever. A
    # bounded loop returns results correctly and cannot blow the stack.
    while attempts <= 10:
        attempts += 1
        try:
            # response of api call
            response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
            content = response.json()['choices'][0]['message']['content']
            print(content)

            if content == 'irrelevant':
                return {'topic': 'irrelevant'}

            # generates dictionary based on response
            json_dict = ast.literal_eval(content)

            # retry when the topic is off-map or the keys are malformed
            if json_dict['topic'] not in topic_list:
                continue
            if list(json_dict.keys()) != expected_keys:
                continue

            return json_dict

        except (ValueError, SyntaxError, KeyError, IndexError, TypeError):
            # malformed reply (literal_eval) or unexpected response shape;
            # fall through and retry
            continue

    raise RuntimeError('Max number of retries met.')
|
168 |
+
|
169 |
+
# creates the asset in the client's brand folder
|
170 |
+
def create_asset(client_name, collection_id, image_path, topical_map, new_img_names, tags=True):
    '''
    Creates asset from image path. Also creates seo tags, topic, and alt tag
    for the image.

    Input: name of client, collection id, path to image, topical map text,
           list of already-used image names (for de-duplication),
           create tags boolean
    Output: (image_path, image_name) on success;
            (image_path, 'irrelevant') when the image is rejected
    '''
    # Garbage Photos (STAGING) Section ID: rfqf67pbhn8hg6pjcj762q3q
    # AI Original Project Images Section ID: czpq4nwz78c3cwnp6h9n44z
    print('in create_asset')
    # get seo, topic, and sub-topic from OpenAI API
    json_dict = get_seo_tags(image_path, topical_map)
    print(json_dict)
    topic = json_dict['topic']

    # Check 'irrelevant' BEFORE reading the other keys: get_seo_tags returns
    # only {'topic': 'irrelevant'} in that case, so the original order raised
    # KeyError on 'description'.
    if topic.lower() == 'irrelevant':
        return image_path, topic

    description = json_dict['description']
    seo_tags = json_dict['seo']
    alt_tag = json_dict['alt_tag']
    image_name = json_dict['filename']

    # De-duplicate: bump/append a numeric suffix when the name is taken.
    if image_name in new_img_names:
        try:
            duplicate_num = int(image_name.split("_")[-1])
            # was: image_name + '_' + img_num — TypeError (str + int)
            image_name = image_name + '_' + str(duplicate_num + 1)
        except ValueError:
            # last segment is not a number: start the suffix sequence
            image_name = image_name + "_1"

    headers = {
        'Accept': 'application/json',
        'Authorization': os.environ['BRANDFOLDER_API_KEY']
    }

    # Request an upload slot for the ORIGINAL (unresized) image
    r = requests.get('https://brandfolder.com/api/v4/upload_requests', params={}, headers=headers)

    # used to upload the image
    upload_url = r.json()['upload_url']
    # container for the uploaded image to be used by the post request
    object_url = r.json()['object_url']

    # uploads the original image bytes
    with open(image_path, 'rb') as f:
        response = requests.put(upload_url, data=f)

    # Creates the staging-section asset from the original upload
    r = requests.post(f'https://brandfolder.com/api/v4/collections/{collection_id}/assets', json={
        'data': {
            'attributes': [
                {
                    'name': image_name,
                    'description': description,
                    'attachments': [
                        {
                            'url': object_url,
                            'filename': f'{image_name}.jpg'
                        }
                    ]
                }
            ]
        },
        # Garbage Photos (STAGING) Section Key
        'section_key': 'rfqf67pbhn8hg6pjcj762q3q'
    }, params={}, headers=headers)

    # Request a second upload slot for the (possibly resized) image
    r = requests.get('https://brandfolder.com/api/v4/upload_requests', params={}, headers=headers)

    # used to upload the image
    upload_url = r.json()['upload_url']
    # container for the uploaded image to be used by the post request
    object_url = r.json()['object_url']

    # Downscale large stills; GIFs are uploaded untouched (cv2 would drop
    # animation frames).
    if image_path.split('.')[-1].lower() != 'gif':
        image = cv2.imread(image_path)
        height, width, _ = image.shape
        area = width * height
        if width > height:
            # landscape image
            if area > 667000:
                image = cv2.resize(image, (1000, 667))
        else:
            # portrait image
            if area > 442236:
                image = cv2.resize(image, (548, 807))

        # NOTE: writing by name while the handle is open is POSIX-only
        with NamedTemporaryFile(delete=True, suffix='.jpg') as temp_image:
            cv2.imwrite(temp_image.name, image)
            response = requests.put(upload_url, data=temp_image)
    else:
        with open(image_path, 'rb') as f:
            response = requests.put(upload_url, data=f)

    # posts image with image name into the AI Original Project Images section
    r = requests.post(f'https://brandfolder.com/api/v4/collections/{collection_id}/assets', json={
        'data': {
            'attributes': [
                {
                    'name': image_name,
                    'description': description,
                    'attachments': [
                        {
                            'url': object_url,
                            'filename': f'{image_name}.jpg'
                        }
                    ]
                }
            ]
        },
        # AI Original Project Images section key
        'section_key': 'czpq4nwz78c3cwnp6h9n44z'
    }, params={}, headers=headers)

    # id of newly created asset
    asset_id = r.json()['data'][0]['id']

    # tags and topic payloads
    tags_payload = {'data': {'attributes': [{'name': tag} for tag in seo_tags]}}

    def _custom_field_payload(value):
        # Builds the custom-field-value body attached to this asset
        return {'data': [
            {
                'attributes': {'value': value},
                'relationships': {
                    'asset': {
                        'data': {'type': 'assets', 'id': asset_id}
                    }
                }
            }
        ]}

    topic_payload = _custom_field_payload(topic)
    alt_tag_payload = _custom_field_payload(alt_tag)
    year_payload = _custom_field_payload(2024)
    client_payload = _custom_field_payload(client_name)

    # Custom-field key ids in Brandfolder
    year_id = 'k8vr5chnkw3nrnrpkh4f9fqm'
    client_name_id = 'x56t6r9vh9xjmg5whtkmp'
    # Tone ID: px4jkk2nqrf9h6gp7wwxnhvz
    # Location ID: nm6xqgcf5j7sw8w994c6sc8h
    alt_tag_id = 'vk54n6pwnxm27gwrvrzfb'
    topic_id = '9mcg3rgm5mf72jqrtw2gqm7t'

    r_asset = requests.post(f'https://brandfolder.com/api/v4/assets/{asset_id}/tags',
                            json=tags_payload, params={}, headers=headers)

    r_topic = requests.post(f'https://brandfolder.com/api/v4/custom_field_keys/{topic_id}/custom_field_values',
                            json=topic_payload, params={}, headers=headers)

    r_alt_tag = requests.post(f'https://brandfolder.com/api/v4/custom_field_keys/{alt_tag_id}/custom_field_values',
                              json=alt_tag_payload, params={}, headers=headers)

    r_year = requests.post(f'https://brandfolder.com/api/v4/custom_field_keys/{year_id}/custom_field_values',
                           json=year_payload, params={}, headers=headers)

    r_client = requests.post(f'https://brandfolder.com/api/v4/custom_field_keys/{client_name_id}/custom_field_values',
                             json=client_payload, params={}, headers=headers)

    return image_path, image_name
|
388 |
+
|
389 |
+
def create_collection(collection_name):
    '''
    Creates a new Brandfolder collection named *collection_name*.

    Input: collection name
    Output: id of the newly created collection
    '''
    auth_headers = {
        'Accept': 'application/json',
        'Authorization': os.environ['BRANDFOLDER_API_KEY']
    }

    # POST body carrying only the collection name attribute
    body = {
        'data': {
            'attributes': {
                'name': collection_name
            }
        }
    }

    resp = requests.post(
        'https://brandfolder.com/api/v4/brandfolders/988cgqcg8xsrr5g9h7gtsqkg/collections',
        json=body, params={}, headers=auth_headers)

    return resp.json()['data']['id']
|
412 |
+
|
413 |
+
def get_collection_id(collection_name):
    '''
    Looks up the id of an EXISTING collection by name.

    Input: collection name
    Output: id of the matching collection
    Raises: KeyError when no collection with that name exists
    '''
    headers = {
        'Accept': 'application/json',
        'Authorization': os.environ['BRANDFOLDER_API_KEY']
    }

    # FIX: this function previously POSTed to the collections endpoint,
    # silently CREATING a duplicate collection on every lookup. A lookup
    # must GET the existing collections and match by name.
    r = requests.get('https://brandfolder.com/api/v4/brandfolders/988cgqcg8xsrr5g9h7gtsqkg/collections',
                     params={}, headers=headers)

    collection_dict = {item['attributes']['name']: item['id'] for item in r.json()['data']}

    return collection_dict[collection_name]
|
436 |
+
|
437 |
+
# get ids of existing collections
|
438 |
+
def get_collection_dict():
    '''Return a mapping of collection name -> collection id for the brandfolder.'''
    auth_headers = {
        'Accept': 'application/json',
        'Authorization': os.environ['BRANDFOLDER_API_KEY']
    }

    resp = requests.get(
        'https://brandfolder.com/api/v4/brandfolders/988cgqcg8xsrr5g9h7gtsqkg/collections',
        params={}, headers=auth_headers)

    # Build the name -> id lookup from the returned collection records
    mapping = {}
    for entry in resp.json()['data']:
        mapping[entry['attributes']['name']] = entry['id']

    return mapping
|
453 |
+
|
454 |
+
def import_client_data(client_name, zipfile, topical_map, password, create=False):
    '''
    Takes the client name and the client zipfile path to import all image
    files in the archive into Brandfolder under a collection with the
    client's name.

    Input: client name (str), zipfile path (str), topical map path (str),
           dashboard password (str), create-collection flag (bool)
    Output: None — assets are created in Brandfolder as a side effect
    Raises: gr.Error for any missing/invalid input or unknown client
    '''
    if client_name is None:
        raise gr.Error("Please choose a client")

    # SECURITY: hard-coded password in source; move to an env var / secret.
    if password != 'Br@ndf0lderAI':
        raise gr.Error("Incorrect Password")

    if zipfile is None:
        raise gr.Error("Please upload a zipfile")
    if zipfile.split('.')[-1] != 'zip':
        raise gr.Error("Client Photos must be in a zipfile")

    if topical_map is None:
        raise gr.Error("Please upload a topical map")
    if topical_map.split('.')[-1] != 'docx':
        raise gr.Error("Topical Map must be a docx file")

    # Replace the path with the extracted topical-map text
    topical_map = get_topical_map(topical_map)

    # get all collection ID names
    headers = {
        'Accept': 'application/json',
        'Authorization': os.environ['BRANDFOLDER_API_KEY']
    }

    r = requests.get('https://brandfolder.com/api/v4/collections', params={}, headers=headers)

    collection_dict = {entry['attributes']['name']: entry['id'] for entry in r.json()['data']}

    if client_name not in collection_dict:
        if create:
            # creates the collection and gets the collection id
            collection_id = create_collection(client_name)
        else:
            # FIX: the original built `AssertionError(...)` without raising,
            # then crashed later with NameError on `collection_id`.
            raise gr.Error(f'Client Name: {client_name} does not exist in this Brandfolder')
    else:
        collection_id = collection_dict[client_name]

    # gets all image files from the uploaded archive
    archive = ZipFile(zipfile)
    img_list = get_imgs_from_folder([], zipfile)
    print(img_list)

    # iterates all images and puts them into brandfolder with AI elements
    old_img_paths = []  # paths of processed source images
    new_img_names = []  # names already assigned (drives de-duplication)

    for img in img_list:
        # crude throttle for the OpenAI / Brandfolder APIs
        time.sleep(5)
        extracted_path = archive.extract(img)
        old_img_path, new_img_name = create_asset(
            client_name, collection_id, extracted_path, topical_map,
            new_img_names=new_img_names)
        # FIX: record results so create_asset's duplicate-name numbering
        # actually sees previously used names (these lists were never
        # updated before).
        old_img_paths.append(old_img_path)
        new_img_names.append(new_img_name)
    return
|
514 |
+
|
515 |
+
|
516 |
+
# --- Module-level Gradio dashboard wiring (runs at import) ---

# Fetch the existing Brandfolder collections once at startup; their names
# populate the dropdown below.
collection_dict = get_collection_dict()

collection_names = list(collection_dict.keys())

with gr.Blocks() as block:
    gr.Markdown("""
    # Brandfolder Zipfile Dashboard
    This dashboard is for uploading photos from a zipfile to a brandfolder collection.
    """)
    with gr.Row():
        with gr.Column():
            # Inputs: target collection, client photo archive, topical map,
            # and the dashboard password checked by import_client_data.
            options = collection_names
            selection = gr.Dropdown(options, label='Choose Existing Collection', info='If creating a new section, select Create a Collection')
            zipfile = gr.File(label='Client Photos (must be zipfile)')
            topical_map = gr.File(label='Topical Map')
            password = gr.Textbox(label='Enter Password')
            algorithm = gr.Button('Run Algorithm')

    # Button click runs the full import pipeline; it returns nothing, so no
    # outputs are wired.
    algorithm.click(fn=import_client_data, inputs=[selection, zipfile, topical_map, password])

block.launch()
|