Update app.py
app.py CHANGED
@@ -1,16 +1,20 @@
 import ast
 import json
+import base64
 import spaces
 import requests
 import numpy as np
 import gradio as gr
 from PIL import Image
 from io import BytesIO
+import face_recognition
 from turtle import title
 from openai import OpenAI
 from collections import Counter
 from transformers import pipeline
 
+
+
 client = OpenAI()
 
 pipe = pipeline("zero-shot-image-classification", model="patrickjohncyh/fashion-clip")
@@ -37,6 +41,7 @@ def shot(input, category):
     subColour,mainColour,score = get_colour(ast.literal_eval(str(input)),category)
     common_result = get_predicted_attributes(ast.literal_eval(str(input)),category)
     openai_parsed_response = get_openAI_tags(ast.literal_eval(str(input)))
+    face_embeddings = get_face_embeddings(ast.literal_eval(str(input)))
     return {
         "colors":{
             "main":mainColour,
@@ -44,7 +49,8 @@ def shot(input, category):
             "score":round(score*100,2)
         },
         "attributes":common_result,
-        "image_mapping":openai_parsed_response
+        "image_mapping":openai_parsed_response,
+        "face_embeddings":face_embeddings
     }
 
 
@@ -153,6 +159,43 @@ def get_openAI_tags(image_urls):
     response = json.loads(openai_response.choices[0].message.content)
     return response
 
+
+@spaces.GPU
+def get_face_embeddings(image_urls):
+    # Initialize a dictionary to store the face encodings or errors
+    results = {}
+
+    # Loop through each image URL
+    for index, url in enumerate(image_urls):
+        try:
+            # Try to download the image from the URL
+            response = requests.get(url)
+            # Raise an exception if the response is not successful
+            response.raise_for_status()
+
+            # Load the image using face_recognition
+            image = face_recognition.load_image_file(BytesIO(response.content))
+
+            # Get the face encodings for all faces in the image
+            face_encodings = face_recognition.face_encodings(image)
+
+            # If no faces are detected, store an empty list
+            if not face_encodings:
+                results[index] = []
+            else:
+                # Otherwise, store the first face encoding as a list
+                results[index] = face_encodings[0].tolist()
+        except Exception as e:
+            # If any error occurs during the download or processing, store the error message
+            results[index] = f"Error processing image: {str(e)}"
+
+    return results
+
+
+
+
+
+
 # Define the Gradio interface with the updated components
 iface = gr.Interface(
     fn=shot,