EnigmaOfTheWorld committed on
Commit
e0e645c
•
1 Parent(s): f624562

Created app.py

Files changed (1)
  1. app.py +596 -0
app.py ADDED
@@ -0,0 +1,596 @@
+ import io
+ import os
+ import pickle
+ import time
+ import urllib.request
+ import warnings
+ from base64 import b64encode
+ from pathlib import Path
+ 
+ import cv2
+ import gradio as gr
+ import numpy as np
+ import openai
+ import requests
+ import torch
+ from PIL import Image
+ from diffusers import StableDiffusionDepth2ImgPipeline, StableDiffusionImg2ImgPipeline
+ from serpapi import GoogleSearch
+ from stability_sdk import client
+ import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
+ 
+ # face_recognition needs dlib; keep the app importable even if it is missing.
+ try:
+     import face_recognition
+ except Exception:
+     pass
+ 
+ current_time = time.asctime()
+ 
+ stability_api = client.StabilityInference(
+     key=os.environ['STABILITY_KEY'],     # API key reference.
+     verbose=True,                        # Print debug messages.
+     engine="stable-diffusion-512-v2-1",  # Engine to use for generation. For SD 2.0 use "stable-diffusion-v2-0".
+     # Available engines: stable-diffusion-v1, stable-diffusion-v1-5, stable-diffusion-512-v2-0, stable-diffusion-768-v2-0,
+     # stable-diffusion-512-v2-1, stable-diffusion-768-v2-1, stable-inpainting-v1-0, stable-inpainting-512-v2-0
+ )
+ 
+ ################
+ # Image-to-image transformation via the Stability API (fallback when no local GPU pipeline is available).
+ 
+ # Example prompt.
+ prompt = "photo of bespectacled woman, long curly blue hair, bright green eyes, freckled complexion, photorealistic, colorful, highly detailed 4k, realistic photo"
+ 
+ def transform_ncuda(img, prompt, cfg=8.0, stps=30, sc=0.8):
+     answers2 = stability_api.generate(
+         prompt=f"{prompt}",
+         init_image=img,     # Use the supplied image as the initial image for the transformation.
+         start_schedule=sc,  # Strength of the prompt relative to the initial image.
+         steps=stps,         # Number of inference steps. Defaults to 30.
+         cfg_scale=cfg,      # How strongly the generation is guided to match the prompt. Defaults to 7.0.
+         width=512,          # Generation width, defaults to 512 if not included.
+         height=512,         # Generation height, defaults to 512 if not included.
+         sampler=generation.SAMPLER_K_DPMPP_2M  # Sampler used for denoising. Defaults to k_dpmpp_2m.
+         # Clip Guidance only supports ancestral samplers.
+         # (Available samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
+     )
+ 
+     # Warn if the adult-content classifier is tripped; otherwise return the generated image.
+     for resp in answers2:
+         for artifact in resp.artifacts:
+             if artifact.finish_reason == generation.FILTER:
+                 warnings.warn(
+                     "Your request activated the API's safety filters and could not be processed. "
+                     "Please modify the prompt and try again.")
+             if artifact.type == generation.ARTIFACT_IMAGE:
+                 img2 = Image.open(io.BytesIO(artifact.binary))
+                 return img2
+                 # img2.save(str(artifact.seed) + "-img2img.png")  # Optionally save with the seed as the filename.
+ 
+ 
+ #########################
+ # Text-to-image generation via the Stability API.
+ def generate_stability(prompt):
+     answers = stability_api.generate(
+         prompt=f"{prompt}",
+         # If a seed is provided, the generation is deterministic: the same parameters always recall the same image.
+         steps=30,        # Number of inference steps. Defaults to 30.
+         cfg_scale=8.0,   # How strongly the generation is guided to match the prompt. Defaults to 7.0.
+         width=512,       # Generation width, defaults to 512 if not included.
+         height=512,      # Generation height, defaults to 512 if not included.
+         samples=1,       # Number of images to generate, defaults to 1 if not included.
+         sampler=generation.SAMPLER_K_DPMPP_2M  # Sampler used for denoising. Defaults to k_dpmpp_2m.
+     )
+ 
+     # Warn if the adult-content classifier is tripped; otherwise return the generated image.
+     for resp in answers:
+         for artifact in resp.artifacts:
+             if artifact.finish_reason == generation.FILTER:
+                 warnings.warn(
+                     "Your request activated the API's safety filters and could not be processed. "
+                     "Please modify the prompt and try again.")
+             if artifact.type == generation.ARTIFACT_IMAGE:
+                 img = Image.open(io.BytesIO(artifact.binary))
+                 # img.save(str(artifact.seed) + ".png")  # Optionally save with the seed as the filename.
+                 return img
+ 
+ 
+ #################
+ # Local img2img pipeline; fall back to the Stability API if CUDA (or the model) is unavailable.
+ cuda_error1 = 0
+ try:
+     device = "cuda"
+     model_id_or_path = "runwayml/stable-diffusion-v1-5"
+     pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
+     pipe = pipe.to(device)
+ except Exception:
+     cuda_error1 = 1
+ 
+ #####################
+ # Local depth-to-image pipeline; same CUDA fallback as above.
+ cuda_error2 = 0
+ try:
+     pipe1 = StableDiffusionDepth2ImgPipeline.from_pretrained(
+         "stabilityai/stable-diffusion-2-depth",
+         torch_dtype=torch.float16,
+     ).to("cuda")
+ except Exception:
+     cuda_error2 = 1
+ 
+ ##################
+ # Pre-drawn sketch -> colour image, using depth2img locally when possible.
+ def transform(init_image, prompt, n_prompt):
+     if cuda_error2 == 0:
+         try:
+             image1 = pipe1(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.8).images[0]
+         except Exception:
+             image1 = transform_ncuda(init_image, prompt)
+     else:
+         image1 = transform_ncuda(init_image, prompt)
+     im = np.asarray(image1)
+     return im
+ 
+ 
+ ###################
+ # Hand-drawn sketch -> colour image, using the img2img pipeline locally when possible.
+ def transform1(img, prompt, n_prompt):
+     img.save("img1.png")
+     nimage = Image.open("img1.png").convert('RGB')
+     if cuda_error1 == 0:
+         try:
+             images = pipe(prompt=prompt, image=nimage, negative_prompt=n_prompt, strength=1, guidance_scale=15).images
+             im = np.asarray(images[0])
+         except Exception:
+             image = transform_ncuda(img, prompt, 15, 50, 0.95)
+             im = np.asarray(image)
+     else:
+         image = transform_ncuda(img, prompt, 15, 50, 0.95)
+         im = np.asarray(image)
+     return im
+ 
+ 
+ #####################
+ openai.api_key = os.environ['OPENAI_KEY']
+ 
+ # Example prompt.
+ PROMPT = "colorful portrait 25 year bespectacled woman with long, curly skyblue hair and bright green eyes. She has a small, upturned nose and a freckled complexion. She is approximately 5'5 tall and has a thin build"
+ 
+ # Text -> image via either Dall-E or Stable Diffusion.
+ def generate(PROMPT, model):
+     if model == "Dall-E":
+         response = openai.Image.create(
+             prompt=PROMPT,
+             n=1,
+             size="256x256",
+         )
+         x = response["data"][0]["url"]
+         urllib.request.urlretrieve(x, "file")
+         img = Image.open("file")
+     else:
+         img = generate_stability(PROMPT)
+     return np.asarray(img)
+ 
+ 
+ ########################
+ # Upload an image to imgbb and return a public URL (used for reverse image search).
+ API_ENDPOINT = "https://api.imgbb.com/1/upload"
+ API_KEY = "51e7720f96af8eb5179e772e443c0c1e"
+ 
+ def imgLink(image):
+     pil_image = image.convert('RGB')
+     open_cv_image = np.array(pil_image)
+     # PIL gives RGB; convert to BGR so cv2.imwrite stores the colours correctly.
+     cv2.imwrite("search.png", cv2.cvtColor(open_cv_image, cv2.COLOR_RGB2BGR))
+     path = Path("search.png")
+     with open(path, "rb") as f:
+         image_data = b64encode(f.read()).decode()
+     payload = {
+         "key": API_KEY,
+         "image": image_data
+     }
+ 
+     # Send the API request and pull the generated link out of the response.
+     response = requests.post(API_ENDPOINT, data=payload)
+     response_json = response.json()
+     image_url = response_json["data"]["url"]
+     return image_url
+ 
+ 
+ ############################
+ # Reverse image search via the SerpApi Google Lens engine.
+ def google_search(image):
+     image_url = imgLink(image)
+     params = {
+         "engine": "google_lens",
+         "url": image_url,
+         "hl": "en",
+         "api_key": "9f32067b9dd74d6e94153036003ec0e6e24d54b36ffb09a340f9004012fdae98"
+     }
+     search = GoogleSearch(params)
+     result = search.get_dict()
+     t = ''
+     try:
+         for i in range(len(result['knowledge_graph'])):
+             t = t + "Title : " + result['knowledge_graph'][i]['title'] + "\n"
+             source = result["knowledge_graph"][i]['images'][0]['source']
+             t += source + "\n"
+     except Exception:
+         t = "Not Found"
+     try:
+         for i in range(0, min(2, len(result['visual_matches']))):
+             t = t + "Title : " + result['visual_matches'][i]['title'] + "\n"
+             source = result['visual_matches'][i]['source']
+             t += source + "\n"
+     except Exception:
+         t = "Not Found"
+ 
+     # Show the closest visual match as a thumbnail; fall back to the query image.
+     try:
+         img_link = result["visual_matches"][0]['thumbnail']
+         urllib.request.urlretrieve(img_link, "file")
+         img = Image.open("file")
+         img = np.asarray(img)
+     except Exception:
+         img = image
+     return t, img
+ 
+ 
+ ######################################################################
+ # Face-recognition setup: Haar cascade for detection plus known-face encodings.
+ images_folder_path = 'Images'
+ # Path of the XML file containing the Haar cascade (bundled with OpenCV).
+ cascPathface = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
+ # cascPathface = "/content/Salz/haarcascade_frontalface_default.xml"
+ # Load the Haar cascade into the cascade classifier.
+ faceCascade = cv2.CascadeClassifier(cascPathface)
+ # Load the known faces and embeddings saved earlier.
+ data = pickle.loads(open('face_enc', "rb").read())
+ 
+ ################################################################
+ # Check whether the face in the image matches anyone in the local database.
+ def check_database(ima):
+     try:
+         pil_image = ima.convert('RGB')
+         open_cv_image = np.array(pil_image)
+         cv2.imwrite("new.png", open_cv_image)
+         # Convert RGB to BGR for OpenCV, and to greyscale for the Haar cascade.
+         image = open_cv_image[:, :, ::-1].copy()
+         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+         faces = faceCascade.detectMultiScale(gray,
+                                              scaleFactor=1.1,
+                                              minNeighbors=5,
+                                              minSize=(60, 60),
+                                              flags=cv2.CASCADE_SCALE_IMAGE)
+ 
+         # Facial embeddings for the input image.
+         encodings = face_recognition.face_encodings(image)
+         names = []
+         name = "Unknown"  # Default if no face is found at all.
+         # Loop over the facial embeddings, in case there are multiple faces.
+         for encoding in encodings:
+             # Compare against the known encodings in data["encodings"];
+             # matches is a list of booleans, True where the embedding matches closely.
+             matches = face_recognition.compare_faces(data["encodings"],
+                                                      encoding)
+             name = "Unknown"  # Used if no encoding matches.
+             # Check whether we found a match.
+             if True in matches:
+                 # Indices at which we got True.
+                 matchedIdxs = [i for (i, b) in enumerate(matches) if b]
+                 counts = {}
+                 # Count how often each known name was matched.
+                 for i in matchedIdxs:
+                     name = data["names"][i]
+                     counts[name] = counts.get(name, 0) + 1
+                 # Keep the name with the highest count.
+                 name = max(counts, key=counts.get)
+             # Update the list of names.
+             names.append(name)
+         # Draw the predicted name on each recognized face.
+         for ((x, y, w, h), name) in zip(faces, names):
+             cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
+             cv2.putText(image, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
+                         0.75, (0, 255, 0), 2)
+         else:  # To store the unknown new face: re-detect it.
+             faces = faceCascade.detectMultiScale(gray,
+                                                  scaleFactor=1.1,
+                                                  minNeighbors=5,
+                                                  minSize=(60, 60),
+                                                  flags=cv2.CASCADE_SCALE_IMAGE)
+ 
+         cv2.imwrite('curr.png', image)
+         return name
+     except Exception:
+         return "Need GPU"
+ 
+ 
+ ###########################
+ # Scan an uploaded video, label recognized faces frame by frame, and return the annotated video.
+ def video(vid):
+     file = '../../../../..' + vid.name
+     print(f'file: {file}')
+     video = cv2.VideoCapture(file)
+     if video.isOpened() == False:
+         print("Error reading video file")
+     frame_width = int(video.get(3))
+     frame_height = int(video.get(4))
+     size = (frame_width, frame_height)
+ 
+     # VideoWriter for the annotated output, stored in 'filename.mp4'.
+     result = cv2.VideoWriter('filename.mp4',
+                              cv2.VideoWriter_fourcc(*'mp4v'),
+                              10, size)
+ 
+     while True:
+         ret, frame = video.read()
+         if ret == True:
+             # Convert the input frame from BGR to RGB.
+             rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             faces = faceCascade.detectMultiScale(rgb,
+                                                  scaleFactor=1.1,
+                                                  minNeighbors=5,
+                                                  minSize=(60, 60),
+                                                  flags=cv2.CASCADE_SCALE_IMAGE)
+ 
+             # Facial embeddings for the faces in this frame.
+             encodings = face_recognition.face_encodings(rgb)
+             names = []
+             # Loop over the facial embeddings, in case there are multiple faces.
+             for encoding in encodings:
+                 # Compare against the known encodings in data["encodings"];
+                 # matches is a list of booleans, True where the embedding matches closely.
+                 matches = face_recognition.compare_faces(data["encodings"],
+                                                          encoding)
+                 name = "Unknown"  # Used if no encoding matches.
+                 # Check whether we found a match.
+                 if True in matches:
+                     # Indices at which we got True.
+                     matchedIdxs = [i for (i, b) in enumerate(matches) if b]
+                     counts = {}
+                     # Count how often each known name was matched.
+                     for i in matchedIdxs:
+                         name = data["names"][i]
+                         counts[name] = counts.get(name, 0) + 1
+                     # Keep the name with the highest count.
+                     name = max(counts, key=counts.get)
+ 
+                 # Update the list of names.
+                 names.append(name)
+             # Draw the predicted name on each recognized face.
+             for ((x, y, w, h), name) in zip(faces, names):
+                 cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+                 cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
+                             0.75, (0, 255, 0), 2)
+             result.write(frame)
+             if cv2.waitKey(1) & 0xFF == ord('q'):
+                 break
+         else:
+             # No more frames; stop.
+             break
+ 
+     # Release the reader and writer so the output file is finalized.
+     video.release()
+     result.release()
+     return 'filename.mp4'
+ 
+ #################
+ # Build a detailed facial-description prompt from the user's descriptors via GPT-3.
+ def generate_prompt(AG, facftop, facfmid, facfbot):
+     response = openai.Completion.create(
+         model="text-davinci-003",
+         prompt="Generate facial description of a person from the following descriptors - Realistic facial portrait sketch of " + AG + facftop + facfmid + facfbot,
+         temperature=0.1,
+         max_tokens=256,
+         top_p=1,
+         frequency_penalty=0,
+         presence_penalty=0
+     )
+     return response["choices"][0]["text"]
+ 
+ ##############################
+ # Relationship extraction demo: ask GPT-3 to extrapolate entity relationships from a prompt.
+ openai.api_key = os.environ['OPENAI_KEY']
+ 
+ PROMPT = "Ankit went to the market. He called Raj then."
+ response = openai.Completion.create(
+     model="text-davinci-003",
+     prompt=f"Given a prompt, extrapolate as many relationships as possible from it and provide a list of updates.\n\nIf an update is a relationship, provide [ENTITY 1, RELATIONSHIP, ENTITY 2]. The relationship is directed, so the order matters.\n\nIf an update is related to deleting an entity, provide [\"DELETE\", ENTITY].\n\nExample:\nprompt: Alice is Bob's roommate. Alice likes music. Her roommate likes sports\nupdates:\n[[\"Alice\", \"roommate\", \"Bob\"],[\"Alice\",\"likes\",\"music\"],[\"Bob\",\"likes\",\"sports\"]]\n\nprompt: {PROMPT}\nupdates:",
+     temperature=0,
+     max_tokens=256,
+     top_p=1,
+     frequency_penalty=0,
+     presence_penalty=0
+ )
+ 
+ ###################
+ # Parse the completion into [entity, relationship, entity] triples.
+ t = response["choices"][0]["text"]
+ t = t[2:]
+ t = t.replace("[", '').replace("]", "")
+ t = t.split(",")
+ r = []
+ for i in range(len(t) // 3):
+     r.append(t[3 * i:3 * i + 3])
+ # r now holds the extracted triples.
+ 
+ 
+ #####################
+ # Gradio UI.
+ disp_url = "https://i.ibb.co/TP4ddc6/sherlock.png"
+ det_url = "https://i.ibb.co/Ms1jcDv/104cc37752fa.png"
+ with gr.Blocks(css=".gradio-container {background-color: #F0FFFF}") as demo:
+     gr.Markdown("# **Sherlock's Phoeniks**")
+     gr.Markdown("### **Facial Recognition using Generative AI - ChatGPT + StableDiffusion + Dall-E, utilizing Computer Vision and the Google Search API**")
+     gr.HTML(value="<img src='https://i.ibb.co/TP4ddc6/sherlock.png' alt='Flow Diagram' width='1200' height='300'/>")
+     gr.Markdown("""Our Sherlock's Phoeniks Search Squad solution is a facial recognition
+     system that utilizes generative AI models like ChatGPT and Stable
+     Diffusion/Dall-E, as well as computer vision techniques, to identify and locate
+     missing persons in real time. The system takes input in the form of text
+     describing the appearance of the missing person, as well as raw images
+     such as sketches, CCTV footage, or blurry photos. The algorithm then
+     searches through internal databases and internet/social media platforms like
+     Facebook and Twitter to find matches and potentially identify the missing
+     person. This system has the potential to significantly aid police and
+     investigating agencies in their efforts to locate and bring missing persons
+     home.""")
+     gr.HTML(value="<img src='https://i.ibb.co/Ms1jcDv/104cc37752fa.png' alt='Flow Diagram' style='height:500px;width:1200px'>")
+     with gr.Accordion("Generate Prompt", open=False):
+         gr.Markdown("**Generate a prompt from the face description for image generation**")
+ 
+         with gr.Row():
+             with gr.Column():
+                 text1_1 = gr.Text(label="Enter Possible Age, Gender and Ethnicity of the Person")
+                 text1_2 = gr.Text(label="Provide Descriptors for Hair, Eyebrows and Eyes")
+                 text1_3 = gr.Text(label="Describe Skin Color, Blemishes, Nose Structure")
+                 text1_4 = gr.Text(label="Describe Facial Shape, build, chin structure in as much detail as possible")
+ 
+             with gr.Column():
+                 text2 = gr.Text(label="Generated Phrase")
+         abtn = gr.Button("Generate mugshot phrase")
+         abtn.click(generate_prompt, inputs=[text1_1, text1_2, text1_3, text1_4], outputs=text2)
+     with gr.Accordion("Generate MugShot", open=False):
+         gr.Markdown("**Generate a mugshot from the input prompt using Dall-E**")
+         gr.Markdown("**Use Dall-E or StableDiffusion image generation for text-to-image**")
+         model = gr.Radio(["StableDiffusion"])
+         with gr.Row():
+             with gr.Column():
+                 text3 = gr.Text(label="Input Phrase")
+             with gr.Column():
+                 im1 = gr.Image()
+         bbtn = gr.Button("Image from description")
+         bbtn.click(generate, inputs=[text3, model], outputs=im1)
+ 
+     with gr.Accordion("Image from Sketch", open=False):
+         gr.Markdown("**Get an enhanced image from a sketch and the desired input prompt using StableDiffusion**")
+         with gr.Accordion("Pre-drawn Sketch", open=False):
+             gr.Markdown("**Generate a colorful image from a pre-drawn sketch**")
+             gr.Markdown("**Use StableDiffusion Depth2Image for image-to-image transformation**")
+             with gr.Row():
+                 with gr.Column():
+                     text4 = gr.Text(label="Prompt")
+                     text5 = gr.Text(label="Negative Prompt")
+                     im2 = gr.Image(type="pil")
+                 with gr.Column():
+                     im3 = gr.Image()
+             cbtn = gr.Button("Sketch to color")
+             cbtn.click(transform, inputs=[im2, text4, text5], outputs=im3)
+         with gr.Accordion("Draw Sketch", open=False):
+             gr.Markdown("**Draw a sketch on your own and give a text description of the features**")
+             gr.Markdown("**Generate a colorful image using StableDiffusionImg2ImgPipeline**")
+             with gr.Row():
+                 with gr.Column():
+                     text6 = gr.Text(label="Prompt")
+                     text7 = gr.Text(label="Negative Prompt")
+                     im4 = gr.Sketchpad(shape=(256, 256), invert_colors=False, type="pil")
+                 with gr.Column():
+                     im5 = gr.Image()
+             ebtn = gr.Button("Draw Sketch to color")
+             ebtn.click(transform1, inputs=[im4, text6, text7], outputs=im5)
+ 
+     with gr.Accordion("Check Database", open=False):
+         gr.Markdown("**Check if the image matches any image in our database using face_recognition**")
+         gr.Markdown("**Use face recognition, face detection and computer vision to match images**")
+         with gr.Row():
+             with gr.Column():
+                 im6 = gr.Image(type="pil")
+             with gr.Column():
+                 text8 = gr.Text(label="Identified Name")
+         fbtn = gr.Button("Find the Name")
+         fbtn.click(check_database, inputs=im6, outputs=text8)
+ 
+     with gr.Accordion("Search Google", open=False):
+         gr.Markdown("**Check if the image is present on the Internet**")
+         gr.Markdown("**Use the Google search API to search for the image on the web**")
+         with gr.Row():
+             with gr.Column():
+                 im7 = gr.Image(type="pil")
+             with gr.Column():
+                 text9 = gr.Text(label="Identified Title")
+                 im8 = gr.Image()
+         gbtn = gr.Button("Find the Name")
+         gbtn.click(google_search, inputs=im7, outputs=[text9, im8])
+ 
+     with gr.Accordion("Search in CCTV footage", open=False):
+         gr.Markdown("**Upload a video to identify the missing person in the footage**")
+         with gr.Row():
+             with gr.Column():
+                 fil1 = gr.File(type="file")
+             with gr.Column():
+                 vid2 = gr.Video()
+         hbtn = gr.Button("Video")
+         hbtn.click(video, inputs=fil1, outputs=vid2)
+ demo.launch()
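
A minimal pre-launch sketch, not part of app.py itself: the script reads its two secrets from environment variables at import time, so STABILITY_KEY and OPENAI_KEY must exist before the file runs. The variable names come from the code above; the placeholder values below are illustrative only.

import os

# Read by client.StabilityInference(key=os.environ['STABILITY_KEY'], ...)
os.environ.setdefault("STABILITY_KEY", "<your-stability-api-key>")  # placeholder
# Read by openai.api_key = os.environ['OPENAI_KEY']
os.environ.setdefault("OPENAI_KEY", "<your-openai-api-key>")        # placeholder

# With both variables set, running app.py builds the Gradio Blocks UI and calls demo.launch().

On a Hugging Face Space these values would typically be configured as repository secrets rather than set in code.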