Eshieh2 committed
Commit f61333d · 1 Parent(s): c3e51ac

update to use object detection for heads and pass off classification. Use ImageAnnotation result to annotate detections
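The new flow hands Gradio a gr.AnnotatedImage, whose value is the base image plus a list of (bounding box, label) pairs, one per detected head. A minimal sketch of that output contract, with placeholder box coordinates and names used only for illustration:

import gradio as gr

def annotate(img):
    # gr.AnnotatedImage accepts (image, [(region, label), ...]); a region can be
    # an (x1, y1, x2, y2) pixel box. These boxes and labels are placeholders.
    boxes = [((10, 10, 120, 120), "guaraci"), ((200, 40, 310, 160), "unknown")]
    return (img, boxes)

gr.Interface(fn=annotate, inputs=gr.Image(type="pil"), outputs=gr.AnnotatedImage()).launch()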

Files changed (5)
  1. .gitignore +2 -0
  2. app.py +41 -15
  3. marcela.jpg +0 -0
  4. requirements.txt +2 -1
  5. valid.txt +8 -0
.gitignore CHANGED
@@ -1 +1,3 @@
 .DS_Store
+.gradio
+*~
app.py CHANGED
@@ -2,28 +2,54 @@ import gradio as gr
 import tensorflow as tf
 import numpy as np
 import requests
+import torch
+
 from huggingface_hub import snapshot_download
+from huggingface_hub import hf_hub_download
 
 label_count = 51
+def readLines(filename):
+    with open(filename,'r') as f:
+        return(f.read().splitlines())
+
 labels = ["missing"] * label_count
-with open('labels.txt','r') as f:
-    labels = f.read().splitlines()
+labels = readLines('labels.txt')
+valid = readLines('valid.txt')
 
-model_path = extractor_path = snapshot_download(repo_id="eshieh2/jaguarid_pantanal")
+model_path = snapshot_download(repo_id="eshieh2/jaguarid_pantanal")
 model = tf.saved_model.load(f"{model_path}/saved_model")
 serving = model.signatures['serving_default']
-#model = tf.keras.models.load_model(f"{model_path}/saved_model")
-def classify_image(inp):
-    inp = inp.resize((480,480))
-    inp = np.array(inp)
-    inp = np.reshape(inp,(-1, 480, 480, 3)).astype(np.float32)
-    inp = np.divide(inp,255.0)
-    #prediction = model.predict(inp).flatten()
-    prediction = serving(tf.convert_to_tensor(inp))['model']
-    prediction = tf.squeeze(prediction)
-    return {labels[i]: float(prediction[i]) for i in range(label_count)}
+
+detector_path = hf_hub_download(repo_id = "eshieh2/jaguarhead",
+                                filename = "jaguarheadv5.pt")
+detector = torch.hub.load('ultralytics/yolov5', 'custom', path = detector_path)
+
+def classify_image(in_image):
+    width,height = in_image.size
+    heads = detector(in_image)
+    masks = [] # tuples of (box coords, label string)
+    for head in heads.xyxy[0]:
+        x,y,x2,y2,pct,cls = head.numpy()
+        w = x2 - x
+        h = y2 - y
+        inp = in_image.crop((x,y,x2,y2))
+        inp = inp.resize((480,480))
+        inp = np.array(inp)
+        inp = np.reshape(inp,(-1, 480, 480, 3)).astype(np.float32)
+        inp = np.divide(inp,255.0)
+        prediction = serving(tf.convert_to_tensor(inp))['model']
+        prediction = tf.squeeze(prediction)
+        pred = {labels[i]: float(prediction[i]) for i in range(label_count)}
+        #print(pred)
+        max_key = max(pred, key=pred.get)
+        rect = (int(x),int(y),int(x2),int(y2))
+        if max_key.lower() in valid:
+            masks.append((rect,f"{max_key}:{pct}"))
+        else:
+            masks.append((rect,"unknown"))
+    return (in_image,masks)
 
 image = gr.Image(type='pil')
-label = gr.Label(num_top_classes=3)
+output = gr.AnnotatedImage()
 
-gr.Interface(fn=classify_image, inputs=image, outputs=label, examples = [["kasimir.jpg","guaraci.jpg"]]).launch()
+gr.Interface(fn=classify_image, inputs=image, outputs=output, examples = [["kasimir.jpg"],["guaraci.jpg"],["marcela.jpg"]]).launch()
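For reference, the crop and rectangle logic above relies on the YOLOv5 hub results format: each row of heads.xyxy[0] is one detection as (x1, y1, x2, y2, confidence, class) in input-image pixel coordinates, which is why the first four values feed the crop and the annotation box directly. For example:

# Each row of a YOLOv5 Detections.xyxy tensor is [x1, y1, x2, y2, conf, cls].
for x1, y1, x2, y2, conf, cls in heads.xyxy[0].numpy():
    print(f"head at ({x1:.0f},{y1:.0f})-({x2:.0f},{y2:.0f}), confidence {conf:.2f}")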
marcela.jpg ADDED
requirements.txt CHANGED
@@ -1 +1,2 @@
-tensorflow
+tensorflow
+torch
valid.txt ADDED
@@ -0,0 +1,8 @@
+guaraci
+marcela
+margo
+ti
+patricia
+bernard
+kasimir
+medrosa