Netrava committed on
Commit
6b50886
·
verified ·
1 Parent(s): 430a919

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +41 -12
  2. app.py +58 -58
  3. requirements.txt +7 -0
README.md CHANGED
@@ -1,12 +1,41 @@
1
- ---
2
- title: Ai
3
- emoji: 🏆
4
- colorFrom: red
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 5.36.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # 🤖 Netrava UI Detection
3
+
4
+ This Hugging Face Space hosts the Netrava AI Assistant's UI detection model.
5
+
6
+ ## Usage
7
+
8
+ 1. Upload a screenshot
9
+ 2. The model will detect and highlight UI elements
10
+ 3. Download the annotated image
11
+
12
+ ## Supported Elements
13
+
14
+ - Buttons
15
+ - Text boxes
16
+ - Dropdowns
17
+ - Checkboxes
18
+ - Icons
19
+ - Menus
20
+ - Windows
21
+ - Dialogs
22
+ - Links
23
+ - Images
24
+ - Labels
25
+ - Tabs
26
+ - Toolbars
27
+ - Status bars
28
+ - Scroll bars
29
+
30
+ ## API Usage
31
+
32
+ You can also use this space via API:
33
+
34
+ ```python
35
+ import requests
36
+
37
+ response = requests.post(
38
+ "https://your-username-netrava-ui-detection.hf.space/api/predict",
39
+ files={"data": open("screenshot.png", "rb")}
40
+ )
41
+ ```
app.py CHANGED
@@ -1,58 +1,58 @@
1
-
2
- import gradio as gr
3
- import torch
4
- from ultralytics import YOLO
5
- import cv2
6
- import numpy as np
7
- from PIL import Image
8
-
9
- # Load model
10
- model = YOLO('netrava_ui_model.pt')
11
-
12
- def detect_ui_elements(image):
13
- """Detect UI elements in uploaded image"""
14
- try:
15
- # Convert PIL to OpenCV format
16
- img_array = np.array(image)
17
- img_bgr = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
18
-
19
- # Run detection
20
- results = model(img_bgr)
21
-
22
- # Draw results
23
- for result in results:
24
- boxes = result.boxes
25
- if boxes is not None:
26
- for box in boxes:
27
- # Get coordinates
28
- x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
29
- conf = box.conf[0].cpu().numpy()
30
- cls = int(box.cls[0].cpu().numpy())
31
-
32
- # Draw bounding box
33
- cv2.rectangle(img_bgr, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
34
-
35
- # Add label
36
- label = f'{model.names[cls]}: {conf:.2f}'
37
- cv2.putText(img_bgr, label, (int(x1), int(y1)-10),
38
- cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
39
-
40
- # Convert back to RGB
41
- result_img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
42
- return Image.fromarray(result_img)
43
-
44
- except Exception as e:
45
- return f"Error: {str(e)}"
46
-
47
- # Create Gradio interface
48
- interface = gr.Interface(
49
- fn=detect_ui_elements,
50
- inputs=gr.Image(type="pil"),
51
- outputs=gr.Image(type="pil"),
52
- title="🤖 Netrava UI Element Detection",
53
- description="Upload a screenshot to detect UI elements like buttons, textboxes, etc.",
54
- examples=["example1.png", "example2.png"]
55
- )
56
-
57
- if __name__ == "__main__":
58
- interface.launch()
 
1
import os

import cv2
import gradio as gr
import numpy as np
import torch
from ultralytics import YOLO
from PIL import Image
8
+
9
# Load the fine-tuned YOLO weights once at process startup so every
# request reuses the same in-memory model (avoids per-call load cost).
# NOTE(review): assumes 'netrava_ui_model.pt' ships alongside app.py — confirm.
model = YOLO('netrava_ui_model.pt')
11
+
12
def detect_ui_elements(image):
    """Detect UI elements in an uploaded screenshot and draw labeled boxes.

    Args:
        image: PIL image from ``gr.Image(type="pil")``, or None when the
            input is cleared. Any mode is accepted (converted to RGB).

    Returns:
        A PIL RGB image with a green bounding box and a ``name: confidence``
        label drawn for every detection, or None when *image* is None.

    Raises:
        gr.Error: if inference or drawing fails; Gradio shows the message
            in the UI. (Returning a plain error string, as before, breaks
            the gr.Image output component.)
    """
    if image is None:
        # Gradio passes None when the user clears the input; nothing to do.
        return None
    try:
        # Force RGB first: grayscale/RGBA uploads would crash
        # cv2.COLOR_RGB2BGR, which requires exactly 3 channels.
        img_array = np.array(image.convert("RGB"))
        img_bgr = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)

        # Run YOLO inference on the BGR frame.
        results = model(img_bgr)

        for result in results:
            boxes = result.boxes
            if boxes is None:
                continue
            for box in boxes:
                # Tensors -> plain Python numbers for OpenCV drawing calls.
                x1, y1, x2, y2 = (int(v) for v in box.xyxy[0].cpu().numpy())
                conf = float(box.conf[0].cpu().numpy())
                cls = int(box.cls[0].cpu().numpy())

                cv2.rectangle(img_bgr, (x1, y1), (x2, y2), (0, 255, 0), 2)

                # Clamp the label baseline so boxes touching the top edge
                # don't render their text off-image.
                label = f'{model.names[cls]}: {conf:.2f}'
                cv2.putText(img_bgr, label, (x1, max(y1 - 10, 10)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

        # BGR back to RGB for the gr.Image output component.
        return Image.fromarray(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB))

    except Exception as e:
        # The output is gr.Image; a returned str cannot be rendered there.
        # gr.Error surfaces the failure in the Gradio UI instead.
        raise gr.Error(f"Detection failed: {e}") from e
46
+
47
# Build the Gradio UI. Example images are optional: passing paths that do
# not exist makes gr.Interface raise FileNotFoundError at startup, and this
# Space's commit does not include example1.png/example2.png — so only hand
# Gradio the examples actually present on disk (None when there are none).
_examples = [p for p in ("example1.png", "example2.png") if os.path.exists(p)]

interface = gr.Interface(
    fn=detect_ui_elements,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="🤖 Netrava UI Element Detection",
    description="Upload a screenshot to detect UI elements like buttons, textboxes, etc.",
    examples=_examples or None,
)

if __name__ == "__main__":
    # launch() blocks and serves the app; HF Spaces invokes this entry point.
    interface.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+
2
+ ultralytics
3
+ torch
4
+ torchvision
5
+ opencv-python
6
+ Pillow
7
+ gradio