niki-stha committed on
Commit
d1f4a84
1 Parent(s): 07695ec

Create inference.py

Files changed (1)
  1. inference.py +42 -0
inference.py ADDED
@@ -0,0 +1,42 @@
+ import io
+
+ import torch
+ from flask import jsonify  # the endpoint below assumes a Flask-style request/response
+ from PIL import Image
+
+ # Load the YOLOv5 model
+ model = torch.hub.load('niki-stha/asl-detection-yolov5', 'yolov5s')
+
+ # Set the device (GPU if available, otherwise CPU)
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model.to(device).eval()
+
+ # Inference function
+ def run_inference(image):
+     # Pass the PIL image straight to the model: the YOLOv5 AutoShape wrapper
+     # handles resizing (to 416x416 here), normalization and NMS, and returns
+     # a Detections object that supports .pandas()
+     results = model(image, size=416)
+
+     # Post-process the results
+     # (You can customize this part based on your specific requirements)
+     predictions = results.pandas().xyxy[0]
+
+     return predictions
+
+ # Example API endpoint
+ def inference_api(request):
+     # Get the image from the request (adapt this based on your API framework)
+     image_data = request.files['image'].read()
+     image = Image.open(io.BytesIO(image_data)).convert('RGB')
+
+     # Run inference
+     predictions = run_inference(image)
+
+     # Convert the predictions DataFrame to a JSON-serializable structure
+     # (adapt this based on your API framework)
+     response = {
+         'predictions': predictions.to_dict(orient='records')
+     }
+
+     return jsonify(response)
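
For reference, a minimal sketch of how inference_api could be served, assuming a Flask app; the app object, the /predict route, and the port are illustrative and not part of this commit:

from flask import Flask, request

from inference import inference_api

app = Flask(__name__)

@app.route('/predict', methods=['POST'])
def predict():
    # Delegate to the inference_api helper added in this commit
    return inference_api(request)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)

With such an app running, the endpoint could be exercised with, for example, curl -F image=@sample.jpg http://localhost:5000/predict, where the multipart field name image matches request.files['image'] in inference_api.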