import cv2
import numpy as np
import onnxruntime as ort

# Path to the exported ONNX classification model.
model_path = 'model.onnx'

# Load the ONNX model once and cache the input/output tensor names so the
# per-frame inference loop does not repeat the lookup.
session = ort.InferenceSession(model_path)
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name

# Class labels, indexed by the model's output class id.
class_names = ['pen', 'udisk']

# Open the default camera (device index 0). Fail fast with a clear error
# instead of letting the read loop crash later on a None frame.
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise RuntimeError('Could not open video capture device 0')

while True:
    # Grab a frame; ret is False when the camera yields nothing (device
    # unplugged, stream ended). Bail out instead of passing None to
    # cv2.resize, which would raise a cryptic OpenCV assertion.
    ret, frame = cap.read()
    if not ret:
        break

    # Preprocess: resize to the model's spatial input size and scale pixel
    # values to [0, 1].
    # NOTE(review): frame is BGR uint8 HWC straight from OpenCV; this
    # assumes the model expects BGR, channels-last input with plain /255
    # normalization -- confirm against the training pipeline (many models
    # want RGB and/or NCHW layout plus mean/std normalization).
    resized_frame = cv2.resize(frame, (224, 224))
    input_data = resized_frame.astype(np.float32) / 255.0

    # Add a batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
    input_data = np.expand_dims(input_data, axis=0)

    # Run inference and take the highest-scoring class.
    output_data = session.run([output_name], {input_name: input_data})[0]
    idx = int(np.argmax(output_data))  # plain int for list indexing below
    confidence = output_data[0][idx]
    label = class_names[idx]

    # Overlay the prediction on the original full-resolution frame.
    cv2.putText(frame, f'{label}: {confidence:.2f}', (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    # Show the annotated frame.
    cv2.imshow('Image Classification', frame)

    # Quit on 'q'. Mask to the low 8 bits: waitKey can return a value with
    # high bits set on some platforms, which would break the comparison.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera device and destroy all OpenCV windows so the
# process exits cleanly (some backends keep the device busy otherwise).
cap.release()
cv2.destroyAllWindows()