---
datasets:
- scikit-learn/iris
---
import joblib

# load the trained SVM model from the serialized joblib artifact
model = joblib.load("iris_svm.joblib")
import json

# read the feature names, target column, and label mapping saved alongside the model
with open("config.json", "r") as f:
    config = json.load(f)

features = config["features"]
target = config["targets"][0]
target_mapping = config["target_mapping"]
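For reference, config.json is assumed to carry the feature names, the target column, and a name-to-index label mapping produced at training time. The exact file shipped with the model may differ; a minimal sketch of its parsed contents, using the standard iris dataset as an illustration, could look like this:

# illustrative config contents only; the field names come from the code above,
# the values are assumptions based on the standard iris dataset
example_config = {
    "features": ["sepal length (cm)", "sepal width (cm)",
                 "petal length (cm)", "petal width (cm)"],
    "targets": ["target"],
    "target_mapping": {"setosa": 0, "versicolor": 1, "virginica": 2},
}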
import numpy as np

# example input data: one row per sample, columns ordered as in the features list
input_data = np.array([
    [5.1, 3.5, 1.4, 0.2],
    [4.9, 3.0, 1.4, 0.2],
    [6.2, 3.4, 5.4, 2.3],
])
# make sure the input data has the correct shape
if input_data.shape[1] != len(features):
    raise ValueError(f"Input data must have {len(features)} features.")
predicted_classes = model.predict(input_data)

# invert the name -> index mapping once, then decode the numeric predictions
index_to_name = {index: name for name, index in target_mapping.items()}
predicted_class_names = [index_to_name[predicted_class] for predicted_class in predicted_classes]
print("Predicted classes:", predicted_class_names)