---
datasets:
  - scikit-learn/iris
widget:
  structuredData:
    SepalLengthCm:
      - 5.1
      - 4.9
      - 6.2
    SepalWidthCm:
      - 3.5
      - 3
      - 3.4
    PetalLengthCm:
      - 1.4
      - 1.4
      - 5.4
    PetalWidthCm:
      - 0.2
      - 0.2
      - 2.3
    target:
      - 0
      - 0
      - 2
pipeline_tag: tabular-classification
---

# Usage

```python
import json

import joblib
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline

# load the trained SVC model
model = joblib.load("iris_svm.joblib")

# mean-impute each feature column; selecting columns by name requires
# a pandas DataFrame as input
column_transformer_pipeline = ColumnTransformer([
    ("SepalLengthCm", SimpleImputer(strategy="mean"), ["SepalLengthCm"]),
    ("SepalWidthCm", SimpleImputer(strategy="mean"), ["SepalWidthCm"]),
    ("PetalLengthCm", SimpleImputer(strategy="mean"), ["PetalLengthCm"]),
    ("PetalWidthCm", SimpleImputer(strategy="mean"), ["PetalWidthCm"]),
])

pipeline = Pipeline([
    ("transformation", column_transformer_pipeline),
    ("model", model),
])

# feature names, target column and class-to-label mapping are stored in config.json
with open("config.json", "r") as f:
    config = json.load(f)

features = config["features"]
target = config["targets"][0]
target_mapping = config["target_mapping"]  # class name -> integer label

# example input data (the same rows as in the inference widget above)
input_data = np.array([
    [5.1, 3.5, 1.4, 0.2],
    [4.9, 3.0, 1.4, 0.2],
    [6.2, 3.4, 5.4, 2.3],
])

# make sure the input data has the correct shape
if input_data.shape[1] != len(features):
    raise ValueError(f"Input data must have {len(features)} features.")

# wrap the array in a DataFrame so the ColumnTransformer can select columns by name
input_df = pd.DataFrame(input_data, columns=features)

# the imputers above are created unfitted; fit them on the incoming batch so the
# pipeline can run (this only affects rows that contain missing values)
column_transformer_pipeline.fit(input_df)

predicted_classes = pipeline.predict(input_df)

# map integer predictions back to class names
inverse_target_mapping = {label: name for name, label in target_mapping.items()}
predicted_class_names = [inverse_target_mapping[int(c)] for c in predicted_classes]

print("Predicted classes:", predicted_class_names)
```