|
|
|
|
|
|
|
import sys |
|
import argparse |
|
import numpy as np |
|
|
|
import tritonclient.grpc as grpcclient |
|
|
|
from sklearn.datasets import fetch_openml |
|
from sklearn.model_selection import train_test_split |
|
from sklearn.metrics import accuracy_score |
|
|
|
|
|
# Fixed seed so the train/test split (and any numpy randomness) is reproducible
# across runs; also passed as random_state to train_test_split below.
random_seed = 0

np.random.seed(random_seed)
|
|
|
|
|
def make_prediction(model_server, model_name, model_version, verbose):
    """Send the cardiotocography test split to a Triton-served model and print accuracy.

    Fetches the OpenML "cardiotocography" dataset, binarizes the target
    (class "3" -> 1, everything else -> 0), submits the held-out test split
    as one batched gRPC inference request, and prints the accuracy of the
    returned predictions against the true labels.

    Args:
        model_server: host:port of the Triton gRPC endpoint (e.g. "localhost:8001").
        model_name: name of the deployed model to query.
        model_version: version string of the model to query ("" / "1" / ...).
        verbose: enable verbose gRPC client logging.

    Exits the process with status 1 if the gRPC channel cannot be created or
    the server's inference statistics look wrong.
    """
    try:
        triton_client = grpcclient.InferenceServerClient(url=model_server, verbose=verbose)
    except Exception as e:
        print("channel creation failed: " + str(e))
        sys.exit(1)

    inputs = []
    outputs = []

    # Class "3" is treated as the positive (anomaly) class; all others are normal.
    dataset_name = "cardiotocography"
    dataset = fetch_openml(name=dataset_name, version=1, as_frame=False)
    X, y = dataset.data, dataset.target
    s = y == "3"
    y = s.astype(int)

    # Only the test split is sent for inference; training happened server-side.
    # random_state must match the split used at training time for a fair score.
    _, X_test, _, y_test = train_test_split(X, y, test_size=0.25, random_state=random_seed)
    input_data = X_test.astype(np.float32)
    input_label = y_test.astype(np.float32)
    print(f'input_data:\n{input_data[0]}')
    print(f'input_label:\n{input_label[0]}')

    # One batched request: shape is (num_samples, num_features), FP32.
    inputs.append(grpcclient.InferInput('float_input', [input_data.shape[0], input_data.shape[1]], "FP32"))
    inputs[0].set_data_from_numpy(input_data)
    outputs.append(grpcclient.InferRequestedOutput('label'))

    # FIX: model_version was accepted but never forwarded, so the server always
    # served its default/latest version regardless of the --model_version flag.
    results = triton_client.infer(
        model_name=model_name,
        model_version=model_version,
        inputs=inputs,
        outputs=outputs,
    )

    statistics = triton_client.get_inference_statistics(model_name=model_name)

    if len(statistics.model_stats) != 1:
        print("FAILED: Inference Statistics")
        sys.exit(1)

    y_pred = results.as_numpy('label').squeeze()

    # IsolationForest convention: +1 = inlier, -1 = outlier.
    # Remap to 0 (normal) / 1 (anomaly) to match the binarized labels.
    y_pred = np.where(y_pred == 1, 0, 1)
    print(f'y_pred:\n{y_pred[0]}')

    acc = accuracy_score(y_test, y_pred)
    print(f'Accuracy classification score: {acc}')
|
|
|
|
|
""" |
|
python client.py --model_server localhost:8001 --model_name isolation_forest --model_version 1 |
|
""" |
|
if __name__ == "__main__":
    # Command-line entry point: collect connection/model options and run inference.
    cli = argparse.ArgumentParser(description="Make predictions using a specific model.")
    cli.add_argument("--model_server", default="localhost:8001", help="The address of the model server.")
    cli.add_argument("--model_name", default="isolation_forest", help="The name of the model to use.")
    cli.add_argument("--model_version", default="1", help="The version of the model to use.")
    cli.add_argument("--verbose", action="store_true", default=False, help='Enable verbose output')

    opts = cli.parse_args()

    make_prediction(
        opts.model_server,
        opts.model_name,
        opts.model_version,
        opts.verbose,
    )
|
|