Spaces:
Sleeping
Sleeping
File size: 1,828 Bytes
b75711c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
import json
import requests
import argparse
import numpy as np
from sklearn.model_selection import train_test_split
from modeling.ml_model_dev import read_csv_file
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as plain Python lists.

    Any object that is not an ``np.ndarray`` is delegated to the base
    ``json.JSONEncoder``, which raises ``TypeError`` for unsupported types.
    """

    def default(self, obj):
        # Convert numpy arrays to nested lists; defer everything else.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def send_post_reqest(ARGS):
    """Send sample prediction POST requests to the deployed model endpoint.

    Reads the dataset CSV, reproduces the train/test split used during model
    development (``random_state=4`` keeps the split deterministic), and sends
    one JSON-encoded feature row per request to the ``/predict`` endpoint.

    Args:
        ARGS: parsed argparse namespace with attributes
            ``file_csv`` (str)  - full path to the dataset CSV file
            ``num_requests`` (int) - number of POST requests to send

    Returns:
        None. Responses are printed to stdout.
    """
    df_csv = read_csv_file(ARGS.file_csv)
    # Same split parameters as model training, so the test rows match.
    df_train, df_test = train_test_split(df_csv, test_size=0.1, random_state=4)
    # All columns except the last are features; the last is the label.
    list_cols = df_train.columns[:-1]
    arr_test = df_test.to_numpy()
    X_test = arr_test[:, :-1]
    print(X_test.shape)

    # The endpoint of the post request.
    url = "https://abhishekrs4-ml-water-potability.hf.space/predict"
    # Additional headers to indicate the content type of the post request.
    headers = {'Content-type': 'application/json'}

    # Clamp to the number of available test rows to avoid an IndexError
    # when --num_requests exceeds the size of the 10% test split.
    num_requests = min(ARGS.num_requests, X_test.shape[0])
    for i in range(num_requests):
        list_values = list(X_test[i, :])
        encoded_data = dict(zip(list_cols, list_values))
        print(encoded_data)
        # cls=NumpyEncoder makes serialization robust to numpy scalar/array
        # values; timeout prevents the client from hanging indefinitely.
        result = requests.post(
            url,
            data=json.dumps(encoded_data, cls=NumpyEncoder),
            headers=headers,
            timeout=30,
        )
        print(f"{json.loads(result.text)} \n")
    return
def main():
    """Parse command-line options and fire off the prediction requests."""
    default_csv = "dataset/water_potability.csv"
    default_num_requests = 20

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--file_csv",
        type=str,
        default=default_csv,
        help="full path to dataset csv file",
    )
    parser.add_argument(
        "--num_requests",
        type=int,
        default=default_num_requests,
        help="number of post requests to send",
    )

    # parse_known_args tolerates extra CLI flags (e.g. from notebook runners).
    ARGS, unparsed = parser.parse_known_args()
    send_post_reqest(ARGS)
    return
# Run the request client only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|