import gradio as gr
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
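
# Load the dataset. data.csv is expected to have a row index in its first
# column (dropped below), feature columns after that, and the value to
# predict in the last column, e.g. a (hypothetical) header like:
#   ,age,employment,favorite_sport,recommendation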
uncleaned_data = pd.read_csv('data.csv')
uncleaned_data = uncleaned_data.iloc[:, 1:]
data = pd.DataFrame()

cat_value_dicts = {}
final_colname = uncleaned_data.columns[-1]
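
# Label-encode every non-numeric column as integer codes, storing each
# value -> code mapping in cat_value_dicts so user input can be encoded the
# same way at prediction time. Numeric columns are copied into `data` as-is.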
for (colname, colval) in uncleaned_data.items():

    # numeric columns are copied over unchanged
    if isinstance(colval.values[0], (np.integer, float)):
        data[colname] = uncleaned_data[colname].copy()
        continue

    # categorical column: assign each unique value an integer code
    new_dict = {}
    val = 0
    transformed_col_vals = []

    for (row, item) in enumerate(colval.values):
        if item not in new_dict:
            new_dict[item] = val
            val += 1
        transformed_col_vals.append(new_dict[item])

    # for the label column, store the reverse mapping (code -> value) so
    # predictions can be translated back into readable text
    if colname == final_colname:
        new_dict = {value: key for (key, value) in new_dict.items()}

    cat_value_dicts[colname] = new_dict
    data[colname] = transformed_col_vals
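
# Split the encoded table into features (all columns except the last) and the
# label (the last column), then hold out 25% of the rows for testing.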
cols = len(data.columns)
num_features = cols - 1
x = data.iloc[:, :num_features]
y = data.iloc[:, num_features:]

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
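
# Fit a logistic regression classifier on the training split and predict the
# held-out test split so accuracy can be reported below.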
model = LogisticRegression()
model.fit(x_train, y_train.values.ravel())
y_pred = model.predict(x_test)
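
# The "most important feature" is taken to be the one with the largest
# absolute logistic-regression coefficient. This is only a rough heuristic:
# coefficients are directly comparable only when features are on similar scales.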
def get_feat():
    feats = [abs(x) for x in model.coef_[0]]
    max_val = max(feats)
    idx = feats.index(max_val)
    return data.columns[idx]


acc = str(round(metrics.accuracy_score(y_test, y_pred) * 100, 1)) + "%"
most_imp_feat = get_feat()
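
# Gradio callback: receives one value per input component, in the same order
# as the feature columns. Categorical values are encoded with the mappings
# built during training; the predicted class code is mapped back to its
# original text before being returned.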
def general_predictor(*args):
    features = []

    # encode categorical inputs with the same codes used during training;
    # numeric inputs pass through unchanged (zip stops before the label column)
    for colname, arg in zip(data.columns, args):
        if colname in cat_value_dicts:
            features.append(cat_value_dicts[colname][arg])
        else:
            features.append(arg)

    # wrap the single row in a DataFrame with the training column names so
    # scikit-learn does not warn about missing feature names
    new_input = pd.DataFrame([features], columns=x.columns)
    result = model.predict(new_input)

    # translate the predicted class code back to its original text label
    return cat_value_dicts[final_colname][result[0]]
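
# Build the Gradio Blocks UI. The first line of info.md is rendered as the
# page title; the rest of the file is shown at the bottom of the page.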
with open('info.md') as f:
    info_title = f.readline()
    info_body = f.read()

block = gr.Blocks()

with block:
    gr.Markdown(info_title)
    gr.Markdown('Take the quiz to get a personalized recommendation using AI.')

    with gr.Row():
        with gr.Group():

            # one input component per feature column: a dropdown of known
            # values for categorical features, a number box otherwise
            inputls = []
            for colname in data.columns:
                if colname == final_colname:
                    continue

                if colname in cat_value_dicts:
                    radio_options = list(cat_value_dicts[colname].keys())
                    inputls.append(gr.Dropdown(radio_options, type="value", label=colname))
                else:
                    inputls.append(gr.Number(label=colname))
            gr.Markdown("<br />")

            submit = gr.Button("Click to see your personalized result!", variant="primary")
            gr.Markdown("<br />")
            output = gr.Textbox(label="Your recommendation:", placeholder="your recommendation will appear here")

            submit.click(fn=general_predictor, inputs=inputls, outputs=output)
            gr.Markdown("<br />")
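
    # summary of the trained model: test-set accuracy and most important feature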
    with gr.Row():
        with gr.Group():
            gr.Markdown(f"<h3>Accuracy: </h3>{acc}")
        with gr.Group():
            gr.Markdown(f"<h3>Most important feature: </h3>{most_imp_feat}")

    gr.Markdown("<br />")
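
    # disclaimer about how to interpret the numbers above, followed by the
    # rest of the info.md article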
    with gr.Group():
        gr.Markdown('''⭐ Note that model accuracy is based on the uploaded data.csv and reflects how well the AI model can give correct recommendations for <em>that dataset</em>. Model accuracy and most important feature can be helpful for understanding how the model works, but <em>should not be considered absolute facts about the real world</em>.''')

    with gr.Group():
        # everything in info.md after the title line
        gr.Markdown(info_body)


block.launch()