# homework06/app.py
import numpy as np
import pandas as pd
import gradio as gr


def homework06_solution(theta0, theta1, theta2, learning_rate):

    def logistic_predict(b0, b1, b2, x1, x2):
        # sigmoid of the linear combination z = b0 + b1*x1 + b2*x2
        z = b0 + b1*x1 + b2*x2
        p = 1 / (1 + np.exp(-z))
        return p
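
    # Quick check (illustrative values): logistic_predict(0, 0, 0, 1, 1) returns 0.5,
    # since the sigmoid of z = 0 is exactly 0.5.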

    def binary_cross_entropy(X, y, parameter_list):
        entropy_loss = 0
        for n in range(len(X)):
            # linear output given the current weights
            z = parameter_list[0] + parameter_list[1]*X[n][0] + parameter_list[2]*X[n][1]
            # sigmoid output given z
            p = 1 / (1 + np.exp(-z))
            # accumulate the mean binary cross-entropy over the dataset
            predict_1 = y[n] * np.log(p)
            predict_0 = (1 - y[n]) * np.log(1 - p)
            entropy_loss += -(predict_1 + predict_0) / len(X)
        return entropy_loss
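
    # Vectorized sketch of the same loss (an added reference, not used by the app's
    # outputs): convenient for cross-checking the per-sample loop above.
    def binary_cross_entropy_vectorized(X, y, parameter_list):
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)
        z = parameter_list[0] + X @ np.asarray(parameter_list[1:], dtype=float)
        p = 1 / (1 + np.exp(-z))
        # mean of the per-sample binary cross-entropy terms
        return float(np.mean(-(y * np.log(p) + (1 - y) * np.log(1 - p))))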

    def get_logistic_results(data, theta0, theta1, theta2):
        ## (2) make a logistic prediction for every sample
        y_hat_list = []
        theta0_grad = 0
        theta1_grad = 0
        theta2_grad = 0
        for i in range(len(data)):
            x1 = data.iloc[i, 0]
            x2 = data.iloc[i, 1]
            y = data.iloc[i, 2]
            y_hat = logistic_predict(theta0, theta1, theta2, x1, x2)
            y_hat_list.append(y_hat)
            ## (3) accumulate the gradients: dL/dtheta_j = -(1/N) * sum_n (y_n - y_hat_n) * x_{n,j}
            theta0_grad = theta0_grad - 1/len(data)*(y - y_hat)*1.0
            theta1_grad = theta1_grad - 1/len(data)*(y - y_hat)*x1
            theta2_grad = theta2_grad - 1/len(data)*(y - y_hat)*x2
        data['y_hat'] = y_hat_list
        data['y-y_hat'] = data['y'] - data['y_hat']
        data['(y-y_hat)*x_1'] = data['y-y_hat']*data.iloc[:, 0]
        data['(y-y_hat)*x_2'] = data['y-y_hat']*data.iloc[:, 1]
        return data, theta0_grad, theta1_grad, theta2_grad
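
    # Vectorized sketch of the same gradients (an added reference, not part of the
    # original flow): theta_j_grad = -(1/N) * sum_n (y_n - y_hat_n) * x_{n,j}, with x_{n,0} = 1.
    def logistic_gradients(X, y, parameter_list):
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)
        z = parameter_list[0] + X @ np.asarray(parameter_list[1:], dtype=float)
        y_hat = 1 / (1 + np.exp(-z))
        residual = y - y_hat
        return (-np.mean(residual),
                -np.mean(residual * X[:, 0]),
                -np.mean(residual * X[:, 1]))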

    ## (1) load the toy dataset
    X = np.array([[9, 9], [0, 0], [8, 6], [3, 3], [7, 8], [0.5, 0.5], [5, 6], [6, 7]])
    y = [1, 0, 1, 0, 1, 0, 1, 1]
    data = pd.DataFrame(X, columns=['X1', 'X2'])
    data['y'] = y
    ## (2) get the per-sample regression table and the gradients (question 3.2)
    data, theta0_grad, theta1_grad, theta2_grad = get_logistic_results(data, theta0, theta1, theta2)
    ### (3) summarize the per-sample results for question 3.2
    data_t = data.T
    data_t = data_t.round(5)
    data_t.insert(loc=0, column='Name',
                  value=['X1', 'X2', 'y', 'y_hat', 'y-y_hat', '(y-y_hat)*x_1', '(y-y_hat)*x_2'])
    ### (4) initial BCE loss (question 3.1)
    q3_loss = binary_cross_entropy(X, y, [theta0, theta1, theta2])
    ### (5) the gradients for question 3.3 were accumulated above;
    ###     update the parameters by one gradient-descent step (question 3.4)
    theta0_new = theta0 - learning_rate*theta0_grad
    theta1_new = theta1 - learning_rate*theta1_grad
    theta2_new = theta2 - learning_rate*theta2_grad
    ### (6) BCE loss after the update (question 3.4)
    q3_loss_new = binary_cross_entropy(X, y, [theta0_new, theta1_new, theta2_new])
    ### (7) collect all results for the Gradio visualization
    data_final = data_t.T
    print(data_final['y-y_hat'].values[1:])
    y_diff_sum = data_final['y-y_hat'].values[1:].sum()
    y_diff_x1_sum = data_final['(y-y_hat)*x_1'].values[1:].sum()
    y_diff_x2_sum = data_final['(y-y_hat)*x_2'].values[1:].sum()
    return (data_t.T, q3_loss, y_diff_sum, y_diff_x1_sum, y_diff_x2_sum,
            theta0_grad, theta1_grad, theta2_grad,
            theta0_new, theta1_new, theta2_new, q3_loss_new)
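
# Standalone check (a sketch, not required by the Space): the solver can be called
# directly without Gradio. The starting thetas and learning rate below are arbitrary
# illustration values, not values prescribed by the assignment.
# results = homework06_solution(0.0, 0.0, 0.0, 0.1)
# print("initial BCE loss:", results[1])     # q3_loss
# print("updated thetas:", results[8:11])    # theta0_new, theta1_new, theta2_new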

### configure the Gradio inputs (legacy gr.inputs / gr.outputs API, Gradio 2.x)
set_theta0 = gr.inputs.Number(label='theta0')
set_theta1 = gr.inputs.Number(label='theta1')
set_theta2 = gr.inputs.Number(label='theta2')
set_learning_rate = gr.inputs.Number(label='learning rate (eta)')
### configure outputs
set_output_q3a = gr.outputs.Dataframe(type='pandas', label='Question 3.2')
set_output_q3b = gr.outputs.Textbox(label='Question 3.1: Initial BCE loss')
set_output_q3c = gr.outputs.Textbox(label='Question 3.2: sum(y-y_hat)')
set_output_q3d = gr.outputs.Textbox(label='Question 3.2: sum((y-y_hat)*x_1)')
set_output_q3e = gr.outputs.Textbox(label='Question 3.2: sum((y-y_hat)*x_2)')
set_output_q4a0 = gr.outputs.Textbox(label='Question 3.3: theta0_grad')
set_output_q4a1 = gr.outputs.Textbox(label='Question 3.3: theta1_grad')
set_output_q4a2 = gr.outputs.Textbox(label='Question 3.3: theta2_grad')
set_output_q4b0 = gr.outputs.Textbox(label='Question 3.4: theta0_new, updated by gradient descent')
set_output_q4b1 = gr.outputs.Textbox(label='Question 3.4: theta1_new, updated by gradient descent')
set_output_q4b2 = gr.outputs.Textbox(label='Question 3.4: theta2_new, updated by gradient descent')
set_output_q4b4 = gr.outputs.Textbox(label='Question 3.4: new BCE loss after updating the parameters by gradient descent')
### configure the Gradio interface
interface = gr.Interface(fn=homework06_solution,
                         inputs=[set_theta0, set_theta1, set_theta2, set_learning_rate],
                         outputs=[set_output_q3a, set_output_q3b, set_output_q3c, set_output_q3d, set_output_q3e,
                                  set_output_q4a0, set_output_q4a1, set_output_q4a2,
                                  set_output_q4b0, set_output_q4b1, set_output_q4b2,
                                  set_output_q4b4],
                         title="CSCI4750/5750 (hw06): Logistic Regression/Optimization",
                         description="Click examples below for a quick demo",
                         theme='huggingface',
                         layout='horizontal',
                         live=True)
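# NOTE (assumption): the description refers to clickable examples, but no `examples=`
# argument is passed to gr.Interface above; supplying one, e.g. examples=[[0, 0, 0, 0.1]],
# would make example input rows appear below the interface.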
interface.launch(debug=True)