import gradio as gr
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import log_loss
from io import BytesIO
from PIL import Image

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    return sigmoid(z) * (1.0 - sigmoid(z))

def cross_entropy(weights, bias, X_train, y_train):
    # mean cross-entropy (log) loss of the network's predictions on the training set
    predictions = [feedforward(X_train[k], bias, weights).reshape(-1,) for k in range(len(X_train))]
    return log_loss(y_train, predictions, labels=[1.0, 0.0])

def feedforward(x, bias, weights):
    # propagate the input through each layer: a = sigmoid(W a + b)
    for b, w in zip(bias, weights):
        x = sigmoid(np.dot(w, x) + b)
    return x

def update_mini_batch(weights, bias, X_mini, y_mini, eta):
    # accumulate the gradients of every sample in the mini-batch,
    # then take one gradient-descent step of size eta
    derivative_b = [np.zeros(b.shape) for b in bias]
    derivative_w = [np.zeros(w.shape) for w in weights]
    for i in range(len(y_mini)):
        x_mi = X_mini[i]
        y_mi = y_mini[i]
        derivative_b_sample, derivative_w_sample = backprop(weights, bias, x_mi, y_mi)
        for j in range(len(derivative_b)):
            derivative_b[j] += derivative_b_sample[j]
            derivative_w[j] += derivative_w_sample[j]
    for i in range(len(weights)):
        weights[i] -= (eta / len(y_mini)) * derivative_w[i]
        bias[i] -= (eta / len(y_mini)) * derivative_b[i]
    return (weights, bias)
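
# NOTE: update_mini_batch calls a `backprop` helper that is not defined in this
# file. The sketch below is an assumption rather than the author's original code:
# it computes per-sample gradients for a fully connected sigmoid network with the
# cross-entropy cost (matching cross_entropy above), using the same
# (weights, bias) conventions as feedforward.
def backprop(weights, bias, x, y):
    derivative_b = [np.zeros(b.shape) for b in bias]
    derivative_w = [np.zeros(w.shape) for w in weights]
    # forward pass, storing pre-activations (zs) and activations layer by layer
    activation = x
    activations = [x]
    zs = []
    for b, w in zip(bias, weights):
        z = np.dot(w, activation) + b
        zs.append(z)
        activation = sigmoid(z)
        activations.append(activation)
    # backward pass: with a sigmoid output layer and cross-entropy cost the
    # output error simplifies to (a - y)
    delta = activations[-1] - y
    derivative_b[-1] = delta
    derivative_w[-1] = np.outer(delta, activations[-2])
    for l in range(2, len(weights) + 1):
        delta = np.dot(weights[-l + 1].T, delta) * sigmoid_prime(zs[-l])
        derivative_b[-l] = delta
        derivative_w[-l] = np.outer(delta, activations[-l - 1])
    return (derivative_b, derivative_w)
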
input_neurons = 10
hidden_neurons = 2
output_neurons = 1
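
# Pre-trained parameters for the 10-2-1 palindrome classifier (presumably produced
# by the training routine above). Note they are stored transposed relative to
# feedforward's W @ x convention, because visualize_neural applies them as x @ W.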
weights_input_hidden = np.array([[ -7.22321659, -11.15471068, -30.00484398, 13.08466657,
-15.0766471 , 14.81645208, -13.11446119, 29.95009006,
10.9668077 , 7.11140227],
[ 7.48857155, 11.20225432, 30.07325613, -13.1441687 ,
15.08768163, -15.10126949, 13.11893102, -30.1417512 ,
-11.18918466, -7.44585737]]).T
biases_hidden = np.array([[1.62426502],
[1.63230885]]).reshape(-1)
weights_hidden_output = np.array([[23.73266741, 23.84381938]]).T
bias_output = np.array([[-35.80629779]]).reshape(-1)

def visualize_neural(a):
    # forward pass with the pre-trained weights, keeping the intermediate values for display
    input_values = [int(i) for i in list(a)]
    hidden_layer_input = np.dot(input_values, weights_input_hidden) + biases_hidden
    hidden_layer_output = np.array([round(i, 3) for i in sigmoid(hidden_layer_input)])
    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output) + bias_output
    output = sigmoid(output_layer_input)
    # build a graph whose nodes are neurons (labelled with their activations)
    # and whose edges carry the connection weights
    G = nx.DiGraph()
    for i in range(input_neurons):
        G.add_node(f'{i}={input_values[i]}', pos=(0, i))
    for i in range(hidden_neurons):
        G.add_node(f'{i},f({np.round(hidden_layer_input[i], 3)})={np.round(hidden_layer_output[i], 3)}', pos=(1, 0.5 * (input_neurons - 1 - hidden_neurons) + i))
    for i in range(output_neurons):
        G.add_node(f'Output {i+1}={np.round(output[0], 3)}', pos=(2, 4))
    for i in range(input_neurons):
        for j in range(hidden_neurons):
            weight = weights_input_hidden[i][j]
            G.add_edge(f'{i}={input_values[i]}', f'{j},f({np.round(hidden_layer_input[j], 3)})={np.round(hidden_layer_output[j], 3)}', weight=f"W[{j,i}]={weight}")
    for i in range(hidden_neurons):
        for j in range(output_neurons):
            weight = weights_hidden_output[i][j]
            G.add_edge(f'{i},f({np.round(hidden_layer_input[i], 3)})={np.round(hidden_layer_output[i], 3)}', f'Output {j+1}={np.round(output[0], 3)}', weight=weight)
    pos = nx.get_node_attributes(G, 'pos')
    plt.figure(figsize=(10, 5), dpi=200)
    nx.draw(G, pos, with_labels=True, node_size=1e+3, node_color='lightblue', font_size=3, font_weight='bold')
    labels = nx.get_edge_attributes(G, 'weight')
    nx.draw_networkx_edge_labels(G, pos, edge_labels=labels, font_size=3, label_pos=0.8)
    plt.title('Neural Network Graph')
    buffer = BytesIO()
    plt.savefig(buffer, format='png')  # save the plot to the buffer in PNG format
    buffer.seek(0)
    image = Image.open(buffer)
    plt.close()  # close the figure so it is not displayed
    # convert the PIL image to a numpy array for Gradio
    image_array = np.array(image)
    if output[0] < 0.5:
        return image_array, "Not a palindrome"
    else:
        return image_array, "Palindrome"
title="Implementation of Backpropagation and Training a Palindrome Network"
demo = gr.Interface(fn=visualize_neural, inputs=gr.Textbox(label="Input a binary string of length 10"), outputs=[gr.Image(label="Neural network visualization"), gr.Textbox(label="Output")], title=title)
demo.launch()