import numpy as np
import matplotlib.pyplot as plt

# Toy training set: five samples with three features each, plus
# one scalar target per sample.
X = np.array(
    [
        [1, 2, 3],
        [3, 4, 5],
        [5, 6, 7],
        [7, 8, 9],
        [9, 8, 7],
    ]
)
Y = np.array([1, 2, 3, 4, 5])
print(X.shape)
print(Y.shape)


# Visualise the five samples as a 3-D scatter plot, one axis per feature.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], marker='o', c='r')


# Hyper-parameters and layer dimensions.
no_of_inputs = X.shape[1]  # one weight per input feature
epochs = 50
learning_rate = 0.01
# NOTE(review): this module-level weight vector looks unused — the
# Perceptron class below initialises its own zero weights. Verify no
# later code reads it before removing.
weights = np.random.rand(no_of_inputs + 1)
print(weights.shape)


def relu_activation(sum):
  """Rectified linear unit: pass positive values through, clamp the rest to 0.

  NOTE(review): the parameter shadows the builtin ``sum``; renaming it
  would break keyword callers, so flag it for the next breaking change.
  """
  return sum if sum > 0 else 0


class Perceptron(object):
    """Single-layer perceptron trained with per-sample delta-rule updates.

    Weights live in ``self.weights`` with the bias at index 0 and one
    weight per input feature at indices 1..n.
    """

    def __init__(self, no_of_inputs, activation, learning_rate=0.01):
        """Create a zero-initialised perceptron.

        Bug fix: the original read a module-level ``learning_rate``
        global. It is now an explicit parameter whose default matches
        the old global value, so the class is self-contained and
        existing two-argument callers are unaffected.
        """
        self.learning_rate = learning_rate
        self.weights = np.zeros(no_of_inputs + 1)  # [bias, w_1..w_n]
        self.activation = activation

    def predict(self, inputs):
        """Return ``activation(w · inputs + bias)`` for one sample."""
        summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
        return self.activation(summation)

    def train(self, training_inputs, training_labels, epochs=100, learning_rate=0.01):
        """Run ``epochs`` full passes of delta-rule updates over the data.

        Returns the list of squared per-sample losses in update order.

        Bug fix: the ``learning_rate`` argument was previously accepted
        but silently ignored (updates always used ``self.learning_rate``).
        It is now honoured and stored on the instance.
        """
        self.learning_rate = learning_rate
        history = []
        for _ in range(epochs):
            for inputs, label in zip(training_inputs, training_labels):
                prediction = self.predict(inputs)
                loss = (label - prediction)  # signed error drives the update
                loss2 = loss * loss
                history.append(loss2)
                print(f"loss = {loss2}")
                # Delta rule: move weights proportionally to error * input.
                self.weights[1:] += self.learning_rate * loss * inputs
                self.weights[0] += self.learning_rate * loss
        return history


# Driver: train a fresh perceptron on the toy data set defined above
# and keep the per-sample squared-loss history.
perceptron = Perceptron(no_of_inputs, relu_activation)
history = perceptron.train(X,Y, epochs=epochs)