import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import matplotlib.cm as cm
from sklearn.model_selection import train_test_split
import matplotlib.gridspec as gridspec


def build_data():
    """Generate a two-class 2-D toy dataset of 600 points (300 per class).

    Both classes share the same covariance structure (samples drawn from a
    standard normal and mapped through ``c``); class 1 is shifted by (1, 1).
    Seeded for reproducibility.

    Returns:
        x: float array of shape (600, 2) — the samples.
        y: int array of shape (600,) — labels, 0 for the first 300 rows, 1 for the rest.
    """
    np.random.seed(1)
    c = np.array([[0.0, -0.23], [0.83, 0.23]])
    x = np.r_[np.dot(np.random.randn(300, 2), c),
              np.dot(np.random.randn(300, 2), c) + np.array([1, 1])]
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement.
    y = np.hstack((np.zeros(300), np.ones(300))).astype(int)
    return x, y


def sigmoid(x, derivative=False):
    """Element-wise logistic sigmoid of ``x`` as a TF tensor.

    With ``derivative=True``, returns s(x) * (1 - s(x)) instead, where s is
    the sigmoid — i.e. the derivative expressed through the sigmoid of ``x``.
    """
    s = tf.sigmoid(x)
    if not derivative:
        return s
    return s * (1 - s)


class Perceptorn:
    """Single-layer perceptron with a sigmoid activation, trained by
    mini-batch gradient descent on the squared error.

    The class name keeps the original 'Perceptorn' spelling for backward
    compatibility with existing callers.

    Weights hold ``x_dim`` input coefficients plus one trailing bias weight;
    inputs are augmented with a constant-1 bias column by :meth:`add_bias`.
    """

    def __init__(self, x_dim):
        n_weights = x_dim + 1  # +1 for the bias weight
        self.activation = sigmoid
        self.n_weights = n_weights
        # NOTE(review): this init draws from [0, 0.5); if a symmetric init
        # was intended it should be (2 * rand - 1) * 0.25. Kept as-is to
        # preserve the original behavior.
        self.weights = ((2 * np.random.random(n_weights)) * 0.25).reshape(-1, 1)
        self.train_errors = []       # mean-squared training error, one entry per epoch
        self.validation_errors = []  # mean-squared validation error, one entry per epoch
        self.sess = tf.InteractiveSession()

    @staticmethod
    def add_bias(x):
        """Append a constant bias column to ``x`` (shape (n, d) -> (n, d+1)).

        Bug fix: the bias column must be 1, not 0 — the original filled it
        with zeros, which made the bias weight have no effect at all.
        """
        temp = np.ones([x.shape[0], x.shape[1] + 1])
        temp[:, :-1] = x
        return temp

    def predict(self, inputs):
        """Return the activation tensor for ``inputs`` (already bias-augmented)."""
        return self.activation(np.dot(inputs, self.weights))

    def train(self, x_train, y_train, x_val, y_val, learning_rate=0.2, epochs=10, batch_size=50, weight_decay=0):
        """Fit the weights with mini-batch gradient descent.

        Fixes relative to the original implementation:
        - ``y_train`` is shuffled with the SAME permutation as ``x_train``
          (labels previously lost their pairing with the samples after the
          first epoch).
        - targets are reshaped to a column vector so ``y - output`` is a
          (batch, 1) residual instead of a broadcast (batch, batch) matrix.
        - the sigmoid derivative is computed as ``output * (1 - output)``;
          the original re-applied sigmoid to an already-activated value.
        - ``learning_rate`` and ``weight_decay`` are actually used (both
          were silently ignored before).
        - per-epoch mean-squared errors on the train and validation sets are
          recorded in ``self.train_errors`` / ``self.validation_errors``.
        """
        x_train = self.add_bias(x_train)
        y_train = np.asarray(y_train).reshape(-1, 1)
        y_val_flat = np.asarray(y_val).ravel()

        for epoch in range(epochs):
            data_size = len(x_train)
            shuffle_index = np.random.permutation(np.arange(data_size))
            x_train = x_train[shuffle_index]
            y_train = y_train[shuffle_index]  # keep labels paired with samples
            number_batch = ((data_size - 1) // batch_size)
            for batch_num in range(number_batch):
                start_index = batch_num * batch_size
                end_index = (batch_num + 1) * batch_size
                x = x_train[start_index:end_index]
                y = y_train[start_index:end_index]

                output = self.sess.run(self.predict(x))  # (batch, 1) sigmoid scores
                error = y - output                       # (batch, 1) residual
                # d(sigmoid)/dz evaluated at the activated output, averaged
                # over the batch.
                gradient = np.dot(x.T, error * output * (1 - output)) / len(x)
                self.weights += learning_rate * (gradient - weight_decay * self.weights)

            # Track per-epoch errors; x_train already carries the bias column.
            train_pred = self.decision_function(x_train, addbias=False)
            self.train_errors.append(float(np.mean((y_train.ravel() - train_pred) ** 2)))
            val_pred = self.decision_function(x_val)
            self.validation_errors.append(float(np.mean((y_val_flat - val_pred) ** 2)))

    def decision_function(self, X, addbias=True):
        """Return sigmoid scores in [0, 1] for ``X`` as a flat numpy array.

        - if addbias is True, a constant-1 entry is appended to each row of
          ``X`` to represent the bias.
        """
        if addbias:
            X = self.add_bias(X)
        return self.sess.run(self.activation(X.dot(self.weights).flatten()))


def show_decision_boundary(clf, X, y, subplot_spec=None):
    """Plot a 2-D dataset together with a classifier's decision function.

    Evaluates ``clf.decision_function`` over a regular mesh covering ``X``
    (with a 1-unit margin), draws it as a grayscale contour plot, overlays
    the samples colored by label, and adds a colorbar in a narrow side axis.

    Args:
        clf: object exposing ``decision_function(points)`` returning scores
            in [0, 1] for an (n, 2) array of points.
        X: array of shape (n, 2) — the samples.
        y: array of shape (n,) — integer labels used for coloring/legend.
        subplot_spec: optional gridspec SubplotSpec to nest the plot into;
            when None a fresh top-level GridSpec is used.
    """
    assert X.shape[1] == 2
    wratio = (15, 1)  # wide plot axis + narrow colorbar axis
    if subplot_spec is None:
        gs = gridspec.GridSpec(1, 2, width_ratios=wratio)
    else:
        gs = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=subplot_spec, width_ratios=wratio)

    ax = plt.subplot(gs[0])
    ax.set_title('Dataset and decision function')

    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    h = 0.2  # step size in the meshgrid
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    ctr = ax.contourf(xx, yy, Z, cmap=cm.gray, vmin=0, vmax=1)

    unique_labels = np.unique(y)
    colors = cm.Paired(np.linspace(0, 1, num=len(unique_labels)))
    for i, yi in enumerate(unique_labels):
        # Wrap the single RGBA row in a list: passing a bare length-4 array
        # to `c` makes matplotlib warn that it could be numeric color-mapped
        # data rather than one color.
        ax.scatter(X[y == yi, 0], X[y == yi, 1], c=[colors[i]], linewidth=0, label='%d' % yi)
    ax.legend()
    ax.set_xlim((x_min, x_max))
    ax.set_ylim((y_min, y_max))

    plt.colorbar(ctr, cax=plt.subplot(gs[1]))
    plt.show()


def main():
    """Build the toy dataset, fit a perceptron, and plot its decision boundary."""
    features, labels = build_data()
    x_train, x_val, y_train, y_val = train_test_split(features, labels, test_size=0.2)
    model = Perceptorn(features.shape[1])
    model.train(x_train, y_train, x_val, y_val)
    show_decision_boundary(model, features, labels)


if __name__ == '__main__':
    main()
