import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from matplotlib.colors import colorConverter, ListedColormap
from sklearn.utils import shuffle
from sklearn.preprocessing import OneHotEncoder


def one_hot(data, start, end):
    """Return a dense one-hot encoding of integer labels.

    Equivalent to fitting a ``sklearn.preprocessing.OneHotEncoder`` on the
    category range ``[start, end)`` and transforming ``data``, but done
    directly with NumPy (no estimator round-trip, no sparse intermediate).

    Parameters
    ----------
    data : array-like of shape (n_samples, 1)
        Integer class labels, each expected to lie in ``[start, end)``.
    start, end : int
        Half-open range of possible label values; the output has
        ``end - start`` columns.

    Returns
    -------
    numpy.ndarray of shape (n_samples, end - start), dtype float64
        Row i is all zeros except a 1.0 in column ``data[i] - start``.
    """
    labels = np.asarray(data, dtype=np.int64).ravel()
    # Row-select from an identity matrix: row k of eye(n) is the
    # one-hot vector for class k.
    return np.eye(end - start, dtype=np.float64)[labels - start]


def generate(sample_size, num_classes, diff, regression):
    """Generate a 2-D Gaussian-mixture dataset for classification demos.

    Class 0 is drawn from N(mean, I) with a random base mean; each entry of
    ``diff`` produces one additional class whose mean is shifted by that
    offset. All classes share the identity covariance.

    Parameters
    ----------
    sample_size : int
        Total number of samples, split evenly across ``num_classes``
        (any remainder is dropped).
    num_classes : int
        Number of classes; should equal ``len(diff) + 1`` for the class
        count and sample count to be consistent.
    diff : sequence of length-2 offsets
        Mean shifts for classes 1..num_classes-1.
    regression : bool
        If True, return integer-valued labels of shape (n,);
        if False, return one-hot labels of shape (n, num_classes).

    Returns
    -------
    (x, y)
        Shuffled features of shape (n, 2) and the matching labels.
    """
    # Fixed seed: every call reproduces the same dataset.
    np.random.seed(10)
    mean = np.random.randn(2)
    cov = np.eye(2)

    # Floor division (exact integer arithmetic) instead of
    # int(float-division), which relies on float truncation.
    sample_per_class = sample_size // num_classes

    x0 = np.random.multivariate_normal(mean, cov, sample_per_class)
    y0 = np.zeros(sample_per_class)

    for ci, d in enumerate(diff):
        x1 = np.random.multivariate_normal(mean + d, cov, sample_per_class)
        y1 = (ci + 1) * np.ones(sample_per_class)

        x0 = np.concatenate((x0, x1))
        y0 = np.concatenate((y0, y1))

    if not regression:
        # Classification targets: one-hot encode the integer labels.
        y0 = np.reshape(y0, [-1, 1])
        y0 = one_hot(y0.astype(np.int32), 0, num_classes)

    # sklearn.utils.shuffle permutes features and labels in lockstep.
    x, y = shuffle(x0, y0)

    return x, y


# Ensure we always get the same amount of randomness
np.random.seed(10)

input_dim = 2
num_classes = 4
X, Y = generate(320, num_classes, [[3.0, 0], [3.0, 3.0], [0, 3.0]], True)
# Fold the four class labels {0,1,2,3} onto {0,1}: opposite corners of the
# Gaussian grid share a label, giving an XOR-like binary problem.
Y = Y % 2

# Partition the samples by binary label for the two-colour scatter plot.
red_points = [[px, py] for (px, py), label in zip(X, Y) if label == 0.0]
blue_points = [[px, py] for (px, py), label in zip(X, Y) if label != 0.0]
xr = np.array(red_points)
xb = np.array(blue_points)
plt.scatter(xr[:, 0], xr[:, 1], c='r', marker='+')
plt.scatter(xb[:, 0], xb[:, 1], c='b', marker='o')

plt.show()
# Column vector shape (n, 1) to match the network's label placeholder.
Y = np.reshape(Y, [-1, 1])

learning_rate = 1e-4
n_input = 2   # 2-D feature vectors
n_label = 1   # single scalar output (binary target in {0, 1})
# n_hidden = 2  # underfits with only 2 hidden units
n_hidden = 200

# Graph inputs (TF1-style placeholders, fed via feed_dict at run time).
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_label])

# Two-layer network parameters, small random init for weights, zeros for biases.
weights = {
    'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden], stddev=0.1)),
    'h2': tf.Variable(tf.random_normal([n_hidden, n_label], stddev=0.1))
}
biases = {
    'h1': tf.Variable(tf.zeros([n_hidden])),
    'h2': tf.Variable(tf.zeros([n_label]))
}

# Hidden layer: ReLU(x @ W1 + b1).
layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['h1']))
# y_pred = tf.nn.tanh(tf.add(tf.matmul(layer_1, weights['h2']),biases['h2']))
# y_pred = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['h2']),biases['h2']))  # can get stuck in a local optimum
# y_pred = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['h2']),biases['h2']))

# Leaky ReLU output: max(z, 0.01*z). (Author note: OK after ~40000 steps.)
layer2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['h2'])
y_pred = tf.maximum(layer2, 0.01 * layer2)

# Mean-squared-error loss on the raw (unsquashed) prediction, Adam updates.
loss = tf.reduce_mean((y_pred - y) ** 2)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)

# Create the session and initialize all variables.
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# Full-batch Adam training; report the loss every 1000 steps.
for step in range(20000):
    _, current_loss = sess.run([train_step, loss], feed_dict={x: X, y: Y})
    if step % 1000 == 0:
        print("Step:", step, "Current loss:", current_loss)


# xr = []
# xb = []
# for (l, k) in zip(Y[:], X[:]):
#     if l == 0.0:
#         xr.append([k[0], k[1]])
#     else:
#         xb.append([k[0], k[1]])
# xr = np.array(xr)
# xb = np.array(xb)
# plt.scatter(xr[:, 0], xr[:, 1], c='r', marker='+')
# plt.scatter(xb[:, 0], xb[:, 1], c='b', marker='o')
#
# nb_of_xs = 200
# xs1 = np.linspace(-3, 10, num=nb_of_xs)
# xs2 = np.linspace(-3, 10, num=nb_of_xs)
# xx, yy = np.meshgrid(xs1, xs2)  # create the grid
# # Initialize and fill the classification plane
# classification_plane = np.zeros((nb_of_xs, nb_of_xs))
# for i in range(nb_of_xs):
#     for j in range(nb_of_xs):
#         # classification_plane[i,j] = nn_predict(xx[i,j], yy[i,j])
#         classification_plane[i, j] = sess.run(y_pred, feed_dict={x: [[xx[i, j], yy[i, j]]]})
#         classification_plane[i, j] = int(classification_plane[i, j])
#
# # Create a color map to show the classification colors of each grid point
# cmap = ListedColormap([
#     colorConverter.to_rgba('r', alpha=0.30),
#     colorConverter.to_rgba('b', alpha=0.30)])
# # Plot the classification plane with decision boundary and input samples
# plt.contourf(xx, yy, classification_plane, cmap=cmap)
# plt.show()

# Generate a small held-out set from the same distribution for evaluation.
xTrain, yTrain = generate(12, num_classes, [[3.0, 0], [3.0, 3.0], [0, 3.0]], True)
yTrain = yTrain % 2  # same 4-classes-to-2 label reduction as the training data

# Split the evaluation points by binary label for plotting.
xr = []
xb = []
for (l, k) in zip(yTrain[:], xTrain[:]):
    if l == 0.0:
        xr.append([k[0], k[1]])
    else:
        xb.append([k[0], k[1]])
xr = np.array(xr)
xb = np.array(xb)
plt.scatter(xr[:, 0], xr[:, 1], c='r', marker='+')
plt.scatter(xb[:, 0], xb[:, 1], c='b', marker='o')

# plt.show()
yTrain = np.reshape(yTrain, [-1, 1])  # (n, 1) to match the y placeholder
print("loss:\n", sess.run(loss, feed_dict={x: xTrain, y: yTrain}))

# Evaluate the trained network on a dense 200x200 grid over [-1, 8]^2 to
# visualize the learned decision surface.
nb_of_xs = 200
xs1 = np.linspace(-1, 8, num=nb_of_xs)
xs2 = np.linspace(-1, 8, num=nb_of_xs)
xx, yy = np.meshgrid(xs1, xs2)  # create the grid
# Initialize and fill the classification plane
classification_plane = np.zeros((nb_of_xs, nb_of_xs))
for i in range(nb_of_xs):
    for j in range(nb_of_xs):
        # classification_plane[i,j] = nn_predict(xx[i,j], yy[i,j])
        # One sess.run per grid point (slow but simple); the raw leaky-ReLU
        # output is then truncated toward zero by int() to pick the class.
        classification_plane[i, j] = sess.run(y_pred, feed_dict={x: [[xx[i, j], yy[i, j]]]})
        classification_plane[i, j] = int(classification_plane[i, j])

# Create a color map to show the classification colors of each grid point
cmap = ListedColormap([
    colorConverter.to_rgba('r', alpha=0.30),
    colorConverter.to_rgba('b', alpha=0.30)])
# Plot the classification plane with decision boundary and input samples
plt.contourf(xx, yy, classification_plane, cmap=cmap)
plt.show()










