#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer



def g(z):
    """
    Sigmoid activation function.

    z: scalar or ndarray of pre-activations.
    Returns values in the open interval (0, 1), elementwise for arrays.
    """
    # Clip to avoid overflow in np.exp for very negative z; the result is
    # unchanged to double precision (sigmoid saturates long before |z|=500).
    z = np.clip(z, -500.0, 500.0)
    a = 1.0 / (1 + np.exp(-z))
    return a


def g_1(z):
    """
    First derivative of the sigmoid activation function.

    z: scalar or ndarray of pre-activations.
    Returns sigmoid(z) * (1 - sigmoid(z)), elementwise for arrays.
    """
    # Evaluate the sigmoid once instead of twice (the original called g(z)
    # two times for the same value).
    s = g(z)
    a_1 = s * (1 - s)
    return a_1
    
def nn_train(X, Y, alpha):
    """
    Train a two-layer (one hidden layer) neural network by batch
    gradient descent: tanh hidden units, one sigmoid output unit.

    X: input data, shape (nx, m) — one column per sample.
    Y: labels (0/1), shape (m,) or (1, m).
    alpha: learning rate; also sets the iteration count int(120/alpha).
    Returns the tuple (W1, b1, W2, b2) of trained parameters.
    """
    nx, m = X.shape
    n0 = nx
    n1 = n0      # hidden layer width kept equal to the input width
    n2 = 1       # single sigmoid output unit
    # Zero-mean random initialization, small scale so tanh/sigmoid start
    # unsaturated.  (np.random.random would give all-positive weights, and
    # W2 was previously left unscaled — inconsistent with W1.)
    W1 = np.random.randn(n1, n0) * 0.01
    b1 = np.zeros((n1, 1))
    W2 = np.random.randn(n2, n1) * 0.01
    b2 = np.zeros((n2, 1))
    # Force Y into a (1, m) row vector so dZ2 = A2 - Y has shape (1, m)
    # regardless of whether the caller passed a flat (m,) label array.
    Y = np.asarray(Y).reshape(1, -1)
    loopnum = int(120.0 / alpha)
    for _ in np.arange(loopnum):
        # forward pass
        Z1 = np.dot(W1, X) + b1
        A1 = np.tanh(Z1)
        Z2 = np.dot(W2, A1) + b2
        A2 = g(Z2)
        # backward pass (cross-entropy loss + sigmoid output => dZ2 = A2 - Y)
        dZ2 = A2 - Y
        dW2 = (1.0 / m) * np.dot(dZ2, A1.T)
        db2 = (1.0 / m) * np.sum(dZ2, axis=1, keepdims=True)
        # tanh'(Z1) = 1 - tanh(Z1)^2; reuse A1 instead of recomputing tanh
        dZ1 = np.dot(W2.T, dZ2) * (1 - A1 ** 2)
        dW1 = (1.0 / m) * np.dot(dZ1, X.T)
        db1 = (1.0 / m) * np.sum(dZ1, axis=1, keepdims=True)
        # gradient-descent parameter update
        W1 = W1 - alpha * dW1
        b1 = b1 - alpha * db1
        W2 = W2 - alpha * dW2
        b2 = b2 - alpha * db2

    return (W1, b1, W2, b2)

def nn_test(X_test, W1, b1, W2, b2):
    """
    Forward-propagate test samples through the trained two-layer network.

    X_test: inputs of shape (nx, m), one column per sample.
    W1, b1, W2, b2: trained parameters from nn_train.
    Returns an (m, 1) column of sigmoid output probabilities.
    """
    hidden = np.tanh(np.dot(W1, X_test) + b1)
    logits = np.dot(W2, hidden) + b2
    # sigmoid activation on the single output unit
    probs = 1.0 / (1.0 + np.exp(-logits))
    return probs.T

def error(pre, test):
    """
    Return the list of indices where predictions disagree with the labels.

    pre: predicted labels — a flat sequence or an (m, 1) column array.
    test: true labels, same length as pre.
    """
    # Flatten first: the original compared a scalar against a size-1 row of
    # an (m, 1) array, relying on 1-element-array truthiness (deprecated in
    # newer NumPy).  After ravel both sides compare as plain scalars.
    pre_flat = np.ravel(pre)
    return [i for i, t in enumerate(test) if float(t) != float(pre_flat[i])]

def plo(XX):
    """
    Convert 2-D Cartesian points (x, y) into polar coordinates (r, theta).

    XX: array of shape (m, 2); column 0 is x, column 1 is y.
    Returns a new (m, 2) array with radius in column 0 and angle (from
    np.arctan2, in radians) in column 1.  The input is left untouched.
    """
    xs = XX[:, 0]
    ys = XX[:, 1]
    polar = np.copy(XX)
    polar[:, 0] = np.sqrt(xs ** 2 + ys ** 2)
    polar[:, 1] = np.arctan2(ys, xs)
    return polar


# Toy dataset: two interleaving half-moons with a little noise.
X, y = datasets.make_moons(2000, noise=0.02)
#X, y = datasets.make_circles(2000, noise=0.02)  # alternative toy dataset

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Polar features (radius, angle) make the moons nearly linearly separable.
X_train = plo(X_train)
X_test_plo = plo(X_test)

# Hand-rolled two-layer network (columns = samples, hence the transposes).
W1, b1, W2, b2 = nn_train(X=X_train.T, Y=y_train, alpha=1e-2)
y_pro = nn_test(X_test_plo.T, W1, b1, W2, b2)

# Threshold the probabilities at 0.4.  Using a single >= comparison also
# classifies values exactly equal to 0.4, which the old pair of
# strict > / < assignments left untouched.
y_pre = np.where(y_pro >= 0.4, 1.0, 0.0)

# scikit-learn MLP as a reference model.  hidden_layer_sizes must be the
# 1-tuple (4,) — (4) is just the int 4.
clf = MLPClassifier(solver='lbfgs', alpha=1e-4, hidden_layer_sizes=(4,),
                    random_state=1)
clf.fit(X_train, y_train)
y_precdit = clf.predict(X_test_plo)

# Error rates of both models on the held-out set.
error_nn = error(y_pre, y_test)
error_mlp = error(y_precdit, y_test)
error_rate1 = len(error_nn) / len(y_test)
error_rate2 = len(error_mlp) / len(y_test)
# Label the two lines so the outputs are distinguishable.
print("nn error rate = {}%".format(error_rate1 * 100))
print("mlp error rate = {}%".format(error_rate2 * 100))

# Plot both predictions side by side in the original Cartesian space.
plt.figure(num=1, figsize=(12, 6))

plt.subplot(121)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_pre)
plt.title('my predict')
plt.subplot(122)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_precdit)
plt.title('mlp')
plt.show()


    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    