#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import average_precision_score

def sigmoid(z):
    """Logistic sigmoid activation: 1 / (1 + e^-z).

    z may be a scalar or ndarray. The argument is clipped to [-500, 500]
    before exponentiation: np.exp overflows float64 for arguments beyond
    ~709, raising RuntimeWarning, while sigmoid(±500) is already
    indistinguishable from 0/1 in float64 — so clipping changes nothing
    numerically but keeps the computation warning-free.
    """
    z = np.clip(z, -500.0, 500.0)
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_derivate(z):
    """Derivative of the logistic sigmoid: s * (1 - s), where s = sigmoid(z).

    The sigmoid is evaluated once and reused, rather than twice.
    """
    s = 1.0 / (1.0 + np.exp(-z))
    return s * (1.0 - s)

def tanh(z):
    """Hyperbolic-tangent activation — a thin wrapper around np.tanh."""
    return np.tanh(z)

def tanh_derivate(z):
    """Derivative of tanh: 1 - tanh(z)^2."""
    t = np.tanh(z)
    return 1.0 - t * t


class BPNeuralNetwork:
    """A single-hidden-layer feed-forward network trained by batch
    gradient descent (backpropagation).

    Architecture: tanh hidden layer -> sigmoid output layer. The
    gradient dZ2 = A2 - y corresponds to binary cross-entropy loss with
    a sigmoid output.

    Data layout: inputs X are column-major samples, shape
    (n_features, m) with m the batch size.
    """

    def __init__(self, n0, n1, n2=1):
        """
        n0: number of input cells
        n1: number of hidden cells
        n2: number of output cells
        """
        self.input_n = n0
        self.hidden_n = n1
        self.output_n = n2
        # Small random weights break symmetry between hidden units;
        # biases may start at zero.
        self.input_weights = np.random.random((self.hidden_n, self.input_n)) * 0.01
        self.input_b = np.zeros((self.hidden_n, 1))
        self.output_weights = np.random.random((self.output_n, self.hidden_n)) * 0.01
        self.output_b = np.zeros((self.output_n, 1))

    def feedforward(self, X):
        """Run the forward pass on X of shape (n_features, m).

        Caches Z1/A1/Z2/A2 and the batch size m for use by
        feedbackward. Returns A2, shape (output_n, m), the sigmoid
        output probabilities.
        """
        _, self.m = X.shape
        self.Z1 = np.dot(self.input_weights, X) + self.input_b
        self.A1 = np.tanh(self.Z1)
        self.Z2 = np.dot(self.output_weights, self.A1) + self.output_b
        self.A2 = 1.0 / (1.0 + np.exp(-self.Z2))
        return self.A2

    def feedbackward(self, X, y):
        """Compute gradients dW1/db1/dW2/db2 for inputs X, labels y.

        Performs its own forward pass first; callers do NOT need to
        call feedforward beforehand.
        """
        self.dZ2 = self.feedforward(X) - y
        self.dW2 = np.dot(self.dZ2, self.A1.T) / self.m
        self.db2 = np.sum(self.dZ2, axis=1, keepdims=True) / self.m
        # tanh'(Z1) == 1 - tanh(Z1)^2 == 1 - A1^2; reuse the cached A1
        # instead of recomputing tanh.
        self.dZ1 = np.dot(self.output_weights.T, self.dZ2) * (1.0 - self.A1 ** 2)
        self.dW1 = np.dot(self.dZ1, X.T) / self.m
        self.db1 = np.sum(self.dZ1, axis=1, keepdims=True) / self.m

    def train(self, train_data, label, alpha, limit):
        """Batch gradient descent: `limit` iterations at learning rate
        `alpha` over the full batch (train_data, label).

        The original code called feedforward AND feedbackward each
        iteration, but feedbackward already runs the forward pass — the
        extra call doubled the per-iteration cost without changing the
        result.
        """
        self.alpha = alpha
        self.limit = limit
        for _ in range(limit):
            self.feedbackward(train_data, label)
            self.input_weights -= alpha * self.dW1
            self.output_weights -= alpha * self.dW2
            self.input_b -= alpha * self.db1
            self.output_b -= alpha * self.db2

    def test(self, test_data):
        """Return predicted probabilities, transposed to (m, output_n)."""
        return self.feedforward(test_data).T
    

def error(pre, test):
    """Return the list of indices where prediction and label disagree.

    pre:  predicted labels, any array-like (e.g. an (m, 1) column of
          0.0/1.0 values); flattened before comparison.
    test: true labels, same number of elements as pre.

    The original looped in Python and relied on the truthiness of a
    1-element ndarray (`float(test[i]) != pre[i]`); this vectorized
    form compares once in numpy and returns the same index list.
    """
    pre = np.asarray(pre, dtype=float).ravel()
    test = np.asarray(test, dtype=float).ravel()
    return np.flatnonzero(pre != test).tolist()


def plo(XX):
    """Convert the first two columns of XX from Cartesian (x, y) to
    polar (r, theta).

    Column 0 becomes the radius sqrt(x^2 + y^2); column 1 becomes the
    angle arctan2(y, x). XX itself is not modified — a transformed
    copy is returned.
    """
    x = XX[:, 0]
    y = XX[:, 1]
    out = XX.copy()
    out[:, 0] = np.sqrt(x ** 2 + y ** 2)
    out[:, 1] = np.arctan2(y, x)
    return out


# --- Script: train the BP network on the two-moons dataset and plot ---
# Generate 3000 two-moons samples with light noise, then hold out 30% for
# testing. No random_state is passed, so results vary between runs.
X, y = datasets.make_moons(3000,noise=0.05)
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3)
# Keep the untransformed Cartesian test points for plotting below.
X_test_plt = np.copy(X_test)
# Map both splits to polar coordinates (radius, angle) — this makes the
# two moons easier to separate for such a tiny network.
X_train = plo(X_train)
X_test = plo(X_test)


# 2 inputs (r, theta), 2 hidden units, 1 sigmoid output.
BP = BPNeuralNetwork(2,2,1)
# Data is passed transposed: the network expects (n_features, m) columns.
BP.train(X_train.T,y_train,0.1,7000)
y_pro = BP.test(X_test.T)  # predicted probabilities, shape (m, 1)
y_pre = np.copy(y_pro)
# Threshold probabilities into hard 0/1 labels.
# NOTE(review): a value exactly equal to 0.5 matches neither branch and
# is left unchanged — the second comparison should likely be <= 0.5.
y_pre[y_pre>0.5] = 1
y_pre[y_pre<0.5] = 0
# NOTE(review): average_precision_score is normally given probabilities
# (y_pro), not thresholded labels; with hard labels it degenerates.
score = average_precision_score(y_test,y_pre)
error_bp = error(y_pre,y_test)  # indices of misclassified test points
print("probability of test = {}".format(y_pro))
print("error rate ={}% ".format( len(error_bp)/len(y_test)*100))
print("score = {}".format(score))

# Relabel the misclassified points as class 2 so they get a third color
# in the scatter plot below.
y_pre[error_bp,0] = 2

# Plot the (Cartesian) test points colored by predicted class; note there
# is no plt.show() here — presumably run under an interactive backend.
plt.figure(num=1)
plt.scatter(X_test_plt[:, 0], X_test_plt[:, 1], c=y_pre)
plt.title('my predict')




    
           
        
        
        











        
    
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        