# coding: utf-8

from __future__ import print_function
import numpy as np

def sigmoid(x):
    """Element-wise logistic function, 1 / (1 + e^(-x)).

    Accepts a scalar or any numpy array and returns the same shape.
    """
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
    
logic_or = np.vectorize(lambda x, y: x or y)

class MultiLayerLogistic(object):
    """Binary classifier: one sigmoid hidden layer + one sigmoid output unit.

    Trained with full-batch gradient descent on the mean cross-entropy loss.

    NOTE(review): neither layer has a bias/intercept term, so the learned
    decision surface is constrained through the origin of feature space —
    confirm this is intended (prepending a constant-1 column to X works
    around it).
    """

    def __init__(self, nnodes=4, max_iter=1000000, alpha=0.001, threshold=0.5, debug=False):
        """
        Parameters
        ----------
        nnodes : int
            Number of hidden-layer units.
        max_iter : int
            Number of full-batch gradient-descent iterations.
        alpha : float
            Learning rate.
        threshold : float
            Probability cutoff used by ``predict`` (strictly greater than).
        debug : bool
            If True, dump intermediate tensors every iteration.
        """
        self.nnodes = nnodes
        self.max_iter = max_iter
        self.alpha = alpha
        self.threshold = threshold
        self.debug = debug

    def fit(self, X, Y):
        """Train on samples ``X`` (2-D, n_samples x n_features) and binary
        labels ``Y`` (1-D of 0/1).

        Sets ``self.Theta_sup1`` (input->hidden weights), ``self.theta_sup2``
        (hidden->output weights) and ``self.losses`` (loss per iteration).

        Raises
        ------
        ValueError
            If the shapes are inconsistent or ``Y`` has values other than 0/1.
        """
        X = np.asarray(X, dtype=float)
        Y = np.asarray(Y, dtype=float)
        # Validate with explicit exceptions rather than `assert`, which is
        # silently stripped when Python runs with -O.
        if X.ndim != 2 or Y.ndim != 1 or len(X) != len(Y):
            raise ValueError('X must be 2-D and Y 1-D with matching lengths')
        if not logic_or(Y == 0, Y == 1).all():
            raise ValueError('Y must contain only 0/1 labels')
        Y = Y[..., np.newaxis]  # column vector so it broadcasts against (n, 1) activations

        self.ndata, self.nfeatures = X.shape
        self.losses = []

        # Gradient descent. Small random init breaks hidden-unit symmetry;
        # an all-zero init would make every hidden unit compute (and keep
        # computing) identical weights.
        Theta_sup1 = np.random.rand(self.nfeatures, self.nnodes) / 100
        theta_sup2 = np.random.rand(self.nnodes, 1) / 100

        eps = 1e-12  # keeps log() finite if a prediction saturates to exactly 0 or 1

        for iteration in range(self.max_iter):

            # Forward pass
            Z_sup1 = np.dot(X, Theta_sup1)
            A_sup1 = sigmoid(Z_sup1)
            Z_sup2 = np.dot(A_sup1, theta_sup2)
            A_sup2 = sigmoid(Z_sup2)
            # Mean cross-entropy; clip only for the loss report so log(0)
            # cannot produce -inf/nan (the gradients below use raw A_sup2).
            A_clip = np.clip(A_sup2, eps, 1 - eps)
            J = - np.sum(Y * np.log(A_clip) + (1 - Y) * np.log(1 - A_clip)) / self.ndata

            if iteration % 1000 == 0:
                print('round: {0}, loss: {1}'.format(iteration, J))
            self.losses.append(J)

            # Backward pass. For sigmoid output + cross-entropy loss the
            # output-layer error simplifies to (A - Y).
            dJ_dtheta_sup2 = np.dot(A_sup1.T, A_sup2 - Y) / self.ndata

            dJ_dZ_sup2 = (A_sup2 - Y) / self.ndata
            dZ_sup2_dA_sup1 = theta_sup2.T
            dA_sup1_dZ_sup1 = A_sup1 * (1 - A_sup1)  # sigmoid'(z) = a(1-a)

            dJ_dZ_sup1 = dJ_dZ_sup2 * dZ_sup2_dA_sup1 * dA_sup1_dZ_sup1

            dJ_dTheta_sup1 = np.dot(X.T, dJ_dZ_sup1)

            Theta_sup1 -= self.alpha * dJ_dTheta_sup1
            theta_sup2 -= self.alpha * dJ_dtheta_sup2

            if self.debug:
                print('X:\n', X[:10])
                print('Theta_sup1:\n', Theta_sup1)
                print('theta_sup2:\n', theta_sup2)
                print('Z_sup1:\n', Z_sup1[:10])
                print('A_sup1:\n', A_sup1[:10])
                print('Z_sup2:\n', Z_sup2[:10])
                print('A_sup2:\n', A_sup2[:10])
                print('dJ_dtheta_sup2:\n', dJ_dtheta_sup2)
                print('dJ_dZ_sup2:\n', dJ_dZ_sup2[:10])
                print('dJ_dZ_sup1:\n', dJ_dZ_sup1[:10])
                print('dJ_dTheta_sup1:\n', dJ_dTheta_sup1)
                print('')

        # end for

        self.Theta_sup1 = Theta_sup1
        self.theta_sup2 = theta_sup2

    def proba(self, X):
        """Forward pass: return the output-unit activation (predicted
        probability of the positive class) for each sample, as a 1-D array."""
        X = np.asarray(X, dtype=float)  # same coercion `fit` applies
        A_sup1 = sigmoid(np.dot(X, self.Theta_sup1))
        A_sup2 = sigmoid(np.dot(A_sup1, self.theta_sup2))
        return A_sup2.ravel()

    def predict(self, X):
        """Return hard labels as floats: 1.0 where proba(X) > threshold,
        else 0.0."""
        prob = self.proba(X)
        return (prob > self.threshold).astype(float)
        
    
        