# coding: utf-8
'''
time: 2015.06.03
author: yujianmin
objective: BGD / SGD / mini-batch GD / QNGD / DFP / BFGS
'''
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
data = pd.read_csv("arraydataR.csv")
# .ix was removed from pandas; with the default RangeIndex, .loc gives the
# same label-based, end-inclusive slice of rows 1..5
print(data.loc[1:5, :])
dataArray = np.array(data)
'''
x = dataArray[:, 0]
y = dataArray[:, 1]
plt.plot(x, y, 'o')
plt.title('data is like this')
plt.xlabel('x feature')
plt.ylabel('y label')
plt.show()
'''
def Myfunction_BGD(data, alpha, numIter, eplise):
    ''' Batch Gradient Descent for least-squares linear regression.
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: learning rate (step length) for each theta update
    :type numIter: int
    :param numIter: maximum number of iterations
    :type eplise: float
    :param eplise: stop when the L1 norm of the gradient falls below this
    :return: (theta, costJ) -- fitted parameters and per-iteration cost history
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    i = 0
    costJ = []
    while i < numIter:
        H = np.dot(x, theta)
        J = (np.sum((y-H)**2))/(2*nRow)
        print('Itering %d ;cost is:%f' % (i+1, J))
        costJ.append(J)
        # NOTE: this is the NEGATIVE gradient of J, so "+ alpha *" below
        # is a descent step.
        Gradient = (np.dot(np.transpose(y-H), x))/nRow
        Gradient = Gradient.reshape(nCol+1, 1)
        if np.sum(np.fabs(Gradient)) <= eplise:
            return theta, costJ
        theta = theta + alpha * Gradient
        i = i + 1
    return theta, costJ

def Myfunction_SGD(data, alpha, numIter, eplise):
    ''' Stochastic Gradient Descent for least-squares linear regression.
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: learning rate for each per-sample theta update
    :type numIter: int
    :param numIter: maximum number of epochs (full passes over the data)
    :type eplise: float
    :param eplise: stop when the L1 norm of the batch gradient falls below this
    :return: (theta, costJ) -- fitted parameters and per-epoch cost history
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    Loop = 0
    costJ = []
    while Loop < numIter:
        H = np.dot(x, theta)
        J = np.sum((y-H)**2)/(2*nRow)
        print('Itering %d ;cost is:%f' % (Loop+1, J))
        costJ.append(J)
        i = 0
        while i < nRow:
            # per-sample residual times feature vector = negative gradient
            # contribution, so "+ alpha *" descends
            Gradient = (y[i] - np.dot(x[i], theta)) * x[i]
            Gradient = Gradient.reshape(nCol+1, 1)
            theta = theta + alpha * Gradient
            i = i + 1
        # NOTE: convergence test uses H from the START of this epoch, so it
        # lags the parameter updates by one epoch.
        Gradient = (np.dot(np.transpose(y-H), x))/nRow
        if np.sum(np.fabs(Gradient)) <= eplise:
            return theta, costJ
        Loop = Loop + 1
    return theta, costJ


def Myfunction_NGD1(data, alpha, numIter, eplise):
    ''' Damped Newton descent -- theta := theta - alpha*[f'']^(-1)*f'.
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: fixed step length applied to the Newton direction
    :type numIter: int
    :param numIter: maximum number of iterations
    :type eplise: float
    :param eplise: stop when the L1 norm of the gradient falls below this
    :reference:http://www.doc88.com/p-145660070193.html
    :hessian = transpose(x) * x / nRow (exact for least squares)
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    i = 0
    costJ = []
    while i < numIter:
        H = np.dot(x, theta)
        J = (np.sum((y-H)**2))/(2*nRow)
        print('Itering %d ;cost is:%f' % (i+1, J))
        costJ.append(J)
        # negative gradient of J, so adding it (scaled) descends
        Gradient = (np.dot(np.transpose(y-H), x))/nRow
        Gradient = Gradient.reshape(nCol+1, 1)
        if np.sum(np.fabs(Gradient)) <= eplise:
            return theta, costJ
        Hessian = np.dot(np.transpose(x), x)/nRow
        theta = theta + alpha * np.dot(np.linalg.inv(Hessian), Gradient)
        i = i + 1
    return theta, costJ


def Myfunction_NGD2(data, alpha, numIter, eplise):
    ''' Pure Newton descent -- theta := theta - [f'']^(-1)*f' (unit step).
    For the quadratic least-squares cost this converges in one step.
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: unused (kept for a uniform signature with the other solvers)
    :type numIter: int
    :param numIter: maximum number of iterations
    :type eplise: float
    :param eplise: stop when the L1 norm of the gradient falls below this
    :reference:http://www.doc88.com/p-145660070193.html
    :hessian = transpose(x) * x / nRow (exact for least squares)
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    i = 0
    costJ = []
    while i < numIter:
        H = np.dot(x, theta)
        J = (np.sum((y-H)**2))/(2*nRow)
        print('Itering %d ;cost is:%f' % (i+1, J))
        costJ.append(J)
        # negative gradient of J
        Gradient = (np.dot(np.transpose(y-H), x))/nRow
        Gradient = Gradient.reshape(nCol+1, 1)
        if np.sum(np.fabs(Gradient)) <= eplise:
            return theta, costJ
        Hessian = np.dot(np.transpose(x), x)/nRow
        theta = theta + np.dot(np.linalg.inv(Hessian), Gradient)
        i = i + 1
    return theta, costJ

def Myfunction_QNGD(data, alpha, numIter, eplise):
    ''' Damped Newton -- theta := theta + alpha*Dk, Dk = -[f'']^(-1)*f',
            where alpha is found by a forward-and-back bracketing search
            followed by golden-section ("huang jin fen ge") refinement.
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: initial value only; overwritten by the line search
    :type numIter: int
    :param numIter: maximum number of iterations
    :type eplise: float
    :param eplise: stop when the L1 norm of the gradient falls below this
    :reference:http://www.doc88.com/p-145660070193.html
    :hessian = transpose(x) * x / nRow (exact for least squares)
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    i = 0
    costJ = []
    while i < numIter:
        H = np.dot(x, theta)
        J = (np.sum((y-H)**2))/(2*nRow)
        print('Itering %d ;cost is:%f' % (i+1, J))
        costJ.append(J)
        Gradient = (np.dot(np.transpose(y-H), x))/nRow  # = -grad(J)
        Gradient = Gradient.reshape(nCol+1, 1)
        if np.sum(np.fabs(Gradient)) <= eplise:
            return theta, costJ
        Hessian = np.dot(np.transpose(x), x)/nRow
        # BUG FIX: the Newton DESCENT direction is -inv(Hessian)*grad(J);
        # Gradient above is already -grad(J), so Dk = +inv(Hessian)*Gradient.
        # (The original negated once too often and relied on the line search
        # finding a negative alpha.)
        Dk = np.dot(np.linalg.inv(Hessian), Gradient)
        ## phase 1: forward-and-back search for a bracket [a, b] containing
        ## the alpha that minimizes f(theta + alpha*Dk)
        h = np.random.random(1)[0]  # plain scalar so the '%f' prints stay valid
        alpha1 = 0.0
        alpha2 = alpha1 + h
        f1 = (np.sum((y - np.dot(x, theta + alpha1*Dk))**2))/(2*nRow)
        f2 = (np.sum((y - np.dot(x, theta + alpha2*Dk))**2))/(2*nRow)
        Loop = 1
        a = 0
        b = 0
        while Loop > 0:
            print(' find [a,b] loop is %d' % Loop)
            Loop = Loop + 1
            if f1 > f2:
                h = 2*h           # still descending: lengthen the step
            else:
                h = -h            # overshot: reverse and swap the endpoints
                (alpha1, alpha2) = (alpha2, alpha1)
                (f1, f2) = (f2, f1)
            alpha3 = alpha2 + h
            f3 = (np.sum((y - np.dot(x, theta + alpha3*Dk))**2))/(2*nRow)
            print('f3 - f2 is %f' % (f3-f2))
            if f3 > f2:
                a = min(alpha1, alpha3)
                b = max(alpha1, alpha3)
                break
            alpha1 = alpha2
            alpha2 = alpha3
            f1 = f2
            f2 = f3
        ## phase 2: golden-section refinement inside [a, b]
        e = 0.01
        while True:
            alpha1 = a + 0.382 * (b - a)
            alpha2 = a + 0.618 * (b - a)
            f1 = (np.sum((y - np.dot(x, theta + alpha1*Dk))**2))/(2*nRow)
            f2 = (np.sum((y - np.dot(x, theta + alpha2*Dk))**2))/(2*nRow)
            if f1 > f2:
                a = alpha1
            else:
                # BUG FIX: also shrink when f1 == f2; the original updated
                # neither endpoint in that case and could loop forever.
                b = alpha2
            if np.fabs(a-b) <= e:
                alpha = (a+b)/2
                break
        print('optimal alpha is %f' % alpha)
        theta = theta + alpha * Dk
        i = i + 1
    return theta, costJ


def Myfunction_DFP2(data, alpha, numIter, eplise):
    ''' DFP quasi-Newton -- theta := theta + alpha * Dk,
              where alpha is found by a bracketing + golden-section line
              search approximately satisfying argmin{f(theta + alpha*Dk)}.
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: initial value only; overwritten by the line search
    :type numIter: int
    :param numIter: maximum number of iterations
    :type eplise: float
    :param eplise: stop when the L1 norm of Dk falls below this
    :reference:http://blog.pfan.cn/miaowei/52925.html
    :reference:http://max.book118.com/html/2012/1025/3119007.shtm ## important ##
    :hessian: the INVERSE Hessian is estimated by the DFP rank-2 update.
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    i = 0
    costJ = []
    Hessian = np.eye(nCol+1)  # running estimate of the INVERSE Hessian
    H = np.dot(x, theta)
    Gradient = (np.dot(np.transpose(y-H), x))/nRow  # = -grad(J)
    Gradient = Gradient.reshape(nCol+1, 1)
    # BUG FIX: the first descent direction is -grad(J), which is +Gradient
    # here; the original 'Dk = -Gradient' pointed uphill and relied on the
    # line search finding a negative alpha.
    Dk = Gradient
    while i < numIter:
        if np.sum(np.fabs(Dk)) <= eplise:  ## stop condition ##
            return theta, costJ
        ## phase 1: forward-and-back search for a bracket [a, b] containing
        ## the alpha that minimizes f(theta + alpha*Dk)
        h = np.random.random(1)[0]  # plain scalar so the '%f' prints stay valid
        alpha1 = 0.0
        alpha2 = alpha1 + h
        f1 = (np.sum((y - np.dot(x, theta + alpha1*Dk))**2))/(2*nRow)
        f2 = (np.sum((y - np.dot(x, theta + alpha2*Dk))**2))/(2*nRow)
        Loop = 1
        a = 0
        b = 0
        while Loop > 0:
            print(' find [a,b] loop is %d' % Loop)
            Loop = Loop + 1
            if f1 > f2:
                h = 2*h           # still descending: lengthen the step
            else:
                h = -h            # overshot: reverse and swap the endpoints
                (alpha1, alpha2) = (alpha2, alpha1)
                (f1, f2) = (f2, f1)
            alpha3 = alpha2 + h
            f3 = (np.sum((y - np.dot(x, theta + alpha3*Dk))**2))/(2*nRow)
            print('f3 - f2 is %f' % (f3-f2))
            if f3 > f2:
                a = min(alpha1, alpha3)
                b = max(alpha1, alpha3)
                break
            alpha1 = alpha2
            alpha2 = alpha3
            f1 = f2
            f2 = f3
        ## phase 2: golden-section refinement inside [a, b]
        e = 0.01
        while True:
            alpha1 = a + 0.382 * (b - a)
            alpha2 = a + 0.618 * (b - a)
            f1 = (np.sum((y - np.dot(x, theta + alpha1*Dk))**2))/(2*nRow)
            f2 = (np.sum((y - np.dot(x, theta + alpha2*Dk))**2))/(2*nRow)
            if f1 > f2:
                a = alpha1
            else:
                # BUG FIX: also shrink when f1 == f2; the original updated
                # neither endpoint in that case and could loop forever.
                b = alpha2
            if np.fabs(a-b) <= e:
                alpha = (a+b)/2
                break
        print('optimal alpha is %f' % alpha)
        theta_old = theta
        theta = theta + alpha * Dk
        H = np.dot(x, theta)
        J = (np.sum((y-H)**2))/(2*nRow)
        print('Itering %d ;cost is:%f' % (i+1, J))
        costJ.append(J)
        ## DFP update of the inverse-Hessian estimate:
        ## H+ = H + sk*sk'/(sk'*yk) - H*yk*yk'*H/(yk'*H*yk)
        sk = theta - theta_old
        DelXK = -(np.dot(np.transpose(y - np.dot(x, theta)), x))/nRow      # grad(J) at new theta
        DelXk = -(np.dot(np.transpose(y - np.dot(x, theta_old)), x))/nRow  # grad(J) at old theta
        yk = (DelXK - DelXk).reshape(nCol+1, 1)
        z1 = np.dot(sk, np.transpose(sk))  # sk*sk' (matrix); explicit outer product
        z2 = np.dot(np.transpose(sk), yk)  # sk'*yk (scalar)
        z3 = np.dot(np.dot(np.dot(Hessian, yk), np.transpose(yk)), Hessian)  # H*yk*yk'*H
        z4 = np.dot(np.dot(np.transpose(yk), Hessian), yk)                   # yk'*H*yk (scalar)
        Hessian = Hessian + z1/z2 - z3/z4
        Dk = -np.dot(Hessian, DelXK.reshape(nCol+1, 1))
        i = i + 1
    return theta, costJ

def Myfunction_DFP1(data, alpha, numIter, eplise):
    ''' DFP quasi-Newton -- theta := theta + alpha * Dk with a FIXED step.
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: fixed step length applied to every update
    :type numIter: int
    :param numIter: maximum number of iterations
    :type eplise: float
    :param eplise: stop when the L1 norm of Dk falls below this
    :reference:http://blog.pfan.cn/miaowei/52925.html
    :reference:http://max.book118.com/html/2012/1025/3119007.shtm ## important ##
    :hessian: the INVERSE Hessian is estimated by the DFP rank-2 update.
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    i = 0
    costJ = []
    Hessian = np.eye(nCol+1)  # running estimate of the INVERSE Hessian
    H = np.dot(x, theta)
    Gradient = (np.dot(np.transpose(y-H), x))/nRow  # = -grad(J)
    Gradient = Gradient.reshape(nCol+1, 1)
    # BUG FIX: the descent direction is -grad(J), which is +Gradient here;
    # with no line search to compensate, the original 'Dk = -Gradient' made
    # the first fixed-step update move UPHILL.
    Dk = Gradient
    while i < numIter:
        if np.sum(np.fabs(Dk)) <= eplise:  ## stop condition ##
            return theta, costJ
        theta_old = theta
        theta = theta + alpha * Dk
        H = np.dot(x, theta)
        J = (np.sum((y-H)**2))/(2*nRow)
        print('Itering %d ;cost is:%f' % (i+1, J))
        costJ.append(J)
        ## DFP update of the inverse-Hessian estimate:
        ## H+ = H + sk*sk'/(sk'*yk) - H*yk*yk'*H/(yk'*H*yk)
        sk = theta - theta_old
        DelXK = -(np.dot(np.transpose(y - np.dot(x, theta)), x))/nRow      # grad(J) at new theta
        DelXk = -(np.dot(np.transpose(y - np.dot(x, theta_old)), x))/nRow  # grad(J) at old theta
        yk = (DelXK - DelXk).reshape(nCol+1, 1)
        z1 = np.dot(sk, np.transpose(sk))  # sk*sk' (matrix); explicit outer product
        z2 = np.dot(np.transpose(sk), yk)  # sk'*yk (scalar)
        z3 = np.dot(np.dot(np.dot(Hessian, yk), np.transpose(yk)), Hessian)  # H*yk*yk'*H
        z4 = np.dot(np.dot(np.transpose(yk), Hessian), yk)                   # yk'*H*yk (scalar)
        Hessian = Hessian + z1/z2 - z3/z4
        Dk = -np.dot(Hessian, DelXK.reshape(nCol+1, 1))
        i = i + 1
    return theta, costJ

def Myfunction_BFGS1(data, alpha, numIter, eplise):
    ''' BFGS quasi-Newton with a FIXED step length alpha.
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: fixed step length applied to every update
    :type numIter: int
    :param numIter: maximum number of iterations
    :type eplise: float
    :param eplise: stop when the L1 norm of Dk falls below this
    :reference:http://blog.pfan.cn/miaowei/52925.html
    :reference:http://max.book118.com/html/2012/1025/3119007.shtm ## important ##
    :hessian: the INVERSE Hessian is estimated by the BFGS rank-2 update.
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    i = 0
    costJ = []
    Hessian = np.eye(nCol+1)  # running estimate of the INVERSE Hessian
    H = np.dot(x, theta)
    Gradient = (np.dot(np.transpose(y-H), x))/nRow  # = -grad(J)
    Gradient = Gradient.reshape(nCol+1, 1)
    # BUG FIX: the descent direction is -grad(J), which is +Gradient here;
    # with no line search to compensate, the original 'Dk = -Gradient' made
    # the first fixed-step update move UPHILL.
    Dk = Gradient
    while i < numIter:
        if np.sum(np.fabs(Dk)) <= eplise:  ## stop condition ##
            return theta, costJ
        theta_old = theta
        theta = theta + alpha * Dk
        H = np.dot(x, theta)
        J = (np.sum((y-H)**2))/(2*nRow)
        print('Itering %d ;cost is:%f' % (i+1, J))
        costJ.append(J)
        ## BFGS update of the inverse-Hessian estimate:
        ## H+ = H + (1 + yk'H yk/sk'yk)*(sk sk'/sk'yk) - (sk yk'H + H yk sk')/sk'yk
        sk = theta - theta_old
        DelXK = -(np.dot(np.transpose(y - np.dot(x, theta)), x))/nRow      # grad(J) at new theta
        DelXk = -(np.dot(np.transpose(y - np.dot(x, theta_old)), x))/nRow  # grad(J) at old theta
        yk = (DelXK - DelXk).reshape(nCol+1, 1)
        z1 = np.dot(np.dot(np.transpose(yk), Hessian), yk)  # yk'*H*yk (scalar)
        z2 = np.dot(np.transpose(sk), yk)                   # sk'*yk (scalar)
        z3 = np.dot(sk, np.transpose(sk))                   # sk*sk' (matrix)
        z4 = np.dot(np.dot(sk, np.transpose(yk)), Hessian)  # sk*yk'*H (matrix)
        # BUG FIX: the BFGS inverse update subtracts BOTH mixed terms
        # (sk*yk'*H and its transpose H*yk*sk'); the original dropped the
        # transposed term, leaving the estimate asymmetric.
        DHessian = (1 + z1/z2) * (z3/z2) - (z4 + np.transpose(z4))/z2
        Hessian = Hessian + DHessian
        Dk = -np.dot(Hessian, DelXK.reshape(nCol+1, 1))
        i = i + 1
    return theta, costJ


def Myfunction_BFGS2(data, alpha, numIter, eplise):
    ''' BFGS quasi-Newton with a FIXED step length alpha.
    (A bracketing + golden-section line search, as in Myfunction_DFP2, was
    previously stubbed out here; this variant simply uses the given alpha.)
    :type data: array
    :param data: contains x (all but last column) and y (last column)
    :type alpha: int/float
    :param alpha: fixed step length applied to every update
    :type numIter: int
    :param numIter: maximum number of iterations
    :type eplise: float
    :param eplise: stop when the L1 norm of Dk falls below this
    :reference:http://blog.pfan.cn/miaowei/52925.html
    :reference:http://max.book118.com/html/2012/1025/3119007.shtm ## important ##
    :hessian: the INVERSE Hessian is estimated by the BFGS rank-2 update.
    '''
    nCol = data.shape[1]-1
    nRow = data.shape[0]
    print(nCol)
    print(nRow)
    x = data[:, :nCol]
    print(x[1:5, :])
    z = np.ones(nRow).reshape(nRow, 1)
    x = np.hstack((z, x))  ## prepend intercept column (hstack is like cbind in R)
    y = data[:, (nCol)].reshape(nRow, 1)
    theta = np.ones(nCol+1).reshape(nCol+1, 1)
    i = 0
    costJ = []
    Hessian = np.eye(nCol+1)  # running estimate of the INVERSE Hessian
    H = np.dot(x, theta)
    Gradient = (np.dot(np.transpose(y-H), x))/nRow  # = -grad(J)
    Gradient = Gradient.reshape(nCol+1, 1)
    # BUG FIX: the descent direction is -grad(J), which is +Gradient here;
    # with no line search to compensate, the original 'Dk = -Gradient' made
    # the first fixed-step update move UPHILL.
    Dk = Gradient
    while i < numIter:
        if np.sum(np.fabs(Dk)) <= eplise:  ## stop condition ##
            return theta, costJ
        theta_old = theta
        theta = theta + alpha * Dk
        H = np.dot(x, theta)
        J = (np.sum((y-H)**2))/(2*nRow)
        print('Itering %d ;cost is:%f' % (i+1, J))
        costJ.append(J)
        ## BFGS update of the inverse-Hessian estimate:
        ## H+ = H + (1 + yk'H yk/sk'yk)*(sk sk'/sk'yk) - (sk yk'H + H yk sk')/sk'yk
        sk = theta - theta_old
        DelXK = -(np.dot(np.transpose(y - np.dot(x, theta)), x))/nRow      # grad(J) at new theta
        DelXk = -(np.dot(np.transpose(y - np.dot(x, theta_old)), x))/nRow  # grad(J) at old theta
        yk = (DelXK - DelXk).reshape(nCol+1, 1)
        z1 = np.dot(np.dot(np.transpose(yk), Hessian), yk)  # yk'*H*yk (scalar)
        z2 = np.dot(np.transpose(sk), yk)                   # sk'*yk (scalar)
        z3 = np.dot(sk, np.transpose(sk))                   # sk*sk' (matrix)
        z4 = np.dot(np.dot(sk, np.transpose(yk)), Hessian)  # sk*yk'*H (matrix)
        # BUG FIX: include the transposed mixed term (see Myfunction_BFGS1)
        # so the inverse-Hessian estimate stays symmetric.
        DHessian = (1 + z1/z2) * (z3/z2) - (z4 + np.transpose(z4))/z2
        Hessian = Hessian + DHessian
        Dk = -np.dot(Hessian, DelXK.reshape(nCol+1, 1))
        i = i + 1
    return theta, costJ



## test ##
## Run one solver on the csv data and plot the cost history.

num = 10000
#theta, costJ = Myfunction_BGD(dataArray, alpha=0.0005, numIter=num, eplise=0.4) ##
theta, costJ = Myfunction_SGD(dataArray, alpha=0.00005, numIter=num, eplise=0.4)
#theta, costJ = Myfunction_NGD1(dataArray, alpha=0.0005, numIter=num, eplise=0.4) ## alpha is fixed ##
#theta, costJ = Myfunction_NGD2(dataArray, alpha=0.0005, numIter=num, eplise=0.4) ## alpha is 1 ##
#theta, costJ = Myfunction_QNGD(dataArray, alpha=0.0005, numIter=num, eplise=0.4) ## alpha is searched ##
#theta, costJ = Myfunction_DFP1(dataArray, alpha=0.0005, numIter=num, eplise=0.4) ## alpha is fixed ##
#theta, costJ = Myfunction_DFP2(dataArray, alpha=0.0005, numIter=num, eplise=0.4) ## alpha is searched ##
#theta, costJ = Myfunction_BFGS1(dataArray, alpha=0.0005, numIter=num, eplise=0.4) ## alpha is fixed ##
# print() call form keeps this runnable on both Python 2 and 3
print(theta)
klen = len(costJ)
leng = np.linspace(1, klen, klen)
plt.plot(leng, costJ)
plt.show()