from neural_networks import *

def save_datasets(datasets, path):
    """Pickle *datasets* to the file at *path*.

    datasets -- any picklable object (here: (train, test) tuples)
    path     -- destination file path, written in binary mode
    """
    import pickle
    # BUG FIX: the original 'pickle.dump(datasets, open(path, "wb"))' never
    # closed the file handle; a context manager guarantees the flush/close.
    with open(path, 'wb') as f:
        pickle.dump(datasets, f)
def load_datasets(path):
    """Load and return a pickled object from the file at *path*.

    Inverse of save_datasets.
    """
    import pickle
    # BUG FIX: the original 'pickle.load(open(path, "rb"))' leaked the file
    # handle; a context manager closes it deterministically.
    with open(path, 'rb') as f:
        return pickle.load(f)

class LeNet5_1():
    """Reduced LeNet-5 variant: one convolutional layer followed by a
    fully-connected layer, trained with a least-squares cost.

    test_lenet5_1 passes the inputs as their own targets, so this is used
    in an autoencoder-like fashion.
    """

    def __init__(self):
        self.layers = [
            ConvolutionalLayer((32,32), (5,5), np.ones((1,6))),
            FullConnectedLayer((28*28*6,32*32)),
        ]
        self.cost = LSMCost()

    def forward(self, x):
        """Run x through every layer in order.

        Returns (output, frs) where frs[i] is layer i's forward record,
        needed later by backward().
        """
        lsz = len(self.layers)
        (a, frs) = (x, [None]*lsz)
        for i in range(lsz):
            (a, frs[i]) = self.layers[i].forward(a)
        return (a, frs)

    def backward(self, delta, frs):
        """Back-propagate delta through the layers in reverse order.

        Returns (input-side delta, brs) where brs[i] is layer i's
        backward record (its gradients).
        """
        lsz = len(self.layers)
        (d, brs) = (delta, [None]*lsz)
        for i in range(lsz):
            (d, brs[lsz-1-i]) = self.layers[lsz-1-i].backward(d, frs[lsz-1-i])
        return (d, brs)

    def update(self, brs, theta):
        """Apply per-layer gradients brs with learning rate theta."""
        for layer, br in zip(self.layers, brs):
            layer.update(br, theta)

    def train(self, xs, ys, steps=500):
        """Mini-batch gradient descent.

        Each step samples roughly 1% of the data (at least one sample),
        sums the gradients over the batch and applies a single update.
        """
        c = np.ones(len(xs))*np.inf

        for l in self.layers:
            print(l.ws.reshape(l.ws.size)[0])

        n = len(xs)
        # BUG FIX: np.random.randint's size argument must be an int;
        # 'n/100' is a float in Python 3 and raises TypeError.  Also
        # guarantee at least one sample per step for small datasets.
        batch = max(1, n // 100)
        for s in range(steps):
            idxs = np.random.randint(0, n, batch)

            print(idxs, c[idxs[0]], s/steps)

            # Gradient accumulators: one [dws, dbs]-style pair per layer.
            # NOTE(review): this hard-codes 2 layers x 2 gradient parts;
            # adjust if self.layers changes.
            all_brs = [[0,0],[0,0]]
            for i in idxs:
                (x, y) = xs[i], ys[i]

                (a, frs) = self.forward(x)
                c[i] = self.cost.forward(a, y)

                d = self.cost.backward(a, y)
                (d, brs) = self.backward(d, frs)

                for bi in range(len(brs)):
                    for bii in range(len(brs[bi])):
                        all_brs[bi][bii] += brs[bi][bii]

            self.update(all_brs, 0.5)

            for l in self.layers:
                print(l.ws.reshape(l.ws.size)[0])

    def classify(self, x):
        """Return the index of the largest (normalized) network output."""
        (y, _) = self.forward(x)
        y = y / np.sum(y)
        return y.argsort()[-1]

class LeNet5():
    """Classic LeNet-5 style CNN for 32x32 digit images.

    Architecture: C1 conv -> S2 pooling -> C3 conv (sparse connectivity
    table) -> S4 pooling -> C5 conv -> F6 fully connected -> Euclidean
    (RBF) output layer, trained against a WeightCost.
    """

    def __init__(self):
        relu = lambda x: (x>0) * x
        drelu = lambda x: (x>0) * np.ones(x.shape)
        # Connectivity table between the 6 S2 feature maps (rows) and the
        # 16 C3 filters (columns).
        table = np.array([
            [1,0,0,0,1,1,1,0,0,1,1,1,1,0,1,1],
            [1,1,0,0,0,1,1,1,0,0,1,1,1,1,0,1],
            [1,1,1,0,0,0,1,1,1,0,0,1,0,1,1,1],
            [0,1,1,1,0,0,1,1,1,1,0,0,1,0,1,1],
            [0,0,1,1,1,0,0,1,1,1,1,0,1,1,0,1],
            [0,0,0,1,1,1,0,0,1,1,1,1,0,1,1,1]
        ])
        self.layers = [
            ConvolutionalLayer((32,32), (5,5), np.ones((1,6))), 
            PoolingLayer(6, (28, 28), (2,2), (relu,drelu)), 
            ConvolutionalLayer((14,14),(5,5), table),
            PoolingLayer(16, (10,10), (2,2), (relu,drelu)), 
            ConvolutionalLayer((5,5), (5,5), np.ones((16,120))), 
            FullConnectedLayer((120,84),(relu,drelu)),
            EuclideanLayer(84,10)
        ]

        self.cost = WeightCost()

    def forward(self, x):
        """Feed x through all layers; return (output, forward records)."""
        records = []
        out = x
        for layer in self.layers:
            out, rec = layer.forward(out)
            records.append(rec)
        return (out, records)

    def backward(self, delta, frs):
        """Propagate delta backwards; return (input delta, backward records)."""
        records = [None] * len(self.layers)
        d = delta
        for i in reversed(range(len(self.layers))):
            d, records[i] = self.layers[i].backward(d, frs[i])
        return (d, records)

    def update(self, brs, theta):
        """Apply each layer's gradients with learning rate theta."""
        for layer, grad in zip(self.layers, brs):
            layer.update(grad, theta)

    def train(self, xs, ys, steps=500):
        """Online training: each step revisits the current worst-cost sample."""
        costs = np.ones(len(xs)) * np.inf

        for layer in self.layers:
            print(layer.ws.reshape(layer.ws.size)[0])

        for step in range(steps):
            # Pick the single sample with the highest recorded cost.
            idxs = costs.argsort()[-1:]

            print(costs[idxs[-1]], step/steps)

            for i in idxs:
                x, y = xs[i], ys[i]

                out, frs = self.forward(x)
                costs[i] = self.cost.forward(out, y)

                delta = self.cost.backward(out, y)
                delta, brs = self.backward(delta, frs)
                self.update(brs, 0.5)

            for layer in self.layers:
                print(layer.ws.reshape(layer.ws.size)[0])

    def classify(self, x):
        """Return the index of the largest (normalized) network output."""
        out, _ = self.forward(x)
        probs = out / np.sum(out)
        return probs.argsort()[-1]

    
def load_mnist_datasets(path='mnist_py3k.pkl.gz'):
    '''Load the gzipped, pickled MNIST datasets.

    The file comes from
    http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist_py3k.pkl.gz
    and holds a pickled tuple (train, valid, test).  Each element is a
    two-tuple (images, digits):
      images: n*784 numpy.array; every row is a 28*28 image flattened to
              one row, with values in [0, 1]
      digits: length-n numpy.array; each entry is the digit shown in the
              corresponding image
    '''
    import gzip
    import pickle
    with gzip.open(path, 'rb') as f:
        return pickle.load(f)


def show_digit_image(image):
    """Display a grayscale digit image.

    Accepts either a 2-D array or a flattened square image (e.g. a
    784-element MNIST row, which is reshaped to 28x28).
    """
    if len(image.shape) == 1:
        # BUG FIX: np.sqrt returns a float and reshape requires integer
        # dimensions; cast explicitly.
        side = int(np.sqrt(len(image)))
        image = image.reshape(side, side)
    plt.imshow(image, cmap='gray')
    plt.show()


def test_pooling_layer():
    '''Numeric-gradient check for PoolingLayer: compare the backprop
    gradient of one weight against a finite difference of the cost.'''
    pool_a = PoolingLayer(3, (8,8), (2,2), [np.sin, np.cos])
    pool_b = PoolingLayer(3, (4,4), (2,2), [lambda x: x**2, lambda x: 2*x])

    inp = np.array(range(3*8*8)) / 100
    print(inp.reshape(3,8,8))

    out_a, rec_a = pool_a.forward(inp)
    out_b, rec_b = pool_b.forward(out_a)

    # The cost is a plain sum, so its derivative w.r.t. every output is 1.
    grad = np.ones(3*2*2)

    grad_b, back_b = pool_b.backward(grad, rec_b)
    grad_a, back_a = pool_a.backward(grad_b, rec_a)

    base_cost = np.sum(out_b)

    # Perturb one weight of the first layer and re-run the forward pass.
    step = 0.00001
    pool_a.ws[1] += step
    y_, _ = pool_a.forward(inp)
    y_, _ = pool_b.forward(y_)

    new_cost = np.sum(y_)
    print((new_cost - base_cost) / step)
    print(back_a)

def test_convolutional_layer():
    '''Numeric-gradient check for ConvolutionalLayer: compare one kernel
    weight's backprop gradient against a finite difference.'''
    np.random.seed(0)
    conv_a = ConvolutionalLayer((4,4), (2,2), np.ones((1,2)))
    conv_b = ConvolutionalLayer((3,3), (2,2), np.array([[1,0,0],[1,0,0]]))

    inp = np.array(range(4*4)) / 100
    out_a, rec_a = conv_a.forward(inp)
    out_b, rec_b = conv_b.forward(out_a)

    base_cost = np.sum(out_b)
    grad_b, back_b = conv_b.backward(np.ones((3,2,2)), rec_b)
    grad_a, back_a = conv_a.backward(grad_b, rec_a)

    # Nudge a single kernel weight and measure the resulting cost change.
    step = 0.0001
    conv_a.ws[0,1,1,0] += step

    y_, _ = conv_a.forward(inp)
    y_, _ = conv_b.forward(y_)

    new_cost = np.sum(y_)
    print((new_cost - base_cost) / step)

    print(back_a[0])

def test_full_connected_layer():
    """Backprop check for FullConnectedLayer with hand-picked weights
    (the well-known 2-2-2 worked example from backprop tutorials)."""
    l1 = FullConnectedLayer([2,2])
    l1.ws = np.array([[0.15,0.25],[0.20,0.30]])
    l1.bs = np.array([0.35, 0.35])

    l2 = FullConnectedLayer([2,2])
    l2.ws = np.array([[0.40,0.50],[0.45,0.55]])
    l2.bs = np.array([0.60, 0.60])

    y = np.array([0.01, 0.99])
    c = Cost()

    x = np.array([0.05, 0.10])

    fr1 = l1.forward(x)
    fr2 = l2.forward(fr1[0])

    # Exercise the cost's forward pass; only the backward delta is printed.
    err = c.forward(fr2[0], y)

    delta = c.backward(fr2[0], y)
    print('delta', delta)

    br2 = l2.backward(delta, fr2[1])
    br1 = l1.backward(br2[0], fr1[1])
    print(br1[1][0])


def test_euclideanlayer():
    '''Numeric-gradient check for EuclideanLayer: compare one weight's
    backprop gradient against a finite difference of the summed output.'''
    l1 = EuclideanLayer(10,5)
    l2 = EuclideanLayer(5,3)

    x = np.array(range(10))/10

    (y1, fr1) = l1.forward(x)
    (y2, fr2) = l2.forward(y1)
    c = sum(y2)

    (delta2, br2) = l2.backward(np.ones(3), fr2)
    (delta1, br1) = l1.backward(delta2, fr1)

    step = 0.000001
    # BUG FIX: the original perturbed l1.dws (the gradient buffer), which
    # does not affect the forward pass, so the finite difference was
    # always 0.  Perturb the weights instead, matching the pattern of
    # test_pooling_layer / test_convolutional_layer / test_lenet5_der.
    l1.ws[1,0] += step

    (y_, _) = l1.forward(x)
    (y_, _) = l2.forward(y_)
    c_ = sum(y_)

    print((c_-c)/step)
    print(br1[0])

def test_multiple_layer_network():
    """Fit a sigmoid curve with a tiny 1-20-1 network and print residuals."""
    net = MulitpleLayerNetwork([1,20,1])
    inputs = np.array([[i] for i in range(10)]) / 10
    targets = 1 / (1 + np.exp(-inputs))
    net.train(inputs, targets, 3000)

    preds = net.classify(inputs)
    print(targets - preds)
    print(np.sum(np.abs(targets - preds)))

def plot_decision_boundary(pred_func, X, y):
    """Contour-plot pred_func over the bounding box of X and overlay the
    training points colored by their labels."""
    # Bounding box of the data with half-unit padding on every side.
    pad = .5
    x_min, x_max = X[:, 0].min() - pad, X[:, 0].max() + pad
    y_min, y_max = X[:, 1].min() - pad, X[:, 1].max() + pad
    # Dense evaluation grid with spacing h.
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

    # Predict the function value for the whole grid, then restore its shape.
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Draw the decision regions and the training examples on top.
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

def _test_bp_moon():
    '''http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/'''
    np.random.seed(0)
    #X, y = sklearn.datasets.make_moons(200, noise=0.20)
    X, y = sklearn.datasets.make_circles(200, noise=0.020)
    plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)

    bp = MulitpleLayerNetwork([2,3,3,1])
    bp.train(X, y, 2000)

    # Threshold the network output to get a binary decision function
    # for the boundary plot.  (A dead 'funcs' activation table that was
    # never passed to the network has been removed.)
    f = lambda x: bp.classify(x) > 0.5

    _y = bp.classify(X)
    _y = [int(i > 0.5) for i in _y]

    # Training-set accuracy.
    print(np.sum(_y == y) / len(y))

    plot_decision_boundary(f, X, y)

    plt.show()

def test_derivative():
    """Check one MulitpleLayerNetwork gradient against a finite difference."""
    net = MulitpleLayerNetwork([2,3,3,4,5,6,1])

    sample = np.array([4,1])
    target = 3

    pred, records = net.forward(sample)
    base_loss = net.loss(pred, target)

    grads = net.backward(pred, target, records)
    print(grads[0][0])

    # Nudge a single first-layer weight and measure the loss change.
    eps = 0.00001
    net.layers[0].ws[1][0] += eps
    pred, records = net.forward(sample)
    bumped_loss = net.loss(pred, target)

    print((bumped_loss - base_loss) / eps)


def center_image(image, nsz):
    """Embed *image* centered inside a zero-filled array of shape *nsz*.

    image -- 2-D numpy array (e.g. a 28x28 MNIST digit)
    nsz   -- target (rows, cols); each dimension must be >= image's
    Returns a new array of shape nsz with the image centered in it.
    """
    (osz, nsz) = (image.shape, np.array(nsz))
    # The target must be at least as large as the image in every dimension
    # (the original asserted only that the sizes differ, which rejected
    # equal sizes and accepted shrinking targets).
    assert (nsz >= osz).all()

    r = np.zeros(nsz)
    # BUG FIX: use integer division -- '/' produces floats in Python 3,
    # and float slice indices raise a TypeError.
    s = (nsz - osz) // 2
    r[s[0]:s[0]+osz[0], s[1]:s[1]+osz[1]] = image
    return r
    
def test_lenet5_der():
    """Finite-difference check of a single LeNet5 convolution weight
    gradient on the first training sample."""
    def _load(path):
        import pickle
        with open(path, 'rb') as f:
            return pickle.load(f)
    trains, test = _load('lenet5_sets_1000')

    x = center_image(trains[0][0].reshape(28,28), (32,32))
    y = trains[1][0]

    net = LeNet5()

    out, frs = net.forward(x)
    base = net.cost.forward(out, y)
    delta = net.cost.backward(out, y)
    delta, brs = net.backward(delta, frs)

    print(net.layers[0].ws[0,0,0,0])

    # Perturb one weight of the first convolution and recompute the cost.
    eps = 0.001
    net.layers[0].ws[0,0,0,0] += eps

    out, frs = net.forward(x)
    bumped = net.cost.forward(out, y)

    print((bumped - base) / eps)

    print(brs[0][0])


def test_lenet5():
    """Train LeNet5 on a handful of centered MNIST digits and report the
    error rate on the first 100 test digits."""
    start_time = time.time()
    #(trains, verify, test) = load_mnist_datasets()

    trains, test = load_datasets('lenet5_sets_1000')

    # Only the first 5 images are used; ys keeps the full label array,
    # but train() indexes both by the same i so the extras are unused.
    xs = np.zeros((len(trains[0][0:5]), 32*32))
    ys = trains[1]

    # Pad every 28x28 digit to the 32x32 input LeNet5 expects.
    for i in range(len(xs)):
        xs[i] = center_image(trains[0][i].reshape(28,28), (32,32)).reshape(1024)

    net = LeNet5()
    net.train(xs, ys, 1500)

    # Count misclassifications over the first 100 test digits.
    err = 0
    for i in range(len(test[0][:100])):
        y = test[1][i]
        pred = net.classify(center_image(test[0][i].reshape(28,28), (32,32)))
        if y != pred:
            print('err:', i)
            err += 1

    print(err, err/len(test[0][:100]))

    print('time: ', time.time()-start_time)

    


def test_lenet5_1():
    """Train the reduced LeNet5_1 network on 1000 centered MNIST digits,
    using each input as its own target (autoencoder-style)."""
    start_time = time.time()
    #(trains, verify, test) = load_mnist_datasets()

    trains, test = load_datasets('lenet5_sets_1000')

    xs = np.zeros((len(trains[0][0:1000]), 32*32))
    ys = trains[1]  # NOTE(review): unused -- train() below targets xs itself

    # Pad every 28x28 digit to the 32x32 input the network expects.
    for i in range(len(xs)):
        xs[i] = center_image(trains[0][i].reshape(28,28), (32,32)).reshape(1024)

    net = LeNet5_1()
    net.train(xs, xs, 1500)

if __name__ == '__main__':
    # Entry point: run the LeNet5_1 training demo.
    # Other experiments kept for convenience:
    #test_convolutional_layer()
    #test_euclideanlayer()
    test_lenet5_1()
