# -*- coding: utf-8 -*-
__author__ = 'Von zhe'
"""
logistic regression + N2 regularization 
 y*log(h(x)) - (1-y) * log(1-h(x)) + lmbda / m 
"""
import numpy as np
import random
import os
from MNIST import image_loader, CrossBarLoader
from MNIST.AbstractNetwork import AbstractNetwork
import matplotlib.pyplot as plt


def sigmoid(z):
    """Logistic function h(x) = 1 / (1 + e^(-z)).

    The argument is clipped to [-500, 500] so that ``np.exp`` cannot
    overflow (the original raised a RuntimeWarning for z < ~-709);
    within double precision the returned values are unchanged.
    """
    return 1.0 / (1.0 + np.exp(-np.clip(z, -500.0, 500.0)))


def sigmoid_prime(z):
    """Derivative of the logistic function: s(z) * (1 - s(z)).

    Evaluates ``sigmoid`` once instead of twice as in the original.
    """
    s = sigmoid(z)
    return s * (1 - s)


class Network(AbstractNetwork):
    """Three-layer BP network whose weight updates are quantized onto
    memristor-crossbar conductance levels loaded from SET/RESET lookup
    tables. Cost is cross-entropy (logistic) with L2 regularization.
    """

    def __init__(self, sizes, setPath, resetPath):
        """Build the network.

        sizes: neurons per layer, e.g. [784, 100, 10].
        setPath / resetPath: CSV lookup tables for the crossbar device.
        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.setPath = setPath
        self.resetPath = resetPath
        # Fixed seeds keep every run reproducible.
        np.random.seed(342232)
        self.randomWeights()

        np.random.seed(3422323423)
        # One bias column vector per non-input layer, e.g. [(100,1), (10,1)].
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.accuracy = [0.0]  # accuracy history (%), one entry per epoch
        self.delta = []  # NOTE(review): never used in this file

    def randomWeights(self):
        """Initialize weights (Gaussian, scaled by 1/sqrt(fan-in)) and
        cache the conductance statistics used to map weights onto the
        crossbar conductance range."""
        crossbar = CrossBarLoader(self.setPath, self.resetPath)
        G_max, G_min, G_mean, setG_mean, resetG_mean = crossbar.getGInfo()
        self.setG_mean = setG_mean      # mean conductance after each SET pulse
        self.resetG_mean = resetG_mean  # mean conductance after each RESET pulse
        self.G_mean = G_mean
        # Per-layer scale factor mapping weight range onto conductance range.
        # 0.327 / 1.575 look like empirical weight spreads per layer -
        # TODO(review): confirm where these constants come from.
        self.scalef = [(G_max - G_mean) / 0.327, (G_max - G_mean) / 1.575]
        self.weights = [np.random.randn(y, x) / np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if 'a' is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x. ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        activation = x   # x is the input image vector
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        # Feed forward, recording every layer's z and activation so the
        # per-layer deltas can be computed below.
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)                   # weighted input of the layer
            activation = sigmoid(z)
            activations.append(activation)  # output of the layer

        # Output layer: for the cross-entropy cost the delta is simply
        # (a - y), so delta_w = (a - y) * a_prev'.
        delta = self.cost_derivative(activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error backwards through the hidden layers
        # (for a 3-layer net l only takes the value 2, the hidden layer).
        for l in range(2, self.num_layers):
            z = zs[-l]
            # err_hidden = w' * err_output, scaled by the local gradient.
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sigmoid_prime(z)
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return nabla_b, nabla_w

    def update_mini_batch(self, mini_batch, eta, lmbda, n):
        """Update the network's weights and biases by applying gradient
        descent using backpropagation to a single mini-batch.
        ``mini_batch`` is a list of tuples (x, y), ``eta`` the learning
        rate, ``lmbda`` the L2 coefficient and ``n`` the training-set size."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]

        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # Full (signed) weight delta including the L2 weight-decay term:
        # dw = -eta*(lmbda/n)*w - (eta/m)*grad  (equivalent to
        # w <- (1 - eta*lmbda/n)*w - (eta/m)*grad).
        nabla_w = [(-1 * eta * (lmbda / n)) * w - (eta / len(mini_batch)) * nw for w, nw in
                   zip(self.weights, nabla_w)]

        # Only the SIGN of dw is used: each weight moves one conductance step.
        self.set_weight2Matrix_update(self.weights, nabla_w)

        self.biases = [b - (eta / len(mini_batch)) * nb for b, nb in zip(self.biases, nabla_b)]

    def SGD(self, training_data, epochs, mini_batch_size, eta, lmbda=1.0, test_data=None):
        """Stochastic gradient descent.

        If ``test_data`` is provided the network is evaluated against it
        after each epoch and the accuracy (%) appended to
        ``self.accuracy``. Useful for tracking progress, but slows
        things down substantially."""
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size] for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta, lmbda, len(training_data))
            if test_data:
                n_test = len(test_data)
                accuracy = self.evaluate(test_data) / n_test * 100
                self.accuracy.append(accuracy)
                print("Epoch {0}, Accuracy: {1}".format(j + 1, accuracy))
            else:
                print("Epoch {0} complete".format(j + 1))

    def evaluate(self, test_data):
        """Number of test inputs whose arg-max output matches the label y."""
        test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        """dC/da for the cross-entropy cost: (a - y)."""
        return output_activations - y

    def plot_training(self):
        """Plot accuracy-vs-epoch on a log y-axis and save it to disk."""
        plt.semilogy(self.accuracy, '-b', linewidth=2)
        plt.xlabel('Training Epoch')
        plt.ylabel('Accuracy (%)')
        plt.ylim((1, 100))
        path = os.path.join('.', str(self.sizes[1]) + "_hidden_units_result")
        # 'transparent' (lowercase) is the actual savefig kwarg; the
        # original 'Transparent=True' was not recognized by matplotlib.
        plt.savefig(path, dpi=300, transparent=True)

    def plot_weights(self):
        """Histogram of all weights (all layers pooled) saved to disk."""
        # Pool the weight matrices into one flat array: passing the raw
        # list of differently-shaped 2-D arrays to plt.hist raises.
        plt.hist(np.concatenate([w.ravel() for w in self.weights]), bins=100)
        path = os.path.join('.', str(self.sizes[1]) + "_hidden_units_weights")
        plt.savefig(path, dpi=300, transparent=True)

    def save_weights(self, name):
        """Save all weights flattened into a single text file.

        Generalized to any number of layers (the original assumed
        exactly two weight matrices); output is identical for two."""
        weights = np.concatenate([np.asarray(w).flatten() for w in self.weights])
        path = os.path.join('.', "Weights_" + name + ".txt")
        np.savetxt(path, weights)

    def set_weight2Matrix_update(self, weights, nabla_w):
        """Step each weight one conductance level up (SET) or down (RESET)
        according to the sign of its requested update, then map the new
        conductances back into weight space. Mutates ``weights`` in place."""
        for j in range(len(weights)):
            # weight is one of the weight matrices
            weight = weights[j]
            x, y = weight.shape
            weight = weight.flatten()
            # Map weights into conductance space.
            weight_G = weight * self.scalef[j] + self.G_mean

            # Keep only the direction of the update (+1 / 0 / -1).
            abla_w = np.sign(nabla_w[j].flatten())

            for i in range(weight_G.size):
                if abla_w[i] == 1:
                    # Positive delta: one SET pulse (conductance up).
                    index = self.findGIndex(weight_G[i], self.setG_mean)
                    # 60 = len(self.setG_mean); clamp so index+1 stays in range
                    if index >= 59:
                        index = 58
                    weight_G[i] = self.setG_mean[index + 1]
                elif abla_w[i] == -1:
                    # Negative delta: one RESET pulse (conductance down).
                    index = self.findGIndex(weight_G[i], self.resetG_mean)
                    # NOTE(review): clamp threshold differs from the SET
                    # branch (58 vs 59) - confirm the RESET table length.
                    if index >= 58:
                        index = 58
                    weight_G[i] = self.resetG_mean[index + 1]
            # Map conductances back into weight space.
            weight = (weight_G - self.G_mean) / self.scalef[j]
            weights[j] = weight.reshape(x, y)

    def findGIndex(self, g, G_array):
        """Index of the conductance level in ``G_array`` closest to ``g``."""
        return np.argmin(np.abs(G_array - g))


if __name__ == '__main__':
    # Device lookup tables for the crossbar simulator.
    set_path = r"E:\References\Crosssim\examples\lookup_table_example\set.csv"
    reset_path = r"E:\References\Crosssim\examples\lookup_table_example\reset.csv"

    training_data, validation_data, test_data = image_loader.load_data_wrapper()
    net = Network([784, 100, 10], set_path, reset_path)

    # Baseline accuracy (%) of the untrained network.
    print(net.evaluate(test_data) / len(test_data) * 100)

    # Train: 30 epochs, mini-batch size 10, learning rate 3.0.
    net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

    # Persist the per-epoch accuracy history.
    np.savetxt(os.path.join('.', "Accuracy_result.txt"), net.accuracy)
