# coding:utf-8
# Author : hiicy redldw
# Date : 2019/03/04

import numpy as np


def sigmoid(z):
    """Logistic sigmoid: map z elementwise into (0, 1)."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)


def sigmoid_prime(z):
    """Derivative of the logistic sigmoid: s(z) * (1 - s(z)).

    Fix: the original evaluated sigmoid(z) twice; the value is computed
    once here (inlined, so the function also stands on its own).
    """
    s = 1.0 / (1.0 + np.exp(-z))
    return s * (1.0 - s)

def tanh(x):
    """Hyperbolic tangent activation (thin wrapper over numpy)."""
    return np.tanh(x)

def tanh_derivative(x):
    """Derivative of tanh: 1 - tanh(x)^2.

    Fix: the original evaluated np.tanh(x) twice; hoist the value.
    """
    t = np.tanh(x)
    return 1.0 - t * t

def logistic(x):
    """Logistic (sigmoid) activation used by BPTWONetwork."""
    denom = 1 + np.exp(-x)
    return 1 / denom

def logistic_derivative(x):
    """Derivative of the logistic function: s(x) * (1 - s(x)).

    Fix: the original called logistic(x) twice; compute the value once
    (inlined, so the function also stands on its own).
    """
    s = 1 / (1 + np.exp(-x))
    return s * (1 - s)

class BPNetWork(object):
    """Fully-connected feed-forward network trained by backpropagation.

    Layer activations are column vectors of shape (n, 1); the weight
    matrix for the transition from layer k to k+1 has shape
    (units[k+1], units[k]).  The attribute names ``wights`` and ``biase``
    keep the original (misspelled) names for backward compatibility.
    """

    @staticmethod
    def _sigmoid(z):
        # Logistic sigmoid; kept on the class so it is self-contained.
        return 1.0 / (1.0 + np.exp(-z))

    @staticmethod
    def _sigmoid_prime(z):
        # Derivative of the sigmoid, from a single evaluation.
        s = 1.0 / (1.0 + np.exp(-z))
        return s * (1.0 - s)

    def __init__(self, units):
        """
        :param units: list of layer sizes, e.g. [784, 30, 10]
        """
        self.units = units
        self.numOflayer = len(self.units)
        # Biases for every non-input layer, standard-normal initialised.
        self.biase = [np.random.randn(i, 1) for i in self.units[1:]]
        # One weight matrix per layer transition, standard-normal initialised.
        # Fix: the original printed self.wights[1].shape here, which raised
        # IndexError for two-layer networks; debug prints removed.
        self.wights = [np.random.randn(y, x)
                       for x, y in zip(self.units[:-1], self.units[1:])]

    def feedforward(self, x):
        """Propagate one sample forward through every layer.

        :param x: input column vector of shape (units[0], 1)
        :return: output activations of shape (units[-1], 1)
        """
        for w, b in zip(self.wights, self.biase):
            x = self._sigmoid(w.dot(x) + b)
        return x

    def backprop(self, x, y):
        """Compute the gradient of the cost for a single sample.

        :param x: input column vector
        :param y: target column vector
        :return: (nabla_b, nabla_w), lists matching self.biase / self.wights
        """
        nabla_b = [np.zeros(b.shape) for b in self.biase]
        nabla_w = [np.zeros(w.shape) for w in self.wights]

        # Forward pass, recording every activation and pre-activation z.
        activation = x
        activations = [x]   # layer-by-layer activations, starting with input
        zs = []             # weighted inputs before the sigmoid
        for b, w in zip(self.biase, self.wights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = self._sigmoid(z)
            activations.append(activation)

        # Output-layer error (delta).
        delta = self.cost_derivative(activations[-1], y) * self._sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        # Gradient w.r.t. the last weights: delta times previous activation^T.
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())

        # Propagate the error backwards through the hidden layers.
        for l in range(2, self.numOflayer):
            sp = self._sigmoid_prime(zs[-l])
            # BUGFIX: carry delta back through the layer WEIGHTS; the
            # original used the gradient accumulator nabla_w[-l+1].
            delta = np.dot(self.wights[-l + 1].transpose(), delta) * sp
            # BUGFIX: use the activation feeding this layer; the original
            # always used the raw input x, which is wrong for >2 layers.
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
            nabla_b[-l] = delta
        return (nabla_b, nabla_w)

    def update_mini_batch(self, mini_batch, eta):
        """Apply one gradient step averaged over a mini batch.

        :param mini_batch: list of (x, y) training samples
        :param eta: learning rate
        """
        nabla_b = [np.zeros(b.shape) for b in self.biase]
        nabla_w = [np.zeros(w.shape) for w in self.wights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            # Accumulate the per-sample gradients over the batch.
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # Step with eta divided by the batch size (i.e. gradient average).
        self.wights = [w - eta / len(mini_batch) * nw
                       for w, nw in zip(self.wights, nabla_w)]
        self.biase = [b - eta / len(mini_batch) * nb
                      for b, nb in zip(self.biase, nabla_b)]

    def sgd(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Stochastic gradient descent over shuffled mini batches.

        :param training_data: list of (x, y) training samples
        :param epochs: number of passes over the data
        :param mini_batch_size: samples per batch
        :param eta: learning rate
        :param test_data: optional list of (x, label) pairs; when given,
            prints evaluation accuracy after each epoch
        """
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        for i in range(epochs):
            np.random.shuffle(training_data)
            # Slice the shuffled data into consecutive mini batches.
            mini_batches = [
                training_data[k:k + mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(i, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(i))

    def cost_derivative(self, output_activations, y):
        """Derivative of the quadratic cost w.r.t. the output activations."""
        return (output_activations - y)

    def evaluate(self, test_data):
        """Count the test samples whose argmax prediction equals the label."""
        result = [(np.argmax(self.feedforward(x)), y) for x, y in test_data]
        return sum(int(x == y) for x, y in result)


class BPTWONetwork:
    """Simple fully-connected BP network trained by per-sample SGD.

    Input vectors are augmented with a constant bias unit (a trailing 1),
    so no separate bias vectors are stored; hidden layers carry an extra
    bias column (the ``+ 1`` terms in the weight shapes).
    """

    # Activation helpers live on the class so it is self-contained; they
    # compute the same values as the module-level tanh/logistic helpers.
    @staticmethod
    def _tanh(x):
        return np.tanh(x)

    @staticmethod
    def _tanh_deriv(x):
        t = np.tanh(x)
        return 1 - t * t

    @staticmethod
    def _logistic(x):
        return 1 / (1 + np.exp(-x))

    @staticmethod
    def _logistic_deriv(x):
        s = 1 / (1 + np.exp(-x))
        return s * (1 - s)

    def __init__(self, layers, activation='tanh'):
        """
        :param layers: list of layer sizes, e.g. [3, 2, 1]
        :param activation: 'tanh' or 'logistic'
        :raises ValueError: for an unknown activation name
        """
        if activation == 'logistic':
            self.activation = self._logistic
            self.activation_deriv = self._logistic_deriv
        elif activation == 'tanh':
            self.activation = self._tanh
            self.activation_deriv = self._tanh_deriv
        else:
            # BUGFIX: the original silently left the attributes unset and
            # crashed later with AttributeError inside fit/predict.
            raise ValueError("unknown activation: {!r}".format(activation))

        # Random weights in (-0.25, 0.25).
        # BUGFIX: the original loop appended TWO matrices per hidden layer,
        # breaking the shape chain for networks with more than one hidden
        # layer; for a single hidden layer the result is unchanged.
        self.weights = []
        for i in range(1, len(layers) - 1):
            self.weights.append(
                (2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
        self.weights.append(
            (2 * np.random.random((layers[-2] + 1, layers[-1])) - 1) * 0.25)

    def fit(self, x, y, learning_rate=0.2, epochs=10000):
        """Train with per-sample stochastic gradient descent.

        :param x: 2-D array of samples, one per row
        :param y: target value per sample
        :param learning_rate: step size for the weight updates
        :param epochs: number of single-sample update steps
        """
        x = np.atleast_2d(x)
        # Append a constant 1 to every sample for the bias unit.
        augmented = np.ones([x.shape[0], x.shape[1] + 1])
        augmented[:, 0:-1] = x
        x = augmented
        y = np.array(y)

        for _ in range(epochs):
            i = np.random.randint(x.shape[0])  # pick one random sample
            a = [x[i]]

            # Forward pass: record every layer's activation.
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))

            # Output error and its delta.
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]

            # Backpropagate deltas through the hidden layers, then put
            # them back into forward layer order.
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()

            # Gradient step on each weight matrix.
            # Fix: use a loop variable distinct from the sample index i.
            for j in range(len(self.weights)):
                layer = np.atleast_2d(a[j])
                delta = np.atleast_2d(deltas[j])
                self.weights[j] += learning_rate * layer.T.dot(delta)

    def predict(self, x):
        """Forward a single sample through the trained network.

        :param x: 1-D input vector of length layers[0]
        :return: output activation array of length layers[-1]
        """
        x = np.array(x)
        augmented = np.ones(x.shape[0] + 1)  # trailing bias unit
        augmented[0:-1] = x
        a = augmented
        for l in range(len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
# Demo: train a tiny tanh network on a toy mapping and print predictions.
nn = BPTWONetwork([3, 2, 1], 'tanh')
x = np.array([[1, 1, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]])
y = np.array([1, 0, 0, 1])
nn.fit(x, y)
for sample in ([0, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1]):
    print(sample, nn.predict(sample))
# import mnist_loader
#
# training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# net = BPNetWork([3,2,1])
# net.sgd(training_data, 30, 10, 3.0, test_data=test_data)
# NOTE(review): everything below is dead code -- an earlier draft disabled by
# wrapping it in an unused module-level string literal; it never executes and
# is a candidate for deletion.
"""
import numpy as np

# 定义tanh函数
def tanh(x):
	return np.tanh(x)


# tanh函数的导数
def tan_deriv(x):
	return 1.0 - np.tanh(x) * np.tan(x)


# sigmoid函数
def logistic(x):
	return 1 / (1 + np.exp(-x))


# sigmoid函数的导数
def logistic_derivative(x):
	return logistic(x) * (1 - logistic(x))


class NeturalNetwork():
	def __init__(self,layers:list,activation):
		self.activation = activation
		self.activation_deriv = logistic_derivative
		self.layers = layers
		self.weights = []
		for i in range(1,len(layers)-1):
			self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
			self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)

			# self.weights.append((2*np.random.random((layers[i-1]+1,layers[i]+1))-1)*0.25)
			# self.weights.append((2*np.random.random((layers[i]+1,layers[i+1]))-1)*0.25)

	def fit(self,X,y,learn_rate=0.2,epoch=10000):
		X = np.atleast_2d(X)
		temp = np.ones([X.shape[0],X.shape[1]+1])
		temp[:,0:-1] = X
		X = temp
		y = np.array(y)

		for k in range(epoch):
			i = np.random.randint(X.shape[0])
			a = [X[i]]
			# 用随机抽取的一组数据对神经网络进行更新
			for l in range(len(self.weights)):
				a.append(self.activation(np.dot(a[l],self.weights[l])))
			error = y[i] - a[-1]
			# 输出层的误差
			deltas = [error * self.activation_deriv(a[-1])]

			# 反向更新
			for l in range(len(a) - 2,0,-1):
				# 隐含层的误差
				deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_deriv(a[l]))
				deltas.reverse()
			for i in range(len(self.weights)):
				layer = np.atleast_2d(a[i])
				delta = np.atleast_2d(deltas[i])
				self.weights[i] += learn_rate*layer.T.dot(delta)
	def predict(self,x):
		x = np.array(x)
		temp = np.ones(x.shape[0]+1)
		temp[0:-1] = x
		a = temp
		for l in range(0,len(self.weights)):
			a = self.activation(np.dot(a,self.weights[l]))
		return a

def one():
	nn = NeturalNetwork([2,2,1],tanh)
	temp = [[0, 0], [0, 1], [1, 0], [1, 1]]
	X = np.array(temp)
	y = np.array([0, 1, 1, 0])
	nn.fit(X,y)
	for i in temp:
		print(i,nn.predict(i))



if __name__ == "__main__":
	one()
"""