# -*- coding: utf-8 -*-
__author__ = 'joker_wb'
__date__ = '2018/3/27 15:44'
import math
import random
import numpy
import matplotlib.pyplot as mlp
random.seed(0)


def rand(a, b):
    """Return a random float drawn uniformly from [a, b].

    Delegates to the standard library instead of hand-rolling the
    affine transform; random.uniform(a, b) computes a + (b - a) * random()
    with a single underlying random() call, so the seeded sequence is
    unchanged.
    """
    return random.uniform(a, b)


def make_matrix(m, n, fill=0.0):
    """Create an m x n matrix as a list of m independent row lists.

    m: number of rows.  n: number of columns.
    fill: initial value placed in every cell (default 0.0).
    Each row is a fresh list, so mutating one row never aliases another.
    """
    return [[fill] * n for _ in range(m)]

# Sigmoid activation
def sigmoid(x):
    """Logistic function: maps any real x into the open interval (0, 1)."""
    neg_exp = math.exp(-x)
    return 1.0 / (1.0 + neg_exp)

def sigmoid_derivative(x):
    """Sigmoid derivative expressed in terms of the activation x = sigmoid(t)."""
    complement = 1 - x
    return x * complement

# ReLU activation
def relu(x):
    """Rectified linear unit: pass positive x through, clamp the rest to 0."""
    return x if x > 0 else 0

# Derivative of ReLU
def relu_d(x):
    """Slope of relu at activation x: 1 on the positive side, 0 elsewhere."""
    return 1 if x > 0 else 0
# Normalization helpers (and their inverses) for inputs and labels.
def Normalized_1(normal):
    """Scale an input feature down by a factor of 10."""
    scaled = normal / 10
    return scaled
def Normalized_2(normal):
    """Scale a label value down by a factor of 100."""
    return normal / 100
def Anti_Normalized_1(normal):
    """Invert Normalized_1: scale back up by a factor of 10."""
    return normal * 10
def Anti_Normalized_2(normal):
    """Invert Normalized_2: scale back up by a factor of 100.

    The previous implementation summed `normal` one hundred times in a
    loop — multiplication done in O(100) with extra float rounding at
    every step.  A single multiply is direct and exact.
    """
    return 100 * normal


class BPNeuralNetwork:
    """Minimal fully-connected 3-layer back-propagation network.

    Uses ReLU activations on both hidden and output layers, and a
    momentum-style update in which the previous weight change (stored in
    the *_correction matrices) is blended into the current step.

    NOTE(review): relu_d() returns 0 for non-positive activations, so an
    output unit stuck at 0 stops learning entirely — confirm this is
    acceptable for the intended regression task.
    """
    def __init__(self):
        # Layer sizes; filled in by setup(). input_n includes one bias cell.
        self.input_n = 0
        self.hidden_n = 0
        self.output_n = 0
        # Activation values for each layer.
        self.input_cells = []
        self.hidden_cells = []
        self.output_cells = []
        # Weight matrices: input->hidden and hidden->output.
        self.input_weights = []
        self.output_weights = []
        # Previous weight changes, reused as the momentum term.
        self.input_correction = []
        self.output_correction = []
        # Last prediction produced by printtest().
        self.test_result=[]

    def setup(self, ni, nh, no):
        """Initialize an ni-input, nh-hidden, no-output network.

        ni: number of real inputs (one constant bias cell is appended).
        nh: hidden-layer size.
        no: output-layer size.
        Weights start uniform in [-0.2, 0.2]; corrections start at zero.
        """
        self.input_n = ni + 1
        self.hidden_n = nh
        self.output_n = no
        # init cells (all 1.0; the extra input cell acts as the bias)
        self.input_cells = [1.0] * self.input_n
        self.hidden_cells = [1.0] * self.hidden_n
        self.output_cells = [1.0] * self.output_n

        # init weights
        self.input_weights = make_matrix(self.input_n, self.hidden_n)
        self.output_weights = make_matrix(self.hidden_n, self.output_n)
        # random activate: small symmetric random initial weights
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                self.input_weights[i][h] = rand(-0.2, 0.2)
        for h in range(self.hidden_n):
            for o in range(self.output_n):
                self.output_weights[h][o] = rand(-0.2, 0.2)
        # init correction matrix (momentum memory, all zeros)
        self.input_correction = make_matrix(self.input_n, self.hidden_n)
        self.output_correction = make_matrix(self.hidden_n, self.output_n)

    def predict(self, inputs):
        """Forward pass; return a copy of the output-layer activations.

        inputs must supply at least input_n - 1 values; the trailing
        bias cell is never overwritten and keeps its constant 1.0.
        """
        # activate input layer (last cell untouched -> bias stays 1.0)
        for i in range(self.input_n - 1):
            self.input_cells[i] = inputs[i]
        # activate hidden layer
        for j in range(self.hidden_n):
            total = 0.0
            for i in range(self.input_n):
                total += self.input_cells[i] * self.input_weights[i][j]
            self.hidden_cells[j] = relu(total)
        # activate output layer
        for k in range(self.output_n):
            total = 0.0
            for j in range(self.hidden_n):
                total += self.hidden_cells[j] * self.output_weights[j][k]
            self.output_cells[k] = relu(total)
        return self.output_cells[:]

    def back_propagate(self, case, label, learn, correct):
        """One gradient step on a single (case, label) training pair.

        learn: learning rate for the gradient term.
        correct: momentum factor applied to the previous change stored
        in the correction matrices.
        Returns 0.5 * sum of squared errors from this sample's forward
        pass (computed from output_cells as produced before the update).
        """
        # feed forward
        self.predict(case)
        # get output layer error: delta = f'(out) * (target - out)
        output_deltas = [0.0] * self.output_n
        for o in range(self.output_n):
            error = label[o] - self.output_cells[o]
            output_deltas[o] = relu_d(self.output_cells[o]) * error
        # get hidden layer error, back-propagated through output weights
        hidden_deltas = [0.0] * self.hidden_n
        for h in range(self.hidden_n):
            error = 0.0
            for o in range(self.output_n):
                error += output_deltas[o] * self.output_weights[h][o]
            hidden_deltas[h] = relu_d(self.hidden_cells[h]) * error
        # update output weights: gradient step plus momentum, then
        # remember this change for the next step's momentum term
        for h in range(self.hidden_n):
            for o in range(self.output_n):
                change = output_deltas[o] * self.hidden_cells[h]
                self.output_weights[h][o] += learn * change + correct * self.output_correction[h][o]
                self.output_correction[h][o] = change
        # update input weights the same way
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                change = hidden_deltas[h] * self.input_cells[i]
                self.input_weights[i][h] += learn * change + correct * self.input_correction[i][h]
                self.input_correction[i][h] = change
        # get global error for this sample (half sum of squared errors)
        error = 0.0
        for o in range(len(label)):
            error += 0.5 * (label[o] - self.output_cells[o]) ** 2
        return error

    def train(self, cases, labels, limit=50000, learn=0.05, correct=0.1):
        """Train for `limit` epochs over all (case, label) pairs, then
        plot the per-epoch summed error curve.

        NOTE(review): mlp.show() blocks until the plot window is closed.
        """
        error_list=[]
        for j in range(limit):
            error = 0.0
            for i in range(len(cases)):
                label = labels[i]
                case = cases[i]
                error += self.back_propagate(case, label, learn, correct)
            error_list.append(error)
        x1=range(0,len(error_list))
        mlp.plot(x1,error_list)
        mlp.show()



    def realtrain(self):
        """Train the network on the 7-times table ([a, 7] -> 7*a for
        a = 1..9) and save the learned weight matrices to .npy files."""
        cases = [
            [1,7],
            [2,7],
            [3,7],
            [4,7],
            [5,7],
            [6,7],
            [7,7],
            [8,7],
            [9,7],
        ]
        labels = [[7], [14], [21], [28],[35],[42],[49],[56],[63]]
        # Linear normalization:
        # scale the label values down by 100
        for g in range(len(labels)):
           labels[g][0]=Normalized_2(labels[g][0])
        # scale the input values down by 10
        for m in range(len(cases)):
            for n in range(2):
                cases[m][n]=Normalized_1(cases[m][n])

        # numpy.savetxt('input_weight.csv',self.input_weights, delimiter=',')
        # numpy.savetxt('output_weight.csv', self.output_weights, delimiter=',')
        # main training routine: 2 inputs, 10 hidden units, 1 output
        self.setup(2,10, 1)
        self.train(cases, labels, 1000, 0.05, 0.1)
        # persist the trained weights and the normalization results
        numpy.save('input_weight.npy',self.input_weights)
        numpy.save('output_weight.npy',self.output_weights)
        # print(self.output_weights)

    def printtest(self):
        """Run the trained network on two test cases and print the
        de-normalized predictions.

        NOTE(review): [20, 7] normalizes to 2.0, outside the 0.1-0.9
        range seen in training — expect extrapolation error there.
        """
        cases = [
            [20, 7],
            [6, 7],

        ]
        # apply the same input normalization used during training
        for m in range(len(cases)):
            for n in range(2):
                cases[m][n]=Normalized_1(cases[m][n])
        # print(cases)
        for case in cases:
             self.test_result=self.predict(case)            # self.predict(case) returns a list
             # de-normalize (scale back up by 100)
             self.test_result[0]=Anti_Normalized_2(self.test_result[0])
             print(self.test_result[0])


if __name__ == '__main__':
    # Train on the 7-times table, then print predictions for the test cases.
    network = BPNeuralNetwork()
    network.realtrain()
    network.printtest()

