# -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 10:57:21 2019

@author: leslielee
"""
import numpy as np
import scipy.special as sp

class BP():
    """Minimal 3-layer (input / hidden / output) feed-forward neural
    network trained with backpropagation and a sigmoid activation.

    Weight matrices are not created by the constructor; callers must
    install them with ``W()`` before calling ``train()`` or ``query()``.
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Store the layer sizes and the learning rate.

        inputnodes   -- number of input-layer nodes
        hiddennodes  -- number of hidden-layer nodes
        outputnodes  -- number of output-layer nodes
        learningrate -- step size used in the gradient-descent updates
        """
        # Node counts for the input, hidden and output layers.
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Learning rate for the weight updates in train().
        self.lr = learningrate
        # Activation function: scipy.special.expit is the sigmoid 1/(1+e^-x).
        self.AF = sp.expit

    def W(self, W1=0, W2=0):
        """Install the weight matrices.

        W1 -- input->hidden weights, expected shape (hnodes, inodes)
        W2 -- hidden->output weights, expected shape (onodes, hnodes)
        """
        self.wih = W1
        self.who = W2

    def train(self, inputs, targets):
        """Run one forward/backward pass, updating the weights only when
        the output error exceeds a fixed tolerance.

        inputs  -- 1-D sequence of length inodes
        targets -- 1-D sequence of length onodes (desired outputs)

        Returns (W1, W2, hidden_errors, output_errors), where W1/W2 are
        the (possibly updated) weight matrices.
        """
        # Promote to column vectors so the matrix products line up.
        inputs = np.array(inputs, ndmin=2).T
        targets = np.array(targets, ndmin=2).T
        # Forward pass: input -> hidden -> output.
        hidden_inputs = np.dot(self.wih, inputs)
        hidden_outputs = self.AF(hidden_inputs)
        final_inputs = np.dot(self.who, hidden_outputs)
        final_outputs = self.AF(final_inputs)
        # Output error = target - actual output.
        output_errors = targets - final_outputs
        # Back-propagate the output error to the hidden layer.  Computed
        # unconditionally so it is always defined for the return statement
        # (the original assigned it only inside the `if`, raising NameError
        # whenever no update was performed).
        hidden_errors = np.dot(self.who.T, output_errors)
        # Update the weights only when some output error exceeds the 0.02
        # tolerance.  The original `np.abs(e).all() > 0.02` compared a
        # boolean to 0.02, which actually tested "all errors nonzero".
        if (np.abs(output_errors) > 0.02).any():
            print('改变权重')
            # Gradient-descent update for W2 (hidden -> output weights).
            self.who += self.lr * np.dot(
                output_errors * final_outputs * (1.0 - final_outputs),
                hidden_outputs.T)
            # Gradient-descent update for W1 (input -> hidden weights).
            self.wih += self.lr * np.dot(
                hidden_errors * hidden_outputs * (1.0 - hidden_outputs),
                inputs.T)

        return self.wih, self.who, hidden_errors, output_errors

    def query(self, input_list):
        """Forward-propagate input_list and return the index (int) of the
        largest output-layer activation."""
        inputs = np.array(input_list, ndmin=2).T
        hidden_outputs = self.AF(np.dot(self.wih, inputs))
        final_outputs = self.AF(np.dot(self.who, hidden_outputs))

        return np.argmax(final_outputs)