function [ out ] = MinBPTTNNet( in )
  % MinBPTTNNet: RNN (Recurrent Neural Network) trained with the
  %              BPTT (Back Propagation Through Time) algorithm.
  % url:http://blog.csdn.net/Dark_Scope/article/details/47056361
  %
  % NOTE(review): placeholder entry point -- the body is empty, so `out`
  % is never assigned and calling this function errors. The working code
  % lives in the local functions below (MinBPTTNNetTrain / calOut /
  % calGrad / adjustR).

end  % MinBPTTNNet

function [ wax,wab,wob,ps ] = MinBPTTNNetTrain(data,laySize,fn,dfn )
  % MinBPTTNNetTrain: train the recurrent network with BPTT.
  %
  % data    : (dim+1)-by-t matrix; the last row is the target sequence,
  %           the other rows are the input sequence
  % laySize : number of hidden units
  % fn,dfn  : activation function handle and its derivative
  %
  % wax,wab,wob : learned weight matrices
  % ps          : mapminmax settings used to normalize the inputs

  % Problem dimensions: number of time steps and input dimensionality.
  sampleCount = size(data,2);
  inputDim = size(data,1) - 1;
  targets = data(end,:);

  % Normalize the input rows (targets stay untouched).
  [inputs,ps] = mapminmax(data(1:end-1,:));

  % Network structure:
  %
  %   out (1x1)          = wob [1 x laySize] * bh
  %    ^
  %   bh  (laySize x 1)  = fn(ah)                       (hidden layer b)
  %    ^                     v
  %   ah  (laySize x 1)  = wab [laySize x laySize] * bh(t-1)
  %    ^                + wax [laySize x inputDim] * x  (hidden layer a)
  %   x   (inputDim x 1)   input at time t
  %

  % Random weight initialization in [-1,1].
  wax = unifrnd(-1,1,laySize,inputDim);
  wab = unifrnd(-1,1,laySize,laySize);
  wob = unifrnd(-1,1,1,laySize);

  % Initial learning rate.
  rate = 0.08;
  % Learning-rate adjustment factor (0 < rateDelta < 1).
  rateDelta = 0.3;
  % Maximum rate adjustments per epoch (original note: 50 may be too few).
  maxStep = 50;

  % Iterate until adjustR can no longer find a rate that lowers the error.
  while rate ~= 0
    [ outLayList,ahList,bhList ] = calOut( inputs,sampleCount,wax,wab,wob,laySize,fn,dfn );
    [ waxGrad,wabGrad,wobGrad ] = calGrad( inputs,targets,outLayList,ahList,bhList,wax,wab,wob,sampleCount,inputDim,laySize,dfn );
    [ rate,wax,wab,wob ] = adjustR( waxGrad,wabGrad,wobGrad,inputs,targets,sampleCount,wax,wab,wob,laySize,fn,dfn,rate,rateDelta,maxStep );
  end

end  % MinBPTTNNetTrain

function [ outLayList,ahList,bhList ] = calOut( data,dataSize,wax,wab,wob,laySize,fn,dfn )
  % calOut: forward pass -- compute every layer's output over time.
  %
  % data : input data (dim*t)
  % dataSize : t (number of time steps)
  %
  % Weight matrices:
  % wax : input data      -> hidden layer a
  % wab : hidden layer b  -> hidden layer a (recurrent)
  % wob : hidden layer b  -> output
  %
  % laySize : hidden layer size
  %
  % fn : activation function
  % dfn : derivative of the activation function (unused here; kept for
  %       signature symmetry with the other local functions)
  %
  % outLayList : network outputs (1*t)
  % ahList : hidden layer a pre-activations (laySize*t)
  % bhList : hidden layer b activations (laySize*t)

  outLayList = zeros(1,dataSize);
  % Hidden state at t=0 (no previous step).
  b = zeros(laySize,1);
  % Hidden layer a (pre-activation).
  ahList = zeros(laySize,dataSize);
  % Hidden layer b (activation).
  bhList = zeros(laySize,dataSize);
  for t=1:dataSize
    a = wax*data(:,t)+wab*b;
    ahList(:,t) = a;
    b = arrayfun(fn,a);
    % BUGFIX: was `bhList(:,t);` -- a no-op expression, which left
    % bhList all zeros and broke the backward pass.
    bhList(:,t) = b;
    outLayList(t) = wob*b;
  end

end  % calOut

function [ waxGrad,wabGrad,wobGrad ] = calGrad( data,yData,outLayList,ahList,bhList,wax,wab,wob,dataSize,dim,laySize,dfn )
  % calGrad: BPTT backward pass -- gradients of the error w.r.t. the weights.
  %
  % Backward recurrences (per time step t, iterating t = dataSize..1):
  %
  %   to(t)          := yData(t) - outLayList(t)            (output error)
  %   wobGrad(t,i)   := to(t) * bh(i,t)
  %   th(h,t)        := dfn(ah(h,t)) * (wob(h)*to(t) + wab(h,:)*th(:,t+1))
  %   waxGrad(t,h,i) := th(h,t) * data(i,t)
  %   wabGrad(t,h,h2):= th(h,t) * bh(h2,t)
  %
  % The per-step gradients are accumulated (summed over t) at the end.

  % Hidden-layer delta at t+1; zero past the last time step.
  th = zeros(laySize,1);

  wobGradList = zeros(dataSize,laySize);
  waxGradList = zeros(dataSize,laySize,dim);
  wabGradList = zeros(dataSize,laySize,laySize);

  toList = yData - outLayList;
  for t=dataSize:-1:1
    wobGradList(t,:) = toList(t) * bhList(:,t)';
    th = arrayfun(dfn ,ahList(:,t)).*(wob' * toList(t) + wab * th);
    waxGradList(t,:,:) = th * data(:,t)';
    wabGradList(t,:,:) = th * bhList(:,t)';
  end

  % BUGFIX: original called non-existent `resharp` (typo for reshape),
  % and reshaped the wab gradient to [laySize,dim] instead of its true
  % shape [laySize,laySize].
  waxGrad = reshape(sum(waxGradList,1),[laySize,dim]);
  wabGrad = reshape(sum(wabGradList,1),[laySize,laySize]);
  wobGrad = sum(wobGradList,1);

end  % calGrad

function [ rOut,wax,wab,wob ] = adjustR( waxGrad,wabGrad,wobGrad,data,yData,dataSize,wax,wab,wob,laySize,fn,dfn,r,rd,maxStep )
  % adjustR: line-search the learning rate along the gradient direction.
  %
  % Starting from rate r, tries up to maxStep candidate rates. Whenever a
  % candidate lowers the squared error it is kept and the rate is grown
  % by (1+rd); otherwise the rate is shrunk by (1-rd), with 0 < rd < 1.
  %
  % rOut        : best rate found; 0 when no candidate improved the error
  %               (the caller's training loop stops on rOut == 0)
  % wax,wab,wob : weights updated with rOut (unchanged when rOut == 0)

  % Baseline error with the current weights.
  % (~ discards the unused hidden-layer outputs.)
  [ outLayList,~,~ ] = calOut( data,dataSize,wax,wab,wob,laySize,fn,dfn );
  % Renamed from `error`, which shadowed the MATLAB builtin error().
  bestError = sum((yData - outLayList).^2);

  rOut = 0;
  tmpR = r;

  for i=1:maxStep
    % Candidate weights at the trial rate.
    tmpWax = wax + tmpR * waxGrad;
    tmpWab = wab + tmpR * wabGrad;
    tmpWob = wob + tmpR * wobGrad;
    % Error produced by the candidate weights.
    [ outLayList,~,~ ] = calOut( data,dataSize,tmpWax,tmpWab,tmpWob,laySize,fn,dfn );
    tmpError = sum((yData - outLayList).^2);

    % Rate adjustment:
    %   improvement -> remember this rate, probe a larger one: r*(1+rd)
    %   no gain     -> back off to a smaller rate:             r*(1-rd)
    if tmpError < bestError
      bestError = tmpError;
      rOut = tmpR;
      tmpR = tmpR * (1+rd);
    else
      tmpR = tmpR * (1-rd);
    end
  end

  % Apply the best rate found (no-op when rOut == 0).
  wax = wax + rOut * waxGrad;
  wab = wab + rOut * wabGrad;
  wob = wob + rOut * wobGrad;

end  % adjustR
