#! /usr/bin/python
# _*_ coding:UTF-8 _*_

import numpy as np
import math
import matplotlib.pyplot as plt

# BP neural network approximating the function  y = 0.5*sin(x) + 0.5
# Architecture: 1 input node, 1 output node, a single hidden layer
# (hidden-layer size is set by `q` in the main script; note the code
# uses q = 8 even though this header originally said 4 nodes)
# x in [0, 2*PI]


#  模拟函数
def fun(x):
    return 0.5 * np.sin(x) + 0.5


def sigmoid(x):
    return 1.0 / (1 + np.power(math.e, -x))


# 初始化数据
def init_data():
    print "初始化数据"
    data = np.zeros((100, 1))
    for i in range(100):
        data[i, 0] = np.random.rand() * 2 * math.pi
    y = fun(data)
    return data, y
def sinx(theta, gamma, omega, nu):
    x = np.array([[6]])
    # print x
    alpha = x.dot(np.transpose(nu))
    b = sigmoid(alpha - np.transpose(gamma))

    beta = b.dot(np.transpose(omega))
    y_pre = sigmoid(beta - np.transpose(theta))
    # print theta
    return y_pre

if __name__ == "__main__":
    (data, y) = init_data()
    sort_train_set = sorted(data[1:80, 0])
    y_train = fun(sort_train_set)
    # print sort_train_set
    plt.figure(1)
    plt.plot(data[:, 0], y[:, 0], 'bo')
    plt.plot(sort_train_set, y_train)
    plt.plot([0, 7], [0.5, 0.5])
    # plt.show()

    m = 80  #训练集个数
    d = 1   # d个输入神经元
    l = 1   # l个输出神经元
    q = 8  # h个隐层神经元
    theta = np.random.random(size=(l, 1))  # 输出层神经元阈值
    gamma = np.random.random(size=(q, 1))  # 隐藏层神经元阈值
    omega = np.random.random(size=(l, q))    # 输出层和隐藏层链接权值
    nu = np.random.random(size=(q, d))       # 隐藏层和输入层链接权值
    eta = 0.6  # 学习率
    max_iter = 1000  # 最大迭代次数

    # print sigmoid(0.3)

    i = 0
    while i < max_iter:
        for k in range(m):
            x = data[k, :].reshape((1, 1))
            alpha = x.dot(np.transpose(nu))
            b = sigmoid(alpha - np.transpose(gamma))
            # print omega,b
            beta = b.dot(np.transpose(omega))
            y_pre = sigmoid(beta - np.transpose(theta))
            # print y_pre
            g = y_pre * (1 - y_pre) * (y[k, :] - y_pre)
            e = b*(1-b)*(omega*g)

            delta_omega = eta*g*b
            delta_nu = np.transpose(eta*e*data[k, :])
            delta_theta = -1*eta*np.transpose(g)
            delta_gamma = -1*eta*np.transpose(e)

            omega += delta_omega
            nu += delta_nu
            theta += delta_theta
            gamma += delta_gamma
            # print 0.5*(y_pre[0,0]-y[k,0])*(y_pre[0,0]-y[k,0])
        i += 1
        print '训练',i
    # print sinx(theta, gamma, omega, nu)
    y_train_pre = np.zeros((m, 1))
    for k in range(m):
        x = data[k, :].reshape((1, 1))
        alpha = x.dot(np.transpose(nu))
        b = sigmoid(alpha - np.transpose(gamma))
        # print omega,b
        beta = b.dot(np.transpose(omega))
        print sigmoid(beta - np.transpose(theta))[0,0]
        y_train_pre[k, 0] = sigmoid(beta - np.transpose(theta))[0,0]
    plt.plot(data[0:80, 0], y_train_pre[:, 0], 'ro')
    plt.show()