import numpy as np

# sigmoid feed-forward network test (2-8-1 MLP, binary classification — no softmax involved)

def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x); applies element-wise to arrays."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
def sigmoid_dao(x):
    """Sigmoid derivative expressed through the already-computed
    activation: if x = sigmoid(z), then sigmoid'(z) = x * (1 - x).
    Callers must pass the sigmoid OUTPUT, not the pre-activation z.
    """
    complement = 1 - x
    return x * complement

# NOTE(review): the original code here generated two random point
# clusters (100 points in [0, 5)^2 labelled 0, and 10 points near
# (200, 400) labelled 1) and appended them to vec_data/label lists.
# Both variables are unconditionally overwritten by the hard-coded
# arrays below before any use, and the intermediate x/y/x1/y1 arrays
# were never read — the whole section was dead code and has been
# removed.

# Training inputs: six 2-D points — three near (1, 1) and three near
# the origin.
vec_data = np.array(
    [
        [0.98, 0.99],
        [0.91, 0.81],
        [0.01, 0.87],
        [0.06, 0.08],
        [0.03, 0.02],
        [0.09, 0.02],
    ]
)
# Ground-truth labels, one per row of vec_data (shape (6, 1)).
label = np.array([
    [1],
    [1],
    [1],
    [0],
    [0],
    [0]
])

# Removed the original redundant `label = np.array(label)` and
# `vec_data = np.array(vec_data)` lines — both values are already
# ndarrays, so the re-wraps were no-ops.


#print(vec_data)
w1 = np.random.random((2,8))*2-1
w2 = np.random.random((8,1))*2-1


    #return np.exp(-x)/np.sqrt(1+np.exp(-x))

for i in range(100):
    # 隐层1
    l1 = sigmoid(np.matmul(vec_data,w1))
    # 预期值
    out = sigmoid(np.matmul(l1,w2))
    #实际值和预期值之间的差异
    loss = label - out
    if i%10 ==0 :
        print('loss:',np.mean(np.abs(loss)))
    #用loss去修改W1

    l1_delta = loss * sigmoid_dao(out)
    w1_loss = np.matmul(l1_delta,w2.T)

    x_delta = w1_loss*sigmoid_dao(l1)
    #print(l1.shape,l1_delta.shape,vec_data.shape,x_delta.shape)
    w2+= l1.T.dot(l1_delta)

    w1+= vec_data.T.dot(x_delta)
    #print(w1_loss)


# Sanity-check the trained network on hand-picked points: the first two
# rows resemble the class-1 training points, the last three class 0, so
# the printed scores should be near 1 and near 0 respectively.
data = np.array([
    [0.88, 0.90],
    [0.88, 0.90],
    [0.13, 0.10],
    [0.13, 0.10],
    [0.13, 0.10],
])
hidden = sigmoid(data @ w1)
# Network output: per-row probability of class 1.
out = sigmoid(hidden @ w2)
print(out)