#实验一
#用numpy 的dot 函数将两个向量相乘
import numpy as np
example_input = [1, .2, .1, .05, .2]
example_weights = [.2, .12, .4, .6, .90]

input_vector = np.array(example_input)
weights = np.array(example_weights)
bias_weight = .2

# The "* 1" spells out that the bias weight is just another weight: weights are
# multiplied by their inputs, and the bias's input feature is always 1.
activation_level = (input_vector @ weights) + (bias_weight * 1)
activation_level

# Use a simple step (threshold) activation function with a threshold of 0.5.
threshold = 0.5
perceptron_output = 1 if activation_level >= threshold else 0
perceptron_output
# For the given example_input and weights, this perceptron outputs 1.
# With many example_input vectors, the outputs would form a set of labels.

expected_output = 0
# Perceptron learning rule: each weight moves by (error * its own input),
# so an input of zero leaves its weight untouched.
new_weights = [w + (expected_output - perceptron_output) * x
               for w, x in zip(weights, example_input)]
# For the first weight above: new_weight = 0.2 + (0 - 1) * 1 = -0.8
weights = np.array(new_weights)
example_weights
# initial weights
# [0.2, 0.12, 0.4, 0.6, 0.9]
weights
# updated weights
# [-0.8 -0.08 0.3 0.55 0.7]

# Truth-table inputs for the two-input OR problem.
sample_data = [[0, 0],  # False, False
               [0, 1],  # False, True
               [1, 0],  # True, False
               [1, 1]]  # True, True

# OR is true whenever at least one input is true.
expected_results = [0,  # (False OR False) gives False
                    1,  # (False OR True ) gives True
                    1,  # (True OR False) gives True
                    1]  # (True OR True ) gives True
activation_threshold = 0.5

# numpy provides the vector (array) products;
# random is imported for weight initialization.
from random import random
import numpy as np

weights = np.random.random(2) / 1000  # small random floats, 0 < w < .001
weights
# The bias gets its own small random weight as well.
bias_weight = np.random.random() / 1000
bias_weight

# Predictions from the untrained perceptron (weights are still random).
for idx, sample in enumerate(sample_data):
    input_vector = np.array(sample)
    activation_level = np.dot(input_vector, weights) + (bias_weight * 1)
    perceptron_output = 1 if activation_level > activation_threshold else 0
    print('Predicted {}'.format(perceptron_output))
    print('Expected: {}'.format(expected_results[idx]))
    print()
    
# Perceptron learning: make 5 passes over the OR truth table, applying the
# perceptron learning rule after every sample.
import numpy as np
for iteration_num in range(5):
    correct_answers = 0
    for idx, sample in enumerate(sample_data):
        input_vector = np.array(sample)
        # np.dot of two 1-D vectors is a single scalar, so it can be compared
        # with the threshold directly.  (The original wrapped it in any(),
        # which raises TypeError on a scalar.)
        activation_level = np.dot(input_vector, weights) +\
            (bias_weight * 1)
        if activation_level > activation_threshold:
            perceptron_output = 1
        else:
            perceptron_output = 0
        if perceptron_output == expected_results[idx]:
            correct_answers += 1
        new_weights = []
        # The explicit loop emphasizes that each weight is updated by its own
        # input (x_i).  A small or zero input changes that weight very little
        # regardless of the error; a large input changes it a lot.
        for i, x in enumerate(sample):
            new_weights.append(weights[i] + (expected_results[idx] -\
                perceptron_output) * x)
        # Bias is updated alongside the other weights (its input is always 1),
        # but only once per sample -- the original updated it inside the
        # per-feature loop, and also rebound `weights` to the partially built
        # new_weights there, which raised IndexError on the next feature.
        bias_weight = bias_weight + ((expected_results[idx] -\
            perceptron_output) * 1)
        weights = np.array(new_weights)
    # Report once per pass, not once per sample.
    print('{} correct answers out of 4, for iteration {}'\
        .format(correct_answers, iteration_num))
