import numpy as np
from nn import NN
from funcs import *
import lossfuncs
from layers import *
import data

def test_conv():
    '''
        Smoke-test the convolution layers.

        Builds a small CNN, overfits it on a single 4x4x3 input for
        20000 steps, and prints the network output before and after
        training: afterwards the output should be very close to the
        target label, which demonstrates that the conv backprop works.
    '''
    # One hand-crafted 4x4 image with 3 channels.
    a = np.array(
        [
            [[1, 1, 3], [2, 2, 3], [3, 3, 5], [4, 4, 5]],
            [[0, 0, 3], [1, 1, 3], [0, 0, 5], [1, 1, 5]],
            [[5, 5, 3], [0, 0, 3], [9, 9, 5], [1, 1, 5]],
            [[6, 6, 3], [3, 3, 3], [7, 7, 5], [1, 1, 5]]
        ]
    )

    label = np.array([[1, 0, 1, 1]])

    # NOTE: sigmoid/relu and NN already come from the module-level
    # imports; the former function-local re-imports were redundant.
    my_nn = NN((4, 4, 3), (1, 4))
    my_nn.set_layers([
        ConvolutionLayer(3, 6, 1, 1),
        FuncLayer(sigmoid),
        MeanPoolingLayer(2, 2),
        FuncLayer(relu),
        ConvolutionLayer(6, 5, 2, 1),
        FuncLayer(sigmoid),
        ReshapeLayer(None, (1, -1)),
        FullConnectedLayer(None, 4),
        FuncLayer(sigmoid),
    ])
    y1 = my_nn.forward(a)
    # Overfit on the single sample; same loss reference style as the
    # sibling test (lossfuncs.sse).
    for _ in range(20000):
        my_nn.train(a, label, lossfuncs.sse, 0.1)
    y2 = my_nn.forward(a)
    print("训练前：",y1) # before training, e.g. [[0.77564924 0.91641117 0.37085342 0.2824503 ]]
    print("训练后：",y2) # after training, e.g. [[0.99345597 0.00654098 0.99345575 0.9934803 ]]
    print("答案：",label) # target: [[1 0 1 1]]

def test_BP_Pool_Reshape_sigmoid_softmax():
    '''
        Smoke-test backpropagation through FullConnected / MeanPooling /
        Reshape / sigmoid / softmax layers on MNIST.

        The first 10000 records are used for training and the next
        10000 for testing, with ad-hoc epoch count and learning rate.
        A random classifier would score about 0.1 accuracy (pure
        guessing over 10 classes); this setup typically reaches roughly
        0.35-0.45, which demonstrates that backpropagation is effective.
        Inputs are not normalized/standardized, which limits convergence.
    '''
    def get_nn():
        # Network under test: FC -> sigmoid -> reshape to a 14x14x1 map
        # -> 2x2 mean pooling -> flatten -> FC -> softmax.
        my_nn = NN(input_shape=(1, 784), output_shape=(1, 10))
        my_nn.set_layers([
            FullConnectedLayer(None, 196),
            FuncLayer(sigmoid),
            ReshapeLayer(None, (14, 14, 1)),
            MeanPoolingLayer(2, 2),
            ReshapeLayer(None, (1, -1)),
            FullConnectedLayer(None, 10),
            FuncLayer(softmax)
        ])
        return my_nn

    my_nn = get_nn()
    lr = 0.005
    loss_func = lossfuncs.sse

    labels: np.ndarray
    mnist_data: np.ndarray
    labels, mnist_data = data.get_data()
    split_point = 10000
    train_data = mnist_data[:split_point]
    train_labels = labels[:split_point]
    test_data = mnist_data[split_point:]
    test_labels = labels[split_point:]

    epoches = 3
    e_print_mod = 1  # print progress every e_print_mod epochs
    train_count = len(train_data)
    for e in range(epoches):
        if e % e_print_mod == 0:
            print("epoch: {}".format(e))
        for i in range(train_count):
            vec = train_data[i]
            label = data.label2vec(train_labels[i])  # one-hot encode
            my_nn.train(vec, label, loss_func, lr)
    print("over")

    test_num = min(10000, len(test_data))
    bool_array = np.zeros(test_num)  # 1.0 where the prediction is correct
    for i in range(test_num):
        # NOTE(review): test inputs are flattened here while training fed
        # the rows as-is — presumably the rows are already 1-D; confirm.
        vec = test_data[i].reshape((-1,))
        label = test_labels[i]
        predict_raw = my_nn.forward(vec)
        predict = data.vec2label(predict_raw)  # argmax back to a digit
        bool_array[i] = 1 if predict == label else 0
    print("accuracy: ",sum(bool_array)/float(len(bool_array)))



if __name__ == "__main__":
    # Run both smoke tests when executed as a script.
    test_conv()
    test_BP_Pool_Reshape_sigmoid_softmax()