import numpy as np
import math
import struct
import copy
from tqdm import tqdm
import matplotlib.pyplot as plt
import pickle
import time

def tanh(x):
    """Hyperbolic tangent activation (thin wrapper around numpy's tanh)."""
    return np.tanh(x)

def relu(x):
    """Rectified linear unit activation: elementwise max(0, x).

    BUGFIX: the original implementation zeroed negative entries of `x`
    in place, silently mutating the caller's array (the training loop
    caches pre-activation values, so mutation corrupted later reads).
    This version leaves the input untouched and returns a new array.
    """
    return np.maximum(np.asarray(x), 0)

def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^(-x))."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)

def _softmax(x):
    exp = np.exp(x - x.max()) #同时除以一个数防止指数爆炸
    return exp/exp.sum()

def d_sigmoid(x):
    """Jacobian of the sigmoid at x, as a diagonal matrix
    (sigmoid acts elementwise, so off-diagonal terms are zero)."""
    neg_exp = np.exp(-x)
    derivative = neg_exp / (1 + neg_exp) ** 2
    return np.diag(derivative)

def d_tanh(x):
    """Jacobian of tanh at x as a diagonal matrix: d/dx tanh(x) = sech(x)^2."""
    sech_squared = 1 / (np.cosh(x)) ** 2
    return np.diag(sech_squared)

def d_softmax(x):
    """Jacobian of softmax at x: diag(s) - s s^T where s = softmax(x)."""
    s = _softmax(x)
    jacobian = np.diag(s) - np.outer(s, s)
    return jacobian

def d_relu(x):
    """Jacobian of relu at x as a diagonal matrix: 1 where x > 0, else 0.

    BUGFIX: the original overwrote `x` in place with 0/1 indicator values.
    The backprop routine stores the pre-activation vectors it passes in and
    reads them again afterwards, so mutating them corrupted the gradients.
    This version leaves the input untouched.
    """
    indicator = (np.asarray(x) > 0).astype(float)
    return np.diag(indicator)

def read_images(imgaes_path = 'data/test_images'):
    """Read an IDX-format (MNIST-style) image file.

    Returns a float array of shape (num_images, rows*cols) with pixel
    values normalized to [0, 1].

    NOTE: the misspelled parameter name "imgaes_path" is kept so existing
    keyword callers do not break.

    BUGFIX: uses a context manager so the file handle is closed even if
    parsing raises (the original leaked the handle on error).
    """
    with open(imgaes_path, 'rb') as images_file:
        # Big-endian 16-byte header: magic number, image count, pixel rows, pixel columns.
        magic_number, images_num, rows, columns = struct.unpack('>4i', images_file.read(16))
        pixels = np.fromfile(images_file, dtype=np.uint8)
    # Flatten each image and normalize byte values to [0, 1].
    images = pixels.reshape(int(images_num), int(rows) * int(columns)) / 255
    return images

def read_labels(labels_path = 'data/test_labels'):
    """Read an IDX-format (MNIST-style) label file.

    Returns a 1-D uint8 array of labels.

    BUGFIX: uses a context manager so the file handle is closed even if
    parsing raises (the original leaked the handle on error).
    """
    with open(labels_path, 'rb') as labels_file:
        # Big-endian 8-byte header: magic number, label count.
        magic_number, labels_num = struct.unpack('>2i', labels_file.read(8))
        labels = np.fromfile(labels_file, dtype=np.uint8)
    return labels

class BackPropagation():
    def __init__(self):


        self.dimensions = [28*28,10] #用于保存各层节点数量
        self.activate_fan = [tanh, _softmax] #用于保存各层激活函数
        #保存各层参数的初始范围。
        self.distribution = [
            {'b':[0, 0]},
            {'b':[0, 0], 
                'w':[-math.sqrt(6/(self.dimensions[0] + self.dimensions[1])),math.sqrt(6/(self.dimensions[0] + self.dimensions[1]))]}, 
        ]


        # self.dimensions = [28*28,12,10]
        # # self.activate_fan = [tanh, tanh, _softmax]
        # self.activate_fan = [relu, relu, _softmax]
        # self.distribution = [
        #     {'b':[0, 0]},
        #     {'b':[0, 0], 
        #         'w':[-math.sqrt(6/(self.dimensions[0] + self.dimensions[1])),math.sqrt(6/(self.dimensions[0] + self.dimensions[1]))]},
        #     {'b':[0, 0], 
        #         'w':[-math.sqrt(6/(self.dimensions[0] + self.dimensions[2])),math.sqrt(6/(self.dimensions[0] + self.dimensions[2]))]},  
        # ]

        self.differential = {tanh:d_tanh, _softmax:d_softmax, relu:d_relu, sigmoid:d_sigmoid}

        self.bath_size = 1000
        self.learn_rate = 10**0.6
        self.epoch = 5
        
        self.parameters = self.init_parameters()
        self.divided_num = 6
        self.test_images = read_images('data/test_images')
        self.test_labels = read_labels('data/test_labels')
        self.train_images = read_images('data/train_images')
        self.train_labels = read_labels('data/train_labels')
        self.valid_images = self.train_images[:len(self.train_images)//self.divided_num]
        self.valid_labels = self.train_labels[:len(self.train_labels)//self.divided_num]
        self.train_images = self.train_images[len(self.train_images)//self.divided_num:]
        self.train_labels = self.train_labels[len(self.train_labels)//self.divided_num:]

        self.train_loss = []
        self.train_accuracy = []
        self.test_loss = []
        self.test_accuracy = []
        
    def init_parameters_b(self, num_layer):
        dist = self.distribution[num_layer]['b']
        return np.random.rand(self.dimensions[num_layer])*(dist[1]-dist[0]) + dist[0]

    def init_parameters_w(self, num_layer):
        dist = self.distribution[num_layer]['w']
        return np.random.rand(self.dimensions[num_layer-1], self.dimensions[num_layer])*(dist[1]-dist[0]) + dist[0]

    def init_parameters(self):
        layers_parameters = []
        for i in range(len(self.dimensions)):
            layer_parameter = {}
            for j in self.distribution[i]:
                if j == 'b':
                    layer_parameter['b'] = self.init_parameters_b(i)
                elif j == 'w':
                    layer_parameter['w'] = self.init_parameters_w(i)
            layers_parameters.append(layer_parameter)
        return layers_parameters



    def predict(self, img_data, parameters):
        img_data = np.array(img_data)
        for i in range(len(self.dimensions)):
            if i == 0:
                img_data = self.activate_fan[i](img_data + parameters[i]['b'])
            else:
                img_data = self.activate_fan[i](np.dot(img_data, parameters[i]['w']) + parameters[i]['b'])
        return img_data

    def sqr_loss(self, image, label, parameters):
        y_predict = self.predict(image, parameters)
        y = np.identity(self.dimensions[-1])[int(label)]
        error = y - y_predict
        return np.dot(error, error)

    def grad_parameters(self, img_data, label_data, parameters):
        img_data = np.array(img_data)
        l_ins = []
        l_outs = []
        garb = []
        loyer_nums = len(self.dimensions)
        for i in range(loyer_nums):
            if i == 0:
                l_in = img_data + parameters[i]['b']
                img_data = self.activate_fan[i](l_in)
            else:
                l_in = np.dot(img_data, parameters[i]['w']) + parameters[i]['b']
                img_data = self.activate_fan[i](l_in)
            l_ins.append(l_in)
            l_outs.append(img_data)
        loyer_num = loyer_nums - 1
        while(loyer_num + 1):
            if loyer_num == loyer_nums - 1:
                error =  np.identity(self.dimensions[-1])[int(label_data)] - l_outs[-1]
                dz_error = -2*np.dot(self.differential[self.activate_fan[loyer_num]](l_ins[loyer_num]), error)
            else:
                dz_error = np.multiply(np.diagonal(self.differential[self.activate_fan[loyer_num]](l_ins[loyer_num])),
                    np.dot(parameters[loyer_num + 1]['w'], dz_error))
            garb_b = dz_error
            if loyer_num:
                garb_w = np.multiply(l_ins[loyer_num-1].reshape(-1,1), dz_error)
                garb.insert(0, {'b':garb_b, 'w':garb_w})
            else:
                garb.insert(0, {'b':garb_b})
            loyer_num -= 1
        return garb
            
    def train_batch(self, batch_tmp, parameters_tmp):
        grad_accumulate = self.grad_parameters(self.train_images[batch_tmp*self.bath_size], 
            self.train_labels[batch_tmp*self.bath_size], 
            parameters_tmp)
        for img_i in range(1,self.bath_size):
            grad_tmp = self.grad_parameters(self.train_images[batch_tmp*self.bath_size + img_i], 
                self.train_labels[batch_tmp*self.bath_size + img_i],
                parameters_tmp)
            for layer_num in range(len(grad_accumulate)):
                for key in grad_accumulate[layer_num].keys():
                    grad_accumulate[layer_num][key] += grad_tmp[layer_num][key]
        for layer_num in range(len(grad_accumulate)):
            for key in grad_accumulate[layer_num].keys():
                grad_accumulate[layer_num][key] /= self.bath_size
        return grad_accumulate
    
    def combine_parameters(self, parameters, grad_accumulate):
        parameters_tmp = copy.deepcopy(parameters)
        for layer_num in range(len(grad_accumulate)):
            for key in grad_accumulate[layer_num].keys():
                parameters_tmp[layer_num][key] -= grad_accumulate[layer_num][key]*self.learn_rate
        return parameters_tmp

    def train(self):
        parameters = self.parameters
        for i in range(self.epoch):
            for batch_tmp in tqdm(range(len(self.train_images)//self.bath_size)):
                if batch_tmp % 10 == 0:
                    pass
                    print('epoch:%d/%d running batch:%d/%d'%(i+1, self.epoch, batch_tmp+1, len(self.train_images + 1)//self.bath_size))
                    print("accuracy:%f"%(self.accuracy(parameters)))
                parameters = self.combine_parameters(parameters, self.train_batch(batch_tmp, parameters))
                self.train_loss.append(self.loss(parameters, "train"))
                self.train_accuracy.append(self.accuracy(parameters, "train"))
                self.test_loss.append(self.loss(parameters, "test"))
                self.test_accuracy.append(self.accuracy(parameters, "test"))
                self.save_accuracy()
                self.save_loss()
        self.parameters = parameters

    def loss(self, parameters, data_type="test"):
        loss_vlaue = 0
        if data_type == "test":
            images = self.test_images
            labels = self.test_labels
        elif data_type == "train":
            images = self.train_images
            labels = self.train_labels
        for img_i in range(len(images)):
            loss_vlaue += self.sqr_loss(images[img_i], labels[img_i], parameters)
        loss_vlaue = loss_vlaue / (len(images))
        return loss_vlaue

    def accuracy(self, parameters, data_type="test"):
        ture_count = 0
        if data_type == "test":
            images = self.test_images
            labels = self.test_labels
        elif data_type == "train":
            images = self.train_images
            labels = self.train_labels
        else:
            images = self.valid_images
            labels = self.valid_labels
        count = len(images)
        for img_i in range(count):
            if np.argmax(self.predict(images[img_i], parameters)) == labels[img_i]:
                ture_count += 1
        return ture_count/count

    def save_parameters(self):
        file_path = "weight/" + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '_BpWeight'
        file_obj = open(file_path, 'wb')
        str = pickle.dumps(self.parameters)
        file_obj.write(str)
        file_obj.close()
        return file_path
    
    def read_parameters(self, file_path):
        with open(file_path,'rb') as file:
            self.parameters  = pickle.loads(file.read())

    def save_loss(self):
        lower = 0
        plt.clf() 
        plt.xlabel('batch')
        plt.ylabel('loss')
        plt.plot(self.train_loss[lower:], color="blue", label="train loss")
        plt.plot(self.test_loss[lower:], color="red", label="test loss")
        plt.legend()
        plt.savefig('loss.png')

    def save_accuracy(self):
        lower = 0
        plt.clf() 
        plt.xlabel('batch')
        plt.ylabel('accuracy')
        plt.plot(self.train_accuracy[lower:], color="blue", label="train accuracy")
        plt.plot(self.test_accuracy[lower:], color="red", label="test accuracy")
        plt.legend()
        plt.savefig('accuracy.png')
    
    def lr_change(self):
        loss_list = []
        batch_tmp = np.random.randint(len(self.train_images)//self.bath_size)
        grad_lr = self.train_batch(batch_tmp, self.parameters)
        # upper = 5
        # lower = -3
        upper = 9
        lower = 4
        step = 1
        for lr_i in tqdm(np.linspace(lower, upper, (upper-lower)//step+1)):
            self.learn_rate = 10**lr_i
            parameters_tmp = self.combine_parameters(self.parameters, grad_lr)
            train_loss = self.loss(parameters_tmp, 'train')
            loss_list.append({str(lr_i): train_loss})
        print(loss_list)



if __name__ == '__main__':
    # Build the network (loads MNIST data from data/), train it, persist the
    # learned weights, and report accuracy on the held-out validation split.
    bp = BackPropagation()
    bp.train()
    bp.save_parameters()
    print(bp.accuracy(bp.parameters, 'valid'))
