import numpy as np
import random
import math
import minst
import cProfile
from datetime import datetime
import sys
import time
import pstats
from conv import get_output_size

def func_time(func):
    """Decorator that logs wall-clock start/end times around *func*.

    Prints a start line, runs the wrapped function, then prints the
    elapsed seconds, flushing stdout so progress is visible immediately.
    """
    def new_func(*args, **kargs):
        tm_start = time.time()
        print("@%s, {%s} start" % (time.strftime("%X", time.localtime()), func.__name__))
        back = func(*args, **kargs)
        # BUG FIX: the message used to read "token"; "taken" is the intended word.
        print("@%s, {%s} end %.4fs taken" % (time.strftime("%X", time.localtime()), func.__name__, time.time()-tm_start))
        sys.stdout.flush()
        return back
    return new_func


def array_op(array, op):
    """Apply *op* to every element of *array*, mutating it in place."""
    it = np.nditer(array, op_flags=['readwrite'])
    for cell in it:
        cell[...] = op(cell)

def get_son_array(array, row, col, shape):
    """Return the all-depth sub-view of a 3-d *array* starting at (row, col).

    *shape* is the (height, width) of the window. The result is a numpy
    view, so writes through it modify the original array.
    """
    height, width = shape[0], shape[1]
    return array[:, row:row + height, col:col + width]

def get_output_size(input_size, filter_size, step):
    """Output length along one axis for a valid convolution.

    NOTE(review): this local definition shadows the `get_output_size`
    imported from `conv` at the top of the file — confirm which one is
    intended. Fixed the `input_szie` parameter-name typo (all in-file
    calls are positional).
    """
    # floor((input - filter) / step) + 1
    return int((input_size - filter_size) / step + 1)

class Layer(object):
    """Abstract base class for network layers.

    Concrete layers implement forward/calculate_delta/calculate_input_delta/
    calculate_grad/update; `backward` runs the three backprop steps in order.
    """

    def __init__(self, name):
        self.name = name
        # Successor in the network; None for the last layer.
        self.next_layer = None

    def set_next_layer(self, layer):
        self.next_layer = layer

    def set_error_func(self, err_func):
        # Error (loss) function; only the last layer uses it.
        self.err_func = err_func

    # BUG FIX: the abstract markers used `assert(False)`, which is stripped
    # under `python -O`; raise NotImplementedError instead.
    def calculate_input_delta(self):
        raise NotImplementedError

    def calculate_delta(self):
        raise NotImplementedError

    def update(self):
        raise NotImplementedError

    def calculate_grad(self):
        raise NotImplementedError

    def forward(self, input_array):
        raise NotImplementedError

    def backward(self):
        """One backprop step: own delta, then input delta, then gradients."""
        self.calculate_delta()
        self.calculate_input_delta()
        self.calculate_grad()


class FullConnectLayer(Layer):
    """Dense layer: output = activator(w . x + b)."""

    def __init__(self, input_size, output_size, activator, lr):
        self.inputSize = input_size
        self.outputSize = output_size
        self.activator = activator
        # Weights uniform in [-0.1, 0.1); bias starts at zero.
        self.w = np.random.uniform(-0.1, 0.1, (output_size, input_size))
        self.b = np.zeros((output_size, 1))
        self.lr = lr
        super(FullConnectLayer, self).__init__("full_connect")

    def set_label(self, label):
        # Expected output; used by the last layer's delta computation.
        self.label = label

    def forward(self, input_array):
        self.input_array = input_array
        # Flatten whatever shape the previous layer produced into a column.
        self.fmt_input_array = input_array.reshape(self.inputSize, 1)
        weighted = np.dot(self.w, self.fmt_input_array) + self.b
        self.output_array = self.activator.forward(weighted)
        return self.output_array

    def calculate_delta(self):
        derivative = self.activator.backward(self.output_array)
        if self.next_layer:
            upstream = self.next_layer.input_delta_array
        else:
            # Output layer: upstream delta comes from the error function.
            upstream = self.err_func.backward(self.output_array, self.label)
        self.delta = derivative * upstream

    def calculate_input_delta(self):
        # Propagate delta back through w, restoring the input's shape.
        self.input_delta_array = np.dot(self.w.T, self.delta).reshape(*self.input_array.shape)

    def calculate_grad(self):
        self.grad = np.dot(self.delta, self.fmt_input_array.T)
        self.b_grad = self.delta

    def update(self):
        self.w -= self.lr * self.grad
        self.b -= self.lr * self.b_grad

class SigmodActivator(object):
    """Logistic sigmoid activation (original "Sigmod" spelling kept for callers)."""

    def sigmod(self, val):
        # 1 / (1 + e^-x)
        exp_neg = np.exp(-val)
        return 1.0 / (1 + exp_neg)

    def forward(self, input):
        return self.sigmod(input)

    def backward(self, input):
        # Derivative expressed via the sigmoid *output* y: y * (1 - y).
        return input * (1 - input)



class ConvLayer(Layer):
    """2-D convolution layer.

    Holds `filter_num` filters of shape (filter_d, filter_h, filter_w);
    forward zero-pads the input, runs a valid convolution per filter with
    the given step, and applies the activator element-wise in place.
    """

    def __init__(self, filter_w, filter_h, filter_d, filter_num,
                    zero_padding, step, activitor, lrate):
        self.zero_padding = zero_padding
        self.filter_w = filter_w
        self.filter_h = filter_h
        self.filter_d = filter_d
        self.filter_num = filter_num
        self.step = step
        self.activitor = activitor
        self.lrate = lrate
        self.filters = []
        self.init_filter()
        super(ConvLayer, self).__init__("conv")

    def init_filter(self):
        # One randomly-initialised Filter per output channel.
        for i in range(self.filter_num):
            self.filters.append(Filter(self.filter_w, self.filter_h, self.filter_d))

    @staticmethod
    def conv_array_format(array):
        """Expand a 2-d array to 3-d with depth 1; pass 3-d arrays through."""
        if array.ndim not in (2, 3):
            print("conv array format error, wrong ndim", array.ndim)
            assert(False)
            return
        if array.ndim == 2:
            return array.reshape(1, *array.shape)
        return array

    @staticmethod
    def conv(input_array, filter_array, step):
        """Valid cross-correlation of two 3-d arrays with matching depth.

        Inputs must be 3-d even when the depth is only 1 (a depth-1 array
        is really 2-d, but everything is kept 3-d for uniform handling).
        Returns a 2-d output array.
        """
        input_array = ConvLayer.conv_array_format(input_array)
        filter_array = ConvLayer.conv_array_format(filter_array)
        if input_array.shape[0] != filter_array.shape[0]:
            print("conv deep error, input_array deep %s, filter deep %s" %(input_array.shape[0], filter_array.shape[0]))
            assert(False)
            return
        output_array_height = get_output_size(input_array.shape[1], filter_array.shape[1], step)
        output_array_width = get_output_size(input_array.shape[2], filter_array.shape[2], step)
        output_array = np.zeros((output_array_height, output_array_width))
        filter_shape = filter_array.shape[1:]
        for i in range(output_array_height):
            for j in range(output_array_width):
                # Element-wise product of the window with the filter,
                # summed over all depths (ndarray.sum() runs in C).
                output_array[i][j] = (get_son_array(input_array, i*step, j*step, filter_shape)*filter_array).sum()
        return output_array

    @staticmethod
    def conv_small_func():
        # Unused placeholder kept for interface stability.
        pass

    def forward(self, input_array):
        """Run the convolution forward pass; returns the activated output."""
        input_array = ConvLayer.conv_array_format(input_array)
        self.input_array = input_array
        self.padding_input_array = ConvLayer.padding(input_array, self.zero_padding)
        if input_array.ndim != self.filters[0].get_weight().ndim:
            # BUG FIX: the message used to read `.ndim` off the Filter
            # object itself (AttributeError); read it off the weight array.
            print("input size ndim is %d not %d" %(input_array.ndim, self.filters[0].get_weight().ndim))
            return
        output_array = [ConvLayer.conv(self.padding_input_array, filter.get_weight(), self.step)+filter.b for filter in self.filters]
        self.output_array = np.array(output_array)
        array_op(self.output_array, self.activitor.forward)
        return self.output_array

    @staticmethod
    def padding(input_array, zp):
        """Zero-pad a 3-d array by zp cells on each side of the h/w axes."""
        input_d = input_array.shape[0]
        input_h = input_array.shape[1]
        input_w = input_array.shape[2]
        padding_array = np.zeros((input_d, input_h+2*zp, input_w+2*zp))
        padding_array[:, zp:zp+input_h, zp:zp+input_w] = input_array
        return padding_array

    def sensitive_map_expanding(self, ss_map):
        """Re-express the sensitivity map as if the conv step were 1."""
        if self.step == 1:
            return ss_map
        expanding_h = get_output_size(self.padding_input_array.shape[1], self.filter_h, 1)
        expanding_w = get_output_size(self.padding_input_array.shape[2], self.filter_w, 1)
        expanding_array = np.zeros((self.filter_num, expanding_h, expanding_w))
        # Scatter the step>1 deltas into their step==1 positions.
        expanding_array[:, ::self.step, ::self.step] = ss_map
        return expanding_array

    def calculate_delta(self):
        """Compute this layer's delta = f'(output) * upstream delta."""
        derivative_array = np.array(self.output_array)
        array_op(derivative_array, self.activitor.backward)
        if not self.next_layer:
            # Last layer: upstream delta comes from the error function.
            self.delta_array = self.err_func.backward(self.output_array)*derivative_array
        else:
            # The next layer's input shares this layer's output shape.
            # BUG FIX: the original read next_layer.input_delta_array into
            # an unused local and then multiplied a freshly-zeroed
            # delta_array by the derivative, so hidden conv layers always
            # produced an all-zero delta and no gradient flowed back.
            self.delta_array = self.next_layer.input_delta_array*derivative_array

    def calculate_input_delta(self):
        """Compute the gradient w.r.t. this layer's input (for backprop)."""
        input_delta_array = np.zeros(self.padding_input_array.shape)
        # Expand the delta map to the step==1 equivalent.
        expanding_delta = self.sensitive_map_expanding(self.delta_array)
        # From Iw - Fw + 1 = dw and dw + x - Fw + 1 = Iw it follows that
        # x = 2*(Fw-1): pad the delta map by filter_w - 1 on each side.
        # (This over-pads somewhat; a full-conv formulation would avoid it.)
        delta_zero_padding = self.filter_w - 1
        padding_delta = ConvLayer.padding(expanding_delta, delta_zero_padding)
        # delta depth == number of filters == output depth
        #             == next layer's input depth.
        for filter, f_delta in zip(self.filters, padding_delta):
            # Total input delta is the sum over filters. f_delta has depth
            # 1, so convolve each depth of the 180-degree-rotated filter
            # with it separately.
            delta_iter = [ConvLayer.conv(f_delta, np.rot90(d_filter, 2), 1) for d_filter in filter.w]
            input_delta_array += np.array(delta_iter)
        # The result is w.r.t. the padded input; slice out the real region.
        real_input_shape= (self.padding_input_array.shape[1]-2*self.zero_padding,
                            self.padding_input_array.shape[2]-2*self.zero_padding)
        self.input_delta_array = get_son_array(input_delta_array, self.zero_padding, self.zero_padding, real_input_shape)

    def calculate_grad(self):
        """Compute the weight and bias gradients for every filter."""
        # Expand the delta map to the step==1 equivalent.
        expanding_delta = self.sensitive_map_expanding(self.delta_array)
        for e_delta, filter in zip(expanding_delta, self.filters):
            # Iw - Fw + 1 = dw implies Iw - dw + 1 = Fw, so the padded
            # input needs no extra zero padding here. e_delta has depth 1,
            # so convolve each input depth with it separately.
            filter_grad_iter = [ConvLayer.conv(d_input, e_delta, 1) for d_input in self.padding_input_array]
            filter.grad_w = np.array(filter_grad_iter)
            # Every bias input is 1, so the bias gradient is the delta sum.
            filter.grad_b = e_delta.sum()

    def update(self):
        """Apply one gradient step to every filter."""
        for filter in self.filters:
            filter.update(self.lrate)

        
class ReluActivator(object):
    """Scalar ReLU; applied element-wise via array_op."""

    def forward(self, input):
        # f(x) = x for positive x, otherwise 0.
        return input if input > 0 else 0

    def backward(self, output):
        # Derivative is 1 where the output is positive, 0 elsewhere.
        return int(output > 0)

class Filter(object):
    """Weights and bias for one convolution kernel, plus their gradients."""

    def __init__(self, width, height, depth):
        shape = (depth, height, width)
        # Weights uniform in [-1, 1); gradients start at zero.
        self.w = np.random.uniform(-1.0, 1.0, shape)
        self.grad_w = np.zeros(shape)
        self.b = 0
        self.grad_b = 0

    def get_weight(self):
        return self.w

    def get_bias(self):
        return self.b

    def update(self, lr):
        """One in-place SGD step with learning rate *lr*."""
        self.w -= lr * self.grad_w
        self.b -= lr * self.grad_b
    
class max_pool(object):
    """Max pooling strategy.

    forward returns the max of each depth slice; backward returns a mask
    with a single 1 at the first occurrence of each slice's maximum.
    """

    def forward(self, input_array):
        # One scalar (the slice max) per depth slice.
        return [plane.max() for plane in input_array]

    def backward(self, input_array):
        output_array = np.zeros(input_array.shape)
        for d in range(input_array.shape[0]):
            max_element = input_array[d].max()
            for row in range(input_array.shape[1]):
                for col in range(input_array.shape[2]):
                    # BUG FIX: the original tested `if max_element and ...`,
                    # which is False when the slice max is 0 (falsy), so no
                    # position was ever marked. Compare against None.
                    if max_element is not None and input_array[d, row, col] == max_element:
                        output_array[d, row, col] = 1
                        max_element = None  # mark only the first occurrence
        return output_array

# NOTE(review): this is a byte-for-byte duplicate of the max_pool class
# defined earlier in this file; the second definition shadows the first.
# Consider deleting one of them.
class max_pool(object):
    """Max pooling strategy.

    forward returns the max of each depth slice; backward returns a mask
    with a single 1 at the first occurrence of each slice's maximum.
    """

    def forward(self, input_array):
        # One scalar (the slice max) per depth slice.
        return [plane.max() for plane in input_array]

    def backward(self, input_array):
        output_array = np.zeros(input_array.shape)
        for d in range(input_array.shape[0]):
            max_element = input_array[d].max()
            for row in range(input_array.shape[1]):
                for col in range(input_array.shape[2]):
                    # BUG FIX: the original tested `if max_element and ...`,
                    # which is False when the slice max is 0 (falsy), so no
                    # position was ever marked. Compare against None.
                    if max_element is not None and input_array[d, row, col] == max_element:
                        output_array[d, row, col] = 1
                        max_element = None  # mark only the first occurrence
        return output_array


class mean_pool(object):
    """Mean pooling strategy.

    forward averages each depth slice; backward spreads the gradient
    uniformly over each slice.
    """

    def forward(self, input_array):
        # One scalar (the slice mean) per depth slice.
        return [plane.mean() for plane in input_array]

    def backward(self, input_array):
        # Each forward output is the mean of one h*w slice, so the gradient
        # of every element within that slice is 1/(h*w).
        # BUG FIX: the original divided by the total size (d*h*w), which is
        # wrong whenever the depth is greater than 1.
        plane_size = input_array.shape[1] * input_array.shape[2]
        return np.full(input_array.shape, 1.0 / plane_size)
        

class PoolingLayer(Layer):
    """Pooling layer; the strategy (max_pool/mean_pool) is injected as *pool*."""

    def __init__(self, filter_h, filter_w, filter_d, step, pool):
        self.filter_h = filter_h
        self.filter_w = filter_w
        self.filter_d = filter_d
        self.step = step
        self.pool = pool
        super(PoolingLayer, self).__init__("pool")

    def forward(self, input_array):
        self.input_array = input_array
        out_h = get_output_size(input_array.shape[1], self.filter_h, self.step)
        out_w = get_output_size(input_array.shape[2], self.filter_w, self.step)
        output = np.zeros((input_array.shape[0], out_h, out_w))
        window = (self.filter_h, self.filter_w)
        for out_row in range(out_h):
            src_row = out_row * self.step
            for out_col in range(out_w):
                src_col = out_col * self.step
                patch = get_son_array(input_array, src_row, src_col, window)
                # pool.forward yields one pooled value per depth slice.
                output[:, out_row, out_col] = self.pool.forward(patch)
        return output

    def calculate_delta(self):
        # Our output is the next layer's input, so our delta is just the
        # next layer's input delta. (Tests may preset delta_array instead.)
        if self.next_layer:
            self.delta_array = self.next_layer.input_delta_array

    def calculate_input_delta(self):
        source = self.input_array
        accumulated = np.zeros(source.shape)
        window = (self.filter_h, self.filter_w)
        for out_row in range(self.delta_array.shape[1]):
            src_row = out_row * self.step
            for out_col in range(self.delta_array.shape[2]):
                src_col = out_col * self.step
                patch = get_son_array(source, src_row, src_col, window)
                # get_son_array returns a view, so += writes into accumulated.
                target = get_son_array(accumulated, src_row, src_col, window)
                target += self.pool.backward(patch) * self.delta_array[:, out_row:out_row+1, out_col:out_col+1]
        self.input_delta_array = accumulated

    # Pooling has no weights: nothing to differentiate or update.
    def calculate_grad(self):
        pass

    def update(self):
        pass

class IdentityActivator(object):
    """No-op activation: f(x) = x, f'(x) = 1. Used for gradient checking."""

    def forward(self, weighted_input):
        return weighted_input

    def backward(self, output):
        # Constant derivative of the identity function.
        return 1

class TestErrFunc(object):
    """Trivial loss for gradient checking: E = sum of all outputs."""

    def forward(self, input_array):
        return input_array.sum()

    def backward(self, input_array):
        # dE/dx = 1 for every element.
        return np.ones(input_array.shape)

def test_conv():
    """Finite-difference gradient check for ConvLayer's weight gradients."""
    input_array = np.array(
        [[[0,1,1,0,2],
          [2,2,2,2,1],
          [1,0,0,2,0],
          [0,1,1,0,0],
          [1,2,0,0,2]],
         [[1,0,2,2,0],
          [0,0,0,2,0],
          [1,2,1,2,1],
          [1,0,0,0,0],
          [1,2,1,1,1]],
         [[2,1,2,0,0],
          [1,0,0,1,0],
          [0,2,1,0,1],
          [0,1,2,2,2],
            [2,1,0,0,1]]])
    conv_layer = ConvLayer(3, 3, 3, 2, 1, 1, IdentityActivator(), 0.001)
    err_func = TestErrFunc()
    conv_layer.set_error_func(err_func)
    conv_layer.forward(input_array)
    conv_layer.backward()

    epsilon = 10e-4

    for flt in conv_layer.filters:
        # Perturb each weight up and down and compare the numeric slope
        # against the analytic gradient computed by backward().
        for d, row, col in np.ndindex(*flt.w.shape):
            g_w = flt.grad_w[d, row, col]
            flt.w[d, row, col] += epsilon
            delta1 = err_func.forward(conv_layer.forward(input_array))
            flt.w[d, row, col] -= 2*epsilon
            delta2 = err_func.forward(conv_layer.forward(input_array))
            print("delta1 = %f, delta2 = %f" %(delta1, delta2))
            expect_grad = (delta1-delta2)/(2*epsilon)
            # restore the original weight
            flt.w[d, row, col] += epsilon
            print ("w %s pos(%d, %d, %d) expect_grad = %f, actual = %f" %(flt.w[d, row, col], d, row, col, expect_grad, g_w))

def test_pool():
    """Exercise PoolingLayer forward/backward on a fixed 2x4x4 input."""
    input_array = np.array(
        [[[1,1,2,4],
          [5,6,7,8],
          [3,2,1,0],
          [1,2,3,4]],
         [[0,1,2,3],
          [4,5,6,7],
          [8,9,0,1],
          [3,4,5,6]]], dtype=np.float64)

    #pool_layer = PoolingLayer(2, 2, 2, 2, max_pool())
    pool_layer = PoolingLayer(2, 2, 2, 2, mean_pool())
    print("max_pool output: \n%s" %(pool_layer.forward(input_array)))
    # Inject an upstream delta by hand (there is no next layer here).
    pool_layer.delta_array = np.array(
                            [[[1,2],
                            [2,4]],
                            [[3,5],
                            [8,2]]], dtype=np.float64)
    pool_layer.backward()
    print("max_pool input_delta: \n%s" %(pool_layer.input_delta_array))

def test_full_connect():
    # TODO: add a finite-difference gradient check for FullConnectLayer,
    # mirroring test_conv().
    pass

class RBF(object):
    """Half squared-error loss: E = 0.5 * sum((output - label)^2)."""

    def forward(self, input_array, label_array):
        # BUG FIX: the original used `reduce` without importing it from
        # functools, which is a NameError on Python 3. Vectorized
        # equivalent of 0.5 * sum of element-wise squared differences.
        diff = input_array.flatten() - label_array.flatten()
        return 0.5 * float(np.dot(diff, diff))

    def backward(self, input_array, label_array):
        # dE/d(output) = output - label.
        return -(label_array - input_array)

class Network(object):
    """A feed-forward network: an ordered list of layer objects chained together."""

    # layers: the layer objects, in forward order
    def __init__(self, layers, err_func):
        self.layers = layers
        self.err_func = err_func
        prev_layer = None
        # Link each layer to its successor so backprop can pull deltas.
        for layer in self.layers:
            if prev_layer:
                prev_layer.set_next_layer(layer)
            prev_layer = layer

    def train(self, samples, labels):
        """Run one pass of online (per-sample) training over the set."""
        print("start train")
        for i, sample in enumerate(samples):
            print("train sample %s" %(i))
            sys.stdout.flush()
            self._train_one_sample(sample, labels[i])
            # BUG FIX: a leftover debug `break` stopped training after the
            # first sample; removed so every sample is used.

    @func_time
    def _train_one_sample(self, sample, label):
        # Forward pass, backprop, then one weight update.
        self.predict(sample)
        self._calc_gradient(label)
        self._update_weight()
        #self.gradient_check(sample, label)

    def predict(self, sample):
        """Forward *sample* through all layers; return the final output."""
        # /256 normalises the 28*28 pixel input; without it the recognition
        # rate is terrible.
        # NOTE(review): /255 is the usual normaliser for 8-bit pixels —
        # confirm 256 is intentional.
        input_array = sample/256
        for layer in self.layers:
            input_array = layer.forward(input_array)
        return self.layers[-1].output_array

    def _update_weight(self):
        for layer in self.layers:
            layer.update()

    def _calc_gradient(self, label):
        """Backpropagate from the last layer to the first."""
        label_array = label.reshape(label.size, 1)
        for layer in self.layers[::-1]:
            if layer == self.layers[-1]:
                # Only the output layer needs the label and error function.
                layer.set_label(label_array)
                layer.set_error_func(self.err_func)
            layer.backward()

    def evaluate(self, testSamples, testLabels):
        """Return the classification error rate over the test set."""
        total = len(testSamples)
        error = 0
        for i in range(total):
            output = self.predict(testSamples[i])
            val = Network.get_result(list(output.flat))
            label_val = Network.get_result(list(testLabels[i].flat))
            if val != label_val:
                error += 1
        errorRate = float(error)/total
        print("total %d, error %d, errorRate %f" %(total, error, errorRate))
        return errorRate

    @staticmethod
    def get_result(output):
        """Argmax over a flat list of outputs (the predicted class index)."""
        # Local renamed from `max`, which shadowed the builtin.
        best = 0
        for i in range(1, len(output)):
            if output[best] < output[i]:
                best = i
        return best




def main(images, labels, test_images, test_labels):
    """Build a LeNet-style network and train until the test error stops improving."""
    layers = []
    learning_rate = 0.01*math.sqrt(10)
    # Layer 1: conv, six 5x5 kernels, padding 2
    layers.append(ConvLayer(5, 5, 1, 6, 2, 1, ReluActivator(), learning_rate))
    # Layer 2: 2x2 max pooling
    layers.append(PoolingLayer(2, 2, 6, 2, max_pool()))
    # Layer 3: conv, sixteen 5x5 kernels
    layers.append(ConvLayer(5, 5, 6, 16, 0, 1, ReluActivator(), learning_rate))
    # Layer 4: 2x2 max pooling
    layers.append(PoolingLayer(2, 2, 16, 2, max_pool()))
    # Layer 5: conv, 120 5x5 kernels
    layers.append(ConvLayer(5, 5, 16, 120, 0, 1, ReluActivator(), learning_rate))
    # Layer 6: fully connected, 120 -> 84
    layers.append(FullConnectLayer(120, 84, SigmodActivator(), learning_rate))
    # Layer 7: output, 84 -> 10
    layers.append(FullConnectLayer(84, 10, SigmodActivator(), learning_rate))
    network = Network(layers, RBF())

    index = 0
    lastErrorRadio = 1.0
    while True:
        # BUG FIX: the original trained on the globals train_images /
        # train_labels instead of the images/labels parameters (NameError
        # unless the caller's globals happen to match), and an unconditional
        # debug `break` exited after one iteration so evaluation never ran.
        network.train(images, labels)
        index += 1
        errorRadio = network.evaluate(test_images, test_labels)
        print("%s: after %s train errorRate %f" %(datetime.now(), index*10, errorRadio))
        sys.stdout.flush()
        # Every 10 rounds, stop once the test error stops improving.
        if index%10 == 0:
            if errorRadio > lastErrorRadio:
                break
            else:
                lastErrorRadio = errorRadio


if __name__ == "__main__":
    # Run the built-in numeric sanity checks: a finite-difference gradient
    # check for the conv layer and a forward/backward run of the pool layer.
    test_conv()
    test_pool()
    # The MNIST training/profiling pipeline below is currently disabled;
    # uncomment it (and adjust TRAIN_DATA_PATH) to train via main() under
    # cProfile and print the top cumulative-time entries with pstats.
    #TRAIN_DATA_PATH = "D:/code/python/train_data"
    #mntImg = minst.MinstImage()
    #trainMntImgPath = "%s/train-images-idx3-ubyte/train-images.idx3-ubyte" %(TRAIN_DATA_PATH)
    #mntImg.parse(trainMntImgPath)
    #train_images = mntImg.get_images()[1:100]

    #mntLable = minst.MinstLabel()
    #trainMntLablePath = "%s/train-labels-idx1-ubyte/train-labels.idx1-ubyte" %(TRAIN_DATA_PATH)
    #mntLable.parse(trainMntLablePath)
    #train_labels = mntLable.get_labels()[1:100]

    #testMntImgPath = "%s/t10k-images-idx3-ubyte/t10k-images.idx3-ubyte" %(TRAIN_DATA_PATH)
    #mntImg.parse(testMntImgPath)
    #test_images = mntImg.get_images()[1:100]


    #testMntLablePath = "%s/t10k-labels-idx1-ubyte/t10k-labels.idx1-ubyte" %(TRAIN_DATA_PATH)
    #mntLable.parse(testMntLablePath)
    #test_labels = mntLable.get_labels()[1:100]

    #cProfile.run("main(train_images, train_labels, test_images, test_labels)", "perf.out")
    #p = pstats.Stats("perf.out")
    #p.strip_dirs().sort_stats("cumulative", "name").print_stats(0.5)


