import pycuda.autoinit
import pycuda.driver as drv
import numpy as np
# timer for benchmarking the training loop
from timeit import default_timer as timer
from pycuda.compiler import SourceModule
from jinja2 import Template

class Layer:
    """A fully connected neural-network layer whose forward and backward
    passes run as CUDA kernels compiled from the ``nn_gpu.cu`` Jinja2
    template.

    Layers are chained: each layer reads its input activations directly
    from the previous layer's GPU output buffer, so no host round-trips
    happen between layers during training.
    """

    def __init__(self, name="layer", inputlayer=None, input_sharp=(28, 784),
                 unit=56, block_size=14):
        """Allocate host and GPU buffers for weights, biases, activations
        and deltas.

        Parameters
        ----------
        name : str
            Label used in debug printouts.
        inputlayer : Layer | np.ndarray | None
            Previous layer (its GPU output buffer is shared directly) or
            the raw flattened input batch as a float32 ndarray.
        input_sharp : sequence of two ints
            ``(batch_size, input_width)``.  ("sharp" is a historical typo
            for "shape", kept because callers pass it by keyword.)
        unit : int
            Number of neurons, i.e. the output width of this layer.
        block_size : int
            CUDA thread-block edge length; assumed to divide both `unit`
            and the batch size evenly (grid sizes use integer division).
        """
        self.name = name
        self.prelayer = None
        self.postlayer = None

        self.block_size = block_size
        self.batch_size = input_sharp[0]

        # hidden-layer width (number of units in this layer)
        self.hide_num = unit
        self.input_sharp = input_sharp

        # learning rate used by the update kernel
        self.eta = np.float32(1)

        ### input activations a_{l-1}
        if isinstance(inputlayer, Layer):
            print("inputlayer : " + name)
            # Chain on the GPU: reuse the previous layer's output buffer
            # as this layer's input buffer.
            self.al_1_shape = inputlayer.al_shape
            self.al_1 = inputlayer.al
            self.al_1_gpu = inputlayer.al_gpu

            self.prelayer = inputlayer
            inputlayer.setpostlayer(self)

        elif isinstance(inputlayer, np.ndarray):
            # First layer: copy the raw input batch up to the device once.
            self.al_1_shape = [input_sharp[0], input_sharp[1]]
            self.al_1 = inputlayer
            self.al_1_gpu = drv.mem_alloc(self.al_1.nbytes)
            drv.memcpy_htod(self.al_1_gpu, self.al_1)

        # weights W_l, randomly initialised, stored flat row-major
        self.wl_shape = [input_sharp[1], unit]
        self.wl = np.random.randn(self.wl_shape[0] * self.wl_shape[1]).astype(np.float32)
        self.wl_gpu = drv.mem_alloc(self.wl.nbytes)
        drv.memcpy_htod(self.wl_gpu, self.wl)

        # biases b_l
        self.bl_shape = [unit]
        self.bl = np.random.randn(self.bl_shape[0]).astype(np.float32)
        self.bl_gpu = drv.mem_alloc(self.bl.nbytes)
        drv.memcpy_htod(self.bl_gpu, self.bl)

        ### output activations a_l
        self.al_shape = [input_sharp[0], unit]
        self.al = np.zeros(self.al_shape[0] * self.al_shape[1]).astype(np.float32)
        self.al_gpu = drv.mem_alloc(self.al.nbytes)

        # error term delta_l (same size as the output activations)
        self.delta_gpu = drv.mem_alloc(self.al.nbytes)

        # CUDA grid covering the (batch_size x unit) output matrix,
        # one block per block_size x block_size tile
        self.grid_size = [int(unit / block_size), int(input_sharp[0] / block_size)]

    def compilecuda(self):
        """Render the CUDA template for this layer's dimensions and compile
        the four kernels (feedforward / backprop / bplast / updatewb).

        Must be called after the *next* layer has been attached via
        `setpostlayer`, so the backprop kernel is sized for the
        downstream layer's width.
        """
        # Width of the next layer's output.  The template needs some value
        # even for the last layer, so fall back to a default there.
        post_a_width = 14
        if self.postlayer is not None:
            post_a_width = self.postlayer.al_shape[1]

        # region compile CUDA kernels from the Jinja2 template
        # `with` guarantees the template file is closed (the original
        # leaked the file handle).
        with open("nn_gpu.cu", "rb") as fo:
            content = str(fo.read(), encoding="utf8")
        tpl = Template(content)
        rendered_tpl = tpl.render(
            block_size=self.block_size, batch_size=self.batch_size,
            pre_z_width=self.al_1_shape[1], w_width=self.hide_num,
            post_z_width=post_a_width, inv_batch_size=1 / self.batch_size)
        mod = SourceModule(rendered_tpl)
        self.feedforwardfunc = mod.get_function("feedforward")
        self.backpropfunc = mod.get_function("backprop")
        self.bplastfunc = mod.get_function("bplast")
        self.bpupdatewbfunc = mod.get_function("updatewb")
        # endregion

    def setpostlayer(self, postlayer):
        """Record the next layer in the chain (needed by backprop)."""
        self.postlayer = postlayer

    def feedforward(self, inputs=None):
        """Run the forward-pass kernel: al = f(al_1 . wl + bl) on the GPU.

        `inputs` is unused; it is kept for caller compatibility.
        """
        self.feedforwardfunc(
            self.al_1_gpu, self.wl_gpu, self.bl_gpu, self.al_gpu,
            block=(self.block_size, self.block_size, 1),
            grid=(self.grid_size[0], self.grid_size[1]))

    def printloss(self, y, x):
        """Copy activations back to the host and print the L1 loss and the
        number of misclassified samples in the batch.

        Parameters
        ----------
        y : np.ndarray
            Flattened one-hot targets, batch_size * hide_num entries.
        x : np.ndarray
            Flattened input batch; unused (kept for caller compatibility).
        """
        drv.memcpy_dtoh(self.al, self.al_gpu)
        loss = np.sum(np.abs(self.al - y))
        err = 0
        for i in range(self.batch_size):
            j = i * self.hide_num
            # A sample is wrong when the argmax of the predicted row
            # differs from the argmax of the one-hot target row.
            pred = np.argmax(self.al[j:j + self.hide_num])
            target = np.argmax(y[j:j + self.hide_num])
            err += int(pred != target)
        print("loss: %f" % loss)
        print("err: %d" % err)

    def backprop(self):
        """Propagate the next layer's delta back into this layer's delta."""
        self.backpropfunc(
            self.al_gpu, self.postlayer.wl_gpu, self.postlayer.delta_gpu, self.delta_gpu,
            block=(self.block_size, self.block_size, 1),
            grid=(self.grid_size[0], self.grid_size[1]))

    def testbp(self):
        """Debug helper: run backprop once with known activations/weights
        (all ones) and a random downstream delta."""
        self.al = np.ones(self.al_shape[0] * self.al_shape[1]).astype(np.float32)
        drv.memcpy_htod(self.al_gpu, self.al)
        self.postlayer.wl = np.ones(len(self.postlayer.wl)).astype(np.float32)
        drv.memcpy_htod(self.postlayer.wl_gpu, self.postlayer.wl)
        delta = np.random.randn(len(self.postlayer.al)).astype(np.float32)
        drv.memcpy_htod(self.postlayer.delta_gpu, delta)
        self.backpropfunc(
            self.al_gpu, self.postlayer.wl_gpu, self.postlayer.delta_gpu, self.delta_gpu,
            block=(self.block_size, self.block_size, 1),
            grid=(self.grid_size[0], self.grid_size[1]))

    def bplast(self, y_gpu):
        """Compute the output layer's delta from the targets (used on the
        last layer instead of `backprop`)."""
        self.bplastfunc(
            self.al_gpu, y_gpu, self.delta_gpu,
            block=(self.block_size, self.block_size, 1),
            grid=(self.grid_size[0], self.grid_size[1]))

    def updatewb(self):
        """Apply the gradient step (scaled by `eta`) to weights and biases."""
        # The update kernel's grid covers the weight matrix, not the
        # activation matrix, so it needs its own grid size.
        grid_size = [int(self.wl_shape[1] / self.block_size),
                     int(self.wl_shape[0] / self.block_size)]
        self.bpupdatewbfunc(
            self.al_1_gpu, self.delta_gpu, self.wl_gpu, self.bl_gpu, self.eta,
            block=(self.block_size, self.block_size, 1),
            grid=(grid_size[0], grid_size[1]))

    def printal(self):
        """Debug: print the first two activations of every sample row."""
        print(self.name + " al:")
        drv.memcpy_dtoh(self.al, self.al_gpu)
        for x in range(self.input_sharp[0]):
            y = x * self.hide_num
            print(self.al[y:2 + y])

    def printwl(self):
        """Debug: print the first two weights of every weight-matrix row."""
        print(self.name + " wl:")
        drv.memcpy_dtoh(self.wl, self.wl_gpu)
        for x in range(self.input_sharp[1]):
            y = x * self.hide_num
            print(self.wl[y:2 + y])

    def printdelta(self):
        """Debug: print the delta sum of the first sample row."""
        print(self.name + " delta:")
        delta = np.zeros(self.al_shape[0] * self.al_shape[1]).astype(np.float32)
        drv.memcpy_dtoh(delta, self.delta_gpu)
        for x in range(1):
            y = x * self.al_shape[1]
            print(np.sum(delta[y:self.hide_num + y]))

    def printb(self):
        """Debug: print the bias vector."""
        print(self.name + " b:")
        drv.memcpy_dtoh(self.bl, self.bl_gpu)
        print(self.bl)


if __name__ == '__main__':

    batch_size = 28

    import mnist_loader
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

    # Take a single fixed mini-batch from the front of the training set.
    mini_batches = training_data[0:batch_size]

    print(type(mini_batches[0]))

    # Flatten the (image, label) pairs into two flat float32 vectors
    # suitable for copying to the GPU.
    x_parts = []
    y_parts = []
    for image, label in mini_batches:
        x_parts.extend(image)
        y_parts.extend(label)
    x = np.array(x_parts).flatten().astype(np.float32)
    y = np.array(y_parts).flatten().astype(np.float32)

    # Targets live on the device for the whole run.
    y_gpu = drv.mem_alloc(y.nbytes)
    drv.memcpy_htod(y_gpu, y)

    # Two-layer network: 784 -> 56 -> 14, chained on the GPU.
    layer1 = Layer(name="layer1", inputlayer=x, input_sharp=[batch_size, 784], unit=56, block_size=14)
    layer2 = Layer(name="layer2", inputlayer=layer1, input_sharp=[batch_size, 56], unit=14, block_size=7)
    layer1.compilecuda()
    layer2.compilecuda()

    start = timer()
    for step in range(1000000):
        # Forward pass through both layers, then compute the output delta.
        layer1.feedforward()
        layer2.feedforward()
        layer2.bplast(y_gpu)

        # Periodically report loss/error on the host.
        if step % 1000 == 0:
            layer2.printloss(y, x)

        # Backpropagate into layer1, then apply the weight/bias updates.
        layer1.backprop()
        layer1.updatewb()
        layer2.updatewb()

    run_time = timer() - start
    print("gpu run time %f seconds " % (run_time))
