import numpy as np

from Activation import np
from .Activation import *
import os
    
# Map numpy scalar types to the 1-byte tags used in the serialized layer
# format (the gaps at 5 and 10 separate the unsigned / signed / float groups).
VariableFormTypeMap = {
    np.uint8: 1,
    np.uint16: 2,
    np.uint32: 3,
    np.uint64: 4,
    np.int8: 6,
    np.int16: 7,
    np.int32: 8,
    np.int64: 9,
    np.float16: 11,
    np.float32: 12,
    np.float64: 13,
}

# Inverse mapping (tag -> numpy type), derived from the forward map so the
# two tables can never drift apart.
VariableFormTypeMapIv = {tag: dtype for dtype, tag in VariableFormTypeMap.items()}



class Layer:
    """Abstract base class for network layers.

    Subclasses implement the forward/backward passes, the gradient-descent
    update, and (de)serialization. The base class only stores the cached
    output, the back-propagated error, and the storage scalar type.
    """

    def __init__(self, Vtype=np.float32) -> None:
        # Cached forward output and back-propagated error for this layer.
        self.output = 0
        self.err = 0
        # NOTE(review): attribute name keeps the original misspelling
        # ("Vtyepe") because subclasses such as Dense.Save read it by
        # that exact name — renaming would break them.
        self.Vtyepe = Vtype

    @staticmethod
    def Load(fileFd):
        """Deserialize a layer from an open binary file object."""
        pass

    def Save(self) -> bytes:
        """Serialize this layer to bytes."""
        pass

    def Id(self) -> int:
        """Numeric layer-type tag (see GetLayerType)."""
        pass

    def GrandientDescent(self, learning_rate: float, Batchsize: int) -> np.matrix:
        """Apply accumulated gradients averaged over the batch, then reset them."""
        pass

    def BP(self, PreA: np.matrix) -> np.matrix:
        """Back-propagate self.err; return the error for the previous layer."""
        pass

    def ForwardPropagation(self, x: np.matrix) -> np.matrix:
        """Compute and return the layer output for input x."""
        pass
    
    
    
    
    
class Dense(Layer):
    """Fully connected layer: output = activation(w @ x + b)."""

    def __init__(self, Neural_size: int, input_size: int, activation: Activation, Vtype=np.float32, weight: np.matrix = None, bias: np.matrix = None) -> None:
        """
        Neural_size: number of neurons (rows of w).
        input_size:  dimension of the input column vector (columns of w).
        activation:  Activation instance providing __call__ and Da_dz.
        Vtype:       numpy scalar type used for storage and serialization.
        weight/bias: optional pre-initialized parameters; when weight is
                     given, bias is used as-is (both should be supplied).
        """
        super().__init__(Vtype)
        if weight is not None:
            self.w = weight
            self.b = bias
        else:
            # Standard-normal initialization, cast to the storage type.
            self.w = np.random.randn(Neural_size, input_size).astype(Vtype)
            self.b = np.random.randn(Neural_size, 1).astype(Vtype)

        self.activation: Activation = activation
        # NOTE(review): keeps the original misspelled attribute name for
        # backward compatibility with existing readers of this field.
        self.Neura_size = Neural_size
        self.input_size = input_size
        # Gradient accumulators, summed over the batch until GrandientDescent.
        self.d_dw = 0
        self.d_db = 0

    def ForwardPropagation(self, x: np.matrix) -> np.matrix:
        """Return activation(w @ x + b); the result is also cached in self.output."""
        a = self.Liner(x)
        self.output = self.Act(a)
        return self.output

    def Liner(self, a: np.matrix) -> np.matrix:
        """Affine part w @ a + b; a is a column vector of shape (input_size, 1)."""
        return self.w @ a + self.b

    def BP(self, PreA: np.matrix) -> np.matrix:
        """Back-propagate self.err, accumulate parameter gradients, and
        return the error for the previous layer as an (input_size, 1) column.

        PreA: the previous layer's activation, i.e. this layer's input.
        """
        if isinstance(self.activation, Softmax):
            # Softmax is assumed paired with a loss whose error already
            # includes the activation derivative (e.g. cross-entropy).
            tmp = self.err
        else:
            tmp = self.err * self.activation.Da_dz(self.output)

        # Elementwise broadcast (N,1)*(N,I) followed by a column sum below
        # is equivalent to w.T @ tmp.
        preerr = (tmp * self.w)
        d_dw = tmp @ PreA.T
        d_db = tmp
        self.d_dw = self.d_dw + d_dw
        self.d_db = self.d_db + d_db
        return np.sum(preerr, axis=0).reshape(-1, 1)

    def GrandientDescent(self, learning_rate: float, Batchsize: int) -> np.matrix:
        """Apply the accumulated gradients averaged over Batchsize, then reset them."""
        self.w = self.w - learning_rate * self.d_dw / Batchsize
        self.b = self.b - learning_rate * self.d_db / Batchsize
        self.d_dw = 0
        self.d_db = 0

    def Act(self, z: np.matrix) -> np.matrix:
        """Apply the layer's activation function."""
        return self.activation(z)

    def Id(self) -> int:
        """Layer type tag (0 = Dense), see GetLayerType."""
        return 0

    @staticmethod
    def Load(fileFd):
        """Read a Dense layer from fileFd; the leading layer-id byte is
        assumed to have been consumed by the caller (see GetLayerType).

        Returns (layer, bytes_read): the 10 header bytes plus the weight
        and bias payloads.
        """
        V_type = np.frombuffer(fileFd.read(1), dtype=np.uint8)[0]
        N_Size = np.frombuffer(fileFd.read(4), dtype=np.uint32)[0]
        I_Size = np.frombuffer(fileFd.read(4), dtype=np.uint32)[0]
        ActType = np.frombuffer(fileFd.read(1), dtype=np.uint8)[0]

        V_type = VariableFormTypeMapIv[V_type]
        # Bytes per scalar of the stored type.
        size = len(V_type(1).tobytes())

        ActType = ActTypeMap[ActType]

        W = np.frombuffer(fileFd.read(N_Size * I_Size * size), dtype=V_type).reshape(N_Size, I_Size)
        b = np.frombuffer(fileFd.read(N_Size * size), dtype=V_type).reshape(N_Size, 1)

        # BUGFIX: the bias payload is N_Size*size bytes (was I_Size*size),
        # which mis-counted the bytes read for non-square layers.
        return Dense(N_Size, I_Size, ActType(), V_type, W, b), 10 + N_Size * I_Size * size + N_Size * size

    def Save(self) -> bytes:
        """Serialize as: [layer id 0, vtype tag] + uint32 sizes + [activation id] + w + b."""
        head = bytes([0, VariableFormTypeMap[self.Vtyepe]])

        # BUGFIX: sizes must be serialized as uint32 to match the 4-byte
        # reads in Load; the platform-default int dtype is 8 bytes on
        # 64-bit Linux/macOS, which corrupted the round trip.
        size = np.array([self.Neura_size, self.input_size], dtype=np.uint32).tobytes()

        acttyep = bytes([self.activation.Id()])

        tmpw = self.w.astype(self.Vtyepe).tobytes()
        tmpb = self.b.astype(self.Vtyepe).tobytes()

        return head + size + acttyep + tmpw + tmpb
      
 
# TODO: convolution layer — forward/backward passes not implemented yet
class Cover(Layer):
    """Convolution layer stub: only kernel storage is implemented so far."""

    def __init__(self, shape: tuple, inputshape: tuple, Vtype=np.float32, weight: np.matrix = None) -> None:
        """
        shape:      kernel shape passed to np.random.randn when no weight is given.
        inputshape: expected input shape (stored, not yet used).
        Vtype:      numpy scalar type for the kernel.
        weight:     optional pre-initialized kernel, used as-is.
        """
        super().__init__(Vtype)
        self.inputshape = inputshape
        self.shape = shape
        # Reuse the supplied kernel, otherwise draw a standard-normal one.
        self.w = weight if weight is not None else np.random.randn(*shape).astype(Vtype)

    def Id(self) -> int:
        """Layer type tag (1 = Cover), see GetLayerType."""
        return 1

    def Save(self) -> bytes:
        """Not implemented yet."""
        pass

    def ForwardPropagation(self, x: np.matrix) -> np.matrix:
        """Not implemented yet."""
        pass

    def BP(self, PreA: np.matrix) -> np.matrix:
        """Not implemented yet."""
        pass

    def GrandientDescent(self, learning_rate: float, Batchsize: int) -> np.matrix:
        """Not implemented yet."""
        pass

def GetLayerType(Id: int):
    """Map a serialized layer-id tag to its Layer subclass (0 = Dense, 1 = Cover)."""
    return (Dense, Cover)[Id]
    
    
if __name__ == '__main__':
    # No demo or self-test for this module yet.

    pass