from .Activation import *
from .Layer import *
from .Lost import *
import numpy as np
import os

class Model:
    """A simple sequential neural-network model.

    Layers are stored in insertion order; the forward pass walks the list
    front-to-back and backpropagation walks it back-to-front.
    """

    def __init__(self, **kwargs) -> None:
        """Create a model.

        Keyword Args:
            learning_rate: gradient-descent step size (may also be set
                later via ``Compile``).
            Lost_fun: loss object (may also be set via ``Compile``).
        """
        self.learning_rate = kwargs.get("learning_rate")
        self.lost = kwargs.get("Lost_fun")
        self.layers: list[Layer] = []

    def Compile(self, Lost_fun: Lost, learning_rate: float = 0.01):
        """Set the loss function and learning rate before training."""
        self.learning_rate = learning_rate
        self.lost = Lost_fun

    def Load(self, path: str):
        """Load layers from a binary file previously written by ``Save``.

        The file is a sequence of records: a one-byte (uint8) layer-type
        tag followed by that layer's own serialized payload.
        """
        with open(path, "rb") as file:
            remaining = os.fstat(file.fileno()).st_size
            while remaining > 0:
                # One-byte tag identifying which Layer subclass to build.
                layer_type = np.frombuffer(file.read(1), dtype=np.uint8)
                layer, size = GetLayerType(layer_type).Load(file)
                self.layers.append(layer)
                # Consumed this iteration: the payload (`size`) plus the
                # 1-byte type tag.
                remaining -= size + 1

    def Save(self, path: str):
        """Save the model to *path* in binary form (one record per layer).

        Note: the ``with`` block closes the file; the original's explicit
        ``file.close()`` inside it was redundant and has been removed.
        """
        with open(path, "wb") as file:
            for layer in self.layers:
                file.write(layer.Save())

    def add(self, layers: Layer):
        """Append a layer to the end of the network."""
        self.layers.append(layers)

    def Prediction(self, x: np.matrix) -> np.matrix:
        """Run a forward pass on *x* (one sample per row) and return predictions."""
        # Layers operate on column-vector samples, so transpose in ...
        x = x.T
        for layer in self.layers:
            x = layer.ForwardPropagation(x)
        # ... reshape to (output_size, batch) and transpose back out so
        # callers get one prediction per row again.
        return x.reshape(self.layers[-1].Neura_size, -1).T

    def fit_Batchones(self, x: np.matrix, y: np.matrix):
        """Train on one full batch with batch gradient descent (BGD).

        Args:
            x: training inputs, one sample per row.
            y: training targets, one sample per row.

        Returns:
            The mean loss (cost) over the batch.
        """
        batch_size = len(x)
        cost = 0
        for sample_x, sample_y in zip(x, y):
            sample_x = sample_x.reshape(-1, 1)
            sample_y = sample_y.reshape(-1, 1)

            # Forward propagation through the whole stack.
            self.layers[0].ForwardPropagation(sample_x)
            for i in range(1, len(self.layers)):
                self.layers[i].ForwardPropagation(self.layers[i - 1].output)

            cost += np.sum(self.lost(self.layers[-1].output, sample_y))

            # Output-layer error.  The Softmax branch uses the simplified
            # gradient (yhat - y) — presumably paired with cross-entropy
            # loss; TODO confirm against the Lost implementation.
            # BUG FIX: isinstance() instead of a type() equality check so
            # Softmax subclasses take the same branch.
            if isinstance(self.layers[-1].activation, Softmax):
                self.layers[-1].err = self.layers[-1].output - sample_y
            else:
                self.layers[-1].err = self.lost.Dl_dyhat(
                    self.layers[-1].output, sample_y
                )

            # Backpropagate the error toward the input layer.
            for i in range(len(self.layers) - 1, 0, -1):
                self.layers[i - 1].err = self.layers[i].BP(self.layers[i - 1].output)
            self.layers[0].BP(sample_x)

        # Apply the accumulated gradients once per batch (BGD), averaged
        # over the batch size.
        for layer in self.layers:
            layer.GrandientDescent(self.learning_rate, batch_size)
        return cost / batch_size
        

