# -*- coding: utf-8 -*-
"""
Created on Thu Apr 21 23:17:29 2022

@author: Jovi Wong
"""
import torch
from AutoEncoder import AutoEncoder

class StackedAutoEncoder():
    """Stack of AutoEncoder layers composed greedily.

    Positional ``*layer_dim`` arguments give the width of each layer; one
    child ``AutoEncoder`` is created per consecutive pair of widths and
    stored in ``self.sae`` (shallowest first).

    Keyword options:
        activate_func:   activation passed to every child AutoEncoder
                         (default ``torch.tanh``)
        params_load_dir: parameter directory handed to each child AutoEncoder
                         (default ``"params_cache"``)
        rep_save_dir:    directory for saving representations
                         (default ``"rep_cache"``)
        name:            base name; children are named ``"<name>-AE<idx>"``
    """

    def __init__(self, *layer_dim, **kwargs):
        print("begin to init stacked auto encoder")
        self.sae = []
        self.activate_func = kwargs.get("activate_func", torch.tanh)
        self.params_dir = "params_cache"
        self.rep_save_dir = kwargs.get("rep_save_dir", "rep_cache")
        self.name = kwargs.get("name", "untitled")

        if "params_load_dir" in kwargs:
            # BUG FIX: the original stored the kwarg only under
            # self.params_load_dir, so it never reached the AutoEncoders —
            # they always got the hard-coded "params_cache" default.
            # Keep the old attribute for backward compatibility, and also
            # use it as the directory actually passed to each AutoEncoder.
            self.params_load_dir = kwargs["params_load_dir"]
            self.params_dir = kwargs["params_load_dir"]

        # One AutoEncoder per consecutive pair of layer widths.
        for idx in range(len(layer_dim) - 1):
            ae_name = self.name + "-AE" + str(idx)
            ae = AutoEncoder(ae_name, layer_dim[idx], layer_dim[idx + 1],
                             self.activate_func, self.params_dir)
            self.sae.append(ae)

        # Raises IndexError when called with no layer dims, as before.
        self.in_dim = layer_dim[0]
        self.out_dim = layer_dim[-1]

    def __len__(self):
        """Return the depth (number of stacked auto-encoders)."""
        return len(self.sae)

    def forward(self, x):
        """Encode ``x`` through every layer; return the deepest representation."""
        for ae in self.sae:
            x = ae.encode(x)
        return x

    def reconstruct(self, x):
        """Encode ``x`` to the deepest representation, then decode back out."""
        for ae in self.sae:
            x = ae.encode(x)
        for ae in reversed(self.sae):
            x = ae.decode(x)
        return x