import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F


class TrainParams:
    """Hyper-parameters shared by every training run.

    :ivar optim_type: optimizer selector —
        0: SGD
        1: ADAM

    :ivar load_weights: checkpoint policy —
        0: train from scratch
        1: load and test
        2: load if it exists and continue training

    :ivar save_criterion: when to save a new checkpoint —
        0: max validation accuracy
        1: min validation loss
        2: max training accuracy
        3: min training loss
    """

    def __init__(self):
        # Optimizer settings.
        self.optim_type = 1
        self.lr = 0.00003  # lowered from 0.00005 after ~100 epochs
        self.momentum = 0.9
        self.weight_decay = 0.00008

        # Training schedule.
        self.batch_size = 128
        self.n_epochs = 400

        # Weight of the classification term in the composite loss.
        self.c0 = 0.35

        # Checkpointing.
        self.save_criterion = 0
        self.load_weights = 1
        self.weights_path = '/content/drive/My Drive/Colab Notebooks/a9/model.pt'


class MNISTParams(TrainParams):
    """MNIST-specific training parameters (checkpoint-path override).

    NOTE(review): this class is redefined later in this file; the later
    definition shadows this one, so this copy is dead code — delete one.
    """

    def __init__(self):
        super(MNISTParams, self).__init__()
        #self.weights_path = '/content/drive/My Drive/A9/checkpoints/mnist/model.pt'
        
        # NOTE(review): unlike the other params classes this path has no
        # '.pt' filename — looks like a directory; verify before use.
        self.weights_path = '/content/drive/My Drive/Colab Notebooks/checkpoints/mnist'


class FMNISTParams(TrainParams):
    """Fashion-MNIST training parameters (checkpoint-path override).

    NOTE(review): this class is redefined later in this file; the later
    definition shadows this one, so this copy is dead code — delete one.
    """

    def __init__(self):
        super(FMNISTParams, self).__init__()
        #self.weights_path = '/content/drive/My Drive/A9/checkpoints/fmnist/model.pt'
        # NOTE(review): unlike the later definition this path has no '.pt'
        # filename — looks like a directory; verify before use.
        self.weights_path = '/content/drive/My Drive/Colab Notebooks/a9/checkpoints/fmnist'

class MNISTParams(TrainParams):
    """Run configuration for MNIST; only the checkpoint path differs
    from the shared TrainParams defaults."""

    def __init__(self):
        super().__init__()
        # MNIST checkpoint file on Google Drive.
        self.weights_path = '/content/drive/My Drive/Colab Notebooks/checkpoints/mnist/model.pt'
        

class FMNISTParams(TrainParams):
    """Run configuration for Fashion-MNIST; only the checkpoint path differs
    from the shared TrainParams defaults."""

    def __init__(self):
        super().__init__()
        # Fashion-MNIST checkpoint file on Google Drive.
        self.weights_path = '/content/drive/My Drive/Colab Notebooks/checkpoints/fmnist/model.pt'


class CompositeLoss(nn.Module):
    """Weighted sum of the reconstruction and classification losses.

    loss = reconstruction_loss + c0 * classification_loss

    The original stub returned None from ``forward``; this implements the
    formula that was left in comments.  ``c0`` defaults to 0.35, matching
    ``TrainParams.c0``, and can be overridden per instance.
    """

    def __init__(self, device, c0=0.35):
        """:param device: unused here, kept for interface compatibility.
        :param c0: weight of the classification term."""
        super(CompositeLoss, self).__init__()
        self.c0 = c0

    def init_weights(self):
        # No learnable parameters to initialize.
        pass

    def forward(self, reconstruction_loss, classification_loss):
        """Return the scalar composite loss tensor."""
        return reconstruction_loss + self.c0 * classification_loss


class Encoder(nn.Module):
    """Fully connected encoder: flattened 28x28 image -> 150-d code.

    Layer widths are 784 -> 512 -> 384 -> 384 -> 256 -> 150, each followed by
    an ELU.  The weight matrices are exposed through ``get_weights`` so a
    symmetric decoder can reuse them (transposed) for weight sharing.
    """


# Assignment requirements (from the original notes):
# 1. all layers are fully connected
# 2. encoder and decoder are symmetric
# 3. decoder gets its weights from the encoder (weight sharing)
# 4. encoder output size = 150


    def __init__(self, device):
      super(Encoder, self).__init__()
      # The Linear modules are used only as weight containers of the right
      # shape for the Xavier init in init_weights(); forward() never calls
      # them directly.
      self.encod1 = torch.nn.Linear(28*28, 512, True)   # created mainly to extract its weight tensor
      self.encod2 = torch.nn.Linear(512, 384, True)
      self.encod3 = torch.nn.Linear(384, 384, True)
      self.encod4 = torch.nn.Linear(384, 256, True)
      self.encod5 = torch.nn.Linear(256, 150, True)

      # NOTE(review): wrapping an existing Parameter in nn.Parameter registers
      # a second parameter that appears to alias the same storage, so both
      # encodN.weight and weightN show up in parameters()/state_dict(), and
      # encodN.bias is registered but never used by forward().  Verify an
      # optimizer over parameters() behaves as intended here.
      self.weight1 = nn.Parameter(self.encod1.weight)
      self.weight2 = nn.Parameter(self.encod2.weight)
      self.weight3 = nn.Parameter(self.encod3.weight)
      self.weight4 = nn.Parameter(self.encod4.weight)
      self.weight5 = nn.Parameter(self.encod5.weight)
      
      self.b1 = nn.Parameter(torch.randn(512)) # bias dim equals this layer's output dim
      self.b2 = nn.Parameter(torch.randn(384))
      self.b3 = nn.Parameter(torch.randn(384))
      self.b4 = nn.Parameter(torch.randn(256))
      self.b5 = nn.Parameter(torch.randn(150))

    def get_weights(self):
      """Return the five weight matrices, transposed and in reverse layer
      order, for the decoder to share (decoder layer k uses the transpose of
      encoder layer 6-k)."""
      return self.weight5.t(), self.weight4.t(), self.weight3.t(), self.weight2.t(), self.weight1.t()

    def init_weights(self):
      """Xavier-uniform initialization of the encoder weight matrices.

      Writes in place into the Linear weights; presumably this also updates
      the aliased weightN parameters created in __init__ — TODO confirm."""
      torch.nn.init.xavier_uniform_(self.encod1.weight)
      torch.nn.init.xavier_uniform_(self.encod2.weight)
      torch.nn.init.xavier_uniform_(self.encod3.weight)
      torch.nn.init.xavier_uniform_(self.encod4.weight)
      torch.nn.init.xavier_uniform_(self.encod5.weight)
        

    def forward(self, enc_input):
      """Encode a batch of images.

      :param enc_input: tensor with batch as dim 0; flattened to (B, 784)
          (must flatten to 784 features to match weight1).
      :return: (B, 150) encoding.
      """
      x = enc_input
      x = x.view(x.size(0), -1)
    
      # Manual affine layers y = x W^T + b using the shared weight
      # parameters, each followed by ELU.
      x = F.elu(torch.mm(x, self.weight1.t()) + self.b1)
      x = F.elu(torch.mm(x, self.weight2.t()) + self.b2)
      x = F.elu(torch.mm(x, self.weight3.t()) + self.b3)
      x = F.elu(torch.mm(x, self.weight4.t()) + self.b4)
      x = F.elu(torch.mm(x, self.weight5.t()) + self.b5)
      return x

class Decoder(nn.Module):
  
  ''' 
  Fully connected decoder, symmetric to Encoder:
  150 -> 256 -> 384 -> 384 -> 512 -> 784, ELU after every layer, output
  reshaped to (B, 1, 28, 28).

  The dec_weightN attributes start out as plain placeholder tensors (not
  registered parameters); init_weights() replaces them with the encoder's
  transposed weights so y = x W^T + b can be computed manually with shared
  weights.  Only the biases are decoder-owned parameters from the start.
  '''
  
  def __init__(self, device):
    
    super(Decoder, self).__init__()
    # Placeholder weight tensors; plain tensors, NOT nn.Parameters, so they
    # are invisible to parameters()/state_dict() until init_weights() runs.
    self.dec_weight1 = torch.randn(150,256)
    self.dec_weight2 = torch.randn(256,384)
    self.dec_weight3 = torch.randn(384,384)
    self.dec_weight4 = torch.randn(384,512)
    self.dec_weight5 = torch.randn(512,28*28)
    
    self.dec_b1 = nn.Parameter(torch.randn(256))  # bias dim = output dim of that layer
    self.dec_b2 = nn.Parameter(torch.randn(384))
    self.dec_b3 = nn.Parameter(torch.randn(384))
    self.dec_b4 = nn.Parameter(torch.randn(512))
    self.dec_b5 = nn.Parameter(torch.randn(28*28))

  def init_weights(self, shared_weights):
    """Install the encoder's shared weights.

    :param shared_weights: 5-tuple from Encoder.get_weights(), ordered to
        match dec_weight1..dec_weight5.
    """
    self.dec_weight1, self.dec_weight2, self.dec_weight3, self.dec_weight4, self.dec_weight5 = shared_weights

    # NOTE(review): shared_weights are transposed views of the encoder's
    # Parameters (non-leaf tensors); wrapping them in nn.Parameter may raise
    # or break the tie to the encoder's autograd graph — verify that weight
    # sharing actually survives this step.
    self.dec_weight1 = nn.Parameter(self.dec_weight1)
    self.dec_weight2 = nn.Parameter(self.dec_weight2)
    self.dec_weight3 = nn.Parameter(self.dec_weight3)
    self.dec_weight4 = nn.Parameter(self.dec_weight4)
    self.dec_weight5 = nn.Parameter(self.dec_weight5)

  def forward(self, dec_input):
    """Decode a (B, 150) code into a (B, 1, 28, 28) reconstruction."""
    x = dec_input
    # Manual affine layers y = x W^T + b, each followed by ELU.
    x = F.elu(torch.mm(x, self.dec_weight1.t()) + self.dec_b1)
    x = F.elu(torch.mm(x, self.dec_weight2.t()) + self.dec_b2)
    x = F.elu(torch.mm(x, self.dec_weight3.t()) + self.dec_b3)
    x = F.elu(torch.mm(x, self.dec_weight4.t()) + self.dec_b4)
    x = F.elu(torch.mm(x, self.dec_weight5.t()) + self.dec_b5)

    x = x.view(x.size(0), 1, 28, 28)
    return x


class Classifier(nn.Module):
  """Five fully connected layers mapping a 150-d encoding to 10 class
  probabilities (softmax output): 150 -> 150 -> 150 -> 100 -> 60 -> 10."""

  def __init__(self, device):
    super(Classifier, self).__init__()
    # Layers must be created in this order so parameter registration and
    # default initialization match the rest of the file.
    self.class_layer1 = torch.nn.Linear(150, 150)
    self.class_layer2 = torch.nn.Linear(150, 150)
    self.class_layer3 = torch.nn.Linear(150, 100)
    self.class_layer4 = torch.nn.Linear(100, 60)
    self.class_layer5 = torch.nn.Linear(60, 10)

  def _all_layers(self):
    """Return the five Linear layers in forward order."""
    return (self.class_layer1, self.class_layer2, self.class_layer3,
            self.class_layer4, self.class_layer5)

  def init_weights(self):
    """Xavier-uniform initialization of every layer's weight matrix."""
    for layer in self._all_layers():
      init.xavier_uniform_(layer.weight)

  def forward(self, x):
    """Flatten the input, apply ReLU hidden layers, and return per-class
    probabilities via softmax over dim 1."""
    out = x.view(x.size(0), -1)
    layers = self._all_layers()
    for hidden in layers[:-1]:
      out = F.relu(hidden(out))
    return F.softmax(layers[-1](out), dim=1)