import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

class KoopmanGeneralSolver(object):
    '''
    Build the Koopman solver.

    Fits a finite-dimensional approximation K of the Koopman operator on a
    fixed dictionary (EDMD-style): given snapshot pairs (x, y), K solves the
    regularized least-squares problem min ||Psi(x) K - Psi(y)||^2 + reg||K||^2,
    then exposes its eigen-decomposition, eigenfunctions and modes, and can
    roll out predicted trajectories.
    '''

    def __init__(self, dic, target_dim, reg=0.0):
        """Initializer

        :param dic: dictionary
        :type dic: class
        :param target_dim: dimension of the variable of the equation
        :type target_dim: int
        :param reg: the regularization parameter when computing K, defaults to 0.0
        :type reg: float, optional
        """
        self.dic = dic
        self.dic_func = dic.forward
        self.target_dim = target_dim
        self.reg = reg

    def separate_data(self, data):
        """Split a snapshot pair into its (x, y) components.

        :param data: indexable pair (data_x, data_y)
        :return: tuple (data_x, data_y)
        """
        data_x = data[0]
        data_y = data[1]
        return data_x, data_y

    def build(self, data_train):
        """Fit the solver on training snapshot pairs.

        :param data_train: pair (data_x, data_y) of snapshot tensors
        """
        # Separate data
        self.data_train = data_train
        self.data_x_train, self.data_y_train = self.separate_data(
            self.data_train)

        # Compute final information
        self.compute_final_info(reg_final=0.0)

    def compute_final_info(self, reg_final):
        """Compute K (with regularization ``reg_final``), its
        eigen-decomposition and the Koopman modes."""
        # Compute K
        self.K = self.compute_K(self.dic_func,
                                self.data_x_train,
                                self.data_y_train,
                                reg=reg_final)
        self.eig_decomp(self.K)
        self.compute_mode()

    def eig_decomp(self, K):
        """Eigen-decomposition of K, sorted by decreasing real part.

        Stores NumPy eigenvalues/eigenvectors (and the eigenvector inverse),
        plus cached complex torch copies so that ``eigenfunctions`` and
        ``predict`` do not rebuild tensors on every call.

        :param K: square Koopman matrix (torch tensor or array-like)
        """
        # K may be a torch tensor carrying an autograd graph (e.g. from a
        # trainable dictionary) -- detach before handing it to NumPy.
        if torch.is_tensor(K):
            K_np = K.detach().cpu().numpy()
        else:
            K_np = np.asarray(K)
        self.eigenvalues, self.eigenvectors = np.linalg.eig(K_np)
        idx = self.eigenvalues.real.argsort()[::-1]
        self.eigenvalues = self.eigenvalues[idx]
        self.eigenvectors = self.eigenvectors[:, idx]
        # NOTE(review): plain inverse assumes the eigenvector matrix is
        # well conditioned (K diagonalizable); kept from the original.
        self.eigenvectors_inv = np.linalg.inv(self.eigenvectors)
        # Cached complex torch views, invariant until the next decomposition.
        self._eigenvalues_t = torch.tensor(self.eigenvalues,
                                           dtype=torch.cdouble)
        self._eigenvectors_t = torch.tensor(self.eigenvectors,
                                            dtype=torch.cdouble)

    def eigenfunctions(self, data_x):
        """Estimated eigenfunctions evaluated at ``data_x``: Psi(x) V.

        :param data_x: state samples accepted by the dictionary
        :return: complex tensor of eigenfunction values
        """
        psi_x = self.dic_func(data_x)
        val = torch.matmul(psi_x.to(dtype=torch.cdouble),
                           self._eigenvectors_t)
        return val

    def compute_mode(self):
        """Compute the Koopman modes (V^{-1} B)^T.

        :return: complex tensor of modes, one column per basis function
        """
        self.basis_func_number = self.K.shape[0]

        # Form B matrix (maps dictionary features back to the state space)
        self.B = self.dic.generate_B(self.data_x_train)

        # Compute modes
        self.modes = torch.matmul(
            torch.tensor(self.eigenvectors_inv, dtype=torch.cdouble),
            torch.tensor(self.B, dtype=torch.cdouble)).T
        return self.modes

    def calc_psi_next(self, data_x, K):
        """One-step feature propagation: Psi(x) K."""
        psi_x = self.dic_func(data_x)
        psi_next = torch.matmul(psi_x, K)
        return psi_next

    def predict(self, x0, traj_len):
        """Predict a trajectory of ``traj_len`` states (including ``x0``).

        :param x0: initial state(s), shape (n_traj, target_dim)
        :param traj_len: number of states in the returned trajectory
        :return: real tensor of predicted states, squeezed
        """
        eigvals = self._eigenvalues_t  # hoisted: invariant over the loop
        traj = [x0]
        for _ in range(traj_len - 1):
            x_curr = traj[-1]
            efunc = self.eigenfunctions(x_curr)
            # x_{t+1} = modes @ (Lambda * Phi(x_t))^T; keep the real part.
            x_next = torch.matmul(self.modes, (eigvals * efunc).T)
            traj.append(x_next.real.T)
        traj = torch.stack(traj, dim=0).permute(1, 0, 2)
        return traj.squeeze()

    def compute_K(self, dic, data_x, data_y, reg):
        """Ridge least-squares estimate of K.

        Solves K = (Psi_x^T Psi_x + reg I)^+ (Psi_x^T Psi_y) via pseudoinverse.

        :param dic: dictionary function mapping states to features
        :param data_x: snapshot inputs
        :param data_y: snapshot outputs
        :param reg: Tikhonov regularization coefficient
        :return: Koopman matrix of shape (n_basis, n_basis)
        """
        psi_x = dic(data_x)
        psi_y = dic(data_y)
        psi_xt = psi_x.T
        # Match the feature dtype so K stays in the data's precision instead
        # of being silently promoted to float64.
        idmat = torch.eye(psi_x.shape[-1], dtype=psi_x.dtype)
        xtx_inv = torch.linalg.pinv(reg * idmat + torch.matmul(psi_xt, psi_x))
        xty = torch.matmul(psi_xt, psi_y)
        self.K_reg = torch.matmul(xtx_inv, xty)
        return self.K_reg


class KoopmanDLSolver(KoopmanGeneralSolver):
    '''
    Build the Koopman model with dictionary learning.

    Alternates between a closed-form ridge update of K (from the base class)
    and gradient steps on the trainable dictionary network.
    '''

    def build_model(self):
        """Build model with trainable dictionary

        The loss function is ||Psi(y) - K Psi(x)||^2 .

        K is represented as a bias-free linear layer over dictionary
        features, so it is sized by the number of basis functions the
        dictionary produces (inferred from the training data when
        available), which in general differs from ``target_dim``.
        """
        class KoopmanModel(nn.Module):
            """Residual module: forward returns K Psi(x) - Psi(y)."""

            def __init__(self, dic_func, n_psi):
                super(KoopmanModel, self).__init__()
                self.dic_func = dic_func
                # Trainable stand-in for K; overwritten with the closed-form
                # estimate before each dictionary-training phase.
                self.K = nn.Linear(n_psi, n_psi, bias=False)

            def forward(self, x, y):
                psi_x = self.dic_func(x)
                psi_y = self.dic_func(y)
                psi_next = self.K(psi_x)
                return psi_next - psi_y

        # Size K by the dictionary's actual output dimension; fall back to
        # target_dim if no training data has been attached yet.
        if hasattr(self, 'data_x_train'):
            n_psi = self.dic_func(self.data_x_train).shape[-1]
        else:
            n_psi = self.target_dim
        return KoopmanModel(self.dic_func, n_psi)

    def train_psi(self, model, epochs):
        """Train the trainable part of the dictionary

        :param model: koopman model
        :type model: model
        :param epochs: the number of training epochs before computing K for each inner training epoch
        :type epochs: int
        :return: history
        :rtype: dict mapping 'loss' to the per-epoch loss values
        """
        model.train()
        # Create the optimizer once and keep it on the solver: this preserves
        # Adam's moment estimates across outer iterations and gives ``build``
        # a live optimizer whose learning rate it can decay (the original
        # referenced a non-existent ``model.optimizer``).
        if getattr(self, 'optimizer', None) is None:
            self.optimizer = optim.Adam(model.parameters(), lr=self.lr)
        criterion = nn.MSELoss()

        epoch_losses = []
        for epoch in range(epochs):
            self.optimizer.zero_grad()
            outputs = model(self.data_x_train, self.data_y_train)
            # Drive the residual K Psi(x) - Psi(y) towards zero.
            loss = criterion(outputs, torch.zeros_like(outputs))
            loss.backward()
            self.optimizer.step()
            epoch_losses.append(loss.item())

            if epoch % 10 == 0:
                print(f'Epoch {epoch}, Loss: {loss.item():.6f}')

        # Return the whole loss history; callers read the last entry.
        return {'loss': epoch_losses}

    def build(
            self,
            data_train,
            data_valid,
            epochs,
            batch_size,
            lr,
            log_interval,
            lr_decay_factor):
        """Alternating training loop.

        :param data_train: (x, y) training snapshot pair
        :param data_valid: (x, y) validation snapshot pair
        :param epochs: number of outer iterations (K update + dictionary update)
        :param batch_size: stored for compatibility; training here is full-batch
        :param lr: initial learning rate for the dictionary optimizer
        :param log_interval: outer iterations between loss checks / lr decay tests
        :param lr_decay_factor: multiplicative lr decay applied when the logged loss increases
        """
        # Separate training data
        self.data_train = data_train
        self.data_x_train, self.data_y_train = self.separate_data(
            self.data_train)

        self.data_valid = data_valid
        self.zeros_data_y_train = torch.zeros_like(
            self.dic_func(self.data_y_train))
        self.zeros_data_y_valid = torch.zeros_like(
            self.dic_func(self.data_valid[1]))
        self.batch_size = batch_size
        self.lr = lr

        # Build the Koopman DL model
        self.model = self.build_model()

        # Training Loop
        losses = []
        for i in range(epochs):
            # One step for computing K. Closed-form update: no autograd graph
            # is needed, and detaching keeps the weight assignment graph-free.
            with torch.no_grad():
                self.K = self.compute_K(self.dic_func,
                                        self.data_x_train,
                                        self.data_y_train,
                                        self.reg)
            self.model.K.weight.data = self.K.detach().T

            # Two steps for training PsiNN
            self.history = self.train_psi(self.model, epochs=2)

            print('number of the outer loop:', i)
            if i % log_interval == 0:
                losses.append(self.history['loss'][-1])

                # Adjust learning rate when the logged loss stops decreasing.
                if len(losses) > 2:
                    if losses[-1] > losses[-2]:
                        print("Error increased. Decay learning rate")
                        self.lr *= lr_decay_factor
                        # The optimizer lives on the solver (created by
                        # train_psi), not on the model.
                        for param_group in self.optimizer.param_groups:
                            param_group['lr'] = self.lr

        # Compute final information. The eigen-decomposition is NumPy-based,
        # so run outside autograd to avoid grad-attached tensors.
        with torch.no_grad():
            self.compute_final_info(reg_final=0.01)
