import torch
from seq_kernel import FastStringKernel

from gpytorch.means import ConstantMean
from gpytorch.kernels import ScaleKernel
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.distributions import MultivariateNormal
from gpytorch.constraints import Interval
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.models import ExactGP
from tqdm import tqdm

class ExactGPModel(ExactGP):
    """Exact GP regression model: constant mean + scaled base kernel.

    The covariance is ``ScaleKernel(kern)``, so an outputscale is learned
    on top of whatever base kernel the caller supplies.
    """

    def __init__(self, train_x, train_y, likelihood, kern, outputscale_constraint=None):
        """
        Args:
            train_x: Training inputs (passed through to ExactGP).
            train_y: Training targets (passed through to ExactGP).
            likelihood: GPyTorch likelihood (e.g. GaussianLikelihood).
            kern: Base kernel to wrap in a ScaleKernel.
            outputscale_constraint: Constraint on the ScaleKernel's
                outputscale; defaults to ``Interval(0.5, 5.0)``.
                NOTE: the original used ``Interval(0.5, 5.)`` as a default
                argument, which is a single mutable object shared by every
                instance constructed with the default; a fresh constraint
                is now built per call instead.
        """
        super().__init__(train_x, train_y, likelihood)
        if outputscale_constraint is None:
            outputscale_constraint = Interval(0.5, 5.)
        self.mean_module = ConstantMean()
        self.covar_module = ScaleKernel(
            kern, outputscale_constraint=outputscale_constraint
        )

    def forward(self, x):
        """Return the prior multivariate normal p(f(x)) at inputs ``x``."""
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)
    
def train_gp(train_x, train_y, alphabet, n_train_steps=300, hypers=None):
    """Fit an exact GP with a string kernel to ``(train_x, train_y)``.

    Args:
        train_x: 2-D tensor of encoded sequences, shape (n, seq_length).
        train_y: 1-D tensor of targets, shape (n,).
        alphabet: Alphabet size forwarded to ``FastStringKernel``.
        n_train_steps: Number of Adam steps for hyperparameter fitting.
        hypers: Optional state dict of pretrained hyperparameters to load.
            When omitted, outputscale and noise are initialized to fixed
            defaults. (``None`` replaces the original mutable ``{}``
            default argument.)

    Returns:
        The trained ``ExactGPModel``, switched to eval mode.

    Raises:
        AssertionError: If the input tensors have unexpected shapes.
    """
    assert train_x.ndim == 2
    assert train_y.ndim == 1
    assert train_x.shape[0] == train_y.shape[0]

    # Hyperparameter bounds / defaults.
    # (The original also built an unused lengthscale_constraint whose only
    # reference was commented out; it has been removed.)
    noise_variance = 0.005
    noise_constraint = Interval(1e-6, 0.1)
    outputscale_constraint = Interval(0.5, 5.)

    # Build likelihood, kernel, and model, matching the training data dtype.
    likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(dtype=train_y.dtype)
    kernel = FastStringKernel(seq_length=train_x.shape[1], alphabet_size=alphabet)
    model = ExactGPModel(
        train_x=train_x,
        train_y=train_y,
        likelihood=likelihood,
        kern=kernel,
        outputscale_constraint=outputscale_constraint,
    ).to(dtype=train_x.dtype)

    # Training mode for hyperparameter optimization.
    model.train()
    likelihood.train()

    # "Loss" for exact GPs: the (negative) marginal log likelihood.
    mll = ExactMarginalLogLikelihood(likelihood, model)

    # Initialize hyperparameters, either from a provided state dict or defaults.
    if hypers:
        model.load_state_dict(hypers)
    else:
        model.initialize(**{
            "covar_module.outputscale": 1.0,
            "likelihood.noise": noise_variance,
        })

    # Adam over all model parameters (the likelihood's noise is registered
    # on the model via ExactGP, so it is covered too).
    optimizer = torch.optim.Adam(model.parameters(), lr=0.03)

    trange = tqdm(range(n_train_steps), desc="GP fit")
    for _ in trange:
        optimizer.zero_grad()
        output = model(train_x)
        # No `.float()` downcast here: the original cast the loss to float32
        # before backward, which silently dropped precision for float64 runs
        # and served no purpose.
        loss = -mll(output, train_y)
        loss.backward()
        optimizer.step()
        trange.set_postfix({'loss': format(loss.item(), 'g')})

    # Switch to eval mode for posterior predictions.
    model.eval()
    likelihood.eval()

    return model