import numpy as np
from copy import deepcopy
import torch

from gp_fit import train_gp
from local_search import local_search

class Optimizer:
    """Discrete-sequence Bayesian optimizer (minimization).

    Serves points from a random init pool first; once the pool is empty,
    fits a GP surrogate on all observed (x, f(x)) pairs and proposes the
    next candidate by local search over an expected-improvement acquisition.
    """

    def __init__(self, seq_length, alphabet, n_init, gp_train_steps=300):
        """
        Args:
            seq_length: length of each candidate integer sequence.
            alphabet: alphabet size; tokens are drawn from [0, alphabet).
            n_init: number of random points to suggest before GP search.
            gp_train_steps: training iterations for the GP surrogate.

        Raises:
            ValueError: if ``n_init`` is not positive (via ``init_X``).
        """
        self.alphabet = alphabet
        self.seq_length = seq_length
        # Observed data; empty until `observe` is called.
        self.X = np.zeros((0, seq_length))
        self.fX = np.zeros((0, 1))
        self.X_init = None
        self.init_X(n_init)
        self.gp_train_steps = gp_train_steps

    def init_X(self, n_init: int):
        """(Re)draw ``n_init`` uniform random sequences as the init pool.

        Raises:
            ValueError: if ``n_init`` is not positive.
        """
        # Raise instead of assert: asserts are stripped under `python -O`.
        if n_init <= 0:
            raise ValueError(f"n_init must be positive, got {n_init}")
        self.X_init = np.random.randint(0, self.alphabet, (n_init, self.seq_length))

    def suggest(self):
        """Return the next point to evaluate.

        Pops the front of the random init pool while it is non-empty;
        afterwards delegates to the GP-guided ``select_candidate``.
        """
        if self.X_init.shape[0] > 0:
            X_suggest = self.X_init[0]
            self.X_init = self.X_init[1:, :]
        else:
            X_suggest = self.select_candidate()
        return X_suggest

    def observe(self, x_next, y_next):
        """Record an evaluated point ``x_next`` and its objective ``y_next``."""
        self.X = np.vstack((self.X, x_next))
        self.fX = np.vstack((self.fX, y_next))
        # TODO: restart (re-seed the init pool) when the trust region collapses.

    def select_candidate(self):
        """Fit a GP on the observed data and local-search the EI acquisition.

        Returns:
            np.ndarray: the selected candidate sequence.
        """
        X_torch = torch.FloatTensor(self.X)
        y_torch = torch.FloatTensor(self.fX.ravel().copy())
        # Center the local search on the incumbent (lowest observed f).
        x_center = self.X[self.fX.argmin().item(), :][None, :]

        model = train_gp(train_x=X_torch, train_y=y_torch,
                         alphabet=self.alphabet,
                         n_train_steps=self.gp_train_steps)

        def _ei(X, augmented=True):
            """Expected improvement, sign-flipped for minimization.

            ``augmented`` is currently unused (augmented-EI noise term is
            disabled); kept for interface stability.
            """
            from torch.distributions import Normal
            if not isinstance(X, torch.Tensor):
                X = torch.tensor(X, dtype=torch.float32)
            if X.dim() == 1:
                X = X.reshape(1, -1)
            gauss = Normal(torch.zeros(1).to(X), torch.ones(1).to(X))
            # Negate the posterior mean so "improvement" means lower f.
            preds = model(X)
            mean, std = -preds.mean, preds.stddev
            # In-fill criterion: posterior mean at the incumbent, not the
            # best observed f (more robust under observation noise).
            mu_star = -model.likelihood(model(torch.tensor(x_center[0].reshape(1, -1)).to(X))).mean
            u = (mean - mu_star) / std
            try:
                ucdf = gauss.cdf(u)
            except ValueError as e:
                raise ValueError(f'\t- {u}\n\t- {mean}\n\t- {mu_star}\n\t- {std}') from e
            updf = torch.exp(gauss.log_prob(u))
            ei = std * updf + (mean - mu_star) * ucdf
            return ei

        def _ucb(X, beta=5.):
            """Negated upper confidence bound (alternative acquisition, unused)."""
            if not isinstance(X, torch.Tensor):
                X = torch.tensor(X, dtype=torch.float32)
            if X.dim() == 1:
                X = X.reshape(1, -1)
            preds = model.likelihood(model(X))
            mean, std = preds.mean, preds.stddev
            return -(mean + beta * std)

        X_select, acq_select = local_search(x_center[0], _ei,
                                            max_hamming_dist=int(self.seq_length) - 1,
                                            alphabet=self.alphabet, n_restart=3)
        return np.array(X_select)

class Trust:
    """Trust-region bookkeeping: radius plus success/failure streak counters.

    Presumably the radius is expanded after `succtol` consecutive successes
    and shrunk after `failtol` consecutive failures, clamped to
    [r_min, r_max] — the update logic is not visible in this chunk; confirm
    against the rest of the file.
    """
    def __init__(self,):
        # Trust-region radius: current value with hard lower/upper bounds.
        self.r = 10
        self.r_min = 1
        self.r_max = 20
        
        # Consecutive success/failure counters and their thresholds.
        # NOTE(review): "succcount" has a triple 'c' — likely a typo, but
        # renaming would break any external readers of this attribute.
        self.succcount = 0
        self.failcount = 0
        self.succtol = 3
        self.failtol = 20