import numpy as np
import torch
from StreamLearn.Base.SemiEstimator import StreamEstimator


class MERO(StreamEstimator):
    """MERO (Minimax Excess-Risk Optimization) stream learner.

    Maintains, for m data distributions:
      * ``w_star[i]`` -- a per-distribution model driven toward distribution
        i's own risk minimizer (used to estimate the minimal risk),
      * a shared model ``w`` minimizing the q-weighted loss,
      * a weight vector ``q`` over distributions, updated by exponentiated
        gradient ascent on the estimated excess risk.
    Step-size-weighted running averages (``ave_w_star``, ``ave_w``,
    ``ave_q``) are the quantities actually used for prediction.
    """

    def __init__(self, args):
        # Problem constants: D is the radius of the L2 ball all models are
        # projected onto; G is the gradient-norm bound used in step sizes.
        self.name = 'MERO'
        self.D = args.D
        self.G = args.G
        # Step-size multipliers (hyper-parameters).
        self.cw_star_MERO = args.cw_star_MERO
        self.cw_MERO = args.cw_MERO
        self.cq_MERO = args.cq_MERO
        # Filled in by fit(): horizon, number of distributions, stream
        # handle, and model dimension.
        self.T = None
        self.m = None
        self.dataset = None
        self.w_size = None
        # Step-size schedules; eta_w and eta_q depend on m and are therefore
        # built in init(), after fit() has read m from the dataset.
        self.eta_w_star = lambda t: self.cw_star_MERO * self.D / (self.G * np.sqrt(t))
        self.eta_w = None
        self.eta_q = None
        # Running sums of step sizes, denominators of the weighted averages.
        self.sum_eta_w_star = 0
        self.sum_eta_w = 0
        self.sum_eta_q = 0
        # Parameters (allocated in init()).
        self.w_star = None
        self.ave_w_star = None
        self.w = None
        self.ave_w = None
        self.q = None
        self.ave_q = None
        # Per-sample BCE (reduction='none') so the loss vector can be
        # q-weighted before reduction.
        self.criterion = torch.nn.BCELoss(reduction='none')
        self.function = torch.nn.Sigmoid()
        self.optimizer = None

    def init(self):
        """Build the m-dependent step-size schedules, parameters and optimizer.

        Must run after self.m and self.w_size are set (fit() does this).
        """
        # Shared sqrt(D^2 G^2 + log m) factor in both schedules.
        self.eta_w = lambda t: self.cw_MERO * self.D ** 2 / np.sqrt((self.D ** 2 * self.G ** 2 + np.log(self.m)) * t)
        self.eta_q = lambda t: self.cq_MERO * np.log(self.m) / np.sqrt((self.D ** 2 * self.G ** 2 + np.log(self.m)) * t)
        # One risk-minimizer per distribution plus one shared model, all
        # starting at the origin; averages start at zero as well.
        self.w_star = torch.zeros((self.m, self.w_size), requires_grad=True, dtype=torch.float32)
        self.ave_w_star = torch.zeros((self.m, self.w_size), dtype=torch.float32)
        self.w = torch.zeros(self.w_size, requires_grad=True, dtype=torch.float32)
        self.ave_w = torch.zeros(self.w_size, dtype=torch.float32)
        # Uniform initial weights over the m distributions.
        self.q = torch.ones(self.m, requires_grad=False) / self.m
        self.ave_q = torch.zeros(self.m)
        # Single SGD optimizer with two parameter groups; the learning rates
        # are overwritten every round by step_size_update().
        self.optimizer = torch.optim.SGD(
            [{"params": self.w_star, "lr": self.eta_w_star(1)},
             {"params": self.w, "lr": self.eta_w(1)}], lr=0)

    def fit(self, stream_dataset):
        """Run T rounds of MERO over the stream.

        Each round: refresh the running averages, take one minimization step
        per w_star[i], one q-weighted step on w, then an exponentiated-
        gradient ascent step on q.
        """
        # T = floor(min of get_n()) -- presumably get_n() returns the
        # per-distribution sample counts; verify against the dataset API.
        self.T = np.floor(np.min(stream_dataset.get_n())).astype(int)
        self.m = stream_dataset.get_m()
        self.w_size = stream_dataset.get_w_size()
        self.dataset = stream_dataset
        self.init()
        for t in range(1, self.T + 1):
            # Weighted running averages: ave_x = sum_s eta(s) x_s / sum_s eta(s).
            # At t=1 the sums are 0, so each average starts at the current value.
            self.ave_w_star = (self.ave_w_star * self.sum_eta_w_star
                               + self.w_star.detach() * self.eta_w_star(t)) / (
                                      self.sum_eta_w_star + self.eta_w_star(t))
            self.ave_w = (self.ave_w * self.sum_eta_w + self.w.detach() * self.eta_w(t)) / (
                    self.sum_eta_w + self.eta_w(t))
            self.ave_q = (self.ave_q * self.sum_eta_q + self.q.detach() * self.eta_q(t)) / (
                    self.sum_eta_q + self.eta_q(t))
            self.sum_eta_w_star += self.eta_w_star(t)
            self.sum_eta_w += self.eta_w(t)
            self.sum_eta_q += self.eta_q(t)
            # Refresh both SGD learning rates for round t.
            self.step_size_update(t)
            # One sample per distribution; rows are [features..., label].
            sample_m = self.dataset.sample_m()
            xs = torch.as_tensor(sample_m[:, :-1], dtype=torch.float32)
            ys = torch.as_tensor(sample_m[:, -1], dtype=torch.float32)
            # --- Update each per-distribution risk minimizer w_star[i]. ---
            for i in range(self.m):
                pred_i = self.function(xs[i] @ self.w_star[i])
                loss_i = self.criterion(pred_i, ys[i])
                self.optimizer.zero_grad()
                loss_i.backward()
                self.optimizer.step()
                # Project row i back onto the L2 ball of radius D, IN PLACE
                # under no_grad: reassigning (or calling requires_grad_ on
                # the non-leaf slice, which raises) would detach the tensor
                # the optimizer holds.
                with torch.no_grad():
                    row_norm = torch.norm(self.w_star[i], p=2)
                    if row_norm > self.D:
                        self.w_star[i] *= self.D / row_norm
            # --- One q-weighted descent step on the shared model w. ---
            preds = self.function(xs @ self.w)
            losses = self.criterion(preds, ys)
            weighted_loss = self.q @ losses
            self.optimizer.zero_grad()
            weighted_loss.backward()
            self.optimizer.step()
            # In-place projection keeps self.w the very tensor registered in
            # the optimizer; rebinding self.w (as `self.w = ...`) would make
            # every later optimizer.step() update a stale tensor.
            with torch.no_grad():
                w_norm = torch.norm(self.w, p=2)
                if w_norm > self.D:
                    self.w *= self.D / w_norm
            # --- Exponentiated-gradient ascent on q (pure torch). ---
            with torch.no_grad():
                # Estimated minimal risk per distribution, from ave_w_star.
                min_risk = torch.stack([
                    self.criterion(self.function(xs[i] @ self.ave_w_star[i]), ys[i])
                    for i in range(self.m)])
                excess = losses.detach() - min_risk
                q_unnorm = self.q * torch.exp(self.eta_q(t) * excess)
                self.q = q_unnorm / q_unnorm.sum()

    def step_size_update(self, t):
        """Set round-t learning rates on the two SGD parameter groups
        (group 0: w_star, group 1: w -- order fixed in init())."""
        self.optimizer.param_groups[0]['lr'] = self.eta_w_star(t)
        self.optimizer.param_groups[1]['lr'] = self.eta_w(t)

    def predict(self, X):
        """Sigmoid predictions of the averaged shared model on the rows of X
        (array-like of shape (n, w_size)); returns a torch tensor."""
        with torch.no_grad():
            return self.function(torch.as_tensor(X, dtype=torch.float32) @ self.ave_w)

    def evaluate(self, y_pred, y_true):
        """Per-sample BCE between predictions and labels, as a numpy array.

        Wrapped in no_grad so .numpy() cannot fail on a grad-tracking input.
        """
        with torch.no_grad():
            target = torch.as_tensor(y_true, dtype=torch.float32)
            return self.criterion(y_pred, target).numpy()
