import numpy as np
import torch
from StreamLearn.Base.SemiEstimator import StreamEstimator


class WGDRO(StreamEstimator):
    """Weighted Group DRO via stochastic mirror-prox (extra-gradient).

    Each round draws two fresh minibatches and performs an
    extrapolation/update pair: projected gradient *descent* on the model
    ``w`` over the L2 ball of radius ``D``, and exponentiated-gradient
    *ascent* on the group weights ``q`` over the simplex.  The averaged
    iterate ``ave_w`` is the model used by :meth:`predict`.

    NOTE(review): the step-size constants (``tilde_L``, ``sigma2``, ...)
    look theory-driven; confirm them against the source paper — in
    particular ``D**2 * G`` in ``tilde_L`` (vs. a possible ``D * G``).
    """

    def __init__(self, args):
        self.name = 'WGDRO'
        # Problem constants supplied by the caller.
        self.D = args.D              # radius of the L2 ball constraining w
        self.G = args.G              # gradient-norm bound
        self.L = args.L              # smoothness constant
        self.c = args.c
        self.cw = args.cw_WGDRO      # step-size multiplier for w
        self.cq = args.cq_WGDRO     # step-size multiplier for q
        self.kappa = args.kappa
        # Quantities derived from the dataset in fit()/init().
        self.T = None                # number of rounds, floor(min_i n_i / 2)
        self.m = None                # number of groups
        self.w_size = None           # model dimension
        self.dataset = None
        self.n = None                # per-group sample counts
        self.nm = None               # min_i n_i
        self.sigma2 = None           # variance proxy used in the step sizes
        self.tilde_L = None          # effective smoothness constant
        self.omega_max = None
        self.p = None                # per-group weights p_i (float32)
        self.eta_w = None            # step size for w (0-dim tensor)
        self.eta_q = None            # step size for q (0-dim tensor)
        # Iterates (w/q are the extrapolated points, w_prime/q_prime the base).
        self.w = None
        self.w_prime = None
        self.ave_w = None            # running average of w, used by predict()
        self.q = None
        self.q_prime = None
        self.ave_q = None            # running average of q
        # Loss / link function.
        self.criterion = torch.nn.BCELoss(reduction='none')
        self.function = torch.nn.Sigmoid()
        # Kept only for interface compatibility: fit() updates the iterates
        # manually and rebinds self.w / self.w_prime every round, so an
        # optimizer holding them would go stale immediately.
        self.optimizer = None

    def init(self):
        """Initialize iterates and step sizes; m, w_size and n must be set."""
        # Iterates: w at the origin, q uniform on the simplex.
        self.w = torch.zeros(self.w_size, requires_grad=True, dtype=torch.float32)
        self.w_prime = torch.zeros(self.w_size, requires_grad=True, dtype=torch.float32)
        self.ave_w = torch.zeros(self.w_size, dtype=torch.float32)
        self.q = torch.ones(self.m) / self.m
        self.q_prime = torch.ones(self.m) / self.m
        self.ave_q = torch.zeros(self.m, dtype=torch.float32)
        # Step sizes.
        self.nm = np.min(self.n)
        # BUGFIX: previously `.astype(np.float32)` applied only to the
        # denominator due to operator precedence; under NumPy 2 (NEP 50)
        # the quotient then promoted back to float64, which broke the
        # float32 torch matmuls in fit().  Cast the whole expression.
        self.p = ((1 / np.sqrt(self.nm) + 1)
                  / (1 / np.sqrt(self.nm) + np.sqrt(self.nm / self.n))).astype(np.float32)
        self.omega_max = np.max((self.p ** 2 * self.nm) / self.n)
        self.tilde_L = 2 * np.sqrt(2) * np.max(self.p) * (self.D ** 2 * self.L + self.D ** 2 * self.G *
                                                          np.sqrt(np.log(self.m)))
        self.sigma2 = 2 * self.c * self.omega_max * (self.kappa * self.D ** 2 * self.G ** 2 + np.log(self.m) ** 2)
        self.eta_w = torch.tensor(self.cw * self.D ** 2
                                  * np.min((1 / (np.sqrt(3) * self.tilde_L), 2 / np.sqrt(7 * self.sigma2 * self.nm))))
        self.eta_q = torch.tensor(self.cq * np.log(self.m)
                                  * np.min((1 / (np.sqrt(3) * self.tilde_L), 2 / np.sqrt(7 * self.sigma2 * self.nm))))
        # Created for attribute compatibility only; never stepped (see above).
        self.optimizer = torch.optim.SGD([self.w, self.w_prime], lr=float(self.eta_w))

    def _group_losses(self, w_vec):
        """Draw a fresh minibatch and return the length-m tensor of per-group
        mean BCE losses of the sigmoid-linear model parameterized by w_vec.

        Assumes sample rows are laid out as [group_index, features..., label]
        — TODO confirm against the dataset implementation.
        """
        sample_m = self.dataset.sample_pr_avail()
        indexes = sample_m[:, 0].astype(int)   # column 0: group index
        xs = sample_m[:, 1:-1]                 # middle columns: features
        ys = sample_m[:, -1]                   # last column: binary label
        out = self.function(torch.from_numpy(xs).to(torch.float32) @ w_vec)
        losses = torch.zeros(self.m)
        for i in range(self.m):
            mask = indexes == i
            # Guard: a group absent from this minibatch would make
            # torch.mean(empty) = NaN and poison every later iterate;
            # leave its loss at 0 instead.
            if mask.any():
                losses[i] = torch.mean(
                    self.criterion(out[mask], torch.tensor(ys[mask], dtype=torch.float32)))
        return losses

    def _project_ball(self, v):
        """Project v onto the L2 ball of radius self.D."""
        norm = torch.norm(v, p=2)
        if norm > self.D:
            v = v / norm * self.D
        return v

    def _exp_grad_from_prime(self, g):
        """One exponentiated-gradient ascent step on the simplex, taken from
        the base point q_prime in the direction of the (numpy) gradient g."""
        q_new = self.q_prime * torch.exp(self.eta_q * torch.from_numpy(g))
        return (q_new / q_new.sum()).to(torch.float32)

    def fit(self, stream_dataset):
        """Run T rounds of stochastic mirror-prox on the weighted GDRO game."""
        self.dataset = stream_dataset
        self.m = stream_dataset.get_m()
        self.w_size = stream_dataset.get_w_size()
        self.n = stream_dataset.get_n()
        self.nm = np.min(self.n)
        self.T = np.floor(self.nm / 2).astype(int)
        self.init()
        for t in range(1, self.T + 1):
            # Running averages of the iterates (the averaged solution is
            # what predict() uses).
            self.ave_q = (self.ave_q * (t - 1) + self.q) / t
            self.ave_w = (self.ave_w * (t - 1) + self.w.detach()) / t
            # --- Extrapolation: gradients at (w', q') produce (w, q). ---
            loss_p = self._group_losses(self.w_prime)
            true_loss = self.q_prime @ (torch.tensor(self.p) * loss_p)
            true_loss.backward()
            grad = self.w_prime.grad
            self.w = self._project_ball(self.w_prime.detach().clone() - self.eta_w * grad)
            self.w_prime.grad = None          # grads are cleared manually;
            self.w.requires_grad_(True)       # w is a fresh leaf each round
            self.q = self._exp_grad_from_prime(self.p * loss_p.detach().numpy())
            # --- Update: gradients at (w, q) move the base (w', q'). ---
            loss_p_prime = self._group_losses(self.w)
            true_loss_prime = self.q.to(torch.float32) @ (torch.tensor(self.p) * loss_p_prime)
            true_loss_prime.backward()
            grad = self.w.grad
            self.w_prime = self._project_ball(self.w_prime.detach().clone() - self.eta_w * grad)
            self.w.grad = None
            self.w_prime.requires_grad_(True)
            self.q_prime = self._exp_grad_from_prime(self.p * loss_p_prime.detach().numpy())

    def predict(self, X):
        """Return sigmoid scores of the averaged model for a numpy feature matrix X."""
        model_output = self.function(torch.from_numpy(X).to(torch.float32) @ self.ave_w)
        return model_output

    def evaluate(self, y_pred, y_true):
        """Per-sample BCE losses of predictions y_pred (tensor in (0,1))
        against labels y_true, returned as a numpy array."""
        return self.criterion(y_pred, torch.tensor(y_true, dtype=torch.float32)).numpy()
