import numpy.typing as npt
from sklearn.preprocessing import minmax_scale
from sklearn.utils import shuffle
import math
import matplotlib.pyplot as plt

from utils import load_pickle, EvolvingDataset, deriv_logistic

import numpy as np
import torch
import paddle
import tensorflow as tf

# Active array framework; switch this to run the experiment on another backend.
BACKEND = paddle
# Random-number namespace matching BACKEND (used by RFFTransformer sampling).
# NOTE(review): torch/paddle expose randn/rand on the module itself while
# numpy needs np.random; confirm tf.random provides the same sampler names
# before switching BACKEND to tf.
RANDOM = {
    torch: torch,
    np: np.random,
    paddle: paddle,
    tf: tf.random
}.get(BACKEND)


def np2backend(*X: npt.ArrayLike):
    """Convert numpy arrays to the tensor type of the active BACKEND.

    Parameters
    ----------
    *X : one or more numpy arrays.

    Returns
    -------
    A tuple with one converted array/tensor per input, in the same order.
    (For the numpy backend the inputs are returned unchanged.)

    Raises
    ------
    ValueError
        If BACKEND is not one of np, torch, paddle, tf (the original
        silently returned None here).
    """
    if BACKEND is np:
        return X
    # Return tuples (not one-shot generators, as the original did) so the
    # result can be unpacked or iterated more than once.
    if BACKEND is torch:
        return tuple(torch.from_numpy(x) for x in X)
    if BACKEND is paddle:
        return tuple(paddle.to_tensor(x) for x in X)
    if BACKEND is tf:
        return tuple(tf.constant(x) for x in X)
    raise ValueError("unsupported backend: {}".format(BACKEND))


def load_data(dataset: str, B: int):
    """Load a pickled dataset, scale features to [0, 1], convert to the
    active backend, shuffle deterministically, and split into evolving-stage
    batches of size ``B`` via :class:`EvolvingDataset`."""
    features, labels = load_pickle("data/{}.pkl".format(dataset))
    features = minmax_scale(features).astype(np.float32)
    labels = labels.astype(int)
    features, labels = np2backend(features, labels)

    shuffled = shuffle(features, labels, random_state=42)
    return EvolvingDataset(*shuffled, B=B).get_data()


class RFFTransformer:
    """Random Fourier feature map approximating an RBF kernel.

    Samples a fixed projection ``W ~ N(0, 2*gamma)`` and phases
    ``b ~ U[0, 2*pi)`` once at construction, then maps inputs ``x`` to
    ``sqrt(2/D) * cos(x @ W + b)``.
    """

    def __init__(
        self,
        d: int,
        D: int = 1000,
        gamma: float = 1.,
    ) -> None:
        self.gamma = gamma
        self.d = d
        self.D = D

        # Draw W before b so that a seeded RNG reproduces the same map.
        scale = math.sqrt(2 * self.gamma)
        self.W = scale * RANDOM.randn(d, D)
        self.b = 2 * math.pi * RANDOM.rand(D)

    def transform(self, x):
        """Map ``x`` of shape (n, d) to D-dimensional random Fourier features."""
        projected = x @ self.W + self.b
        return math.sqrt(2 / self.D) * BACKEND.cos(projected)


def online_gradient_descent(
    X: npt.ArrayLike,
    y: npt.ArrayLike,
    loss_grad,
    w: npt.ArrayLike = None,
    lr: float = 1.,
    lr_schedule=lambda t: 1,
):
    """Run online gradient descent over the stream ``(X, y)``.

    Yields ``(score, w, running_accuracy)`` after each sample, where the
    accuracy counts ``sign(score) == label`` BEFORE the gradient step.

    NOTE: when the caller supplies ``w``, it is updated in place.
    """
    if w is None:
        w = BACKEND.zeros(X.shape[1])
    n_correct = 0
    for t, (x_t, y_t) in enumerate(zip(X, y), start=1):
        score = w @ x_t

        n_correct += BACKEND.sign(score) == y_t

        # Gradient of the surrogate loss w.r.t. w is loss_grad(score, y) * x.
        w -= lr * lr_schedule(t) * loss_grad(score, y_t) * x_t

        yield score, w, n_correct / t


def weight_combination(
    X1: npt.ArrayLike,
    X2: npt.ArrayLike,
    y: npt.ArrayLike,
    loss_grad,
    w1=None,
    w2=None,
    lr: float = 1.,
    lr_schedule=lambda t: 1,
):
    """Combine two OGD learners with exponentially weighted averaging.

    Runs one OGD learner on each feature stream and mixes their raw scores
    with a Hedge-style weight ``alpha`` that is updated from each learner's
    0/1 mistakes. Returns the list of running accuracies of the mixture.
    """
    # Hedge learning rate from the standard regret bound for two experts.
    eta = math.sqrt(8 * math.log(2) / len(X1))
    learner_a = online_gradient_descent(X1, y, loss_grad, w1, lr, lr_schedule)
    learner_b = online_gradient_descent(X2, y, loss_grad, w2, lr, lr_schedule)

    n_correct, ACR = 0, []
    alpha = 0.5  # mixture weight on the first learner
    for t, (out_a, out_b) in enumerate(zip(learner_a, learner_b)):
        score_a, score_b = out_a[0], out_b[0]
        mixed = alpha * score_a + (1 - alpha) * score_b

        n_correct += BACKEND.sign(mixed) == y[t]
        ACR.append(float(n_correct) / (t + 1))

        # Multiplicative-weights update on the learners' 0/1 mistakes.
        miss_a = BACKEND.sign(score_a) != y[t]
        miss_b = BACKEND.sign(score_b) != y[t]
        mass_a = alpha * math.exp(-eta * miss_a)
        mass_b = (1 - alpha) * math.exp(-eta * miss_b)
        alpha = mass_a / (mass_a + mass_b)

    return ACR


def omd_solver_nuclear(M, v, n_iters, lr: float = 0.01):
    """Entropic mirror descent on the probability simplex.

    Minimizes the nuclear-style objective whose gradient is
    ``v - s / sqrt(s @ p)`` with ``s`` the per-column squared mass of ``M``;
    each step is an exponentiated-gradient update followed by
    renormalization. Returns the final probability vector ``p``.
    """
    p = BACKEND.ones_like(v) / len(v)
    col_sq = (M**2).sum(0)  # loop-invariant: column-wise squared mass of M
    for _ in range(n_iters):
        grad = v - col_sq / BACKEND.sqrt(col_sq @ p)
        p = p * BACKEND.exp(-lr * grad)
        p = p / p.sum()

    return p


def omd_solver_alignment(u, v, n_iters, lr: float = 0.01):
    """Entropic mirror descent on the simplex for the alignment objective.

    Same scheme as :func:`omd_solver_nuclear`, but the gradient is
    ``v - u^2 / sqrt(p @ u^2)``. Returns the final probability vector ``p``.
    """
    p = BACKEND.ones_like(v) / len(v)
    u_sq = u**2  # hoisted: constant across iterations
    for _ in range(n_iters):
        grad = v - u_sq / BACKEND.sqrt(p @ u_sq)
        p = p * BACKEND.exp(-lr * grad)
        p = p / p.sum()

    return p


def relation_learner(X1, X2, rff1: RFFTransformer, rff2: RFFTransformer):
    """Estimate the relation between the old and new feature spaces.

    Builds the cross-correlation matrix M between the two RFF feature maps
    and the second-moment vector v of the new features, solves for a feature
    weighting p via entropic mirror descent, and extracts an orthogonal-style
    mapping from M's SVD.

    NOTE(review): pairs X1[i] with X2[i], so len(X1) == len(X2) is assumed —
    confirm at the call site.
    """
    Z1, Z2 = rff1.transform(X1), rff2.transform(X2)
    D1, D2 = Z1.shape[1], Z2.shape[1]
    M, v = BACKEND.zeros((D1, D2)), BACKEND.zeros(D2)

    # Accumulate per-sample outer products; the sqrt(D1) / D2 factors appear
    # to undo the sqrt(2/D) scaling baked into RFFTransformer.transform —
    # presumably matching the paper's normalization; verify against source.
    for i in range(len(X1)):
        M += BACKEND.outer(Z1[i], Z2[i] * math.sqrt(D1))
        v += Z2[i]**2 * D2

    p = omd_solver_nuclear(M / len(X1), v / len(X1), 10000)
    # NOTE(review): numpy/torch/paddle linalg.svd return the third factor
    # already transposed (Vh); if so, the Procrustes-style map would be
    # U @ V rather than U @ V.T — confirm against the BACKEND in use.
    U, _, V = BACKEND.linalg.svd(M, full_matrices=False)
    return p, U @ V.T


def ideal_learner(X2, y, rff2: RFFTransformer):
    """Learn a feature weighting for the new feature space using labels.

    Accumulates the label/feature alignment vector ``u`` and the
    second-moment vector ``v`` over the batch, then solves for a probability
    vector ``p`` on the simplex via entropic mirror descent.

    Parameters
    ----------
    X2 : batch of current-stage inputs.
    y : labels aligned with the rows of X2 (one label per sample).
    rff2 : RFF map for the current feature space.
    """
    Z2 = rff2.transform(X2)
    D2 = Z2.shape[1]
    u, v = BACKEND.zeros(D2), BACKEND.zeros(D2)

    for i in range(len(X2)):
        v += Z2[i]**2 * D2
        # BUG FIX: use the i-th label, not the whole label vector. The
        # original `y * Z2[i]` broadcast all N labels against the D2
        # features, which silently computed garbage whenever N == D2
        # (exactly the B = D = 1000 experiment below) and raised otherwise.
        u += y[i] * Z2[i] * math.sqrt(D2)

    p = omd_solver_alignment(u / len(X2), v / len(X2), 10000)
    return p


def sigmoid(x):
    """Numerically stable elementwise logistic function.

    Only ``exp(-|x|)`` is ever evaluated, which cannot overflow. The
    original formulation computed ``exp(-x)`` for every element inside
    BACKEND.where — both branches are evaluated eagerly, so large negative
    inputs overflowed in the unselected branch (numpy RuntimeWarnings and,
    under autodiff backends, NaN gradients).
    """
    z = BACKEND.exp(-BACKEND.abs(x))
    # x >= 0:  1 / (1 + exp(-x))
    # x <  0:  exp(x) / (1 + exp(x))  ==  1 / (1 + exp(-x))
    return BACKEND.where(x >= 0, 1 / (1 + z), z / (1 + z))


def deriv_logistic(p, y):
    """Gradient of the logistic loss log(1 + exp(-y*p)) w.r.t. the score p.

    NOTE(review): this redefinition shadows the `deriv_logistic` imported
    from utils at the top of the file; this backend-aware version wins at
    runtime.
    """
    return -y * sigmoid(-y * p)


if __name__ == "__main__":

    # Seed every framework whose RNG the script may touch so the RFF draws
    # (made through RANDOM, i.e. the active BACKEND) are reproducible.
    # FIX: the original seeded only numpy and torch even though BACKEND is
    # paddle, leaving the RFF projections unseeded.
    np.random.seed(42)
    torch.manual_seed(42)
    paddle.seed(42)
    tf.random.set_seed(42)

    B = 1000  # evolving-stage overlap/batch size
    loss_grad = deriv_logistic
    data_T1, label_T1, data_T2, label_T2 = load_data('a9a', B)

    # Previous stage: plain OGD on the old feature space.
    rff1 = RFFTransformer(data_T1.shape[1], 1000, 1.)
    print("Previous stage")
    ACR_prev = []
    for _, w1, acr in online_gradient_descent(
            rff1.transform(data_T1[:-B]),
            label_T1[:-B],
            loss_grad,
    ):
        ACR_prev.append(acr)
    # NOTE: w1 deliberately holds the final weights after the loop ends.

    rff2 = RFFTransformer(data_T2.shape[1], 1000, 1.)
    rff2_ = RFFTransformer(data_T2.shape[1], 1000, 1.)

    # Evolving stage: learn the old->new relation R and feature weightings.
    print("Evolving stage")
    p1, R = relation_learner(data_T1[-B:], data_T2[:B], rff1, rff2)
    p2 = ideal_learner(data_T2[:B], label_T2[:B], rff2_)

    # Current stage: combine the transferred model (R.T @ w1 maps the old
    # weights into the new feature space) with a freshly initialized one.
    print("Current stage")
    ACR = weight_combination(
        rff2.transform(data_T2[B:]) * BACKEND.sqrt(p1),
        rff2_.transform(data_T2[B:]) * BACKEND.sqrt(p2),
        label_T2[B:],
        loss_grad,
        w1=R.T @ w1,
        w2=None,
    )

    # ACR[3:] skips the first few points — presumably to drop early noise
    # from the plot; confirm against the experiment writeup.
    plt.plot(ACR_prev + ACR[3:])
    plt.show()
