# -*- coding: utf-8 -*-
"""
Created on Tue Oct 15 17:36:39 2019

Y_new = Ys * w(x) + b(x)
Conditional distribution matching by min P(Ys_new|Xs) - P(Yt|Xt)

@author: 陈耿祥
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.linalg
import torch.optim
from sklearn.metrics import r2_score
from torch.autograd import Variable

from mmd_loss import *


def pdinv(A):
    """Invert a symmetric positive-definite matrix via its Cholesky factor.

    Parameters
    ----------
    A : torch.Tensor or ndarray
        Symmetric positive-definite matrix (n, n).

    Returns
    -------
    torch.Tensor
        A^{-1}, computed as U^{-1} U^{-T} where A = U^T U.

    Raises
    ------
    numpy.linalg.LinAlgError
        If A is not positive definite (Cholesky factorization fails).
    """
    A = torch.as_tensor(A)
    # scipy returns the upper-triangular factor U with A = U^T U by default.
    U = torch.from_numpy(scipy.linalg.cholesky(A.numpy()))
    invU = torch.inverse(U)
    # A^{-1} = (U^T U)^{-1} = U^{-1} U^{-T}
    return invU.mm(invU.t())


def kernel(ker, X, X2, sigma):
    """Compute a Gram matrix between the columns of X and X2 (PyTorch).

    Parameters
    ----------
    ker : str
        Kernel name: 'linear', 'rbf', or 'sam'.
    X : torch.Tensor
        Shape (n_feature, size1); samples are columns.
    X2 : torch.Tensor
        Shape (n_feature, size2); samples are columns.
    sigma : float
        Kernel width parameter (ignored for 'linear').

    Returns
    -------
    torch.Tensor
        Gram matrix of shape (size1, size2).

    Raises
    ------
    ValueError
        If `ker` is not one of the supported kernel names.
    """
    if ker == 'linear':
        return torch.mm(X.t(), X2)
    if ker == 'rbf':
        n1sq = torch.sum(X ** 2, 0)
        n2sq = torch.sum(X2 ** 2, 0)
        # Squared Euclidean distances ||x_i - x_j||^2 via broadcasting:
        # D[i, j] = |x_i|^2 + |x_j|^2 - 2 <x_i, x_j>
        D = n1sq.unsqueeze(1) + n2sq.unsqueeze(0) - 2 * torch.mm(X.t(), X2)
        return torch.exp(-sigma * D)
    if ker == 'sam':
        # Spectral angle mapper kernel: assumes columns are unit-normalized
        # so that the inner product lies in [-1, 1] — TODO confirm at callers.
        D = X.t().mm(X2)
        return torch.exp(-sigma * torch.acos(D) ** 2)
    raise ValueError("Unknown kernel type: {!r}".format(ker))


# class LS_minPYX():
def train(Xs, Xt, Ys, Yt, sigma,
          lambda_regularization=1e-3,
          lambda_inv=0.1,
          learning_rate=0.9,
          Max_Iter=100,
          Thresh=1e-5):
    """
    Y_new = Ys * w(x) + b(x)
    Conditional distribution matching by min P(Ys_new|Xs) - P(Yt|Xt)
    Parameters
    ----------
    Xs : X of source domain
    Xt : X of target domain
    Ys : X of source domain
    Yt : X of target domain
    sigma: int 
        the kernel width for Y used to construct Gram matrix K
    """

    # Initial parameters
    wide_kernel = sigma * 2
    lambda_inv = 0.1
    Tol = 1e-6
    Max_Iter = Max_Iter
    LR = learning_rate
    ns, nt = len(Xs), len(Xt)

    # to torch 
    Xs = torch.from_numpy(Xs)
    Xt = torch.from_numpy(Xt)
    Ys = torch.from_numpy(Ys)
    Yt = torch.from_numpy(Yt)

    # Kernel matrix [constant]
    KXs = kernel('rbf', Xs.t(), Xs.t(), wide_kernel)
    KXt = kernel('rbf', Xt.t(), Xt.t(), wide_kernel)
    KXs_inv = torch.inverse(KXs + lambda_inv * torch.eye(ns, dtype=torch.double))
    KXt_inv = torch.inverse(KXt + lambda_inv * torch.eye(nt, dtype=torch.double))
    KXtXs = kernel('rbf', Xt.t(), Xs.t(), wide_kernel)

    # Find R [constant]
    e, V = torch.eig(KXs.mm(KXs_inv), eigenvectors=True)
    mask = e[:, 0].gt(torch.max(e[:, 0] * Thresh))
    R = KXs.mm(KXs_inv).mm(V[mask].t())  # ns * n_egenvectors

    # initial params0 [constant]   
    temp0 = torch.inverse(R.t().mm(R)).mm(R.t()).mm(torch.ones((ns, 1), dtype=torch.double))
    params_W = torch.reshape(temp0, (R.shape[1], 1))
    params_B = torch.zeros((R.shape[1], 1), dtype=torch.double)

    # Set variable grads
    params_W = Variable(params_W, requires_grad=True)
    params_B = Variable(params_B, requires_grad=True)

    # Begin to optimize params
    Error = 1
    Iteriation = 0

    # loss function 
    opt_SGD = torch.optim.SGD([params_W, params_B], lr=LR)
    while (Error > Tol) & (Iteriation < Max_Iter):
        Iteriation += 1
        W = R.mm(params_W)
        B = R.mm(params_B)
        Ys_new = Ys.mul(W) + B
        tilde_K = kernel('rbf', Ys_new.t(), Ys_new.t(), wide_kernel)
        tilde_Kc = kernel('rbf', Yt.t(), Ys_new.t(), wide_kernel)
        part1 = torch.trace(KXs_inv.mm(tilde_K).mm(KXs_inv).mm(KXs))
        part2 = 2 * torch.trace(KXs_inv.mm(tilde_Kc.t()).mm(KXt_inv).mm(KXtXs))
        W_ = W - torch.ones(W.shape, dtype=torch.double)
        part3 = lambda_regularization * \
                (torch.sum(W_.mul(W_)) + torch.sum(B.mul(B)))
        loss = part1 - part2 + part3
        opt_SGD.zero_grad()
        loss.backward()
        opt_SGD.step()
        print("loss={:.2}".format(loss))

    return Ys_new


if __name__ == '__main__':

    # ---- hyper-parameters ------------------------------------------------
    sigma = 0.5     # base kernel width (train() doubles it internally)
    lbd_reg = 1e-4  # regularization pulling w -> 1 and b -> 0
    lbd_inv = 0.1   # ridge term for the Gram-matrix inversions
    lng_rate = 0.1  # SGD learning rate
    max_iter = 100  # maximum number of gradient steps
    Thresh = 1e-5   # eigenvalue cut-off for the basis R

    Title = 'Parameters: ' + 'sigma=' + str(sigma) + \
            ' lambdaInv=' + str(lbd_inv) + ' LR=' + str(lng_rate)

    # ---- demo data: dense source curve + sparse target samples -----------
    Xs_ = np.array(
        [-5., -4.6, -4.2, -3.8, -3.4, -3., -2.6, -2.2, -1.8, -1.4, -1., -0.6, -0.2, 0.2,
         0.6, 1., 1.4, 1.8, 2.2, 2.6, 3., 3.4, 3.8, 4.2, 4.6, 5., 5.4, 5.8,
         6.2, 6.6, 7., 7.4, 7.8, 8.2, 8.6, 9., 9.4, 9.8, 10.2, 10.6, 11., 11.4,
         11.8])
    Ys = np.array([[0.02955],
                   [0.02769],
                   [0.02585],
                   [0.02462],
                   [0.02334],
                   [0.02213],
                   [0.02116],
                   [0.02034],
                   [0.01995],
                   [0.02032],
                   [0.02086],
                   [0.02198],
                   [0.0232],
                   [0.02408],
                   [0.02485],
                   [0.02551],
                   [0.02604],
                   [0.02643],
                   [0.02638],
                   [0.02634],
                   [0.02666],
                   [0.02696],
                   [0.02715],
                   [0.02722],
                   [0.02718],
                   [0.02711],
                   [0.02709],
                   [0.02698],
                   [0.02665],
                   [0.02681],
                   [0.02669],
                   [0.02704],
                   [0.02778],
                   [0.02934],
                   [0.03156],
                   [0.03441],
                   [0.03737],
                   [0.04069],
                   [0.04508],
                   [0.04898],
                   [0.05339],
                   [0.05855],
                   [0.06423]])

    Xt_ = np.array([-4.91, -3.37, -1.58, -0.31, 1.25, 2.8, 4.32, 5.85, 7.4, 8.93, 11.23])
    Yt = np.array(
        [[0.03], [0.0229], [0.022], [0.0248], [0.0304], [0.0332], [0.0342], [0.0354], [0.0356], [0.0351], [0.043]])

    # Duplicate the 1-D inputs into two identical feature columns, shape (n, 2).
    Xs = np.vstack((Xs_, Xs_)).T
    Xt = np.vstack((Xt_, Xt_)).T

    # ---- adapt the source outputs to the target domain -------------------
    Ys_new = train(Xs, Xt, Ys, Yt, sigma=sigma,
                   lambda_regularization=lbd_reg,
                   lambda_inv=lbd_inv,
                   learning_rate=lng_rate,
                   Max_Iter=max_iter,
                   Thresh=Thresh)
    Ys_new = Ys_new.detach().numpy()

    # ---- plot source, adapted and target curves --------------------------
    fig, ax = plt.subplots()
    plt.plot(Xs_, Ys_new, 'purple', lw=1, zorder=9, label='Min P(Y|X)')
    plt.plot(Xs_, Ys, 'r-', lw=1, label='Source model')
    plt.scatter(Xt_, Yt, c='b', s=50, label='Target Data')
    fontfamily = 'NSimSun'
    font = {'family': fontfamily,
            'size': 12,
            'weight': 23}
    ax.set_xlabel('X', fontproperties=fontfamily, size=12)
    ax.set_ylabel('Y', fontproperties=fontfamily, size=12)
    plt.yticks(fontproperties=fontfamily, size=12)
    plt.xticks(fontproperties=fontfamily, size=12)
    ax.set_title(Title, fontproperties=fontfamily, size=12)
    plt.legend(prop=font)  # single legend call (was issued twice)
    plt.tight_layout()
    plt.show()

    print(Ys_new)
