{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "uLEAaahgfdfs"
      },
      "source": [
        "Copyright 2023 Google LLC\n",
        "\n",
        "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "you may not use this file except in compliance with the License.\n",
        "You may obtain a copy of the License at\n",
        "\n",
        "    https://www.apache.org/licenses/LICENSE-2.0\n",
        "\n",
        "Unless required by applicable law or agreed to in writing, software\n",
        "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "See the License for the specific language governing permissions and\n",
        "limitations under the License."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "UNSr0eZdyILL"
      },
      "source": [
        "# Latent shift adaptation of continuous random variables via least-squares conditional density estimator\n",
        "\n",
        "This notebook implements the experiment for the continuous spectral method described in Supplementary Material C. This notebook reproduces the result in Figure 3B. This notebook relies on previously executing `colab/synthetic_data_to_file.ipynb`."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xDPcJT60xFE3"
      },
      "outputs": [],
      "source": [
        "import numpy as np\n",
        "import scipy\n",
        "import matplotlib.pyplot as plt\n",
        "import scipy.stats as stats\n",
        "import re\n",
        "import pandas as pd\n",
        "\n",
        "from itertools import chain\n",
        "from sklearn.cluster import KMeans\n",
        "from sklearn.neural_network import MLPClassifier\n",
        "from sklearn.metrics import roc_auc_score, accuracy_score, log_loss, brier_score_loss\n",
        "from io import BytesIO\n",
        "from cosde.base import EigenBase, LSEigenBase\n",
        "from cosde.utils import l2_norm, compute_inv_eigen_system, inner_product\n",
        "\n",
        "from latent_shift_adaptation.methods.continuous_spectral_method.library import *\n",
        "from latent_shift_adaptation.methods.continuous_spectral_method.utils import gram_schmidt_lse, gram_schmidt, compute_adaggerb_multi, multi_least_squares, least_squares, multi_least_squares_scale\n",
        "from latent_shift_adaptation.methods.continuous_spectral_method.create_basis import basis_from_centers\n",
        "from latent_shift_adaptation.methods.continuous_spectral_method.multi_ls_conditional_de import MultiCDEBase\n",
        "from latent_shift_adaptation.methods.continuous_spectral_method.multi_ls_de import MultiDEBase\n",
        "from latent_shift_adaptation.methods.continuous_spectral_method.multi_ls_marginal_de import MultiMDEBase"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "tn-gS0Cypseu"
      },
      "outputs": [],
      "source": [
        "#@title Library functions\n",
        "\n",
        "\n",
        "# Extract dataframe format back to dict format\n",
        "def extract_from_df(samples_df, cols=['u', 'x', 'w', 'c', 'c_logits', 'y', 'y_logits', 'y_one_hot', 'w_binary', 'w_one_hot', 'u_one_hot', 'x_scaled']):\n",
        "  \"\"\"Extracts a dict of numpy arrays from a dataframe.\n",
        "\n",
        "  Scalar variables are returned as 1-D arrays; multi-dimensional variables\n",
        "  stored as suffixed columns (e.g. 'x_0', 'x_1') are stacked into 2-D arrays.\n",
        "\n",
        "  Args:\n",
        "    samples_df: dataframe of samples.\n",
        "    cols: variable names to extract.\n",
        "\n",
        "  Returns:\n",
        "    dict mapping each variable name found in samples_df to a numpy array.\n",
        "  \"\"\"\n",
        "  result = {}\n",
        "  for col in cols:\n",
        "    if col in samples_df.columns:\n",
        "      result[col] = samples_df[col].values\n",
        "    else:\n",
        "      # Raw string so \\d is a regex digit class rather than an invalid\n",
        "      # Python escape; \\d+ also covers variables with >= 10 components.\n",
        "      match_str = rf\"^{col}_\\d+$\"\n",
        "      r = re.compile(match_str, re.IGNORECASE)\n",
        "      matching_columns = list(filter(r.match, samples_df.columns))\n",
        "      if len(matching_columns) == 0:\n",
        "        continue\n",
        "      result[col] = samples_df[matching_columns].to_numpy()\n",
        "  return result\n",
        "\n",
        "def extract_from_df_nested(samples_df, cols=['u', 'x', 'w', 'c', 'c_logits', 'y', 'y_logits', 'y_one_hot', 'w_binary', 'w_one_hot', 'u_one_hot', 'x_scaled']):\n",
        "  \"\"\"Extracts a nested dict of numpy arrays, keyed by partition.\n",
        "\n",
        "  Args:\n",
        "    samples_df: dataframe with a 'partition' column.\n",
        "    cols: variable names to extract within each partition.\n",
        "\n",
        "  Returns:\n",
        "    dict with structure {partition: {variable: array}}.\n",
        "  \"\"\"\n",
        "  result = {}\n",
        "  for partition in samples_df['partition'].unique():\n",
        "    partition_df = samples_df.query('partition == @partition')\n",
        "    result[partition] = extract_from_df(partition_df, cols=cols)\n",
        "  return result\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "nb41iC3tZUfx"
      },
      "outputs": [],
      "source": [
        "#setting the parameters\n",
        "\n",
        "params = {\n",
        "        'num_samples':\n",
        "            10000,\n",
        "        'k_w':\n",
        "            1,\n",
        "        'k_x':\n",
        "            2,\n",
        "        'mu_w_u_coeff':\n",
        "            1,\n",
        "        'mu_x_u_coeff':\n",
        "            1,\n",
        "        'mu_w_u_mat': np.array([[-3, 3]]).T,\n",
        "        'mu_x_u_mat':\n",
        "            np.array([[-1, 1], [1, -1]]),  # k_u x k_x\n",
        "    }\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "RpRj9bXq4Qq9"
      },
      "outputs": [],
      "source": [
        "colors =  plt.get_cmap('tab20c')\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "pNdXQujGQKms"
      },
      "source": [
        "## Adaptation Procedures"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Rg2G9XKfucGp"
      },
      "outputs": [],
      "source": [
        "#@title Compute $p(U\\mid X)$\n",
        "\n",
        "\n",
        "def compute_pu_x(fw_u, fw_x, x0, thre=1e-3):\n",
        "  \"\"\"Estimate p(U=i|x0) for i=1,...,k.\n",
        "\n",
        "  Args:\n",
        "    fw_u: list of LSEigenBase objects [f(W|U=1),...,f(W|U=k)]\n",
        "    fw_x: conditional density estimator of f(W|X)\n",
        "    x0: the point to be evaluated, (1, number of features)\n",
        "    thre: truncation threshold; currently unused, kept for interface\n",
        "      compatibility with existing callers\n",
        "\n",
        "  Returns:\n",
        "    pU_x0: probability simplex over U\n",
        "\n",
        "  \"\"\"\n",
        "  # get the estimated conditional density function f(W|X=x0)\n",
        "  fw_x0 = fw_x.get_density_function(x0)\n",
        "\n",
        "  # use least-squares estimator to estimate f(U|x0)\n",
        "  pU_x0 = least_squares(fw_u, fw_x0, verbose=False, reuse_gram=False)\n",
        "  # clip negative coefficients so the estimate is a valid probability\n",
        "  pU_x0 = np.maximum(pU_x0, 0)\n",
        "  # normalize so the probabilities sum to 1\n",
        "  pU_x0 = pU_x0/sum(pU_x0)\n",
        "\n",
        "  return pU_x0\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "PPx9g8xeyMUY"
      },
      "outputs": [],
      "source": [
        "#@title Estimate $f(Y|X,U)$\n",
        "\n",
        "\n",
        "def predict(qu_pu,fw_y0x,fw_y1x, py_x, fw_u, fw_x, x0, normalize='standard'):\n",
        "  \"\"\"Estimate q(Y|x0) in the target domain and f(y|x0, U=i) for i=1,...,k.\n",
        "\n",
        "  Args:\n",
        "    qu_pu: density ratio q(U)/p(U), ndarray\n",
        "    fw_y0x: f(W|Y=0, X)\n",
        "    fw_y1x: f(W|Y=1, X)\n",
        "    py_x: p(Y|x) classifier with a predict_proba method\n",
        "    fw_u: list of LSEigenBase objects, [f(W|U=1),...,f(W|U=k)]\n",
        "    fw_x: f(W|X)\n",
        "    x0: the point to be evaluated, (1, number of features)\n",
        "    normalize: 'standard' renormalizes to sum 1, otherwise softmax\n",
        "\n",
        "  Returns:\n",
        "    out_prob: probability simplex over Y\n",
        "    mse_fy_xu: MSE of the estimated f(y|x0,u) against the true values\n",
        "\n",
        "  \"\"\"\n",
        "  # estimate p(U|x0) with the least-squares estimator\n",
        "  pU_x0 = compute_pu_x(fw_u,fw_x,x0, thre=1e-3)\n",
        "\n",
        "  # estimate f(w|u)p(u|x0)\n",
        "  fwu_x0 = []\n",
        "  for p, f in zip(pU_x0, fw_u):\n",
        "    # zero out basis coefficients that are numerically negligible\n",
        "    id_cut = np.where(np.abs(f.get_params()['coeff'])<1e-10)[0]\n",
        "    new_coeff = f.get_params()['coeff']*p\n",
        "    new_coeff[id_cut] = 0.\n",
        "    fwu_x0.append(LSEigenBase(f.baselist, new_coeff))\n",
        "\n",
        "  # f(W|Y=y, X) evaluated at x0\n",
        "  fw_y0x0 = fw_y0x.get_density_function(x0)\n",
        "  fw_y1x0 = fw_y1x.get_density_function(x0)\n",
        "\n",
        "  # p(Y|x0) is needed for both classes; compute it once\n",
        "  py_x0 = py_x.predict_proba(x0.reshape(1,params['k_x']))[0]\n",
        "\n",
        "  # f(w, y|x0) = f(w|y, x0) * p(y|x0)\n",
        "  fw_y0x0_coeff = fw_y0x0.get_params()['coeff']\n",
        "  fwy0_x0 = LSEigenBase(fw_y0x0.get_params()['base_list'], fw_y0x0_coeff * py_x0[0])\n",
        "\n",
        "  fw_y1x0_coeff = fw_y1x0.get_params()['coeff']\n",
        "  fwy1_x0 = LSEigenBase(fw_y1x0.get_params()['base_list'], fw_y1x0_coeff * py_x0[1])\n",
        "\n",
        "  # solve for f(y|x0,u), then normalize each u-column to sum to 1 over y\n",
        "  solution = multi_least_squares_scale(fwu_x0, fwy0_x0, fwy1_x0, pU_x0, reuse_gram = False)\n",
        "  fy0_x0u = solution[0:2]\n",
        "  fy1_x0u = solution[2::]\n",
        "  sum_u0 = fy0_x0u[0]+fy1_x0u[0]\n",
        "  sum_u1 = fy0_x0u[1]+fy1_x0u[1]\n",
        "  sum_u = np.array([1./sum_u0, 1./sum_u1])\n",
        "  fy0_x0u = fy0_x0u*sum_u\n",
        "  fy1_x0u = fy1_x0u*sum_u\n",
        "\n",
        "  # compare against the ground-truth f(y|x0,u) for diagnostics\n",
        "  true_fy0_x0u = np.array([1-multi_true_p_y_ux(x0, 0, params), 1-multi_true_p_y_ux(x0, 1, params)]).squeeze()\n",
        "  true_fy1_x0u = np.array([multi_true_p_y_ux(x0, 0, params), multi_true_p_y_ux(x0, 1, params)]).squeeze()\n",
        "  mse_fy_xu = np.mean(np.array([fy0_x0u-true_fy0_x0u, fy1_x0u-true_fy1_x0u])**2)\n",
        "\n",
        "  # q(y|x0) = sum_u f(y|x0,u) * (q(u)/p(u)) * p(u|x0), clipped at 0\n",
        "  qy0_x0 = max(0., sum(fy0_x0u*qu_pu*pU_x0))\n",
        "  qy1_x0 = max(0., sum(fy1_x0u*qu_pu*pU_x0))\n",
        "  out_prob = np.array([qy0_x0, qy1_x0])\n",
        "  if normalize=='standard':\n",
        "    out_prob /= np.sum(out_prob)\n",
        "  else:\n",
        "    out_prob = scipy.special.softmax(out_prob)\n",
        "  return out_prob, mse_fy_xu"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "LuA1swVmWDe0"
      },
      "outputs": [],
      "source": [
        "#@title Estimate $f(Y|X,U)$\n",
        "\n",
        "\n",
        "def predict_verification(qu_pu, py_x, fw_u, fw_x, x0, normalize='standard'):\n",
        "  \"\"\"Estimate q(Y|x0) using the ground-truth f(y|x0, U=i) for i=1,...,k.\n",
        "\n",
        "  Verification variant of `predict`: instead of estimating f(Y|X,U) from\n",
        "  data it plugs in the true conditional multi_true_p_y_ux.\n",
        "\n",
        "  Args:\n",
        "    qu_pu: density ratio q(U)/p(U), ndarray\n",
        "    py_x: p(Y|x) classifier; unused here, kept for interface parity with\n",
        "      `predict`\n",
        "    fw_u: list of LSEigenBase objects, [f(W|U=1),...,f(W|U=k)]\n",
        "    fw_x: f(W|X)\n",
        "    x0: the point to be evaluated, (1, number of features)\n",
        "    normalize: 'standard' renormalizes to sum 1, otherwise softmax\n",
        "\n",
        "  Returns:\n",
        "    out_prob: probability simplex over Y\n",
        "\n",
        "  \"\"\"\n",
        "  # estimate p(U|x0) with the least-squares estimator\n",
        "  pU_x0 = compute_pu_x(fw_u,fw_x,x0, thre=1e-3)\n",
        "\n",
        "  # Plug in the true f(y=0|x0,u=0), f(y=0|x0,u=1) and their y=1 analogues.\n",
        "  # (The previous version also referenced fw_y0x/fw_y1x, which are not\n",
        "  # parameters of this function and raised NameError; that dead code and\n",
        "  # the unused fwu_x0 construction have been removed.)\n",
        "  fy0_x0u = np.array([1-multi_true_p_y_ux(x0, 0, params), 1-multi_true_p_y_ux(x0, 1, params)]).squeeze()\n",
        "\n",
        "  fy1_x0u = np.array([multi_true_p_y_ux(x0, 0, params), multi_true_p_y_ux(x0, 1, params)]).squeeze()\n",
        "\n",
        "  # q(y|x0) = sum_u f(y|x0,u) * (q(u)/p(u)) * p(u|x0)\n",
        "  qy0_x0 = sum(fy0_x0u*qu_pu*pU_x0)\n",
        "  qy1_x0 = sum(fy1_x0u*qu_pu*pU_x0)\n",
        "  out_prob = np.array([qy0_x0, qy1_x0])\n",
        "  if normalize=='standard':\n",
        "    out_prob /= np.sum(out_prob)\n",
        "  else:\n",
        "    out_prob = scipy.special.softmax(out_prob)\n",
        "\n",
        "  return out_prob"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ENqAjh4VCK3H"
      },
      "outputs": [],
      "source": [
        "#@title Learning process\n",
        "\n",
        "def train_process(source_data_sample, target_data_sample, p_u_source, p_u_target, params, method='kmeans', test_c=np.array([1,0,0]), test_y = 1, evaluate=False):\n",
        "  \"\"\"domain adaptation via spectral method\"\"\"\n",
        "\n",
        "  # construct linear independent basis\n",
        "  basisx = [basis_from_centers(params['mu_x_u_mat'].squeeze()[i], 1) for i in range(params['k_x'])]\n",
        "  basisw = basis_from_centers(params['mu_w_u_mat'].squeeze(), 1)\n",
        "  basis = []\n",
        "  for x,w in zip(basisx, basisw):\n",
        "    basis.append(x+[w])\n",
        "  ##########################\n",
        "  # step 1 Estimate f(W|U) #\n",
        "  ##########################\n",
        "\n",
        "  # Estimate f(W,X|c) and f(W,X,y|c)\n",
        "  c_id = np.where(np.sum(source_data_sample['c']==test_c,axis=1)==3)[0]\n",
        "  sx_c = np.array(source_data_sample['x'][c_id])\n",
        "  sw_c = np.array(source_data_sample['w'][c_id])[:,np.newaxis]\n",
        "\n",
        "  cy_id = np.where((np.sum(source_data_sample['c']==test_c,axis=1)==3) \u0026 (source_data_sample['y'] == test_y))[0]\n",
        "  sx_cy0 = np.array(source_data_sample['x'][cy_id])\n",
        "  sw_cy0 = np.array(source_data_sample['w'][cy_id])[:,np.newaxis]\n",
        "\n",
        "  # estimate the density estimator\n",
        "  # f(x,w,c=test_c)\n",
        "  fxw_c = MultiDEBase([sx_c[:, 0][:, np.newaxis], sx_c[:, 1][:, np.newaxis], sw_c], basis, 1e-2)\n",
        "  # f(x,w,c=test_c,y=test_y)\n",
        "  fxwy0_c =  MultiDEBase([sx_cy0[:, 0][:, np.newaxis], sx_cy0[:, 1][:, np.newaxis], sw_cy0], basis, 1e-2)\n",
        "\n",
        "  # compute $\\mathfrak{A}^\\dagger\\mathfrak{B}$\n",
        "\n",
        "  # ensure that sigular values of fxwy0_c is not too small\n",
        "  fxwy0_c_df = fxwy0_c.density_function\n",
        "  id = np.argsort(fxwy0_c_df.get_params()['coeff'])[::-1][0:2]\n",
        "  #id = np.arange(4)\n",
        "\n",
        "  new_coeff = fxwy0_c_df.get_params()['coeff'][id]\n",
        "\n",
        "  base_list = []\n",
        "  for i in id:\n",
        "    base_list.append(fxwy0_c_df.get_params()['base_list'][i])\n",
        "  fxwy0_c_df = LSEigenBase(base_list, new_coeff)\n",
        "\n",
        "  fxw_c_df = fxw_c.density_function\n",
        "  id = np.argsort(fxw_c_df.get_params()['coeff'])[::-1][0:2]\n",
        "\n",
        "  #id = np.arange(4)\n",
        "\n",
        "  new_coeff = fxw_c_df.get_params()['coeff'][id]\n",
        "  base_list = []\n",
        "  for i in id:\n",
        "    base_list.append(fxw_c_df.get_params()['base_list'][i])\n",
        "  fxw_c_df = LSEigenBase(base_list, new_coeff)\n",
        "\n",
        "  D, x_coor, y_coor = compute_adaggerb_multi(fxw_c_df, fxwy0_c_df)\n",
        "\n",
        "\n",
        "  # only consider taking the top 2 components\n",
        "  w, eigen_func = compute_inv_eigen_system(D, y_coor)\n",
        "\n",
        "\n",
        "  # plot eigen function\n",
        "\n",
        "  fw_u = []\n",
        "\n",
        "  for func in eigen_func:\n",
        "    # get the parameters\n",
        "    param_dict = func.get_params()\n",
        "\n",
        "    baselist = param_dict['base_list']\n",
        "    vec = []\n",
        "    # normalize the function\n",
        "    for b in baselist:\n",
        "      l = b.get_params()['kernel'].get_params()['length_scale']\n",
        "      vec.append(np.sqrt(2*np.pi)*np.sum(b.get_params()['weight'])*l)\n",
        "    l1_sum = np.sum(param_dict['coeff'] * np.array(vec))\n",
        "    # rescale the eigenfunction so that the density function is sum to 1\n",
        "    weight = param_dict['coeff']/l1_sum\n",
        "    fw_u.append(LSEigenBase(baselist, weight))\n",
        "\n",
        "  for j in range(len(fw_u)):\n",
        "    new_w = np.linspace(-7,7,100)\n",
        "    w_0 = new_w[0]\n",
        "    l1_sum = 0\n",
        "    p_w = np.zeros(new_w.shape)\n",
        "    for i, w in enumerate(new_w):\n",
        "      p_w[i] = fw_u[j].eval(w.reshape((1,1)))\n",
        "\n",
        "      l1_sum += np.abs(p_w[i]) * (w-w_0)\n",
        "      w_0 = w\n",
        "\n",
        "    plt.plot(new_w, p_w, color=colors((j+2)*4+0),label='fw_u %d cdf: %.2f'%(j+1, l1_sum))\n",
        "  plt.legend(bbox_to_anchor=(1.1, 0.6))\n",
        "  plt.plot(new_w, stats.norm.pdf(new_w, params['mu_w_u_mat'][0], 1),'-.', color='b', label='true density function 1')\n",
        "  plt.plot(new_w, stats.norm.pdf(new_w, params['mu_w_u_mat'][1], 1), '-.',color='r', label='true density function 2')\n",
        "  plt.title('Estimated f(W|U)')\n",
        "  plt.show()\n",
        "\n",
        "  #############################\n",
        "  # step 2 Estimate q(U)/p(U) #\n",
        "  #############################\n",
        "\n",
        "\n",
        "  # The first step is to estimate f(w|x)\n",
        "  sw = np.array(source_data_sample['w'])[:, np.newaxis]\n",
        "  sx = np.array(source_data_sample['x'])\n",
        "  fw_x = MultiCDEBase( sx, sw, basisx, basisw, 1e-4)\n",
        "\n",
        "\n",
        "  #compute the MSE of f(w|x)\n",
        "  if evaluate:\n",
        "    new_w = np.linspace(-7,7,20)\n",
        "    new_x = np.linspace(-3,3,20)\n",
        "\n",
        "    cosde_pdf = np.zeros((new_x.size,new_x.size, new_w.size))\n",
        "    true_pdf = np.zeros(cosde_pdf.shape)\n",
        "\n",
        "    for i, x in enumerate(new_x):\n",
        "      for k, x2 in enumerate(new_x):\n",
        "        for j, w in enumerate(new_w):\n",
        "          n_x = np.array([x, x2])\n",
        "          fw_x0 = fw_x.get_density_function(n_x)\n",
        "          cosde_pdf[i, k, j] = fw_x0.eval(w.reshape(1,1))\n",
        "          true_pdf[i, k, j] = multi_true_p_w_x(w, n_x, p_u_source, params)\n",
        "\n",
        "    print(\"MSE of f(W|X)\", np.mean((cosde_pdf-true_pdf)**2))\n",
        "\n",
        "\n",
        "  # estimate g(x) and f(x) from data\n",
        "\n",
        "  sx = np.array(source_data_sample['x'])\n",
        "  tx = np.array(target_data_sample['x'])\n",
        "  # kernel density estimator\n",
        "  fx = MultiMDEBase(sx, basisx ,1e-4)\n",
        "  gx = MultiMDEBase(tx, basisx, 1e-4)\n",
        "\n",
        "\n",
        "  if evaluate:\n",
        "    new_w = np.linspace(-7,7,20)\n",
        "    new_x = np.linspace(-3,3,20)\n",
        "\n",
        "    cosde_pdf = np.zeros((new_x.size,new_x.size))\n",
        "    true_pdf = np.zeros(cosde_pdf.shape)\n",
        "\n",
        "    for i, x in enumerate(new_x):\n",
        "      for j, y in enumerate(new_x):\n",
        "        n_x = np.array([x,y])\n",
        "        cosde_pdf[i,j] = fx.get_pdf([x.reshape(1,1), y.reshape(1,1)])\n",
        "        true_pdf[i,j] = multi_true_p_x(n_x, p_u_source, params['mu_x_u_mat'].squeeze()*params['mu_x_u_coeff'])\n",
        "    print('MSE of f(x)', np.mean((cosde_pdf-true_pdf)**2))\n",
        "\n",
        "\n",
        "    cosde_pdf = np.zeros((new_x.size,new_x.size))\n",
        "    true_pdf = np.zeros(cosde_pdf.shape)\n",
        "\n",
        "    for i, x in enumerate(new_x):\n",
        "      for j, y in enumerate(new_x):\n",
        "        n_x = np.array([x,y])\n",
        "        cosde_pdf[i,j] = gx.get_pdf([x.reshape(1,1), y.reshape(1,1)])\n",
        "        true_pdf[i,j] = multi_true_p_x(n_x,p_u_target, params['mu_x_u_mat'].squeeze()*params['mu_x_u_coeff'])\n",
        "    print('MSE of g(x)', np.mean((cosde_pdf-true_pdf)**2))\n",
        "  if method == 'kmeans':\n",
        "    # select samples\n",
        "    kmeans = KMeans(n_clusters=2, random_state=0).fit(np.array(source_data_sample['x']).reshape(-1,2))\n",
        "    centers = kmeans.cluster_centers_.squeeze()\n",
        "    x0 = centers[0]\n",
        "    x1 = centers[1]\n",
        "\n",
        "    # use least-squares estimator to estimate f(U|x1)\n",
        "\n",
        "    pU_x0 = compute_pu_x(fw_u,fw_x,x0)\n",
        "\n",
        "    pU_x1 = compute_pu_x(fw_u,fw_x,x1)\n",
        "\n",
        "    # construct the confusion matrix\n",
        "    C = np.array([pU_x0,pU_x1])\n",
        "\n",
        "    # solve the linear system\n",
        "\n",
        "    # get g(x0)/f(x0)\n",
        "    f_x0 = fx.get_pdf([x0[0].reshape(1,1), x0[1].reshape(1,1)])\n",
        "    g_x0 = gx.get_pdf([x0[0].reshape(1,1), x0[1].reshape(1,1)])\n",
        "    gx0_fx0 = g_x0/f_x0\n",
        "\n",
        "    # get g(x1)/f(x1)\n",
        "    f_x1 = fx.get_pdf([x1[0].reshape(1,1), x1[1].reshape(1,1)])\n",
        "    g_x1 = gx.get_pdf([x1[0].reshape(1,1), x1[1].reshape(1,1)])\n",
        "    gx1_fx1 = g_x1/f_x1\n",
        "\n",
        "    x_ratio = np.array([gx0_fx0, gx1_fx1]).squeeze()\n",
        "    qu_pu = scipy.optimize.nnls(C, x_ratio)[0]\n",
        "  elif method == 'random':\n",
        "    # randomly sample 100 points\n",
        "    np.random.seed(1)\n",
        "    random_id = np.random.choice(source_data_sample['x'].shape[0], 100, replace=False)\n",
        "\n",
        "    select_x = np.array(source_data_sample['x'])[random_id,:]\n",
        "    pU_x_mat = np.zeros((select_x.size,2))\n",
        "    qx_px_mat = np.zeros(select_x.size)\n",
        "    for i,x in enumerate(select_x):\n",
        "      pU_x_mat[i] = compute_pu_x(fw_u,fw_x,x)\n",
        "      qx = gx.get_pdf([x[0].reshape(1,1), x[1].reshape(1,1)])\n",
        "      px = fx.get_pdf([x[0].reshape(1,1), x[1].reshape(1,1)])\n",
        "      qx_px_mat[i] = qx/(px)\n",
        "    qu_pu = scipy.optimize.nnls(pU_x_mat,qx_px_mat)[0]\n",
        "\n",
        "\n",
        "  if evaluate:\n",
        "    print('Estimated:', qu_pu)\n",
        "    print('MSE of q(U)/p(U):', np.mean((qu_pu-np.array(p_u_target)/np.array(p_u_source))**2))\n",
        "\n",
        "    random_id = np.random.choice(source_data_sample['x'].shape[0], 100, replace=False)\n",
        "    select_x = np.array(source_data_sample['x'])[random_id,:]\n",
        "    pU_x_mat = np.zeros((select_x.size,2))\n",
        "    pU_x_mat_true = np.zeros((select_x.size,2))\n",
        "    diff = 0\n",
        "    for i,x in enumerate(select_x):\n",
        "      pU_x_mat[i] = compute_pu_x(fw_u,fw_x,x)\n",
        "      pU_x_mat_true[i] = np.array(multi_true_p_u_x(x,p_u_source,params['mu_x_u_coeff']*params['mu_x_u_mat'].squeeze())).squeeze()\n",
        "      diff += np.mean((pU_x_mat[i]-pU_x_mat_true[i])**2)\n",
        "\n",
        "    print('MSE of p(U|x):', diff/select_x.size)\n",
        "\n",
        "  #############################\n",
        "  # step 3 Estimate f(W|U,x)  #\n",
        "  #############################\n",
        "\n",
        "\n",
        "  # Learn p(y|x) via MLP\n",
        "  mlp_p_y_x = MLPClassifier(random_state=0, learning_rate='adaptive', max_iter=10000).fit(np.array(source_data_sample['x']), np.array(source_data_sample['y']))\n",
        "\n",
        "  if evaluate:\n",
        "    mse = []\n",
        "    for x in source_data_sample['x']:\n",
        "      mse.append((multi_true_p_y_x(x, p_u_source , params)-mlp_p_y_x.predict_proba(x.reshape(-1,2))[:,1])**2)\n",
        "    print('MSE of mlp p(y|x):',np.mean(np.array(mse)))\n",
        "  # Estimate f(W|x, y=0)\n",
        "\n",
        "  y0_id = np.where(source_data_sample['y']==0)[0]\n",
        "  sx_y0 = np.array(source_data_sample['x'][y0_id])\n",
        "  sw_y0 = np.array(source_data_sample['w'][y0_id])[:, np.newaxis]\n",
        "\n",
        "  fw_y0x = MultiCDEBase(sx_y0, sw_y0, basisx, basisw, 1e-4)\n",
        "\n",
        "  # Estimate f(W|x, y=1)\n",
        "  y1_id = np.where(source_data_sample['y']==1)[0]\n",
        "  sx_y1 = np.array(source_data_sample['x'][y1_id])\n",
        "  sw_y1 = np.array(source_data_sample['w'][y1_id])[:, np.newaxis]\n",
        "\n",
        "  fw_y1x = MultiCDEBase(sx_y1, sw_y1, basisx, basisw, 1e-4)\n",
        "\n",
        "  if evaluate:\n",
        "\n",
        "    new_w = np.linspace(-7,7,20)\n",
        "    new_x = np.linspace(-4,4,20)\n",
        "\n",
        "    fwy0_x0_pdf = np.zeros((new_x.size,new_x.size, new_w.size))\n",
        "    fwy1_x0_pdf = np.zeros((new_x.size,new_x.size, new_w.size))\n",
        "\n",
        "    true_fwy0_x0_pdf = np.zeros(fwy0_x0_pdf.shape)\n",
        "    true_fwy1_x0_pdf = np.zeros(fwy1_x0_pdf.shape)\n",
        "\n",
        "    for i, x1 in enumerate(new_x):\n",
        "      for j, x2 in enumerate(new_x):\n",
        "        for k, w in enumerate(new_w):\n",
        "          n_x = np.array([x1, x2])\n",
        "          fw_y0x0 = fw_y0x.get_density_function(n_x)\n",
        "          fw_y1x0 = fw_y1x.get_density_function(n_x)\n",
        "\n",
        "\n",
        "          fwy1_x0_pdf[i, j, k] = fw_y1x0.eval(w.reshape(1,1))*mlp_p_y_x.predict_proba(n_x.reshape(1,2))[0,1]\n",
        "          true_fwy1_x0_pdf[i, j, k] =  multi_true_p_yw_x(1,w,n_x, p_u_source,params)\n",
        "\n",
        "\n",
        "\n",
        "          fwy0_x0_pdf[i, j, k] = fw_y0x0.eval(w.reshape(1,1))*mlp_p_y_x.predict_proba(n_x.reshape(1,2))[0,0]\n",
        "          true_fwy0_x0_pdf[i, j, k] = multi_true_p_yw_x(0,w,n_x, p_u_source,params)\n",
        "\n",
        "\n",
        "    print('MSE of p(Y=1,w|x):', np.mean((fwy1_x0_pdf-true_fwy1_x0_pdf)**2))\n",
        "    print('MSE of p(Y=0,w|x):', np.mean((fwy0_x0_pdf-true_fwy0_x0_pdf)**2))\n",
        "\n",
        "  results = {\n",
        "      'qu_pu': qu_pu,\n",
        "      'fw_y0x': fw_y0x,\n",
        "      'fw_y1x': fw_y1x,\n",
        "      'py_x': mlp_p_y_x,\n",
        "      'fw_u': fw_u,\n",
        "      'fw_x': fw_x\n",
        "  }\n",
        "  return results\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9fNuCadayjbG"
      },
      "source": [
        "## Evaluation\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "BJ0zyC75vjyz"
      },
      "outputs": [],
      "source": [
        "def print_results(result):\n",
        "  \"\"\"Print every metric for every evaluated method in `result`.\n",
        "\n",
        "  Args:\n",
        "    result: nested dict {method: {metric: value}}; must contain key 'ss'.\n",
        "  \"\"\"\n",
        "  # human-readable display names for each method key\n",
        "  look_up = {\n",
        "      'ss': 'source on source',\n",
        "      'st': 'source on target',\n",
        "      'tt': 'target on target',\n",
        "      'adapt_kmeans': 'adaptation(kmeans)',\n",
        "      'adapt_random': 'adaptation(random samples)',\n",
        "      'adapt_yux': 'adaptation (true p(y|u,x))'\n",
        "  }\n",
        "  # the metric names are taken from 'ss'; all methods share the same keys\n",
        "  for key in result['ss'].keys():\n",
        "    print('metric {}'.format(key))\n",
        "    for mt in result.keys():\n",
        "      print(look_up[mt],\": %.4f\"%result[mt][key])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "47W0ZFcUQdga"
      },
      "outputs": [],
      "source": [
        "def inference(dataset,qu_pu, fw_y0x, fw_y1x, p_y_x, fw_u, fw_x, true_p_u):\n",
        "  \"\"\"Evaluate the adapted predictor on a dataset and report metrics.\n",
        "\n",
        "  Args:\n",
        "    dataset: dict with 'x' (features) and 'y' (binary labels)\n",
        "    qu_pu: density ratio q(U)/p(U), ndarray\n",
        "    fw_y0x: f(W|Y=0, X)\n",
        "    fw_y1x: f(W|Y=1, X)\n",
        "    p_y_x: p(Y|x) classifier\n",
        "    fw_u: list of LSEigenBase objects, [f(W|U=1),...,f(W|U=k)]\n",
        "    fw_x: f(W|X)\n",
        "    true_p_u: marginal p(U) used to compute the reference p(y|x)\n",
        "\n",
        "  Returns:\n",
        "    dict of metrics: acc, aucroc, log-loss, mse, brier\n",
        "  \"\"\"\n",
        "  features = np.array(dataset['x'])\n",
        "  labels = np.array(dataset['y'])\n",
        "\n",
        "  scores = []\n",
        "  hard_labels = []\n",
        "  squared_errors = []\n",
        "  total_yxu_err = 0\n",
        "  for x in features:\n",
        "    # predicted probability q(Y=1|x) in the target domain\n",
        "    qy_x, yxu_err = predict(qu_pu, fw_y0x, fw_y1x, p_y_x, fw_u, fw_x, x, 'standard')\n",
        "    qy_x = qy_x[1]\n",
        "    true_qy_x = multi_true_p_y_x(x, true_p_u, params)\n",
        "    squared_errors.append((true_qy_x - qy_x)**2)\n",
        "    scores.append(qy_x)\n",
        "    hard_labels.append(1 if qy_x >= 0.5 else 0)\n",
        "    total_yxu_err += yxu_err\n",
        "  print('MSE of f(y|x, u):',total_yxu_err/labels.shape[0])\n",
        "  results = {\n",
        "    'acc': accuracy_score(labels, np.array(hard_labels)),\n",
        "    'aucroc': roc_auc_score(labels, scores),\n",
        "    'log-loss': log_loss(labels, scores),\n",
        "    'mse': np.mean(squared_errors),\n",
        "    'brier': brier_score_loss(labels, scores)\n",
        "  }\n",
        "  return results\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Fb-7KXBEW8aQ"
      },
      "outputs": [],
      "source": [
        "\n",
        "def inference_verification(dataset,qu_pu, p_y_x, fw_u, fw_x, true_p_u):\n",
        "  \"\"\"Evaluate the verification predictor (true f(y|x,u)) on a dataset.\n",
        "\n",
        "  Args:\n",
        "    dataset: dict with 'x' (features) and 'y' (binary labels)\n",
        "    qu_pu: density ratio q(U)/p(U), ndarray\n",
        "    p_y_x: p(Y|x) classifier, forwarded to predict_verification\n",
        "    fw_u: list of LSEigenBase objects, [f(W|U=1),...,f(W|U=k)]\n",
        "    fw_x: f(W|X)\n",
        "    true_p_u: marginal p(U) used to compute the reference p(y|x)\n",
        "\n",
        "  Returns:\n",
        "    dict of metrics: acc, aucroc, log-loss, mse, brier\n",
        "  \"\"\"\n",
        "  source_feature = np.array(dataset['x'])\n",
        "  source_label = np.array(dataset['y'])\n",
        "\n",
        "  source_predict_score = []\n",
        "  source_predict_label = []\n",
        "  source_mse = []\n",
        "  for x in source_feature:\n",
        "    # predicted probability q(Y=1|x); pass the p_y_x argument through\n",
        "    # (previously referenced an undefined global `py_x`, raising NameError)\n",
        "    qy_x = predict_verification(qu_pu, p_y_x, fw_u, fw_x, x, normalize='standard')[1]\n",
        "\n",
        "    true_qy_x = multi_true_p_y_x(x, true_p_u, params)\n",
        "    source_mse.append((true_qy_x - qy_x)**2)\n",
        "    source_predict_score.append(qy_x)\n",
        "    source_predict_label.append(1 if qy_x >= 0.5 else 0)\n",
        "\n",
        "  results = {\n",
        "    'acc': accuracy_score(source_label, np.array(source_predict_label)),\n",
        "    'aucroc': roc_auc_score(source_label, source_predict_score),\n",
        "    'log-loss': log_loss(source_label, source_predict_score),\n",
        "    'mse': np.mean(source_mse),\n",
        "    'brier': brier_score_loss(source_label, source_predict_score)\n",
        "  }\n",
        "  return results"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "jRV_QPmnb3TG"
      },
      "outputs": [],
      "source": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_Kk9Q4D3Fb-o"
      },
      "source": [
        "## test W=3\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "HbOd-k5De9Yf"
      },
      "outputs": [],
      "source": [
        "def train_batch_parameters_W3():\n",
        "  \"\"\"Sweep target-shift levels for the W=3 synthetic setting.\n",
        "\n",
        "  Trains two adapted models (kmeans / random initialization), a\n",
        "  target-trained baseline, and source-only transfer, then evaluates each\n",
        "  on the target (and source) test splits.\n",
        "\n",
        "  Relies on module-level helpers (extract_from_df_nested, train_process,\n",
        "  inference) and the module-level `params` dict -- all defined in earlier\n",
        "  cells of this notebook.\n",
        "\n",
        "  Returns:\n",
        "    dict keyed by 'target_<p_u0>', each value a dict of metric dicts for\n",
        "    'ss', 'st', 'tt', 'adapt_kmeans', 'adapt_random'.\n",
        "  \"\"\"\n",
        "  #load source data\n",
        "\n",
        "  source_df = pd.read_csv('./tmp_data/synthetic_multivariate_num_samples_10000_w_coeff_3_p_u_0_0.9.csv')\n",
        "\n",
        "  source_data_dict = extract_from_df_nested(source_df) # defined in the colab\n",
        "  p_u_source = np.array([0.9, 0.1])\n",
        "\n",
        "  #prepare data\n",
        "\n",
        "  source_data = source_data_dict\n",
        "\n",
        "  #check that p(y=0 | u=0, c=1) and p(y=0 | u=1, c=1) are well-separated, (identification assumption)\n",
        "\n",
        "  #select c and y to estimate the density operator\n",
        "  test_c = np.array([1,0,0])\n",
        "  test_y = 1\n",
        "  # condition on the value c = test_c and y = test_y\n",
        "\n",
        "  source_data_c_id =  np.where(np.sum(source_data['train']['c']==test_c,axis=1)==3)[0]\n",
        "  #sample data for calibration and other training\n",
        "\n",
        "  source_data_sample = source_data['train']\n",
        "  source_data_cali = source_data['val'] #use this to calibrate the prediction\n",
        "  source_data_test = source_data['test']\n",
        "  #prepare the x in the target domain\n",
        "\n",
        "  print('number of training data', source_data_c_id.shape[0])\n",
        "  results_batch = {}\n",
        "  #load target data\n",
        "  for t_pu0 in [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]:\n",
        "\n",
        "    target_df = pd.read_csv(f'./tmp_data/synthetic_multivariate_num_samples_10000_w_coeff_3_p_u_0_{t_pu0}.csv')\n",
        "    target_data_dict = extract_from_df_nested(target_df) # defined in the colab\n",
        "\n",
        "    #prepare data\n",
        "    target_data = target_data_dict\n",
        "\n",
        "    target_data_sample = target_data['train']\n",
        "    target_data_cali = target_data['val'] #use this to calibrate the prediction\n",
        "    target_data_test = target_data['test']\n",
        "    p_u_target = [t_pu0, 1-t_pu0]\n",
        "    #start training\n",
        "    # adaptation with kmeans initialization\n",
        "    train_params = train_process(source_data_sample, target_data_sample, p_u_source, p_u_target, params, method='kmeans', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "\n",
        "    fw_y0x = train_params['fw_y0x']\n",
        "    fw_y1x = train_params['fw_y1x']\n",
        "    fw_u   = train_params['fw_u']\n",
        "    fw_x   = train_params['fw_x']\n",
        "    py_x   = train_params['py_x']\n",
        "    qu_pu  = train_params['qu_pu']\n",
        "\n",
        "    # adaptation with random initialization\n",
        "    train_params = train_process(source_data_sample, target_data_sample, p_u_source, p_u_target, params, method='random', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "    fw_y0x_v2 = train_params['fw_y0x']\n",
        "    fw_y1x_v2 = train_params['fw_y1x']\n",
        "    fw_u_v2   = train_params['fw_u']\n",
        "    fw_x_v2   = train_params['fw_x']\n",
        "    py_x_v2   = train_params['py_x']\n",
        "    qu_pu_v2  = train_params['qu_pu']\n",
        "\n",
        "    # oracle baseline trained directly on the target domain\n",
        "    baseline_params = train_process(target_data_sample, target_data_sample, p_u_target, p_u_target, params, method='kmeans', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "    gw_y0x = baseline_params['fw_y0x']\n",
        "    gw_y1x = baseline_params['fw_y1x']\n",
        "    gw_u   = baseline_params['fw_u']\n",
        "    gw_x   = baseline_params['fw_x']\n",
        "    qy_x   = baseline_params['py_x']\n",
        "\n",
        "    # Evaluate: target-on-target baseline (tt), source-on-source (ss),\n",
        "    # naive source-on-target transfer (st), and the two adaptations.\n",
        "    results_target_on_target = inference(target_data_test, np.array([1.,1.,]), gw_y0x, gw_y1x, qy_x, gw_u, gw_x, p_u_target)\n",
        "    results_source_on_source = inference(source_data_test, np.array([1.,1.,]), fw_y0x, fw_y1x, py_x, fw_u, fw_x, p_u_source)\n",
        "\n",
        "    results_source_on_target = inference(target_data_test, np.array([1.,1.,]), fw_y0x,    fw_y1x,    py_x,    fw_u,    fw_x,    p_u_target)\n",
        "    results_adaptation1      = inference(target_data_test, qu_pu             , fw_y0x,    fw_y1x,    py_x,    fw_u,    fw_x,    p_u_target)\n",
        "    results_adaptation2      = inference(target_data_test, qu_pu_v2          , fw_y0x_v2, fw_y1x_v2, py_x_v2, fw_u_v2, fw_x_v2, p_u_target)\n",
        "\n",
        "\n",
        "    all_results = {\n",
        "        'ss':    results_source_on_source,\n",
        "        'st':    results_source_on_target,\n",
        "        'tt':    results_target_on_target,\n",
        "        'adapt_kmeans': results_adaptation1,\n",
        "        'adapt_random': results_adaptation2,\n",
        "    }\n",
        "    results_batch['target_{}'.format(t_pu0)] = all_results\n",
        "  return results_batch\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "KLMbzD17jo0O"
      },
      "outputs": [],
      "source": [
        "# Run the full W=3 sweep over all target shift levels (slow: trains\n",
        "# three models per shift level).\n",
        "results_batch_W3 = train_batch_parameters_W3()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Bpm3bMqyJCxj"
      },
      "outputs": [],
      "source": [
        "# Dump the W=3 metrics for every target shift level.\n",
        "for p in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:\n",
        "  print(results_batch_W3[f'target_{p}'])\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "BFeqozYSFh5n"
      },
      "source": [
        "## Test: W coefficient = 2"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "09FU8j08FPXv"
      },
      "outputs": [],
      "source": [
        "# Synthetic-data parameters for the W=2 section (the W strength is\n",
        "# encoded via mu_w_u_mat); consumed by train_process via `params`.\n",
        "params = {\n",
        "    'num_samples': 10000,\n",
        "    'k_w': 1,\n",
        "    'k_x': 2,\n",
        "    'mu_w_u_coeff': 1,\n",
        "    'mu_x_u_coeff': 1,\n",
        "    'mu_w_u_mat': np.array([[-2, 2]]).T,\n",
        "    'mu_x_u_mat': np.array([[-1, 1], [1, -1]]),  # k_u x k_x\n",
        "}\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ZtUAblkMFXnM"
      },
      "outputs": [],
      "source": [
        "def train_batch_parameters_W2():\n",
        "  \"\"\"Sweep target-shift levels for the W=2 synthetic setting.\n",
        "\n",
        "  Mirrors train_batch_parameters_W3 but reads the w_coeff_2 data files.\n",
        "  Relies on module-level helpers (extract_from_df_nested, train_process,\n",
        "  inference) and the module-level `params` dict.\n",
        "\n",
        "  Returns:\n",
        "    dict keyed by 'target_<p_u0>', each value a dict of metric dicts for\n",
        "    'ss', 'st', 'tt', 'adapt_kmeans', 'adapt_random'.\n",
        "  \"\"\"\n",
        "  #load source data\n",
        "  source_df = pd.read_csv('./tmp_data/synthetic_multivariate_num_samples_10000_w_coeff_2_p_u_0_0.9.csv')\n",
        "  source_data_dict = extract_from_df_nested(source_df) # defined in the colab\n",
        "  p_u_source = np.array([0.9, 0.1])\n",
        "\n",
        "  #prepare data\n",
        "  source_data = source_data_dict\n",
        "\n",
        "  #check that p(y=0 | u=0, c=1) and p(y=0 | u=1, c=1) are well-separated, (identification assumption)\n",
        "\n",
        "  #select c and y to estimate the density operator\n",
        "  test_c = np.array([1,0,0])\n",
        "  test_y = 1\n",
        "  # condition on the value c = test_c and y = test_y\n",
        "  source_data_c_id =  np.where(np.sum(source_data['train']['c']==test_c,axis=1)==3)[0]\n",
        "  #sample data for calibration and other training\n",
        "  source_data_sample = source_data['train']\n",
        "  source_data_cali = source_data['val'] #use this to calibrate the prediction\n",
        "  source_data_test = source_data['test']\n",
        "\n",
        "  print('number of training data', source_data_c_id.shape[0])\n",
        "  results_batch = {}\n",
        "  #load target data\n",
        "  for t_pu0 in [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]:\n",
        "    # Bug fix: removed a stray, unfinished `for key in file_id_dict.keys():`\n",
        "    # line here that made this cell a syntax error (file_id_dict is never\n",
        "    # defined in this notebook).\n",
        "    target_df = pd.read_csv(f'./tmp_data/synthetic_multivariate_num_samples_10000_w_coeff_2_p_u_0_{t_pu0}.csv')\n",
        "    target_data_dict = extract_from_df_nested(target_df) # defined in the colab\n",
        "\n",
        "    #prepare data\n",
        "    target_data = target_data_dict\n",
        "\n",
        "    target_data_sample = target_data['train']\n",
        "    target_data_cali = target_data['val'] #use this to calibrate the prediction\n",
        "    target_data_test = target_data['test']\n",
        "    p_u_target = [t_pu0, 1-t_pu0]\n",
        "    #start training: adaptation with kmeans initialization\n",
        "    train_params = train_process(source_data_sample, target_data_sample, p_u_source, p_u_target, params, method='kmeans', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "    fw_y0x = train_params['fw_y0x']\n",
        "    fw_y1x = train_params['fw_y1x']\n",
        "    fw_u   = train_params['fw_u']\n",
        "    fw_x   = train_params['fw_x']\n",
        "    py_x   = train_params['py_x']\n",
        "    qu_pu  = train_params['qu_pu']\n",
        "\n",
        "    # adaptation with random initialization\n",
        "    train_params = train_process(source_data_sample, target_data_sample, p_u_source, p_u_target, params, method='random', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "    fw_y0x_v2 = train_params['fw_y0x']\n",
        "    fw_y1x_v2 = train_params['fw_y1x']\n",
        "    fw_u_v2   = train_params['fw_u']\n",
        "    fw_x_v2   = train_params['fw_x']\n",
        "    py_x_v2   = train_params['py_x']\n",
        "    qu_pu_v2  = train_params['qu_pu']\n",
        "\n",
        "    # oracle baseline trained directly on the target domain\n",
        "    baseline_params = train_process(target_data_sample, target_data_sample, p_u_target, p_u_target, params, method='kmeans', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "    gw_y0x = baseline_params['fw_y0x']\n",
        "    gw_y1x = baseline_params['fw_y1x']\n",
        "    gw_u   = baseline_params['fw_u']\n",
        "    gw_x   = baseline_params['fw_x']\n",
        "    qy_x   = baseline_params['py_x']\n",
        "\n",
        "    # Evaluate: target-on-target baseline (tt), source-on-source (ss),\n",
        "    # naive source-on-target transfer (st), and the two adaptations.\n",
        "    results_target_on_target = inference(target_data_test, np.array([1.,1.,]), gw_y0x, gw_y1x, qy_x, gw_u, gw_x, p_u_target)\n",
        "    results_source_on_source = inference(source_data_test, np.array([1.,1.,]), fw_y0x, fw_y1x, py_x, fw_u, fw_x, p_u_source)\n",
        "\n",
        "    results_source_on_target = inference(target_data_test, np.array([1.,1.,]), fw_y0x,    fw_y1x,    py_x,    fw_u,    fw_x,    p_u_target)\n",
        "    results_adaptation1      = inference(target_data_test, qu_pu             , fw_y0x,    fw_y1x,    py_x,    fw_u,    fw_x,    p_u_target)\n",
        "    results_adaptation2      = inference(target_data_test, qu_pu_v2          , fw_y0x_v2, fw_y1x_v2, py_x_v2, fw_u_v2, fw_x_v2, p_u_target)\n",
        "\n",
        "    all_results = {\n",
        "        'ss':    results_source_on_source,\n",
        "        'st':    results_source_on_target,\n",
        "        'tt':    results_target_on_target,\n",
        "        'adapt_kmeans': results_adaptation1,\n",
        "        'adapt_random': results_adaptation2,\n",
        "    }\n",
        "    results_batch['target_{}'.format(t_pu0)] = all_results\n",
        "  return results_batch\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xTpjyBnfFrvi"
      },
      "outputs": [],
      "source": [
        "# Run the full W=2 sweep over all target shift levels (slow: trains\n",
        "# three models per shift level).\n",
        "results_batch_W2 = train_batch_parameters_W2()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "PetQve59FwWX"
      },
      "outputs": [],
      "source": [
        "# Dump the W=2 metrics for every target shift level.\n",
        "for p in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:\n",
        "  print(results_batch_W2[f'target_{p}'])\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "JqNwg69JO-F-"
      },
      "source": [
        "## Test: W coefficient = 1"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "sjhg0mFKO9B8"
      },
      "outputs": [],
      "source": [
        "# Synthetic-data parameters for the W=1 section (the W strength is\n",
        "# encoded via mu_w_u_mat); consumed by train_process via `params`.\n",
        "params = {\n",
        "    'num_samples': 10000,\n",
        "    'k_w': 1,\n",
        "    'k_x': 2,\n",
        "    'mu_w_u_coeff': 1,\n",
        "    'mu_x_u_coeff': 1,\n",
        "    'mu_w_u_mat': np.array([[-1, 1]]).T,\n",
        "    'mu_x_u_mat': np.array([[-1, 1], [1, -1]]),  # k_u x k_x\n",
        "}"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-3A2ZV-jUhof"
      },
      "outputs": [],
      "source": [
        "def train_batch_parameters_W1():\n",
        "  \"\"\"Sweep target-shift levels for the W=1 synthetic setting.\n",
        "\n",
        "  Mirrors train_batch_parameters_W3 but reads the w_coeff_1 data files.\n",
        "  Relies on module-level helpers (extract_from_df_nested, train_process,\n",
        "  inference) and the module-level `params` dict.\n",
        "\n",
        "  Returns:\n",
        "    dict keyed by 'target_<p_u0>', each value a dict of metric dicts for\n",
        "    'ss', 'st', 'tt', 'adapt_kmeans', 'adapt_random'.\n",
        "  \"\"\"\n",
        "  #load source data\n",
        "  source_df = pd.read_csv('./tmp_data/synthetic_multivariate_num_samples_10000_w_coeff_1_p_u_0_0.9.csv')\n",
        "  source_data_dict = extract_from_df_nested(source_df) # defined in the colab\n",
        "  p_u_source = np.array([0.9, 0.1])\n",
        "\n",
        "  #prepare data\n",
        "  source_data = source_data_dict\n",
        "\n",
        "  #check that p(y=0 | u=0, c=1) and p(y=0 | u=1, c=1) are well-separated, (identification assumption)\n",
        "\n",
        "  #select c and y to estimate the density operator\n",
        "  test_c = np.array([1,0,0])\n",
        "  test_y = 1\n",
        "  # condition on the value c = test_c and y = test_y\n",
        "  source_data_c_id =  np.where(np.sum(source_data['train']['c']==test_c,axis=1)==3)[0]\n",
        "  #sample data for calibration and other training\n",
        "  source_data_sample = source_data['train']\n",
        "  source_data_cali = source_data['val'] #use this to calibrate the prediction\n",
        "  source_data_test = source_data['test']\n",
        "\n",
        "  print('number of training data', source_data_c_id.shape[0])\n",
        "  results_batch = {}\n",
        "  #load target data\n",
        "  for t_pu0 in [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]:\n",
        "    # Bug fix: the original loaded the w_coeff_2 target files here, so the\n",
        "    # W=1 experiment silently reused the W=2 target data.\n",
        "    target_df = pd.read_csv(f'./tmp_data/synthetic_multivariate_num_samples_10000_w_coeff_1_p_u_0_{t_pu0}.csv')\n",
        "    target_data_dict = extract_from_df_nested(target_df) # defined in the colab\n",
        "\n",
        "    #prepare data\n",
        "    target_data = target_data_dict\n",
        "\n",
        "    target_data_sample = target_data['train']\n",
        "    target_data_cali = target_data['val'] #use this to calibrate the prediction\n",
        "    target_data_test = target_data['test']\n",
        "    p_u_target = [t_pu0, 1-t_pu0]\n",
        "    #start training: adaptation with kmeans initialization\n",
        "    train_params = train_process(source_data_sample, target_data_sample, p_u_source, p_u_target, params, method='kmeans', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "    fw_y0x = train_params['fw_y0x']\n",
        "    fw_y1x = train_params['fw_y1x']\n",
        "    fw_u   = train_params['fw_u']\n",
        "    fw_x   = train_params['fw_x']\n",
        "    py_x   = train_params['py_x']\n",
        "    qu_pu  = train_params['qu_pu']\n",
        "\n",
        "    # adaptation with random initialization\n",
        "    train_params = train_process(source_data_sample, target_data_sample, p_u_source, p_u_target, params, method='random', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "    fw_y0x_v2 = train_params['fw_y0x']\n",
        "    fw_y1x_v2 = train_params['fw_y1x']\n",
        "    fw_u_v2   = train_params['fw_u']\n",
        "    fw_x_v2   = train_params['fw_x']\n",
        "    py_x_v2   = train_params['py_x']\n",
        "    qu_pu_v2  = train_params['qu_pu']\n",
        "\n",
        "    # oracle baseline trained directly on the target domain\n",
        "    baseline_params = train_process(target_data_sample, target_data_sample, p_u_target, p_u_target, params, method='kmeans', test_c=test_c, test_y=test_y, evaluate=False)\n",
        "    gw_y0x = baseline_params['fw_y0x']\n",
        "    gw_y1x = baseline_params['fw_y1x']\n",
        "    gw_u   = baseline_params['fw_u']\n",
        "    gw_x   = baseline_params['fw_x']\n",
        "    qy_x   = baseline_params['py_x']\n",
        "\n",
        "    # Evaluate: target-on-target baseline (tt), source-on-source (ss),\n",
        "    # naive source-on-target transfer (st), and the two adaptations.\n",
        "    results_target_on_target = inference(target_data_test, np.array([1.,1.,]), gw_y0x, gw_y1x, qy_x, gw_u, gw_x, p_u_target)\n",
        "    results_source_on_source = inference(source_data_test, np.array([1.,1.,]), fw_y0x, fw_y1x, py_x, fw_u, fw_x, p_u_source)\n",
        "\n",
        "    results_source_on_target = inference(target_data_test, np.array([1.,1.,]), fw_y0x,    fw_y1x,    py_x,    fw_u,    fw_x,    p_u_target)\n",
        "    results_adaptation1      = inference(target_data_test, qu_pu             , fw_y0x,    fw_y1x,    py_x,    fw_u,    fw_x,    p_u_target)\n",
        "    results_adaptation2      = inference(target_data_test, qu_pu_v2          , fw_y0x_v2, fw_y1x_v2, py_x_v2, fw_u_v2, fw_x_v2, p_u_target)\n",
        "\n",
        "    all_results = {\n",
        "        'ss':    results_source_on_source,\n",
        "        'st':    results_source_on_target,\n",
        "        'tt':    results_target_on_target,\n",
        "        'adapt_kmeans': results_adaptation1,\n",
        "        'adapt_random': results_adaptation2,\n",
        "    }\n",
        "    results_batch['target_{}'.format(t_pu0)] = all_results\n",
        "  return results_batch\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "fAfpooWPPLlB"
      },
      "outputs": [],
      "source": [
        "# Run the full W=1 sweep over all target shift levels (slow: trains\n",
        "# three models per shift level).\n",
        "results_batch_W1 = train_batch_parameters_W1()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xyVNsW-dPRcH"
      },
      "outputs": [],
      "source": [
        "# Dump the W=1 metrics for every target shift level.\n",
        "for p in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:\n",
        "  print(results_batch_W1[f'target_{p}'])\n"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "last_runtime": {
        "build_target": "//learning/deepmind/public/tools/ml_python:ml_notebook",
        "kind": "private"
      },
      "name": "continuous_spectral_method.ipynb",
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
