{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.model_selection import KFold\n",
    "from econml.sklearn_extensions.linear_model import StatsModelsLinearRegression\n",
    "from econml.utilities import _safe_norm_ppf\n",
    "\n",
    "\n",
    "def dml_sim(model_t, model_y, n_folds, Y, T, X, random_state=None):\n",
    "    \"\"\"\n",
    "    Double machine learning (DML) for a single treatment and outcome.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    model_t : estimator\n",
    "        Unfitted sklearn-style regressor for predicting T from X;\n",
    "        re-fitted on each training fold.\n",
    "    model_y : estimator\n",
    "        Unfitted sklearn-style regressor for predicting Y from X;\n",
    "        re-fitted on each training fold.\n",
    "    n_folds : int\n",
    "        The number of folds to use in cross-fitting.\n",
    "    Y : array-like of shape (n, 1)\n",
    "        The outcome variable.\n",
    "    T : array-like of shape (n, 1)\n",
    "        The treatment variable.\n",
    "    X : array-like of shape (n, d)\n",
    "        The covariates.\n",
    "    random_state : int or None, default None\n",
    "        Seed for the fold shuffling; pass an int for reproducible splits.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    theta : float\n",
    "        The estimated treatment effect.\n",
    "    sigma2 : float\n",
    "        Variance of the outcome residuals after removing theta * t_res.\n",
    "    nu2 : float\n",
    "        Reciprocal of the mean squared treatment residual.\n",
    "    cov : ndarray of shape (3, 3)\n",
    "        Estimated sandwich covariance of (theta, sigma2, nu2).\n",
    "    \"\"\"\n",
    "    kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)\n",
    "\n",
    "    # Out-of-fold residuals; force float dtype so integer-valued Y or T\n",
    "    # inputs do not silently truncate the residuals.\n",
    "    y_res = np.zeros(np.shape(Y), dtype=float)\n",
    "    t_res = np.zeros(np.shape(T), dtype=float)\n",
    "\n",
    "    # Cross-fitting: predict each held-out fold with models fit on the rest.\n",
    "    for train_index, test_index in kf.split(X):\n",
    "        X_train, X_test = X[train_index], X[test_index]\n",
    "        Y_train, Y_test = Y[train_index], Y[test_index]\n",
    "        T_train, T_test = T[train_index], T[test_index]\n",
    "\n",
    "        # Treatment and outcome residuals on the held-out fold\n",
    "        t_res[test_index] = T_test-model_t.fit(X_train, T_train).predict(X_test).reshape(T_test.shape)\n",
    "        y_res[test_index] = Y_test-model_y.fit(X_train, Y_train).predict(X_test).reshape(Y_test.shape)\n",
    "\n",
    "    # Point estimates (method of moments on the residualized data)\n",
    "    theta = np.mean(y_res*t_res) / np.mean(t_res**2)  # partialling-out effect estimate\n",
    "    sigma2 = np.mean((y_res - theta*t_res)**2)  # variance of outcome residuals net of the effect\n",
    "    nu2 = 1/np.mean(t_res**2)  # reciprocal variance of the treatment residuals\n",
    "\n",
    "    # Sandwich (GMM) covariance of the three estimates\n",
    "    ls = np.concatenate([t_res**2, np.ones_like(t_res), t_res**2], axis=1)\n",
    "    G = np.diag(np.mean(ls, axis=0))  # Jacobian of the moment conditions (diagonal)\n",
    "    G_inv = np.linalg.inv(G)  # could just take reciprocals since G is diagonal\n",
    "\n",
    "    residuals = np.concatenate([y_res*t_res-theta*t_res*t_res, (y_res-theta*t_res)**2-sigma2, t_res**2*nu2-1], axis=1)  # stacked moment residuals\n",
    "    Omega = residuals.T @ residuals / len(residuals)  # covariance of the moment residuals\n",
    "    cov = G_inv @ Omega @ G_inv / len(residuals)  # sandwich variance of the parameters\n",
    "\n",
    "    return theta, sigma2, nu2, cov\n",
    "\n",
    "def sensitivity_interval(theta, sigma, nu, cov, alpha, c_y, c_t, rho):\n",
    "    \"\"\"\n",
    "    Sensitivity interval for the treatment effect under unobserved confounding.\n",
    "\n",
    "    Given the estimates and covariance from ``dml_sim``, compute a\n",
    "    (1 - alpha) confidence interval for theta widened by the worst-case bias\n",
    "    of a hypothetical latent confounder of strength (c_y, c_t) and\n",
    "    correlation rho; the bias bound works out to 2*C*sqrt(sigma*nu) around\n",
    "    theta (cf. the DoubleML-style sensitivity analysis used elsewhere in\n",
    "    this notebook -- TODO confirm exact correspondence).\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    theta, sigma, nu : float\n",
    "        Effect estimate and the two variance-scale nuisances from\n",
    "        ``dml_sim`` (its sigma2 and nu2).\n",
    "    cov : ndarray of shape (3, 3)\n",
    "        Covariance matrix of (theta, sigma, nu) from ``dml_sim``.\n",
    "    alpha : float\n",
    "        Significance level, e.g. 0.05 for a 95% interval.\n",
    "    c_y, c_t : float\n",
    "        Confounding strength on the outcome and treatment; c_t must be < 1.\n",
    "    rho : float\n",
    "        Correlation of the confounding effects; only |rho| is used.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    (lb, ub) : tuple of float\n",
    "        Lower and upper sensitivity bounds.\n",
    "    \"\"\"\n",
    "    # Scale of the worst-case confounding bias.\n",
    "    C = np.abs(rho) * np.sqrt(c_y) * np.sqrt(c_t/(1-c_t)) / 2\n",
    "    ests = np.array([theta, sigma, nu])\n",
    "    # Linearizations of the bias-adjusted effect in (theta, sigma, nu):\n",
    "    # ests @ coefs_p = theta + 2*C*sqrt(sigma*nu), ests @ coefs_n = theta - 2*C*sqrt(sigma*nu).\n",
    "    coefs_p = np.array([1, C*np.sqrt(nu/sigma), C*np.sqrt(sigma/nu)])\n",
    "    coefs_n = np.array([1, -C*np.sqrt(nu/sigma), -C*np.sqrt(sigma/nu)])\n",
    "    # Delta-method variances of the two scalar (normally distributed) bounds:\n",
    "    sigma_p = coefs_p @ cov @ coefs_p\n",
    "    sigma_n = coefs_n @ cov @ coefs_n\n",
    "\n",
    "    lb = _safe_norm_ppf(alpha / 2, loc=ests @ coefs_n, scale=np.sqrt(sigma_n))\n",
    "    ub = _safe_norm_ppf(1 - alpha / 2, loc=ests @ coefs_p, scale=np.sqrt(sigma_p))\n",
    "\n",
    "    return (lb, ub)\n",
    "\n",
    "def RV(theta, sigma, nu, cov, alpha, tol=1e-6, max_iter=200):\n",
    "    \"\"\"\n",
    "    Robustness value: the confounding strength (applied equally to the\n",
    "    treatment and the outcome, c_y = c_t = r) at which the sensitivity\n",
    "    interval just touches zero.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    theta, sigma, nu : float\n",
    "        Estimates from ``dml_sim``.\n",
    "    cov : ndarray of shape (3, 3)\n",
    "        Covariance matrix of (theta, sigma, nu) from ``dml_sim``.\n",
    "    alpha : float\n",
    "        Significance level of the sensitivity interval.\n",
    "    tol : float, default 1e-6\n",
    "        Convergence tolerance on the tracked interval endpoint.\n",
    "    max_iter : int, default 200\n",
    "        Safety cap on bisection steps; guards against an infinite loop if\n",
    "        the endpoint cannot be driven within ``tol`` of zero (the previous\n",
    "        unbounded loop would spin forever once bisection stalls at\n",
    "        floating-point resolution, since r then stops changing).\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    float\n",
    "        The robustness value r in [0, 1); 0 when the unconfounded interval\n",
    "        already contains zero.\n",
    "    \"\"\"\n",
    "    # Interval with no confounding: nothing to measure if it already covers 0.\n",
    "    lb, ub = sensitivity_interval(theta, sigma, nu, cov, alpha, 0, 0, 1)\n",
    "    if lb < 0 and ub > 0:\n",
    "        return 0\n",
    "\n",
    "    # Track whichever endpoint is on the zero side; flip the sign for a\n",
    "    # negative effect so that d > 0 always means the interval still excludes 0.\n",
    "    if lb > 0:\n",
    "        target = 0  # positive effect: watch the lower bound\n",
    "        mult = 1\n",
    "        d = lb\n",
    "    else:\n",
    "        target = 1  # negative effect: watch the upper bound\n",
    "        mult = -1\n",
    "        d = ub\n",
    "\n",
    "    # Bisection on r in [0, 1): c_t/(1-c_t) diverges as r -> 1, so the\n",
    "    # tracked endpoint is eventually pushed across zero.\n",
    "    r = 0\n",
    "    r_down = 0\n",
    "    r_up = 1\n",
    "    for _ in range(max_iter):\n",
    "        if abs(d) <= tol:\n",
    "            break\n",
    "        d = mult * sensitivity_interval(theta, sigma, nu, cov, alpha, r, r, 1)[target]\n",
    "        if d > 0:\n",
    "            r_down = r  # still excludes zero: needs more confounding\n",
    "        else:\n",
    "            r_up = r  # crossed zero: back off\n",
    "        r = (r_down + r_up) / 2\n",
    "\n",
    "    return r\n",
    "\n",
    "    \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Simulate data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.linear_model import LinearRegression\n",
    "\n",
    "# Seed the generator so the simulated data (and every downstream estimate\n",
    "# in this notebook) is reproducible on Restart & Run All.\n",
    "np.random.seed(42)\n",
    "\n",
    "n = 1000\n",
    "\n",
    "# Coefficients linking the covariates to treatment and outcome.\n",
    "alpha = np.random.normal(size=30)\n",
    "beta = np.random.normal(size=30)\n",
    "\n",
    "X = np.random.normal(size=(n,30))\n",
    "\n",
    "# Treatment depends on X; outcome depends on treatment (true effect = 3)\n",
    "# and, much more strongly, on X.\n",
    "t = np.random.normal(size=n) + 2 * X @ alpha\n",
    "y = np.random.normal(size=n) + 3*t + 50* X @ beta\n",
    "\n",
    "# Column-vector views expected by dml_sim.\n",
    "T = t.reshape(-1, 1)\n",
    "Y = y.reshape(-1, 1)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Simple example"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Run simple DML and calculate intermediate values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "sig_level = 0.1\n",
    "\n",
    "theta, sigma, nu, sig = dml_sim(LinearRegression(), LinearRegression(), 2, Y, T, X)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Calculate a \"sensitivity interval\". \n",
    "\n",
    "Need to supply \"strength of latent confounder\" as argument."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1.9713167396783304, 3.992013510060846)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sensitivity_interval(theta, sigma, nu, sig, sig_level, 0.6, 0.6, 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Calculate Robustness Value. \n",
    "\n",
    "The required strength of a latent confounder in order for the confidence interval to include 0."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.9007053971290588"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "RV(theta, sigma, nu, sig, sig_level)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Ablations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'i': 30,\n",
       " 'alpha': 0.1,\n",
       " 'sensitivity_interval': (2.006244806224368, 4.0212629919887455),\n",
       " 'RV': 0.9028782248497009}"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sig_level = 0.1\n",
    "\n",
    "results = []\n",
    "for i in [1, 5, 15, 30]:\n",
    "    theta, sigma, nu, sig = dml_sim(LinearRegression(), LinearRegression(), 2, Y, T, X[:,:i])\n",
    "    result_dict = {\n",
    "        'i': i,\n",
    "        'alpha': sig_level,\n",
    "        'sensitivity_interval': sensitivity_interval(theta, sigma, nu, sig, sig_level, 0.6, 0.6, 1),\n",
    "        'RV': RV(theta, sigma, nu, sig, sig_level)\n",
    "    }\n",
    "    results.append(result_dict)\n",
    "                   \n",
    "results[-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>i</th>\n",
       "      <th>alpha</th>\n",
       "      <th>sensitivity_interval</th>\n",
       "      <th>RV</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>0.1</td>\n",
       "      <td>(-5.76103393577295, 30.16877869667943)</td>\n",
       "      <td>0.463505</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>5</td>\n",
       "      <td>0.1</td>\n",
       "      <td>(-4.254207344618898, 31.474674283379382)</td>\n",
       "      <td>0.503265</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>15</td>\n",
       "      <td>0.1</td>\n",
       "      <td>(2.926606519518738, 25.91115187623303)</td>\n",
       "      <td>0.679077</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>30</td>\n",
       "      <td>0.1</td>\n",
       "      <td>(2.006244806224368, 4.0212629919887455)</td>\n",
       "      <td>0.902878</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    i  alpha                      sensitivity_interval        RV\n",
       "0   1    0.1    (-5.76103393577295, 30.16877869667943)  0.463505\n",
       "1   5    0.1  (-4.254207344618898, 31.474674283379382)  0.503265\n",
       "2  15    0.1    (2.926606519518738, 25.91115187623303)  0.679077\n",
       "3  30    0.1   (2.006244806224368, 4.0212629919887455)  0.902878"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(\n",
    "    pd.DataFrame(results)\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DoubleML"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import doubleml as dml\n",
    "import pandas as pd\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Y</th>\n",
       "      <th>T</th>\n",
       "      <th>X0</th>\n",
       "      <th>X1</th>\n",
       "      <th>X2</th>\n",
       "      <th>X3</th>\n",
       "      <th>X4</th>\n",
       "      <th>X5</th>\n",
       "      <th>X6</th>\n",
       "      <th>X7</th>\n",
       "      <th>...</th>\n",
       "      <th>X20</th>\n",
       "      <th>X21</th>\n",
       "      <th>X22</th>\n",
       "      <th>X23</th>\n",
       "      <th>X24</th>\n",
       "      <th>X25</th>\n",
       "      <th>X26</th>\n",
       "      <th>X27</th>\n",
       "      <th>X28</th>\n",
       "      <th>X29</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>169.161860</td>\n",
       "      <td>7.570734</td>\n",
       "      <td>-1.720637</td>\n",
       "      <td>-0.480190</td>\n",
       "      <td>1.793066</td>\n",
       "      <td>-0.909049</td>\n",
       "      <td>-0.491242</td>\n",
       "      <td>0.407515</td>\n",
       "      <td>-2.719292</td>\n",
       "      <td>0.214566</td>\n",
       "      <td>...</td>\n",
       "      <td>-1.236440</td>\n",
       "      <td>0.460231</td>\n",
       "      <td>-0.266955</td>\n",
       "      <td>0.185816</td>\n",
       "      <td>0.729663</td>\n",
       "      <td>1.324584</td>\n",
       "      <td>-0.091459</td>\n",
       "      <td>1.387470</td>\n",
       "      <td>-0.982113</td>\n",
       "      <td>-2.001484</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>-368.478684</td>\n",
       "      <td>-10.259294</td>\n",
       "      <td>1.502222</td>\n",
       "      <td>-0.351112</td>\n",
       "      <td>0.326909</td>\n",
       "      <td>0.383989</td>\n",
       "      <td>1.019918</td>\n",
       "      <td>0.102063</td>\n",
       "      <td>-1.172437</td>\n",
       "      <td>2.210359</td>\n",
       "      <td>...</td>\n",
       "      <td>-1.044021</td>\n",
       "      <td>-0.816131</td>\n",
       "      <td>0.288329</td>\n",
       "      <td>-0.428007</td>\n",
       "      <td>1.047723</td>\n",
       "      <td>-0.058769</td>\n",
       "      <td>-0.664951</td>\n",
       "      <td>-0.111616</td>\n",
       "      <td>1.111973</td>\n",
       "      <td>0.214340</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>595.208775</td>\n",
       "      <td>3.414269</td>\n",
       "      <td>-0.584506</td>\n",
       "      <td>-0.410095</td>\n",
       "      <td>-0.085977</td>\n",
       "      <td>0.515477</td>\n",
       "      <td>1.374335</td>\n",
       "      <td>1.912329</td>\n",
       "      <td>-0.645654</td>\n",
       "      <td>-0.413862</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.029845</td>\n",
       "      <td>-1.035931</td>\n",
       "      <td>-0.146995</td>\n",
       "      <td>-1.224990</td>\n",
       "      <td>-2.704679</td>\n",
       "      <td>1.110162</td>\n",
       "      <td>-0.191411</td>\n",
       "      <td>0.852031</td>\n",
       "      <td>-0.457792</td>\n",
       "      <td>-2.563874</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>265.170499</td>\n",
       "      <td>21.740763</td>\n",
       "      <td>0.285862</td>\n",
       "      <td>-0.802048</td>\n",
       "      <td>0.211192</td>\n",
       "      <td>-0.799453</td>\n",
       "      <td>0.643080</td>\n",
       "      <td>-0.379773</td>\n",
       "      <td>0.055227</td>\n",
       "      <td>1.142309</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.290620</td>\n",
       "      <td>-1.175747</td>\n",
       "      <td>0.404689</td>\n",
       "      <td>-1.427746</td>\n",
       "      <td>0.069783</td>\n",
       "      <td>0.237225</td>\n",
       "      <td>-0.983619</td>\n",
       "      <td>0.459482</td>\n",
       "      <td>-0.306016</td>\n",
       "      <td>1.853582</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>66.916752</td>\n",
       "      <td>12.252024</td>\n",
       "      <td>-1.129366</td>\n",
       "      <td>-1.768697</td>\n",
       "      <td>-1.095474</td>\n",
       "      <td>0.536305</td>\n",
       "      <td>1.300771</td>\n",
       "      <td>0.316005</td>\n",
       "      <td>0.877389</td>\n",
       "      <td>-0.117935</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.062981</td>\n",
       "      <td>0.789482</td>\n",
       "      <td>1.940387</td>\n",
       "      <td>1.153639</td>\n",
       "      <td>0.741752</td>\n",
       "      <td>-0.210623</td>\n",
       "      <td>0.046603</td>\n",
       "      <td>1.389971</td>\n",
       "      <td>-0.172416</td>\n",
       "      <td>0.265279</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 32 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            Y          T        X0        X1        X2        X3        X4  \\\n",
       "0  169.161860   7.570734 -1.720637 -0.480190  1.793066 -0.909049 -0.491242   \n",
       "1 -368.478684 -10.259294  1.502222 -0.351112  0.326909  0.383989  1.019918   \n",
       "2  595.208775   3.414269 -0.584506 -0.410095 -0.085977  0.515477  1.374335   \n",
       "3  265.170499  21.740763  0.285862 -0.802048  0.211192 -0.799453  0.643080   \n",
       "4   66.916752  12.252024 -1.129366 -1.768697 -1.095474  0.536305  1.300771   \n",
       "\n",
       "         X5        X6        X7  ...       X20       X21       X22       X23  \\\n",
       "0  0.407515 -2.719292  0.214566  ... -1.236440  0.460231 -0.266955  0.185816   \n",
       "1  0.102063 -1.172437  2.210359  ... -1.044021 -0.816131  0.288329 -0.428007   \n",
       "2  1.912329 -0.645654 -0.413862  ... -0.029845 -1.035931 -0.146995 -1.224990   \n",
       "3 -0.379773  0.055227  1.142309  ... -0.290620 -1.175747  0.404689 -1.427746   \n",
       "4  0.316005  0.877389 -0.117935  ... -0.062981  0.789482  1.940387  1.153639   \n",
       "\n",
       "        X24       X25       X26       X27       X28       X29  \n",
       "0  0.729663  1.324584 -0.091459  1.387470 -0.982113 -2.001484  \n",
       "1  1.047723 -0.058769 -0.664951 -0.111616  1.111973  0.214340  \n",
       "2 -2.704679  1.110162 -0.191411  0.852031 -0.457792 -2.563874  \n",
       "3  0.069783  0.237225 -0.983619  0.459482 -0.306016  1.853582  \n",
       "4  0.741752 -0.210623  0.046603  1.389971 -0.172416  0.265279  \n",
       "\n",
       "[5 rows x 32 columns]"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = pd.concat([\n",
    "    pd.DataFrame(Y).squeeze().to_frame('Y'),\n",
    "    pd.DataFrame(T).squeeze().to_frame('T'),\n",
    "    pd.DataFrame(X).add_prefix('X'),\n",
    "], axis=1)\n",
    "\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<doubleml.double_ml_data.DoubleMLData at 0x198557e2e30>"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dml_data = dml.DoubleMLData(df, 'Y', 'T')\n",
    "dml_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================== DoubleMLPLR Object ==================\n",
      "\n",
      "------------------ Data summary      ------------------\n",
      "Outcome variable: Y\n",
      "Treatment variable(s): ['T']\n",
      "Covariates: ['X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8', 'X9', 'X10', 'X11', 'X12', 'X13', 'X14', 'X15', 'X16', 'X17', 'X18', 'X19', 'X20', 'X21', 'X22', 'X23', 'X24', 'X25', 'X26', 'X27', 'X28', 'X29']\n",
      "Instrument variable(s): None\n",
      "No. Observations: 1000\n",
      "\n",
      "------------------ Score & algorithm ------------------\n",
      "Score function: partialling out\n",
      "\n",
      "------------------ Machine learner   ------------------\n",
      "Learner ml_l: LinearRegression()\n",
      "Learner ml_m: LinearRegression()\n",
      "Out-of-sample Performance:\n",
      "Regression:\n",
      "Learner ml_l RMSE: [[3.19708721]]\n",
      "Learner ml_m RMSE: [[1.01678607]]\n",
      "\n",
      "------------------ Resampling        ------------------\n",
      "No. folds: 2\n",
      "No. repeated sample splits: 1\n",
      "\n",
      "------------------ Fit summary       ------------------\n",
      "       coef   std err          t  P>|t|     2.5 %    97.5 %\n",
      "T  2.992753  0.032487  92.121737    0.0  2.929079  3.056426\n"
     ]
    }
   ],
   "source": [
    "dml_obj = dml.DoubleMLPLR(dml_data,\n",
    "                          ml_l=LinearRegression(),\n",
    "                          ml_m=LinearRegression(),\n",
    "                          n_folds=2,\n",
    "                          score='partialling out',)\n",
    "dml_obj.fit()\n",
    "print(dml_obj)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================== Sensitivity Analysis ==================\n",
      "\n",
      "------------------ Scenario          ------------------\n",
      "Significance Level: level=0.95\n",
      "Sensitivity parameters: cf_y=0.03; cf_d=0.03, rho=1.0\n",
      "\n",
      "------------------ Bounds with CI    ------------------\n",
      "   CI lower  theta lower     theta  theta upper  CI upper\n",
      "0  2.909926     2.963376  2.992753     3.022129  3.075592\n",
      "\n",
      "------------------ Robustness Values ------------------\n",
      "   H_0     RV (%)    RVa (%)\n",
      "0  0.0  91.336809  90.546872\n"
     ]
    }
   ],
   "source": [
    "dml_obj.sensitivity_analysis(cf_y=0.03, cf_d=0.03, rho=1.)\n",
    "print(dml_obj.sensitivity_summary)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "   cf_y     cf_d  rho  delta_theta\n",
      "T   1.0  0.13301 -1.0    -7.112185\n"
     ]
    }
   ],
   "source": [
    "sens_benchmark = dml_obj.sensitivity_benchmark(benchmarking_set=[\"X6\"])\n",
    "print(sens_benchmark)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# New datasets"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### DoubleML confounded synthetic data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "from doubleml.datasets import make_confounded_plr_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "cf_y = 0.1\n",
    "cf_d = 0.1\n",
    "theta = 5.0\n",
    "dpg_dict = make_confounded_plr_data(n_obs=10000, cf_y=cf_y, cf_d=cf_d, theta=theta)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "x_cols = [f'X{i + 1}' for i in np.arange(dpg_dict['x'].shape[1])]\n",
    "df = pd.DataFrame(np.column_stack((dpg_dict['x'], dpg_dict['y'], dpg_dict['d'])), columns=x_cols + ['y', 'd'])\n",
    "dml_data = dml.DoubleMLData(df, 'y', 'd')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.ensemble import RandomForestRegressor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================== DoubleMLPLR Object ==================\n",
      "\n",
      "------------------ Data summary      ------------------\n",
      "Outcome variable: y\n",
      "Treatment variable(s): ['d']\n",
      "Covariates: ['X1', 'X2', 'X3', 'X4']\n",
      "Instrument variable(s): None\n",
      "No. Observations: 10000\n",
      "\n",
      "------------------ Score & algorithm ------------------\n",
      "Score function: partialling out\n",
      "\n",
      "------------------ Machine learner   ------------------\n",
      "Learner ml_l: RandomForestRegressor()\n",
      "Learner ml_m: RandomForestRegressor()\n",
      "Out-of-sample Performance:\n",
      "Regression:\n",
      "Learner ml_l RMSE: [[8.43266256]]\n",
      "Learner ml_m RMSE: [[1.13959992]]\n",
      "\n",
      "------------------ Resampling        ------------------\n",
      "No. folds: 2\n",
      "No. repeated sample splits: 1\n",
      "\n",
      "------------------ Fit summary       ------------------\n",
      "       coef   std err          t  P>|t|     2.5 %    97.5 %\n",
      "d  4.386734  0.076845  57.085535    0.0  4.236121  4.537348\n"
     ]
    }
   ],
   "source": [
    "dml_obj = dml.DoubleMLPLR(dml_data,\n",
    "                          ml_l=RandomForestRegressor(),\n",
    "                          ml_m=RandomForestRegressor(),\n",
    "                          n_folds=2,\n",
    "                          score='partialling out',)\n",
    "dml_obj.fit()\n",
    "print(dml_obj)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================== Sensitivity Analysis ==================\n",
      "\n",
      "------------------ Scenario          ------------------\n",
      "Significance Level: level=0.95\n",
      "Sensitivity parameters: cf_y=0.1; cf_d=0.1, rho=1.0\n",
      "\n",
      "------------------ Bounds with CI    ------------------\n",
      "   CI lower  theta lower     theta  theta upper  CI upper\n",
      "0  3.652547     3.758583  4.386734     5.014886  5.170425\n",
      "\n",
      "------------------ Robustness Values ------------------\n",
      "   H_0     RV (%)    RVa (%)\n",
      "0  0.0  51.346635  49.604976\n"
     ]
    }
   ],
   "source": [
    "dml_obj.sensitivity_analysis(cf_y=0.1, cf_d=0.1, rho=1.)\n",
    "print(dml_obj.sensitivity_summary)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(10000,)"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dpg_dict['y'].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(4.84204509497674, 5.158534499404629)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): hidden-state hazard -- `theta` was reassigned to 5.0 above,\n",
    "# while `sigma`, `nu`, `sig` are stale results from the earlier ablation cell,\n",
    "# so this mixes estimates from different datasets. Run the dml_sim cell below\n",
    "# on the confounded data first, then re-evaluate this interval.\n",
    "sensitivity_interval(theta, sigma, nu, sig, sig_level, 0.1, 0.1, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'alpha': 0.05,\n",
       " 'sig': array([[ 5.97407504e-03, -1.67034912e-01, -5.43651824e-05],\n",
       "        [-1.67034912e-01,  1.20203175e+01,  2.52109790e-03],\n",
       "        [-5.43651824e-05,  2.52109790e-03,  1.18426109e-04]]),\n",
       " 'sensitivity_interval': (-1.6122852078135446, 10.442290925119165),\n",
       " 'RV': 0.4936022162437439}"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sig_level=0.05\n",
    "\n",
    "theta, sigma, nu, sig = dml_sim(\n",
    "    RandomForestRegressor(), \n",
    "    RandomForestRegressor(), \n",
    "    2, \n",
    "    dpg_dict['y'].reshape(-1, 1),\n",
    "    dpg_dict['d'].reshape(-1, 1),\n",
    "    dpg_dict['x']\n",
    ")\n",
    "\n",
    "result_dict = {\n",
    "    'alpha': sig_level,\n",
    "    'sig': sig,\n",
    "    'sensitivity_interval': sensitivity_interval(theta, sigma, nu, sig, sig_level, 0.6, 0.6, 1),\n",
    "    'RV': RV(theta, sigma, nu, sig, sig_level)\n",
    "}\n",
    "result_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.4936022162437439"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "result_dict['RV']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 401k data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "dml_data = dml.datasets.fetch_401K()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================== DoubleMLPLR Object ==================\n",
      "\n",
      "------------------ Data summary      ------------------\n",
      "Outcome variable: net_tfa\n",
      "Treatment variable(s): ['e401']\n",
      "Covariates: ['age', 'inc', 'educ', 'fsize', 'marr', 'twoearn', 'db', 'pira', 'hown']\n",
      "Instrument variable(s): None\n",
      "No. Observations: 9915\n",
      "\n",
      "------------------ Score & algorithm ------------------\n",
      "Score function: partialling out\n",
      "\n",
      "------------------ Machine learner   ------------------\n",
      "Learner ml_l: RandomForestRegressor()\n",
      "Learner ml_m: RandomForestRegressor()\n",
      "Out-of-sample Performance:\n",
      "Regression:\n",
      "Learner ml_l RMSE: [[56554.88810261]]\n",
      "Learner ml_m RMSE: [[0.46880446]]\n",
      "\n",
      "------------------ Resampling        ------------------\n",
      "No. folds: 5\n",
      "No. repeated sample splits: 1\n",
      "\n",
      "------------------ Fit summary       ------------------\n",
      "             coef      std err         t         P>|t|        2.5 %  \\\n",
      "e401  9545.723974  1260.808673  7.571112  3.700422e-14  7074.584382   \n",
      "\n",
      "            97.5 %  \n",
      "e401  12016.863565  \n"
     ]
    }
   ],
   "source": [
    "# Partially linear DML on the 401(k) data: random forests learn both nuisance\n",
    "# functions (ml_l for the outcome, ml_m for the treatment), with 5-fold\n",
    "# cross-fitting and the 'partialling out' score.\n",
    "dml_obj = dml.DoubleMLPLR(dml_data,\n",
    "                          ml_l=RandomForestRegressor(),\n",
    "                          ml_m=RandomForestRegressor(),\n",
    "                          n_folds=5,\n",
    "                          score='partialling out',)\n",
    "dml_obj.fit()\n",
    "print(dml_obj)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================== Sensitivity Analysis ==================\n",
      "\n",
      "------------------ Scenario          ------------------\n",
      "Significance Level: level=0.95\n",
      "Sensitivity parameters: cf_y=0.04; cf_d=0.04, rho=1.0\n",
      "\n",
      "------------------ Bounds with CI    ------------------\n",
      "      CI lower  theta lower        theta   theta upper      CI upper\n",
      "0  2409.550717  4636.205456  9545.723974  14455.242491  16511.651393\n",
      "\n",
      "------------------ Robustness Values ------------------\n",
      "   H_0    RV (%)   RVa (%)\n",
      "0  0.0  7.628916  5.816637\n"
     ]
    }
   ],
   "source": [
    "# Benchmark: DoubleML's built-in sensitivity analysis with confounding\n",
    "# strengths cf_y = cf_d = 0.04 and rho = 1, to compare against dml_sim below.\n",
    "dml_obj.sensitivity_analysis(cf_y=0.04, cf_d=0.04, rho=1.)\n",
    "print(dml_obj.sensitivity_summary)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): LinearRegression/LassoCV appear unused in the cells below;\n",
    "# consider moving this mid-notebook import to the top imports cell or removing it.\n",
    "from sklearn.linear_model import LinearRegression, LassoCV"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'alpha': 0.05,\n",
       " 'sensitivity_interval': (-3259.2844095062455, 7370.312469068845),\n",
       " 'RV': 0}"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sig_level=0.05\n",
    "\n",
    "# Re-estimate the 401(k) effect with the hand-rolled dml_sim so the custom\n",
    "# sensitivity bounds can be compared against DoubleML's built-in ones above.\n",
    "theta, sigma, nu, sig = dml_sim(\n",
    "    RandomForestRegressor(), \n",
    "    RandomForestRegressor(), \n",
    "    5, \n",
    "    dml_data.y.reshape(-1, 1),\n",
    "    dml_data.d.reshape(-1, 1),\n",
    "    dml_data.x\n",
    ")\n",
    "\n",
    "# NOTE(review): results here look off compared to passing the econml nuisance\n",
    "# residuals directly inside dml_sim -- possibly a residualization mismatch; investigate.\n",
    "result_dict = {\n",
    "    'alpha': sig_level,\n",
    "    'sig': sig,  # included for consistency with the earlier result_dict cells\n",
    "    'sensitivity_interval': sensitivity_interval(theta, sigma, nu, sig, sig_level, 0.00001, 0.0001, 1),\n",
    "    'RV': RV(theta, sigma, nu, sig, sig_level)\n",
    "}\n",
    "result_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Coefficient Results:  X is None, please call intercept_inference to learn the constant!\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<table class=\"simpletable\">\n",
       "<caption>CATE Intercept Results</caption>\n",
       "<tr>\n",
       "         <td></td>        <th>point_estimate</th>  <th>stderr</th>  <th>zstat</th> <th>pvalue</th> <th>ci_lower</th> <th>ci_upper</th>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>cate_intercept</th>    <td>8923.022</td>    <td>1328.008</td> <td>6.719</td>   <td>0.0</td>  <td>6320.175</td> <td>11525.87</td>\n",
       "</tr>\n",
       "</table><br/><br/><sub>A linear parametric conditional average treatment effect (CATE) model was fitted:<br/>$Y = \\Theta(X)\\cdot T + g(X, W) + \\epsilon$<br/>where for every outcome $i$ and treatment $j$ the CATE $\\Theta_{ij}(X)$ has the form:<br/>$\\Theta_{ij}(X) = X' coef_{ij} + cate\\_intercept_{ij}$<br/>Coefficient Results table portrays the $coef_{ij}$ parameter vector for each outcome $i$ and treatment $j$. Intercept Results table portrays the $cate\\_intercept_{ij}$ parameter.</sub>"
      ],
      "text/plain": [
       "<class 'econml.utilities.Summary'>\n",
       "\"\"\"\n",
       "                        CATE Intercept Results                       \n",
       "=====================================================================\n",
       "               point_estimate  stderr  zstat pvalue ci_lower ci_upper\n",
       "---------------------------------------------------------------------\n",
       "cate_intercept       8923.022 1328.008 6.719    0.0 6320.175 11525.87\n",
       "---------------------------------------------------------------------\n",
       "\n",
       "<sub>A linear parametric conditional average treatment effect (CATE) model was fitted:\n",
       "$Y = \\Theta(X)\\cdot T + g(X, W) + \\epsilon$\n",
       "where for every outcome $i$ and treatment $j$ the CATE $\\Theta_{ij}(X)$ has the form:\n",
       "$\\Theta_{ij}(X) = X' coef_{ij} + cate\\_intercept_{ij}$\n",
       "Coefficient Results table portrays the $coef_{ij}$ parameter vector for each outcome $i$ and treatment $j$. Intercept Results table portrays the $cate\\_intercept_{ij}$ parameter.</sub>\n",
       "\"\"\""
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from econml.dml import LinearDML\n",
    "\n",
    "# Cross-check with econml's LinearDML on the same 401(k) data.\n",
    "# cache_values=True keeps the cross-fitted nuisance residuals on the estimator\n",
    "# so they can be extracted below for debugging dml_sim.\n",
    "est = LinearDML(model_y=RandomForestRegressor(), model_t=RandomForestRegressor()).fit(\n",
    "    Y=dml_data.y, T=dml_data.d, W=dml_data.x, cache_values=True\n",
    ")\n",
    "est.summary()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Store the econml nuisance residuals so they can be passed directly into the `dml_sim` function for debugging."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): _cached_values is a private econml attribute and may break\n",
    "# across versions; used here only to grab the residuals for debugging.\n",
    "econ_y_res, econ_t_res = est._cached_values.nuisances"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### bonus data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the bonus dataset bundled with the DoubleML package.\n",
    "# Note this rebinds dml_data, replacing the 401(k) data used above.\n",
    "dml_data = dml.datasets.fetch_bonus()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================== DoubleMLPLR Object ==================\n",
      "\n",
      "------------------ Data summary      ------------------\n",
      "Outcome variable: inuidur1\n",
      "Treatment variable(s): ['tg']\n",
      "Covariates: ['female', 'black', 'othrace', 'dep1', 'dep2', 'q2', 'q3', 'q4', 'q5', 'q6', 'agelt35', 'agegt54', 'durable', 'lusd', 'husd']\n",
      "Instrument variable(s): None\n",
      "No. Observations: 5099\n",
      "\n",
      "------------------ Score & algorithm ------------------\n",
      "Score function: partialling out\n",
      "\n",
      "------------------ Machine learner   ------------------\n",
      "Learner ml_l: RandomForestRegressor()\n",
      "Learner ml_m: RandomForestRegressor()\n",
      "Out-of-sample Performance:\n",
      "Regression:\n",
      "Learner ml_l RMSE: [[1.28984655]]\n",
      "Learner ml_m RMSE: [[0.5034491]]\n",
      "\n",
      "------------------ Resampling        ------------------\n",
      "No. folds: 2\n",
      "No. repeated sample splits: 1\n",
      "\n",
      "------------------ Fit summary       ------------------\n",
      "        coef   std err         t     P>|t|     2.5 %    97.5 %\n",
      "tg -0.079585  0.035971 -2.212475  0.026934 -0.150086 -0.009083\n"
     ]
    }
   ],
   "source": [
    "# Same PLR specification as for the 401(k) data above, but with only 2 folds.\n",
    "dml_obj = dml.DoubleMLPLR(dml_data,\n",
    "                          ml_l=RandomForestRegressor(),\n",
    "                          ml_m=RandomForestRegressor(),\n",
    "                          n_folds=2,\n",
    "                          score='partialling out',)\n",
    "dml_obj.fit()\n",
    "print(dml_obj)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================== Sensitivity Analysis ==================\n",
      "\n",
      "------------------ Scenario          ------------------\n",
      "Significance Level: level=0.95\n",
      "Sensitivity parameters: cf_y=0.04; cf_d=0.04, rho=1.0\n",
      "\n",
      "------------------ Bounds with CI    ------------------\n",
      "   CI lower  theta lower     theta  theta upper  CI upper\n",
      "0 -0.243371    -0.184128 -0.079585     0.024959  0.084101\n",
      "\n",
      "------------------ Robustness Values ------------------\n",
      "   H_0    RV (%)   RVa (%)\n",
      "0  0.0  3.059967  0.794663\n"
     ]
    }
   ],
   "source": [
    "# Built-in sensitivity analysis on the bonus data with the same confounding\n",
    "# strengths (cf_y = cf_d = 0.04, rho = 1), for comparison with dml_sim below.\n",
    "dml_obj.sensitivity_analysis(cf_y=0.04, cf_d=0.04, rho=1.)\n",
    "print(dml_obj.sensitivity_summary)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n",
      "c:\\Users\\fabiovera\\AppData\\Local\\anaconda3\\envs\\dev_env2\\lib\\site-packages\\sklearn\\base.py:1473: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'alpha': 0.05,\n",
       " 'sensitivity_interval': (-1.9404570165795167, 0.9777681976085163),\n",
       " 'RV': 0}"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sig_level=0.05\n",
    "\n",
    "# Custom DML estimate on the bonus data (5 folds), then the hand-rolled\n",
    "# sensitivity interval and robustness value at cf_y = cf_d = 0.05.\n",
    "theta, sigma, nu, sig = dml_sim(\n",
    "    RandomForestRegressor(), \n",
    "    RandomForestRegressor(), \n",
    "    5, \n",
    "    dml_data.y.reshape(-1, 1),\n",
    "    dml_data.d.reshape(-1, 1),\n",
    "    dml_data.x\n",
    ")\n",
    "\n",
    "# NOTE(review): unlike the first result_dict cell, 'sig' is not stored here.\n",
    "result_dict = {\n",
    "    'alpha': sig_level,\n",
    "    'sensitivity_interval': sensitivity_interval(theta, sigma, nu, sig, sig_level, 0.05, 0.05, 1),\n",
    "    'RV': RV(theta, sigma, nu, sig, sig_level)\n",
    "}\n",
    "result_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "dev_env2",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
