{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm.auto import tqdm  # tqdm_notebook is deprecated; tqdm.auto picks the right frontend\n",
    "import numpy as np\n",
    "import pymc3 as pm\n",
    "import theano.tensor as tt\n",
    "import pandas as pd\n",
    "import arviz as az\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# with pm.Model() as m2:\n",
    "#     tau1 = pm.Normal(\"tau1\",0,1,shape=(N,1))\n",
    "#     slope = pm.Normal(\"slope\",0,2)\n",
    "#     tau_sig = pm.InverseGamma(\"tau_sig_t\",1,1,shape=(1,T))\n",
    "#     tau_pre = pm.Normal(\"tau\",tt.arange(T)*slope+tau1,tau_sig,shape=(N,T))\n",
    "#     lambda_i_pre = pm.Normal(\"lambda\",4,1,shape=(T,I))\n",
    "#     phi_i_pre = pm.Normal(\"phi\",1,1,shape=(T,I))\n",
    "#     epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,I))\n",
    "#     sub_lambda = lambda_i_pre[subject_test_order]\n",
    "#     sub_phi = phi_i_pre[subject_test_order]\n",
    "#     sub_ep = epsilon_i_pre[subject_test_order]\n",
    "    \n",
    "\n",
    "#     rt_mean = (sub_lambda-tau_pre.T.reshape((T,N,1)))*sub_phi\n",
    "#     Y = pm.Lognormal(\"rt\",rt_mean,sub_ep,observed=rt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # subject_test_order = tt.as_tensor(subject_test_order)\n",
    "# with pm.Model() as m1:\n",
    "#     s_pre = pm.InverseGamma(\"s\",1,1,shape=(T-1))\n",
    "#     s_pre = pm.Deterministic(\"s_pre\",tt.concatenate(([1],s_pre))) \n",
    "#     s_mat = tt.eye(T)*s_pre\n",
    "#     L = pm.Normal(\"L\",0.5,2,shape=(sum(np.arange(T)))) \n",
    "#     L = tri(T,s_mat,L)\n",
    "    \n",
    "#     cov_pre = pm.Deterministic(\"COV\",(L.dot(L.T)))\n",
    "#     corr = pm.Deterministic(\"corr\",L)\n",
    "#     tau_mean_pre = tt.concatenate(([0],pm.Normal(\"tau_mean\",0,2,shape=(T-1)))) \n",
    "#     tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,cov=cov_pre,shape=(N,T))\n",
    "    \n",
    "#     lambda_i_pre = pm.Normal(\"lambda\",4,1,shape=(T,I))\n",
    "#     phi_i_pre = pm.Normal(\"phi\",1,1,shape=(T,I))\n",
    "#     epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,I))\n",
    "\n",
    "#     for v in range(5):\n",
    "        \n",
    "#         rt_pre = pm.Lognormal(f\"rt_v{v}\",\n",
    "#                               (lambda_i_pre[test_oder[v]-1].reshape([\n",
    "#                                                         T,1,I\n",
    "#                                                         ])\\\n",
    "#                     -tau_pre.T[:,v_index[v]].reshape([\n",
    "#                                                     T,len(v_index[v]),1\n",
    "#                                                     ]))\\\n",
    "#                     *phi_i_pre[test_oder[v]-1].reshape([\n",
    "#                                                     T,1,I\n",
    "#                                                     ]),\n",
    "#                               epsilon_i_pre[test_oder[v]-1].reshape([\n",
    "#                                                                             T,1,I\n",
    "#                                                                             ]),\n",
    "#                               observed=rt[:,v_index[v]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # subject_test_order = tt.as_tensor(subject_test_order)\n",
    "# rt_observed_gv = np.concatenate([rt[:,idx] for idx in v_index],axis=1)\n",
    "# with pm.Model() as m1:\n",
    "#     s_pre = pm.InverseGamma(\"s\",1,1,shape=(T-1))\n",
    "#     s_pre = pm.Deterministic(\"s_pre\",tt.concatenate(([1],s_pre))) \n",
    "#     s_mat = tt.eye(T)*s_pre\n",
    "#     cor_mean_prior = pm.Normal(\"cor_mean_prior\",0,5,shape=(sum(np.arange(T))))\n",
    "#     L = pm.Normal(\"L\",cor_mean_prior,2,shape=(sum(np.arange(T)))) \n",
    "#     L = tri(T,s_mat,L)\n",
    "    \n",
    "#     cov_pre = pm.Deterministic(\"COV\",(L.dot(L.T)))\n",
    "#     corr = pm.Deterministic(\"corr\",L)\n",
    "#     tau_mean_prior = pm.Normal(\"tau_mean_prior\",0,5,shape=(T-1))\n",
    "#     tau_mean_pre = tt.concatenate(([0],pm.Normal(\"tau_mean\",tau_mean_prior,5,shape=(T-1)))) \n",
    "#     tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,cov=cov_pre,shape=(N,T))\n",
    "    \n",
    "#     lambda_i_pre = pm.Normal(\"lambda\",4,1,shape=(T,I))\n",
    "#     phi_i_pre = pm.Normal(\"phi\",1,1,shape=(T,I))\n",
    "#     epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,I))\n",
    "\n",
    "#     rt_mean = tt.concatenate([(lambda_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "#                             -tau_pre.T[:,v_index[v]].reshape([T,len(v_index[v]),1]))\n",
    "#                             *phi_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "#                             for v in range(5)],axis=1)\n",
    "#     rt_ep = tt.concatenate([epsilon_i_pre[test_oder[v]-1].reshape([T,1,I])*tt.ones((1,len(v_index[v]),1)) for v in range(5)],axis=1)\n",
    "        \n",
    "    \n",
    "#     rt_pre = pm.Lognormal(\"rt\",rt_mean,rt_ep,observed=rt_observed_gv)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # subject_test_order = tt.as_tensor(subject_test_order)\n",
    "# rt_observed_gv = np.concatenate([rt[:,idx] for idx in v_index],axis=1)\n",
    "# with pm.Model() as m1:\n",
    "#     chol, corr, stds = pm.LKJCholeskyCov(\n",
    "#         \"chol\", n=T, eta=2.0, sd_dist=pm.Exponential.dist(1.0), compute_corr=True\n",
    "#     )\n",
    "#     pm.Deterministic(\"corr\",corr)\n",
    "#     chol = tt.set_subtensor(chol[0,0],1)\n",
    "#     cov_pre = pm.Deterministic(\"cov\",chol.dot(chol.T))\n",
    "#     tau_mean_prior = pm.Normal(\"tau_mean_prior\",0,5,shape=(T-1))\n",
    "#     tau_mean_pre = tt.concatenate(([0],pm.Normal(\"tau_mean\",tau_mean_prior,5,shape=(T-1)))) \n",
    "#     tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,cov=cov_pre,shape=(N,T))\n",
    "    \n",
    "#     lambda_i_pre = pm.Normal(\"lambda\",4,1,shape=(T,I))\n",
    "#     phi_i_pre = pm.Normal(\"phi\",1,1,shape=(T,I))\n",
    "#     epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,I))\n",
    "\n",
    "#     rt_mean = tt.concatenate([(lambda_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "#                             -tau_pre.T[:,v_index[v]].reshape([T,len(v_index[v]),1]))\n",
    "#                             *phi_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "#                             for v in range(5)],axis=1)\n",
    "#     rt_ep = tt.concatenate([epsilon_i_pre[test_oder[v]-1].reshape([T,1,I])*tt.ones((1,len(v_index[v]),1)) for v in range(5)],axis=1)\n",
    "        \n",
    "    \n",
    "#     rt_pre = pm.Lognormal(\"rt\",rt_mean,rt_ep,observed=rt_observed_gv)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): the file names contain literal spaces (e.g. \"rt_ 1 .csv\") — presumably intentional; confirm against ./data\n",
    "rt = np.stack([pd.read_csv(f\"./data/rt_ {i+1} .csv\",usecols=[\"V1\"]).values for i in range(5)])\n",
    "# reshape to (tests, subjects, items) — assumes each csv holds 3500 = 350*10 values\n",
    "rt = rt.reshape(5,350,10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test order for each group; drop the first column because it is just the table index (no real meaning)\n",
    "test_oder = pd.read_csv(\"./data/test_oder.csv\").values[:,1:]\n",
    "# Python sequences are 0-based (unlike R), hence the -1\n",
    "test_index = test_oder-1\n",
    "test_version = pd.read_csv(\"./data/Test_versions.csv\",usecols=[\"V1\"]).values.flatten()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "T = 5    # number of tests/occasions (first axis of rt)\n",
    "N = 350  # number of subjects (second axis of rt)\n",
    "I = 10   # number of items per test (third axis of rt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Map each subject's test_version (1-5) to its 0-based test order: (N, T), then transpose to (T, N)\n",
    "subject_test_order = test_index[test_version-1]\n",
    "subject_test_order = subject_test_order.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([2, 3, 4, 5, 1])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_oder[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([2, 4, 5, 1, 1, 5, 3, 2, 1, 4, 1, 3, 3, 4, 2, 4, 5, 2, 3, 2, 5, 4,\n",
       "       1, 2, 3, 4, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 4, 5, 2, 3, 5, 4, 1, 1,\n",
       "       1, 2, 3, 1, 2, 1, 2, 3, 4, 5, 1, 2, 3, 4, 3, 3, 4, 2, 1, 2, 3, 4,\n",
       "       5, 1, 2, 3, 5, 1, 2, 3, 4, 5, 1, 2, 4, 2, 3, 4, 5, 1, 2, 3, 1, 1,\n",
       "       2, 4, 5, 1, 2, 4, 1, 2, 3, 4, 5, 2, 5, 4, 1, 2, 3, 4, 5, 1, 2, 3,\n",
       "       4, 5, 1, 2, 3, 4, 5, 1, 3, 4, 1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 2, 3,\n",
       "       4, 5, 1, 2, 5, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 2, 3, 4, 5, 1, 2, 1,\n",
       "       3, 4, 5, 1, 2, 5, 4, 1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 2, 4, 5, 1, 2,\n",
       "       1, 1, 2, 4, 3, 5, 1, 2, 3, 4, 1, 2, 3, 4, 5, 2, 1, 2, 3, 4, 5, 1,\n",
       "       2, 3, 4, 5, 1, 2, 3, 4, 1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 2, 3, 3, 1,\n",
       "       2, 4, 3, 5, 1, 2, 3, 4, 1, 2, 3, 4, 1, 5, 2, 5, 3, 1, 2, 3, 4, 5,\n",
       "       1, 2, 3, 4, 5, 1, 2, 3, 5, 4, 1, 4, 3, 4, 5, 1, 1, 2, 3, 4, 5, 1,\n",
       "       2, 3, 4, 1, 2, 3, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 2, 3, 4, 5, 1, 2,\n",
       "       1, 2, 3, 5, 1, 2, 3, 4, 4, 5, 2, 3, 4, 5, 1, 2, 1, 2, 3, 4, 5, 1,\n",
       "       4, 5, 1, 2, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 2, 5,\n",
       "       3, 5, 2, 4, 1, 2, 3, 4, 5, 1, 1, 2, 3, 5, 3, 3, 4, 5, 1, 5])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_version"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# v_index[i] lists the 0-based subject indices whose test_version equals i+1\n",
    "v_index = [np.arange(350)[test_version==(i+1)]  for i in range(5)]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n",
       "       [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],\n",
       "       [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],\n",
       "       [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],\n",
       "       [ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9]])"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = np.arange(T*I).reshape(T,I)\n",
    "x[test_oder[1]-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(5, 350)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "subject_test_order.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def data_gener(subject_test_order):\n",
    "    \"\"\"Simulate lognormal response times with fixed 'true' parameter values.\n",
    "\n",
    "    subject_test_order : (T, N) int array of 0-based test indices per subject.\n",
    "    Returns rt with shape (5, 350, 10): tests x subjects x items.\n",
    "    NOTE(review): no random seed is set, so the simulated data are not reproducible.\n",
    "    \"\"\"\n",
    "    tau_mean = np.array([0,0.08962463, 0.20529881, 0.27482943, 0.38357254])\n",
    "    cov =[[1.   , 0.281, 0.397, 0.313, 0.335],\n",
    "       [0.281, 0.126, 0.128, 0.102, 0.109],\n",
    "       [0.397, 0.128, 0.214, 0.142, 0.151],\n",
    "       [0.313, 0.102, 0.142, 0.144, 0.121],\n",
    "       [0.335, 0.109, 0.151, 0.121, 0.16 ]]\n",
    "    lambda_i = np.array([[3.47904226, 3.48092061, 3.50389548, 3.65213225, 3.68763499,\n",
    "                        3.49519173, 3.46420529, 3.6450886 , 3.45832729, 3.50611461],\n",
    "                    [3.46905027, 3.4865045 , 3.52990054, 3.62699094, 3.69623724,\n",
    "                        3.5108881 , 3.42285793, 3.63969826, 3.48085452, 3.49875628],\n",
    "                    [3.48638329, 3.52409948, 3.49579457, 3.62168352, 3.66825349,\n",
    "                        3.43472656, 3.439949  , 3.64203138, 3.45334508, 3.54108414],\n",
    "                    [3.53646425, 3.52187356, 3.5157394 , 3.57554694, 3.62165349,\n",
    "                        3.53036811, 3.43465193, 3.61590978, 3.47500245, 3.54889858],\n",
    "                    [3.51474299, 3.46438449, 3.48640937, 3.5487003 , 3.69888899,\n",
    "                        3.53430971, 3.43201937, 3.6114257 , 3.49712088, 3.52643726]])\n",
    "    epsilon_i = np.array([[0.65131151, 0.76859535, 0.7037782 , 0.84548921, 0.69298111,\n",
    "                            0.71482942, 0.88732922, 0.65461122, 0.67825452, 0.74082501],\n",
    "                        [0.69486281, 0.73254919, 0.68635088, 0.84498684, 0.71827331,\n",
    "                            0.78695395, 0.87539312, 0.69560075, 0.74533578, 0.72006544],\n",
    "                        [0.71420962, 0.71849639, 0.65233315, 0.85456522, 0.72566865,\n",
    "                            0.79710279, 0.86439581, 0.6601459 , 0.70898181, 0.74299168],\n",
    "                        [0.73207429, 0.68323721, 0.6865543 , 0.7950074 , 0.6811167 ,\n",
    "                            0.73469803, 0.81414372, 0.72863257, 0.74990608, 0.74330321],\n",
    "                        [0.75193773, 0.68785976, 0.70382524, 0.90979533, 0.75193348,\n",
    "                            0.7039742 , 0.80466047, 0.68383339, 0.69076075, 0.76544729]])\n",
    "    # per-subject tau drawn from MvNormal(tau_mean, cov); shape (350, 5)\n",
    "    tau = np.random.multivariate_normal(tau_mean,cov,size=(350))\n",
    "    # index item parameters by each subject's test order -> shape (5, 350, 10)\n",
    "    lambda_i_sub = lambda_i[subject_test_order]\n",
    "    epsilon_i_sub = epsilon_i[subject_test_order]\n",
    "    # log-scale mean: lambda minus tau, broadcast over the 10 items\n",
    "    rt_mean = lambda_i_sub-tau.T.reshape(5,350,1)\n",
    "    rt = np.random.lognormal(rt_mean,epsilon_i_sub)\n",
    "    return rt\n",
    "rt_simu = data_gener(subject_test_order)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# with pm.Model() as m1:\n",
    "    \n",
    "#     sd_dist = pm.Exponential.dist(1.0, shape=3)\n",
    "#     chol, corr, stds = pm.LKJCholeskyCov('chol_cov', n=3, eta=2,\n",
    "#         sd_dist=sd_dist, compute_corr=True)\n",
    "#     vals = pm.MvNormal('vals', mu=tt.ones(3), chol=chol,shape=(350,3))\n",
    "        \n",
    "#     # chol, corr, stds = pm.LKJCholeskyCov(\n",
    "#     # \"chol\", n=T, eta=2.0, sd_dist=pm.Exponential.dist(1.0), compute_corr=True\n",
    "#     #     )\n",
    "#     # # L = pm.Normal(\"L\",0,1,shape=(sum(np.arange(T)))) \n",
    "#     # # L = tri(T,s_mat,L)\n",
    "    \n",
    "#     # cov_pre = pm.Deterministic(\"cov\",(chol.dot(chol.T)))\n",
    "\n",
    "#     # _ = pm.Normal(\"tau_mean\",0,10,shape=(T-1))\n",
    "#     # tau_mean_pre = tt.concatenate(([0],_)) \n",
    "#     # tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,chol=chol,shape=(N,T))\n",
    "#     pm.sample(2000,tune=2000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# subject_test_order = tt.as_tensor(subject_test_order)\n",
    "rt_observed_gv = np.concatenate([rt[:,idx] for idx in v_index],axis=1)\n",
    "def model1(es_type,rt_observed_gv,steps=50000):\n",
    "    def tri(T,mat,values):\n",
    "    \n",
    "        i=0\n",
    "        for tt_ in range(T):\n",
    "            for ttt in range(T):\n",
    "                if tt_>ttt:\n",
    "                    mat = tt.set_subtensor(mat[tt_,ttt],values[i])\n",
    "                    i+=1\n",
    "        return mat\n",
    "\n",
    "    with pm.Model() as m1:\n",
    "        \n",
    "        # s_pre = pm.InverseGamma(\"s\",1,1,shape=(T-1))\n",
    "        s_pre = pm.Deterministic(\"s_pre\",tt.ones(T)) \n",
    "        s_mat = tt.eye(T)*s_pre\n",
    "        \n",
    "        # chol, corr, stds = pm.LKJCholeskyCov(\n",
    "        # \"chol\", n=T, eta=2.0, sd_dist=pm.Exponential.dist(1.0), compute_corr=True\n",
    "        #     )\n",
    "        L = pm.Normal(\"L\",0,1,shape=(sum(np.arange(T)))) \n",
    "        L = tri(T,s_mat,L)\n",
    "        \n",
    "        cov_pre = pm.Deterministic(\"cov\",(L.dot(L.T)))\n",
    "\n",
    "        _ = pm.Normal(\"tau_mean\",0,10,shape=(T-1))\n",
    "        tau_mean_pre = tt.concatenate(([0],_)) \n",
    "        tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,chol=L,shape=(N,T))\n",
    "\n",
    "        lambda_mean_prior = pm.Normal(\"lambda_i_mean_prior\",3,5)\n",
    "        lambda_sig_prior = pm.InverseGamma(\"lambda_sig_prior\",1,1) \n",
    "        lambda_i_pre = pm.Normal(\"lambda\",lambda_mean_prior,lambda_sig_prior,shape=(T,I))\n",
    "        epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,I))\n",
    "\n",
    "        rt_mean = tt.concatenate([(lambda_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                -tau_pre.T[:,v_index[v]].reshape([T,len(v_index[v]),1]))\n",
    "                                # *phi_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                for v in range(5)],axis=1)\n",
    "        rt_ep = tt.concatenate([epsilon_i_pre[test_oder[v]-1].reshape([T,1,I])*tt.ones((1,len(v_index[v]),1)) for v in range(5)],axis=1)\n",
    "            \n",
    "        \n",
    "        rt_pre = pm.Lognormal(\"rt\",rt_mean,rt_ep,observed=rt_observed_gv)\n",
    "        \n",
    "        if es_type==\"mcmc\":\n",
    "            tr1 = pm.sample(2000,tune=2000,return_inferencedata=True)\n",
    "\n",
    "        if es_type==\"vi\":\n",
    "            approx1 = pm.fit(n=steps, method=\"advi\",obj_n_mc=2,callbacks=[\n",
    "                                    pm.callbacks.CheckParametersConvergence(\n",
    "                                                every=100, diff='absolute',\n",
    "                                                tolerance=1e-2)],\n",
    "                             obj_optimizer=pm.adam(learning_rate=0.01))\n",
    "            tr1 = az.from_pymc3(approx1.sample(draws=1000),model=m1)\n",
    "                        \n",
    "    return tr1,m1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model2(es_type,rt_observed_gv):\n",
    "    with pm.Model() as m2:\n",
    "        tau1 = pm.Normal(\"tau1\",0,1,shape=(N,1))\n",
    "        slope = pm.Normal(\"slope\",0,2,shape=(T-1))\n",
    "        plus = pm.Normal(\"plus\",0,2,shape=(T-1))\n",
    "        tau_sig_pre = pm.InverseGamma(\"tau_sig_pre\",1,1,shape=(1,T-1))\n",
    "        tau_sig = pm.Normal(\"tau_sig\",0,tau_sig_pre,shape=(N,T-1,1))\n",
    "        taus = [tau1]\n",
    "        # build tau recursively across occasions; T-1 steps (identical to the old hard-coded range(4) for T=5)\n",
    "        for t in range(T-1):\n",
    "            taus.append(plus[t]+taus[t]*slope[t]+tau_sig[:,t])\n",
    "        taus = tt.concatenate(taus,axis=1)\n",
    "        tau_pre = pm.Deterministic(\"tau\",taus)       \n",
    "        lambda_mean_prior = pm.Normal(\"lambda_i_mean_prior\",3,5)\n",
    "        lambda_sig_prior = pm.InverseGamma(\"lambda_sig_prior\",1,1) \n",
    "        lambda_i_pre = pm.Normal(\"lambda\",lambda_mean_prior,lambda_sig_prior,shape=(T,I))\n",
    "        # phi_i_pre = pm.Normal(\"phi\",1,1,shape=(T,I))\n",
    "        epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,I))\n",
    "        \n",
    "        rt_mean = tt.concatenate([(lambda_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                -tau_pre.T[:,v_index[v]].reshape([T,len(v_index[v]),1]))\n",
    "                                # *phi_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                for v in range(5)],axis=1)\n",
    "        rt_ep = tt.concatenate([epsilon_i_pre[test_oder[v]-1].reshape([T,1,I])*tt.ones((1,len(v_index[v]),1)) for v in range(5)],axis=1)\n",
    "            \n",
    "        \n",
    "        rt_pre = pm.Lognormal(\"rt\",rt_mean,rt_ep,observed=rt_observed_gv)\n",
    "        if es_type==\"mcmc\":\n",
    "            tr2 = pm.sample(2000,tune=2000,return_inferencedata=True)\n",
    "\n",
    "        if es_type==\"vi\":\n",
    "            approx2 = pm.fit(n=70000, method=\"advi\",obj_n_mc=2,callbacks=[\n",
    "                                    pm.callbacks.CheckParametersConvergence(\n",
    "                                                every=100, diff='relative',\n",
    "                                                tolerance=1e-2)])\n",
    "            tr2 = az.from_pymc3(approx2.sample(draws=1000),model=m2)\n",
    "                        \n",
    "    return tr2,m2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model3(es_type,rt_observed_gv):\n",
    "    with pm.Model() as m3:\n",
    "        tau1 = pm.Normal(\"tau1\",0,1,shape=(N,1))\n",
    "        slope = pm.Normal(\"slope\",0,2)\n",
    "        tau_sig = pm.InverseGamma(\"tau_sig_t\",1,1,shape=(T-1))\n",
    "        \n",
    "        lambda_mean_prior = pm.Normal(\"lambda_i_mean_prior\",3,5)\n",
    "        lambda_sig_prior = pm.InverseGamma(\"lambda_sig_prior\",1,1) \n",
    "        tau_pre = pm.Normal(\"tau_t2_T\",tt.arange(1,T)*slope+0,tau_sig,shape=(N,(T-1)))\n",
    "        tau_pre = tt.concatenate([tau1,tau_pre],axis=1)\n",
    "        lambda_i_pre = pm.Normal(\"lambda\",lambda_mean_prior,lambda_sig_prior,shape=(T,I))\n",
    "        # phi_i_pre = pm.Normal(\"phi\",1,1,shape=(T,I))\n",
    "        epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,I))\n",
    "        \n",
    "        rt_mean = tt.concatenate([(lambda_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                -tau_pre.T[:,v_index[v]].reshape([T,len(v_index[v]),1]))\n",
    "                                # *phi_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                for v in range(5)],axis=1)\n",
    "        rt_ep = tt.concatenate([epsilon_i_pre[test_oder[v]-1].reshape([T,1,I])*tt.ones((1,len(v_index[v]),1)) for v in range(5)],axis=1)\n",
    "            \n",
    "        \n",
    "        rt_pre = pm.Lognormal(\"rt\",rt_mean,rt_ep,observed=rt_observed_gv)\n",
    "        \n",
    "        if es_type==\"mcmc\":\n",
    "            tr3 = pm.sample(2000,tune=2000,return_inferencedata=True)\n",
    "\n",
    "        if es_type==\"vi\":\n",
    "            approx3 = pm.fit(n=70000, method=\"advi\",obj_n_mc=2,callbacks=[\n",
    "                                    pm.callbacks.CheckParametersConvergence(\n",
    "                                                every=100, diff='absolute',\n",
    "                                                tolerance=1e-2)],\n",
    "                             obj_optimizer=pm.adam(learning_rate=0.01))\n",
    "            tr3 = az.from_pymc3(approx3.sample(draws=1000),model=m3)\n",
    "                        \n",
    "    return tr3,m3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model4(es_type,rt_observed_gv):\n",
    "    with pm.Model() as m4:\n",
    "\n",
    "        tau1 = pm.Normal(\"tau1\",0,1,shape=(N,1))\n",
    "        # NOTE(review): these are tau hyperpriors but are registered in the trace as\n",
    "        # \"lambda_mu\"/\"lambda_sig\" — presumably copy-paste names; confirm before renaming\n",
    "        # (renaming would change trace variable names that downstream analysis may use).\n",
    "        taut_mu = pm.Normal(\"lambda_mu\",0,10,shape=T-1)\n",
    "        taut_sig = pm.Exponential(\"lambda_sig\",1,shape=T-1)\n",
    "   \n",
    "\n",
    "        tau_pre = pm.Deterministic(\"tau\",tt.concatenate([tau1,pm.Normal(\"taut\",taut_mu,taut_sig,shape=(N,T-1))],axis=1))\n",
    "        lambda_mean_prior = pm.Normal(\"lambda_i_mean_prior\",3,5)\n",
    "        lambda_sig_prior = pm.InverseGamma(\"lambda_sig_prior\",1,1)\n",
    "        lambda_i_pre = pm.Normal(\"lambda\",lambda_mean_prior,lambda_sig_prior,shape=(T,I))\n",
    "        \n",
    "        epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,I))\n",
    "        \n",
    "        rt_mean = tt.concatenate([(lambda_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                -tau_pre.T[:,v_index[v]].reshape([T,len(v_index[v]),1]))\n",
    "                                \n",
    "                                for v in range(5)],axis=1)\n",
    "        rt_ep = tt.concatenate([epsilon_i_pre[test_oder[v]-1].reshape([T,1,I])*tt.ones((1,len(v_index[v]),1)) for v in range(5)],axis=1)\n",
    "            \n",
    "        \n",
    "        rt_pre = pm.Lognormal(\"rt\",rt_mean,rt_ep,observed=rt_observed_gv)\n",
    "        if es_type==\"mcmc\":\n",
    "            tr4 = pm.sample(2000,tune=8000,return_inferencedata=True)\n",
    "\n",
    "        if es_type==\"vi\":\n",
    "            approx4 = pm.fit(n=70000, method=\"advi\",obj_n_mc=2,callbacks=[\n",
    "                                    pm.callbacks.CheckParametersConvergence(\n",
    "                                                every=100, diff='absolute',\n",
    "                                                tolerance=1e-2)],\n",
    "                             obj_optimizer=pm.adam(learning_rate=0.01))\n",
    "            tr4 = az.from_pymc3(approx4.sample(draws=1000),model=m4)\n",
    "                        \n",
    "    return tr4,m4\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def model5(es_type,rt_observed_gv):\n",
    "    def tri(T,mat,values):\n",
    "    \n",
    "        i=0\n",
    "        for tt_ in range(T):\n",
    "            for ttt in range(T):\n",
    "                if tt_>ttt:\n",
    "                    mat = tt.set_subtensor(mat[tt_,ttt],values[i])\n",
    "                    i+=1\n",
    "        return mat\n",
    "\n",
    "    with pm.Model() as m1:\n",
    "        s_pre = pm.InverseGamma(\"s\",1,1,shape=(T-1))\n",
    "        s_pre = pm.Deterministic(\"s_pre\",tt.concatenate(([1],s_pre))) \n",
    "        s_mat = tt.eye(T)*s_pre\n",
    "\n",
    "        L = pm.Normal(\"L\",0,5,shape=(sum(np.arange(T)))) \n",
    "        L = tri(T,s_mat,L)\n",
    "        # L = chol\n",
    "        cov_pre = pm.Deterministic(\"cov\",(L.dot(L.T)))\n",
    "        \n",
    "   \n",
    "        _ = pm.Normal(\"tau_mean\",0,5,shape=(T-1))\n",
    "        tau_mean_pre = tt.concatenate(([0],_)) \n",
    "        tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,chol=L,shape=(N,T))\n",
    "        lambda_mu = pm.Normal(\"lambda_mu\",3,10)\n",
    "        lambda_sig = pm.HalfCauchy(\"lambda_sig\",2.5)\n",
    "        lambda_delta = pm.Normal(\"lambda_delta\",0,1,shape=(T,I))\n",
    "\n",
    "        lambda_i_pre = pm.Deterministic(\"lambda\",lambda_mu+lambda_sig*lambda_delta)\n",
    "\n",
    "        omega_i_pre = pm.InverseGamma(\"omega\",1,1,shape=(T,I))\n",
    "        epsilon_i_pre = pm.Deterministic(\"epsilon\",1/omega_i_pre**2)\n",
    "\n",
    "        rt_mean = tt.concatenate([(lambda_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                -tau_pre.T[:,v_index[v]].reshape([T,len(v_index[v]),1]))\n",
    "                                # *phi_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                for v in range(5)],axis=1)\n",
    "        rt_ep = tt.concatenate([epsilon_i_pre[test_oder[v]-1].reshape([T,1,I])*tt.ones((1,len(v_index[v]),1)) for v in range(5)],axis=1)\n",
    "            \n",
    "        \n",
    "        rt_pre = pm.Lognormal(\"rt\",rt_mean,rt_ep,observed=rt_observed_gv)\n",
    "        \n",
    "        if es_type==\"mcmc\":\n",
    "            tr1 = pm.sample(2000,tune=2000,return_inferencedata=True)\n",
    "\n",
    "        if es_type==\"vi\":\n",
    "            approx1 = pm.fit(n=70000, method=\"advi\",obj_n_mc=2,callbacks=[\n",
    "                                    pm.callbacks.CheckParametersConvergence(\n",
    "                                                every=100, diff='absolute',\n",
    "                                                tolerance=1e-2)],\n",
    "                             obj_optimizer=pm.adam(learning_rate=0.01))\n",
    "            tr1 = az.from_pymc3(approx1.sample(draws=1000),model=m1)\n",
    "                        \n",
    "    return tr1,m1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def model6(es_type,rt_observed_gv):\n",
    "    def tri(T,mat,values):\n",
    "    \n",
    "        i=0\n",
    "        for tt_ in range(T):\n",
    "            for ttt in range(T):\n",
    "                if tt_>ttt:\n",
    "                    mat = tt.set_subtensor(mat[tt_,ttt],values[i])\n",
    "                    i+=1\n",
    "        return mat\n",
    "\n",
    "    with pm.Model() as m1:\n",
    "        s_pre = pm.InverseGamma(\"s\",1,1,shape=(T-1))\n",
    "        s_pre = pm.Deterministic(\"s_pre\",tt.concatenate(([1],s_pre))) \n",
    "        s_mat = tt.eye(T)*s_pre\n",
    "\n",
    "        L = pm.Normal(\"L\",0,5,shape=(sum(np.arange(T)))) \n",
    "        L = tri(T,s_mat,L)\n",
    "        # L = chol\n",
    "        cov_pre = pm.Deterministic(\"cov\",(L.dot(L.T)))\n",
    "        \n",
    "   \n",
    "        _ = pm.Normal(\"tau_mean\",0,5,shape=(T-1))\n",
    "        tau_mean_pre = tt.concatenate(([0],_)) \n",
    "        tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,chol=L,shape=(N,T))\n",
    "\n",
    "\n",
    "        # fixed Normal(3, 2) prior per (test, item); pm.Deterministic(\"lambda\",3,2,shape=(T,N)) was an\n",
    "        # invalid call (Deterministic takes name + var only) and (T,N) contradicts the reshape([T,1,I]) below\n",
    "        lambda_i_pre = pm.Normal(\"lambda\",3,2,shape=(T,I))\n",
    "\n",
    "        omega_i_pre = pm.InverseGamma(\"omega\",1,1,shape=(T,I))\n",
    "        epsilon_i_pre = pm.Deterministic(\"epsilon\",1/omega_i_pre**2)\n",
    "\n",
    "        rt_mean = tt.concatenate([(lambda_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                -tau_pre.T[:,v_index[v]].reshape([T,len(v_index[v]),1]))\n",
    "                                # *phi_i_pre[test_oder[v]-1].reshape([T,1,I])\n",
    "                                for v in range(5)],axis=1)\n",
    "        rt_ep = tt.concatenate([epsilon_i_pre[test_oder[v]-1].reshape([T,1,I])*tt.ones((1,len(v_index[v]),1)) for v in range(5)],axis=1)\n",
    "            \n",
    "        \n",
    "        rt_pre = pm.Lognormal(\"rt\",rt_mean,rt_ep,observed=rt_observed_gv)\n",
    "        \n",
    "        if es_type==\"mcmc\":\n",
    "            tr1 = pm.sample(2000,tune=2000,return_inferencedata=True)\n",
    "\n",
    "        if es_type==\"vi\":\n",
    "            approx1 = pm.fit(n=70000, method=\"advi\",obj_n_mc=2,callbacks=[\n",
    "                                    pm.callbacks.CheckParametersConvergence(\n",
    "                                                every=100, diff='absolute',\n",
    "                                                tolerance=1e-2)],\n",
    "                             obj_optimizer=pm.adam(learning_rate=0.01))\n",
    "            tr1 = az.from_pymc3(approx1.sample(draws=1000),model=m1)\n",
    "                        \n",
    "    return tr1,m1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# tr1,m1 = model1(\"mcmc\",rt_observed_gv)\n",
    "# tr5,m5 = model5(\"mcmc\",rt_observed_gv)\n",
    "# tr6,m6 = model6(\"mcmc\",rt_observed_gv)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# models_dic = {k:v for (k,v) in zip([\"协方差\",\"noncenter\",\"fix_prior\"],[tr1,tr5,tr6])}\n",
    "# comp = az.compare(models_dic)\n",
    "# comp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "\n",
       "<style>\n",
       "    /* Turns off some styling */\n",
       "    progress {\n",
       "        /* gets rid of default border in Firefox and Opera. */\n",
       "        border: none;\n",
       "        /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
       "        background-size: auto;\n",
       "    }\n",
       "    .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
       "        background: #F44336;\n",
       "    }\n",
       "</style>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      <progress value='70000' class='' max='70000' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      100.00% [70000/70000 08:30<00:00 Average Loss = 79,149]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Finished [100%]: Average Loss = 79,149\n",
      "Auto-assigning NUTS sampler...\n",
      "Initializing NUTS using jitter+adapt_diag...\n",
      "Multiprocess sampling (4 chains in 4 jobs)\n",
      "NUTS: [epsilon, lambda, lambda_sig_prior, lambda_i_mean_prior, tau_sig, tau_sig_pre, plus, slope, tau1]\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "<style>\n",
       "    /* Turns off some styling */\n",
       "    progress {\n",
       "        /* gets rid of default border in Firefox and Opera. */\n",
       "        border: none;\n",
       "        /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
       "        background-size: auto;\n",
       "    }\n",
       "    .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
       "        background: #F44336;\n",
       "    }\n",
       "</style>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      <progress value='16000' class='' max='16000' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      100.00% [16000/16000 03:42<00:00 Sampling 4 chains, 3 divergences]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Sampling 4 chains for 2_000 tune and 2_000 draw iterations (8_000 + 8_000 draws total) took 223 seconds.\n",
      "There were 3 divergences after tuning. Increase `target_accept` or reparameterize.\n",
      "The estimated number of effective samples is smaller than 200 for some parameters.\n",
      "Auto-assigning NUTS sampler...\n",
      "Initializing NUTS using jitter+adapt_diag...\n",
      "Multiprocess sampling (4 chains in 4 jobs)\n",
      "NUTS: [epsilon, lambda, tau_t2_T, lambda_sig_prior, lambda_i_mean_prior, tau_sig_t, slope, tau1]\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "<style>\n",
       "    /* Turns off some styling */\n",
       "    progress {\n",
       "        /* gets rid of default border in Firefox and Opera. */\n",
       "        border: none;\n",
       "        /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
       "        background-size: auto;\n",
       "    }\n",
       "    .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
       "        background: #F44336;\n",
       "    }\n",
       "</style>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      <progress value='16000' class='' max='16000' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      100.00% [16000/16000 02:26<00:00 Sampling 4 chains, 0 divergences]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Sampling 4 chains for 2_000 tune and 2_000 draw iterations (8_000 + 8_000 draws total) took 147 seconds.\n",
      "The number of effective samples is smaller than 10% for some parameters.\n",
      "Auto-assigning NUTS sampler...\n",
      "Initializing NUTS using jitter+adapt_diag...\n",
      "Multiprocess sampling (4 chains in 4 jobs)\n",
      "NUTS: [epsilon, lambda, lambda_sig_prior, lambda_i_mean_prior, taut, lambda_sig, lambda_mu, tau1]\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "<style>\n",
       "    /* Turns off some styling */\n",
       "    progress {\n",
       "        /* gets rid of default border in Firefox and Opera. */\n",
       "        border: none;\n",
       "        /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
       "        background-size: auto;\n",
       "    }\n",
       "    .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
       "        background: #F44336;\n",
       "    }\n",
       "</style>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      <progress value='40000' class='' max='40000' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      100.00% [40000/40000 04:25<00:00 Sampling 4 chains, 0 divergences]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Sampling 4 chains for 8_000 tune and 2_000 draw iterations (32_000 + 8_000 draws total) took 266 seconds.\n",
      "The rhat statistic is larger than 1.05 for some parameters. This indicates slight problems during sampling.\n",
      "The estimated number of effective samples is smaller than 200 for some parameters.\n"
     ]
    }
   ],
   "source": [
    "tr1,m1 = model1(\"vi\",rt_observed_gv,steps=70000)\n",
    "tr2,m2 = model2(\"mcmc\",rt_observed_gv)\n",
    "tr3,m3 = model3(\"mcmc\",rt_observed_gv)\n",
    "tr4,m4 = model4(\"mcmc\",rt_observed_gv)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "143313"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import gc\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "models_dic = {k:v for (k,v) in zip([\"协方差\",\"马尔可夫\",\"潜在增长\",\"单独估计\"],[tr1,tr2,tr3,tr4])}\n",
    "comp = az.compare(models_dic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>rank</th>\n",
       "      <th>loo</th>\n",
       "      <th>p_loo</th>\n",
       "      <th>d_loo</th>\n",
       "      <th>weight</th>\n",
       "      <th>se</th>\n",
       "      <th>dse</th>\n",
       "      <th>warning</th>\n",
       "      <th>loo_scale</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>马尔可夫</th>\n",
       "      <td>0</td>\n",
       "      <td>-78400.384654</td>\n",
       "      <td>441.507747</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>8.516063e-01</td>\n",
       "      <td>143.863151</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>False</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>协方差</th>\n",
       "      <td>1</td>\n",
       "      <td>-78467.204328</td>\n",
       "      <td>803.727432</td>\n",
       "      <td>66.819674</td>\n",
       "      <td>1.483937e-01</td>\n",
       "      <td>143.745523</td>\n",
       "      <td>13.828758</td>\n",
       "      <td>False</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>潜在增长</th>\n",
       "      <td>2</td>\n",
       "      <td>-78813.544274</td>\n",
       "      <td>994.933286</td>\n",
       "      <td>413.159620</td>\n",
       "      <td>1.561088e-11</td>\n",
       "      <td>143.336403</td>\n",
       "      <td>26.360064</td>\n",
       "      <td>False</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>单独估计</th>\n",
       "      <td>3</td>\n",
       "      <td>-78814.957675</td>\n",
       "      <td>978.808797</td>\n",
       "      <td>414.573021</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>143.348933</td>\n",
       "      <td>26.480676</td>\n",
       "      <td>False</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      rank           loo       p_loo       d_loo        weight          se  \\\n",
       "马尔可夫     0 -78400.384654  441.507747    0.000000  8.516063e-01  143.863151   \n",
       "协方差      1 -78467.204328  803.727432   66.819674  1.483937e-01  143.745523   \n",
       "潜在增长     2 -78813.544274  994.933286  413.159620  1.561088e-11  143.336403   \n",
       "单独估计     3 -78814.957675  978.808797  414.573021  0.000000e+00  143.348933   \n",
       "\n",
       "            dse  warning loo_scale  \n",
       "马尔可夫   0.000000    False       log  \n",
       "协方差   13.828758    False       log  \n",
       "潜在增长  26.360064    False       log  \n",
       "单独估计  26.480676    False       log  "
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "comp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "logL\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>-log_L</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>协方差</th>\n",
       "      <td>78041.824012</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>马尔可夫</th>\n",
       "      <td>78171.956599</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>潜在增长</th>\n",
       "      <td>78281.312247</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>单独估计</th>\n",
       "      <td>78291.446317</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "            -log_L\n",
       "协方差   78041.824012\n",
       "马尔可夫  78171.956599\n",
       "潜在增长  78281.312247\n",
       "单独估计  78291.446317"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "print(\"logL\")\n",
    "# print(f\"协方差:\\t\",tr1.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum().values)\n",
    "# print(f\"马尔可夫:\\t\",tr2.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum().values)\n",
    "# print(f\"潜在增长:\\t\",tr3.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum().values)\n",
    "pd.DataFrame([tr1.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum().values*-1,\n",
    "              tr2.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum().values*-1,\n",
    "              tr3.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum().values*-1,\n",
    "              tr4.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum().values*-1],\n",
    "             index=[\"协方差\",\"马尔可夫\",\"潜在增长\",\"单独估计\"],columns=[\"-log_L\"]).sort_values(by=\"-log_L\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/anaconda/anaconda3/envs/py_mc/lib/python3.7/site-packages/arviz/stats/stats.py:1637: UserWarning: For one or more samples the posterior variance of the log predictive densities exceeds 0.4. This could be indication of WAIC starting to fail. \n",
      "See http://arxiv.org/abs/1507.04544 for details\n",
      "  \"For one or more samples the posterior variance of the log predictive \"\n",
      "/home/anaconda/anaconda3/envs/py_mc/lib/python3.7/site-packages/arviz/stats/stats.py:1637: UserWarning: For one or more samples the posterior variance of the log predictive densities exceeds 0.4. This could be indication of WAIC starting to fail. \n",
      "See http://arxiv.org/abs/1507.04544 for details\n",
      "  \"For one or more samples the posterior variance of the log predictive \"\n",
      "/home/anaconda/anaconda3/envs/py_mc/lib/python3.7/site-packages/arviz/stats/stats.py:1637: UserWarning: For one or more samples the posterior variance of the log predictive densities exceeds 0.4. This could be indication of WAIC starting to fail. \n",
      "See http://arxiv.org/abs/1507.04544 for details\n",
      "  \"For one or more samples the posterior variance of the log predictive \"\n",
      "/home/anaconda/anaconda3/envs/py_mc/lib/python3.7/site-packages/arviz/stats/stats.py:1637: UserWarning: For one or more samples the posterior variance of the log predictive densities exceeds 0.4. This could be indication of WAIC starting to fail. \n",
      "See http://arxiv.org/abs/1507.04544 for details\n",
      "  \"For one or more samples the posterior variance of the log predictive \"\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>rank</th>\n",
       "      <th>waic</th>\n",
       "      <th>p_waic</th>\n",
       "      <th>d_waic</th>\n",
       "      <th>weight</th>\n",
       "      <th>se</th>\n",
       "      <th>dse</th>\n",
       "      <th>warning</th>\n",
       "      <th>waic_scale</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>马尔可夫</th>\n",
       "      <td>0</td>\n",
       "      <td>-78399.574315</td>\n",
       "      <td>440.697408</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>8.383816e-01</td>\n",
       "      <td>143.850526</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>True</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>协方差</th>\n",
       "      <td>1</td>\n",
       "      <td>-78463.760563</td>\n",
       "      <td>800.283667</td>\n",
       "      <td>64.186248</td>\n",
       "      <td>1.616184e-01</td>\n",
       "      <td>143.700457</td>\n",
       "      <td>13.819782</td>\n",
       "      <td>True</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>潜在增长</th>\n",
       "      <td>2</td>\n",
       "      <td>-78807.150032</td>\n",
       "      <td>988.539044</td>\n",
       "      <td>407.575716</td>\n",
       "      <td>2.205423e-12</td>\n",
       "      <td>143.263416</td>\n",
       "      <td>26.345540</td>\n",
       "      <td>True</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>单独估计</th>\n",
       "      <td>3</td>\n",
       "      <td>-78808.466742</td>\n",
       "      <td>972.317863</td>\n",
       "      <td>408.892426</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>143.272510</td>\n",
       "      <td>26.468151</td>\n",
       "      <td>True</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      rank          waic      p_waic      d_waic        weight          se  \\\n",
       "马尔可夫     0 -78399.574315  440.697408    0.000000  8.383816e-01  143.850526   \n",
       "协方差      1 -78463.760563  800.283667   64.186248  1.616184e-01  143.700457   \n",
       "潜在增长     2 -78807.150032  988.539044  407.575716  2.205423e-12  143.263416   \n",
       "单独估计     3 -78808.466742  972.317863  408.892426  0.000000e+00  143.272510   \n",
       "\n",
       "            dse  warning waic_scale  \n",
       "马尔可夫   0.000000     True        log  \n",
       "协方差   13.819782     True        log  \n",
       "潜在增长  26.345540     True        log  \n",
       "单独估计  26.468151     True        log  "
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "models_dic = {k:v for (k,v) in zip([\"协方差\",\"马尔可夫\",\"潜在增长\",\"单独估计\"],[tr1,tr2,tr3,tr4])}\n",
    "comp = az.compare(models_dic,ic=\"waic\")\n",
    "comp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>mean</th>\n",
       "      <th>sd</th>\n",
       "      <th>hdi_3%</th>\n",
       "      <th>hdi_97%</th>\n",
       "      <th>mcse_mean</th>\n",
       "      <th>mcse_sd</th>\n",
       "      <th>ess_bulk</th>\n",
       "      <th>ess_tail</th>\n",
       "      <th>r_hat</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>slope[0]</th>\n",
       "      <td>0.609</td>\n",
       "      <td>0.041</td>\n",
       "      <td>0.534</td>\n",
       "      <td>0.687</td>\n",
       "      <td>0.001</td>\n",
       "      <td>0.000</td>\n",
       "      <td>4418.0</td>\n",
       "      <td>5217.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>slope[1]</th>\n",
       "      <td>1.100</td>\n",
       "      <td>0.090</td>\n",
       "      <td>0.944</td>\n",
       "      <td>1.284</td>\n",
       "      <td>0.001</td>\n",
       "      <td>0.001</td>\n",
       "      <td>4862.0</td>\n",
       "      <td>5228.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>slope[2]</th>\n",
       "      <td>0.925</td>\n",
       "      <td>0.076</td>\n",
       "      <td>0.776</td>\n",
       "      <td>1.066</td>\n",
       "      <td>0.001</td>\n",
       "      <td>0.001</td>\n",
       "      <td>4171.0</td>\n",
       "      <td>4456.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>slope[3]</th>\n",
       "      <td>0.960</td>\n",
       "      <td>0.083</td>\n",
       "      <td>0.801</td>\n",
       "      <td>1.114</td>\n",
       "      <td>0.001</td>\n",
       "      <td>0.001</td>\n",
       "      <td>4383.0</td>\n",
       "      <td>4437.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "           mean     sd  hdi_3%  hdi_97%  mcse_mean  mcse_sd  ess_bulk  \\\n",
       "slope[0]  0.609  0.041   0.534    0.687      0.001    0.000    4418.0   \n",
       "slope[1]  1.100  0.090   0.944    1.284      0.001    0.001    4862.0   \n",
       "slope[2]  0.925  0.076   0.776    1.066      0.001    0.001    4171.0   \n",
       "slope[3]  0.960  0.083   0.801    1.114      0.001    0.001    4383.0   \n",
       "\n",
       "          ess_tail  r_hat  \n",
       "slope[0]    5217.0    1.0  \n",
       "slope[1]    5228.0    1.0  \n",
       "slope[2]    4456.0    1.0  \n",
       "slope[3]    4437.0    1.0  "
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "az.summary(tr2,\"slope\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/anaconda/anaconda3/envs/py_mc/lib/python3.7/site-packages/arviz/stats/stats.py:1637: UserWarning: For one or more samples the posterior variance of the log predictive densities exceeds 0.4. This could be indication of WAIC starting to fail. \n",
      "See http://arxiv.org/abs/1507.04544 for details\n",
      "  \"For one or more samples the posterior variance of the log predictive \"\n",
      "/home/anaconda/anaconda3/envs/py_mc/lib/python3.7/site-packages/arviz/stats/stats.py:1637: UserWarning: For one or more samples the posterior variance of the log predictive densities exceeds 0.4. This could be indication of WAIC starting to fail. \n",
      "See http://arxiv.org/abs/1507.04544 for details\n",
      "  \"For one or more samples the posterior variance of the log predictive \"\n",
      "/home/anaconda/anaconda3/envs/py_mc/lib/python3.7/site-packages/arviz/stats/stats.py:1637: UserWarning: For one or more samples the posterior variance of the log predictive densities exceeds 0.4. This could be indication of WAIC starting to fail. \n",
      "See http://arxiv.org/abs/1507.04544 for details\n",
      "  \"For one or more samples the posterior variance of the log predictive \"\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>rank</th>\n",
       "      <th>waic</th>\n",
       "      <th>p_waic</th>\n",
       "      <th>d_waic</th>\n",
       "      <th>weight</th>\n",
       "      <th>se</th>\n",
       "      <th>dse</th>\n",
       "      <th>warning</th>\n",
       "      <th>waic_scale</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>马尔可夫</th>\n",
       "      <td>0</td>\n",
       "      <td>-78399.574315</td>\n",
       "      <td>440.697408</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>8.383716e-01</td>\n",
       "      <td>143.850526</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>True</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>协方差</th>\n",
       "      <td>1</td>\n",
       "      <td>-78463.760563</td>\n",
       "      <td>800.283667</td>\n",
       "      <td>64.186248</td>\n",
       "      <td>1.616284e-01</td>\n",
       "      <td>143.700457</td>\n",
       "      <td>13.819782</td>\n",
       "      <td>True</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>潜在增长</th>\n",
       "      <td>2</td>\n",
       "      <td>-78807.150032</td>\n",
       "      <td>988.539044</td>\n",
       "      <td>407.575716</td>\n",
       "      <td>4.974132e-12</td>\n",
       "      <td>143.263416</td>\n",
       "      <td>26.345540</td>\n",
       "      <td>True</td>\n",
       "      <td>log</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      rank          waic      p_waic      d_waic        weight          se  \\\n",
       "马尔可夫     0 -78399.574315  440.697408    0.000000  8.383716e-01  143.850526   \n",
       "协方差      1 -78463.760563  800.283667   64.186248  1.616284e-01  143.700457   \n",
       "潜在增长     2 -78807.150032  988.539044  407.575716  4.974132e-12  143.263416   \n",
       "\n",
       "            dse  warning waic_scale  \n",
       "马尔可夫   0.000000     True        log  \n",
       "协方差   13.819782     True        log  \n",
       "潜在增长  26.345540     True        log  "
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "models_dic = {k:v for (k,v) in zip([\"协方差\",\"马尔可夫\",\"潜在增长\"],[tr1,tr2,tr3])}\n",
    "comp = az.compare(models_dic,ic=\"waic\")\n",
    "comp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): a bare `asd` line followed `rt_observed_gv.shape` as an\n",
    "# intentional NameError \"stop here\" marker; removed (and the error output\n",
    "# cleared) so Restart & Run All can complete without a failing cell.\n",
    "rt_observed_gv.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "$$\\textbf{\\tau} =  $$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.cov((tr1.posterior[\"tau\"].mean(axis=0).mean(axis=0)).T)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.array([[0.03932921, 0.03514879, 0.03941905, 0.03593094, 0.0340008 ],\n",
    "            [0.03514879, 0.04070189, 0.04140433, 0.03777904, 0.03566832],\n",
    "            [0.03941905, 0.04140433, 0.05161177, 0.04212687, 0.04068283],\n",
    "            [0.03593094, 0.03777904, 0.04212687, 0.04240615, 0.03676167],\n",
    "            [0.0340008 , 0.03566832, 0.04068283, 0.03676167, 0.03913315]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from scipy import stats\n",
    "pd.DataFrame((tr1.posterior[\"tau\"].mean(axis=0).mean(axis=0)).values).corr()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.cov((tr4.posterior[\"tau\"].mean(axis=0).mean(axis=0)).T)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pd.DataFrame((tr3.posterior[\"tau\"].mean(axis=0).mean(axis=0)).values).corr()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "u1[\"m2_mean\"] = u2[\"mean\"]\n",
    "u1[\"m3_mean\"] = u3[\"mean\"]\n",
    "u1[[\"mean\",\"m2_mean\",\"m3_mean\"]].corr()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "models_dic = {k:v for (k,v) in zip([\"协方差\",\"马尔科夫t2=t1+t1*b\",\"潜增长t=t1+t*b\",\"单独估计\"],[tr1,tr2,tr3,tr4])}\n",
    "comp = az.compare(models_dic,ic=\"waic\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "comp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "comp = az.compare(models_dic)\n",
    "comp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(tr1.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum())\n",
    "print(tr2.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum())\n",
    "print(tr3.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum())\n",
    "print(tr4.log_likelihood[\"rt\"].mean(axis=0).mean(axis=0).sum())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): `tr` does not appear to be defined anywhere above (only\n",
    "# tr1..tr6 exist) and would raise NameError on Restart & Run All; commented\n",
    "# out pending confirmation that it is defined elsewhere.\n",
    "# tr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "S = np.eye(3)*np.array([1,1.25**0.5,1.5**0.5])\n",
    "cor =np.array([[1,0.8,0.64],\n",
    "            [0.8,1,0.8],\n",
    "            [0.64,0.8,1]])\n",
    "COV = S.dot(cor).dot(S)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "N,I,T=500,30,3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "COV"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tau = np.random.multivariate_normal([0,0.5,1],COV,size=(N))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tau.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.cov(tau.T)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.cov(tau.T).flatten()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Item parameters per time point (T x I): time intensity (lambda) and\n",
    "# speed sensitivity (phi), plus a per-item residual spread.\n",
    "lambda_i = np.random.normal(4,0.5,(T,I))\n",
    "phi_i = np.random.normal(1,0.25,(T,I))\n",
    "omega = np.random.uniform(1,5,(T,I))\n",
    "# NOTE(review): despite the precision-style name, `epsilon` is passed to\n",
    "# np.random.normal below as the *standard deviation* -- confirm intent.\n",
    "epsilon = 1/omega**2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reshape tau to (T, N, 1) so it broadcasts against the (T, 1, I) item\n",
    "# parameters, giving log-RT means of shape (T, N, I).\n",
    "# NOTE(review): this overwrites `tau` in place, so the cell is not\n",
    "# idempotent -- re-running it without re-simulating tau will raise.\n",
    "tau = tau.T.reshape(T,N,1)\n",
    "rt_mean = phi_i.reshape(T,1,I)*(lambda_i.reshape(T,1,I)-tau)\n",
    "# Gaussian noise on the log scale, then exponentiate -> lognormal RTs.\n",
    "rt = rt_mean+np.random.normal(0,epsilon.reshape(T,1,I),size=(T,N,I))\n",
    "rt = np.exp(rt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of strictly-lower-triangular entries of a T x T matrix, i.e. the\n",
    "# length of the free Cholesky vector consumed by tri() below.\n",
    "# Fixed: this was previously assigned to `cor`, silently clobbering the\n",
    "# correlation matrix defined earlier in the notebook (hidden-state bug);\n",
    "# also fixed the `sigama` typo. Neither name is used downstream.\n",
    "n_lower_tri = np.arange(1,T).sum()\n",
    "sigma = np.random.normal(0,2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def tri(T,mat,values):\n",
    "    \"\"\"Fill the strictly-lower triangle of `mat` with `values`.\n",
    "\n",
    "    T      : int, matrix dimension (mat is T x T).\n",
    "    mat    : theano tensor, starting matrix (here: diagonal of scales).\n",
    "    values : theano vector of length T*(T-1)/2, consumed row by row.\n",
    "\n",
    "    Returns `mat` with below-diagonal entries replaced, i.e. a lower-\n",
    "    triangular Cholesky-style factor.\n",
    "    \"\"\"\n",
    "    i=0\n",
    "    # Loop variables `tt_`/`ttt` avoid shadowing the theano.tensor alias `tt`.\n",
    "    for tt_ in range(T):\n",
    "        for ttt in range(T):\n",
    "            if tt_>ttt:\n",
    "                mat = tt.set_subtensor(mat[tt_,ttt],values[i])\n",
    "                i+=1\n",
    "    return mat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Longitudinal lognormal RT model with a hand-rolled Cholesky prior on the\n",
    "# latent-speed covariance (compare m2 below, which uses LKJCholeskyCov).\n",
    "with pm.Model() as m:\n",
    "    # Diagonal scales; the first is fixed to 1 for identification.\n",
    "    s_pre = pm.InverseGamma(\"s\",1,1,shape=(T-1))\n",
    "    s_pre = pm.Deterministic(\"s_pre\",tt.concatenate(([1],s_pre))) \n",
    "    s_mat = tt.eye(T)*s_pre\n",
    "    # Free below-diagonal entries of the Cholesky factor, T*(T-1)/2 of them.\n",
    "    L = pm.Normal(\"L\",0.5,2,shape=(sum(np.arange(T)))) \n",
    "    L = tri(T,s_mat,L)\n",
    "    \n",
    "    cov_pre = pm.Deterministic(\"COV\",(L.dot(L.T)))\n",
    "    # NOTE(review): \"corr\" stores the Cholesky factor L itself, not a\n",
    "    # correlation matrix -- the trace name is misleading; confirm before use.\n",
    "    corr = pm.Deterministic(\"corr\",L)\n",
    "    # First latent-speed mean pinned to 0 for identification.\n",
    "    tau_mean_pre = tt.concatenate(([0],pm.Normal(\"tau_mean\",0,2,shape=(T-1)))) \n",
    "    tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,cov=cov_pre,shape=(N,T))\n",
    "    lambda_i_pre = pm.Normal(\"lambda\",4,1,shape=(T,1,I))\n",
    "    phi_i_pre = pm.Normal(\"phi\",1,1,shape=(T,1,I))\n",
    "    epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,1,I))\n",
    "    # (T,1,I) item params broadcast against (T,N,1) speeds -> (T,N,I) means.\n",
    "    rt_mean = (lambda_i_pre-tau_pre.T.reshape((T,N,1)))*phi_i_pre\n",
    "    Y = pm.Lognormal(\"rt\",rt_mean,epsilon_i_pre,observed=rt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# with pm.Model() as m:\n",
    "#     s_pre = pm.InverseGamma(\"s\",1,1,shape=(T-1))\n",
    "#     s_pre = tt.concatenate(([1],s_pre)) \n",
    "#     s_mat = tt.eye(T)*s_pre\n",
    "#     L_tri = pm.Normal(\"L_tri\",0,2,shape=(sum(np.arange(T)))) \n",
    "#     L = tri(T,s_mat,L_tri)\n",
    "#     cov_pre = pm.Deterministic(\"COV\",L.dot(L.T))\n",
    "#     tau_mean_pre = tt.concatenate(([0],pm.Normal(\"tau_mean\",0,2,shape=(T-1)))) \n",
    "#     tau_pre = pm.MvNormal(\"tau\",tau_mean_pre,cov=cov_pre,shape=(N,T))\n",
    "#     lambda_i_pre = pm.Normal(\"lambda\",4,1,shape=(T,1,I))\n",
    "#     phi_i_pre = pm.Normal(\"phi\",1,1,shape=(T,1,I))\n",
    "#     epsilon_i_pre = pm.InverseGamma(\"epsilon\",1,1,shape=(T,1,I))\n",
    "#     rt_mean = (lambda_i_pre-tau_pre.T.reshape((T,N,1)))*phi_i_pre\n",
    "#     Y = pm.Lognormal(\"rt\",rt_mean,epsilon_i_pre,observed=rt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "L.tag.test_value"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cov_pre.tag.test_value"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with m:\n",
    "    # Fixed: pm.ADVI takes no `Minibatches` argument. In PyMC3, minibatch\n",
    "    # ADVI is set up by wrapping the data in pm.Minibatch and passing\n",
    "    # `total_size` to the observed variable, not via an ADVI kwarg.\n",
    "    va = pm.ADVI()\n",
    "    approx = pm.fit(n=70000, method=va, obj_n_mc=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tr = approx.sample(draws=2000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import arviz as az\n",
    "# Fixed: `from scipy.stats import stats` pulls in the deprecated private\n",
    "# scipy.stats.stats module; import the public namespace instead\n",
    "# (pearsonr etc. live in scipy.stats).\n",
    "from scipy import stats\n",
    "az.summary(tr,\"COV\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "print(stats.pearsonr(az.summary(tr,\"tau\")[\"mean\"].values.flatten(),tau.T.flatten()))\n",
    "print(stats.pearsonr(az.summary(tr,\"lambda\")[\"mean\"].values.flatten(),lambda_i.flatten()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with m:\n",
    "    trace = pm.sample(2000,tune=2000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(stats.pearsonr(az.summary(trace,\"tau\")[\"mean\"].values.flatten(),tau.T.flatten()))\n",
    "print(stats.pearsonr(az.summary(trace,\"lambda\")[\"mean\"].values.flatten(),lambda_i.flatten()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "az.summary(trace,\"COV\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same RT model as `m`, but using PyMC3's built-in LKJ Cholesky prior on\n",
    "# the latent-speed covariance instead of the hand-rolled tri() factor.\n",
    "with pm.Model() as m2:\n",
    "    tau_mean_ = pm.Normal(\"tau_mean\",0,2,shape=(T))\n",
    "    # compute_corr=True returns (cholesky factor, correlations, std devs).\n",
    "    chol, corr, stds = pm.LKJCholeskyCov(\n",
    "        \"chol\", n=T, eta=2.0, sd_dist=pm.Exponential.dist(1.0), compute_corr=True\n",
    "    )\n",
    "    pm.Deterministic(\"corr\",corr)\n",
    "    cov_pre = pm.Deterministic(\"cov\", chol.dot(chol.T))\n",
    "    # MvNormal parameterised directly with the Cholesky factor.\n",
    "    tau_ = pm.MvNormal(\"tau\",tau_mean_,chol=chol,shape=(N,T))\n",
    "    lambda_i_ = pm.Normal(\"lambda\",4,1,shape=(T,1,I))\n",
    "    phi_i_ = pm.Normal(\"phi\",1,1,shape=(T,1,I))\n",
    "    epsilon_i_ = pm.InverseGamma(\"epsilon\",1,1,shape=(T,1,I))\n",
    "    # (T,1,I) item params broadcast against (T,N,1) speeds -> (T,N,I) means.\n",
    "    rt_mean = (lambda_i_-tau_.T.reshape((T,N,1)))*phi_i_\n",
    "    Y = pm.Lognormal(\"rt\",rt_mean,epsilon_i_,observed=rt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with m2:\n",
    "    trace2 = pm.sample(2000,tune=2000)"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "d099968071987d153bdd47ebd8318547dae2f0a1c0c5e02dbbb956a381b0529a"
  },
  "kernelspec": {
   "display_name": "Python 3.9.5 ('py_mc')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.12"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
