{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DRAMMIMO\n",
    "Delayed Rejection Adaptive Metropolis Multi Input Multi Output\n",
    "\n",
    "By: Wei Gao (wg14@my.fsu.edu)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Initialization.\n",
    "Import necessary modules."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import scipy.linalg as la\n",
    "import scipy.stats as st\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.patches as mpatch\n",
    "import matplotlib.lines as mline\n",
    "import os"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define the function for getting estimation chains.\n",
    "Estimation chains of model parameters are generated based on the modified DRAM algorithm."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "def getDRAMMIMOChains(data,model,modelParams,DRAMParams):\n",
     "    \"\"\"Generate model parameter estimation chains with the modified\n",
     "    DRAM (Delayed Rejection Adaptive Metropolis) algorithm extended to\n",
     "    multiple data sets (multi input, multi output).\n",
     "\n",
     "    Parameters\n",
     "    ----------\n",
     "    data : dict\n",
     "        'xdata','ydata': lists (one entry per data set) of (n,1) arrays.\n",
     "    model : dict\n",
     "        'fun','errFun','ssFun': lists (one entry per data set) of\n",
     "        callables. Only 'errFun' is invoked here; 'fun' and 'ssFun' are\n",
     "        used for length-consistency checks only.\n",
     "    modelParams : dict\n",
     "        'values': initial parameter guesses, 'lowerLimits'/'upperLimits':\n",
     "        parameter bounds, 'extra': per-data-set extra model arguments.\n",
     "    DRAMParams : dict\n",
     "        'numDRAMIterationsDone', 'numDRAMIterations', and\n",
     "        'previousResults' (with 'chain_q', 'last_cov_q',\n",
     "        'chain_cov_err') for continuing a previous run.\n",
     "\n",
     "    Returns\n",
     "    -------\n",
     "    chain_q : ndarray, shape (M,p)\n",
     "        Chain of model parameter estimations.\n",
     "    last_cov_q : ndarray, shape (p,p)\n",
     "        Latest covariance matrix of the parameter estimations.\n",
     "    chain_cov_err : ndarray, shape (M,N,N)\n",
     "        Chain of model prediction error covariances.\n",
     "    \"\"\"\n",
     "    ## Initialize parameters.\n",
     "\n",
     "    # Number of data sets.\n",
     "    numDataSets = np.array([len(data['xdata']),len(data['ydata']), \\\n",
     "                            len(model['fun']),len(model['errFun']),len(model['ssFun']), \\\n",
     "                            len(modelParams['extra'])])\n",
     "    if np.any(numDataSets-numDataSets[0]):\n",
     "        raise ValueError('Unequal numbers of sets in either data or model.')\n",
     "    N = numDataSets[0]\n",
     "    # Number of data within each set.\n",
     "    numDataPoints = np.tile(0,(N,2))\n",
     "    for i in np.arange(0,N):\n",
     "        numDataPoints[i,:] = np.array([data['xdata'][i].shape[0],data['ydata'][i].shape[0]])\n",
     "    if np.any(numDataPoints-numDataPoints[0,0]):\n",
     "        raise ValueError('Unequal numbers of data points.')\n",
     "    n = numDataPoints[0,0]\n",
     "    # Number of model parameters for estimation.\n",
     "    p = len(modelParams['values'])\n",
     "    # Number of estimation iterations already done.\n",
     "    Mo = DRAMParams['numDRAMIterationsDone']\n",
     "    if Mo!=1 and Mo!=DRAMParams['previousResults']['chain_q'].shape[0]:\n",
     "        raise ValueError('The number of given previous results does not match the number of iterations already done.')\n",
     "    # Number of estimation iterations to be done in total.\n",
     "    M = DRAMParams['numDRAMIterations']\n",
     "\n",
     "    # Best model parameter estimation.\n",
     "    if Mo==1:\n",
     "        q = np.array([modelParams['values']]).T\n",
     "    else:\n",
     "        q = np.array([DRAMParams['previousResults']['chain_q'][-1,:]]).T\n",
     "    # Old model parameter estimation.\n",
     "    q0 = np.zeros(q.shape)\n",
     "    # 1st stage new model parameter estimation.\n",
     "    q1 = np.zeros(q.shape)\n",
     "    # 2nd stage new model parameter estimation.\n",
     "    q2 = np.zeros(q.shape)\n",
     "\n",
     "    # Model prediction errors caused by q.\n",
     "    err = np.zeros((n,N))\n",
     "    # Calculate errors.\n",
     "    for i in np.arange(N):\n",
     "        err[:,i] = model['errFun'][i](q,data['xdata'][i],data['ydata'][i],modelParams['extra'][i]).T\n",
     "    # Model prediction errors caused by q0.\n",
     "    err0 = np.zeros((n,N))\n",
     "    # Model prediction errors caused by q1.\n",
     "    err1 = np.zeros((n,N))\n",
     "    # Model prediction errors caused by q2.\n",
     "    err2 = np.zeros((n,N))\n",
     "\n",
     "    # Initialize the covariance matrix of parameter estimations and its inverse.\n",
     "    if Mo==1:\n",
     "        # Diagonal of (5% of q)^2, with 1.0 for parameters that start at 0.\n",
     "        cov_q = np.diag(((q!=0)*(0.05*q)**2+(q==0)*1.0)[:,0])\n",
     "    else:\n",
     "        cov_q = DRAMParams['previousResults']['last_cov_q']\n",
     "    if p==1:\n",
     "        cov_q_inv = 1.0/cov_q\n",
     "    else:\n",
     "        cov_q_inv = la.inv(cov_q)\n",
     "\n",
     "    # Initialize the covariance matrix of parameter estimation errors and its inverse.\n",
     "    if Mo==1:\n",
     "        cov_err = np.diag((np.tile(1E-4,(1,N)))[0,:])\n",
     "    else:\n",
     "        cov_err = DRAMParams['previousResults']['chain_cov_err'][-1,:,:]\n",
     "    if N==1:\n",
     "        cov_err_inv = 1.0/cov_err\n",
     "    else:\n",
     "        cov_err_inv = la.inv(cov_err)\n",
     "\n",
     "    # Parameters for inverse-wishart distribution.\n",
     "    psi_s = np.diag((np.tile(1E-4,(1,N)))[0,:])\n",
     "    nu_s = 1\n",
     "\n",
     "    # Parameters for Adaptive Metropolis.\n",
     "    # Adaptive interval.\n",
     "    ko = 100\n",
     "    # Adaptive scale.\n",
     "    sp = 2.38/np.sqrt(p)\n",
     "    # Up-to-date mean parameter estimations.\n",
     "    if Mo==1:\n",
     "        qBar = q.copy()\n",
     "    else:\n",
     "        qBar = np.array([np.mean(DRAMParams['previousResults']['chain_q'],0)]).T\n",
     "    # Up-to-date covariance matrix of parameter estimations.\n",
     "    qCov = cov_q.copy()\n",
     "\n",
     "    # Parameters for Delayed Rejection.\n",
     "    # Maximum random walk step size.\n",
     "    randomWalk = la.cholesky(cov_q)\n",
     "    # 1st stage random walk maximum step size.\n",
     "    R1 = randomWalk/1.0\n",
     "    # 2nd stage random walk maximum step size.\n",
     "    R2 = randomWalk/5.0\n",
     "\n",
     "    ## Initialize the chain.\n",
     "\n",
     "    # The chain of model parameter estimations for posterior densities.\n",
     "    chain_q = np.zeros((M,p))\n",
     "    if Mo==1:\n",
     "        chain_q[0,:] = q.T.copy()\n",
     "    else:\n",
     "        chain_q[:Mo,:] = DRAMParams['previousResults']['chain_q']\n",
     "\n",
     "    # The chain of model parameter estimation covariances is not of interest for now.\n",
     "    # Record the latest value instead.\n",
     "    last_cov_q = cov_q.copy()\n",
     "\n",
     "    # The chain of model prediction errors is not of interest for now.\n",
     "    \n",
     "    # The chain of model prediction error covariances for uncertainty propagation.\n",
     "    chain_cov_err = np.zeros((M,N,N))\n",
     "    if Mo==1:\n",
     "        chain_cov_err[0,:,:] = cov_err\n",
     "    else:\n",
     "        chain_cov_err[:Mo,:,:] = DRAMParams['previousResults']['chain_cov_err']\n",
     "\n",
     "    # Generate the chain.\n",
     "    for k in np.arange(Mo,M,1):\n",
     "        # Display current model parameter estimations every xth iteration.\n",
     "        # Modify the number in mod() after k as needed.\n",
     "        # Comment this out if not necessary, i.e. to avoid time delay.\n",
     "        if np.mod(k,200)==0:\n",
     "            print(np.vstack((k,q)).T)\n",
     "        # Save current estimation chain every yth iteration.\n",
     "        # Modify the number in mod() after k+1 as needed.\n",
     "        # Comment this out if not necessary, i.e. to avoid time delay.\n",
     "        if np.mod(k,1000)==0:\n",
     "            np.savez_compressed(os.getcwd()+'/chains'+str(k), \\\n",
     "                                chain_q=chain_q, \\\n",
     "                                last_cov_q=last_cov_q, \\\n",
     "                                chain_cov_err=chain_cov_err)\n",
     "\n",
     "        ######## Start of Delayed Rejection ########\n",
     "\n",
     "        # Record the best guess from last step as the old guess.\n",
     "        q0 = q.copy()\n",
     "        err0 = err.copy()\n",
     "\n",
     "        # 1st stage Random Walk.\n",
     "        q1 = q0+np.array([np.dot(R1.T,np.random.randn(len(q)))]).T # Laine\n",
     "        \n",
     "        if any(q1.T[0,:]<modelParams['lowerLimits']) or any(q1.T[0,:]>modelParams['upperLimits']):\n",
     "            # If the new guess is out of the bounds ...\n",
     "            err1 = np.tile(float('inf'),(n,N))\n",
     "            SS0 = np.trace(np.dot(np.dot(err0.T,err0),cov_err_inv))\n",
     "            SS1 = float('inf')\n",
     "            pi10 = 0.0\n",
     "            alpha10 = min(1,pi10)\n",
     "        else:\n",
     "            # If the new guess is within the bounds ...\n",
     "            for i in np.arange(N):\n",
     "                err1[:,i] = model['errFun'][i](q1,data['xdata'][i],data['ydata'][i],modelParams['extra'][i]).T\n",
     "\n",
     "            if np.any(np.isnan(err1)):\n",
     "                # If the new guess is causing the model response to be NaN ...\n",
     "                err1 = np.tile(float('inf'),(n,N))\n",
     "                SS0 = np.trace(np.dot(np.dot(err0.T,err0),cov_err_inv))\n",
     "                SS1 = float('inf')\n",
     "                pi10 = 0;\n",
     "                alpha10 = min(1,pi10)\n",
     "            else:\n",
     "                # If the new guess is okay ...\n",
     "                SS0 = np.trace(np.dot(np.dot(err0.T,err0),cov_err_inv))\n",
     "                SS1 = np.trace(np.dot(np.dot(err1.T,err1),cov_err_inv))\n",
     "                # pi(q1|v)/pi(q0|v)\n",
     "                pi10 = np.exp(-0.5*(SS1-SS0))\n",
     "                # alpha(q1|q0)\n",
     "                alpha10 = min(1,pi10)\n",
     "\n",
     "        # Decide whether to accept the 1st stage new guess.\n",
     "        if alpha10>np.random.rand():\n",
     "            # Accept the 1st stage new guess.\n",
     "\n",
     "            # Record the 1st stage new guess as the best guess.\n",
     "            q = q1.copy()\n",
     "            err = err1.copy()\n",
     "\n",
     "        else:\n",
     "            # Reject the 1st stage new guess.\n",
     "\n",
     "            # 2nd stage Random Walk.\n",
     "            # q2 = q0+np.array([np.dot(R2,np.random.randn(len(q)))]).T # Smith\n",
     "            q2 = q0+np.array([np.dot(R2.T,np.random.randn(len(q)))]).T # Laine\n",
     "\n",
     "            if any(q2.T[0,:]<modelParams['lowerLimits']) or any(q2.T[0,:]>modelParams['upperLimits']):\n",
     "                # If the new guess is out of the bounds ...\n",
     "                err2 = np.tile(float('inf'),(n,N))\n",
     "                SS2 = float('inf')\n",
     "                pi20 = 0.0\n",
     "                pi12 = 0.0\n",
     "                alpha20 = 0.0\n",
     "                alpha210 = 0.0\n",
     "            else:\n",
     "                # If the new guess is within the bounds ...\n",
     "                for i in np.arange(N):\n",
     "                    err2[:,i] = model['errFun'][i](q2,data['xdata'][i],data['ydata'][i],modelParams['extra'][i]).T\n",
     "\n",
     "                if np.any(np.isnan(err2)):\n",
     "                    # If the new guess is causing the model response to be NaN ...\n",
     "                    err2 = np.tile(float('inf'),(n,N))\n",
     "                    SS2 = float('inf')\n",
     "                    pi20 = 0.0\n",
     "                    pi12 = 0.0\n",
     "                    alpha12 = 0.0\n",
     "                    alpha210 = 0.0\n",
     "                else:\n",
     "                    # If the new guess is okay ...\n",
     "                    SS2 = np.trace(np.dot(np.dot(err2.T,err2),cov_err_inv))\n",
     "                    # pi(q2|v)/pi(q0|v)\n",
     "                    pi20 = np.exp(-0.5*(SS2-SS0))\n",
     "                    # J(q1|q2)\n",
     "                    J12 = np.exp(-0.5*np.dot(np.dot((q1-q2).T,cov_q_inv),(q1-q2)))[0][0]\n",
     "                    # J(q1|q0)\n",
     "                    J10 = np.exp(-0.5*np.dot(np.dot((q1-q0).T,cov_q_inv),(q1-q0)))[0][0]\n",
     "                    # pi(q1|v)/pi(q2|v)\n",
     "                    pi12 = np.exp(-0.5*(SS1-SS2))\n",
     "                    # alpha(q1|q2)\n",
     "                    alpha12 = min(1,pi12)\n",
     "                    # alpha(q2|q0,q1)       \n",
     "                    # Guard: when alpha12==1 the numerator (1-alpha12) is 0.\n",
     "                    if alpha12==1:\n",
     "                        alpha210 = 0\n",
     "                    else:\n",
     "                        alpha210 = min(1,pi20*J12/J10*(1-alpha12)/(1-alpha10))\n",
     "\n",
     "            # Decide whether to accept the 2nd stage new guess.\n",
     "            if alpha210>np.random.rand():\n",
     "                # Accept the 2nd stage new guess.\n",
     "\n",
     "                # Record the 2nd stage new guess as the best guess.\n",
     "                q = q2.copy()\n",
     "                err = err2.copy()\n",
     "\n",
     "        ######## End of Delayed Rejection ########\n",
     "\n",
     "        ######## Start of Adaptive Metropolis ########\n",
     "\n",
     "        # Record the chain.\n",
     "        chain_q[k,:] = q.T.copy()\n",
     "        last_cov_q = cov_q.copy()\n",
     "        chain_cov_err[k,:,:] = cov_err.copy()\n",
     "\n",
     "        # Update cov_err and cov_err_inv.\n",
     "        # Draw from the conjugate inverse-wishart update of the error covariance.\n",
     "        cov_err = st.invwishart.rvs(nu_s+n,psi_s+np.dot(err.T,err),size=1,random_state=None)\n",
     "        if N==1:\n",
     "            cov_err_inv = 1.0/cov_err\n",
     "        else:\n",
     "            cov_err_inv = la.inv(cov_err)\n",
     "\n",
     "        # Update cov_q and cov_q_inv.\n",
     "        # No update in the 1st round (1 round = ko steps).\n",
     "        if k==ko-1:\n",
     "            # Calculate at the end of the 1st round (ko-th step).\n",
     "            # Mean parameter estimations.\n",
     "            qBar = np.array([np.mean(chain_q[0:ko,:],0)]).T\n",
     "            # Covariance of parameter estimations.\n",
     "            qCov = np.cov(chain_q[0:ko,:].T)\n",
     "        elif k>=ko:\n",
     "            # Keep calculating after the ko-th step.\n",
     "            # Mean parameter estimations (recursive update).\n",
     "            qBarOld = qBar.copy()\n",
     "            qBar = (k*qBarOld+q)/(k+1)\n",
     "            # Covariance of parameter estimations (recursive update).\n",
     "            qCovOld = qCov.copy()\n",
     "            qCov = (k-1.0)/k*qCovOld+1.0/k*(k*np.dot(qBarOld,qBarOld.T)-(k+1)*np.dot(qBar,qBar.T)+np.dot(q,q.T))\n",
     "\n",
     "            # Update at the end of every round since the ko-th step.\n",
     "            # NOTE(review): the adaptation phase depends on Mo here, so\n",
     "            # continued runs adapt at different offsets — confirm intended.\n",
     "            if np.mod(k+Mo,ko)==0:\n",
     "                cov_q = qCov.copy();\n",
     "                if p==1:\n",
     "                    cov_q_inv = 1.0/cov_q\n",
     "                else:\n",
     "                    cov_q_inv = la.inv(cov_q)\n",
     "                randomWalk = la.cholesky(cov_q)\n",
     "                R1 = randomWalk*sp\n",
     "                R2 = randomWalk*sp/5.0\n",
     "\n",
     "        ######## End of Adaptive Metropolis ########\n",
     "\n",
     "    return chain_q,last_cov_q,chain_cov_err"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define the function for getting posterior densities.\n",
     "Posterior densities of model parameters are obtained from the estimation chains using a gaussian kernel."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "def getDRAMMIMODensities(qChain):\n",
     "    \"\"\"Estimate marginal posterior densities of each parameter chain\n",
     "    with a gaussian kernel density estimate.\n",
     "\n",
     "    Parameters\n",
     "    ----------\n",
     "    qChain : ndarray, shape (numIterations,numChains)\n",
     "        Estimation chain, one column per model parameter.\n",
     "\n",
     "    Returns\n",
     "    -------\n",
     "    qVals : ndarray, shape (100,numChains)\n",
     "        Evaluation nodes spanning each chain's range, padded by 8%.\n",
     "    qProbs : ndarray, shape (100,numChains)\n",
     "        Kernel density estimates at the corresponding nodes.\n",
     "    \"\"\"\n",
     "    numIterations = qChain.shape[0]\n",
     "    numChains = qChain.shape[1]\n",
     "    numNodes = 100\n",
     "    qMin = np.amin(qChain,0)\n",
     "    qMax = np.amax(qChain,0)\n",
     "    qRange = qMax-qMin\n",
     "    qVals = np.zeros([numNodes,numChains])\n",
     "    qProbs = np.zeros(qVals.shape)\n",
     "\n",
     "    for i in np.arange(numChains):\n",
     "        qVals[:,i] = np.linspace(qMin[i]-0.08*qRange[i],qMax[i]+0.08*qRange[i],numNodes)\n",
     "\n",
     "        # Interpolated first and third quartiles of the sorted chain,\n",
     "        # used below to form the interquartile range.\n",
     "        chainSorted = np.sort(qChain[:,i])\n",
     "        i1 = int(np.floor((numIterations+1)/4.0))\n",
     "        i3 = int(np.floor((numIterations+1)/4.0*3.0))\n",
     "        f1 = (numIterations+1)/4.0-i1;\n",
     "        f3 = (numIterations+1)/4.0*3.0-i3\n",
     "        q1 = (1-f1)*chainSorted[i1-1]+f1*chainSorted[i1]\n",
     "        q3 = (1-f3)*chainSorted[i3-1]+f3*chainSorted[i3]\n",
     "        iRange = q3-q1\n",
     "\n",
     "        # Kernel bandwidth: 1.06*min(std, IQR/1.34)*n^(-1/5)\n",
     "        # (Silverman's rule of thumb); fall back to std alone when the\n",
     "        # interquartile range is degenerate.\n",
     "        if iRange<=0:\n",
     "            s = 1.06*np.std(qChain[:,i])*np.power(numIterations,-1.0/5.0)\n",
     "        else:\n",
     "            s = 1.06*np.min([np.std(qChain[:,i]),iRange/1.34])*np.power(numIterations,-1.0/5.0)\n",
     "\n",
     "        # Average gaussian kernels centered at the chain samples.\n",
     "        for j in np.arange(numNodes):\n",
     "            err = (qVals[j,i]-qChain[:,i])/s\n",
     "            qProbs[j,i] = 1.0/numIterations*np.sum(np.exp(-0.5*np.power(err,2))/np.sqrt(2*np.pi))/s\n",
     "\n",
     "    return qVals,qProbs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define the function for getting credible and prediction intervals.\n",
    "Credible and prediction intervals are generated by propagating uncertainty of model parameters through the models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "def getDRAMMIMOIntervals(data,model,modelParams,chain_q,chain_cov_err,nSample):\n",
     "    \"\"\"Propagate parameter and error uncertainty through the models to\n",
     "    obtain 95% credible and prediction intervals.\n",
     "\n",
     "    Parameters\n",
     "    ----------\n",
     "    data : dict\n",
     "        'xdata': list (one entry per data set) of (n,1) input arrays.\n",
     "    model : dict\n",
     "        'fun': list (one entry per data set) of model response callables.\n",
     "    modelParams : dict\n",
     "        'extra': per-data-set extra model arguments.\n",
     "    chain_q : ndarray, shape (m,p)\n",
     "        Chain of model parameter estimations.\n",
     "    chain_cov_err : ndarray, shape (m,N,N)\n",
     "        Chain of model prediction error covariances.\n",
     "    nSample : int\n",
     "        Number of samples drawn from the chain (the whole chain in order\n",
     "        if nSample equals the chain length, otherwise drawn at random).\n",
     "\n",
     "    Returns\n",
     "    -------\n",
     "    credLims, predLims : ndarray, shape (N,3,n)\n",
     "        Credible and prediction limits; the middle row is the median and\n",
     "        the outer rows are the 2.5% and 97.5% quantiles.\n",
     "    \"\"\"\n",
     "    # Initialize parameters. Using 95% intervals.\n",
     "    xData = data['xdata']\n",
     "    lims = [0.025, 0.5, 0.975]\n",
     "    m = chain_q.shape[0]\n",
     "    N = len(xData)\n",
     "    n = xData[0].shape[0]\n",
     "\n",
     "    # Get the indices of points to be pulled out of the estimation chain.\n",
     "    if nSample == m:\n",
     "        iSample = np.arange(0,m,1).astype(int)\n",
     "    else:\n",
     "        iSample = np.floor(np.random.rand(nSample)*m).astype(int)\n",
     "\n",
     "    # Sample the estimation chain for the credible region as ysave and the prediction region as osave.\n",
     "    # The prediction samples add gaussian noise drawn from the sampled\n",
     "    # error covariance to the model response.\n",
     "    ySave = np.zeros((N,nSample,n))\n",
     "    oSave = np.zeros((N,nSample,n))\n",
     "    for iiSample in np.arange(nSample):\n",
     "        qi = np.array([chain_q[iSample[iiSample],:]]).T\n",
     "        randError = np.array([np.random.multivariate_normal(np.zeros(N),chain_cov_err[iSample[iiSample],:,:])]).T\n",
     "        for i in np.arange(N):\n",
     "            y = model['fun'][i](qi,xData[i],modelParams['extra'][i])\n",
     "            ySave[i,iiSample,:] = y.T\n",
     "            oSave[i,iiSample,:] = y.T+randError[i,0]\n",
     "\n",
     "    # Interpolate the credible and prediction intervals.\n",
     "    # Quantiles are taken pointwise over the sorted samples at each x.\n",
     "    credLims = np.zeros((N,len(lims),n))\n",
     "    predLims = np.zeros((N,len(lims),n))\n",
     "    for i in np.arange(N):\n",
     "        ySaveSorted = np.sort(ySave[i,:,:],axis=0)\n",
     "        oSaveSorted = np.sort(oSave[i,:,:],axis=0)\n",
     "        for j in np.arange(n):\n",
     "            credLims[i,:,j] = np.interp(np.array(lims)*(nSample-1),np.arange(nSample),ySaveSorted[:,j])\n",
     "            predLims[i,:,j] = np.interp(np.array(lims)*(nSample-1),np.arange(nSample),oSaveSorted[:,j])\n",
     "\n",
     "    return credLims,predLims"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define the model functions.\n",
    "The model functions yield model predictions that can be compared to the data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getModelResponse(theta,xdata,extra):\n",
    "    # 'extra' can be used to pass extra parameter values.\n",
    "    # These parameters are needed by the model, but not being estimated.\n",
    "    if extra[0]==0:\n",
    "        a = theta[0,0]\n",
    "        b = theta[1,0]\n",
    "    else:\n",
    "        a = theta[0,0]\n",
    "        b = theta[1,0]\n",
    "\n",
    "    modelResponse = a*xdata+b\n",
    "\n",
    "    return modelResponse"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define the model error functions.\n",
    "The model error functions yield the discrepancies between the model predictions and the data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getModelResponseError(theta,xdata,ydata,extra):\n",
    "    # 'extra' can be used to pass extra parameter values.\n",
    "    # These parameters are needed by the model, but not being estimated.\n",
    "    modelResponse = getModelResponse(theta,xdata,extra)\n",
    "    modelResponseError = modelResponse-ydata\n",
    "\n",
    "    return modelResponseError"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Main function.\n",
    "This is the main function if the module is executed by itself instead of being imported to other modules."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "if __name__ == '__main__':\n",
     "    ## Below is an example of how to use DRAMMIMO.\n",
     "\n",
     "    # Load the data.\n",
     "    print('Loading data...')\n",
     "    # Fictitious data are generated here for a linear model y = a*x + b.\n",
     "    # Two data sets are available.\n",
     "    inputData1 = np.array([np.linspace(0,1,101)]).T\n",
     "    inputData2 = np.array([np.linspace(0,1,101)]).T\n",
     "    outputData1 = 0.8*inputData1+0.05*np.array([np.random.randn(101)]).T\n",
     "    outputData2 = 1.2*inputData2+0.15*np.array([np.random.randn(101)]).T\n",
     "\n",
     "    # Set up DRAMMIMO.\n",
     "    print('Setting DRAMMIMO...')\n",
     "    # Scenarios with one or two data sets are differentiated by mode.\n",
     "    # mode = 1: using one data set (Bayesian method).\n",
     "    # mode = 2: using two data sets (Maximum Entropy method).\n",
     "    mode = 2\n",
     "    # NOTE(review): getModelResponseSS must be defined before this point;\n",
     "    # the 'ssFun' entries are only length-checked by getDRAMMIMOChains.\n",
     "    if mode==1:\n",
     "        data = {'xdata':[inputData1], \\\n",
     "                'ydata':[outputData1]}\n",
     "        model = {'fun':[getModelResponse], \\\n",
     "                 'errFun':[getModelResponseError], \\\n",
     "                 'ssFun':[getModelResponseSS]}\n",
     "        modelParams = {'names':['a','b'],\\\n",
     "                       'values':[1,0], \\\n",
     "                       'lowerLimits':[-float('inf'),-float('inf')], \\\n",
     "                       'upperLimits':[float('inf'),float('inf')], \\\n",
     "                       'extra':[[0,]]}\n",
     "    elif mode==2:\n",
     "        data = {'xdata':[inputData1,inputData2], \\\n",
     "                'ydata':[outputData1,outputData2]}\n",
     "        model = {'fun':[getModelResponse,getModelResponse], \\\n",
     "                 'errFun':[getModelResponseError,getModelResponseError], \\\n",
     "                 'ssFun':[getModelResponseSS,getModelResponseSS]}\n",
     "        modelParams = {'names':['a','b'],\\\n",
     "                       'values':[1,0], \\\n",
     "                       'lowerLimits':[-float('inf'),-float('inf')], \\\n",
     "                       'upperLimits':[float('inf'),float('inf')], \\\n",
     "                       'extra':[[0,],[0,]]}\n",
     "\n",
     "    # Get estimation chains.\n",
     "    # The estimation chains can be obtained in multiple consecutive runs.\n",
     "    # 1st round.\n",
     "    numDone = 1\n",
     "    numTotal = 5000\n",
     "    DRAMParams = {'numDRAMIterationsDone':numDone, \\\n",
     "                  'numDRAMIterations':numTotal, \\\n",
     "                  'previousResults': {'chain_q':[], \\\n",
     "                                      'last_cov_q':[], \\\n",
     "                                      'chain_cov_err':[]}}\n",
     "    print('Running DRAMMIMO...')\n",
     "    chain_q,last_cov_q,chain_cov_err = getDRAMMIMOChains(data,model,modelParams,DRAMParams)\n",
     "    print('Finished.')\n",
     "    # 2nd round: continue the chain from the 1st round's results.\n",
     "    numDone = numTotal\n",
     "    numTotal = 10000\n",
     "    DRAMParams = {'numDRAMIterationsDone':numDone, \\\n",
     "                  'numDRAMIterations':numTotal, \\\n",
     "                  'previousResults': {'chain_q':chain_q, \\\n",
     "                                      'last_cov_q':last_cov_q, \\\n",
     "                                      'chain_cov_err':chain_cov_err}}\n",
     "    print('Running DRAMMIMO...')\n",
     "    chain_q,last_cov_q,chain_cov_err = getDRAMMIMOChains(data,model,modelParams,DRAMParams)\n",
     "    print('Finished.')\n",
     "\n",
     "    np.savez_compressed(os.getcwd()+'/chains'+str(numTotal), \\\n",
     "                        chain_q=chain_q, \\\n",
     "                        last_cov_q=last_cov_q, \\\n",
     "                        chain_cov_err=chain_cov_err)\n",
     "\n",
     "    # Get posterior densities from the second half of the chain (burn-in\n",
     "    # discarded).\n",
     "    num = round(chain_q.shape[0]/2.0)\n",
     "    vals,probs = getDRAMMIMODensities(chain_q[num:,:])\n",
     "    \n",
     "    np.savez_compressed(os.getcwd()+'/densities'+str(numTotal), \\\n",
     "                        vals=vals, \\\n",
     "                        probs=probs)\n",
     "\n",
     "    # Get credible and prediction intervals.\n",
     "    nSample = 500\n",
     "    credLims,predLims = getDRAMMIMOIntervals(data,model,modelParams,chain_q,chain_cov_err,nSample)\n",
     "    \n",
     "    np.savez_compressed(os.getcwd()+'/intervals'+str(numTotal), \\\n",
     "                        credLims=credLims, \\\n",
     "                        predLims=predLims)\n",
     "\n",
     "    ## Plot the results.    \n",
     "    print('Plotting results...')\n",
     "\n",
     "    figNum = 0\n",
     "\n",
     "    # Data.\n",
     "    figNum += 1\n",
     "    plt.figure(figNum,figsize=(10,6))\n",
     "    plt.plot(inputData1,outputData1,'bo',markerfacecolor='None',label='Data I')\n",
     "    plt.plot(inputData2,outputData2,'ro',markerfacecolor='None',label='Data II')\n",
     "    plt.xlabel('$x$',fontsize=18)\n",
     "    plt.ylabel('$y$',fontsize=18)\n",
     "    plt.xlim(0,1)\n",
     "    plt.ylim(-0.1,1.3)\n",
     "    plt.xticks(fontsize=18)\n",
     "    plt.yticks(fontsize=18)\n",
     "    plt.legend(loc='upper left',fontsize=18,frameon=False)\n",
     "\n",
     "    # Estimation chains.\n",
     "    figNum += 1\n",
     "    plt.figure(figNum,figsize=(10,6))\n",
     "    plt.subplot(2,1,1)\n",
     "    plt.plot(np.arange(1,chain_q.shape[0]+1,1),chain_q[:,0],'.')\n",
     "    plt.xticks([])\n",
     "    plt.yticks(fontsize=18)\n",
     "    plt.ylabel('$a$',fontsize=18)\n",
     "    plt.xlim(0,chain_q.shape[0])\n",
     "    plt.ylim(np.min(chain_q[:,0]),np.max(chain_q[:,0]))\n",
     "    plt.subplot(2,1,2)\n",
     "    plt.plot(np.arange(1,chain_q.shape[0]+1,1),chain_q[:,1],'.')\n",
     "    plt.xticks([])\n",
     "    plt.yticks(fontsize=18)\n",
     "    plt.xlabel('Iterations',fontsize=18)\n",
     "    plt.ylabel('$b$',fontsize=18)\n",
     "    plt.xlim(0,chain_q.shape[0])\n",
     "    plt.ylim(np.min(chain_q[:,1]),np.max(chain_q[:,1]))\n",
     "\n",
     "    # Posterior densities.\n",
     "    figNum += 1\n",
     "    plt.figure(figNum,figsize=(10,6))\n",
     "    plt.subplot(1,2,1)\n",
     "    plt.plot(vals[:,0],probs[:,0],'k',linewidth=3)\n",
     "    plt.xlabel('$a$',fontsize=18)\n",
     "    plt.xlim(np.min(vals[:,0]),np.max(vals[:,0]))\n",
     "    # plt.xticks(fontsize=18)\n",
     "    plt.yticks([])\n",
     "    plt.ylabel('Posterior Density',fontsize=18)\n",
     "    plt.subplot(1,2,2)\n",
     "    plt.plot(vals[:,1],probs[:,1],'k',linewidth=3)\n",
     "    plt.xlabel('$b$',fontsize=18)\n",
     "    plt.xlim(np.min(vals[:,1]),np.max(vals[:,1]))\n",
     "    # plt.xticks(fontsize=18)\n",
     "    plt.yticks([])\n",
     "\n",
     "    # Credible and prediction intervals for data set I.\n",
     "    # The interval bands are drawn as closed polygons: x runs forward\n",
     "    # along the lower limit and backward along the upper limit.\n",
     "    figNum += 1\n",
     "    fig, ax = plt.subplots(figsize=(10,6))\n",
     "    limitX = np.vstack((inputData1,np.flip(inputData1,axis=0)))\n",
     "    predLimitY = np.vstack((np.array([predLims[0,0,:]]).T,np.array(np.fliplr([predLims[0,2,:]])).T))\n",
     "    predLimits = mpatch.Polygon(np.hstack((limitX,predLimitY)),facecolor=(1,0.75,0.5))\n",
     "    ax.add_patch(predLimits)\n",
     "    credLimitY = np.vstack((np.array([credLims[0,0,:]]).T,np.array(np.fliplr([credLims[0,2,:]])).T))\n",
     "    credLimits = mpatch.Polygon(np.hstack((limitX,credLimitY)),facecolor=(0.75,1,0.5))\n",
     "    ax.add_patch(credLimits)\n",
     "    plt.plot(inputData1,credLims[0,1,:],'k')\n",
     "    plt.plot(inputData1,outputData1,'bo',markerfacecolor='None')\n",
     "    plt.xlabel('$x$',fontsize=18)\n",
     "    plt.ylabel('$y_1$',fontsize=18)\n",
     "    plt.xlim(0,1)\n",
     "    plt.ylim(-0.7,2)\n",
     "    plt.xticks(fontsize=18)\n",
     "    plt.yticks(fontsize=18)\n",
     "    lgd = [mpatch.Patch(facecolor=(1,0.75,0.5),edgecolor='None',label='95% Pred Interval'), \\\n",
     "           mpatch.Patch(facecolor=(0.75,1,0.5),edgecolor='None',label='95% Cred Interval'), \\\n",
     "           mline.Line2D([0],[0],color='k',label='Model'), \\\n",
     "           mline.Line2D([0],[0],marker='o',color='None',markerfacecolor='None',markeredgecolor='b',label='Data I')]\n",
     "    plt.legend(handles=lgd,loc='upper left',fontsize=18,frameon=False)\n",
     "\n",
     "    # Credible and prediction intervals for data set II.\n",
     "    if mode==2:\n",
     "        figNum += 1\n",
     "        fig, ax = plt.subplots(figsize=(10,6))\n",
     "        limitX = np.vstack((inputData2,np.flip(inputData2,axis=0)))\n",
     "        predLimitY = np.vstack((np.array([predLims[1,0,:]]).T,np.array(np.fliplr([predLims[1,2,:]])).T))\n",
     "        predLimits = mpatch.Polygon(np.hstack((limitX,predLimitY)),facecolor=(1,0.75,0.5))\n",
     "        ax.add_patch(predLimits)\n",
     "        credLimitY = np.vstack((np.array([credLims[1,0,:]]).T,np.array(np.fliplr([credLims[1,2,:]])).T))\n",
     "        credLimits = mpatch.Polygon(np.hstack((limitX,credLimitY)),facecolor=(0.75,1,0.5))\n",
     "        ax.add_patch(credLimits)\n",
     "        plt.plot(inputData2,credLims[1,1,:],'k')\n",
     "        plt.plot(inputData2,outputData2,'ro',markerfacecolor='None')\n",
     "        plt.xlabel('$x$',fontsize=18)\n",
     "        plt.ylabel('$y_2$',fontsize=18)\n",
     "        plt.xlim(0,1)\n",
     "        plt.ylim(-0.7,2)\n",
     "        plt.xticks(fontsize=18)\n",
     "        plt.yticks(fontsize=18)\n",
     "        lgd = [mpatch.Patch(facecolor=(1,0.75,0.5),edgecolor='None',label='95% Pred Interval'), \\\n",
     "               mpatch.Patch(facecolor=(0.75,1,0.5),edgecolor='None',label='95% Cred Interval'), \\\n",
     "               mline.Line2D([0],[0],color='k',label='Model'), \\\n",
     "               mline.Line2D([0],[0],marker='o',color='None',markerfacecolor='None',markeredgecolor='r',label='Data II')]\n",
     "        plt.legend(handles=lgd,loc='upper left',fontsize=18,frameon=False)\n",
     "\n",
     "    # Show plots.\n",
     "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Results.\n",
    "An example of a simple linear model is presented here to demonstrate how to use the package. The uncertainty quantification results of the example are presented below."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Fictitious data.\n",
    "<img src=\"figures/linearModelData.png\" width=\"800\" />"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Estimation chains.\n",
    "<img src=\"figures/linearModelChains.png\" width=\"800\" />"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Posterior densities.\n",
    "<img src=\"figures/linearModelDensities.png\" width=\"800\" />"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Credible and prediction intervals.\n",
    "<img src=\"figures/linearModelIntervals1.png\" width=\"800\" />\n",
    "<img src=\"figures/linearModelIntervals2.png\" width=\"800\" />"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
