{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib import cm\n",
    "from matplotlib import colors as mcolors\n",
    "import matplotlib.patches as mpatches\n",
    "import matplotlib.dates as mdates\n",
    "import datetime\n",
    "import time\n",
    "import seaborn as sns; sns.set()\n",
    "from matplotlib import rcParams\n",
    "rcParams.update({'figure.autolayout': True})\n",
    "import scipy as sp\n",
    "import scipy.optimize as optimize\n",
    "from lmfit import minimize, Parameters, Parameter, report_fit\n",
    "from scipy.integrate import odeint\n",
    "from scipy import stats\n",
    "\n",
    "#import import_ipynb\n",
    "from utils_common import *\n",
    "\n",
    "from pandas.plotting import register_matplotlib_converters\n",
    "register_matplotlib_converters()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "_Figure_PATH_O_ = './figures/'\n",
    "_Data_PATH_O_ = './data/'\n",
    "_Data_PATH_XX_ = './data/parameters_xx/'\n",
    "# m stands for migration\n",
    "_Data_PATH_MXXR_ = './data/parameters_mxxr/' \n",
    "_Data_PATH_MXXRF_ = './data/parameters_mxxr_fast/'\n",
    "\n",
    "_City_PATH_ = './data/data_DXY_city_all.csv'\n",
    "_Province_PATH_ = './data/data_DXY_province_all.csv'\n",
    "_Province_Domestic_PATH_ = './data/data_DXY_province_all_domestic.csv'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Calcuate the standard errors of parameters (from script: utils_parameters)\n",
    "# reference: Dueling biological and social contagions, Feng Fu\n",
    "# a linear approximation through estimation of the Jacobian matrix: \n",
    "# cov = sigma^2(F'F)^(-1)\n",
    "# the unbiased estimation of sigma^2 obtained from the residuals of the parameter estimation:\n",
    "# sigma = error/sqrt(number of observations - number of free parameters)\n",
    "\n",
    "def SDE_xx(df_parameters_list, porvince_index, T = 21, TT = 6, TI = 3, ti = 7, figure = False):\n",
    "    \n",
    "    df_parameters = df_parameters_list[porvince_index].copy() # the parameters\n",
    "    df_estimation = df_estimation_list[porvince_index].copy() # the estimation\n",
    "    \n",
    "    timespan = df_estimation.shape[0] # national timespan\n",
    "    timespan_local = timespan - df_estimation['real'].isnull().sum() # provincial timespan\n",
    "    \n",
    "    e = np.linalg.norm(df_estimation.tail(timespan_local).residual) # Euclidean Norm of the residual\n",
    "    k = df_parameters.shape[0] - 2 # number of free parameters (exclude N and S)\n",
    "    sigma_e = e/np.sqrt(timespan_local - k) # sigma\n",
    "    \n",
    "    ps_old = dict(zip(df_parameters.parameter, df_parameters.value))\n",
    "    para_list = df_parameters.parameter.tolist() \n",
    "    para_list.remove('N')\n",
    "    para_list.remove('S_0')\n",
    "    \n",
    "    list_front = np.repeat(range(int(np.ceil(T/TI))), TI).tolist()\n",
    "    list_end = np.repeat(range(int(np.ceil((timespan - T)/ti))), ti).tolist()\n",
    "    list_end = [item + max(list_front) + 1 for item in list_end]\n",
    "    index_list = list_front + list_end\n",
    "    index_list = index_list[:timespan] # index list for beta\n",
    "    list_front_gs = [0]*TT + [1]*TT\n",
    "    list_end_gs = [2]*(timespan - TT - TT)\n",
    "    index_list_gs = list_front_gs + list_end_gs # index list for gamma and sigma\n",
    "    \n",
    "    ts = range(0, timespan)\n",
    "    nbeta = int(T/TI + np.ceil((timespan - T)/ti)) # number of beta's\n",
    "    \n",
    "    # Least-square fitting\n",
    "    def SEIR_equations(states, t, ps):\n",
    "        \"\"\"Receptor synthesis-internalization model.\"\"\"\n",
    "        # integrate.ode may generate t values outside the data range\n",
    "        beta_index = index_list[min(round(t), timespan - 1)]\n",
    "        gs_index = index_list_gs[min(round(t), timespan - 1)]\n",
    "        beta = ps['beta_' + str(beta_index)]\n",
    "        gamma = ps['gamma_' + str(gs_index)]\n",
    "        sigma = ps['sigma_' + str(gs_index)]\n",
    "        N = ps['N']\n",
    "        S, E, I, R = states\n",
    "        return [-beta*S*I/N, beta*S*I/N - sigma*E, sigma*E - gamma*I, gamma*I]\n",
    "    \n",
    "    def SEIR(states_0, ts, ps):\n",
    "        \"\"\"\n",
    "        Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0\n",
    "        \"\"\"\n",
    "        states = odeint(SEIR_equations, states_0, ts, args=(ps,))\n",
    "        return states\n",
    "    \n",
    "    # Calculate the estimation\n",
    "    def estimation(ps, ts):\n",
    "        states_0 = ps['N'] - ps['E_0'] - ps['I_0'] - ps['R_0'], ps['E_0'], ps['I_0'], ps['R_0']\n",
    "        model = SEIR(states_0, ts, ps)\n",
    "        est = model[:,3]\n",
    "        return est\n",
    "    \n",
    "    J = np.zeros((timespan_local, k)) # Jacobian matrix\n",
    "    \n",
    "    for j in range(0, k):\n",
    "        para = para_list[j]\n",
    "        para_value_old = ps_old[para]\n",
    "        if para_value_old >= 1e-10:\n",
    "            para_value_change = 1e-2*para_value_old # difference = one percent\n",
    "        else: # in case the value of a parameter is zero\n",
    "            para_value_change = 1e-12 \n",
    "        ps_new = dict(zip(df_parameters.parameter, df_parameters.value))\n",
    "        ps_new[para] = para_value_old + para_value_change\n",
    "        final = estimation(ps_new, ts)\n",
    "        J[:,j] = (df_estimation.tail(timespan_local).estimation - final[-timespan_local:])/para_value_change\n",
    "        \n",
    "        palette = plt.get_cmap('magma')\n",
    "        if figure:\n",
    "            fig = plt.figure(figsize = (10,5))\n",
    "            plt.plot(ts, df_estimation.estimation, '-', linewidth = 2, color = palette(0.6))\n",
    "            plt.plot(ts, final, '--', linewidth = 2, color = palette(0.8))\n",
    "            plt.title(para)\n",
    "            plt.xlabel('Date')\n",
    "            plt.ylabel('Number of people')\n",
    "        \n",
    "    H = np.transpose(J).dot(J)\n",
    "    H_inverse = np.linalg.inv(H)\n",
    "    df_parameters[\"sde\"] = np.nan\n",
    "    df_parameters[\"lower_bound\"] = np.nan\n",
    "    df_parameters[\"upper_bound\"] = np.nan\n",
    "    \n",
    "    for j in range(0, k):\n",
    "        para = para_list[j]\n",
    "        df_parameters.loc[df_parameters.parameter == para, 'sde'] = sigma_e*np.sqrt(H_inverse[j][j]/ps_old['N'])\n",
    "    # calculate the lower bound and upper bound of every parameter (99.9% confidence interval)\n",
    "    # lower bound is non-negative\n",
    "    # !!! ignore nan !!!\n",
    "    df_parameters[\"lower_bound\"] = df_parameters.apply(lambda row: np.nanmax([row.value - row.sde*stats.t.ppf(1-0.0005, timespan_local - k), 0]), axis = 1)\n",
    "    df_parameters[\"upper_bound\"] = df_parameters.apply(lambda row: row.value + row.sde*stats.t.ppf(1-0.0005, timespan_local - k), axis = 1)\n",
    "    ###### the value of beta cannot exceed 1\n",
    "    for j in range(max(index_list) + 1):\n",
    "        df_parameters.loc[df_parameters.parameter == 'beta_' + str(int(j)), 'lower_bound'] = np.nanmax([df_parameters.loc[df_parameters.parameter == 'beta_' + str(int(j)), 'lower_bound'].tolist()[0], 0.01])\n",
    "        df_parameters.loc[df_parameters.parameter == 'beta_' + str(int(j)), 'upper_bound'] = np.nanmin([df_parameters.loc[df_parameters.parameter == 'beta_' + str(int(j)), 'upper_bound'].tolist()[0], 1])\n",
    "    ###### the value of gamma and sigma cannot be less than 0.05\n",
    "    ###### the value of gamma and sigma cannot exceed 0.5\n",
    "    for j in range(max(index_list_gs) + 1):\n",
    "        df_parameters.loc[df_parameters.parameter == 'gamma_' + str(int(j)), 'lower_bound'] = np.nanmax([df_parameters.loc[df_parameters.parameter == 'gamma_' + str(int(j)), 'lower_bound'].tolist()[0], 0.05])\n",
    "        df_parameters.loc[df_parameters.parameter == 'sigma_' + str(int(j)), 'lower_bound'] = np.nanmax([df_parameters.loc[df_parameters.parameter == 'sigma_' + str(int(j)), 'lower_bound'].tolist()[0], 0.05])\n",
    "        \n",
    "        df_parameters.loc[df_parameters.parameter == 'gamma_' + str(int(j)), 'upper_bound'] = np.nanmin([df_parameters.loc[df_parameters.parameter == 'gamma_' + str(int(j)), 'upper_bound'].tolist()[0], 0.5])\n",
    "        df_parameters.loc[df_parameters.parameter == 'sigma_' + str(int(j)), 'upper_bound'] = np.nanmin([df_parameters.loc[df_parameters.parameter == 'sigma_' + str(int(j)), 'upper_bound'].tolist()[0], 0.5])\n",
    "    return df_parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SEIR simulation WITH MIGRATION given prior estimation of parameters (from script: utils_parameters)\n",
    "# same timeline for all porvinces\n",
    "# beta, gamma and sigma are time-dependent\n",
    "# beta: first changes every TI = 3 days for T = 21 days, then changes every ti days\n",
    "# gamma and sigma: first changes every TT = 6 days for 2*TT = 12 days, then stays the same\n",
    "###### recursive method:\n",
    "# one complete cycle: from the first province to the last province\n",
    "# repetition: n cycle\n",
    "###### algorithm:\n",
    "# ’leastsq’: Levenberg-Marquardt (default)\n",
    "def simulation_xxrecursive(df, update_date_tr, mindex_list, repetition = 1,\n",
    "                 T = 21, TT = 6, TI = 3, ti = 7, mmethod = 'leastsq', kws = False):\n",
    "    \n",
    "    start_time = time.time() \n",
    "    m = len(names_province)\n",
    "    df = df[df.update_date <= update_date_tr] \n",
    "    timespan = (max(df.update_date) - min(df.update_date)).days + 1\n",
    "    ################## for beta, generate an index list\n",
    "    list_front = np.repeat(range(int(np.ceil(T/TI))), TI).tolist()\n",
    "    list_end = np.repeat(range(int(np.ceil((timespan - T)/ti))), ti).tolist()\n",
    "    list_end = [item + max(list_front) + 1 for item in list_end]\n",
    "    index_list = list_front + list_end\n",
    "    index_list = index_list[:timespan] \n",
    "    ################## \n",
    "    ################## for sigma and gamma, generate an index list\n",
    "    list_front_gs = [0]*TT + [1]*TT\n",
    "    list_end_gs = [2]*(timespan - TT - TT)\n",
    "    index_list_gs = list_front_gs + list_end_gs\n",
    "    ################## \n",
    "    df_single_list = [] # list of dataframes \n",
    "    ts_local_list = [] # list of provincial timespans\n",
    "    data = np.zeros((timespan, m)) # cumulative numbers of confirmed cases\n",
    "    \n",
    "    for i, name in enumerate(names_province):\n",
    "        df_single = df[df.province_name_en == name]\n",
    "        timespan_local = (max(df_single.update_date) - min(df_single.update_date)).days + 1\n",
    "        df_single_list.append(df_single)\n",
    "        ts_local_list.append(timespan_local)\n",
    "        data_single = np.array(df_single.cum_confirmed)\n",
    "        data_single = np.concatenate(([None]*(timespan - timespan_local), data_single), axis=0)\n",
    "        data[:,i] = data_single\n",
    "    \n",
    "    # Least-square fitting\n",
    "    # SEIR model with migration\n",
    "    def SEIR_equations(states, t, ps):\n",
    "        \"\"\"Receptor synthesis-internalization model.\"\"\"\n",
    "        #S = [0]*m; E = [0]*m # given that there are m provinces, at every time point, \n",
    "        #I = [0]*m; R = [0]*m # the variable is a vector of length m\n",
    "        states = states.reshape((m, 4)) # S_i, E_i, I_i, R_i, S_j, E_j, I_j, R_j\n",
    "        S, E, I, R = states[:,0], states[:,1], states[:,2], states[:,3]\n",
    "        N = S + E + I + R\n",
    "        RHS = [] # right hand side\n",
    "        theta = ps['theta'].value\n",
    "        # integrate.ode may generate t values outside the data range\n",
    "        mindex = mindex_list[min(round(t), timespan - 1)] # migration matrix: m_ij from i to j\n",
    "        for i, name in enumerate(names_province):\n",
    "            # integrate.ode may generate t values outside the data range\n",
    "            beta_index = index_list[min(round(t), timespan - 1)]\n",
    "            gs_index = index_list_gs[min(round(t), timespan - 1)]\n",
    "            beta = ps['beta_' + str(i) + '_' + str(beta_index)].value\n",
    "            gamma = ps['gamma_' + str(i) + '_' + str(gs_index)].value\n",
    "            sigma = ps['sigma_' + str(i) + '_' + str(gs_index)].value\n",
    "            \n",
    "            dS = -beta*S[i]*I[i]/N[i] - theta/N[i]*np.sum(mindex[i, :])*S[i] + theta*np.sum(np.multiply(np.divide(mindex[:,i], N), S))\n",
    "            dE = beta*S[i]*I[i]/N[i] - sigma*E[i] - theta/N[i]*np.sum(mindex[i, :])*E[i] + theta*np.sum(np.multiply(np.divide(mindex[:,i], N), E))\n",
    "            dI = sigma*E[i] - gamma*I[i]\n",
    "            dR = gamma*I[i]\n",
    "            RHS += [dS, dE, dI, dR]\n",
    "        return RHS\n",
    "    \n",
    "    def SEIR(states_0, ts, ps):\n",
    "        \"\"\"\n",
    "        Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0\n",
    "        \"\"\"\n",
    "        states = odeint(SEIR_equations, states_0, ts, args=(ps,))\n",
    "        return states\n",
    "    \n",
    "    # Calculate the residual\n",
    "    def residual(ps, ts, data):\n",
    "        states_0 = []\n",
    "        for i, name in enumerate(names_province):\n",
    "            states_single_0 = ps['S_' + str(i) + '_0'].value, ps['E_' + str(i) + '_0'].value, ps['I_' + str(i) + '_0'].value, ps['R_' + str(i) + '_0'].value\n",
    "            states_0 += states_single_0\n",
    "        model = SEIR(states_0, ts, ps)\n",
    "        res = np.empty(0)\n",
    "        for i, name in enumerate(names_province):\n",
    "            timespan_local = ts_local_list[i]\n",
    "            est_single = model[:,(3 + i*4)]\n",
    "            est_single = est_single[-timespan_local:]\n",
    "            data_single = data[:,i]\n",
    "            data_single = data_single[-timespan_local:]\n",
    "            res_single = (est_single - data_single).ravel()\n",
    "            res_single = res_single/data_single[-1] # normalization\n",
    "            res = np.concatenate((res, res_single), axis=0)\n",
    "        return res\n",
    "    \n",
    "    # Calculate the estimation\n",
    "    def estimation(ps, ts):\n",
    "        states_0 = []\n",
    "        for i, name in enumerate(names_province):\n",
    "            states_single_0 = ps['S_' + str(i) + '_0'].value, ps['E_' + str(i) + '_0'].value, ps['I_' + str(i) + '_0'].value, ps['R_' + str(i) + '_0'].value\n",
    "            states_0 += states_single_0\n",
    "        model = SEIR(states_0, ts, ps)\n",
    "        est = np.zeros((timespan, m))\n",
    "        for i, name in enumerate(names_province):\n",
    "            est_single = model[:,(3 + i*4)]\n",
    "            est[:, i] = est_single\n",
    "        return est\n",
    "    \n",
    "    # recursively performing optimizations \n",
    "    df_parameters_list_r = [df_parameters_list[i].copy() for i in range(0, len(names_province))]\n",
    "    # repetition\n",
    "    for flag in range(0, repetition):\n",
    "        # one complete cycle: from the first province to the last province\n",
    "        # the k th step: only the parameters of province k is changable, those of the other provinces are fixed\n",
    "        for k, name in enumerate(names_province): \n",
    "            # set parameters (incluing their bounds)\n",
    "            ps_bound = Parameters()\n",
    "            for i, name in enumerate(names_province):\n",
    "                prior = df_parameters_list_r[i]\n",
    "                ps_bound.add(name = 'N_' + str(i) + '_0', value = prior[prior.parameter == 'N'].value.tolist()[0], vary = False)\n",
    "                if i != k:\n",
    "                    ps_bound.add(name = 'E_' + str(i) + '_0', value = prior[prior.parameter == 'E_0'].value.tolist()[0], vary = False)\n",
    "                    ps_bound.add(name = 'I_' + str(i) + '_0', value = prior[prior.parameter == 'I_0'].value.tolist()[0], vary = False)\n",
    "                    ps_bound.add(name = 'R_' + str(i) + '_0', value = prior[prior.parameter == 'R_0'].value.tolist()[0], vary = False)\n",
    "                    ps_bound.add(name = 'S_' + str(i) + '_0', expr = 'N_' + str(i) + '_0' + '-' 'E_' + str(i) + '_0' + '-' 'I_' + str(i) + '_0' + '-' 'R_' + str(i) + '_0')\n",
    "                    for j in range(max(index_list) + 1):\n",
    "                        ps_bound.add(name = 'beta_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'beta_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                    for j in range(max(index_list_gs) + 1):\n",
    "                        ps_bound.add(name = 'gamma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'gamma_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                    for j in range(max(index_list_gs) + 1):\n",
    "                        ps_bound.add(name = 'sigma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'sigma_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                else:\n",
    "                    ps_bound.add(name = 'E_' + str(i) + '_0', value = prior[prior.parameter == 'E_0'].value.tolist()[0], \n",
    "                                 min = 0, max = np.nanmax([500, prior[prior.parameter == 'E_0'].upper_bound.tolist()[0]]))\n",
    "                    ps_bound.add(name = 'I_' + str(i) + '_0', value = prior[prior.parameter == 'I_0'].value.tolist()[0], \n",
    "                                 min = 0, max = np.nanmax([200, prior[prior.parameter == 'I_0'].upper_bound.tolist()[0]]))\n",
    "                    ps_bound.add(name = 'R_' + str(i) + '_0', value = prior[prior.parameter == 'R_0'].value.tolist()[0], \n",
    "                                 min = 0, max = np.nanmax([100, prior[prior.parameter == 'R_0'].upper_bound.tolist()[0]]))\n",
    "                    ps_bound.add(name = 'S_' + str(i) + '_0', expr = 'N_' + str(i) + '_0' + '-' 'E_' + str(i) + '_0' + '-' 'I_' + str(i) + '_0' + '-' 'R_' + str(i) + '_0')\n",
    "                    for j in range(max(index_list) + 1):\n",
    "                        ps_bound.add(name = 'beta_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'beta_' + str(int(j))].value.tolist()[0], \n",
    "                                     min = 0.01, # prior[prior.parameter == 'beta_' + str(int(j))].lower_bound.tolist()[0]\n",
    "                                     max = prior[prior.parameter == 'beta_' + str(int(j))].upper_bound.tolist()[0])\n",
    "                    for j in range(max(index_list_gs) + 1):\n",
    "                        ps_bound.add(name = 'gamma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'gamma_' + str(int(j))].value.tolist()[0], \n",
    "                                     min = 0.05, # prior[prior.parameter == 'gamma_' + str(int(j))].lower_bound.tolist()[0] \n",
    "                                     max = prior[prior.parameter == 'gamma_' + str(int(j))].upper_bound.tolist()[0])\n",
    "                    for j in range(max(index_list_gs) + 1):\n",
    "                        ps_bound.add(name = 'sigma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'sigma_' + str(int(j))].value.tolist()[0], \n",
    "                                     min = 0.05, # prior[prior.parameter == 'sigma_' + str(int(j))].lower_bound.tolist()[0]\n",
    "                                     max = prior[prior.parameter == 'sigma_' + str(int(j))].upper_bound.tolist()[0])\n",
    "\n",
    "            # the unit of migration index is 1e5 (see utils_migration for explanation)\n",
    "            ps_bound.add(name = 'theta', value = 1e5, vary = False)\n",
    "            ts = range(0, timespan)\n",
    "            # fit the model and find estimated values\n",
    "            if kws == False:\n",
    "                result = minimize(residual, ps_bound, args = (ts, data), method = mmethod)\n",
    "            elif mmethod == 'emcee':\n",
    "                result = minimize(residual, ps_bound, args = (ts, data), method = mmethod, steps = kws.get('steps'))\n",
    "            elif mmethod == 'dual_annealing':\n",
    "                result = minimize(residual, ps_bound, args = (ts, data), method = mmethod, maxiter = kws.get('maxiter'))\n",
    "            # the dataframe above is a long dataframe consisting of parameters for all provinces\n",
    "            df_parameters = pd.DataFrame(dict(result.params.valuesdict()).items(), columns=['parameter', 'value'])\n",
    "            # only update the parameters for the k th province\n",
    "            start_index = df_parameters.index[df_parameters['parameter'] == 'N_' + str(k) + '_0'].tolist()[0]\n",
    "            end_index = df_parameters.index[df_parameters['parameter'] == 'sigma_' + str(k) + '_' + str(max(index_list_gs))].tolist()[0]\n",
    "            update = df_parameters[start_index:(end_index + 1)]\n",
    "            update = update.reset_index(drop = True) ###### !!!!!! ######\n",
    "            df_parameters_list_r[k]['value'] = update['value']\n",
    "            print(\"province %d of round %d\" % (k, flag))\n",
    "            print(\"--- %s seconds ---\" % (time.time() - start_time))\n",
    "            \n",
    "    final = estimation(result.params, ts)\n",
    "    _Data_PATH_MXXR_ = './data/parameters_mxxr_' + str(repetition) + '/' \n",
    "    \n",
    "    df_mestimation_list = [] # m for migration\n",
    "    for i, name in enumerate(names_province):\n",
    "        df_mestimation = df_estimation_list[i].copy()\n",
    "        df_mestimation['mestimation'] = final[:, i]\n",
    "        df_mestimation_list.append(df_mestimation)\n",
    "        df_mestimation.to_csv(_Data_PATH_MXXR_ + name + '_mestimation.csv', index = False)\n",
    "        df_parameters_list_r[i].to_csv(_Data_PATH_MXXR_ + name + '_mparameters.csv', index = False)\n",
    "    \n",
    "    print(\"--- %s seconds ---\" % (time.time() - start_time))\n",
    "    \n",
    "    df_parameters.to_csv(_Data_PATH_MXXR_ + 'mparameters.csv', index = False)\n",
    "\n",
    "    return result, df_mestimation_list, df_parameters_list_r\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SEIR simulation WITH MIGRATION given prior estimation of parameters (from script: utils_parameters)\n",
    "# same timeline for all porvinces\n",
    "# beta, gamma and sigma are time-dependent\n",
    "# beta: first changes every TI = 3 days for T = 21 days, then changes every ti days\n",
    "# gamma and sigma: first changes every TT = 6 days for 2*TT = 12 days, then stays the same\n",
    "###### recursive method:\n",
    "# one complete cycle: from the first province to the last province\n",
    "# repetition: n cycle\n",
    "###### algorithm:\n",
    "# ’leastsq’: Levenberg-Marquardt (default)\n",
    "###### fast:\n",
    "# only the first few beta's, gamma's and sigma's are changable\n",
    "def simulation_xxrecursive_fast(df, update_date_tr, mindex_list, repetition = 1,\n",
    "                 T = 21, TT = 6, TI = 3, ti = 7, mmethod = 'leastsq', kws = False):\n",
    "    \n",
    "    start_time = time.time() \n",
    "    m = len(names_province)\n",
    "    df = df[df.update_date <= update_date_tr] \n",
    "    timespan = (max(df.update_date) - min(df.update_date)).days + 1\n",
    "    ################## for beta, generate an index list\n",
    "    list_front = np.repeat(range(int(np.ceil(T/TI))), TI).tolist()\n",
    "    list_end = np.repeat(range(int(np.ceil((timespan - T)/ti))), ti).tolist()\n",
    "    list_end = [item + max(list_front) + 1 for item in list_end]\n",
    "    index_list = list_front + list_end\n",
    "    index_list = index_list[:timespan] \n",
    "    ################## \n",
    "    ################## for sigma and gamma, generate an index list\n",
    "    list_front_gs = [0]*TT + [1]*TT\n",
    "    list_end_gs = [2]*(timespan - TT - TT)\n",
    "    index_list_gs = list_front_gs + list_end_gs\n",
    "    ################## \n",
    "    df_single_list = [] # list of dataframes \n",
    "    ts_local_list = [] # list of provincial timespans\n",
    "    data = np.zeros((timespan, m)) # cumulative numbers of confirmed cases\n",
    "    \n",
    "    for i, name in enumerate(names_province):\n",
    "        df_single = df[df.province_name_en == name]\n",
    "        timespan_local = (max(df_single.update_date) - min(df_single.update_date)).days + 1\n",
    "        df_single_list.append(df_single)\n",
    "        ts_local_list.append(timespan_local)\n",
    "        data_single = np.array(df_single.cum_confirmed)\n",
    "        data_single = np.concatenate(([None]*(timespan - timespan_local), data_single), axis=0)\n",
    "        data[:,i] = data_single\n",
    "    \n",
    "    # Least-square fitting\n",
    "    # SEIR model with migration\n",
    "    def SEIR_equations(states, t, ps):\n",
    "        \"\"\"Receptor synthesis-internalization model.\"\"\"\n",
    "        #S = [0]*m; E = [0]*m # given that there are m provinces, at every time point, \n",
    "        #I = [0]*m; R = [0]*m # the variable is a vector of length m\n",
    "        states = states.reshape((m, 4)) # S_i, E_i, I_i, R_i, S_j, E_j, I_j, R_j\n",
    "        S, E, I, R = states[:,0], states[:,1], states[:,2], states[:,3]\n",
    "        N = S + E + I + R\n",
    "        RHS = [] # right hand side\n",
    "        theta = ps['theta'].value\n",
    "        # integrate.ode may generate t values outside the data range\n",
    "        mindex = mindex_list[min(round(t), timespan - 1)] # migration matrix: m_ij from i to j\n",
    "        for i, name in enumerate(names_province):\n",
    "            # integrate.ode may generate t values outside the data range\n",
    "            beta_index = index_list[min(round(t), timespan - 1)]\n",
    "            gs_index = index_list_gs[min(round(t), timespan - 1)]\n",
    "            beta = ps['beta_' + str(i) + '_' + str(beta_index)].value\n",
    "            gamma = ps['gamma_' + str(i) + '_' + str(gs_index)].value\n",
    "            sigma = ps['sigma_' + str(i) + '_' + str(gs_index)].value\n",
    "            \n",
    "            dS = -beta*S[i]*I[i]/N[i] - theta/N[i]*np.sum(mindex[i, :])*S[i] + theta*np.sum(np.multiply(np.divide(mindex[:,i], N), S))\n",
    "            dE = beta*S[i]*I[i]/N[i] - sigma*E[i] - theta/N[i]*np.sum(mindex[i, :])*E[i] + theta*np.sum(np.multiply(np.divide(mindex[:,i], N), E))\n",
    "            dI = sigma*E[i] - gamma*I[i]\n",
    "            dR = gamma*I[i]\n",
    "            RHS += [dS, dE, dI, dR]\n",
    "        return RHS\n",
    "    \n",
    "    def SEIR(states_0, ts, ps):\n",
    "        \"\"\"\n",
    "        Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0\n",
    "        \"\"\"\n",
    "        states = odeint(SEIR_equations, states_0, ts, args=(ps,))\n",
    "        return states\n",
    "    \n",
    "    # Calculate the residual\n",
    "    def residual(ps, ts, data):\n",
    "        states_0 = []\n",
    "        for i, name in enumerate(names_province):\n",
    "            states_single_0 = ps['S_' + str(i) + '_0'].value, ps['E_' + str(i) + '_0'].value, ps['I_' + str(i) + '_0'].value, ps['R_' + str(i) + '_0'].value\n",
    "            states_0 += states_single_0\n",
    "        model = SEIR(states_0, ts, ps)\n",
    "        res = np.empty(0)\n",
    "        for i, name in enumerate(names_province):\n",
    "            timespan_local = ts_local_list[i]\n",
    "            est_single = model[:,(3 + i*4)]\n",
    "            est_single = est_single[-timespan_local:]\n",
    "            data_single = data[:,i]\n",
    "            data_single = data_single[-timespan_local:]\n",
    "            res_single = (est_single - data_single).ravel()\n",
    "            res_single = res_single/data_single[-1] # normalization\n",
    "            res = np.concatenate((res, res_single), axis=0)\n",
    "        return res\n",
    "    \n",
    "    # Calculate the estimation\n",
    "    def estimation(ps, ts):\n",
    "        states_0 = []\n",
    "        for i, name in enumerate(names_province):\n",
    "            states_single_0 = ps['S_' + str(i) + '_0'].value, ps['E_' + str(i) + '_0'].value, ps['I_' + str(i) + '_0'].value, ps['R_' + str(i) + '_0'].value\n",
    "            states_0 += states_single_0\n",
    "        model = SEIR(states_0, ts, ps)\n",
    "        est = np.zeros((timespan, m))\n",
    "        for i, name in enumerate(names_province):\n",
    "            est_single = model[:,(3 + i*4)]\n",
    "            est[:, i] = est_single\n",
    "        return est\n",
    "    \n",
    "    # recursively performing optimizations \n",
    "    df_parameters_list_r = [df_parameters_list[i].copy() for i in range(0, len(names_province))]\n",
    "    # repetition\n",
    "    for flag in range(0, repetition):\n",
    "        # one complete cycle: from the first province to the last province\n",
    "        # the k th step: only the parameters of province k is changable, those of the other provinces are fixed\n",
    "        for k, name in enumerate(names_province): \n",
    "            # set parameters (incluing their bounds)\n",
    "            ps_bound = Parameters()\n",
    "            for i, name in enumerate(names_province):\n",
    "                prior = df_parameters_list_r[i]\n",
    "                ps_bound.add(name = 'N_' + str(i) + '_0', value = prior[prior.parameter == 'N'].value.tolist()[0], vary = False)\n",
    "                if i != k:\n",
    "                    ps_bound.add(name = 'E_' + str(i) + '_0', value = prior[prior.parameter == 'E_0'].value.tolist()[0], vary = False)\n",
    "                    ps_bound.add(name = 'I_' + str(i) + '_0', value = prior[prior.parameter == 'I_0'].value.tolist()[0], vary = False)\n",
    "                    ps_bound.add(name = 'R_' + str(i) + '_0', value = prior[prior.parameter == 'R_0'].value.tolist()[0], vary = False)\n",
    "                    ps_bound.add(name = 'S_' + str(i) + '_0', expr = 'N_' + str(i) + '_0' + '-' 'E_' + str(i) + '_0' + '-' 'I_' + str(i) + '_0' + '-' 'R_' + str(i) + '_0')\n",
    "                    for j in range(max(index_list) + 1):\n",
    "                        ps_bound.add(name = 'beta_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'beta_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                    for j in range(max(index_list_gs) + 1):\n",
    "                        ps_bound.add(name = 'gamma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'gamma_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                    for j in range(max(index_list_gs) + 1):\n",
    "                        ps_bound.add(name = 'sigma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'sigma_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                else:\n",
    "                    ps_bound.add(name = 'E_' + str(i) + '_0', value = prior[prior.parameter == 'E_0'].value.tolist()[0], \n",
    "                                 min = 0, max = np.nanmax([500, prior[prior.parameter == 'E_0'].upper_bound.tolist()[0]]))\n",
    "                    ps_bound.add(name = 'I_' + str(i) + '_0', value = prior[prior.parameter == 'I_0'].value.tolist()[0], \n",
    "                                 min = 0, max = np.nanmax([200, prior[prior.parameter == 'I_0'].upper_bound.tolist()[0]]))\n",
    "                    ps_bound.add(name = 'R_' + str(i) + '_0', value = prior[prior.parameter == 'R_0'].value.tolist()[0], \n",
    "                                 min = 0, max = np.nanmax([100, prior[prior.parameter == 'R_0'].upper_bound.tolist()[0]]))\n",
    "                    ps_bound.add(name = 'S_' + str(i) + '_0', expr = 'N_' + str(i) + '_0' + '-' 'E_' + str(i) + '_0' + '-' 'I_' + str(i) + '_0' + '-' 'R_' + str(i) + '_0')\n",
    "                    # only the first four beta's are changeable\n",
    "                    for j in range(max(index_list) + 1):\n",
    "                        if j >=4:\n",
    "                            ps_bound.add(name = 'beta_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'beta_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                        else:\n",
    "                            ps_bound.add(name = 'beta_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'beta_' + str(int(j))].value.tolist()[0], \n",
    "                                     min = 0.01, # prior[prior.parameter == 'beta_' + str(int(j))].lower_bound.tolist()[0]\n",
    "                                     max = prior[prior.parameter == 'beta_' + str(int(j))].upper_bound.tolist()[0])\n",
    "                    # only the first two gamma's are changeable\n",
    "                    for j in range(max(index_list_gs) + 1):\n",
    "                        if j >=2:\n",
    "                            ps_bound.add(name = 'gamma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'gamma_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                        else:\n",
    "                            ps_bound.add(name = 'gamma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'gamma_' + str(int(j))].value.tolist()[0], \n",
    "                                     min = 0.05, # prior[prior.parameter == 'gamma_' + str(int(j))].lower_bound.tolist()[0] \n",
    "                                     max = prior[prior.parameter == 'gamma_' + str(int(j))].upper_bound.tolist()[0])\n",
    "                    # only the first two sigma's are changeable\n",
    "                    for j in range(max(index_list_gs) + 1):\n",
    "                        if j>=2:\n",
    "                            ps_bound.add(name = 'sigma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'sigma_' + str(int(j))].value.tolist()[0], vary = False)\n",
    "                        else:\n",
    "                            ps_bound.add(name = 'sigma_' + str(i) + '_' + str(int(j)), value = prior[prior.parameter == 'sigma_' + str(int(j))].value.tolist()[0], \n",
    "                                     min = 0.05, # prior[prior.parameter == 'sigma_' + str(int(j))].lower_bound.tolist()[0]\n",
    "                                     max = prior[prior.parameter == 'sigma_' + str(int(j))].upper_bound.tolist()[0])\n",
    "\n",
    "            \n",
    "            # the unit of migration index is 1e5 (see utils_migration for explanation)\n",
    "            ps_bound.add(name = 'theta', value = 1e5, vary = False)\n",
    "            ts = range(0, timespan)\n",
    "            # fit the model and find estimated values\n",
    "            if kws == False:\n",
    "                result = minimize(residual, ps_bound, args = (ts, data), method = mmethod)\n",
    "            elif mmethod == 'emcee':\n",
    "                result = minimize(residual, ps_bound, args = (ts, data), method = mmethod, steps = kws.get('steps'))\n",
    "            elif mmethod == 'dual_annealing':\n",
    "                result = minimize(residual, ps_bound, args = (ts, data), method = mmethod, maxiter = kws.get('maxiter'))\n",
    "            # the dataframe above is a long dataframe consisting of parameters for all provinces\n",
    "            df_parameters = pd.DataFrame(dict(result.params.valuesdict()).items(), columns=['parameter', 'value'])\n",
    "            # only update the parameters for the k th province\n",
    "            start_index = df_parameters.index[df_parameters['parameter'] == 'N_' + str(k) + '_0'].tolist()[0]\n",
    "            end_index = df_parameters.index[df_parameters['parameter'] == 'sigma_' + str(k) + '_' + str(max(index_list_gs))].tolist()[0]\n",
    "            update = df_parameters[start_index:(end_index + 1)]\n",
    "            update = update.reset_index(drop = True) ###### !!!!!! ######\n",
    "            df_parameters_list_r[k]['value'] = update['value']\n",
    "            print(\"province %d of round %d\" % (k, flag))\n",
    "            print(\"--- %s seconds ---\" % (time.time() - start_time))   \n",
    "    \n",
    "    final = estimation(result.params, ts)\n",
    "    \n",
    "    _Data_PATH_MXXRF_ = './data/parameters_mxxr_fast_' + str(repetition) + '/' \n",
    "    \n",
    "    df_mestimation_list = [] # m for migration\n",
    "    for i, name in enumerate(names_province):\n",
    "        df_mestimation = df_estimation_list[i].copy()\n",
    "        df_mestimation['mestimation'] = final[:, i]\n",
    "        df_mestimation_list.append(df_mestimation)\n",
    "        df_mestimation.to_csv(_Data_PATH_MXXRF_ + name + '_mestimation.csv', index = False) # F stands for fast\n",
    "        df_parameters_list_r[i].to_csv(_Data_PATH_MXXRF_ + name + '_mparameters.csv', index = False)\n",
    "    \n",
    "    print(\"--- %s seconds ---\" % (time.time() - start_time))\n",
    "\n",
    "    df_parameters.to_csv(_Data_PATH_MXXRF_ + 'parameters.csv', index = False)\n",
    "\n",
    "    return result, df_mestimation_list, df_parameters_list_r\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [],
   "source": [
    "def figure_conf_est_all(df, names_province, update_date_tr, fsize = (7, 2), ncol = 3, ms = 3, fs = 12, \n",
    "                        title = None, country = 'China', tag = 'without', repetition = 10):\n",
    "    \"\"\"Plot real cumulative confirmed cases against SEIR estimations, one panel per province.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    df : long dataframe with columns 'province_name_en', 'update_date', 'cum_confirmed'\n",
    "    names_province : province names, one subplot each\n",
    "    update_date_tr : last date (inclusive) shown\n",
    "    fsize, ncol, ms, fs : per-panel size, grid columns, marker size, font size\n",
    "    title : figure suptitle\n",
    "    country : prefix of the saved figure file\n",
    "    tag : 'without' plots df_estimation_list only; anything else uses\n",
    "          df_mestimation_list (estimation with migration)\n",
    "    repetition : round index appended to the figure file name\n",
    "\n",
    "    NOTE(review): reads the notebook-global lists df_estimation_list /\n",
    "    df_mestimation_list and the path _Figure_PATH_O_; assumes the lists are\n",
    "    aligned with names_province -- confirm before reusing elsewhere.\n",
    "    \"\"\"\n",
    "    sns.set_style(\"ticks\")\n",
    "    # colors: [w migration line, w/o migration line, real-data fill, unused]\n",
    "    palette = ['#679b9b', '#aacfcf', '#fde2e2', '#ffb6b6']\n",
    "    \n",
    "    m = len(names_province)\n",
    "    n_rows = int(np.ceil(m/ncol))\n",
    "    \n",
    "    fig, axes = plt.subplots(n_rows, ncol, figsize = (2*fsize[0], n_rows*fsize[1]), sharey = False)\n",
    "    # force a 2-D grid so the indexing below also works when n_rows == 1\n",
    "    # (plt.subplots squeezes a single row to a 1-D array)\n",
    "    axes = np.asarray(axes).reshape(n_rows, ncol)\n",
    "    fig.subplots_adjust(hspace = 0.2, wspace = 0.1)\n",
    "    # remove the unused trailing panels of the last row\n",
    "    if m%ncol != 0:\n",
    "        for j in range(m, n_rows*ncol):\n",
    "            fig.delaxes(axes.flatten()[j])\n",
    "    \n",
    "    df = df[df.update_date <= update_date_tr]\n",
    "    \n",
    "    date_initial = min(df.update_date)\n",
    "    plot_df_list = []\n",
    "    \n",
    "    for i, province in enumerate(names_province):\n",
    "        \n",
    "        ix = np.unravel_index(i, axes.shape)\n",
    "        \n",
    "        if tag == 'without':\n",
    "            df_estimation = df_estimation_list[i]\n",
    "        else:\n",
    "            df_estimation = df_mestimation_list[i]\n",
    "        \n",
    "        plot_df = df[df['province_name_en'] == province].copy()\n",
    "        # provinces reporting later than date_initial get a leading NaN row,\n",
    "        # then are forward-filled to a complete daily series\n",
    "        if date_initial - min(plot_df.update_date) != datetime.timedelta(0):\n",
    "            df_initial = pd.DataFrame([[np.nan] * len(plot_df.columns)], columns = plot_df.columns)\n",
    "            # pd.concat replaces the deprecated DataFrame.append\n",
    "            plot_df = pd.concat([df_initial, plot_df], ignore_index = True)\n",
    "            plot_df.iloc[0, plot_df.columns.get_loc('update_date')] = date_initial\n",
    "            plot_df['update_date'] =  pd.to_datetime(plot_df['update_date'])\n",
    "            plot_df.set_index('update_date', inplace=True)\n",
    "            plot_df = plot_df.resample('D').ffill().reset_index()\n",
    "            plot_df['update_date'] = plot_df['update_date'].dt.date\n",
    "        plot_df = plot_df.reset_index()\n",
    "        plot_df['SEIR'] = df_estimation['estimation']\n",
    "        if 'mestimation' in df_estimation.columns:\n",
    "            plot_df['MSEIR'] = df_estimation['mestimation']\n",
    "        \n",
    "        # hide zero counts so the filled area starts at the first reported case\n",
    "        plot_df = plot_df.replace({'cum_confirmed': {0: None}})\n",
    "        \n",
    "        plot_df_list.append(plot_df)\n",
    "        \n",
    "        # label artists on the first panel only, so the legend has one entry each\n",
    "        axes[ix].fill_between(plot_df['update_date'], plot_df['cum_confirmed'],\n",
    "                                color = palette[2], alpha = 0.6, label = 'real' if i == 0 else None)\n",
    "        axes[ix].plot(plot_df['update_date'], plot_df['SEIR'],\n",
    "                linewidth = 2, linestyle = '--', marker = '', ms = ms, color = palette[1],\n",
    "                label = 'estimation w/o migration' if i == 0 else None)\n",
    "        if 'mestimation' in df_estimation.columns:\n",
    "            axes[ix].plot(plot_df['update_date'], plot_df['MSEIR'],\n",
    "                linewidth = 2, marker = '', ms = ms, color = palette[0],\n",
    "                label = 'estimation w migration' if i == 0 else None)\n",
    "        \n",
    "        # x labels on the bottom row, y labels on the left column only\n",
    "        if i >= (n_rows - 1)*ncol:\n",
    "            axes[ix].set_xlabel('Date', fontsize = fs - 2)\n",
    "        if i % ncol == 0:\n",
    "            axes[ix].set_ylabel('Number of people', fontsize = fs - 2)\n",
    "            axes[ix].get_yaxis().set_label_coords(-0.15,0.5)\n",
    "        if i == 0:\n",
    "            axes[ix].legend(loc = 'upper left', fancybox = True, fontsize = fs - 4)\n",
    "        axes[ix].set_title(province, fontsize = fs)\n",
    "        axes[ix].set_xlim(min(df.update_date), max(df.update_date))\n",
    "        axes[ix].xaxis.set_major_locator(mdates.WeekdayLocator())\n",
    "        axes[ix].xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))\n",
    "        axes[ix].tick_params(axis = 'both', which = 'major', labelsize = fs - 4)\n",
    "        axes[ix].tick_params(axis = 'both', which = 'minor', labelsize = fs - 4)\n",
    "        \n",
    "    fig.align_ylabels(axes[:, 0])\n",
    "        \n",
    "    fig.suptitle(title, fontsize = fs + 2, y = 1.01)\n",
    "    # the file name records whether the migration term was included\n",
    "    if 'mestimation' in df_estimation.columns:\n",
    "        fig.savefig(_Figure_PATH_O_ + country + '_conf_mseir_' + str(repetition) + '.png', dpi = 400)\n",
    "    else:\n",
    "        fig.savefig(_Figure_PATH_O_ + country + '_conf_seir_' + str(repetition) + '.png', dpi = 400)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load the raw DXY case tables (city, province, domestic-province level)\n",
    "data_city, data_province, data_province_domestic = load_DXY_raw()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [],
   "source": [
    "# earliest record date and the truncation (cut-off) date used for fitting\n",
    "date_initial = min(data_province_domestic.update_date)\n",
    "date_tr = datetime.date(2020, 3, 10)  # date literals need no int() wrapping"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "# per-province fitted parameters and estimations from the independent\n",
    "# (no-migration) runs; SDE_xx presumably attaches parameter standard errors\n",
    "# (see the utils_parameters notes at the top of this notebook) -- confirm\n",
    "df_parameters_list, df_estimation_list = load_ind_simulation_raw_xx()\n",
    "df_parameters_list = [SDE_xx(df_parameters_list, i) for i in range(0, len(names_province))]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [],
   "source": [
    "# province-to-province migration network; mindex_list_real presumably holds\n",
    "# the migration-index data between date_initial and date_tr (see utils_migration)\n",
    "data_network_P2P = load_network_raw()\n",
    "mindex_list_real = matrix_P2P_all(data_network_P2P, date_initial, date_tr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [],
   "source": [
    "# optional solver kwargs, e.g. passed as kws when mmethod = 'dual_annealing'\n",
    "# (simulation_xxrecursive reads kws.get('maxiter') in that case)\n",
    "#annealing_kws = dict(maxiter = 10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# one repetition of the recursive province-by-province fit, using the real\n",
    "# migration matrices; leastsq needs no extra solver kwargs (kws = False)\n",
    "# NOTE(review): T, TT, TI, ti presumably control the time windows of\n",
    "# simulation_xxrecursive -- confirm against its definition\n",
    "rep = 1\n",
    "result, df_mestimation_list, df_mparameters_list = simulation_xxrecursive(df = data_province_domestic, \n",
    "                                                                           update_date_tr = date_tr, \n",
    "                                                                           mindex_list = mindex_list_real, \n",
    "                                                                           repetition = rep,\n",
    "                                                                           T = 21, TT = 6, TI = 3, ti = 7, \n",
    "                                                                           mmethod = 'leastsq', kws = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# plot real vs. estimated infections per province up to date_tr;\n",
    "# tag = 'with' selects the migration estimations produced above\n",
    "figure_conf_est_all(data_province_domestic, names_province, date_tr, fsize = (7, 2), ncol = 3, ms = 3, fs = 12, \n",
    "                    title = 'China: infection', country = 'China', \n",
    "                    tag = 'with', repetition = rep)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
