{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Rank3_Sreekiran.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    }
  },
  "cells": [
    {
      "metadata": {
        "id": "V_356oYpte3J",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "\"\"\"\n",
        "##################################### Creating Dataset #################################################\n",
        "\"\"\"\n",
        "import pandas as pd\n",
        "import numpy as np\n",
        "from datetime import date\n",
        "from sklearn import metrics\n",
        "from sklearn.ensemble import GradientBoostingRegressor\n",
        "import lightgbm as lgb\n",
        "from itertools import product\n",
        "from catboost import CatBoostRegressor\n",
        "\n",
        "\"\"\" Functions \"\"\"\n",
        "\n",
        "def extract_hour(duration):\n",
        "    if 'h' in duration:\n",
        "        return int(duration.split(' ')[0].strip('h'))\n",
        "    else:\n",
        "        return 0\n",
        "    \n",
        "def extract_minute(duration):\n",
        "    if len(duration.split(' '))==1:\n",
        "        if 'm' in duration:\n",
        "            return int(duration.strip('m'))\n",
        "        else:\n",
        "            return 0\n",
        "    else:\n",
        "        return int(duration.split(' ')[1].strip('m'))\n",
        "    \n",
        "def Num_stops(Total_stops):\n",
        "    if Total_stops=='non-stop':\n",
        "        return 0\n",
        "    else:\n",
        "        return int(Total_stops.split(' ')[0])\n",
        "    \n",
        "def stop(route,n):\n",
        "    stops = route.split(' → ')\n",
        "    if (len(stops)>=n+2):\n",
        "        return route.split(' → ')[n]\n",
        "    else:\n",
        "        return '0'\n",
        "\n",
        "def get_booking_day(journey_date):\n",
        "    ref_date = date(2019,3,1)\n",
        "    day = journey_date-ref_date\n",
        "    return [i.days for i in day]\n",
        "\n",
        "def get_total_duration(minutes,hours):\n",
        "    hour_minutes = hours.replace({i:i*60 for i in range(0,35)})\n",
        "    total_duration = minutes+hour_minutes\n",
        "    return total_duration\n",
        "\n",
        "def price_flag(Price):\n",
        "    if Price >0 and Price <=8000:\n",
        "        return 0\n",
        "    if Price >8000 and Price <=15000:\n",
        "        return 1\n",
        "    if Price >15000:\n",
        "        return 2\n",
        "\n",
        "def my_custom_loss_func(y_true, y_pred):\n",
        "    \"\"\"Root mean squared logarithmic error (RMSLE) between y_true and y_pred.\"\"\"\n",
        "    log_true = np.log(y_true+1)\n",
        "    log_pred = np.log(y_pred+1)\n",
        "    return np.sqrt(metrics.mean_squared_error(log_true, log_pred))\n",
        "\n",
        "\n",
        "\"\"\" External Data \"\"\"\n",
        "\n",
        "# Indian holidays inside the journey window, formatted dd/mm/yyyy to be\n",
        "# matched against the raw 'Date_of_Journey' strings in preprocess().\n",
        "National_Holidays = ['21/03/2019','17/04/2019','19/04/2019']\n",
        "# NOTE(review): '4/03/2019' and '5/06/2019' are not zero-padded; if the raw\n",
        "# Date_of_Journey strings use '04/03/2019' these entries never match - confirm.\n",
        "Restricted_Holidays = ['4/03/2019','13/04/2019','18/05/2019','5/06/2019']\n",
        "Holidays = National_Holidays+Restricted_Holidays\n",
        "\n",
        "\"\"\"\n",
        "Data Prepration and Preprocessing\n",
        "\"\"\"\n",
        "\n",
        "def preprocess(TrainData):\n",
        "    \"\"\"Feature-engineer a raw flight-fare frame (train or test).\n",
        "\n",
        "    Adds date/time parts, stop counts, total duration in minutes and a few\n",
        "    binary flags, drops rows with missing/'Not Avialable' Total_Stops, then\n",
        "    one-hot encodes the categorical columns with an 'Encoded_' prefix.\n",
        "    Returns a new DataFrame (some columns of the input are mutated in place).\n",
        "    \"\"\"\n",
        "    TrainData['Destination'].replace({'New Delhi':'Delhi'},inplace=True)\n",
        "    # 'Date_of_Journey' is a dd/mm/yyyy string -> datetime.date\n",
        "    TrainData['Journey_date'] = TrainData['Date_of_Journey'].apply(lambda x: date(*map(int, reversed(x.split(\"/\")))))\n",
        "    TrainData['Journey_weekday'] = TrainData['Journey_date'].apply(lambda x: x.weekday())\n",
        "    TrainData['Journey_month'] = TrainData['Journey_date'].apply(lambda x: x.month)\n",
        "    TrainData['Journey_day'] = TrainData['Journey_date'].apply(lambda x: x.day)\n",
        "    TrainData['Dep_Time_hour'] = TrainData['Dep_Time'].apply(lambda x: int(x.split(\":\")[0]))\n",
        "    TrainData['Arrival_Time_hour'] = TrainData['Arrival_Time'].apply(lambda x: int(x.split(\" \")[0].split(\":\")[0]))\n",
        "    TrainData['Duration_hour'] = TrainData['Duration'].apply(lambda x: extract_hour(x))\n",
        "    TrainData['Duration_minute'] = TrainData['Duration'].apply(lambda x: extract_minute(x))\n",
        "    # Drop rows whose stop count is unusable ('Not Avialable' is the literal\n",
        "    # misspelled value present in the raw data - do not 'fix' it).\n",
        "    TrainData = TrainData[TrainData['Total_Stops']!='Not Avialable']\n",
        "    TrainData = TrainData[TrainData['Total_Stops'].isnull() == False]\n",
        "    TrainData['Num_stops'] = TrainData['Total_Stops'].apply(lambda x: Num_stops(x))\n",
        "    TrainData['Booking_day'] = get_booking_day(TrainData['Journey_date'])\n",
        "    TrainData['Total_Duration'] = get_total_duration(TrainData['Duration_minute'],TrainData['Duration_hour'])\n",
        "    # An Arrival_Time longer than 'HH:MM' carries a date suffix -> next-day arrival.\n",
        "    TrainData['Overnight'] = np.where([len(x)>5 for x in TrainData['Arrival_Time']],1,0)\n",
        "    TrainData['Holiday'] = np.where(TrainData['Date_of_Journey'].isin(Holidays),1,0)\n",
        "    TrainData['Dep_slot'] = TrainData['Dep_Time_hour']\n",
        "    TrainData['Arrival_slot'] = TrainData['Arrival_Time_hour']\n",
        "    TrainData['SourceDestCombo'] = TrainData['Source']\n",
        "    TrainData['Additional_Info'] = TrainData['Additional_Info'].replace('No Info','No info')\n",
        "    TrainData['Weekday_Flag'] = TrainData['Journey_weekday'].apply(lambda x:1 if x in [4,6] else 0)\n",
        "    TrainData['luxury_flag'] = TrainData['Airline'].apply(lambda x:1 if x in ['Jet Airways Business'] else 0)\n",
        "\n",
        "#    One Hot Encoding\n",
        "    for col in ['Airline', 'SourceDestCombo', 'Additional_Info','Dep_slot','Arrival_slot','Journey_weekday']:\n",
        "        dfDummies = pd.get_dummies(TrainData[col], prefix = 'Encoded_'+col)\n",
        "        TrainData = pd.concat([TrainData, dfDummies], axis=1)\n",
        "        \n",
        "#    One Hot Encoding for Route. (An identical Stop_ creation loop used to run\n",
        "#    before the categorical encoding above as well; it was removed because this\n",
        "#    loop recreates exactly the same columns before encoding them.)\n",
        "    for i in range(1,max(TrainData['Num_stops'])+1):\n",
        "        TrainData['Stop_'+str(i)] = TrainData['Route'].apply(lambda x: stop(x,i))\n",
        "        dfDummies = pd.get_dummies(TrainData['Stop_'+str(i)],prefix = 'Encoded_Stop_'+str(i),drop_first=True)\n",
        "        TrainData = pd.concat([TrainData, dfDummies], axis=1)\n",
        "\n",
        "    return TrainData\n",
        "\n",
        "\"\"\"\n",
        "##################################### Model Training ###################################################\n",
        "\"\"\"\n",
        "\n",
        "\"\"\" Loading Data \"\"\"\n",
        "# NOTE(review): hardcoded absolute Windows paths; consider a configurable\n",
        "# DATA_DIR so the notebook runs on other machines.\n",
        "TrainData = pd.read_excel('file:///C:/Users/vadaga/Desktop/machine hack/Data_Train.xlsx')\n",
        "TrainData = preprocess(TrainData)\n",
        "TestData = pd.read_excel('file:///C:/Users/vadaga/Desktop/machine hack/Test_set.xlsx')\n",
        "TestData = preprocess(TestData)\n",
        "\n",
        "\"\"\" Subsetting Data \"\"\"\n",
        "#For One Hot Encoded\n",
        "# Feature set = every one-hot column produced by preprocess() plus the\n",
        "# numeric date/stop/duration features.\n",
        "OneHotEncodedColumns = [col for col in TrainData.columns if col.startswith('Encoded_')]\n",
        "independent = OneHotEncodedColumns + ['Booking_day','Num_stops','Total_Duration','Journey_day','Journey_month','Journey_weekday','Dep_slot','Arrival_slot']\n",
        "dependent = ['Price']\n",
        "X_train = TrainData[independent]\n",
        "# y_train is a one-column DataFrame, not a Series; some sklearn estimators\n",
        "# may warn about a column-vector y - consider .values.ravel().\n",
        "y_train = TrainData[dependent]\n",
        "\n",
        "\n",
        "\n",
        "\"\"\"RF\"\"\"\n",
        "from sklearn.ensemble import RandomForestRegressor\n",
        "rf = RandomForestRegressor(n_estimators = 2888,max_depth=24,max_features=0.646, random_state = 42)\n",
        "rf.fit(X_train, y_train)\n",
        "# Bug fix: unseen one-hot columns were filled with np.NAN, but sklearn's\n",
        "# RandomForestRegressor rejects NaN input at predict time; fill with 0\n",
        "# (column absent = category never seen), matching the GBR section below.\n",
        "for col in [col for col in independent if col not in TestData.columns]:\n",
        "     TestData[col] = 0\n",
        "y_scored = rf.predict(TestData[independent])\n",
        "\n",
        "submission = pd.DataFrame({'Price':y_scored})\n",
        "submission.to_excel('C:/Users/vadaga/Desktop/machine hack/rf_pt_1.xlsx',index=False)\n",
        "\n",
        "\"\"\" XGBoost \"\"\"\n",
        "# NOTE(review): despite the heading this is sklearn's GradientBoostingRegressor,\n",
        "# not XGBoost; loss='ls' is the legacy name for squared-error loss.\n",
        "est = GradientBoostingRegressor(max_depth=7,n_estimators=1000,max_features=0.55,\n",
        "                                learning_rate=0.1,loss='ls',subsample=1.0,random_state=0)\n",
        "est.fit(X_train, y_train)\n",
        "# The metrics below are computed on the TRAINING data, so they are optimistic;\n",
        "# use a held-out split or cross-validation for an honest estimate.\n",
        "y_pred = est.predict(X_train)\n",
        "\n",
        "print('Mean Absolute Error:', metrics.mean_absolute_error(y_train, y_pred))  \n",
        "print('Mean Squared Error:', metrics.mean_squared_error(y_train, y_pred))  \n",
        "print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))\n",
        "print('Root Mean Squared logrithmic Error:', np.sqrt(metrics.mean_squared_error(np.log(y_train+1), np.log(y_pred+1))))\n",
        "\n",
        "\"\"\"\n",
        "Model Scoring\n",
        "\"\"\"\n",
        "\n",
        "# Add any one-hot columns missing from the test set; 0 = category never seen.\n",
        "for col in [col for col in independent if col not in TestData.columns]:\n",
        "     TestData[col] = 0\n",
        "y_scored = est.predict(TestData[independent])\n",
        "\n",
        "submission = pd.DataFrame({'Price':y_scored})\n",
        "submission.to_excel('C:/Users/vadaga/Desktop/machine hack/Base3_v4.xlsx',index=False)\n",
        "\n",
        "###########################################################################################################3\n",
        "\"\"\"\n",
        "LIGHTGBM\n",
        "\"\"\"\n",
        "\n",
        "# Bug fix: 'colsample_bytree' was misspelled 'col_sample_bytree', which is not\n",
        "# a recognised LightGBM parameter or alias, so the column-subsampling setting\n",
        "# was silently ignored. (A stale commented-out params dict was also removed.)\n",
        "params = {'num_leaves':50,'learning_rate':0.051134,'n_estimators':1150,'subsample':0.79467,'colsample_bytree':0.44483,'min_data_in_leaf':1,'max_bin':445}\n",
        "train_data = lgb.Dataset(X_train, label=y_train, feature_name=independent)\n",
        "lgbm = lgb.train(params, train_data)\n",
        "\n",
        "\"\"\"\n",
        "Model Scoring\n",
        "\"\"\"\n",
        "# LightGBM treats NaN as missing by default, so filling unseen one-hot\n",
        "# columns with NaN is acceptable here (unlike the sklearn models above).\n",
        "for col in [col for col in independent if col not in TestData.columns]:\n",
        "     TestData[col] = np.NAN\n",
        "y_scored = lgbm.predict(TestData[independent])\n",
        "\n",
        "\n",
        "submission = pd.DataFrame({'Price':y_scored})\n",
        "# Bug fix: index=True wrote a spurious index column, unlike every other\n",
        "# submission file in this script (all use index=False).\n",
        "submission.to_excel('C:/Users/vadaga/Desktop/machine hack/lgbm_train.xlsx',index=False)\n",
        "\n",
        "\n",
        "### Creating Models by  tuning n_estimators  by varying from its values from 20 to 80 in steps of 10\n",
        "# NOTE(review): this block looks like pasted boilerplate and cannot run as-is:\n",
        "#  - GridSearchCV and GradientBoostingClassifier are never imported (NameError);\n",
        "#  - 'Independent'/'Dependent' are undefined (presumably X_train/y_train);\n",
        "#  - scoring='roc_auc' and a *Classifier* do not fit this regression target;\n",
        "#  - grid_scores_ was removed from sklearn searches (use cv_results_), and the\n",
        "#    'iid' argument was removed in later sklearn versions - confirm the target\n",
        "#    sklearn version before relying on this cell.\n",
        "param_test1 = {'n_estimators':np.arange(20,81,10)}\n",
        "gsearch1 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, min_samples_split=500,min_samples_leaf=50,max_depth=8,max_features='sqrt',subsample=0.8,random_state=10), \n",
        "param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)\n",
        "gsearch1.fit(Independent,Dependent)\n",
        "gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_\n",
        "\n",
        "# Small manual sweep over LightGBM params; writes one submission per combo.\n",
        "Learning_Rate = np.array([0.1])\n",
        "max_depth = np.array([8])\n",
        "max_bin = np.array([400])\n",
        "min_data_in_leaf = np.array([1])\n",
        "feature_fraction = np.array([1])\n",
        "count = 0\n",
        "for i,j,k,l,m in product(min_data_in_leaf,Learning_Rate,max_depth,feature_fraction,max_bin):\n",
        "    # Bug fix: the learning rate j was iterated and written into the file name\n",
        "    # but never passed to LightGBM; it now goes into params.\n",
        "    params = {'feature_fraction': l, 'max_bin': m, 'max_depth': k, 'min_data_in_leaf': i,'learning_rate': j,'bagging_fraction':1,'num_iterations':1000}\n",
        "    lgbm = lgb.train(params, train_data)\n",
        "    # NaN fill is fine for LightGBM (treats NaN as missing by default).\n",
        "    for col in [col for col in independent if col not in TestData.columns]:\n",
        "        TestData[col] = np.NAN\n",
        "    y_scored = lgbm.predict(TestData[independent])\n",
        "    submission = pd.DataFrame({'Price':y_scored})\n",
        "    file_name = 'Base'+ 'x' + str(l) + 'x' + str(m) + 'x' + str(k) + 'x' + str(i) + 'x' + str(j)\n",
        "    count = count+1\n",
        "    print(count)\n",
        "    submission.to_excel('C:/Users/vadaga/Desktop/machine hack/grid_submissions_1/' + file_name + '.xlsx',index=False)\n",
        "\n",
        "#######################################################################################################################3\n",
        "# NOTE(review): y_test below is loaded from a previous *submission* file and\n",
        "# then used as eval labels; X_test is never defined in this script, so\n",
        "# model.fit raises NameError - confirm where the eval split was meant to come from.\n",
        "y_test = pd.read_excel('C:/Users/vadaga/Desktop/machine hack/Mean_V3.xlsx')\n",
        "\n",
        "\"\"\"CATBOOST\"\"\"\n",
        "\n",
        "# Duplicate definition: my_custom_loss_func is already defined near the top of\n",
        "# the script; this copy shadows it and is unused in this section.\n",
        "def my_custom_loss_func(y_true, y_pred):\n",
        "    # RMSLE: sqrt of the MSE of log(x+1)-transformed values\n",
        "    msle = metrics.mean_squared_error(np.log(y_true+1), np.log(y_pred+1))\n",
        "    return np.sqrt(msle)\n",
        "\n",
        "model=CatBoostRegressor(iterations=2000, depth=8, learning_rate=0.01, loss_function = 'RMSE',l2_leaf_reg = 3,border_count=5)\n",
        "model.fit(X_train, y_train,eval_set=(X_test,y_test),plot=True)\n",
        "\n",
        "# Add any one-hot columns missing from the test set before scoring.\n",
        "for col in [col for col in independent if col not in TestData.columns]:\n",
        "     TestData[col] = np.NAN\n",
        "y_scored = model.predict(TestData[independent])\n",
        "\n",
        "submission = pd.DataFrame({'Price':y_scored})\n",
        "submission.to_excel('C:/Users/vadaga/Desktop/machine hack/Base4_V3.xlsx',index=False)\n",
        "\n",
        "\"\"\" MLPRegressor \"\"\"\n",
        "\n",
        "# Bug fix: MLPRegressor was never imported anywhere in this script, so this\n",
        "# section raised NameError.\n",
        "from sklearn.neural_network import MLPRegressor\n",
        "\n",
        "model = MLPRegressor(hidden_layer_sizes=(75,75,75),max_iter=1500,learning_rate_init = 0.01)\n",
        "model.fit(X_train,y_train)\n",
        "\n",
        "# 0-fill missing one-hot columns: sklearn models reject NaN input.\n",
        "for col in [col for col in independent if col not in TestData.columns]:\n",
        "     TestData[col] = 0\n",
        "y_scored = model.predict(TestData[independent])\n",
        "submission = pd.DataFrame({'Price':y_scored})\n",
        "submission.to_excel('Submissions/MLPR_14.xlsx',index=False)\n",
        "\n",
        "################################################ Parameter Tunning ##################################################\n",
        "\"\"\" GridSearchCV \"\"\"\n",
        "# Grid Search for Algorithm Tuning\n",
        "from scipy.stats import uniform, randint\n",
        "from sklearn.model_selection import RandomizedSearchCV\n",
        "\n",
        "\"\"\" Custom Scorer \"\"\"\n",
        "from sklearn.metrics import make_scorer\n",
        "# NOTE(review): duplicate definition - my_custom_loss_func already exists near\n",
        "# the top of the script; this copy shadows it.\n",
        "def my_custom_loss_func(y_true, y_pred):\n",
        "    \"\"\"RMSLE: root of the MSE of log(x+1)-transformed true/predicted values.\"\"\"\n",
        "    msle = metrics.mean_squared_error(np.log(y_true+1), np.log(y_pred+1))\n",
        "    return np.sqrt(msle)\n",
        "score = make_scorer(my_custom_loss_func, greater_is_better=False)\n",
        "\n",
        "\"\"\" Hyperparam grid \"\"\"\n",
        "# Random-search distributions for LGBMRegressor.\n",
        "num_leaves = randint(10,1000)\n",
        "learning_rate = uniform(loc=0.01, scale=0.3)\n",
        "n_estimators = randint(100,2000)\n",
        "subsample = uniform(loc=0.5, scale=0.5)\n",
        "colsample_bytree = uniform(loc=0.5, scale=0.5)\n",
        "max_depth = randint(2,10)\n",
        "#feature_fraction = uniform(0,1)\n",
        "min_data_in_leaf = randint(1,50)\n",
        "max_bin = randint(20,500)\n",
        "# NOTE(review): max_bin is defined above but not included in the searched\n",
        "# hyperparameters below.\n",
        "hyperparameters = dict(num_leaves=num_leaves,learning_rate=learning_rate,n_estimators=n_estimators,subsample=subsample,\n",
        "                       colsample_bytree=colsample_bytree,max_depth = max_depth,min_data_in_leaf=min_data_in_leaf)\n",
        "\n",
        "# Create and fit a XGBoost model, testing each parameter\n",
        "# NOTE(review): the comment above says XGBoost but the model is LightGBM, and\n",
        "# n_iter=2 samples only two candidates - a smoke-test setting.\n",
        "model = lgb.LGBMRegressor()\n",
        "randgrid = RandomizedSearchCV(model, hyperparameters, random_state=1, n_iter=2, cv=3, n_jobs=-1,scoring=score)\n",
        "randgrid.fit(X_train, y_train)\n",
        "\n",
        "# summarize the results of the grid search\n",
        "# NOTE(review): feature_fraction and max_bin were not searched, so those\n",
        "# prints show defaults - and they may not even be LGBMRegressor constructor\n",
        "# params, in which case get_params() raises KeyError; confirm.\n",
        "print('Best num_leaves:', randgrid.best_estimator_.get_params()['num_leaves'])\n",
        "print('Best learning_rate:', randgrid.best_estimator_.get_params()['learning_rate'])\n",
        "print('Best n_estimators:', randgrid.best_estimator_.get_params()['n_estimators'])\n",
        "print('Best subsample:', randgrid.best_estimator_.get_params()['subsample'])\n",
        "print('Best colsample_bytree:', randgrid.best_estimator_.get_params()['colsample_bytree'])\n",
        "print('Best max_depth:', randgrid.best_estimator_.get_params()['max_depth'])\n",
        "print('Best feature_fraction:', randgrid.best_estimator_.get_params()['feature_fraction'])\n",
        "print('Best min_data_in_leaf:', randgrid.best_estimator_.get_params()['min_data_in_leaf'])\n",
        "print('Best max_bin:', randgrid.best_estimator_.get_params()['max_bin'])\n",
        "print(randgrid)\n",
        "\n",
        "def report(results, n_top=3):\n",
        "    \"\"\"Print the top n_top parameter sets from a CV results dict.\"\"\"\n",
        "    for i in range(1, n_top + 1):\n",
        "        candidates = np.flatnonzero(results['rank_test_score'] == i)\n",
        "        for candidate in candidates:\n",
        "            print(\"Model with rank: {0}\".format(i))\n",
        "            print(\"Mean validation score: {0:.3f} (std: {1:.3f})\".format(\n",
        "                  results['mean_test_score'][candidate],\n",
        "                  results['std_test_score'][candidate]))\n",
        "            print(\"Parameters: {0}\".format(results['params'][candidate]))\n",
        "            print(\"\")\n",
        "\n",
        "report(randgrid.cv_results_)\n",
        "\n",
        "###################################### LGBM TUNING ##########################################################\n",
        "\"\"\" RandomSearchCV \"\"\"\n",
        "# NOTE(review): the imports, scorer and report() below duplicate the\n",
        "# 'Parameter Tunning' section above.\n",
        "from scipy.stats import uniform, randint\n",
        "from sklearn.model_selection import RandomizedSearchCV\n",
        "\n",
        "\"\"\" Custom Scorer \"\"\"\n",
        "from sklearn.metrics import make_scorer\n",
        "def my_custom_loss_func(y_true, y_pred):\n",
        "    \"\"\"RMSLE: root of the MSE of log(x+1)-transformed true/predicted values.\"\"\"\n",
        "    msle = metrics.mean_squared_error(np.log(y_true+1), np.log(y_pred+1))\n",
        "    return np.sqrt(msle)\n",
        "score = make_scorer(my_custom_loss_func, greater_is_better=False)\n",
        "\n",
        "\"\"\" Hyperparam grid \"\"\"\n",
        "n_estimators = randint(800,1300)\n",
        "min_data_in_leaf = randint(1,3)\n",
        "max_bin = randint(20,600)\n",
        "feature_fraction = uniform(loc=0.01,scale=0.9)\n",
        "max_depth = randint(1,10)\n",
        "subsample = uniform(loc=0.1,scale=0.8)\n",
        "colsample_bytree = uniform(loc=0.45,scale=0.7)\n",
        "# NOTE(review): colsample_bytree and feature_fraction appear to be aliases of\n",
        "# the same LightGBM setting - confirm which one takes effect when both are set.\n",
        "hyperparameters = dict(n_estimators=n_estimators,subsample=subsample,\n",
        "                       colsample_bytree=colsample_bytree,min_data_in_leaf=min_data_in_leaf,max_bin=max_bin,feature_fraction=feature_fraction,\n",
        "                       max_depth = max_depth\n",
        "                      )\n",
        "\n",
        "\"\"\" Create and fit the model, testing each parameter \"\"\"\n",
        "model = lgb.LGBMRegressor()\n",
        "randgrid = RandomizedSearchCV(model, hyperparameters, random_state=1, n_iter=300, cv=3, n_jobs=-1, scoring=score)\n",
        "randgrid.fit(X_train, y_train)\n",
        "\n",
        "\"\"\"\n",
        "################################## Summarize the results of the grid search #########################################\n",
        "\"\"\"\n",
        "\n",
        "# NOTE(review): num_leaves and learning_rate were not searched here, so those\n",
        "# two prints show estimator defaults.\n",
        "print('Best num_leaves:', randgrid.best_estimator_.get_params()['num_leaves'])\n",
        "print('Best learning_rate:', randgrid.best_estimator_.get_params()['learning_rate'])\n",
        "print('Best n_estimators:', randgrid.best_estimator_.get_params()['n_estimators'])\n",
        "print('Best subsample:', randgrid.best_estimator_.get_params()['subsample'])\n",
        "print('Best colsample_bytree:', randgrid.best_estimator_.get_params()['colsample_bytree'])\n",
        "print('Best min_data_in_leaf:', randgrid.best_estimator_.get_params()['min_data_in_leaf'])\n",
        "print('Best max_bin:', randgrid.best_estimator_.get_params()['max_bin'])\n",
        "print('Best max_depth:', randgrid.best_estimator_.get_params()['max_depth'])\n",
        "print('Best feature_fraction:', randgrid.best_estimator_.get_params()['feature_fraction'])\n",
        "print(randgrid)\n",
        "\n",
        "# Duplicate of the report() defined in the previous tuning section.\n",
        "def report(results, n_top=3):\n",
        "    \"\"\"Print the top n_top parameter sets from a CV results dict.\"\"\"\n",
        "    for i in range(1, n_top + 1):\n",
        "        candidates = np.flatnonzero(results['rank_test_score'] == i)\n",
        "        for candidate in candidates:\n",
        "            print(\"Model with rank: {0}\".format(i))\n",
        "            print(\"Mean validation score: {0:.3f} (std: {1:.3f})\".format(\n",
        "                  results['mean_test_score'][candidate],\n",
        "                  results['std_test_score'][candidate]))\n",
        "            print(\"Parameters: {0}\".format(results['params'][candidate]))\n",
        "            print(\"\")\n",
        "\n",
        "report(randgrid.cv_results_)\n",
        "# Bug fix: np.save was called without the array argument (TypeError at\n",
        "# runtime). cv_results_ is a dict, so np.load will need allow_pickle=True\n",
        "# to read it back.\n",
        "np.save('RandomSearchCVResultsLGBMR.npy', randgrid.cv_results_)\n",
        "\n",
        "\n",
        "###############################################################################################\n",
        "\"\"\"STACKING & ENSEMLING MODEL PREDICTIONS BASED ON CORRELATION & ACCURACY\"\"\"\n",
        "\n",
        "##CORRELATION##\n",
        "# Correlation between two submission files; accurate but weakly-correlated\n",
        "# models are the best candidates for averaging.\n",
        "pred_1 = pd.read_excel('file:///C:/Users/vadaga/Desktop/machine hack/mean/Mean_2_V2.xlsx')\n",
        "pred_2 = pd.read_excel('file:///C:/Users/vadaga/Desktop/machine hack/mean/RF.xlsx')\n",
        "\n",
        "np.corrcoef(pred_1['Price'],pred_2['Price'])\n",
        "\n",
        "##STACKING\n",
        "# Simple (unweighted) average of every submission file in the 'mean' folder.\n",
        "import os\n",
        "files = os.listdir('C:/Users/vadaga/Desktop/machine hack/mean')\n",
        "# NOTE(review): file_names starts as a list of name strings but the loop below\n",
        "# overwrites each entry with a DataFrame - confusing reuse of one variable.\n",
        "file_names = [file.split('.')[0] for file in files]\n",
        "path = 'C:/Users/vadaga/Desktop/machine hack/mean/'\n",
        "df = pd.DataFrame()\n",
        "for i in range(len(files)):\n",
        "    file_names[i] = pd.read_excel(path + files[i])\n",
        "    df['Price'+str(i)] = file_names[i]['Price']\n",
        "# Row-wise mean across all submissions' Price columns.\n",
        "df['mean'] = df.mean(axis=1)\n",
        "\n",
        "mean_price = pd.DataFrame({'Price':df['mean']})\n",
        "mean_price.to_excel('C:/Users/vadaga/Desktop/machine hack/best_stack_5.xlsx',index=False)\n",
        "############################################################################################################\n",
        "\"\"\"Weighted Stacking\"\"\"\n",
        "\n",
        "import os\n",
        "files = os.listdir('C:/Users/vadaga/Desktop/machine hack/mean')\n",
        "file_names = [file.split('.')[0] for file in files]\n",
        "path = 'C:/Users/vadaga/Desktop/machine hack/mean/'\n",
        "df = pd.DataFrame()\n",
        "# Per-file blend weights; assumes the folder holds exactly len(w) files\n",
        "# (otherwise the loop raises IndexError).\n",
        "w = [2,1,1]\n",
        "for i in range(len(files)):\n",
        "    file_names[i] = pd.read_excel(path + files[i])\n",
        "    df['Price'+str(i)] = file_names[i]['Price']*w[i]\n",
        "# Bug fix: a weighted average must divide by sum(w); df.mean(axis=1) divided\n",
        "# by the number of columns instead, scaling every blended price by\n",
        "# len(w)/sum(w).\n",
        "df['mean'] = df.sum(axis=1) / sum(w)\n",
        "mean_price = pd.DataFrame({'Price':df['mean']})\n",
        "mean_price.to_excel('C:/Users/vadaga/Desktop/machine hack/best_stack_655.xlsx',index=False)\n",
        "\n",
        "###########################################################################################################3\n"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}
