{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Libraries and functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.preprocessing import MinMaxScaler, StandardScaler\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from colored import fore, back, style, fg, bg, attr\n",
    "\n",
    "pd.set_option(\"display.max_rows\",    20000)\n",
    "pd.set_option(\"display.max_columns\", 20000)\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run identifier — presumably used to tag output artifacts; not referenced\n",
    "# in the visible cells, confirm downstream usage.\n",
    "file_name3 = 'SS046'\n",
    "file_name   = file_name3 # ref file SSFeb07c.ipynb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "\"printd\" ready                                                                                                                 \n",
      "===============================================================================================================================\n"
     ]
    }
   ],
   "source": [
    "demarc = '='*127  # horizontal rule used by printd; also defines the padded banner width\n",
    "seed = 42  # global random seed, read by model_it_rf below\n",
    "def printd(string):\n",
    "    '''\n",
    "    Print `string` framed by demarcation lines above and below, right-padded to\n",
    "    the width of `demarc`, and colored (dark text on a light background, bold)\n",
    "    via the `colored` package, so section banners stand out in the output.\n",
    "    '''\n",
    "    # Right-pad to the rule width; a negative count simply yields an empty\n",
    "    # string in Python, so strings longer than demarc are safe (just unpadded).\n",
    "    blanks = len(demarc) - len(string)\n",
    "    color = fg('#02314a') + bg('#edffe3') + attr('bold')\n",
    "    res = attr('reset')\n",
    "    print (color +\n",
    "       (demarc+'\\n' + string + ' '*blanks + '\\n'+demarc)\n",
    "       + res)\n",
    "############################################   END OF FUNCTION   ##################################\n",
    "printd('\"printd\" ready')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "\"feature_generator\" ready                                                                                                      \n",
      "===============================================================================================================================\n"
     ]
    }
   ],
   "source": [
    "def feature_generator(df): # ABBREVIATED VERSION\n",
    "    '''\n",
    "    Derive extra features from the columns of `df`: log and sqrt transforms of\n",
    "    single columns, then pairwise +, -, *, / combinations of zero-free columns.\n",
    "    NOTE: new columns are added to `df` IN PLACE (the caller's frame is\n",
    "    mutated); the returned frame additionally drops any column containing NaN.\n",
    "    '''\n",
    "    col_list = list(df.columns)\n",
    "    n_pre = len(col_list)\n",
    "    \n",
    "    # Log features (only for columns with no zeros, avoiding log(0))\n",
    "    for p in col_list:\n",
    "        if df[p].isin([0]).sum(axis=0) == 0:\n",
    "            try:\n",
    "                feature_name        = 'log('     +   p  + ')'  # log of the column\n",
    "                df[feature_name]    = np.log(df[p])\n",
    "            except:\n",
    "                pass  # best effort: skip columns where log fails (e.g. non-numeric)\n",
    "        \n",
    "    # Sqrt features\n",
    "    for p in col_list:\n",
    "        try:\n",
    "            feature_name            = 'sqrt('     +   p  + ')'  # sqrt of the column\n",
    "            df[feature_name]        = np.sqrt(df[p])\n",
    "        except:\n",
    "            pass  # best effort: skip columns where sqrt fails\n",
    "     \n",
    "    # Pairwise combinations over ALL columns so far (including the new log/sqrt\n",
    "    # ones); zero-containing columns are excluded on both sides so '/' is safe.\n",
    "    col_list = list(df.columns)\n",
    "    for p in col_list:\n",
    "        idx = col_list.index(p)\n",
    "        if df[p].isin([0]).sum(axis=0) == 0:\n",
    "            for q in range(idx+1, len(col_list)):\n",
    "                if df[col_list[q]].isin([0]).sum(axis=0) == 0:           \n",
    "                    feature_name_1      = p            +    '+'   +  col_list[q] # plus\n",
    "                    feature_name_2      = p            +    '-'   +  col_list[q] # minus\n",
    "                    feature_name_3      = col_list[q]  +    '*'   +  p           # times\n",
    "                    feature_name_4      = p            +    '/'   +  col_list[q] # divide\n",
    "\n",
    "                    df[feature_name_1]  = df[p]             +        df[col_list[q]]\n",
    "                    df[feature_name_2]  = df[p]             -        df[col_list[q]]\n",
    "                    df[feature_name_3]  = df[col_list[q]]   *        df[p]           \n",
    "                    df[feature_name_4]  = df[p]             /        df[col_list[q]]\n",
    "    \n",
    "    # Drop any column containing NaN (e.g. log of a negative value).\n",
    "    df = df.dropna(axis=1)\n",
    "    \n",
    "    print('Number of columns before feature generation: ',n_pre)\n",
    "    print('Number of columns after  feature generation: ',df.shape[1])\n",
    "    return df\n",
    "######################################################################################################\n",
    "printd('\"feature_generator\" ready')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "\"PolynomialFeatures_labeled\" ready                                                                                             \n",
      "===============================================================================================================================\n"
     ]
    }
   ],
   "source": [
    "def PolynomialFeatures_labeled(input_df,power):\n",
    "    '''\n",
    "    Fit sklearn's PolynomialFeatures(degree=power) on `input_df` and return the\n",
    "    expanded frame with human-readable column labels (e.g. 'a', 'a^2', 'a x b')\n",
    "    built from the fitted transformer's powers_ matrix.  The constant (bias)\n",
    "    term is dropped before returning.\n",
    "    '''\n",
    "    from sklearn.preprocessing import PolynomialFeatures\n",
    "    poly = PolynomialFeatures(power)\n",
    "    output_nparray = poly.fit_transform(input_df)\n",
    "    # powers_[r][i] = exponent of input feature i in output feature r.\n",
    "    powers_nparray = poly.powers_\n",
    "\n",
    "    input_feature_names = list(input_df.columns)\n",
    "    target_feature_names = [\"Constant Term\"] # keep for now\n",
    "    # Skip row 0 (the constant term) and build a label for each output column.\n",
    "    for feature_distillation in powers_nparray[1:]:\n",
    "        intermediary_label = \"\"\n",
    "        final_label = \"\"\n",
    "        for i in range(len(input_feature_names)):\n",
    "            if feature_distillation[i] == 0:\n",
    "                continue\n",
    "            else:\n",
    "                variable = input_feature_names[i]\n",
    "                # NOTE: reuses the name 'power' for the per-feature exponent,\n",
    "                # shadowing the function parameter (already consumed above).\n",
    "                power = feature_distillation[i]\n",
    "                if power>1:\n",
    "                    intermediary_label = \"%s^%d\" % (variable,power)\n",
    "                if power==1:\n",
    "                    intermediary_label = \"%s\" % (variable)    \n",
    "                if final_label == \"\":\n",
    "                    final_label = intermediary_label\n",
    "                else:\n",
    "                    final_label = final_label + \" x \" + intermediary_label\n",
    "        target_feature_names.append(final_label)\n",
    "    output_df = pd.DataFrame(output_nparray, columns = target_feature_names)\n",
    "    output_df = output_df.drop([\"Constant Term\"], axis = 1)\n",
    "    return output_df\n",
    "\n",
    "printd('\"PolynomialFeatures_labeled\" ready')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "\"secret_sauce\" ready                                                                                                           \n",
      "===============================================================================================================================\n"
     ]
    }
   ],
   "source": [
    "def secret_sauce(preds):\n",
    "    '''\n",
    "    Overwrite predictions for test rows that 'fuzzily' match a training row.\n",
    "    Rows are matched by equality after dividing 'time' by 1000 and rounding\n",
    "    every column to 0 decimals; a matched test row's probabilities are replaced\n",
    "    by a one-hot vector on the training label (shifted down by 1 for labels > 2).\n",
    "    Returns the corrected predictions as a DataFrame.\n",
    "    '''\n",
    "    \n",
    "    temp = preds.copy()\n",
    "    submission = pd.DataFrame(temp) \n",
    "    # NOTE(review): capital-T filenames, unlike 'train.csv'/'test.csv' read\n",
    "    # elsewhere in the notebook — confirm both spellings point to the same data.\n",
    "    train = pd.read_csv(\"Train.csv\").drop_duplicates()\n",
    "    hack_data = pd.read_csv(\"Test.csv\")\n",
    "    \n",
    "    # Coarsen 'time' before rounding so near-identical rows compare equal.\n",
    "    train['time']     = train['time']/1000\n",
    "    hack_data['time'] = hack_data['time']/1000\n",
    "\n",
    "    # Preserve original row positions via reset_index, then match rounded rows.\n",
    "    indexed_train = train.reset_index().round(0)\n",
    "    indexed_hack = hack_data.reset_index().round(0)\n",
    "    merge_on = list(hack_data.columns)\n",
    "    common = pd.merge(indexed_train,indexed_hack, how='inner', on = merge_on)\n",
    "    print(f'Found {len(common)} fuzzy matches. Applying corrections now.')\n",
    "\n",
    "    # index_x = train row, index_y = test row, popularity = train label.\n",
    "    my_df = common.drop_duplicates()[['index_x', 'index_y', 'popularity']]\n",
    "    # (bare 'my_df' below is a leftover display statement; it has no effect)\n",
    "    my_df\n",
    "    # Map: test row index -> [train row index, (possibly shifted) label].\n",
    "    my_dict = dict()\n",
    "\n",
    "    for idx in range(my_df.shape[0]):\n",
    "        key = my_df.iloc[idx,1]\n",
    "        value = [my_df.iloc[idx,0], my_df.iloc[idx,2]]\n",
    "        # Labels above 2 are shifted down by one — presumably a known\n",
    "        # train/test label offset; TODO confirm against the data description.\n",
    "        if value[1]>2:\n",
    "            value[1] = value[1]-1\n",
    "        my_dict[key] = value\n",
    "    \n",
    "    # Hardcoded 5 classes: zero out columns 0..4 then set the matched label.\n",
    "    for i in range(len(submission)):\n",
    "        try:\n",
    "            hot = my_dict[i][1]\n",
    "            submission.iloc[i,0] = 0\n",
    "            submission.iloc[i,1] = 0\n",
    "            submission.iloc[i,2] = 0\n",
    "            submission.iloc[i,3] = 0\n",
    "            submission.iloc[i,4] = 0\n",
    "            submission.iloc[i,hot] = 1\n",
    "        except:\n",
    "            pass  # KeyError: row i has no fuzzy match; keep the model's prediction\n",
    "    return submission\n",
    "printd('\"secret_sauce\" ready')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "\"prediction_merger\" ready                                                                                                      \n",
      "===============================================================================================================================\n"
     ]
    }
   ],
   "source": [
    "def prediction_merger(pred_list):\n",
    "    '''\n",
    "    Merge a list of 5-class probability DataFrames (columns 0..4) into one:\n",
    "    for each class take the MEDIAN across the input predictions, then rescale\n",
    "    each row so the five class probabilities sum to 1.\n",
    "    '''\n",
    "    total_files = len(pred_list)  \n",
    "    # Lay every input's class columns side by side as df<i>_class_<c>, so that\n",
    "    # prediction `file`'s class c sits at flat column index file*5 + c.\n",
    "    combo_df = pd.DataFrame()\n",
    "    for i, p in enumerate(pred_list,1):\n",
    "        data = p.copy()\n",
    "        for c in [0,1,2,3,4]:\n",
    "            combo_df['df' + str(i)+ '_class_'+str(c)] = data[c]\n",
    "    del data  # only releases the LAST loop copy; earlier copies were already unreferenced\n",
    "    # (banner is printed before the median/normalization below actually run)\n",
    "    printd(str(total_files)+' predictions merged and normalized.')\n",
    "    #################################\n",
    "    cols = [0,1,2,3,4]\n",
    "    merged = pd.DataFrame(columns = cols)\n",
    "    for merged_col in cols:\n",
    "        col_names  = []\n",
    "        for file in range(total_files):\n",
    "            col_number = (file*5 + merged_col)  # class `merged_col` of prediction `file`\n",
    "            col_names.append(combo_df.columns[col_number])\n",
    "        merged[merged_col] = combo_df[col_names].median(axis = 1)\n",
    "    # Per-class medians need not sum to 1 across a row — rescale each row.\n",
    "    normalized = merged.div(merged.sum(axis=1), axis=0)   \n",
    "    return normalized\n",
    "printd('\"prediction_merger\" ready')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "\"one_hot_encoder\" ready.                                                                                                       \n",
      "===============================================================================================================================\n"
     ]
    }
   ],
   "source": [
    "def one_hot_encoder(X, hack, basic = True, dropcol = False):\n",
    "    '''\n",
    "    One-hot encode the 'Category_1' column of the train (X) and test (hack)\n",
    "    feature frames.\n",
    "    basic=True  : each of the 12 category codes maps to its own letter (A..L).\n",
    "    basic=False : 'adjusted' scheme — several codes are pooled into 'Z',\n",
    "                  reducing the number of dummy columns.\n",
    "    dropcol=True: drop 'Category_1' entirely and return early (no encoding is\n",
    "                  performed, so 'basic' is then irrelevant).\n",
    "    Returns the pair (encoded X, encoded hack).\n",
    "    '''\n",
    "    basic_replace_dict    = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E',\n",
    "                             5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J',\n",
    "                            10:'K', 11:'L'}\n",
    "    adjusted_replace_dict = {0: 'A',\n",
    "                             1: 'Z',\n",
    "                             2: 'B',\n",
    "                             3: 'C',\n",
    "                             4: 'Z',\n",
    "                             5: 'E',\n",
    "                             6: 'F',\n",
    "                             7: 'Z',\n",
    "                             8: 'Z',\n",
    "                             9: 'J',\n",
    "                             10:'Z',\n",
    "                             11:'Z'}\n",
    "   \n",
    "    nature     = 'Adjusted'\n",
    "    rd         = adjusted_replace_dict\n",
    "    if basic:\n",
    "        rd     = basic_replace_dict\n",
    "        nature = 'Basic'\n",
    "        \n",
    "    # THE DATA — copies so the caller's frames are never mutated\n",
    "    X_rep                  = X.copy()\n",
    "    hack_rep               = hack.copy()\n",
    "        \n",
    "    # THE DROP   \n",
    "    if dropcol:\n",
    "        X_rep              = X_rep.drop('Category_1', axis = 1)\n",
    "        hack_rep           = hack_rep.drop('Category_1', axis = 1)\n",
    "        printd('Column 1 dropped.')\n",
    "        print('X shape:    ',X_rep.shape)\n",
    "        print('hack shape: ',hack_rep.shape)\n",
    "        return X_rep, hack_rep \n",
    "        \n",
    "    # THE ENCODING — relabel the codes, then expand into dummy columns\n",
    "    oneHotCols             = ['Category_1']\n",
    "    X_rep['Category_1']    = X_rep['Category_1'].replace(rd)\n",
    "    hack_rep['Category_1'] = hack_rep['Category_1'].replace(rd)\n",
    "    X_onehot               =pd.get_dummies(X_rep, columns=oneHotCols)\n",
    "    hack_onehot            =pd.get_dummies(hack_rep, columns=oneHotCols)\n",
    "    printd(nature+' one hot encoding completed.')\n",
    "    print('X_onehot shape:    ',X_onehot.shape)\n",
    "    print('hack_onehot shape: ',hack_onehot.shape)\n",
    "    \n",
    "          \n",
    "    # Element-wise comparison: 0 differing positions means the dummy columns\n",
    "    # of train and test line up (categories present in both frames).\n",
    "    if np.sum(X_onehot.columns != hack_onehot.columns) == 0:\n",
    "        print('All columns match')\n",
    "    else:\n",
    "        print('Some mismatch, please investigate')\n",
    "    return X_onehot, hack_onehot\n",
    "printd('\"one_hot_encoder\" ready.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# default model_params for all sets - change per set later;\n",
    "# these are module-level globals read by model_it_rf at call time.\n",
    "n_est = 5000  # number of trees\n",
    "m_dep = 36    # maximum tree depth"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "\"model_it_rf\" ready.                                                                                                           \n",
      "===============================================================================================================================\n"
     ]
    }
   ],
   "source": [
    "# Default hyper-parameters for model_it_rf; SET cells copy this dict and override entries.\n",
    "parameters = dict({ 'criterion':'gini',\n",
    "                    'min_samples_split':2,\n",
    "                    'min_samples_leaf':1,\n",
    "                    'min_weight_fraction_leaf':0.0,\n",
    "                    'max_features':'auto',\n",
    "                    'max_leaf_nodes':None,\n",
    "                    'min_impurity_decrease':0.0,\n",
    "                    'min_impurity_split':None,\n",
    "                    'bootstrap':True,\n",
    "                    'warm_start':False,\n",
    "                    'class_weight':None,\n",
    "                    'ccp_alpha':0.0,\n",
    "                    'max_samples':None })\n",
    "\n",
    "\n",
    "def model_it_rf(features, target, hack_features, params = parameters):\n",
    "    '''\n",
    "    Standardized Random Forest model.\n",
    "    Fits on (features, target) using the global n_est (trees), m_dep (max\n",
    "    depth) and seed, plus the tunables in `params`, then predicts class\n",
    "    probabilities for hack_features and returns them as a DataFrame\n",
    "    (one column per class).\n",
    "    '''\n",
    "    printd(f'Fitting Random Forest with {n_est} trees, with maximum depth of {m_dep}.')\n",
    "    model = RandomForestClassifier(n_estimators  = n_est,\n",
    "                                    max_depth    = m_dep, \n",
    "                                    n_jobs       = -1,\n",
    "                                    random_state = seed,\n",
    "                                    verbose      =0, \n",
    "                                    # params henceforth\n",
    "                                    # BUGFIX: 'criterion' was not forwarded before, so\n",
    "                                    # per-set overrides (e.g. 'entropy') were silently ignored.\n",
    "                                    criterion=params['criterion'],\n",
    "                                    min_samples_split   =params['min_samples_split'],\n",
    "                                    min_samples_leaf=params['min_samples_leaf'],\n",
    "                                    min_weight_fraction_leaf=params['min_weight_fraction_leaf'],\n",
    "                                    max_features=params['max_features'],\n",
    "                                    max_leaf_nodes=params['max_leaf_nodes'],\n",
    "                                    min_impurity_decrease=params['min_impurity_decrease'],\n",
    "                                    min_impurity_split=params['min_impurity_split'],\n",
    "                                    bootstrap=params['bootstrap'],\n",
    "                                    warm_start=params['warm_start'],\n",
    "                                    class_weight=params['class_weight'],\n",
    "                                    ccp_alpha=params['ccp_alpha'],\n",
    "                                    max_samples=params['max_samples'])    \n",
    "    model.fit(features, target)\n",
    "    print('fitted, now predicting')\n",
    "    preds = model.predict_proba(hack_features)\n",
    "    print('done')\n",
    "    del model  # free the (large) fitted forest before returning\n",
    "    return pd.DataFrame(preds)\n",
    "printd('\"model_it_rf\" ready.')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Reading and Scaling Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the deduplicated training data and the unlabeled test ('hack') data.\n",
    "# NOTE(review): secret_sauce reads \"Train.csv\"/\"Test.csv\" (capital T) — confirm\n",
    "# the two spellings refer to the same files on this filesystem.\n",
    "train  = pd.read_csv('train.csv').drop_duplicates()\n",
    "hack_initial   = pd.read_csv('test.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split into features and target ('popularity'), then free the combined frame.\n",
    "X_initial = train.drop(['popularity'], axis = 1).reset_index(drop=True)\n",
    "y_initial = train['popularity'].reset_index(drop=True)\n",
    "del train  # no longer needed; reclaim memory"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 (15285, 11)\n"
     ]
    }
   ],
   "source": [
    "# Standardize numeric columns. The scaler is fit on TRAIN ONLY and the same\n",
    "# transform is applied to the test data (no test-set leakage into the scaling).\n",
    "scaler             = StandardScaler()\n",
    "\n",
    "# Categorical columns are excluded from scaling and re-attached unscaled.\n",
    "cols_to_scale      = list(X_initial.columns)\n",
    "cols_to_scale.remove('Category_1')\n",
    "cols_to_scale.remove('Category_2')\n",
    "\n",
    "X_sc               = scaler.fit_transform(X_initial[cols_to_scale])\n",
    "X_scaled           = pd.DataFrame(X_sc, columns = cols_to_scale)\n",
    "X_scaled['Category_1'] = X_initial['Category_1']\n",
    "X_scaled['Category_2'] = X_initial['Category_2']\n",
    "\n",
    "hd_                = scaler.transform(hack_initial[cols_to_scale])\n",
    "hack_scaled        = pd.DataFrame(hd_, columns = cols_to_scale)\n",
    "hack_scaled['Category_1'] = hack_initial['Category_1']\n",
    "hack_scaled['Category_2'] = hack_initial['Category_2']\n",
    "\n",
    "# 0 means train/test column orders match; the train shape is shown for reference.\n",
    "print(np.sum(X_scaled.columns!=hack_scaled.columns), X_scaled.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SET 1\n",
    "\n",
    "**Basic RF with basic one-hot encoding**\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SET 1 hyper-parameters: more trees, unlimited depth, balanced class weights.\n",
    "n_est = 10000\n",
    "m_dep = None  \n",
    "params = parameters.copy()\n",
    "params['class_weight'] = 'balanced_subsample'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Basic one hot encoding completed.                                                                                              \n",
      "===============================================================================================================================\n",
      "X_onehot shape:     (15285, 22)\n",
      "hack_onehot shape:  (12140, 22)\n",
      "All columns match\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 10000 trees, with maximum depth of None.                                                            \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "# SET 1a: basic one-hot encoding, Category_1 kept.\n",
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = True, dropcol=False)\n",
    "preds_df_set1a = model_it_rf(X_onehot, y_initial, hack_onehot, params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Column 1 dropped.                                                                                                              \n",
      "===============================================================================================================================\n",
      "X shape:     (15285, 10)\n",
      "hack shape:  (12140, 10)\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 10000 trees, with maximum depth of None.                                                            \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "# SET 1b: same model, but Category_1 is dropped instead of encoded.\n",
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = True, dropcol=True)\n",
    "preds_df_set1b = model_it_rf(X_onehot, y_initial, hack_onehot, params)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SET 2\n",
    "\n",
    "**Basic RF with adjusted one-hot encoding**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SET 2 hyper-parameters.\n",
    "n_est = 10000\n",
    "m_dep = None\n",
    "params = parameters.copy()\n",
    "# NOTE(review): the 'criterion' override only takes effect if model_it_rf\n",
    "# forwards params['criterion'] to RandomForestClassifier — verify.\n",
    "params['criterion']    = 'entropy'\n",
    "params['max_features'] = 6\n",
    "params['class_weight'] = 'balanced_subsample'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Adjusted one hot encoding completed.                                                                                           \n",
      "===============================================================================================================================\n",
      "X_onehot shape:     (15285, 17)\n",
      "hack_onehot shape:  (12140, 17)\n",
      "All columns match\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 10000 trees, with maximum depth of None.                                                            \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "# SET 2a: adjusted one-hot encoding (several categories pooled into 'Z'), Category_1 kept.\n",
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = False, dropcol=False)\n",
    "preds_df_set2a = model_it_rf(X_onehot, y_initial, hack_onehot, params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Column 1 dropped.                                                                                                              \n",
      "===============================================================================================================================\n",
      "X shape:     (15285, 10)\n",
      "hack shape:  (12140, 10)\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 10000 trees, with maximum depth of None.                                                            \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "# SET 2b: Category_1 dropped — dropcol short-circuits the encoding, so\n",
    "# basic=False has no effect here.\n",
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = False, dropcol=True)\n",
    "preds_df_set2b = model_it_rf(X_onehot, y_initial, hack_onehot, params)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SET 3\n",
    "\n",
    "**Basic RF with basic one-hot encoding with polynomial features**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SET 3: these cells pass the default 'parameters' dict to model_it_rf,\n",
    "# so only the n_est / m_dep globals change here.\n",
    "n_est = 15000\n",
    "m_dep = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Basic one hot encoding completed.                                                                                              \n",
      "===============================================================================================================================\n",
      "X_onehot shape:     (15285, 22)\n",
      "hack_onehot shape:  (12140, 22)\n",
      "All columns match\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 15000 trees, with maximum depth of None.                                                            \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "# SET 3a: degree-2 polynomial interaction features on the basic one-hot frame.\n",
    "# Polynomial expansion is deterministic per column set, so applying it to train\n",
    "# and test separately yields aligned columns when the inputs' columns match.\n",
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = True, dropcol=False)\n",
    "X_polys = PolynomialFeatures_labeled(X_onehot,2)\n",
    "hack_polys  = PolynomialFeatures_labeled(hack_onehot,2)\n",
    "preds_df_set3a = model_it_rf(X_polys, y_initial, hack_polys, params = parameters)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Column 1 dropped.                                                                                                              \n",
      "===============================================================================================================================\n",
      "X shape:     (15285, 10)\n",
      "hack shape:  (12140, 10)\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 15000 trees, with maximum depth of None.                                                            \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "# SET 3b: as 3a, but with Category_1 dropped before the polynomial expansion.\n",
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = True, dropcol=True)\n",
    "X_polys = PolynomialFeatures_labeled(X_onehot,2)\n",
    "hack_polys  = PolynomialFeatures_labeled(hack_onehot,2)\n",
    "preds_df_set3b = model_it_rf(X_polys, y_initial, hack_polys, params = parameters)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SET 4\n",
    "\n",
    "**Basic RF with basic one-hot encoding with extra features**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SET 4 hyper-parameters (depth capped at 36 — the generated feature set is wide).\n",
    "n_est = 10000\n",
    "m_dep = 36\n",
    "params = parameters.copy()\n",
    "# NOTE(review): the 'criterion' override only takes effect if model_it_rf\n",
    "# forwards params['criterion'] to RandomForestClassifier — verify.\n",
    "params['criterion']    = 'entropy'\n",
    "params['max_features'] = 5\n",
    "params['class_weight'] = 'balanced_subsample'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Basic one hot encoding completed.                                                                                              \n",
      "===============================================================================================================================\n",
      "X_onehot shape:     (15285, 22)\n",
      "hack_onehot shape:  (12140, 22)\n",
      "All columns match\n",
      "Number of columns before feature generation:  22\n",
      "Number of columns after  feature generation:  179\n",
      "Number of columns before feature generation:  22\n",
      "Number of columns after  feature generation:  179\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 10000 trees, with maximum depth of 36.                                                              \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = True, dropcol=False)\n",
    "X_features = feature_generator(X_onehot)\n",
    "del X_onehot\n",
    "hack_features = feature_generator(hack_onehot)\n",
    "del hack_onehot\n",
    "preds_df_set4a = model_it_rf(X_features, y_initial, hack_features, params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Column 1 dropped.                                                                                                              \n",
      "===============================================================================================================================\n",
      "X shape:     (15285, 10)\n",
      "hack shape:  (12140, 10)\n",
      "Number of columns before feature generation:  10\n",
      "Number of columns after  feature generation:  155\n",
      "Number of columns before feature generation:  10\n",
      "Number of columns after  feature generation:  155\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 10000 trees, with maximum depth of 36.                                                              \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = True, dropcol=True)\n",
    "X_features = feature_generator(X_onehot)\n",
    "del X_onehot\n",
    "hack_features = feature_generator(hack_onehot)\n",
    "del hack_onehot\n",
    "preds_df_set4b = model_it_rf(X_features, y_initial, hack_features, params)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SET 5\n",
    "\n",
    "**Basic RF with basic one-hot encoding with binning**\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_binned                = X_initial.copy()\n",
    "hack_binned             = hack_initial.copy()\n",
    "\n",
    "X_binned['time']        = (X_binned['time']/10).round(0)\n",
    "hack_binned['time']     = (hack_binned['time']/10).round(0)\n",
    "\n",
    "X_binned['Score_1']     = ((X_binned['Score_1'])*5000).round(0)\n",
    "hack_binned['Score_1']  = ((hack_binned['Score_1'])*5000).round(0)\n",
    "\n",
    "X_binned['Score_2']     = ((X_binned['Score_2'])*500).round(0)\n",
    "hack_binned['Score_2']  = ((hack_binned['Score_2'])*500).round(0)\n",
    "\n",
    "X_binned['Score_3']     = ((X_binned['Score_3'])*7000).round(0)\n",
    "hack_binned['Score_3']  = ((hack_binned['Score_3'])*7000).round(0)\n",
    "\n",
    "X_binned['Score_4']     = ((X_binned['Score_4'])*4).round(0)\n",
    "hack_binned['Score_4']  = ((hack_binned['Score_4'])*4).round(0)\n",
    "\n",
    "X_binned['Store_Presence'] = (X_binned['Store_Presence']*50000).round(0)\n",
    "hack_binned['Store_Presence'] = (hack_binned['Store_Presence']*50000).round(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "scaler             = StandardScaler()\n",
    "\n",
    "cols_to_scale      = list(X_initial.columns) # same columns\n",
    "cols_to_scale.remove('Category_1')\n",
    "cols_to_scale.remove('Category_2')\n",
    "\n",
    "X_sc               = scaler.fit_transform(X_binned[cols_to_scale])\n",
    "X_scaled           = pd.DataFrame(X_sc, columns = cols_to_scale)\n",
    "X_scaled['Category_1'] = X_initial['Category_1']\n",
    "X_scaled['Category_2'] = X_initial['Category_2']\n",
    "\n",
    "hd_                = scaler.transform(hack_binned[cols_to_scale])\n",
    "hack_scaled        = pd.DataFrame(hd_, columns = cols_to_scale)\n",
    "hack_scaled['Category_1'] = hack_initial['Category_1']\n",
    "hack_scaled['Category_2'] = hack_initial['Category_2']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "n_est = 10000\n",
    "m_dep = None\n",
    "params = parameters.copy()\n",
    "params['class_weight'] = 'balanced_subsample'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Basic one hot encoding completed.                                                                                              \n",
      "===============================================================================================================================\n",
      "X_onehot shape:     (15285, 22)\n",
      "hack_onehot shape:  (12140, 22)\n",
      "All columns match\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 10000 trees, with maximum depth of None.                                                            \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = True, dropcol = False)\n",
    "preds_df_set5a = model_it_rf(X_onehot, y_initial, hack_onehot, params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Column 1 dropped.                                                                                                              \n",
      "===============================================================================================================================\n",
      "X shape:     (15285, 10)\n",
      "hack shape:  (12140, 10)\n",
      "===============================================================================================================================\n",
      "Fitting Random Forest with 10000 trees, with maximum depth of None.                                                            \n",
      "===============================================================================================================================\n",
      "fitted, now predicting\n",
      "done\n"
     ]
    }
   ],
   "source": [
    "X_onehot, hack_onehot = one_hot_encoder(X_scaled, hack_scaled, basic = True, dropcol = True)\n",
    "preds_df_set5b = model_it_rf(X_onehot, y_initial, hack_onehot, params)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# MERGING THE PREDICTIONS"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "10 predictions merged and normalized.                                                                                          \n",
      "===============================================================================================================================\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>0</th>\n",
       "      <th>1</th>\n",
       "      <th>2</th>\n",
       "      <th>3</th>\n",
       "      <th>4</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.028154</td>\n",
       "      <td>0.751813</td>\n",
       "      <td>0.193829</td>\n",
       "      <td>0.026204</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.007129</td>\n",
       "      <td>0.028015</td>\n",
       "      <td>0.957827</td>\n",
       "      <td>0.007029</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.002304</td>\n",
       "      <td>0.028447</td>\n",
       "      <td>0.958331</td>\n",
       "      <td>0.010918</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.003658</td>\n",
       "      <td>0.047605</td>\n",
       "      <td>0.924250</td>\n",
       "      <td>0.024487</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.001733</td>\n",
       "      <td>0.996283</td>\n",
       "      <td>0.001983</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "     0         1         2         3         4\n",
       "0  0.0  0.028154  0.751813  0.193829  0.026204\n",
       "1  0.0  0.007129  0.028015  0.957827  0.007029\n",
       "2  0.0  0.002304  0.028447  0.958331  0.010918\n",
       "3  0.0  0.003658  0.047605  0.924250  0.024487\n",
       "4  0.0  0.000000  0.001733  0.996283  0.001983"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "prediction_set = [preds_df_set1a, \n",
    "                  preds_df_set1b,\n",
    "                  preds_df_set2a,\n",
    "                  preds_df_set2b,\n",
    "                  preds_df_set3a,\n",
    "                  preds_df_set3b,\n",
    "                  preds_df_set4a,\n",
    "                  preds_df_set4b,\n",
    "                  preds_df_set5a,\n",
    "                  preds_df_set5b]\n",
    "\n",
    "merged_preds   = prediction_merger(prediction_set)\n",
    "\n",
    "display(merged_preds.head())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 3525 fuzzy matches. Applying corrections now.\n"
     ]
    }
   ],
   "source": [
    "submission = secret_sauce(merged_preds)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# READY TO WRITE THE SUBMISSION FILE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===============================================================================================================================\n",
      "Ready to write SS046.csv                                                                                                       \n",
      "===============================================================================================================================\n"
     ]
    }
   ],
   "source": [
    "printd(f'Ready to write {file_name}.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SS046 generated.\n"
     ]
    }
   ],
   "source": [
    "submission.to_csv(file_name+'.csv',index=False)\n",
    "print(file_name, 'generated.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "# - END - "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "349.062px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
