{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:10.962944Z",
     "iopub.status.busy": "2022-02-28T09:46:10.962718Z",
     "iopub.status.idle": "2022-02-28T09:46:37.220094Z",
     "shell.execute_reply": "2022-02-28T09:46:37.21934Z",
     "shell.execute_reply.started": "2022-02-28T09:46:10.9629Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Collecting transformers\r\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/a0/11/07cea2439c82726ee05575472e5b09c3b6eeac7e32eef1a736ad3646a8f0/transformers-4.17.0-py3-none-any.whl (3.8MB)\r\n",
      "\u001b[K     |████████████████████████████████| 3.8MB 513kB/s \r\n",
      "\u001b[?25hCollecting huggingface-hub<1.0,>=0.1.0\r\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/c8/df/1b454741459f6ce75f86534bdad42ca17291b14a83066695f7d2c676e16c/huggingface_hub-0.4.0-py3-none-any.whl (67kB)\r\n",
      "\u001b[K     |████████████████████████████████| 71kB 8.4MB/s \r\n",
      "\u001b[?25hCollecting tokenizers!=0.11.3,>=0.11.1\r\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/0c/2c/e47d5d3040a3faa5633a7018594717c8460d62d173b8d06339f9d171f5cc/tokenizers-0.11.6-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (6.5MB)\r\n",
      "\u001b[K     |████████████████████████████████| 6.6MB 21.9MB/s \r\n",
      "\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /opt/conda/lib/python3.6/site-packages (from transformers) (4.36.1)\r\n",
      "Collecting sacremoses\r\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/db/8b/37b90a3848ff71c0d05ebac5ee6d83f1f81e5f57f26b99a83ebff033303b/sacremoses-0.0.49-py3-none-any.whl (895kB)\r\n",
      "\u001b[K     |████████████████████████████████| 901kB 40.4MB/s \r\n",
      "\u001b[?25hRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /opt/conda/lib/python3.6/site-packages (from transformers) (0.23)\r\n",
      "Requirement already satisfied: dataclasses; python_version < \"3.7\" in /opt/conda/lib/python3.6/site-packages (from transformers) (0.7)\r\n",
      "Requirement already satisfied: regex!=2019.12.17 in /opt/conda/lib/python3.6/site-packages (from transformers) (2019.8.19)\r\n",
      "Collecting packaging>=20.0\r\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/05/8e/8de486cbd03baba4deef4142bd643a3e7bbe954a784dc1bb17142572d127/packaging-21.3-py3-none-any.whl (40kB)\r\n",
      "\u001b[K     |████████████████████████████████| 40kB 4.4MB/s \r\n",
      "\u001b[?25hCollecting numpy>=1.17\r\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/14/32/d3fa649ad7ec0b82737b92fefd3c4dd376b0bb23730715124569f38f3a08/numpy-1.19.5-cp36-cp36m-manylinux2010_x86_64.whl (14.8MB)\r\n",
      "\u001b[K     |████████████████████████████████| 14.8MB 39.2MB/s \r\n",
      "\u001b[?25hRequirement already satisfied: filelock in /opt/conda/lib/python3.6/site-packages (from transformers) (3.0.12)\r\n",
      "Requirement already satisfied: pyyaml>=5.1 in /opt/conda/lib/python3.6/site-packages (from transformers) (5.1.2)\r\n",
      "Requirement already satisfied: requests in /opt/conda/lib/python3.6/site-packages (from transformers) (2.22.0)\r\n",
      "Collecting typing-extensions>=3.7.4.3\r\n",
      "  Downloading https://files.pythonhosted.org/packages/45/6b/44f7f8f1e110027cf88956b59f2fad776cca7e1704396d043f89effd3a0e/typing_extensions-4.1.1-py3-none-any.whl\r\n",
      "Requirement already satisfied: click in /opt/conda/lib/python3.6/site-packages (from sacremoses->transformers) (7.0)\r\n",
      "Requirement already satisfied: joblib in /opt/conda/lib/python3.6/site-packages (from sacremoses->transformers) (0.13.2)\r\n",
      "Requirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from sacremoses->transformers) (1.12.0)\r\n",
      "Requirement already satisfied: zipp>=0.5 in /opt/conda/lib/python3.6/site-packages (from importlib-metadata; python_version < \"3.8\"->transformers) (0.6.0)\r\n",
      "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.6/site-packages (from packaging>=20.0->transformers) (2.4.2)\r\n",
      "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /opt/conda/lib/python3.6/site-packages (from requests->transformers) (3.0.4)\r\n",
      "Requirement already satisfied: idna<2.9,>=2.5 in /opt/conda/lib/python3.6/site-packages (from requests->transformers) (2.8)\r\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.6/site-packages (from requests->transformers) (2019.9.11)\r\n",
      "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/conda/lib/python3.6/site-packages (from requests->transformers) (1.24.2)\r\n",
      "Requirement already satisfied: more-itertools in /opt/conda/lib/python3.6/site-packages (from zipp>=0.5->importlib-metadata; python_version < \"3.8\"->transformers) (7.2.0)\r\n",
      "\u001b[31mERROR: allennlp 0.9.0 requires flaky, which is not installed.\u001b[0m\r\n",
      "\u001b[31mERROR: allennlp 0.9.0 requires responses>=0.7, which is not installed.\u001b[0m\r\n",
      "\u001b[31mERROR: tsfresh 0.12.0 has requirement pandas<=0.23.4,>=0.20.3, but you'll have pandas 0.25.2 which is incompatible.\u001b[0m\r\n",
      "\u001b[31mERROR: tensorflow-probability 0.8.0 has requirement cloudpickle==1.1.1, but you'll have cloudpickle 1.2.2 which is incompatible.\u001b[0m\r\n",
      "\u001b[31mERROR: mizani 0.6.0 has requirement matplotlib>=3.1.1, but you'll have matplotlib 3.0.3 which is incompatible.\u001b[0m\r\n",
      "\u001b[31mERROR: kmeans-smote 0.1.2 has requirement imbalanced-learn<0.5,>=0.4.0, but you'll have imbalanced-learn 0.5.0 which is incompatible.\u001b[0m\r\n",
      "\u001b[31mERROR: kmeans-smote 0.1.2 has requirement numpy<1.16,>=1.13, but you'll have numpy 1.19.5 which is incompatible.\u001b[0m\r\n",
      "\u001b[31mERROR: kmeans-smote 0.1.2 has requirement scikit-learn<0.21,>=0.19.0, but you'll have scikit-learn 0.21.3 which is incompatible.\u001b[0m\r\n",
      "\u001b[31mERROR: hyperopt 0.2.1 has requirement networkx==2.2, but you'll have networkx 2.4 which is incompatible.\u001b[0m\r\n",
      "\u001b[31mERROR: chainer 6.5.0 has requirement typing-extensions<=3.6.6, but you'll have typing-extensions 4.1.1 which is incompatible.\u001b[0m\r\n",
      "\u001b[31mERROR: allennlp 0.9.0 has requirement spacy<2.2,>=2.1.0, but you'll have spacy 2.2.1 which is incompatible.\u001b[0m\r\n",
      "Installing collected packages: typing-extensions, packaging, huggingface-hub, tokenizers, sacremoses, numpy, transformers\r\n",
      "  Found existing installation: typing-extensions 3.6.6\r\n",
      "    Uninstalling typing-extensions-3.6.6:\r\n",
      "      Successfully uninstalled typing-extensions-3.6.6\r\n",
      "  Found existing installation: packaging 19.2\r\n",
      "    Uninstalling packaging-19.2:\r\n",
      "      Successfully uninstalled packaging-19.2\r\n",
      "  Found existing installation: numpy 1.16.4\r\n",
      "    Uninstalling numpy-1.16.4:\r\n",
      "      Successfully uninstalled numpy-1.16.4\r\n",
      "Successfully installed huggingface-hub-0.4.0 numpy-1.19.5 packaging-21.3 sacremoses-0.0.49 tokenizers-0.11.6 transformers-4.17.0 typing-extensions-4.1.1\r\n"
     ]
    }
   ],
   "source": [
    "# Pin the version this notebook was developed against (4.17.0, per the\n",
    "# recorded install log) so a re-run is reproducible; %pip (vs !pip)\n",
    "# installs into the active kernel's environment.\n",
    "%pip install transformers==4.17.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:37.223599Z",
     "iopub.status.busy": "2022-02-28T09:46:37.22292Z",
     "iopub.status.idle": "2022-02-28T09:46:38.763937Z",
     "shell.execute_reply": "2022-02-28T09:46:38.763143Z",
     "shell.execute_reply.started": "2022-02-28T09:46:37.223538Z"
    }
   },
   "outputs": [],
   "source": [
    "import transformers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
    "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5",
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:38.76557Z",
     "iopub.status.busy": "2022-02-28T09:46:38.76527Z",
     "iopub.status.idle": "2022-02-28T09:46:41.994797Z",
     "shell.execute_reply": "2022-02-28T09:46:41.993973Z",
     "shell.execute_reply.started": "2022-02-28T09:46:38.765517Z"
    }
   },
   "outputs": [],
   "source": [
    "import sys\n",
    "import glob\n",
    "import torch\n",
    "\n",
    "\n",
    "import os\n",
    "import re\n",
    "import gc\n",
    "import pickle  \n",
    "import random\n",
    "import string\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from scipy import stats\n",
    "\n",
    "# import transformers\n",
    "from transformers import DistilBertTokenizer,DistilBertModel\n",
    "import math\n",
    "\n",
    "\n",
    "from scipy.stats import spearmanr, rankdata\n",
    "from os.path import join as path_join\n",
    "from numpy.random import seed\n",
    "from urllib.parse import urlparse\n",
    "from sklearn.preprocessing import OneHotEncoder\n",
    "from sklearn.model_selection import KFold\n",
    "\n",
    "seed(42)\n",
    "random.seed(42)\n",
    "\n",
    "import nltk\n",
    "from nltk.corpus import stopwords\n",
    "\n",
    "from sklearn.base import clone\n",
    "from sklearn.pipeline import Pipeline, FeatureUnion\n",
    "from sklearn.preprocessing import StandardScaler, PowerTransformer, OneHotEncoder, RobustScaler, KBinsDiscretizer, QuantileTransformer\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.compose import ColumnTransformer\n",
    "from sklearn.model_selection import StratifiedKFold, GridSearchCV, KFold, GroupKFold\n",
    "from sklearn.multioutput import MultiOutputRegressor\n",
    "from sklearn.impute import SimpleImputer\n",
    "from sklearn.metrics import make_scorer\n",
    "\n",
    "from sklearn.multiclass import OneVsRestClassifier\n",
    "from sklearn.linear_model import LinearRegression, Ridge, Lasso, HuberRegressor, RANSACRegressor\n",
    "from sklearn.svm import LinearSVR, SVR\n",
    "from sklearn.ensemble import ExtraTreesRegressor\n",
    "\n",
    "eng_stopwords = set(stopwords.words(\"english\"))\n",
    "\n",
    "import tensorflow as tf\n",
    "import tensorflow_hub as hub"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:41.997796Z",
     "iopub.status.busy": "2022-02-28T09:46:41.99732Z",
     "iopub.status.idle": "2022-02-28T09:46:42.009931Z",
     "shell.execute_reply": "2022-02-28T09:46:42.008821Z",
     "shell.execute_reply.started": "2022-02-28T09:46:41.997744Z"
    }
   },
   "outputs": [],
   "source": [
    "# settings\n",
    "# data_dir: Kaggle input directory containing the competition CSVs\n",
    "data_dir = '../input/google-quest-challenge/'\n",
    "# empty string -> read/write relative to the working directory\n",
    "metas_dir = ''\n",
    "sub_dir = ''\n",
    "\n",
    "# fixed seed shared by CV splits / models for reproducibility\n",
    "RANDOM_STATE = 42\n",
    "\n",
    "import datetime\n",
    "# MMDD date stamp, used to tag output file names\n",
    "todate = datetime.date.today().strftime(\"%m%d\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:42.015014Z",
     "iopub.status.busy": "2022-02-28T09:46:42.014736Z",
     "iopub.status.idle": "2022-02-28T09:46:42.023129Z",
     "shell.execute_reply": "2022-02-28T09:46:42.022314Z",
     "shell.execute_reply.started": "2022-02-28T09:46:42.014964Z"
    }
   },
   "outputs": [],
   "source": [
    "# count words\n",
    "def word_count(xstring):\n",
    "    \"\"\"Return the number of whitespace-separated tokens in a string.\"\"\"\n",
    "    # str.split() on a plain string returns a list, so len() gives the\n",
    "    # token count. (The previous `.split().str.len()` used the pandas\n",
    "    # Series-only .str accessor and raised AttributeError on any call.)\n",
    "    return len(xstring.split())\n",
    "\n",
    "\n",
    "def spearman_corr(y_true, y_pred):\n",
    "    \"\"\"Mean column-wise Spearman rho for 2-D predictions, plain rho for 1-D.\"\"\"\n",
    "    if np.ndim(y_pred) == 2:\n",
    "        corr = np.mean([stats.spearmanr(y_true[:, i], y_pred[:, i])[0] for i in range(y_true.shape[1])])\n",
    "    else:\n",
    "        corr = stats.spearmanr(y_true, y_pred)[0]\n",
    "    return corr\n",
    "\n",
    "# scorer usable with sklearn model selection utilities (GridSearchCV etc.)\n",
    "custom_scorer = make_scorer(spearman_corr, greater_is_better=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:42.026562Z",
     "iopub.status.busy": "2022-02-28T09:46:42.026101Z",
     "iopub.status.idle": "2022-02-28T09:46:42.033688Z",
     "shell.execute_reply": "2022-02-28T09:46:42.032969Z",
     "shell.execute_reply.started": "2022-02-28T09:46:42.026511Z"
    }
   },
   "outputs": [],
   "source": [
    "def chunks(l, n):\n",
    "    \"\"\"Yield successive slices of at most n items from sequence l.\"\"\"\n",
    "\n",
    "    for i in range(0, len(l), n):\n",
    "        yield l[i:i + n]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:42.03723Z",
     "iopub.status.busy": "2022-02-28T09:46:42.036576Z",
     "iopub.status.idle": "2022-02-28T09:46:42.048508Z",
     "shell.execute_reply": "2022-02-28T09:46:42.047662Z",
     "shell.execute_reply.started": "2022-02-28T09:46:42.036828Z"
    }
   },
   "outputs": [],
   "source": [
    "def fetch_vectors(string_list, batch_size=64):\n",
    "    # inspired by https://jalammar.github.io/a-visual-guide-to-using-bert-for-the-first-time/\n",
    "    # Returns one DistilBERT [CLS]-position embedding row per input string.\n",
    "    # NOTE(review): hard-requires a CUDA device; fails on CPU-only machines.\n",
    "    DEVICE = torch.device(\"cuda\")\n",
    "    tokenizer = transformers.DistilBertTokenizer.from_pretrained(\"../input/distilbertbaseuncased/\")\n",
    "    model = transformers.DistilBertModel.from_pretrained(\"../input/distilbertbaseuncased/\")\n",
    "    model.to(DEVICE)\n",
    "\n",
    "    fin_features = []\n",
    "    for data in chunks(string_list, batch_size):\n",
    "        tokenized = []\n",
    "        for x in data:\n",
    "            # keep at most 300 whitespace tokens, then hard-cap the encoded\n",
    "            # sequence at 512 ids (DistilBERT's maximum input length)\n",
    "            x = \" \".join(x.strip().split()[:300])\n",
    "            tok = tokenizer.encode(x, add_special_tokens=True)\n",
    "            tokenized.append(tok[:512])\n",
    "\n",
    "        max_len = 512\n",
    "        # right-pad each sequence with 0 (assumed PAD id -- TODO confirm\n",
    "        # against the tokenizer) so the batch forms a rectangular array\n",
    "        padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized])\n",
    "        # attention mask: 1 on real tokens, 0 on the padding just added\n",
    "        attention_mask = np.where(padded != 0, 1, 0)\n",
    "        input_ids = torch.tensor(padded).to(DEVICE)\n",
    "        attention_mask = torch.tensor(attention_mask).to(DEVICE)\n",
    "\n",
    "        with torch.no_grad():\n",
    "            last_hidden_states = model(input_ids, attention_mask=attention_mask)\n",
    "\n",
    "        # last_hidden_states[0] is (batch, seq, hidden); slice position 0\n",
    "        features = last_hidden_states[0][:, 0, :].cpu().numpy()\n",
    "        fin_features.append(features)\n",
    "\n",
    "    fin_features = np.vstack(fin_features)\n",
    "    return fin_features"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0",
    "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a",
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:42.050906Z",
     "iopub.status.busy": "2022-02-28T09:46:42.050318Z",
     "iopub.status.idle": "2022-02-28T09:46:42.392758Z",
     "shell.execute_reply": "2022-02-28T09:46:42.392005Z",
     "shell.execute_reply.started": "2022-02-28T09:46:42.050597Z"
    }
   },
   "outputs": [],
   "source": [
    "# load the data\n",
    "\n",
    "# train carries the target columns; test holds only text/metadata fields\n",
    "xtrain = pd.read_csv(data_dir + 'train.csv')\n",
    "xtest = pd.read_csv(data_dir + 'test.csv')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:42.394737Z",
     "iopub.status.busy": "2022-02-28T09:46:42.394393Z",
     "iopub.status.idle": "2022-02-28T09:46:42.438144Z",
     "shell.execute_reply": "2022-02-28T09:46:42.437273Z",
     "shell.execute_reply.started": "2022-02-28T09:46:42.394689Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>qa_id</th>\n",
       "      <th>question_title</th>\n",
       "      <th>question_body</th>\n",
       "      <th>question_user_name</th>\n",
       "      <th>question_user_page</th>\n",
       "      <th>answer</th>\n",
       "      <th>answer_user_name</th>\n",
       "      <th>answer_user_page</th>\n",
       "      <th>url</th>\n",
       "      <th>category</th>\n",
       "      <th>...</th>\n",
       "      <th>question_well_written</th>\n",
       "      <th>answer_helpful</th>\n",
       "      <th>answer_level_of_information</th>\n",
       "      <th>answer_plausible</th>\n",
       "      <th>answer_relevance</th>\n",
       "      <th>answer_satisfaction</th>\n",
       "      <th>answer_type_instructions</th>\n",
       "      <th>answer_type_procedure</th>\n",
       "      <th>answer_type_reason_explanation</th>\n",
       "      <th>answer_well_written</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>What am I losing when using extension tubes in...</td>\n",
       "      <td>After playing around with macro photography on...</td>\n",
       "      <td>ysap</td>\n",
       "      <td>https://photo.stackexchange.com/users/1024</td>\n",
       "      <td>I just got extension tubes, so here's the skin...</td>\n",
       "      <td>rfusca</td>\n",
       "      <td>https://photo.stackexchange.com/users/1917</td>\n",
       "      <td>http://photo.stackexchange.com/questions/9169/...</td>\n",
       "      <td>LIFE_ARTS</td>\n",
       "      <td>...</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.666667</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.800000</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>What is the distinction between a city and a s...</td>\n",
       "      <td>I am trying to understand what kinds of places...</td>\n",
       "      <td>russellpierce</td>\n",
       "      <td>https://rpg.stackexchange.com/users/8774</td>\n",
       "      <td>It might be helpful to look into the definitio...</td>\n",
       "      <td>Erik Schmidt</td>\n",
       "      <td>https://rpg.stackexchange.com/users/1871</td>\n",
       "      <td>http://rpg.stackexchange.com/questions/47820/w...</td>\n",
       "      <td>CULTURE</td>\n",
       "      <td>...</td>\n",
       "      <td>0.888889</td>\n",
       "      <td>0.888889</td>\n",
       "      <td>0.555556</td>\n",
       "      <td>0.888889</td>\n",
       "      <td>0.888889</td>\n",
       "      <td>0.666667</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.666667</td>\n",
       "      <td>0.888889</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2</td>\n",
       "      <td>Maximum protusion length for through-hole comp...</td>\n",
       "      <td>I'm working on a PCB that has through-hole com...</td>\n",
       "      <td>Joe Baker</td>\n",
       "      <td>https://electronics.stackexchange.com/users/10157</td>\n",
       "      <td>Do you even need grooves?  We make several pro...</td>\n",
       "      <td>Dwayne Reid</td>\n",
       "      <td>https://electronics.stackexchange.com/users/64754</td>\n",
       "      <td>http://electronics.stackexchange.com/questions...</td>\n",
       "      <td>SCIENCE</td>\n",
       "      <td>...</td>\n",
       "      <td>0.777778</td>\n",
       "      <td>0.777778</td>\n",
       "      <td>0.555556</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.666667</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.333333</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.888889</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3</td>\n",
       "      <td>Can an affidavit be used in Beit Din?</td>\n",
       "      <td>An affidavit, from what i understand, is basic...</td>\n",
       "      <td>Scimonster</td>\n",
       "      <td>https://judaism.stackexchange.com/users/5151</td>\n",
       "      <td>Sending an \"affidavit\" it is a dispute between...</td>\n",
       "      <td>Y     e     z</td>\n",
       "      <td>https://judaism.stackexchange.com/users/4794</td>\n",
       "      <td>http://judaism.stackexchange.com/questions/551...</td>\n",
       "      <td>CULTURE</td>\n",
       "      <td>...</td>\n",
       "      <td>0.888889</td>\n",
       "      <td>0.833333</td>\n",
       "      <td>0.333333</td>\n",
       "      <td>0.833333</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.800000</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>4 rows × 41 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "   qa_id                                     question_title  \\\n",
       "0      0  What am I losing when using extension tubes in...   \n",
       "1      1  What is the distinction between a city and a s...   \n",
       "2      2  Maximum protusion length for through-hole comp...   \n",
       "3      3              Can an affidavit be used in Beit Din?   \n",
       "\n",
       "                                       question_body question_user_name  \\\n",
       "0  After playing around with macro photography on...               ysap   \n",
       "1  I am trying to understand what kinds of places...      russellpierce   \n",
       "2  I'm working on a PCB that has through-hole com...          Joe Baker   \n",
       "3  An affidavit, from what i understand, is basic...         Scimonster   \n",
       "\n",
       "                                  question_user_page  \\\n",
       "0         https://photo.stackexchange.com/users/1024   \n",
       "1           https://rpg.stackexchange.com/users/8774   \n",
       "2  https://electronics.stackexchange.com/users/10157   \n",
       "3       https://judaism.stackexchange.com/users/5151   \n",
       "\n",
       "                                              answer answer_user_name  \\\n",
       "0  I just got extension tubes, so here's the skin...           rfusca   \n",
       "1  It might be helpful to look into the definitio...     Erik Schmidt   \n",
       "2  Do you even need grooves?  We make several pro...      Dwayne Reid   \n",
       "3  Sending an \"affidavit\" it is a dispute between...    Y     e     z   \n",
       "\n",
       "                                    answer_user_page  \\\n",
       "0         https://photo.stackexchange.com/users/1917   \n",
       "1           https://rpg.stackexchange.com/users/1871   \n",
       "2  https://electronics.stackexchange.com/users/64754   \n",
       "3       https://judaism.stackexchange.com/users/4794   \n",
       "\n",
       "                                                 url   category  ...  \\\n",
       "0  http://photo.stackexchange.com/questions/9169/...  LIFE_ARTS  ...   \n",
       "1  http://rpg.stackexchange.com/questions/47820/w...    CULTURE  ...   \n",
       "2  http://electronics.stackexchange.com/questions...    SCIENCE  ...   \n",
       "3  http://judaism.stackexchange.com/questions/551...    CULTURE  ...   \n",
       "\n",
       "  question_well_written  answer_helpful  answer_level_of_information  \\\n",
       "0              1.000000        1.000000                     0.666667   \n",
       "1              0.888889        0.888889                     0.555556   \n",
       "2              0.777778        0.777778                     0.555556   \n",
       "3              0.888889        0.833333                     0.333333   \n",
       "\n",
       "   answer_plausible  answer_relevance  answer_satisfaction  \\\n",
       "0          1.000000          1.000000             0.800000   \n",
       "1          0.888889          0.888889             0.666667   \n",
       "2          1.000000          1.000000             0.666667   \n",
       "3          0.833333          1.000000             0.800000   \n",
       "\n",
       "   answer_type_instructions  answer_type_procedure  \\\n",
       "0                       1.0               0.000000   \n",
       "1                       0.0               0.000000   \n",
       "2                       0.0               0.333333   \n",
       "3                       0.0               0.000000   \n",
       "\n",
       "   answer_type_reason_explanation  answer_well_written  \n",
       "0                        0.000000             1.000000  \n",
       "1                        0.666667             0.888889  \n",
       "2                        1.000000             0.888889  \n",
       "3                        1.000000             1.000000  \n",
       "\n",
       "[4 rows x 41 columns]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# sanity check: peek at the first few training rows\n",
    "xtrain.head(4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:42.439842Z",
     "iopub.status.busy": "2022-02-28T09:46:42.43941Z",
     "iopub.status.idle": "2022-02-28T09:46:42.446681Z",
     "shell.execute_reply": "2022-02-28T09:46:42.445996Z",
     "shell.execute_reply.started": "2022-02-28T09:46:42.439653Z"
    }
   },
   "outputs": [],
   "source": [
    "# the 30 prediction targets (21 question-* + 9 answer-* quality ratings)\n",
    "target_cols = ['question_asker_intent_understanding', 'question_body_critical', \n",
    "               'question_conversational', 'question_expect_short_answer', \n",
    "               'question_fact_seeking', 'question_has_commonly_accepted_answer', \n",
    "               'question_interestingness_others', 'question_interestingness_self', \n",
    "               'question_multi_intent', 'question_not_really_a_question', \n",
    "               'question_opinion_seeking', 'question_type_choice', \n",
    "               'question_type_compare', 'question_type_consequence', \n",
    "               'question_type_definition', 'question_type_entity', \n",
    "               'question_type_instructions', 'question_type_procedure', \n",
    "               'question_type_reason_explanation', 'question_type_spelling', \n",
    "               'question_well_written', 'answer_helpful', \n",
    "               'answer_level_of_information', 'answer_plausible', \n",
    "               'answer_relevance', 'answer_satisfaction', \n",
    "               'answer_type_instructions', 'answer_type_procedure', \n",
    "               'answer_type_reason_explanation', 'answer_well_written']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# EDA / FE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Basic FE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:42.449154Z",
     "iopub.status.busy": "2022-02-28T09:46:42.448916Z",
     "iopub.status.idle": "2022-02-28T09:46:42.78907Z",
     "shell.execute_reply": "2022-02-28T09:46:42.788337Z",
     "shell.execute_reply.started": "2022-02-28T09:46:42.449113Z"
    }
   },
   "outputs": [],
   "source": [
    "# word count in title, body and answer\n",
    "for colname in ['question_title', 'question_body', 'answer']:\n",
    "    newname = colname + '_word_len'\n",
    "    \n",
    "    # vectorised token counting via the pandas .str accessor\n",
    "    xtrain[newname] = xtrain[colname].str.split().str.len()\n",
    "    xtest[newname] = xtest[colname].str.split().str.len()\n",
    "\n",
    "    \n",
    "# drop loop leftovers from the notebook namespace\n",
    "del newname, colname"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:42.791126Z",
     "iopub.status.busy": "2022-02-28T09:46:42.790687Z",
     "iopub.status.idle": "2022-02-28T09:46:43.018235Z",
     "shell.execute_reply": "2022-02-28T09:46:43.017566Z",
     "shell.execute_reply.started": "2022-02-28T09:46:42.790947Z"
    }
   },
   "outputs": [],
   "source": [
    "for colname in ['question', 'answer']:\n",
    "\n",
    "    # check for nonames, i.e. users with logins like user12389\n",
    "    # raw string: '\\d' in a plain literal is an invalid escape\n",
    "    # (SyntaxWarning on recent Python); the regex itself is unchanged\n",
    "    xtrain['is_'+colname+'_no_name_user'] = xtrain[colname +'_user_name'].str.contains(r'^user\\d+$') + 0\n",
    "    xtest['is_'+colname+'_no_name_user'] = xtest[colname +'_user_name'].str.contains(r'^user\\d+$') + 0\n",
    "    \n",
    "\n",
    "colname = 'answer'\n",
    "# check lexical diversity (unique words count vs total )\n",
    "xtrain[colname+'_div'] = xtrain[colname].apply(lambda s: len(set(s.split())) / len(s.split()) )\n",
    "xtest[colname+'_div'] = xtest[colname].apply(lambda s: len(set(s.split())) / len(s.split()) )\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:43.019867Z",
     "iopub.status.busy": "2022-02-28T09:46:43.019598Z",
     "iopub.status.idle": "2022-02-28T09:46:43.078576Z",
     "shell.execute_reply": "2022-02-28T09:46:43.077928Z",
     "shell.execute_reply.started": "2022-02-28T09:46:43.019822Z"
    }
   },
   "outputs": [],
   "source": [
    "## domain components\n",
    "for df in [xtrain, xtest]:\n",
    "    \n",
    "    df['domcom'] = df['question_user_page'].apply(lambda s: s.split('://')[1].split('/')[0].split('.'))\n",
    "    # count components\n",
    "    df['dom_cnt'] = df['domcom'].apply(lambda s: len(s))\n",
    "    # pad the length in case some domains have fewer components in the name\n",
    "    df['domcom'] = df['domcom'].apply(lambda s: s + ['none', 'none'])\n",
    "\n",
    "    # components\n",
    "    for ii in range(0,4):\n",
    "        df['dom_'+str(ii)] = df['domcom'].apply(lambda s: s[ii])\n",
    "    \n",
    "# clean up\n",
    "xtrain.drop('domcom', axis = 1, inplace = True)\n",
    "xtest.drop('domcom', axis = 1, inplace = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:43.081733Z",
     "iopub.status.busy": "2022-02-28T09:46:43.08122Z",
     "iopub.status.idle": "2022-02-28T09:46:45.309873Z",
     "shell.execute_reply": "2022-02-28T09:46:45.307485Z",
     "shell.execute_reply.started": "2022-02-28T09:46:43.081682Z"
    }
   },
   "outputs": [],
   "source": [
    "# shared elements\n",
    "# overlap between the non-stopword vocabularies of question body and answer\n",
    "for df in [xtrain, xtest]:\n",
    "    df['q_words'] = df['question_body'].apply(lambda s: [f for f in s.split() if f not in eng_stopwords] )\n",
    "    df['a_words'] = df['answer'].apply(lambda s: [f for f in s.split() if f not in eng_stopwords] )\n",
    "    # np.intersect1d de-duplicates, so this counts shared *unique* words\n",
    "    df['qa_word_overlap'] = df.apply(lambda s: len(np.intersect1d(s['q_words'], s['a_words'])), axis = 1)\n",
    "    # +1 in the denominator guards against empty word lists\n",
    "    df['qa_word_overlap_norm1'] = df.apply(lambda s: s['qa_word_overlap']/(1 + len(s['a_words'])), axis = 1)\n",
    "    df['qa_word_overlap_norm2'] = df.apply(lambda s: s['qa_word_overlap']/(1 + len(s['q_words'])), axis = 1)\n",
    "    # list-valued helper columns are not usable as model features; drop them\n",
    "    df.drop(['q_words', 'a_words'], axis = 1, inplace = True)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:45.311829Z",
     "iopub.status.busy": "2022-02-28T09:46:45.311368Z",
     "iopub.status.idle": "2022-02-28T09:46:47.051826Z",
     "shell.execute_reply": "2022-02-28T09:46:47.051122Z",
     "shell.execute_reply.started": "2022-02-28T09:46:45.311772Z"
    }
   },
   "outputs": [],
   "source": [
    "for df in [xtrain, xtest]:\n",
    "    \n",
    "    ## Number of characters in the text ##\n",
    "    df[\"question_title_num_chars\"] = df[\"question_title\"].apply(lambda x: len(str(x)))\n",
    "    df[\"question_body_num_chars\"] = df[\"question_body\"].apply(lambda x: len(str(x)))\n",
    "    df[\"answer_num_chars\"] = df[\"answer\"].apply(lambda x: len(str(x)))\n",
    "\n",
    "    ## Number of stopwords in the text ##\n",
    "    df[\"question_title_num_stopwords\"] = df[\"question_title\"].apply(lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords]))\n",
    "    df[\"question_body_num_stopwords\"] = df[\"question_body\"].apply(lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords]))\n",
    "    df[\"answer_num_stopwords\"] = df[\"answer\"].apply(lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords]))\n",
    "\n",
    "    ## Number of punctuations in the text ##\n",
    "    df[\"question_title_num_punctuations\"] =df['question_title'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]) )\n",
    "    df[\"question_body_num_punctuations\"] =df['question_body'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]) )\n",
    "    df[\"answer_num_punctuations\"] =df['answer'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]) )\n",
    "\n",
    "    ## Number of title case words in the text ##\n",
    "    df[\"question_title_num_words_upper\"] = df[\"question_title\"].apply(lambda x: len([w for w in str(x).split() if w.isupper()]))\n",
    "    df[\"question_body_num_words_upper\"] = df[\"question_body\"].apply(lambda x: len([w for w in str(x).split() if w.isupper()]))\n",
    "    df[\"answer_num_words_upper\"] = df[\"answer\"].apply(lambda x: len([w for w in str(x).split() if w.isupper()]))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## FE - distance-based "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:46:47.053427Z",
     "iopub.status.busy": "2022-02-28T09:46:47.053126Z",
     "iopub.status.idle": "2022-02-28T09:47:25.118136Z",
     "shell.execute_reply": "2022-02-28T09:47:25.117249Z",
     "shell.execute_reply.started": "2022-02-28T09:46:47.053382Z"
    }
   },
   "outputs": [],
   "source": [
    "module_url = \"../input/universalsentenceencoderlarge4/\"\n",
    "embed = hub.load(module_url)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:47:25.122172Z",
     "iopub.status.busy": "2022-02-28T09:47:25.121887Z",
     "iopub.status.idle": "2022-02-28T09:50:02.047052Z",
     "shell.execute_reply": "2022-02-28T09:50:02.046297Z",
     "shell.execute_reply.started": "2022-02-28T09:47:25.122121Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "question_title\n",
      "question_body\n",
      "answer\n"
     ]
    }
   ],
   "source": [
    "embeddings_train = {}\n",
    "embeddings_test = {}\n",
    "for text in ['question_title', 'question_body', 'answer']:\n",
    "    train_text = xtrain[text].str.replace('?', '.').str.replace('!', '.').tolist()\n",
    "    test_text = xtest[text].str.replace('?', '.').str.replace('!', '.').tolist()\n",
    "    \n",
    "    curr_train_emb = []\n",
    "    curr_test_emb = []\n",
    "    batch_size = 4\n",
    "    ind = 0\n",
    "    while ind*batch_size < len(train_text):\n",
    "        curr_train_emb.append(embed(train_text[ind*batch_size: (ind + 1)*batch_size])[\"outputs\"].numpy())\n",
    "        ind += 1\n",
    "        \n",
    "    ind = 0\n",
    "    while ind*batch_size < len(test_text):\n",
    "        curr_test_emb.append(embed(test_text[ind*batch_size: (ind + 1)*batch_size])[\"outputs\"].numpy())\n",
    "        ind += 1    \n",
    "        \n",
    "    embeddings_train[text + '_embedding'] = np.vstack(curr_train_emb)\n",
    "    embeddings_test[text + '_embedding'] = np.vstack(curr_test_emb)\n",
    "\n",
    "    print(text)\n",
    "    \n",
    "del embed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:50:02.04975Z",
     "iopub.status.busy": "2022-02-28T09:50:02.049495Z",
     "iopub.status.idle": "2022-02-28T09:50:02.224014Z",
     "shell.execute_reply": "2022-02-28T09:50:02.223259Z",
     "shell.execute_reply.started": "2022-02-28T09:50:02.049707Z"
    }
   },
   "outputs": [],
   "source": [
    "l2_dist = lambda x, y: np.power(x - y, 2).sum(axis=1)\n",
    "\n",
    "cos_dist = lambda x, y: (x*y).sum(axis=1)\n",
    "\n",
    "dist_features_train = np.array([\n",
    "    l2_dist(embeddings_train['question_title_embedding'], embeddings_train['answer_embedding']),\n",
    "    l2_dist(embeddings_train['question_body_embedding'], embeddings_train['answer_embedding']),\n",
    "    l2_dist(embeddings_train['question_body_embedding'], embeddings_train['question_title_embedding']),\n",
    "    cos_dist(embeddings_train['question_title_embedding'], embeddings_train['answer_embedding']),\n",
    "    cos_dist(embeddings_train['question_body_embedding'], embeddings_train['answer_embedding']),\n",
    "    cos_dist(embeddings_train['question_body_embedding'], embeddings_train['question_title_embedding'])\n",
    "]).T\n",
    "\n",
    "dist_features_test = np.array([\n",
    "    l2_dist(embeddings_test['question_title_embedding'], embeddings_test['answer_embedding']),\n",
    "    l2_dist(embeddings_test['question_body_embedding'], embeddings_test['answer_embedding']),\n",
    "    l2_dist(embeddings_test['question_body_embedding'], embeddings_test['question_title_embedding']),\n",
    "    cos_dist(embeddings_test['question_title_embedding'], embeddings_test['answer_embedding']),\n",
    "    cos_dist(embeddings_test['question_body_embedding'], embeddings_test['answer_embedding']),\n",
    "    cos_dist(embeddings_test['question_body_embedding'], embeddings_test['question_title_embedding'])\n",
    "]).T\n",
    "\n",
    "del embeddings_train, embeddings_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:50:02.2258Z",
     "iopub.status.busy": "2022-02-28T09:50:02.225511Z",
     "iopub.status.idle": "2022-02-28T09:50:02.241737Z",
     "shell.execute_reply": "2022-02-28T09:50:02.24092Z",
     "shell.execute_reply.started": "2022-02-28T09:50:02.225753Z"
    }
   },
   "outputs": [],
   "source": [
    "for ii in range(0,6):\n",
    "    xtrain['dist'+str(ii)] = dist_features_train[:,ii]\n",
    "    xtest['dist'+str(ii)] = dist_features_test[:,ii]\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Pipeline buildup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:50:02.243761Z",
     "iopub.status.busy": "2022-02-28T09:50:02.243256Z",
     "iopub.status.idle": "2022-02-28T09:50:02.248688Z",
     "shell.execute_reply": "2022-02-28T09:50:02.247709Z",
     "shell.execute_reply.started": "2022-02-28T09:50:02.24357Z"
    }
   },
   "outputs": [],
   "source": [
    "limit_char = 5000\n",
    "limit_word = 25000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:50:02.250889Z",
     "iopub.status.busy": "2022-02-28T09:50:02.25043Z",
     "iopub.status.idle": "2022-02-28T09:50:02.274119Z",
     "shell.execute_reply": "2022-02-28T09:50:02.273159Z",
     "shell.execute_reply.started": "2022-02-28T09:50:02.250712Z"
    }
   },
   "outputs": [],
   "source": [
    "# each text column gets a word-level and a char-level TF-IDF pipeline; the\n",
    "# two factories below replace three byte-identical copies of each setup\n",
    "def make_word_tfidf():\n",
    "    \"\"\"Word 1-2 gram TF-IDF pipeline, capped at limit_word features.\"\"\"\n",
    "    return Pipeline([\n",
    "        ('tfidf', TfidfVectorizer(lowercase = False, max_df = 0.3, min_df = 1,\n",
    "                                  binary = False, use_idf = True, smooth_idf = False,\n",
    "                                  ngram_range = (1,2), stop_words = 'english',\n",
    "                                  token_pattern = '(?u)\\\\b\\\\w+\\\\b', max_features = limit_word))\n",
    "    ])\n",
    "\n",
    "def make_char_tfidf():\n",
    "    \"\"\"Char 1-4 gram TF-IDF pipeline, capped at limit_char features.\"\"\"\n",
    "    return Pipeline([\n",
    "        ('tfidf2', TfidfVectorizer(sublinear_tf = True,\n",
    "                                   strip_accents = 'unicode', analyzer = 'char',\n",
    "                                   stop_words = 'english', ngram_range = (1, 4),\n",
    "                                   max_features = limit_char))\n",
    "    ])\n",
    "\n",
    "title_col = 'question_title'\n",
    "title_transformer = make_word_tfidf()\n",
    "title_transformer2 = make_char_tfidf()\n",
    "\n",
    "body_col = 'question_body'\n",
    "body_transformer = make_word_tfidf()\n",
    "body_transformer2 = make_char_tfidf()\n",
    "\n",
    "answer_col = 'answer'\n",
    "answer_transformer = make_word_tfidf()\n",
    "answer_transformer2 = make_char_tfidf()\n",
    "\n",
    "# engineered numeric features: lengths, counts and embedding distances\n",
    "num_cols = [\n",
    "    'question_title_word_len', 'question_body_word_len', 'answer_word_len', 'answer_div',\n",
    "    'question_title_num_chars', 'question_body_num_chars', 'answer_num_chars',\n",
    "    'question_title_num_stopwords', 'question_body_num_stopwords', 'answer_num_stopwords',\n",
    "    'question_title_num_punctuations', 'question_body_num_punctuations', 'answer_num_punctuations',\n",
    "    'question_title_num_words_upper', 'question_body_num_words_upper', 'answer_num_words_upper',\n",
    "    'dist0', 'dist1', 'dist2', 'dist3', 'dist4', 'dist5'\n",
    "]\n",
    "\n",
    "num_transformer = Pipeline([\n",
    "    ('impute', SimpleImputer(strategy='constant', fill_value=0)),\n",
    "    ('scale', PowerTransformer(method='yeo-johnson'))\n",
    "])\n",
    "\n",
    "# categorical features: domain components, category and user flags\n",
    "cat_cols = [\n",
    "    'dom_0', 'dom_1', 'dom_2', 'dom_3',\n",
    "    'category',\n",
    "    'is_question_no_name_user',\n",
    "    'is_answer_no_name_user',\n",
    "    'dom_cnt'\n",
    "]\n",
    "\n",
    "cat_transformer = Pipeline([\n",
    "    ('impute', SimpleImputer(strategy='constant', fill_value='')),\n",
    "    ('encode', OneHotEncoder(handle_unknown='ignore'))\n",
    "])\n",
    "\n",
    "# glue: six text vectorisers + scaled numerics + one-hot categoricals\n",
    "preprocessor = ColumnTransformer(\n",
    "    transformers = [\n",
    "        ('title', title_transformer, title_col),\n",
    "        ('title2', title_transformer2, title_col),\n",
    "        ('body', body_transformer, body_col),\n",
    "        ('body2', body_transformer2, body_col),\n",
    "        ('answer', answer_transformer, answer_col),\n",
    "        ('answer2', answer_transformer2, answer_col),\n",
    "        ('num', num_transformer, num_cols),\n",
    "        ('cat', cat_transformer, cat_cols)\n",
    "    ]\n",
    ")\n",
    "\n",
    "pipeline = Pipeline([\n",
    "    ('preprocessor', preprocessor),\n",
    "    ('estimator', Ridge(random_state=RANDOM_STATE))\n",
    "])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Find best parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:50:02.276525Z",
     "iopub.status.busy": "2022-02-28T09:50:02.275872Z",
     "iopub.status.idle": "2022-02-28T09:50:02.294947Z",
     "shell.execute_reply": "2022-02-28T09:50:02.294392Z",
     "shell.execute_reply.started": "2022-02-28T09:50:02.276169Z"
    }
   },
   "outputs": [],
   "source": [
    "# prep\n",
    "id_train = xtrain['qa_id']\n",
    "ytrain = xtrain[target_cols]\n",
    "xtrain.drop(target_cols + ['qa_id'], axis = 1, inplace = True)\n",
    "\n",
    "id_test = xtest['qa_id'] \n",
    "xtest.drop('qa_id', axis = 1, inplace = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:50:02.298152Z",
     "iopub.status.busy": "2022-02-28T09:50:02.2979Z",
     "iopub.status.idle": "2022-02-28T09:50:02.30809Z",
     "shell.execute_reply": "2022-02-28T09:50:02.307338Z",
     "shell.execute_reply.started": "2022-02-28T09:50:02.298106Z"
    }
   },
   "outputs": [],
   "source": [
    "# identifier / free-text columns that are not used as model features\n",
    "dropcols = ['question_user_name', 'question_user_page',\n",
    " 'answer_user_name', 'answer_user_page','url','host']\n",
    "\n",
    "xtrain.drop(dropcols, axis = 1, inplace = True)\n",
    "xtest.drop(dropcols, axis = 1, inplace = True)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Folds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:50:30.263192Z",
     "iopub.status.busy": "2022-02-28T09:50:30.262882Z",
     "iopub.status.idle": "2022-02-28T09:50:30.27023Z",
     "shell.execute_reply": "2022-02-28T09:50:30.26947Z",
     "shell.execute_reply.started": "2022-02-28T09:50:30.263142Z"
    }
   },
   "outputs": [],
   "source": [
    "nfolds = 5\n",
    "mvalid = np.zeros((xtrain.shape[0], len(target_cols)))\n",
    "mfull = np.zeros((xtest.shape[0], len(target_cols)))\n",
    "\n",
    "kf = GroupKFold(n_splits= nfolds).split(X=xtrain.question_body, groups=xtrain.question_body)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-02-28T09:50:47.634777Z",
     "iopub.status.busy": "2022-02-28T09:50:47.634475Z",
     "iopub.status.idle": "2022-02-28T09:53:15.18917Z",
     "shell.execute_reply": "2022-02-28T09:53:15.18763Z",
     "shell.execute_reply.started": "2022-02-28T09:50:47.634726Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---\n",
      "---\n",
      "---\n",
      "---\n",
      "---\n"
     ]
    }
   ],
   "source": [
    " \n",
    "for ind, (train_index, test_index) in enumerate(kf):\n",
    "    \n",
    "\n",
    "    # split\n",
    "    x0, x1 = xtrain.loc[train_index], xtrain.loc[test_index]\n",
    "    y0, y1 = ytrain.loc[train_index], ytrain.loc[test_index]\n",
    "\n",
    "    for ii in range(0, ytrain.shape[1]):\n",
    "\n",
    "        # fit model\n",
    "        be = clone(pipeline)\n",
    "#        be.steps[1][1].alpha = vector_as.loc[ii]\n",
    "        be.fit(x0, np.array(y0)[:,ii])\n",
    "\n",
    "        filename = 'ridge_f' + str(ind) + '_c' + str(ii) + '.pkl'\n",
    "        pickle.dump(be, open(filename, 'wb'))\n",
    "        \n",
    "        # park forecast\n",
    "        mvalid[test_index, ii] = be.predict(x1)\n",
    "        mfull[:,ii] += be.predict(xtest)/nfolds\n",
    "        \n",
    "    print('---')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Performance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.3041240056179672\n"
     ]
    }
   ],
   "source": [
    "corvec = np.zeros((ytrain.shape[1],1))\n",
    "for ii in range(0, ytrain.shape[1]):\n",
    "    mvalid[:,ii] = rankdata(mvalid[:,ii])/mvalid.shape[0]\n",
    "    mfull[:,ii] = rankdata(mfull[:,ii])/mfull.shape[0]\n",
    "    \n",
    "    corvec[ii] = stats.spearmanr(ytrain[ytrain.columns[ii]], mvalid[:,ii])[0]\n",
    "    \n",
    "print(corvec.mean())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Submission"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "prval = pd.DataFrame(mvalid)\n",
    "prval.columns = ytrain.columns\n",
    "prval['qa_id'] = id_train\n",
    "prval = prval[['qa_id'] + list(prval.columns[:-1])]\n",
    "prval.to_csv(metas_dir + 'prval_ridge_'+todate+ '.csv', index = False)\n",
    "\n",
    "\n",
    "prfull = pd.DataFrame(mfull)\n",
    "prfull.columns = ytrain.columns\n",
    "prfull['qa_id'] = id_test\n",
    "prfull = prfull[['qa_id'] + list(prfull.columns[:-1])]\n",
    "prfull.to_csv(metas_dir + 'prfull_ridge_'+todate+ '.csv', index = False)\n",
    "\n",
    "prfull.to_csv(sub_dir + 'submission.csv', index = False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
