{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import sklearn.metrics\n",
    "\n",
    "# --- Load the training data ----------------------------------------------\n",
    "# NOTE(review): hardcoded absolute local path; prefer a configurable DATA_DIR.\n",
    "a = pd.read_csv('/Users/akhileshsk/Dropbox/Kaggle_competitions/Quora competition/train.csv',delimiter = ',')\n",
    "a = a.dropna()\n",
    "q1 = a['question1'].values\n",
    "q2 = a['question2'].values\n",
    "labels = a['is_duplicate'].values\n",
    "\n",
    "# --- Load pre-trained word embeddings and the word->index dictionary ------\n",
    "final_embeddings = np.load(\"final_embeddings.npy\")\n",
    "dictionary = np.load('dictionary.npy').item()\n",
    "\n",
    "def get_word(word):\n",
    "    \"\"\"Return `word` if it is in the embedding dictionary, else 'UNK'.\"\"\"\n",
    "    if word not in dictionary:\n",
    "        return 'UNK'\n",
    "    else:\n",
    "        return word\n",
    "\n",
    "# Characters stripped from each question before tokenizing.\n",
    "specialstr = '?:!/;\\|~`%1234567890.,%&$()_{}[]^\"'\n",
    "\n",
    "def embed_question(line):\n",
    "    \"\"\"Embed one question string as an (n_words, embed_dim) float array.\"\"\"\n",
    "    words = ''.join(c for c in line if c not in specialstr).split()\n",
    "    return np.array([final_embeddings[dictionary[get_word(w.lower())]] for w in words])\n",
    "\n",
    "# Embed every question; per-question arrays are ragged, so the outer\n",
    "# containers become 1-D object arrays.\n",
    "question1_data = []\n",
    "for j in range(len(q1)):\n",
    "    if j%50000==0:\n",
    "        print(j/(len(q1)))  # progress fraction\n",
    "    question1_data.append(embed_question(q1[j]))\n",
    "question1_data = np.array(question1_data)\n",
    "\n",
    "question2_data = []\n",
    "for j in range(len(q2)):\n",
    "    if j%50000==0:\n",
    "        print(j/(len(q2)))  # bug fix: was len(q1); report progress over q2\n",
    "    question2_data.append(embed_question(q2[j]))\n",
    "question2_data = np.array(question2_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Identify pairs where either question has fewer than 3 tokens: the\n",
    "# 5x128 / 3x128 summary features below need at least 3 word vectors.\n",
    "pos = []\n",
    "for i in range(len(question1_data)):\n",
    "    if question1_data[i].shape[0] < 3 or question2_data[i].shape[0] < 3:\n",
    "        pos.append(i)\n",
    "    question1_data[i] = np.round(question1_data[i],4)\n",
    "    question2_data[i] = np.round(question2_data[i],4)\n",
    "labels = np.delete(labels,pos)\n",
    "question1_data = np.delete(question1_data,pos)\n",
    "question2_data = np.delete(question2_data,pos)\n",
    "\n",
    "def make_summary_features(question_data):\n",
    "    \"\"\"Build the per-question summary tensors used as features.\n",
    "\n",
    "    Returns (f, f_cos, f_plane) where, per question,\n",
    "      f       = [sum(all), sum(all but last), last word]      -> (3, 128)\n",
    "      f_cos   = pairwise differences of the rows of f         -> (3, 128)\n",
    "      f_plane = f plus [sum(all but first), first word]       -> (5, 128)\n",
    "    \"\"\"\n",
    "    n = len(question_data)\n",
    "    f = np.ones((n,3,128),np.float32)\n",
    "    f_cos = np.ones((n,3,128),np.float32)\n",
    "    f_plane = np.ones((n,5,128),np.float32)\n",
    "    for i in range(n):\n",
    "        f1 = np.sum(question_data[i],0)\n",
    "        f2 = np.sum(question_data[i][:-1],0)\n",
    "        f3 = question_data[i][-1]\n",
    "        f4 = np.sum(question_data[i][1:],0)\n",
    "        f5 = question_data[i][0]\n",
    "        f[i] = [f1,f2,f3]\n",
    "        f_cos[i] = [f1-f2,f2-f3,f3-f1]\n",
    "        f_plane[i] = [f1,f2,f3,f4,f5]\n",
    "    return f, f_cos, f_plane\n",
    "\n",
    "# consistency fix: the q2 tensors were previously sized and iterated with\n",
    "# question1_data; a single helper removes that inconsistency (lengths are\n",
    "# equal after the paired deletion above, so behavior is unchanged).\n",
    "q1_f, q1_f_cos, q1_f_plane = make_summary_features(question1_data)\n",
    "q2_f, q2_f_cos, q2_f_plane = make_summary_features(question2_data)\n",
    "\n",
    "# 25 euclidean distances between the 5 'plane' vectors of q1 and q2, plus\n",
    "# 9 cosine distances between the 3 difference vectors.\n",
    "dist = np.ones((len(q1_f),25),np.float32)\n",
    "dist_cos = np.ones((len(q1_f),9),np.float32)\n",
    "for i in range(len(q1_f_plane)):\n",
    "    dist[i] = sklearn.metrics.pairwise.pairwise_distances(q1_f_plane[i],q2_f_plane[i]).reshape(25)\n",
    "    dist_cos[i] = sklearn.metrics.pairwise.cosine_distances(q1_f_cos[i],q2_f_cos[i]).reshape(9)\n",
    "feature_mat = np.hstack((dist,dist_cos))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#training\n",
    "import xgboost as xgb\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import log_loss\n",
    "\n",
    "X = feature_mat\n",
    "y = labels\n",
    "train_x, valid_x, train_y, valid_y = train_test_split(X, y, test_size=0.33, random_state=53)\n",
    "# Build the DMatrix wrappers once (they were previously constructed twice).\n",
    "dtrain = xgb.DMatrix(train_x,label = train_y)\n",
    "dvalid = xgb.DMatrix(valid_x,label = valid_y)\n",
    "\n",
    "# NOTE(review): max_depth=25 is very deep for boosted trees and may overfit;\n",
    "# confirm against the validation log loss before trusting it.\n",
    "param = {'max_depth':25, 'eta':0.3, 'silent':0, 'objective':'binary:logistic','subsample':0.5,'gamma':0 }\n",
    "param['nthread'] = 4\n",
    "param['eval_metric'] = 'auc'\n",
    "#param['scale_pos_weight'] = ratio\n",
    "num_round = 50\n",
    "evallist  = [(dvalid,'eval'),(dtrain,'train')]\n",
    "bst = xgb.train( param, dtrain, num_round, evallist)\n",
    "\n",
    "# Evaluate with log loss (the competition metric).\n",
    "loss = log_loss(valid_y,bst.predict(dvalid))\n",
    "print('log loss '+str(loss))\n",
    "#save model\n",
    "#bst.save_model('05_07_1AM.model')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#test cases\n",
    "#putting together the test matrix\n",
    "b = pd.read_csv('/Users/akhileshsk/Dropbox/Kaggle_competitions/Quora competition/test.csv',delimiter = ',')\n",
    "# .ix was deprecated in pandas 0.20 and removed in 1.0; .loc is the\n",
    "# supported label-based equivalent for this boolean-mask assignment.\n",
    "# NOTE(review): this overwrites BOTH questions of a pair when either one is\n",
    "# null -- confirm that is intended (it makes such pairs trivially identical).\n",
    "b.loc[b['question1'].isnull(),['question1','question2']] = 'random empty question'\n",
    "b.loc[b['question2'].isnull(),['question1','question2']] = 'random empty question'\n",
    "q1 = b['question1'].values\n",
    "q2 = b['question2'].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Embed the test questions (mirrors the training embedding cell) -------\n",
    "del question1_data\n",
    "del question2_data\n",
    "#check if words are in dictionary first\n",
    "question1_data = []\n",
    "question2_data = []\n",
    "for j in range(len(q1)):\n",
    "    if j%100000==0:\n",
    "        print(j/(len(q1)))  # progress fraction\n",
    "    auto_data = []\n",
    "    line = q1[j]\n",
    "    temp_words = ''.join( c for c in line if  c not in specialstr ).split()\n",
    "    for i in temp_words:\n",
    "        auto_data.append(final_embeddings[dictionary[get_word(i.lower())]])\n",
    "    question1_data.append(np.array(auto_data))\n",
    "question1_data =  np.array(question1_data)\n",
    "\n",
    "for j in range(len(q2)):\n",
    "    if j%100000==0:\n",
    "        print(j/(len(q2)))  # bug fix: was len(q1); report progress over q2\n",
    "    auto_data = []\n",
    "    line = q2[j]\n",
    "    temp_words = ''.join( c for c in line if  c not in specialstr ).split()\n",
    "    for i in temp_words:\n",
    "        auto_data.append(final_embeddings[dictionary[get_word(i.lower())]])\n",
    "    question2_data.append(np.array(auto_data))\n",
    "question2_data =  np.array(question2_data)\n",
    "\n",
    "# Rows where either question has fewer than 3 tokens cannot produce the\n",
    "# summary features; record their positions so predictions can be re-aligned\n",
    "# with the full test frame afterwards.\n",
    "pos = []\n",
    "for i in range(len(question1_data)):\n",
    "    if question1_data[i].shape[0] < 3 or question2_data[i].shape[0] < 3:\n",
    "        pos.append(i)\n",
    "question1_data = np.delete(question1_data,pos)\n",
    "question2_data = np.delete(question2_data,pos)\n",
    "\n",
    "del valid_y  # free memory before building the large test feature tensors\n",
    "\n",
    "q1_f_cos = np.ones((len(question1_data),3,128),np.float32)\n",
    "q1_f_plane = np.ones((len(question1_data),5,128),np.float32)\n",
    "for i in range(question1_data.shape[0]):\n",
    "    if i%100000==0:\n",
    "        print(i)\n",
    "    f1 = np.sum(question1_data[i],0)\n",
    "    f2 = np.sum(question1_data[i][:-1],0)\n",
    "    f3 = question1_data[i][-1]\n",
    "    f4 = np.sum(question1_data[i][1:],0)\n",
    "    f5 = question1_data[i][0]\n",
    "    q1_f_cos[i] = [f1-f2,f2-f3,f3-f1]\n",
    "    q1_f_plane[i] = [f1,f2,f3,f4,f5]\n",
    "\n",
    "# consistency fix: size/iterate the q2 tensors with question2_data (same\n",
    "# length as question1_data after the paired deletion, so behavior is equal).\n",
    "q2_f_cos = np.ones((len(question2_data),3,128),np.float32)\n",
    "q2_f_plane = np.ones((len(question2_data),5,128),np.float32)\n",
    "for i in range(question2_data.shape[0]):\n",
    "    if i%100000==0:\n",
    "        print(i)\n",
    "    f1 = np.sum(question2_data[i],0)\n",
    "    f2 = np.sum(question2_data[i][:-1],0)\n",
    "    f3 = question2_data[i][-1]\n",
    "    f4 = np.sum(question2_data[i][1:],0)\n",
    "    f5 = question2_data[i][0]\n",
    "    q2_f_cos[i] = [f1-f2,f2-f3,f3-f1]\n",
    "    q2_f_plane[i] = [f1,f2,f3,f4,f5]\n",
    "\n",
    "# 25 euclidean + 9 cosine distances per pair, matching the training features.\n",
    "import sklearn.metrics\n",
    "dist = np.ones((len(q1_f_plane),25),np.float32)\n",
    "dist_cos = np.ones((len(q1_f_plane),9),np.float32)\n",
    "for i in range(len(q1_f_plane)):\n",
    "    dist[i] = sklearn.metrics.pairwise.pairwise_distances(q1_f_plane[i],q2_f_plane[i]).reshape(25)\n",
    "    dist_cos[i] = sklearn.metrics.pairwise.cosine_distances(q1_f_cos[i],q2_f_cos[i]).reshape(9)\n",
    "feature_mat = np.hstack((dist,dist_cos))\n",
    "\n",
    "# Predict on the rows that survived filtering; dropped rows keep probability 0.\n",
    "dtest = xgb.DMatrix(feature_mat)\n",
    "results = bst.predict(dtest)\n",
    "diff_inds = np.setdiff1d(np.arange(b.shape[0]),pos)\n",
    "result2 = np.zeros(b.shape[0],np.float32)\n",
    "result2[diff_inds] = results\n",
    "\n",
    "#%% create a submission\n",
    "submissionName = 'word_2vec_vectors_xgb'\n",
    "submission = pd.DataFrame()\n",
    "submission['test_id'] = b['test_id']\n",
    "submission['is_duplicate'] = result2\n",
    "submission.to_csv(submissionName + '.csv', index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
