{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "import os\n",
    "import pickle\n",
    "import warnings\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): pandarallel is imported but never initialized or used in\n",
    "# this notebook (no pandarallel.initialize() / parallel_apply calls).\n",
    "from pandarallel import pandarallel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Offline recall candidates and click log produced by earlier pipeline stages\n",
    "df_feature = pd.read_pickle('./user_data/data/offline/recall.pkl')\n",
    "df_click = pd.read_pickle('./user_data/data/offline/click.pkl')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def func_if_sum(x):\n",
    "    \"\"\"Recency-weighted sum of itemcf similarities between the candidate\n",
    "    article and every historical click of the user (weight 0.7 ** position,\n",
    "    most recent click first).\"\"\"\n",
    "    user_id = x['user_id']\n",
    "    article_id = x['article_id']\n",
    "\n",
    "    # Reverse so the most recent click gets the largest weight.\n",
    "    interacted_items = user_item_dict[user_id][::-1]\n",
    "\n",
    "    sim_sum = 0\n",
    "    for loc, i in enumerate(interacted_items):\n",
    "        try:\n",
    "            sim_sum += item_sim[i][article_id] * (0.7**loc)\n",
    "        except KeyError:\n",
    "            # Unknown item pair contributes nothing.\n",
    "            pass\n",
    "    return sim_sum\n",
    "\n",
    "\n",
    "def func_if_last(x):\n",
    "    \"\"\"Itemcf similarity between the candidate article and the user's\n",
    "    most recent click (0 when the pair is unknown).\"\"\"\n",
    "    user_id = x['user_id']\n",
    "    article_id = x['article_id']\n",
    "\n",
    "    last_item = user_item_dict[user_id][-1]\n",
    "\n",
    "    sim = 0\n",
    "    try:\n",
    "        sim = item_sim[last_item][article_id]\n",
    "    except KeyError:\n",
    "        pass\n",
    "    return sim\n",
    "\n",
    "\n",
    "def func_binetwork_sim_last(x):\n",
    "    \"\"\"Binetwork similarity between the candidate article and the user's\n",
    "    most recent click (0 when the pair is unknown).\"\"\"\n",
    "    user_id = x['user_id']\n",
    "    article_id = x['article_id']\n",
    "\n",
    "    last_item = user_item_dict[user_id][-1]\n",
    "\n",
    "    sim = 0\n",
    "    try:\n",
    "        sim = binetwork_sim[last_item][article_id]\n",
    "    except KeyError:\n",
    "        pass\n",
    "    return sim\n",
    "\n",
    "\n",
    "def consine_distance(vector1, vector2):\n",
    "    \"\"\"Cosine similarity of two vectors; returns -1 when either argument\n",
    "    is not a numpy array (e.g. a missing embedding).\"\"\"\n",
    "    if not isinstance(vector1, np.ndarray) or not isinstance(vector2, np.ndarray):\n",
    "        return -1\n",
    "    distance = np.dot(vector1, vector2) / (\n",
    "        np.linalg.norm(vector1) * np.linalg.norm(vector2))\n",
    "    return distance\n",
    "\n",
    "\n",
    "def func_w2w_sum(x, num):\n",
    "    \"\"\"Sum of word2vec cosine similarities between the candidate article\n",
    "    and the user's `num` most recent clicks.\"\"\"\n",
    "    user_id = x['user_id']\n",
    "    article_id = x['article_id']\n",
    "\n",
    "    # Keep only the `num` most recent clicks.\n",
    "    interacted_items = user_item_dict[user_id][::-1][:num]\n",
    "\n",
    "    sim_sum = 0\n",
    "    for loc, i in enumerate(interacted_items):\n",
    "        try:\n",
    "            sim_sum += consine_distance(article_vec_map[article_id],\n",
    "                                        article_vec_map[i])\n",
    "        except KeyError:\n",
    "            # Article without an embedding is skipped.\n",
    "            pass\n",
    "    return sim_sum\n",
    "\n",
    "\n",
    "def func_w2w_last_sim(x):\n",
    "    \"\"\"Word2vec cosine similarity between the candidate article and the\n",
    "    user's most recent click (0 when an embedding is missing).\"\"\"\n",
    "    user_id = x['user_id']\n",
    "    article_id = x['article_id']\n",
    "\n",
    "    last_item = user_item_dict[user_id][-1]\n",
    "\n",
    "    sim = 0\n",
    "    try:\n",
    "        sim = consine_distance(article_vec_map[article_id],\n",
    "                               article_vec_map[last_item])\n",
    "    except KeyError:\n",
    "        pass\n",
    "    return sim"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Article metadata; creation timestamps arrive in milliseconds.\n",
    "df_article = pd.read_csv('./tcdata/articles.csv')\n",
    "df_article['created_at_ts'] = (df_article['created_at_ts'] / 1000).astype('int')\n",
    "df_feature = df_feature.merge(df_article, how='left')\n",
    "df_feature['created_at_datetime'] = pd.to_datetime(df_feature['created_at_ts'],\n",
    "                                                   unit='s')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# History-based features\n",
    "df_click = (df_click.sort_values(['user_id', 'click_timestamp'])\n",
    "            .rename(columns={'click_article_id': 'article_id'})\n",
    "            .merge(df_article, how='left'))\n",
    "\n",
    "df_click['click_timestamp'] = df_click['click_timestamp'] / 1000\n",
    "df_click['click_datetime'] = pd.to_datetime(df_click['click_timestamp'],\n",
    "                                            unit='s', errors='coerce')\n",
    "df_click['click_datetime_hour'] = df_click['click_datetime'].dt.hour\n",
    "\n",
    "# Mean gap between creation times of consecutively clicked articles, per user\n",
    "df_click['user_id_click_article_created_at_ts_diff'] = df_click.groupby(\n",
    "    'user_id')['created_at_ts'].diff()\n",
    "df_temp = (df_click.groupby('user_id')\n",
    "           ['user_id_click_article_created_at_ts_diff'].mean().reset_index())\n",
    "df_temp.columns = ['user_id', 'user_id_click_article_created_at_ts_diff_mean']\n",
    "df_feature = df_feature.merge(df_temp, how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mean time gap between a user's consecutive clicks\n",
    "df_click['user_id_click_diff'] = df_click.groupby(\n",
    "    'user_id')['click_timestamp'].diff()\n",
    "df_temp = (df_click.groupby('user_id')['user_id_click_diff']\n",
    "           .mean().reset_index())\n",
    "df_temp.columns = ['user_id', 'user_id_click_diff_mean']\n",
    "df_feature = df_feature.merge(df_temp, how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\evo\\anaconda\\envs\\tf1.14\\lib\\site-packages\\ipykernel_launcher.py:10: FutureWarning: using a dict on a Series for aggregation\n",
      "is deprecated and will be removed in a future version. Use                 named aggregation instead.\n",
      "\n",
      "    >>> grouper.agg(name_1=func_1, name_2=func_2)\n",
      "\n",
      "  # Remove the CWD from sys.path while we load stuff.\n"
     ]
    }
   ],
   "source": [
    "df_click['click_timestamp_created_at_ts_diff'] = df_click[\n",
    "        'click_timestamp'] - df_click['created_at_ts']\n",
    "\n",
    "# 点击文章的创建时间差的统计值\n",
    "df_temp = df_click.groupby(\n",
    "        ['user_id'])['click_timestamp_created_at_ts_diff'].agg({\n",
    "            'user_click_timestamp_created_at_ts_diff_mean':\n",
    "            'mean',\n",
    "            'user_click_timestamp_created_at_ts_diff_std':\n",
    "            'std'\n",
    "        }).reset_index()\n",
    "df_feature = df_feature.merge(df_temp, how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\evo\\anaconda\\envs\\tf1.14\\lib\\site-packages\\ipykernel_launcher.py:4: FutureWarning: using a dict on a Series for aggregation\n",
      "is deprecated and will be removed in a future version. Use                 named aggregation instead.\n",
      "\n",
      "    >>> grouper.agg(name_1=func_1, name_2=func_2)\n",
      "\n",
      "  after removing the cwd from sys.path.\n"
     ]
    }
   ],
   "source": [
    "# 点击的新闻的 click_datetime_hour 统计值\n",
    "df_temp = df_click.groupby(['user_id'])['click_datetime_hour'].agg({\n",
    "        'user_click_datetime_hour_std':\n",
    "        'std'\n",
    "    }).reset_index()\n",
    "df_feature = df_feature.merge(df_temp, how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\evo\\anaconda\\envs\\tf1.14\\lib\\site-packages\\ipykernel_launcher.py:6: FutureWarning: using a dict on a Series for aggregation\n",
      "is deprecated and will be removed in a future version. Use                 named aggregation instead.\n",
      "\n",
      "    >>> grouper.agg(name_1=func_1, name_2=func_2)\n",
      "\n",
      "  \n"
     ]
    }
   ],
   "source": [
    "# 点击的新闻的 words_count 统计值\n",
    "df_temp = df_click.groupby(['user_id'])['words_count'].agg({\n",
    "        'user_clicked_article_words_count_mean':\n",
    "        'mean',\n",
    "        'user_click_last_article_words_count':\n",
    "        lambda x: x.iloc[-1]\n",
    "    }).reset_index()\n",
    "df_feature = df_feature.merge(df_temp, how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\evo\\anaconda\\envs\\tf1.14\\lib\\site-packages\\ipykernel_launcher.py:6: FutureWarning: using a dict on a Series for aggregation\n",
      "is deprecated and will be removed in a future version. Use                 named aggregation instead.\n",
      "\n",
      "    >>> grouper.agg(name_1=func_1, name_2=func_2)\n",
      "\n",
      "  \n"
     ]
    }
   ],
   "source": [
    "# 点击的新闻的 created_at_ts 统计值\n",
    "df_temp = df_click.groupby('user_id')['created_at_ts'].agg({\n",
    "        'user_click_last_article_created_time':\n",
    "        lambda x: x.iloc[-1],\n",
    "        'user_clicked_article_created_time_max':\n",
    "        'max',\n",
    "    }).reset_index()\n",
    "df_feature = df_feature.merge(df_temp, how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\evo\\anaconda\\envs\\tf1.14\\lib\\site-packages\\ipykernel_launcher.py:6: FutureWarning: using a dict on a Series for aggregation\n",
      "is deprecated and will be removed in a future version. Use                 named aggregation instead.\n",
      "\n",
      "    >>> grouper.agg(name_1=func_1, name_2=func_2)\n",
      "\n",
      "  \n"
     ]
    }
   ],
   "source": [
    "# 点击的新闻的 click_timestamp 统计值\n",
    "df_temp = df_click.groupby('user_id')['click_timestamp'].agg({\n",
    "        'user_click_last_article_click_time':\n",
    "        lambda x: x.iloc[-1],\n",
    "        'user_clicked_article_click_time_mean':\n",
    "        'mean',\n",
    "    }).reset_index()\n",
    "df_feature = df_feature.merge(df_temp, how='left')\n",
    "\n",
    "\n",
    "df_feature['user_last_click_created_at_ts_diff'] = df_feature[\n",
    "        'created_at_ts'] - df_feature['user_click_last_article_created_time']\n",
    "df_feature['user_last_click_timestamp_diff'] = df_feature[\n",
    "        'created_at_ts'] - df_feature['user_click_last_article_click_time']\n",
    "df_feature['user_last_click_words_count_diff'] = df_feature[\n",
    "        'words_count'] - df_feature['user_click_last_article_words_count']\n",
    "\n",
    "\n",
    "    # 计数统计\n",
    "for f in [['user_id'], ['article_id'], ['user_id', 'category_id']]:\n",
    "    df_temp = df_click.groupby(f).size().reset_index()\n",
    "    df_temp.columns = f + ['{}_cnt'.format('_'.join(f))]\n",
    "\n",
    "    df_feature = df_feature.merge(df_temp, how='left')\n",
    "\n",
    "\n",
    "\n",
    "    # 召回相关特征\n",
    "    ## itemcf 相关\n",
    "user_item_ = df_click.groupby('user_id')['article_id'].agg(\n",
    "        list).reset_index()\n",
    "user_item_dict = dict(zip(user_item_['user_id'], user_item_['article_id']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# itemcf similarity matrix built by the recall stage; the context manager\n",
    "# guarantees the file handle is closed even if unpickling fails.\n",
    "# NOTE(review): pickle.load is only safe on trusted local files.\n",
    "with open('./user_data/sim/offline/itemcf_sim.pkl', 'rb') as f:\n",
    "    item_sim = pickle.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Similarity between the user's historical clicks and the candidate article\n",
    "df_feature['user_clicked_article_itemcf_sim_sum'] = df_feature[[\n",
    "        'user_id', 'article_id'\n",
    "    ]].apply(func_if_sum, axis=1)\n",
    "df_feature['user_last_click_article_itemcf_sim'] = df_feature[[\n",
    "        'user_id', 'article_id'\n",
    "    ]].apply(func_if_last, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "## binetwork-based similarity\n",
    "\n",
    "# NOTE(review): path normalized to './user_data' to match every other\n",
    "# read/write in this notebook (was '../user_data').\n",
    "with open('./user_data/sim/offline/binetwork_sim.pkl', 'rb') as f:\n",
    "    binetwork_sim = pickle.load(f)\n",
    "\n",
    "df_feature['user_last_click_article_binetwork_sim'] = df_feature[[\n",
    "    'user_id', 'article_id'\n",
    "]].apply(func_binetwork_sim_last, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "## word2vec-based similarity\n",
    "\n",
    "# NOTE(review): path normalized to './user_data' to match every other\n",
    "# read/write in this notebook (was '../user_data').\n",
    "with open('./user_data/data/offline/article_w2v.pkl', 'rb') as f:\n",
    "    article_vec_map = pickle.load(f)\n",
    "\n",
    "df_feature['user_last_click_article_w2v_sim'] = df_feature[[\n",
    "    'user_id', 'article_id'\n",
    "]].apply(func_w2w_last_sim, axis=1)\n",
    "df_feature['user_click_article_w2w_sim_sum_2'] = df_feature[[\n",
    "    'user_id', 'article_id'\n",
    "]].apply(lambda x: func_w2w_sum(x, 2), axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the assembled feature table for the downstream ranking stage\n",
    "df_feature.to_pickle('./user_data/data/offline/feature.pkl')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:tf1.14]",
   "language": "python",
   "name": "conda-env-tf1.14-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
