{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import collections\n",
    "import math\n",
    "import os\n",
    "import pickle\n",
    "import random\n",
    "\n",
    "import faiss\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import tensorflow as tf\n",
    "from deepctr.feature_column import SparseFeat, VarLenSparseFeat\n",
    "from deepmatch.models import YoutubeDNN\n",
    "from deepmatch.utils import sampledsoftmaxloss\n",
    "from pandas import DataFrame, Series\n",
    "from pandas.core.groupby import DataFrameGroupBy\n",
    "from pandas.core.indexes.numeric import IntegerIndex\n",
    "from sklearn.preprocessing import MinMaxScaler, LabelEncoder\n",
    "from tensorflow.python.keras.models import Model\n",
    "from tensorflow.python.keras.preprocessing.sequence import pad_sequences\n",
    "from tqdm import tqdm\n",
    "from datetime import datetime\n",
    "import ipdb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directory containing the raw competition CSV files.\n",
    "path='../data/'\n",
    "# Directory for cached intermediate artifacts (pickles).\n",
    "pathcache='../datacache/'\n",
    "# Whether to run in informal (debug / sampled-data) mode.\n",
    "informal = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_all_click_sample(sample_nums=10000) -> DataFrame:\n",
    "    \"\"\"\n",
    "    Sample the clicks of a subset of users from the training log, for debugging.\n",
    "\n",
    "    :param sample_nums: number of distinct users to sample; capped at the total\n",
    "        user count so np.random.choice(replace=False) cannot raise ValueError.\n",
    "    :return: de-duplicated click log restricted to the sampled users.\n",
    "    \"\"\"\n",
    "    all_click: DataFrame = pd.read_csv(path + \"train_click_log.csv\")\n",
    "    all_user_ids = all_click[\"user_id\"].unique()\n",
    "    # With replace=False, size > population raises; cap the sample size.\n",
    "    sample_size = min(sample_nums, len(all_user_ids))\n",
    "    sample_user_ids = np.random.choice(all_user_ids, size=sample_size, replace=False)\n",
    "    all_click = all_click[all_click[\"user_id\"].isin(sample_user_ids)]\n",
    "    # A (user, article, timestamp) triple identifies one click event.\n",
    "    all_click = all_click.drop_duplicates(subset=['user_id', 'click_article_id', 'click_timestamp'])\n",
    "    return all_click"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_all_click_df(offline=True) -> DataFrame:\n",
    "    \"\"\"\n",
    "    Read the click logs. offline=True uses only the training log (for local\n",
    "    validation of models/features); offline=False also merges the test-set\n",
    "    clicks so that recall for an online submission covers test users too.\n",
    "    \"\"\"\n",
    "    if offline:\n",
    "        all_click = pd.read_csv(path + \"train_click_log.csv\")\n",
    "    else:\n",
    "        trn_click = pd.read_csv(path + \"train_click_log.csv\")\n",
    "        tst_click = pd.read_csv(path + \"testA_click_log.csv\")\n",
    "        # DataFrame.append was deprecated and removed in pandas 2.0; concat is\n",
    "        # the supported equivalent. ignore_index avoids duplicate row labels.\n",
    "        all_click = pd.concat([trn_click, tst_click], ignore_index=True)\n",
    "    # A (user, article, timestamp) triple identifies one click event.\n",
    "    all_click = all_click.drop_duplicates(subset=['user_id', 'click_article_id', 'click_timestamp'])\n",
    "    return all_click"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Min-max normalize a Series/array to [0, 1].\n",
    "# NOTE(review): divides by (max - min); produces NaN/inf if all values are\n",
    "# equal — assumes the input varies. TODO confirm at call sites.\n",
    "max_min_scaler = lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "if informal:\n",
    "    # Debug mode: sampled subset of users\n",
    "    all_click_df = get_all_click_sample()\n",
    "else:\n",
    "    # Full data: training + test click logs\n",
    "    all_click_df = get_all_click_df(offline=False)\n",
    "# Normalize click timestamps to [0, 1]; used as weights in the association rules below\n",
    "all_click_df[\"click_timestamp\"] = all_click_df[[\"click_timestamp\"]].apply(max_min_scaler)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_item_info_df() -> DataFrame:\n",
    "    \"\"\"\n",
    "    Read the basic article attributes (category, word count, creation time).\n",
    "    \"\"\"\n",
    "    item_info_df: DataFrame = pd.read_csv(path + \"articles.csv\")\n",
    "    # Keep an untouched copy of the creation timestamp before any later normalization.\n",
    "    item_info_df['created_at_ts_raw'] = item_info_df['created_at_ts']\n",
    "    # Rename so the key column matches the click logs for merging/lookup.\n",
    "    item_info_df = item_info_df.rename(columns={'article_id': 'click_article_id'})\n",
    "    return item_info_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "item_info_df = get_item_info_df()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>click_article_id</th>\n",
       "      <th>category_id</th>\n",
       "      <th>created_at_ts</th>\n",
       "      <th>words_count</th>\n",
       "      <th>created_at_ts_raw</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1513144419000</td>\n",
       "      <td>168</td>\n",
       "      <td>1513144419000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1405341936000</td>\n",
       "      <td>189</td>\n",
       "      <td>1405341936000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>1408667706000</td>\n",
       "      <td>250</td>\n",
       "      <td>1408667706000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3</td>\n",
       "      <td>1</td>\n",
       "      <td>1408468313000</td>\n",
       "      <td>230</td>\n",
       "      <td>1408468313000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4</td>\n",
       "      <td>1</td>\n",
       "      <td>1407071171000</td>\n",
       "      <td>162</td>\n",
       "      <td>1407071171000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>364042</th>\n",
       "      <td>364042</td>\n",
       "      <td>460</td>\n",
       "      <td>1434034118000</td>\n",
       "      <td>144</td>\n",
       "      <td>1434034118000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>364043</th>\n",
       "      <td>364043</td>\n",
       "      <td>460</td>\n",
       "      <td>1434148472000</td>\n",
       "      <td>463</td>\n",
       "      <td>1434148472000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>364044</th>\n",
       "      <td>364044</td>\n",
       "      <td>460</td>\n",
       "      <td>1457974279000</td>\n",
       "      <td>177</td>\n",
       "      <td>1457974279000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>364045</th>\n",
       "      <td>364045</td>\n",
       "      <td>460</td>\n",
       "      <td>1515964737000</td>\n",
       "      <td>126</td>\n",
       "      <td>1515964737000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>364046</th>\n",
       "      <td>364046</td>\n",
       "      <td>460</td>\n",
       "      <td>1505811330000</td>\n",
       "      <td>479</td>\n",
       "      <td>1505811330000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>364047 rows × 5 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "        click_article_id  category_id  created_at_ts  words_count  \\\n",
       "0                      0            0  1513144419000          168   \n",
       "1                      1            1  1405341936000          189   \n",
       "2                      2            1  1408667706000          250   \n",
       "3                      3            1  1408468313000          230   \n",
       "4                      4            1  1407071171000          162   \n",
       "...                  ...          ...            ...          ...   \n",
       "364042            364042          460  1434034118000          144   \n",
       "364043            364043          460  1434148472000          463   \n",
       "364044            364044          460  1457974279000          177   \n",
       "364045            364045          460  1515964737000          126   \n",
       "364046            364046          460  1505811330000          479   \n",
       "\n",
       "        created_at_ts_raw  \n",
       "0           1513144419000  \n",
       "1           1405341936000  \n",
       "2           1408667706000  \n",
       "3           1408468313000  \n",
       "4           1407071171000  \n",
       "...                   ...  \n",
       "364042      1434034118000  \n",
       "364043      1434148472000  \n",
       "364044      1457974279000  \n",
       "364045      1515964737000  \n",
       "364046      1505811330000  \n",
       "\n",
       "[364047 rows x 5 columns]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "item_info_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_item_emb_dict() -> dict:\n",
    "    \"\"\"\n",
    "    Load article embeddings as {article_id: L2-normalized embedding row},\n",
    "    using a pickle cache to avoid re-reading and re-normalizing the CSV.\n",
    "    \"\"\"\n",
    "    if os.path.exists(pathcache + 'item_content_emb.pkl'):\n",
    "        # NOTE(review): pickle.load on untrusted files can execute arbitrary\n",
    "        # code; the cache is assumed to be locally produced.\n",
    "        item_emb_dict: dict = pickle.load(open(pathcache + 'item_content_emb.pkl', 'rb'))\n",
    "    else:\n",
    "        item_emb_df: DataFrame = pd.read_csv(path + \"articles_emb.csv\")\n",
    "        # Columns whose name contains 'emb' hold the embedding dimensions.\n",
    "        item_emb_cols = [x for x in item_emb_df.columns if \"emb\" in x]\n",
    "        item_emb_np = np.ascontiguousarray(item_emb_df[item_emb_cols])\n",
    "        # L2-normalize rows so inner products behave like cosine similarities.\n",
    "        item_emb_np = item_emb_np / np.linalg.norm(x=item_emb_np, axis=1, keepdims=True)\n",
    "        item_emb_dict = dict(zip(item_emb_df[\"article_id\"], item_emb_np))\n",
    "        pickle.dump(item_emb_dict, open(pathcache + 'item_content_emb.pkl', 'wb'))\n",
    "    return item_emb_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [],
   "source": [
    "item_emb_dict = get_item_emb_dict()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_item_time(click_df: DataFrame) -> dict:\n",
    "    \"\"\"\n",
    "    Build each user's chronological click sequence:\n",
    "    {user1: [(item1, time1), (item2, time2), ...], ...}\n",
    "    \"\"\"\n",
    "    click_df = click_df.sort_values(\"click_timestamp\")\n",
    "\n",
    "    def make_item_time_pair(df: DataFrame) -> list:\n",
    "        # Pair each clicked article with its click timestamp.\n",
    "        return list(zip(df[\"click_article_id\"], df[\"click_timestamp\"]))\n",
    "\n",
    "    click_byuid: DataFrameGroupBy = click_df.groupby(\"user_id\")[[\"click_article_id\", \"click_timestamp\"]]\n",
    "    user_item_time_df: DataFrame = click_byuid.apply(\n",
    "        lambda x: make_item_time_pair(x))\n",
    "    # apply() yields an unnamed column (0) after reset_index(); give it a name.\n",
    "    user_item_time_df = user_item_time_df.reset_index().rename(columns={0: \"item_time_list\"})\n",
    "    user_item_time_dict = dict(zip(user_item_time_df[\"user_id\"], user_item_time_df[\"item_time_list\"]))\n",
    "    return user_item_time_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_item_user_time_dict(click_df: DataFrame) -> dict:\n",
    "    \"\"\"\n",
    "    Build each article's chronological clicker sequence:\n",
    "    {item1: [(user1, time1), (user2, time2), ...], ...}\n",
    "    \"\"\"\n",
    "    click_df = click_df.sort_values(\"click_timestamp\")\n",
    "\n",
    "    def make_user_time_pair(df: DataFrame) -> list:\n",
    "        # Pair each clicking user with the click timestamp.\n",
    "        return list(zip(df[\"user_id\"], df[\"click_timestamp\"]))\n",
    "\n",
    "    click_byiid: DataFrameGroupBy = click_df.groupby(\"click_article_id\")[[\"user_id\", \"click_timestamp\"]]\n",
    "    item_user_time_df: DataFrame = click_byiid.apply(lambda x: make_user_time_pair(x))\n",
    "    # apply() yields an unnamed column (0) after reset_index(); give it a name.\n",
    "    item_user_time_df = item_user_time_df.reset_index().rename(columns={0: \"user_time_list\"})\n",
    "    item_user_time_dict = dict(zip(item_user_time_df[\"click_article_id\"], item_user_time_df[\"user_time_list\"]))\n",
    "    return item_user_time_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_hist_and_last_click(all_click: DataFrame) -> (DataFrame, DataFrame):\n",
    "    \"\"\"\n",
    "    Split the clicks into each user's history and their last click\n",
    "    (the last click serves as the recall-evaluation target).\n",
    "    \"\"\"\n",
    "    all_click = all_click.sort_values(['user_id', 'click_timestamp'])\n",
    "    click_last_df = all_click.groupby('user_id').tail(1)\n",
    "\n",
    "    def hist_func(user_df):\n",
    "        # Users with a single click keep that click in the history too,\n",
    "        # so no user ends up with an empty history (their last click then\n",
    "        # also appears in click_hist_df).\n",
    "        if len(user_df) == 1:\n",
    "            return user_df\n",
    "        else:\n",
    "            return user_df[:-1]\n",
    "\n",
    "    click_hist_df = all_click.groupby('user_id').apply(hist_func).reset_index(drop=True)\n",
    "    return click_hist_df, click_last_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_item_info_dict(item_info_df: DataFrame) -> (dict, dict, dict, dict):\n",
    "    \"\"\"\n",
    "    Build per-article lookup dicts (category, word count, normalized creation\n",
    "    time, raw creation time) for the recall and cold-start stages.\n",
    "    NOTE(review): mutates the caller's frame — 'created_at_ts' is min-max\n",
    "    normalized in place below.\n",
    "    \"\"\"\n",
    "    item_type_dict = dict(zip(item_info_df[\"click_article_id\"], item_info_df[\"category_id\"]))\n",
    "    item_words_dict = dict(zip(item_info_df[\"click_article_id\"], item_info_df[\"words_count\"]))\n",
    "    # Raw (un-normalized) creation timestamps, captured before the scaling below.\n",
    "    item_created_rawTime_dict = dict(zip(item_info_df[\"click_article_id\"], item_info_df[\"created_at_ts\"]))\n",
    "    item_info_df[\"created_at_ts\"] = item_info_df[[\"created_at_ts\"]].apply(max_min_scaler)\n",
    "    item_created_time_dict = dict(zip(item_info_df[\"click_article_id\"], item_info_df[\"created_at_ts\"]))\n",
    "    return item_type_dict, item_words_dict, item_created_time_dict, item_created_rawTime_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the article-attribute lookup dicts for fast access later on\n",
    "item_type_dict, item_words_dict, item_created_time_dict, item_created_rawTime_dict = get_item_info_dict(item_info_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_hist_item_info_dict(all_click: DataFrame) -> (dict, dict, dict, dict):\n",
    "    \"\"\"\n",
    "    Per-user summaries of historically clicked articles.\n",
    "    NOTE(review): assumes all_click already carries the article attributes\n",
    "    ('category_id', 'words_count', 'created_at_ts_raw'), i.e. it was merged\n",
    "    with the item info frame beforehand — confirm at the call site.\n",
    "    \"\"\"\n",
    "\n",
    "    # user_id -> set of clicked article categories\n",
    "    user_hist_item_typs_gb: DataFrameGroupBy = all_click.groupby(\"user_id\")[\"category_id\"]\n",
    "    user_hist_item_typs: DataFrame = user_hist_item_typs_gb.agg(func=set).reset_index()\n",
    "    user_hist_item_typs_dict = dict(zip(user_hist_item_typs['user_id'], user_hist_item_typs['category_id']))\n",
    "\n",
    "    # user_id -> set of clicked article ids\n",
    "    user_hist_item_ids_gb: DataFrameGroupBy = all_click.groupby(\"user_id\")[\"click_article_id\"]\n",
    "    user_hist_item_ids: DataFrame = user_hist_item_ids_gb.agg(func=set).reset_index()\n",
    "    user_hist_item_ids_dict = dict(zip(user_hist_item_ids['user_id'], user_hist_item_ids['click_article_id']))\n",
    "\n",
    "    # user_id -> mean word count of clicked articles\n",
    "    user_hist_item_words_gb: DataFrameGroupBy = all_click.groupby(\"user_id\")[\"words_count\"]\n",
    "    user_hist_item_words: DataFrame = user_hist_item_words_gb.agg(func='mean').reset_index()\n",
    "    user_hist_item_words_dict = dict(zip(user_hist_item_words['user_id'], user_hist_item_words['words_count']))\n",
    "\n",
    "    # user_id -> raw creation time of the last-clicked article: after sorting by\n",
    "    # click_timestamp, dict(zip(...)) keeps each user's LAST row because later\n",
    "    # entries overwrite earlier ones for the same key.\n",
    "    all_click = all_click.sort_values('click_timestamp')\n",
    "\n",
    "    def read_last(df: DataFrame):\n",
    "        # Last row of a (time-sorted) group.\n",
    "        return df.iloc[-1]\n",
    "\n",
    "    # Retained alternative: the commented version below used the normalized\n",
    "    # created_at_ts instead of the raw timestamp.\n",
    "#     user_last_item_created_time_gb: DataFrameGroupBy = all_click.groupby('user_id')[\"created_at_ts\"]\n",
    "#     user_last_item_created_time: DataFrame = user_last_item_created_time_gb.apply(lambda x: read_last(x)).reset_index()\n",
    "#     user_last_item_created_time[\"created_at_ts\"] = user_last_item_created_time[['created_at_ts']].apply(max_min_scaler)\n",
    "#     user_last_item_created_time_dict = dict(\n",
    "#         zip(user_last_item_created_time['user_id'], user_last_item_created_time['created_at_ts']))\n",
    "    user_last_item_created_time_dict = dict(\n",
    "        zip(all_click['user_id'], all_click['created_at_ts_raw']))\n",
    "    return user_hist_item_typs_dict, user_hist_item_ids_dict, user_hist_item_words_dict, user_last_item_created_time_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_item_topk_click(click_df: DataFrame, k: int) -> pd.Index:\n",
    "    \"\"\"\n",
    "    Return the ids of the k most-clicked articles.\n",
    "    NOTE(review): annotation uses pd.Index — pandas.core.indexes.numeric\n",
    "    (IntegerIndex) is a private module that was removed in pandas 2.0.\n",
    "    \"\"\"\n",
    "    click_article_df: Series = click_df['click_article_id']\n",
    "    # value_counts() sorts descending, so the first k index labels are the top-k ids.\n",
    "    click_article_df = click_article_df.value_counts()\n",
    "    topk_click: pd.Index = click_article_df.index[:k]\n",
    "    return topk_click"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Multi-channel recall: every channel's results are collected into this dict\n",
    "user_multi_recall_dict = {'itemcf_sim_itemcf_recall': {},\n",
    "                          'embedding_sim_item_recall': {},\n",
    "                          'youtubednn_recall': {},\n",
    "                          'youtubednn_usercf_recall': {},\n",
    "                          'cold_start_recall': {}}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hold out each user's last click as the recall-evaluation target (offline\n",
    "# model validation). For a pure submission run, no evaluation is needed and\n",
    "# the full click log could be used without holding out the last click.\n",
    "trn_hist_click_df, trn_last_click_df = get_hist_and_last_click(all_click_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "def metrics_recall(user_recall_items_dict: dict, trn_last_click_df: DataFrame, topk=50):\n",
    "    \"\"\"\n",
    "    Evaluate the hit rate of the recalled candidates at cutoffs 10, 20, ..., topk.\n",
    "\n",
    "    Bug fix: the previous default topk=5 made range(10, topk + 1, 10) empty,\n",
    "    so calling with the default silently printed nothing; the default is now\n",
    "    50 to match the documented 10/20/30/40/50 evaluation points. Callers\n",
    "    passing topk explicitly are unaffected.\n",
    "    \"\"\"\n",
    "    last_click_item_dict = dict(zip(trn_last_click_df['user_id'], trn_last_click_df['click_article_id']))\n",
    "    user_num = len(user_recall_items_dict)\n",
    "    for k in range(10, topk + 1, 10):\n",
    "        hit_num = 0\n",
    "        for user, item_list in user_recall_items_dict.items():\n",
    "            # Article ids among this user's top-k recalled candidates.\n",
    "            tmp_recall_items = [x[0] for x in item_list[:k]]\n",
    "            if last_click_item_dict[user] in set(tmp_recall_items):\n",
    "                hit_num += 1\n",
    "        hit_rate = round(hit_num / user_num, ndigits=5)\n",
    "        print(' topk: ', k, ' : ', 'hit_num: ', hit_num, 'hit_rate: ', hit_rate, 'user_num : ', user_num)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "def itemcf_sim(df: DataFrame, item_created_time_dict: dict) -> dict:\n",
    "    \"\"\"\n",
    "    Article-to-article similarity matrix: item-based collaborative filtering\n",
    "    combined with association-rule weights:\n",
    "    1. click-time-gap weight\n",
    "    2. click-order (position) weight\n",
    "    3. article-creation-time-gap weight\n",
    "    \"\"\"\n",
    "    user_item_time_dict = get_user_item_time(df)\n",
    "    i2i_sim = {}\n",
    "    item_cnt = collections.defaultdict(int)\n",
    "    for user, item_time_list in tqdm(user_item_time_dict.items()):\n",
    "        # O(len^2) over each user's click sequence.\n",
    "        for loc1, (i, i_click_time) in enumerate(item_time_list):\n",
    "            item_cnt[i] += 1\n",
    "            i2i_sim.setdefault(i, {})\n",
    "            for loc2, (j, j_click_time) in enumerate(item_time_list):\n",
    "                if i == j:\n",
    "                    continue\n",
    "                # Forward co-clicks (i before j) weigh more than backward ones.\n",
    "                loc_alpha = 1.0 if loc1 < loc2 else 0.7\n",
    "                # Decay with the positional distance between the two clicks.\n",
    "                loc_weight = loc_alpha * (0.9 ** (np.abs(loc1 - loc2) - 1))\n",
    "                # Smaller click-time / creation-time gaps give larger weights\n",
    "                # (both timestamps are min-max normalized upstream).\n",
    "                click_time_weight = np.exp(0.7 ** np.abs(i_click_time - j_click_time))\n",
    "                created_time_weight = np.exp(0.8 ** np.abs(item_created_time_dict[i] - item_created_time_dict[j]))\n",
    "                i2j_sim: dict = i2i_sim[i]\n",
    "                i2j_sim.setdefault(j, 0)\n",
    "                # Penalize very active users via the log of their sequence length.\n",
    "                i2j_sim[j] += loc_weight * click_time_weight * created_time_weight / (\n",
    "                    math.log(len(item_time_list) + 1))\n",
    "    # Normalize by the geometric mean of the two items' click counts (cosine-style).\n",
    "    for i, related_items in i2i_sim.items():\n",
    "        for j, wij in related_items.items():\n",
    "            i2i_sim[i][j] = wij / math.sqrt(item_cnt[i] * item_cnt[j])\n",
    "    return i2i_sim"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 250000/250000 [05:12<00:00, 799.60it/s] \n"
     ]
    }
   ],
   "source": [
    "i2i_sim = itemcf_sim(all_click_df, item_created_time_dict=item_created_time_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_activate_degree_dict(all_click_df: DataFrame) -> dict:\n",
    "    \"\"\"\n",
    "    User activity degree: per-user click count, min-max scaled to [0, 1].\n",
    "    Returns {user_id: scaled_click_count}.\n",
    "    \"\"\"\n",
    "    # Count clicks per user; keep the double brackets so the scaler gets 2-D input.\n",
    "    click_counts: DataFrame = all_click_df.groupby('user_id')[['click_article_id']].count().reset_index()\n",
    "    scaler = MinMaxScaler()\n",
    "    click_counts['click_article_id'] = scaler.fit_transform(click_counts[['click_article_id']])\n",
    "    return dict(zip(click_counts['user_id'], click_counts['click_article_id']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "def usercf_sim(all_click_df: DataFrame, user_activate_degree_dict: dict) -> dict:\n",
    "    \"\"\"\n",
    "    User-to-user similarity matrix: user-based collaborative filtering\n",
    "    combined with an activity-level association weight.\n",
    "    \"\"\"\n",
    "    item_user_time_dict = get_item_user_time_dict(all_click_df)\n",
    "    u2u_sim = {}\n",
    "    user_cnt = collections.defaultdict(int)\n",
    "    for item, user_time_list in tqdm(item_user_time_dict.items()):\n",
    "        for u, u_click_time in user_time_list:\n",
    "            user_cnt[u] += 1\n",
    "            u2u_sim.setdefault(u, {})\n",
    "            for v, v_click_time in user_time_list:\n",
    "                if u == v:\n",
    "                    continue\n",
    "                # Mean activity degree of the two users, scaled by 100.\n",
    "                activate_weight = 100 * 0.5 * (user_activate_degree_dict[u] + user_activate_degree_dict[v])\n",
    "                u2v_sim: dict = u2u_sim[u]\n",
    "                u2v_sim.setdefault(v, 0)\n",
    "                # Popular items (long clicker lists) contribute less via the log penalty.\n",
    "                u2v_sim[v] += activate_weight / (math.log(len(user_time_list) + 1))\n",
    "    # Normalize by the geometric mean of the two users' click counts.\n",
    "    for u, related_users in u2u_sim.items():\n",
    "        for v, wuv in related_users.items():\n",
    "            u2u_sim[u][v] = wuv / math.sqrt(user_cnt[u] * user_cnt[v])\n",
    "    return u2u_sim"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "if informal:\n",
    "    user_activate_degree_dict = get_user_activate_degree_dict(all_click_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "if informal:\n",
    "    u2u_sim = usercf_sim(all_click_df, user_activate_degree_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "def embdding_sim(item_emb_df: DataFrame, topk: int)->dict:\n",
    "    \"\"\"\n",
    "    Content-based article-to-article similarity from embeddings.\n",
    "    For every article, return the topk most similar articles via a faiss\n",
    "    approximate index (brute force over all articles would be too slow).\n",
    "    NOTE(review): name typo 'embdding' kept — callers reference it.\n",
    "    \"\"\"\n",
    "    # Map faiss row index -> raw article id.\n",
    "    item_idx_2_rawid_dict = dict(zip(item_emb_df.index, item_emb_df[\"article_id\"]))\n",
    "    item_emb_cols = [x for x in item_emb_df.columns if 'emb' in x]\n",
    "    item_emb_np: np.ndarray = np.ascontiguousarray(item_emb_df[item_emb_cols].values, dtype=np.float32)\n",
    "    # L2-normalize rows so inner products behave like cosine similarities.\n",
    "    item_emb_np = item_emb_np / np.linalg.norm(item_emb_np, axis=1, keepdims=True)\n",
    "    # Approximate index: PCA down to 32 dims, IVF with 100 cells, 8-byte PQ codes.\n",
    "    item_index: faiss.Index = faiss.index_factory(item_emb_np.shape[1], 'PCA32,IVF100,PQ8')\n",
    "    # item_index: faiss.IndexFlatIP = faiss.IndexFlatIP(item_emb_np.shape[1])\n",
    "    item_index.train(item_emb_np)\n",
    "    item_index.add(item_emb_np)\n",
    "    # Query every article against the index; sim/idx have shape (n_items, topk).\n",
    "    sim, idx = item_index.search(item_emb_np, topk)\n",
    "    item_sim_dict = collections.defaultdict(dict)\n",
    "    for target_idx, sim_value_list, rele_idx_list in tqdm(zip(range(len(item_emb_np)), sim, idx)):\n",
    "        target_raw_id = item_idx_2_rawid_dict[target_idx]\n",
    "        # [1:] skips the first neighbor — presumably the article itself; with an\n",
    "        # approximate index this is not guaranteed — TODO confirm.\n",
    "        for rele_idx, sim_value in zip(rele_idx_list[1:], sim_value_list[1:]):\n",
    "            # faiss pads with -1 when fewer than topk neighbors are found.\n",
    "            if rele_idx!=-1:\n",
    "                rele_raw_id = item_idx_2_rawid_dict[rele_idx]\n",
    "                item_sim_dict[target_raw_id][rele_raw_id] = item_sim_dict.get(target_raw_id, {}).get(rele_raw_id,\n",
    "                                                                                                 0) + sim_value\n",
    "    return item_sim_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "364047it [00:13, 27726.24it/s]\n"
     ]
    }
   ],
   "source": [
    "item_emb_df = pd.read_csv(path + 'articles_emb.csv')\n",
    "# Embedding-based article-to-article similarity; mainly used for cold start\n",
    "emb_i2i_sim = embdding_sim(item_emb_df, 10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): loads a cached embedding i2i similarity; the emb_i2i_sim\n",
    "# computed above is never dumped in this notebook — confirm where this cache\n",
    "# comes from. pickle.load on untrusted files is unsafe (arbitrary code execution).\n",
    "item_sim_dict: dict = pickle.load(open(pathcache + 'emb_i2i_sim.pkl', 'rb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_data_set(data: DataFrame, negsample=0) -> (list, list):\n",
    "    \"\"\"\n",
    "    Build train/test samples for the two-tower recall model via sliding windows.\n",
    "    :param negsample: negatives sampled per positive (0 disables negative sampling)\n",
    "    :return: (train_set, test_set); each sample is\n",
    "             (user_id, hist_item_seq, target_item, label, hist_len)\n",
    "    \"\"\"\n",
    "    data.sort_values(\"click_timestamp\", inplace=True)\n",
    "    # .unique() returns an ndarray, not a Series.\n",
    "    item_ids: np.ndarray = data['click_article_id'].unique()\n",
    "    train_set = []\n",
    "    test_set = []\n",
    "    for reviewerID, hist in tqdm(data.groupby('user_id')):\n",
    "        hist: DataFrame = hist\n",
    "        pos_list = hist['click_article_id'].tolist()\n",
    "        neg_list = []\n",
    "        if negsample > 0:\n",
    "            # Negatives come from items this user never clicked; drawn with\n",
    "            # replacement, negsample per positive.\n",
    "            candidate_set = list(set(item_ids) - set(pos_list))\n",
    "            neg_list = np.random.choice(a=candidate_set, size=len(pos_list) * negsample, replace=True)\n",
    "        if len(pos_list) == 1:\n",
    "            # Single-click users go into both train and test so they are not lost;\n",
    "            # their only click serves as both history and target.\n",
    "            train_set.append((reviewerID, [pos_list[0]], pos_list[0], 1, len(pos_list)))\n",
    "            test_set.append((reviewerID, [pos_list[0]], pos_list[0], 1, len(pos_list)))\n",
    "        for i in range(1, len(pos_list), 1):\n",
    "            pre_hist = pos_list[:i]\n",
    "            if i != len(pos_list) - 1:\n",
    "                # History is reversed: most recent click first.\n",
    "                train_set.append((reviewerID, pre_hist[::-1], pos_list[i], 1, len(pre_hist[::-1])))\n",
    "                for negi in range(negsample):\n",
    "                    train_set.append(\n",
    "                        (reviewerID, pre_hist[::-1], neg_list[i * negsample + negi], 0, len(pre_hist[::-1])))\n",
    "            else:\n",
    "                # The user's final click becomes the held-out test sample.\n",
    "                test_set.append((reviewerID, pre_hist[::-1], pos_list[i], 1, len(pre_hist[::-1])))\n",
    "    random.shuffle(train_set)\n",
    "    random.shuffle(test_set)\n",
    "    return train_set, test_set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_model_input(train_set: list, user_profile: DataFrame, seq_max_len: int) -> (dict, np.ndarray):\n",
    "    \"\"\"\n",
    "    Pad the history sequences to seq_max_len and pack the samples into the\n",
    "    model's input dict. Each sample is (uid, hist_seq, iid, label, hist_len).\n",
    "    NOTE(review): user_profile is unused here — kept for interface compatibility.\n",
    "    :return: (model_input_dict, labels)\n",
    "    \"\"\"\n",
    "    train_uid = np.array([line[0] for line in train_set])\n",
    "    train_seq = [line[1] for line in train_set]\n",
    "    train_iid = np.array([line[2] for line in train_set])\n",
    "    train_label = np.array([line[3] for line in train_set])\n",
    "    train_hist_len = np.array([line[4] for line in train_set])\n",
    "    # Pad/truncate at the end ('post'); 0 is the padding id.\n",
    "    train_seq_pad = pad_sequences(train_seq, maxlen=seq_max_len, padding='post', truncating='post', value=0)\n",
    "    train_model_input = {'user_id': train_uid, 'click_article_id': train_iid, 'hist_article_id': train_seq_pad,\n",
    "                         'hist_len': train_hist_len}\n",
    "    return train_model_input, train_label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "def youtubednn_u2i_dict(data: DataFrame, topk=20)->dict:\n",
    "    \"\"\"\n",
    "    youtubednn实现\n",
    "    \"\"\"\n",
    "    sparse_features = ['click_article_id', 'user_id']\n",
    "    SEQ_LEN = 30\n",
    "    user_profile_ :DataFrame = data[['user_id']].drop_duplicates('user_id')\n",
    "    item_profile_ :DataFrame = data[['click_article_id']].drop_duplicates('click_article_id')\n",
    "    data = data.copy(deep=True)\n",
    "    features = ['click_article_id', 'user_id']\n",
    "    feature_max_idx = {}\n",
    "    for feature in features:\n",
    "        lbe = LabelEncoder()\n",
    "        data[feature] = lbe.fit_transform(data[feature])\n",
    "        feature_max_idx[feature] = data[feature].max() + 1\n",
    "    user_profile = data[['user_id']].drop_duplicates('user_id')\n",
    "    item_profile = data[['click_article_id']].drop_duplicates('click_article_id')\n",
    "    user_index_2_rawid = dict(zip(user_profile['user_id'], user_profile_['user_id']))\n",
    "    item_index_2_rawid = dict(zip(item_profile['click_article_id'], item_profile_['click_article_id']))\n",
    "    train_set, test_set = gen_data_set(data, 0)\n",
    "    train_model_input, train_label = gen_model_input(train_set, user_profile, SEQ_LEN)\n",
    "    test_model_input, test_label = gen_model_input(test_set, user_profile, SEQ_LEN)\n",
    "    embedding_dim = 16\n",
    "    user_feature_columns = [SparseFeat('user_id', feature_max_idx['user_id'], embedding_dim),\n",
    "                            VarLenSparseFeat(\n",
    "                                SparseFeat('hist_article_id', feature_max_idx['click_article_id'], embedding_dim,\n",
    "                                           embedding_name='click_article_id'), SEQ_LEN, 'mean', 'hist_len'), ]\n",
    "    item_feature_columns = [SparseFeat('click_article_id', feature_max_idx['click_article_id'], embedding_dim)]\n",
    "\n",
    "    if tf.__version__ >= '2.0.0':\n",
    "        tf.compat.v1.disable_eager_execution()\n",
    "\n",
    "    model = YoutubeDNN(user_feature_columns=user_feature_columns, item_feature_columns=item_feature_columns,\n",
    "                       num_sampled=5, user_dnn_hidden_units=(64, embedding_dim))\n",
    "    model.compile(optimizer='adam', loss=sampledsoftmaxloss)\n",
    "    history = model.fit(x=train_model_input, y=train_label, batch_size=256, epochs=1, verbose=1, validation_split=0.0)\n",
    "    test_user_model_input = test_model_input\n",
    "    all_item_model_input = {'click_article_id': item_profile['click_article_id'].values, }\n",
    "    user_embedding_model = Model(inputs=model.user_input, outputs=model.user_embedding)\n",
    "    item_embedding_model = Model(inputs=model.item_input, outputs=model.item_embedding)\n",
    "    user_embs = user_embedding_model.predict(test_user_model_input, batch_size=2 ** 12)\n",
    "    item_embs = item_embedding_model.predict(all_item_model_input, batch_size=2 ** 12)\n",
    "    # user_embs = user_embs / np.linalg.norm(user_embs, axis=1, keepdims=True)\n",
    "    # item_embs = item_embs / np.linalg.norm(item_embs, axis=1, keepdims=True)\n",
    "    raw_user_id_emb_dict = {user_index_2_rawid[k]: v for k, v in zip(user_profile['user_id'], user_embs)}\n",
    "    raw_item_id_emb_dict = {item_index_2_rawid[k]: v for k, v in zip(item_profile['click_article_id'], item_embs)}\n",
    "    \n",
    "    # 将Embedding保存到本地\n",
    "    pickle.dump(raw_user_id_emb_dict, open(pathcache + 'user_youtube_emb.pkl', 'wb'))\n",
    "    pickle.dump(raw_item_id_emb_dict, open(pathcache + 'item_youtube_emb.pkl', 'wb'))\n",
    "    \n",
    "#     ui_index: faiss.Index = faiss.index_factory(embedding_dim, 'IVF100,PQ8')\n",
    "#     ui_index.train(item_embs)\n",
    "    ui_index: faiss.Index = faiss.IndexFlatIP(embedding_dim)\n",
    "    ui_index.add(item_embs)\n",
    "    sim, idx = ui_index.search(np.ascontiguousarray(user_embs), topk)\n",
    "    user_recall_items_dict = collections.defaultdict(dict)\n",
    "    for target_idx, sim_value_list, rele_idx_list in tqdm(zip(test_user_model_input['user_id'], sim, idx)):\n",
    "        target_raw_id = user_index_2_rawid[target_idx]\n",
    "        for rele_idx, sim_value in zip(rele_idx_list, sim_value_list):\n",
    "            #如果没有匹配到，会返回-1,一般是没有找到topk个目标，然后在最后直接追加了-1以补全topk数量\n",
    "            if rele_idx != -1:\n",
    "                rele_raw_id = item_index_2_rawid[rele_idx]\n",
    "                user_recall_items_dict[target_raw_id][rele_raw_id] = user_recall_items_dict.get(target_raw_id, {}).get(\n",
    "                    rele_raw_id,\n",
    "                    0) + sim_value\n",
    "    user_recall_items_dict = {k: sorted(v.items(), key=lambda x: x[1], reverse=True) for k, v in\n",
    "                              user_recall_items_dict.items()}\n",
    "    return user_recall_items_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 250000/250000 [00:34<00:00, 7274.04it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/initializers/initializers_v1.py:47: calling RandomNormal.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Call initializer instance with the dtype argument instead of passing it to the constructor\n",
      "Train on 1149673 samples\n",
      "1149673/1149673 [==============================] - 194s 169us/sample - loss: 0.1301\n",
      "WARNING:tensorflow:From /opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training_v1.py:2070: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "This property should not be used in TensorFlow 2.0, as updates are applied automatically.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "250000it [00:18, 13195.01it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " topk:  10  :  hit_num:  30 hit_rate:  0.00012 user_num :  250000\n",
      " topk:  20  :  hit_num:  59 hit_rate:  0.00024 user_num :  250000\n"
     ]
    }
   ],
   "source": [
    "# YoutubeDNN u2i recall (top-20 per user), then hit-rate evaluation against\n",
    "# each user's held-out last click.\n",
    "user_multi_recall_dict['youtubednn_recall'] = youtubednn_u2i_dict(all_click_df, topk=20)\n",
    "metrics_recall(user_multi_recall_dict['youtubednn_recall'], trn_last_click_df, topk=20)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [],
   "source": [
    "# BUGFIX: `~informal` is bitwise NOT on a bool (-1 or -2), which is truthy\n",
    "# for BOTH values, so the guard was always True; `not informal` expresses\n",
    "# the intended \"only persist in formal (full-data) mode\" check.\n",
    "if not informal:\n",
    "    pickle.dump(user_multi_recall_dict['youtubednn_recall'], open(pathcache + 'youtube_u2i_dict.pkl', 'wb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# itemcf recall\n",
    "上面已经通过协同过滤，Embedding检索的方式得到了文章的相似度矩阵，下面使用协同过滤的思想，给用户召回与其历史文章相似的文章。 这里在召回的时候，也是用了关联规则的方式：\n",
    "\n",
    "<br/>\n",
    " 1.考虑相似文章与历史点击文章顺序的权重(细节看代码)\n",
    "<br/>\n",
    " 2.考虑文章创建时间的权重，也就是考虑相似文章与历史点击文章创建时间差的权重\n",
    "<br/>\n",
    " 3.考虑文章内容相似度权重(使用Embedding计算相似文章相似度，但是这里需要注意，在Embedding的时候并没有计算所有商品两两之间的相似度，所以相似的文章与历史点击文章不存在相似度，需要做特殊处理)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 基于商品的召回i2i\n",
    "def item_based_recommend(user_id:int,\n",
    "                         user_item_time_dict:dict,\n",
    "                         i2i_sim:dict,\n",
    "                         sim_item_topk:int,\n",
    "                         recall_item_num:int,\n",
    "                         item_topk_click,\n",
    "                         item_created_time_dict:dict,\n",
    "                         emb_i2i_sim:dict)->list:\n",
    "    \"\"\"\n",
    "    基于文章协同过滤的召回\n",
    "    :param user_id: 用户id\n",
    "    :param user_item_time_dict: 字典, 根据点击时间获取用户的点击文章序列   {user1: [(item1, time1), (item2, time2)..]...}\n",
    "    :param i2i_sim: 字典，文章相似性矩阵\n",
    "    :param sim_item_topk: 整数， 选择与当前文章最相似的前k篇文章\n",
    "    :param recall_item_num: 整数， 最后的召回文章数量\n",
    "    :param item_topk_click: 列表，点击次数最多的文章列表，用户召回补全\n",
    "    :param emb_i2i_sim: 字典基于内容embedding算的文章相似矩阵\n",
    "    :return: 召回的文章列表 [(item1, score1), (item2, score2)...]\n",
    "    \"\"\"\n",
    "    # 获取用户历史交互的文章\n",
    "    item_hist_items = user_item_time_dict[user_id]\n",
    "    item_hist_items_ = {item_id for item_id, _ in item_hist_items}\n",
    "    \n",
    "    item_rank = {}\n",
    "    for loc, (i, click_time) in enumerate(item_hist_items):\n",
    "        for j,wij in sorted(i2i_sim[i].items(),key=lambda x:x[1],reverse=True)[:sim_item_topk]:\n",
    "            if j in item_hist_items_:\n",
    "                continue\n",
    "            created_time_weight = np.exp(\n",
    "                0.8**np.abs(item_created_time_dict[i]-item_created_time_dict[j]))\n",
    "            loc_weight = 0.9**(len(item_hist_items)-loc)\n",
    "            content_weight=1.0\n",
    "            if emb_i2i_sim.get(i,{}).get(j,None) is not None:\n",
    "                content_weight+=emb_i2i_sim[i][j]\n",
    "            if emb_i2i_sim.get(j,{}).get(i,None) is not None:\n",
    "                content_weight+=emb_i2i_sim[j][i]\n",
    "            item_rank.setdefault(j,0)\n",
    "            item_rank[j]+=created_time_weight*loc_weight*content_weight*wij\n",
    "    if len(item_rank) < recall_item_num:\n",
    "        for i,item in enumerate(item_topk_click):\n",
    "            if item in item_rank.items():\n",
    "                continue\n",
    "            item_rank[item]=-i-100\n",
    "            if len(item_rank)==recall_item_num:\n",
    "                break\n",
    "    item_rank = sorted(item_rank.items(),key=lambda x:x[1],reverse=True)[:recall_item_num]\n",
    "    return item_rank"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## itemcf sim召回"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [],
   "source": [
    "def itemcfRecall(recall_item_num):\n",
    "    \"\"\"\n",
    "    Item-CF recall for every user in the history click log; results are\n",
    "    stored under 'itemcf_sim_itemcf_recall'. The last click per user is\n",
    "    held out elsewhere for recall evaluation.\n",
    "    \"\"\"\n",
    "    sim_item_topk = 20\n",
    "    user_item_time_dict = get_user_item_time(trn_hist_click_df)\n",
    "    item_topk_click = get_item_topk_click(trn_hist_click_df, k=50)\n",
    "    recall_per_user = collections.defaultdict(dict)\n",
    "    for uid in tqdm(trn_hist_click_df['user_id'].unique()):\n",
    "        recall_per_user[uid] = item_based_recommend(\n",
    "            uid, user_item_time_dict, i2i_sim, sim_item_topk,\n",
    "            recall_item_num, item_topk_click, item_created_time_dict,\n",
    "            emb_i2i_sim)\n",
    "    user_multi_recall_dict['itemcf_sim_itemcf_recall'] = recall_per_user"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 250000/250000 [1:12:09<00:00, 57.75it/s]  \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " topk:  10  :  hit_num:  104056 hit_rate:  0.41622 user_num :  250000\n"
     ]
    }
   ],
   "source": [
    "# Item-CF recall (top-10 per user), then hit-rate evaluation against each\n",
    "# user's held-out last click.\n",
    "itemcfRecall(10)\n",
    "metrics_recall(user_recall_items_dict=user_multi_recall_dict['itemcf_sim_itemcf_recall'],\n",
    "              trn_last_click_df=trn_last_click_df,\n",
    "              topk=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [],
   "source": [
    "# BUGFIX: `~informal` is bitwise NOT on a bool (-1 or -2), which is truthy\n",
    "# for BOTH values, so the guard was always True; `not informal` expresses\n",
    "# the intended \"only persist in formal (full-data) mode\" check.\n",
    "if not informal:\n",
    "    pickle.dump(user_multi_recall_dict['itemcf_sim_itemcf_recall'], open(pathcache + 'itemcf_recall_dict.pkl', 'wb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## embedding sim 召回"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "def itemEmbeddingRecall(recall_item_num:int):\n",
    "    \"\"\"\n",
    "    Item recall driven by the content-embedding similarity matrix\n",
    "    (emb_i2i_sim is used both as the ranking matrix and as the association\n",
    "    weight source); results go under 'embedding_sim_item_recall'. The last\n",
    "    click per user is held out elsewhere for recall evaluation.\n",
    "    \"\"\"\n",
    "    sim_item_topk = 20\n",
    "    user_item_time_dict = get_user_item_time(trn_hist_click_df)\n",
    "    item_topk_click = get_item_topk_click(trn_hist_click_df, k=50)\n",
    "    recall_per_user = collections.defaultdict(dict)\n",
    "    for uid in tqdm(trn_hist_click_df['user_id'].unique()):\n",
    "        recall_per_user[uid] = item_based_recommend(\n",
    "            uid, user_item_time_dict, emb_i2i_sim, sim_item_topk,\n",
    "            recall_item_num, item_topk_click, item_created_time_dict,\n",
    "            emb_i2i_sim)\n",
    "    user_multi_recall_dict['embedding_sim_item_recall'] = recall_per_user"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 250000/250000 [01:06<00:00, 3776.34it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " topk:  10  :  hit_num:  3240 hit_rate:  0.01296 user_num :  250000\n"
     ]
    }
   ],
   "source": [
    "# Embedding-similarity item recall (top-10 per user), then hit-rate\n",
    "# evaluation against each user's held-out last click.\n",
    "itemEmbeddingRecall(10)\n",
    "metrics_recall(user_recall_items_dict=user_multi_recall_dict['embedding_sim_item_recall'],\n",
    "              trn_last_click_df=trn_last_click_df,\n",
    "              topk=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [],
   "source": [
    "# BUGFIX: `~informal` is bitwise NOT on a bool (-1 or -2), which is truthy\n",
    "# for BOTH values, so the guard was always True; `not informal` expresses\n",
    "# the intended \"only persist in formal (full-data) mode\" check.\n",
    "if not informal:\n",
    "    pickle.dump(user_multi_recall_dict['embedding_sim_item_recall'], open(pathcache + 'itemcf_emb_dict.pkl', 'wb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# usercf召回"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "def user_based_recommend(user_id:int,user_item_time_dict:dict,\n",
    "                        u2u_sim:dict,sim_user_topk:int,\n",
    "                        recall_item_num:int,item_topk_click:list,\n",
    "                        item_created_time_dict:dict,emb_i2i_sim:dict)->list:\n",
    "    \"\"\"\n",
    "    基于user协同过滤的召回\n",
    "    :param user_id: 用户id\n",
    "    :param user_item_time_dict: 字典, 根据点击时间获取用户的点击文章序列   {user1: [(item1, time1), (item2, time2)..]...}\n",
    "    :param u2u_sim: 字典，用户相似性矩阵\n",
    "    :param sim_user_topk: 整数， 选择与当前用户最相似的前k个用户\n",
    "    :param recall_item_num: 整数， 最后的召回文章数量\n",
    "    :param item_topk_click: 列表，点击次数最多的文章列表，用户召回补全\n",
    "    :param item_created_time_dict: 文章创建时间列表\n",
    "    :param emb_i2i_sim: 字典基于内容embedding算的文章相似矩阵\n",
    "    :return: 召回的文章列表 [(item1, score1), (item2, score2)...]\n",
    "    \"\"\"\n",
    "    user_item_time_list=user_item_time_dict[user_id]\n",
    "    user_hist_items=set([i for i,t in user_item_time_list])\n",
    "    items_rank={}\n",
    "    for sim_u, wuv in sorted(u2u_sim[user_id].items(),key=lambda x:x[1],reverse=True)[:sim_user_topk]:\n",
    "        for i,click_time in user_item_time_dict[sim_u]:\n",
    "            if i in user_hist_items:\n",
    "                continue\n",
    "            items_rank.setdefault(i,0)\n",
    "            loc_weight=1\n",
    "            content_weight=1\n",
    "            created_time_weight=1\n",
    "            for loc,(j,click_time) in enumerate(user_item_time_list):\n",
    "                loc_weight+=0.9**(len(user_item_time_list)-loc)\n",
    "                if emb_i2i_sim.get(i,{}).get(j,None) is not None:\n",
    "                    content_weight+=emb_i2i_sim[i][j]\n",
    "                if emb_i2i_sim.get(j,{}).get(i,None) is not None:\n",
    "                    content_weight+=emb_i2i_sim[j][i]\n",
    "                created_time_weight+=np.exp(0.8*np.abs(item_created_time_dict[i]-item_created_time_dict[j]))\n",
    "            items_rank[i]+=loc_weight*content_weight*created_time_weight*wuv\n",
    "    if len(items_rank) < recall_item_num:\n",
    "        for i,item in enumerate(item_topk_click):\n",
    "            if item in items_rank.items():\n",
    "                continue\n",
    "            items_rank[item]=-i-100\n",
    "            if len(items_rank)==recall_item_num:\n",
    "                break\n",
    "    items_rank=sorted(items_rank.items(),key=lambda x:x[1],reverse=True)[:recall_item_num]\n",
    "    return items_rank"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## usercf sim召回"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "def usercfRecall(recall_item_num:int)->dict:\n",
    "    \"\"\"\n",
    "    User-CF recall for every user in the history click log; returns the\n",
    "    per-user candidate dict. The last click per user is held out elsewhere\n",
    "    for recall evaluation.\n",
    "    \"\"\"\n",
    "    sim_user_topk = 20\n",
    "    user_item_time_dict = get_user_item_time(trn_hist_click_df)\n",
    "    item_topk_click = get_item_topk_click(trn_hist_click_df, k=50)\n",
    "    recall_per_user = collections.defaultdict(dict)\n",
    "    for uid in tqdm(trn_hist_click_df['user_id'].unique()):\n",
    "        recall_per_user[uid] = user_based_recommend(\n",
    "            uid, user_item_time_dict, u2u_sim, sim_user_topk,\n",
    "            recall_item_num, item_topk_click, item_created_time_dict,\n",
    "            emb_i2i_sim)\n",
    "    return recall_per_user"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plain user-CF over the full u2u matrix is expensive, so it is only run\n",
    "# and evaluated in the informal (sampled/debug) mode.\n",
    "if informal:\n",
    "    user_recall_items_dict = usercfRecall(10)\n",
    "    metrics_recall(user_recall_items_dict=user_recall_items_dict,\n",
    "                  trn_last_click_df=trn_last_click_df,\n",
    "                  topk=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [],
   "source": [
    "def u2u_embdding_sim(click_df:DataFrame, user_emb_dict:dict, topk:int) ->dict :\n",
    "    \"\"\"\n",
    "    Build a user-user similarity matrix from user embeddings with faiss.\n",
    "    For every user, the topk nearest users (inner product) are returned.\n",
    "    NOTE(review): click_df is currently unused; kept for interface\n",
    "    compatibility with callers.\n",
    "    \"\"\"\n",
    "    user_list = list(user_emb_dict.keys())\n",
    "    user_emb_np = np.array(list(user_emb_dict.values()), dtype=np.float32)\n",
    "    # Map faiss row positions (0..n-1) back to raw user ids.\n",
    "    user_index_2_rawid_dict = dict(enumerate(user_list))\n",
    "    \n",
    "    # Flat inner-product index over the (un-normalised) embeddings.\n",
    "    user_index = faiss.IndexFlatIP(user_emb_np.shape[1])\n",
    "    user_index.add(user_emb_np)\n",
    "    # For each embedding, fetch the topk most similar users and their scores.\n",
    "    sim, idx = user_index.search(user_emb_np, topk)\n",
    "    \n",
    "    # Re-key the search results by raw user id.\n",
    "    user_sim_dict = collections.defaultdict(dict)\n",
    "    for target_idx, sim_value_list, rele_idx_list in tqdm(zip(range(len(user_emb_np)), sim, idx)):\n",
    "        target_raw_id = user_index_2_rawid_dict[target_idx]\n",
    "        # Position 0 is the query user itself, so only topk-1 real\n",
    "        # neighbours remain.\n",
    "        for rele_idx, sim_value in zip(rele_idx_list[1:], sim_value_list[1:]):\n",
    "            rele_raw_id = user_index_2_rawid_dict[rele_idx]\n",
    "            user_sim_dict[target_raw_id][rele_raw_id] = user_sim_dict.get(target_raw_id, {}).get(rele_raw_id, 0) + sim_value\n",
    "    return user_sim_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "250000it [00:08, 30167.81it/s]\n"
     ]
    }
   ],
   "source": [
    "# Load the user embeddings produced by the YoutubeDNN step and compute\n",
    "# user-user similarities with faiss. Note: these embeddings are trained from\n",
    "# users' click sequences, so their quality suffers when the sequences are\n",
    "# generally short.\n",
    "user_emb_dict = pickle.load(open(pathcache + 'user_youtube_emb.pkl', 'rb'))\n",
    "u2u_sim_fromYoutube = u2u_embdding_sim(all_click_df, user_emb_dict, topk=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "def userEmbddingRecall(recall_item_num:int):\n",
    "    \"\"\"\n",
    "    User-CF recall driven by the YoutubeDNN user-embedding similarity\n",
    "    matrix (u2u_sim_fromYoutube); results are stored under\n",
    "    'youtubednn_usercf_recall'. The last click per user is held out\n",
    "    elsewhere for recall evaluation.\n",
    "    \"\"\"\n",
    "    sim_user_topk = 20\n",
    "    user_item_time_dict = get_user_item_time(trn_hist_click_df)\n",
    "    item_topk_click = get_item_topk_click(trn_hist_click_df, k=50)\n",
    "    recall_per_user = collections.defaultdict(dict)\n",
    "    for uid in tqdm(trn_hist_click_df['user_id'].unique()):\n",
    "        recall_per_user[uid] = user_based_recommend(\n",
    "            uid, user_item_time_dict, u2u_sim_fromYoutube, sim_user_topk,\n",
    "            recall_item_num, item_topk_click, item_created_time_dict,\n",
    "            emb_i2i_sim)\n",
    "    user_multi_recall_dict['youtubednn_usercf_recall'] = recall_per_user"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 250000/250000 [08:08<00:00, 512.12it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " topk:  10  :  hit_num:  3350 hit_rate:  0.0134 user_num :  250000\n"
     ]
    }
   ],
   "source": [
    "# YoutubeDNN-embedding user-CF recall (top-10 per user), then hit-rate\n",
    "# evaluation against each user's held-out last click.\n",
    "userEmbddingRecall(10)\n",
    "metrics_recall(user_recall_items_dict=user_multi_recall_dict['youtubednn_usercf_recall'],\n",
    "              trn_last_click_df=trn_last_click_df,\n",
    "              topk=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
    "# BUGFIX: `~informal` is bitwise NOT on a bool (-1 or -2), which is truthy\n",
    "# for BOTH values, so the guard was always True; `not informal` expresses\n",
    "# the intended \"only persist in formal (full-data) mode\" check.\n",
    "if not informal:\n",
    "    pickle.dump(user_multi_recall_dict['youtubednn_usercf_recall'], open(pathcache + 'youtubednn_usercf_dict.pkl', 'wb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 冷启动问题（item）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "def itemcsRecallPrepare(recall_item_num)->dict:\n",
    "    \"\"\"\n",
    "    Item-CF recall with a wide candidate pool (sim_item_topk=150), used to\n",
    "    over-recall candidates for the item cold-start stage; the rule filters\n",
    "    applied afterwards remove many of them.\n",
    "    \"\"\"\n",
    "    sim_item_topk = 150\n",
    "    user_item_time_dict = get_user_item_time(trn_hist_click_df)\n",
    "    item_topk_click = get_item_topk_click(trn_hist_click_df, k=50)\n",
    "    recall_per_user = collections.defaultdict(dict)\n",
    "    for uid in tqdm(trn_hist_click_df['user_id'].unique()):\n",
    "        recall_per_user[uid] = item_based_recommend(\n",
    "            uid, user_item_time_dict, i2i_sim, sim_item_topk,\n",
    "            recall_item_num, item_topk_click, item_created_time_dict,\n",
    "            emb_i2i_sim)\n",
    "    return recall_per_user"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 250000/250000 [1:22:36<00:00, 50.43it/s]\n"
     ]
    }
   ],
   "source": [
    "# Over-recall up to 100 candidates per user for the item cold-start stage;\n",
    "# the downstream rule filters remove many of them.\n",
    "user_recall_items_dict_byItemcs = itemcsRecallPrepare(100)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 基于规则进行文章过滤\n",
    "# 保留文章主题与用户历史浏览主题相似的文章\n",
    "# 保留文章字数与用户历史浏览文章字数相差不大的文章\n",
    "# 保留最后一次点击当天的文章\n",
    "# 按照相似度返回最终的结果\n",
    "\n",
    "def get_click_article_ids_set(all_click_df):\n",
    "    return set(all_click_df['click_article_id'].values)\n",
    "\n",
    "def cold_start_items(user_recall_items_dict:dict,\n",
    "                    user_hist_item_typs_dict:dict,\n",
    "                    user_hist_item_words_dict:dict,\n",
    "                    user_last_item_created_time_dict:dict,\n",
    "                    item_type_dict:dict,\n",
    "                    item_words_dict:dict,\n",
    "                    item_created_time_dict:dict,\n",
    "                    click_article_ids_set:set,\n",
    "                    recall_item_num:int)->dict:\n",
    "    \"\"\"\n",
    "    Recall articles for the item cold-start scenario.\n",
    "    :param user_recall_items_dict: dict, candidates recalled per user (content-embedding based) {user1: [(item1, score1), ..], ...}\n",
    "    :param user_hist_item_typs_dict: dict, topics of each user's clicked articles\n",
    "    :param user_hist_item_words_dict: dict, mean word count of each user's clicked articles\n",
    "    :param user_last_item_created_time_dict: dict, creation time (ms epoch) of each user's last clicked article\n",
    "    :param item_type_dict: dict, article topic lookup\n",
    "    :param item_words_dict: dict, article word-count lookup\n",
    "    :param item_created_time_dict: dict, article creation time (ms epoch) lookup\n",
    "    :param click_article_ids_set: set, articles that appear in the click log\n",
    "    :param recall_item_num: int, number of cold-start articles (absent from the log) to keep per user\n",
    "    :return: dict {user: [(item, score), ...]} of filtered cold-start candidates\n",
    "    \"\"\"\n",
    "    cold_start_user_items_dict = {}\n",
    "    for user, item_list in tqdm(user_recall_items_dict.items()):\n",
    "        cold_start_user_items_dict.setdefault(user, [])\n",
    "        if not item_list:\n",
    "            continue\n",
    "        # Per-user history facts are loop-invariant: look them up once per\n",
    "        # user instead of once per candidate item (was inside the item loop).\n",
    "        hist_item_type_set = user_hist_item_typs_dict[user]\n",
    "        hist_mean_words = user_hist_item_words_dict[user]\n",
    "        hist_last_item_created_time = datetime.fromtimestamp(\n",
    "            user_last_item_created_time_dict[user] / 1000)\n",
    "        for item, score in item_list:\n",
    "            curr_item_type = item_type_dict[item]\n",
    "            curr_item_words = item_words_dict[item]\n",
    "            curr_item_created_time = datetime.fromtimestamp(item_created_time_dict[item] / 1000)\n",
    "            # Keep the item only if it (a) matches a topic from the user's\n",
    "            # history, (b) never appeared in the click log (true cold start),\n",
    "            # (c) has a word count within 200 of the user's historical mean,\n",
    "            # and (d) was created within ~90 days of the user's last click.\n",
    "            if curr_item_type not in hist_item_type_set\\\n",
    "                or item in click_article_ids_set\\\n",
    "                or abs(curr_item_words - hist_mean_words) > 200\\\n",
    "                or abs((curr_item_created_time - hist_last_item_created_time).days) > 90:\n",
    "                continue\n",
    "            cold_start_user_items_dict[user].append((item, score))\n",
    "    # Cap the number of cold-start candidates kept per user.\n",
    "    cold_start_user_items_dict = {k: sorted(v, key=lambda x: x[1], reverse=True)[:recall_item_num]\n",
    "                                  for k, v in cold_start_user_items_dict.items()}\n",
    "    return cold_start_user_items_dict\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "def itemcsRecall(recall_item_num):\n",
    "    \"\"\"\n",
    "    Item cold-start recall: filter the over-recalled candidates with\n",
    "    rule-based conditions (topic / word count / creation time).\n",
    "    \"\"\"\n",
    "    trn_hist_click_df_ = trn_hist_click_df.copy()\n",
    "    trn_hist_click_df_ = trn_hist_click_df_.merge(item_info_df, how='left', on='click_article_id')\n",
    "    user_hist_item_typs_dict,\\\n",
    "    user_hist_item_ids_dict,\\\n",
    "    user_hist_item_words_dict,\\\n",
    "    user_last_item_created_time_dict = get_user_hist_item_info_dict(trn_hist_click_df_)\n",
    "    click_article_ids_set = get_click_article_ids_set(trn_hist_click_df)\n",
    "    # Many rules are applied inside cold_start_items, so the preceding recall\n",
    "    # stage should over-recall; otherwise most candidates get filtered away.\n",
    "    # NOTE(review): item_created_rawTime_dict is passed where the callee's\n",
    "    # parameter is named item_created_time_dict -- presumably raw epoch\n",
    "    # timestamps are needed for fromtimestamp(); confirm against its builder.\n",
    "    cold_start_user_items_dict = cold_start_items(user_recall_items_dict_byItemcs,\n",
    "                                                  user_hist_item_typs_dict,\n",
    "                                                  user_hist_item_words_dict,\n",
    "                                                  user_last_item_created_time_dict,\n",
    "                                                  item_type_dict,\n",
    "                                                  item_words_dict,\n",
    "                                                  item_created_rawTime_dict,\n",
    "                                                  click_article_ids_set,\n",
    "                                                  recall_item_num)\n",
    "    user_multi_recall_dict['cold_start_recall'] = cold_start_user_items_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 250000/250000 [01:03<00:00, 3923.69it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " topk:  10  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  20  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  30  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  40  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  50  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  60  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  70  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  80  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  90  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n",
      " topk:  100  :  hit_num:  1368 hit_rate:  0.00547 user_num :  250000\n"
     ]
    }
   ],
   "source": [
    "# Cold-start recall (keep up to 100 filtered candidates per user), then\n",
    "# hit-rate evaluation against each user's held-out last click.\n",
    "itemcsRecall(recall_item_num=100)\n",
    "metrics_recall(user_recall_items_dict=user_multi_recall_dict['cold_start_recall'],\n",
    "              trn_last_click_df=trn_last_click_df,\n",
    "              topk=100)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 多路召回合并\n",
    "多路召回合并就是将前面所有的召回策略得到的用户文章列表合并起来，下面是对前面所有召回结果的汇总\n",
    "1. 基于itemcf计算的item之间的相似度sim进行的召回 \n",
    "2. 基于embedding搜索得到的item之间的相似度进行的召回\n",
    "3. YoutubeDNN召回\n",
    "4. YoutubeDNN得到的user之间的相似度进行的召回\n",
    "5. 基于冷启动策略的召回\n",
    "\n",
    "**注意：**  \n",
    "在做召回评估的时候就会发现有些召回的效果不错，有些召回的效果很差，所以对每一路召回的结果，我们可以人为地定义一些权重，来做最终的相似度融合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [],
   "source": [
    "def combine_recall_results(user_multi_recall_dict:dict, weight_dict:dict=None, topk=25):\n",
    "    \"\"\"\n",
    "    Merge the per-method recall dicts into a single ranked list per user.\n",
    "    :param user_multi_recall_dict: {method: {user_id: [(item, score), ...]}}.\n",
    "        NOTE: each method's score lists are min-max normalised IN PLACE so\n",
    "        that scores from different methods are comparable before weighting.\n",
    "    :param weight_dict: optional {method: weight}; every method gets weight 1\n",
    "        when omitted.\n",
    "    :param topk: number of items kept per user in the merged result.\n",
    "    :return: {user_id: [(item, combined_score), ...]} sorted by score desc.\n",
    "    \"\"\"\n",
    "    final_recall_items_dict = {}\n",
    "    \n",
    "    # Min-max normalise one user's score list so different recall methods\n",
    "    # contribute on the same scale.\n",
    "    def norm_user_recall_items_sim(sorted_item_list:list):\n",
    "        # Fewer than two items: nothing to normalise. This can happen when the\n",
    "        # cold-start rules filtered almost all candidates away; further\n",
    "        # strategy-based filtering could be added here.\n",
    "        if len(sorted_item_list) < 2:\n",
    "            return sorted_item_list\n",
    "        min_sim = sorted_item_list[-1][1]\n",
    "        max_sim = sorted_item_list[0][1]\n",
    "        \n",
    "        norm_sorted_item_list = []\n",
    "        for item, score in sorted_item_list:\n",
    "            if max_sim > 0:\n",
    "                norm_score = 1.0 * (score - min_sim) / (max_sim - min_sim) if max_sim > min_sim else 1.0\n",
    "            else:\n",
    "                norm_score = 0.0\n",
    "            norm_sorted_item_list.append((item, norm_score))\n",
    "            \n",
    "        return norm_sorted_item_list\n",
    "    \n",
    "    print('多路召回合并...')\n",
    "    for method, user_recall_items in tqdm(user_multi_recall_dict.items()):\n",
    "        print(method + '...')\n",
    "        # Each recall method can be weighted differently in the final blend.\n",
    "        # BUGFIX-idiom: identity comparison `is None` instead of `== None`.\n",
    "        if weight_dict is None:\n",
    "            recall_method_weight = 1\n",
    "        else:\n",
    "            recall_method_weight = weight_dict[method]\n",
    "            \n",
    "        # Normalise each user's list (mutates the caller's dict in place).\n",
    "        for user_id, sorted_item_list in user_recall_items.items():\n",
    "            user_recall_items[user_id] = norm_user_recall_items_sim(sorted_item_list)\n",
    "        \n",
    "        # Accumulate the weighted, normalised scores per (user, item).\n",
    "        for user_id, sorted_item_list in user_recall_items.items():\n",
    "            final_recall_items_dict.setdefault(user_id, {})\n",
    "            for item, score in sorted_item_list:\n",
    "                final_recall_items_dict[user_id].setdefault(item, 0)\n",
    "                final_recall_items_dict[user_id][item] += recall_method_weight * score\n",
    "                \n",
    "    # Keep only the topk best items per user in the merged ranking.\n",
    "    final_recall_items_dict_rank = {}\n",
    "    for user, recall_item_dict in final_recall_items_dict.items():\n",
    "        final_recall_items_dict_rank[user] = sorted(recall_item_dict.items(), key=lambda x: x[1], reverse=True)[:topk]\n",
    "        \n",
    "    return final_recall_items_dict_rank"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [],
   "source": [
    "# All recall channels get equal weight for now; the per-channel weights\n",
    "# could instead be tuned from each method's standalone recall hit rate.\n",
    "recall_methods = ['itemcf_sim_itemcf_recall',\n",
    "                  'embedding_sim_item_recall',\n",
    "                  'youtubednn_recall',\n",
    "                  'youtubednn_usercf_recall',\n",
    "                  'cold_start_recall']\n",
    "weight_dict = {method: 1.0 for method in recall_methods}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\r",
      "  0%|          | 0/5 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "多路召回合并...\n",
      "itemcf_sim_itemcf_recall...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\r",
      " 20%|██        | 1/5 [00:57<03:49, 57.34s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "embedding_sim_item_recall...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\r",
      " 40%|████      | 2/5 [01:03<02:06, 42.05s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "youtubednn_recall...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\r",
      " 60%|██████    | 3/5 [01:15<01:05, 32.84s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "youtubednn_usercf_recall...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\r",
      " 80%|████████  | 4/5 [01:20<00:24, 24.60s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cold_start_recall...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 5/5 [01:20<00:00, 16.17s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " topk:  10  :  hit_num:  46855 hit_rate:  0.18742 user_num :  250000\n",
      " topk:  20  :  hit_num:  82375 hit_rate:  0.3295 user_num :  250000\n",
      " topk:  30  :  hit_num:  110607 hit_rate:  0.44243 user_num :  250000\n",
      " topk:  40  :  hit_num:  132582 hit_rate:  0.53033 user_num :  250000\n",
      " topk:  50  :  hit_num:  147243 hit_rate:  0.58897 user_num :  250000\n",
      " topk:  60  :  hit_num:  157402 hit_rate:  0.62961 user_num :  250000\n",
      " topk:  70  :  hit_num:  165414 hit_rate:  0.66166 user_num :  250000\n",
      " topk:  80  :  hit_num:  172070 hit_rate:  0.68828 user_num :  250000\n",
      " topk:  90  :  hit_num:  177459 hit_rate:  0.70984 user_num :  250000\n",
      " topk:  100  :  hit_num:  182002 hit_rate:  0.72801 user_num :  250000\n",
      " topk:  110  :  hit_num:  185899 hit_rate:  0.7436 user_num :  250000\n",
      " topk:  120  :  hit_num:  189397 hit_rate:  0.75759 user_num :  250000\n",
      " topk:  130  :  hit_num:  192342 hit_rate:  0.76937 user_num :  250000\n",
      " topk:  140  :  hit_num:  193530 hit_rate:  0.77412 user_num :  250000\n",
      " topk:  150  :  hit_num:  193530 hit_rate:  0.77412 user_num :  250000\n"
     ]
    }
   ],
   "source": [
    "# Merge the multi-channel recall lists (weighted sum of per-channel\n",
    "# normalized scores) and keep the top `topk` articles per user, then\n",
    "# evaluate hit rate of the merged list against each user's last click.\n",
    "topk = 150  # single constant so the merge size and the metrics cutoff stay in sync\n",
    "final_recall_items_dict_rank = combine_recall_results(user_multi_recall_dict, weight_dict, topk=topk)\n",
    "metrics_recall(final_recall_items_dict_rank, trn_last_click_df, topk)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "# BUG FIX: `if ~informal:` used bitwise NOT — ~False == -1 and ~True == -2\n",
    "# are both truthy, so the guard ran regardless of `informal`. `not` is the\n",
    "# boolean negation that was intended.\n",
    "if not informal:\n",
    "    # Persist the merged multi-channel recall results for the ranking stage;\n",
    "    # `with` guarantees the file handle is closed even if pickling fails.\n",
    "    with open(pathcache + 'final_recall_items_dict.pkl', 'wb') as f:\n",
    "        pickle.dump(final_recall_items_dict_rank, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
