{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import sys\n",
    "import time\n",
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def to_df(file_path):\n",
    "    with open(file_path, \"r\")as f:\n",
    "        i, df = 0, {}\n",
    "        for line in f:\n",
    "            df[i] = eval(line)\n",
    "            i += 1\n",
    "        # 使用字典里面在键作为索引\n",
    "        df = pd.DataFrame.from_dict(df, orient=\"index\")\n",
    "        return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Parse the 5-core Electronics reviews (one record per line)\n",
     "review_df = to_df(\"./reviews_Electronics_5.json\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Parse the Electronics item metadata (one record per line)\n",
     "meta_df = to_df('./meta_Electronics.json') "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>reviewerID</th>\n",
       "      <th>asin</th>\n",
       "      <th>reviewerName</th>\n",
       "      <th>helpful</th>\n",
       "      <th>reviewText</th>\n",
       "      <th>overall</th>\n",
       "      <th>summary</th>\n",
       "      <th>unixReviewTime</th>\n",
       "      <th>reviewTime</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>AO94DHGC771SJ</td>\n",
       "      <td>0528881469</td>\n",
       "      <td>amazdnu</td>\n",
       "      <td>[0, 0]</td>\n",
       "      <td>We got this GPS for my husband who is an (OTR)...</td>\n",
       "      <td>5.0</td>\n",
       "      <td>Gotta have GPS!</td>\n",
       "      <td>1370131200</td>\n",
       "      <td>06 2, 2013</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>AMO214LNFCEI4</td>\n",
       "      <td>0528881469</td>\n",
       "      <td>Amazon Customer</td>\n",
       "      <td>[12, 15]</td>\n",
       "      <td>I'm a professional OTR truck driver, and I bou...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>Very Disappointed</td>\n",
       "      <td>1290643200</td>\n",
       "      <td>11 25, 2010</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>A3N7T0DY83Y4IG</td>\n",
       "      <td>0528881469</td>\n",
       "      <td>C. A. Freeman</td>\n",
       "      <td>[43, 45]</td>\n",
       "      <td>Well, what can I say.  I've had this unit in m...</td>\n",
       "      <td>3.0</td>\n",
       "      <td>1st impression</td>\n",
       "      <td>1283990400</td>\n",
       "      <td>09 9, 2010</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>A1H8PY3QHMQQA0</td>\n",
       "      <td>0528881469</td>\n",
       "      <td>Dave M. Shaw \"mack dave\"</td>\n",
       "      <td>[9, 10]</td>\n",
       "      <td>Not going to write a long review, even thought...</td>\n",
       "      <td>2.0</td>\n",
       "      <td>Great grafics, POOR GPS</td>\n",
       "      <td>1290556800</td>\n",
       "      <td>11 24, 2010</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>A24EV6RXELQZ63</td>\n",
       "      <td>0528881469</td>\n",
       "      <td>Wayne Smith</td>\n",
       "      <td>[0, 0]</td>\n",
       "      <td>I've had mine for a year and here's what we go...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>Major issues, only excuses for support</td>\n",
       "      <td>1317254400</td>\n",
       "      <td>09 29, 2011</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       reviewerID        asin              reviewerName   helpful  \\\n",
       "0   AO94DHGC771SJ  0528881469                   amazdnu    [0, 0]   \n",
       "1   AMO214LNFCEI4  0528881469           Amazon Customer  [12, 15]   \n",
       "2  A3N7T0DY83Y4IG  0528881469             C. A. Freeman  [43, 45]   \n",
       "3  A1H8PY3QHMQQA0  0528881469  Dave M. Shaw \"mack dave\"   [9, 10]   \n",
       "4  A24EV6RXELQZ63  0528881469               Wayne Smith    [0, 0]   \n",
       "\n",
       "                                          reviewText  overall  \\\n",
       "0  We got this GPS for my husband who is an (OTR)...      5.0   \n",
       "1  I'm a professional OTR truck driver, and I bou...      1.0   \n",
       "2  Well, what can I say.  I've had this unit in m...      3.0   \n",
       "3  Not going to write a long review, even thought...      2.0   \n",
       "4  I've had mine for a year and here's what we go...      1.0   \n",
       "\n",
       "                                  summary  unixReviewTime   reviewTime  \n",
       "0                         Gotta have GPS!      1370131200   06 2, 2013  \n",
       "1                       Very Disappointed      1290643200  11 25, 2010  \n",
       "2                          1st impression      1283990400   09 9, 2010  \n",
       "3                 Great grafics, POOR GPS      1290556800  11 24, 2010  \n",
       "4  Major issues, only excuses for support      1317254400  09 29, 2011  "
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "review_df.head(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>asin</th>\n",
       "      <th>imUrl</th>\n",
       "      <th>description</th>\n",
       "      <th>categories</th>\n",
       "      <th>title</th>\n",
       "      <th>price</th>\n",
       "      <th>salesRank</th>\n",
       "      <th>related</th>\n",
       "      <th>brand</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0132793040</td>\n",
       "      <td>http://ecx.images-amazon.com/images/I/31JIPhp%...</td>\n",
       "      <td>The Kelby Training DVD Mastering Blend Modes i...</td>\n",
       "      <td>[[Electronics, Computers &amp; Accessories, Cables...</td>\n",
       "      <td>Kelby Training DVD: Mastering Blend Modes in A...</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0321732944</td>\n",
       "      <td>http://ecx.images-amazon.com/images/I/31uogm6Y...</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[[Electronics, Computers &amp; Accessories, Cables...</td>\n",
       "      <td>Kelby Training DVD: Adobe Photoshop CS5 Crash ...</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0439886341</td>\n",
       "      <td>http://ecx.images-amazon.com/images/I/51k0qa8f...</td>\n",
       "      <td>Digital Organizer and Messenger</td>\n",
       "      <td>[[Electronics, Computers &amp; Accessories, PDAs, ...</td>\n",
       "      <td>Digital Organizer and Messenger</td>\n",
       "      <td>8.15</td>\n",
       "      <td>{'Electronics': 144944}</td>\n",
       "      <td>{'also_viewed': ['0545016266', 'B009ECM8QY', '...</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0511189877</td>\n",
       "      <td>http://ecx.images-amazon.com/images/I/41HaAhbv...</td>\n",
       "      <td>The CLIKR-5 UR5U-8780L remote control is desig...</td>\n",
       "      <td>[[Electronics, Accessories &amp; Supplies, Audio &amp;...</td>\n",
       "      <td>CLIKR-5 Time Warner Cable Remote Control UR5U-...</td>\n",
       "      <td>23.36</td>\n",
       "      <td>NaN</td>\n",
       "      <td>{'also_viewed': ['B001KC08A4', 'B00KUL8O0W', '...</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0528881469</td>\n",
       "      <td>http://ecx.images-amazon.com/images/I/51FnRkJq...</td>\n",
       "      <td>Like its award-winning predecessor, the Intell...</td>\n",
       "      <td>[[Electronics, GPS &amp; Navigation, Vehicle GPS, ...</td>\n",
       "      <td>Rand McNally 528881469 7-inch Intelliroute TND...</td>\n",
       "      <td>299.99</td>\n",
       "      <td>NaN</td>\n",
       "      <td>{'also_viewed': ['B006ZOI9OY', 'B00C7FKT2A', '...</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "         asin                                              imUrl  \\\n",
       "0  0132793040  http://ecx.images-amazon.com/images/I/31JIPhp%...   \n",
       "1  0321732944  http://ecx.images-amazon.com/images/I/31uogm6Y...   \n",
       "2  0439886341  http://ecx.images-amazon.com/images/I/51k0qa8f...   \n",
       "3  0511189877  http://ecx.images-amazon.com/images/I/41HaAhbv...   \n",
       "4  0528881469  http://ecx.images-amazon.com/images/I/51FnRkJq...   \n",
       "\n",
       "                                         description  \\\n",
       "0  The Kelby Training DVD Mastering Blend Modes i...   \n",
       "1                                                NaN   \n",
       "2                    Digital Organizer and Messenger   \n",
       "3  The CLIKR-5 UR5U-8780L remote control is desig...   \n",
       "4  Like its award-winning predecessor, the Intell...   \n",
       "\n",
       "                                          categories  \\\n",
       "0  [[Electronics, Computers & Accessories, Cables...   \n",
       "1  [[Electronics, Computers & Accessories, Cables...   \n",
       "2  [[Electronics, Computers & Accessories, PDAs, ...   \n",
       "3  [[Electronics, Accessories & Supplies, Audio &...   \n",
       "4  [[Electronics, GPS & Navigation, Vehicle GPS, ...   \n",
       "\n",
       "                                               title   price  \\\n",
       "0  Kelby Training DVD: Mastering Blend Modes in A...     NaN   \n",
       "1  Kelby Training DVD: Adobe Photoshop CS5 Crash ...     NaN   \n",
       "2                    Digital Organizer and Messenger    8.15   \n",
       "3  CLIKR-5 Time Warner Cable Remote Control UR5U-...   23.36   \n",
       "4  Rand McNally 528881469 7-inch Intelliroute TND...  299.99   \n",
       "\n",
       "                 salesRank                                            related  \\\n",
       "0                      NaN                                                NaN   \n",
       "1                      NaN                                                NaN   \n",
       "2  {'Electronics': 144944}  {'also_viewed': ['0545016266', 'B009ECM8QY', '...   \n",
       "3                      NaN  {'also_viewed': ['B001KC08A4', 'B00KUL8O0W', '...   \n",
       "4                      NaN  {'also_viewed': ['B006ZOI9OY', 'B00C7FKT2A', '...   \n",
       "\n",
       "  brand  \n",
       "0   NaN  \n",
       "1   NaN  \n",
       "2   NaN  \n",
       "3   NaN  \n",
       "4   NaN  "
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "meta_df.head(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Keep only the goods (asin) that actually appear in the review data\n",
     "meta_df = meta_df[meta_df['asin'].isin(review_df['asin'].unique())]\n",
     "meta_df = meta_df.reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 重新考量ID\n",
    "review_df = review_df[[\"reviewerID\",\"asin\",\"unixReviewTime\"]]\n",
    "meta_df = meta_df[[\"asin\", \"categories\"]]\n",
    "#  目前分类都按照最后一类来分\n",
    "meta_df[\"categories\"] = meta_df[\"categories\"].map(lambda x: x[-1][-1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 定义一个有序的字段和索引的映射函数\n",
    "def build_map(df, col_name):\n",
    "    key = sorted(df[col_name].unique().tolist())\n",
    "    m  = dict(zip(key , range(len(key))))\n",
    "    df[col_name] = df[col_name].map(lambda x: m[x])\n",
    "    return m, key"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Re-index goods ids, categories and reviewer ids to dense integer ranges\n",
     "asin_map, asin_key = build_map(meta_df,\"asin\")\n",
     "cate_map, cate_key = build_map(meta_df, \"categories\")\n",
     "rev_map, rev_key = build_map(review_df, \"reviewerID\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "user_count, item_count, cate_count, example_count = len(rev_map),len(asin_map),len(cate_map),review_df.shape[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "user_count: 19168\titem_count: 971\tcate_count: 241       example_count: 24187\n"
     ]
    }
   ],
   "source": [
     "# Report the number of users, items, categories and samples\n",
     "print('user_count: %d\\titem_count: %d\\tcate_count: %d       example_count: %d' %(user_count, item_count, cate_count, example_count))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "meta_df = meta_df.sort_values(\"asin\").reset_index(drop=True)\n",
     "# Replace raw asin strings with their dense integer ids\n",
     "review_df[\"asin\"] = review_df[\"asin\"].map(lambda x: asin_map[x])\n",
     "# Order each user's reviews chronologically\n",
     "review_df = review_df.sort_values([\"reviewerID\", \"unixReviewTime\"]).reset_index(drop=True)\n",
     "# Re-select the columns (same columns/order as set earlier; kept for clarity)\n",
     "review_df = review_df[[\"reviewerID\",\"asin\",\"unixReviewTime\"]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>reviewerID</th>\n",
       "      <th>asin</th>\n",
       "      <th>unixReviewTime</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>75</td>\n",
       "      <td>1385337600</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>890</td>\n",
       "      <td>1358035200</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2</td>\n",
       "      <td>643</td>\n",
       "      <td>1361750400</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3</td>\n",
       "      <td>168</td>\n",
       "      <td>1390003200</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4</td>\n",
       "      <td>533</td>\n",
       "      <td>1350086400</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   reviewerID  asin  unixReviewTime\n",
       "0           0    75      1385337600\n",
       "1           1   890      1358035200\n",
       "2           2   643      1361750400\n",
       "3           3   168      1390003200\n",
       "4           4   533      1350086400"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "review_df.head(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>asin</th>\n",
       "      <th>categories</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>217</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>47</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2</td>\n",
       "      <td>167</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3</td>\n",
       "      <td>206</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4</td>\n",
       "      <td>210</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   asin  categories\n",
       "0     0         217\n",
       "1     1          47\n",
       "2     2         167\n",
       "3     3         206\n",
       "4     4         210"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "meta_df.head(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "cate_list = np.array([meta_df[\"categories\"][i] for i in range(len(asin_map))],dtype=np.int32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_set, test_set = [], []\n",
    "for reviewerID, hist in review_df.groupby(\"reviewerID\"):\n",
    "    # 分类购买物品的物品asin码\n",
    "    pos_list = hist[\"asin\"].tolist()\n",
    "    # 生成负样本函数。\n",
    "    def gen_neg():\n",
    "        neg = pos_list[0]\n",
    "        while neg in pos_list:\n",
    "            neg = random.randint(0, item_count - 1)\n",
    "        return neg\n",
    "    # 负样本\n",
    "    neg_list = [gen_neg() for i in range(len(pos_list))]\n",
    "    # 生成测试集和训练集\n",
    "    for i in range(1, len(pos_list)):\n",
    "        # 这里为什么不取闭区间\n",
    "        hist = pos_list[:i]\n",
    "        # 如果是倒数第二个元素的索引\n",
    "        if i == len(pos_list) -1:\n",
    "            lable = (pos_list[i], neg_list[i])\n",
    "            test_set.append((reviewerID, hist, lable))\n",
    "        else:\n",
    "            train_set.append((reviewerID, hist, pos_list[i], 1))\n",
    "            train_set.append((reviewerID, hist, neg_list[i], 0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Shuffle sample order before training/evaluation\n",
     "random.shuffle(train_set)\n",
     "random.shuffle(test_set)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Build the training and test samples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 训练集数据\n",
    "class DataInput(object):\n",
    "    def __init__(self,data, batch_size):\n",
    "        # len(data), batch_size= 2608764, 32\n",
    "        self.batch_size = batch_size\n",
    "        self.data = data\n",
    "        self.epoch_size = len(self.data) // self.batch_size\n",
    "        # 计算迭代次数\n",
    "        self.epoch_size + 1 if self.epoch_size * self.batch_size <len(self.data) else self.epoch_size\n",
    "        self.idx = 0\n",
    "    def __iter__(self):\n",
    "        return self\n",
    "    def __next__(self):\n",
    "        if self.idx == self.epoch_size:\n",
    "            raise StopIteration\n",
    "        start, end = self.idx*self.batch_size, min((self.idx+1)*self.batch_size, len(self.data))\n",
    "        b_data = self.data[start: end]\n",
    "        self.idx += 1\n",
    "        user_id, item_id, y, sample_len = [],[],[],[]\n",
    "        for i in b_data:\n",
    "            user_id.append(i[0])\n",
    "            item_id.append(i[2])\n",
    "            y.append(i[3])\n",
    "            sample_len.append(len(i[1]))\n",
    "        # 获取最长列表\n",
    "        max_sl = max(sample_len)\n",
    "        # 获取兴趣矩阵(以最长的为主) [用户个数，最长兴趣列表长度]\n",
    "        hist_i = np.zeros([len(b_data),max_sl], np.int64)\n",
    "        # 填充兴趣列表，并且用0填充的方法\n",
    "        k = 0\n",
    "        for l in b_data:\n",
    "            for j in range(len(l[1])):\n",
    "                hist_i[k][j] = l[1][j]\n",
    "            k += 1\n",
    "        return self.idx, (user_id, item_id, y, hist_i, sample_len)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Fix all random seeds for reproducibility\n",
     "# (user_count / item_count / cate_count / cate_list come from the cells above)\n",
     "random.seed(1234)\n",
     "np.random.seed(1234)\n",
     "tf.set_random_seed(1234)\n",
     "\n",
     "predict_users_num = 1000\n",
     "predict_batch_size = 32\n",
     "predict_ads_num = 100\n",
     "\n",
     "# NOTE(review): 'test_barch_size' looks like a typo for 'test_batch_size';\n",
     "# kept as-is in case later cells reference this name\n",
     "train_batch_size, test_barch_size = 32, 512"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Dice"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
     "def dice(_x, axis=-1, epsilon=0.000000001, name=''):\n",
     "    \"\"\"Dice activation from the DIN paper.\n",
     "\n",
     "    Normalizes `_x` over every axis except `axis`, gates the result with a\n",
     "    sigmoid of the normalized input scaled by a learnable per-channel `beta`,\n",
     "    and blends the kept/suppressed responses with a learnable per-channel\n",
     "    `alpha`. Variables are created in the root scope with AUTO_REUSE, so\n",
     "    repeated calls with the same `name` share parameters.\n",
     "    \"\"\"\n",
     "    with tf.variable_scope(name_or_scope='', reuse=tf.AUTO_REUSE):\n",
     "        alphas = tf.get_variable('alpha' + name, _x.get_shape()[-1],\n",
     "                                 initializer=tf.constant_initializer(0.0),\n",
     "                                 dtype=tf.float32)\n",
     "        beta = tf.get_variable('beta' + name, _x.get_shape()[-1],\n",
     "                               initializer=tf.constant_initializer(0.0),\n",
     "                               dtype=tf.float32)\n",
     "    input_shape = list(_x.get_shape())\n",
     "\n",
     "    # Reduce over all axes except the feature axis\n",
     "    reduction_axes = list(range(len(input_shape)))\n",
     "    del reduction_axes[axis]\n",
     "    # Shape used to broadcast per-feature statistics back onto _x\n",
     "    broadcast_shape = [1] * len(input_shape)\n",
     "    broadcast_shape[axis] = input_shape[axis]\n",
     "    mean = tf.reduce_mean(_x, axis=reduction_axes)\n",
     "    brodcast_mean = tf.reshape(mean, broadcast_shape)\n",
     "    # epsilon is folded into the variance before sqrt for numerical stability\n",
     "    std = tf.reduce_mean(tf.square(_x - brodcast_mean) + epsilon, axis=reduction_axes)\n",
     "    std = tf.sqrt(std)\n",
     "    brodcast_std = tf.reshape(std, broadcast_shape)\n",
     "    x_normed = (_x - brodcast_mean) / (brodcast_std + epsilon)\n",
     "    # Gate p(x): probability of keeping the raw activation\n",
     "    x_p = tf.sigmoid(beta * x_normed)\n",
     "\n",
     "    return alphas * (1.0 - x_p) * _x + x_p * _x"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# attention"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
     "def attention(queries, keys, keys_length):\n",
     "    '''\n",
     "      Activation-unit attention from the DIN paper.\n",
     "      queries:     [B, H]      item_emb (candidate item)\n",
     "      keys:        [B, T, H]   hist_emb (behavior history)\n",
     "      keys_length: [B]         sample_len (true history length per user)\n",
     "      returns:     [B, 1, H]   history pooled by relevance to the query\n",
     "    '''\n",
     "    # H: embedding width of the query\n",
     "    queries_hidden_units = queries.get_shape().as_list()[-1]\n",
     "    # Tile the query once per history position: [B, T*H]\n",
     "    queries = tf.tile(queries, [1, tf.shape(keys)[1]])\n",
     "    # Reshape to [B, T, H] so it lines up with keys\n",
     "    queries = tf.reshape(queries, [-1, tf.shape(keys)[1], queries_hidden_units])\n",
     "    # Activation-unit input: query, key, difference and product -> [B, T, 4H]\n",
     "    din_all = tf.concat([queries, keys, queries - keys, queries * keys], axis=-1)\n",
     "    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att', reuse=tf.AUTO_REUSE)\n",
     "    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att', reuse=tf.AUTO_REUSE)\n",
     "    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att', reuse=tf.AUTO_REUSE)\n",
     "    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(keys)[1]])\n",
     "    # Attention logits, [B, 1, T]\n",
     "    outputs = d_layer_3_all\n",
     "    key_masks = tf.sequence_mask(keys_length, tf.shape(keys)[1])  # [B, T]\n",
     "    # [B, 1, T]\n",
     "    key_masks = tf.expand_dims(key_masks, 1)\n",
     "    # Padded positions get a very large negative logit (not 0) so that\n",
     "    # softmax drives their weight to ~0; shape [B, 1, T]\n",
     "    paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)\n",
     "    # [B, 1, T]\n",
     "    outputs = tf.where(key_masks, outputs, paddings)\n",
     "    # Scale by sqrt(H), as in scaled dot-product attention\n",
     "    outputs = outputs / (keys.get_shape().as_list()[-1] ** 0.5)\n",
     "    outputs = tf.nn.softmax(outputs)  # [B, 1, T]\n",
     "    # Weighted sum over the history: batched matmul on the last two dims,\n",
     "    # [B, 1, T] x [B, T, H] = [B, 1, H]\n",
     "    outputs = tf.matmul(outputs, keys)  # [B, 1, H]\n",
     "\n",
     "    return outputs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DIN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Model(object):\n",
     "    \"\"\"DIN-style ranking model (TF1 graph).\n",
     "\n",
     "    Inputs are fed as: user ids, candidate item ids, labels in {0,1}, padded\n",
     "    behavior-history matrix, per-user history lengths and learning rate.\n",
     "    \"\"\"\n",
     "    def __init__(self,user_count,item_count,cate_count,cate_list):\n",
     "        self.u = tf.placeholder(tf.int32, [None, ])# user_id (fed but not used in the graph below)\n",
     "        self.i = tf.placeholder(tf.int32, [None, ])# item_id\n",
     "        self.j = tf.placeholder(tf.int32, [None, ])# not_like_id (NOTE(review): never fed or used)\n",
     "        self.y = tf.placeholder(tf.float32, [None, ]) # label\n",
     "        self.hist_i = tf.placeholder(tf.int32, [None,None])# [Batch, like] \n",
     "        self.sl = tf.placeholder(tf.int32,[None, ]) # sample len list\n",
     "        self.lr = tf.placeholder(tf.float64, []) # learning rate\n",
     "        # Embedding width; item and category halves concat to this size\n",
     "        hidden_units = 128\n",
     "        # NOTE(review): scope name 'wight' looks like a typo for 'weight';\n",
     "        # renaming would change checkpoint variable names, so it is kept\n",
     "        with tf.variable_scope(\"wight\",reuse=tf.AUTO_REUSE):\n",
     "        # item embedding, per-item bias and category embedding tables\n",
     "            item_emb_w = tf.get_variable(\"item_emb_w\", [item_count, hidden_units // 2])\n",
     "            item_b = tf.get_variable(\"item_b\", [item_count], initializer=tf.constant_initializer(0.0))\n",
     "            cate_emb_w = tf.get_variable(\"cate_emb_w\", [cate_count, hidden_units // 2])\n",
     "        cate_list = tf.convert_to_tensor(cate_list, dtype=tf.int64)\n",
     "        # Category id and bias of each candidate item\n",
     "        ic = tf.gather(cate_list,self.i)\n",
     "        i_b = tf.gather(item_b, self.i)\n",
     "        # Candidate item embedding [B, H]: concat(item emb, category emb)\n",
     "        item_emb = tf.concat(\n",
     "            values=[tf.nn.embedding_lookup(item_emb_w, self.i), tf.nn.embedding_lookup(cate_emb_w,ic)],\n",
     "            axis=1\n",
     "        )\n",
     "        hc = tf.gather(cate_list,self.hist_i)\n",
     "        # History embeddings [B, T, H]: concat(item emb, category emb)\n",
     "        hist_emb = tf.concat(\n",
     "            values=[tf.nn.embedding_lookup(item_emb_w, self.hist_i),tf.nn.embedding_lookup(cate_emb_w,hc)],\n",
     "            axis = 2\n",
     "        )\n",
     "        # DIN attention pools the history w.r.t. the candidate -> user vector\n",
     "        hist = attention(item_emb,hist_emb,self.sl)\n",
     "        hist = tf.layers.batch_normalization(inputs=hist)\n",
     "        hist = tf.reshape(hist, [-1, hidden_units])\n",
     "        hist = tf.layers.dense(hist, hidden_units)\n",
     "        user_emb = hist\n",
     "        # MLP input: user vector and candidate embedding, [B, 2H]\n",
     "        base_i = tf.concat([user_emb, item_emb], axis = -1)\n",
     "        base_i = tf.layers.batch_normalization(inputs=base_i, name=\"b1\",reuse=tf.AUTO_REUSE)\n",
     "        # NOTE(review): d_layer_1_i and d_layer_2_i are computed but never\n",
     "        # used, and f2/f3 read base_i rather than the previous layer —\n",
     "        # the final logit depends only on d_layer_3_i; confirm intent\n",
     "        d_layer_1_i = tf.layers.dense(base_i, 80, activation=None, name=\"f1\",reuse=tf.AUTO_REUSE)\n",
     "        d_layer_1_i = dice(d_layer_1_i, name=\"dice_1\")\n",
     "        d_layer_2_i = tf.layers.dense(base_i, 40, activation=None, name=\"f2\",reuse=tf.AUTO_REUSE)\n",
     "        d_layer_2_i = dice(d_layer_2_i, name = \"dice_2\")\n",
     "        d_layer_3_i = tf.layers.dense(base_i, 1, activation=None, name=\"f3\",reuse=tf.AUTO_REUSE)\n",
     "        # Flatten logits to [B]\n",
     "        d_layer_3_i = tf.reshape(d_layer_3_i,[-1])\n",
     "        self.y_p = i_b + d_layer_3_i\n",
     "        # Global step across all batches\n",
     "        self.global_step = tf.Variable(0, trainable=False, name='global_step')\n",
     "        # Epoch counter\n",
     "        self.global_epoch_step = tf.Variable(0,trainable=False, name=\"global_epoch_step\")\n",
     "        # NOTE(review): tf.assign(..., 1) sets the counter to the constant 1,\n",
     "        # not an increment — confirm this is intended\n",
     "        self.global_epoch_step_op = tf.assign(self.global_epoch_step, 1)\n",
     "        self.loss = tf.reduce_mean(\n",
     "            tf.nn.sigmoid_cross_entropy_with_logits(\n",
     "            logits = self.y_p,\n",
     "            labels = self.y\n",
     "            )\n",
     "        )\n",
     "        \n",
     "        trainable_params = tf.trainable_variables()\n",
     "        self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.lr)\n",
     "        gradients = tf.gradients(self.loss, trainable_params)\n",
     "        # Clip gradients by global norm to stabilize SGD\n",
     "        clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)\n",
     "        self.train_op = self.opt.apply_gradients(\n",
     "            zip(clip_gradients, trainable_params),global_step=self.global_step\n",
     "        )\n",
     "        \n",
     "    def train(self, sess, item, lr):\n",
     "        \"\"\"Run one training step on a DataInput batch.\n",
     "\n",
     "        `item` is (user_ids, item_ids, labels, hist_matrix, hist_lens) as\n",
     "        produced by DataInput.__next__; returns the batch loss.\n",
     "        \"\"\"\n",
     "        loss, _ = sess.run([self.loss, self.train_op], feed_dict={\n",
     "            self.u: item[0],\n",
     "            self.i: item[1],\n",
     "            self.y: item[2],\n",
     "            self.hist_i: item[3],\n",
     "            self.sl: item[4],\n",
     "            self.lr: lr,\n",
     "        })\n",
     "        return loss\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "gpu_options = tf.GPUOptions(allow_growth=True)\n",
     "with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n",
     "    model = Model(user_count,item_count,cate_count,cate_list)\n",
     "    # Initialize global and local variables\n",
     "    sess.run(tf.global_variables_initializer())\n",
     "    sess.run(tf.local_variables_initializer())\n",
     "    # TODO: add an eval() pass on test_set\n",
     "    sys.stdout.flush() # flush so progress shows promptly (limited effect on Windows)\n",
     "    lr = 1.0\n",
     "    start_time = time.time()\n",
     "    for _ in range(50):\n",
     "        random.shuffle(train_set)\n",
     "        # Number of batches per epoch\n",
     "        # NOTE(review): epoch_size is computed here but never used below\n",
     "        epoch_size = round(len(train_set) / train_batch_size)\n",
     "        loss_sum = 0.0\n",
     "        for idx, item in DataInput(train_set, train_batch_size):\n",
     "            loss = model.train(sess, item, lr)\n",
     "            loss_sum += loss\n",
     "            # Every 1000 steps print the mean loss over that window\n",
     "            # (each .eval() call is an extra session run)\n",
     "            if model.global_step.eval()%1000 == 0:\n",
     "                print(\"Epoch:  %d, Global_step:  %d, Train_loss: %.4f\"%(\n",
     "                    model.global_epoch_step.eval(),\n",
     "                    model.global_step.eval(),\n",
     "                    loss_sum / 1000\n",
     "                ))\n",
     "                sys.stdout.flush()\n",
     "                loss_sum = 0.0\n",
     "            # NOTE(review): lr = 0.0 freezes all further training after\n",
     "            # step 336000 — confirm this schedule is intended\n",
     "            if model.global_step.eval() % 336000 == 0:\n",
     "                lr = 0.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:dl_tensorflow]",
   "language": "python",
   "name": "conda-env-dl_tensorflow-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "metadata": {
     "collapsed": false
    },
    "source": []
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
